author    Ryan Dahl <ry@tinyclouds.org>  2011-06-29 17:26:51 +0200
committer Ryan Dahl <ry@tinyclouds.org>  2011-06-29 17:26:51 +0200
commit    33af2720f26c2b25bc7f75ce7eb454ff99db6d35 (patch)
tree      9a38f0c96420edf503eebd6325dd8d2d8249f653 /deps/v8/src
parent    6afdca885adeeeed9eef8cbb01c3d97af0bc084d (diff)
download  node-33af2720f26c2b25bc7f75ce7eb454ff99db6d35.tar.gz
Upgrade V8 to 3.4.8
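
The recurring pattern in this upgrade is V8's multi-isolate refactor: process-global
statics such as Top::Throw(...), Heap::undefined_value(), and the static
Factory/PreallocatedStorage interfaces become members reached through an explicit
Isolate* (see the accessors.cc, allocation*, and api.cc hunks below). The following
is a toy sketch of that migration's shape, not code from the patch — the types are
stand-ins, and the real Isolate::Current() resolves through thread-local storage:

```cpp
#include <cstdio>

// Stand-ins for the real V8 classes; only the ownership shape matters here.
struct Heap {
  const char* undefined_value() { return "undefined"; }
};

class Isolate {
 public:
  Heap* heap() { return &heap_; }
  // The patch resolves this via TLS so each thread can carry its own
  // isolate; a single static instance stands in for that in this sketch.
  static Isolate* Current() { static Isolate isolate; return &isolate; }
 private:
  Heap heap_;
};

const char* GetUndefined() {
  // Old shape (pre-3.4): a bare static call, Heap::undefined_value().
  // New shape (this patch): fetch the isolate, then ask its heap.
  Isolate* isolate = Isolate::Current();
  return isolate->heap()->undefined_value();
}

int main() { std::puts(GetUndefined()); }
```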
Diffstat (limited to 'deps/v8/src')
-rwxr-xr-x deps/v8/src/SConscript | 86
-rw-r--r-- deps/v8/src/accessors.cc | 309
-rw-r--r-- deps/v8/src/accessors.h | 2
-rw-r--r-- deps/v8/src/allocation-inl.h (renamed from deps/v8/src/x64/codegen-x64-inl.h) | 19
-rw-r--r-- deps/v8/src/allocation.cc | 82
-rw-r--r-- deps/v8/src/allocation.h | 51
-rw-r--r-- deps/v8/src/api.cc | 3477
-rw-r--r-- deps/v8/src/api.h | 111
-rw-r--r-- deps/v8/src/apinatives.js | 10
-rw-r--r-- deps/v8/src/apiutils.h | 7
-rw-r--r-- deps/v8/src/arguments.h | 29
-rw-r--r-- deps/v8/src/arm/assembler-arm-inl.h | 19
-rw-r--r-- deps/v8/src/arm/assembler-arm.cc | 692
-rw-r--r-- deps/v8/src/arm/assembler-arm.h | 253
-rw-r--r-- deps/v8/src/arm/builtins-arm.cc | 170
-rw-r--r-- deps/v8/src/arm/code-stubs-arm.cc | 3702
-rw-r--r-- deps/v8/src/arm/code-stubs-arm.h | 615
-rw-r--r-- deps/v8/src/arm/codegen-arm-inl.h | 48
-rw-r--r-- deps/v8/src/arm/codegen-arm.cc | 7360
-rw-r--r-- deps/v8/src/arm/codegen-arm.h | 512
-rw-r--r-- deps/v8/src/arm/constants-arm.h | 24
-rw-r--r-- deps/v8/src/arm/cpu-arm.cc | 93
-rw-r--r-- deps/v8/src/arm/debug-arm.cc | 14
-rw-r--r-- deps/v8/src/arm/deoptimizer-arm.cc | 80
-rw-r--r-- deps/v8/src/arm/disasm-arm.cc | 145
-rw-r--r-- deps/v8/src/arm/frames-arm.h | 5
-rw-r--r-- deps/v8/src/arm/full-codegen-arm.cc | 1069
-rw-r--r-- deps/v8/src/arm/ic-arm.cc | 685
-rw-r--r-- deps/v8/src/arm/jump-target-arm.cc | 174
-rw-r--r-- deps/v8/src/arm/lithium-arm.cc | 648
-rw-r--r-- deps/v8/src/arm/lithium-arm.h | 570
-rw-r--r-- deps/v8/src/arm/lithium-codegen-arm.cc | 1832
-rw-r--r-- deps/v8/src/arm/lithium-codegen-arm.h | 52
-rw-r--r-- deps/v8/src/arm/lithium-gap-resolver-arm.cc | 2
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.cc | 844
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.h | 217
-rw-r--r-- deps/v8/src/arm/regexp-macro-assembler-arm.cc | 45
-rw-r--r-- deps/v8/src/arm/regexp-macro-assembler-arm.h | 3
-rw-r--r-- deps/v8/src/arm/register-allocator-arm-inl.h | 100
-rw-r--r-- deps/v8/src/arm/simulator-arm.cc | 555
-rw-r--r-- deps/v8/src/arm/simulator-arm.h | 87
-rw-r--r-- deps/v8/src/arm/stub-cache-arm.cc | 1558
-rw-r--r-- deps/v8/src/arm/virtual-frame-arm-inl.h | 59
-rw-r--r-- deps/v8/src/arm/virtual-frame-arm.cc | 843
-rw-r--r-- deps/v8/src/arm/virtual-frame-arm.h | 520
-rw-r--r-- deps/v8/src/array.js | 189
-rw-r--r-- deps/v8/src/assembler.cc | 593
-rw-r--r-- deps/v8/src/assembler.h | 334
-rw-r--r-- deps/v8/src/ast-inl.h | 11
-rw-r--r-- deps/v8/src/ast.cc | 393
-rw-r--r-- deps/v8/src/ast.h | 459
-rw-r--r-- deps/v8/src/atomicops.h | 2
-rw-r--r-- deps/v8/src/atomicops_internals_mips_gcc.h | 169
-rw-r--r-- deps/v8/src/atomicops_internals_x86_gcc.cc | 13
-rw-r--r-- deps/v8/src/atomicops_internals_x86_gcc.h | 6
-rw-r--r-- deps/v8/src/bootstrapper.cc | 1210
-rw-r--r-- deps/v8/src/bootstrapper.h | 123
-rw-r--r-- deps/v8/src/builtins.cc | 580
-rw-r--r-- deps/v8/src/builtins.h | 294
-rw-r--r-- deps/v8/src/char-predicates.h | 4
-rw-r--r-- deps/v8/src/checks.cc | 4
-rw-r--r-- deps/v8/src/checks.h | 4
-rw-r--r-- deps/v8/src/code-stubs.cc | 73
-rw-r--r-- deps/v8/src/code-stubs.h | 226
-rw-r--r-- deps/v8/src/code.h | 2
-rw-r--r-- deps/v8/src/codegen.cc | 315
-rw-r--r-- deps/v8/src/codegen.h | 163
-rw-r--r-- deps/v8/src/compilation-cache.cc | 298
-rw-r--r-- deps/v8/src/compilation-cache.h | 235
-rwxr-xr-x deps/v8/src/compiler.cc | 340
-rw-r--r-- deps/v8/src/compiler.h | 64
-rw-r--r-- deps/v8/src/contexts.cc | 172
-rw-r--r-- deps/v8/src/contexts.h | 98
-rw-r--r-- deps/v8/src/conversions-inl.h | 4
-rw-r--r-- deps/v8/src/conversions.cc | 132
-rw-r--r-- deps/v8/src/conversions.h | 23
-rw-r--r-- deps/v8/src/counters.cc | 23
-rw-r--r-- deps/v8/src/counters.h | 44
-rw-r--r-- deps/v8/src/cpu-profiler-inl.h | 22
-rw-r--r-- deps/v8/src/cpu-profiler.cc | 202
-rw-r--r-- deps/v8/src/cpu-profiler.h | 70
-rw-r--r-- deps/v8/src/cpu.h | 4
-rw-r--r-- deps/v8/src/d8-debug.cc | 2
-rw-r--r-- deps/v8/src/d8-posix.cc | 13
-rw-r--r-- deps/v8/src/d8-readline.cc | 2
-rw-r--r-- deps/v8/src/d8.cc | 443
-rw-r--r-- deps/v8/src/d8.gyp | 8
-rw-r--r-- deps/v8/src/d8.h | 26
-rw-r--r-- deps/v8/src/d8.js | 10
-rw-r--r-- deps/v8/src/data-flow.cc | 479
-rw-r--r-- deps/v8/src/data-flow.h | 182
-rw-r--r-- deps/v8/src/date.js | 4
-rw-r--r-- deps/v8/src/dateparser-inl.h | 8
-rw-r--r-- deps/v8/src/dateparser.h | 13
-rw-r--r-- deps/v8/src/debug-agent.cc | 23
-rw-r--r-- deps/v8/src/debug-agent.h | 18
-rw-r--r-- deps/v8/src/debug-debugger.js | 78
-rw-r--r-- deps/v8/src/debug.cc | 572
-rw-r--r-- deps/v8/src/debug.h | 475
-rw-r--r-- deps/v8/src/deoptimizer.cc | 242
-rw-r--r-- deps/v8/src/deoptimizer.h | 134
-rw-r--r-- deps/v8/src/disasm.h | 3
-rw-r--r-- deps/v8/src/disassembler.cc | 27
-rw-r--r-- deps/v8/src/disassembler.h | 2
-rw-r--r-- deps/v8/src/execution.cc | 391
-rw-r--r-- deps/v8/src/execution.h | 133
-rw-r--r-- deps/v8/src/extensions/experimental/break-iterator.cc | 252
-rw-r--r-- deps/v8/src/extensions/experimental/break-iterator.h | 89
-rw-r--r-- deps/v8/src/extensions/experimental/collator.cc | 222
-rw-r--r-- deps/v8/src/extensions/experimental/collator.h | 68
-rw-r--r-- deps/v8/src/extensions/experimental/datetime-format.cc | 384
-rw-r--r-- deps/v8/src/extensions/experimental/datetime-format.h | 83
-rw-r--r-- deps/v8/src/extensions/experimental/experimental.gyp | 54
-rw-r--r-- deps/v8/src/extensions/experimental/i18n-extension.cc | 230
-rw-r--r-- deps/v8/src/extensions/experimental/i18n-extension.h | 18
-rw-r--r-- deps/v8/src/extensions/experimental/i18n-js2c.py | 126
-rw-r--r-- deps/v8/src/extensions/experimental/i18n-locale.cc | 111
-rw-r--r-- deps/v8/src/extensions/experimental/i18n-locale.h (renamed from deps/v8/src/mips/register-allocator-mips.h) | 34
-rw-r--r-- deps/v8/src/extensions/experimental/i18n-natives.h (renamed from deps/v8/src/ia32/register-allocator-ia32.h) | 16
-rw-r--r-- deps/v8/src/extensions/experimental/i18n-utils.cc (renamed from deps/v8/src/mips/fast-codegen-mips.cc) | 78
-rw-r--r-- deps/v8/src/extensions/experimental/i18n-utils.h | 69
-rw-r--r-- deps/v8/src/extensions/experimental/i18n.js | 380
-rw-r--r-- deps/v8/src/extensions/experimental/language-matcher.cc | 252
-rw-r--r-- deps/v8/src/extensions/experimental/language-matcher.h | 95
-rw-r--r-- deps/v8/src/extensions/experimental/number-format.cc | 356
-rw-r--r-- deps/v8/src/extensions/experimental/number-format.h | 71
-rw-r--r-- deps/v8/src/extensions/externalize-string-extension.cc | 4
-rw-r--r-- deps/v8/src/extensions/gc-extension.cc | 2
-rw-r--r-- deps/v8/src/factory.cc | 501
-rw-r--r-- deps/v8/src/factory.h | 352
-rw-r--r-- deps/v8/src/flag-definitions.h | 58
-rw-r--r-- deps/v8/src/frame-element.h | 277
-rw-r--r-- deps/v8/src/frames-inl.h | 36
-rw-r--r-- deps/v8/src/frames.cc | 194
-rw-r--r-- deps/v8/src/frames.h | 109
-rw-r--r-- deps/v8/src/full-codegen.cc | 490
-rw-r--r-- deps/v8/src/full-codegen.h | 139
-rw-r--r-- deps/v8/src/func-name-inferrer.cc | 48
-rw-r--r-- deps/v8/src/func-name-inferrer.h | 28
-rw-r--r-- deps/v8/src/gdb-jit.cc | 11
-rw-r--r-- deps/v8/src/gdb-jit.h | 5
-rw-r--r-- deps/v8/src/global-handles.cc | 733
-rw-r--r-- deps/v8/src/global-handles.h | 226
-rw-r--r-- deps/v8/src/globals.h | 4
-rw-r--r-- deps/v8/src/handles-inl.h | 111
-rw-r--r-- deps/v8/src/handles.cc | 407
-rw-r--r-- deps/v8/src/handles.h | 117
-rw-r--r-- deps/v8/src/hashmap.h | 10
-rw-r--r-- deps/v8/src/heap-inl.h | 167
-rw-r--r-- deps/v8/src/heap-profiler.cc | 1057
-rw-r--r-- deps/v8/src/heap-profiler.h | 317
-rw-r--r-- deps/v8/src/heap.cc | 1661
-rw-r--r-- deps/v8/src/heap.h | 1290
-rw-r--r-- deps/v8/src/hydrogen-instructions.cc | 675
-rw-r--r-- deps/v8/src/hydrogen-instructions.h | 1419
-rw-r--r-- deps/v8/src/hydrogen.cc | 4691
-rw-r--r-- deps/v8/src/hydrogen.h | 467
-rw-r--r-- deps/v8/src/ia32/assembler-ia32-inl.h | 45
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.cc | 580
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.h | 150
-rw-r--r-- deps/v8/src/ia32/builtins-ia32.cc | 292
-rw-r--r-- deps/v8/src/ia32/code-stubs-ia32.cc | 3510
-rw-r--r-- deps/v8/src/ia32/code-stubs-ia32.h | 344
-rw-r--r-- deps/v8/src/ia32/codegen-ia32-inl.h | 46
-rw-r--r-- deps/v8/src/ia32/codegen-ia32.cc | 10115
-rw-r--r-- deps/v8/src/ia32/codegen-ia32.h | 723
-rw-r--r-- deps/v8/src/ia32/cpu-ia32.cc | 11
-rw-r--r-- deps/v8/src/ia32/debug-ia32.cc | 17
-rw-r--r-- deps/v8/src/ia32/deoptimizer-ia32.cc | 68
-rw-r--r-- deps/v8/src/ia32/disasm-ia32.cc | 157
-rw-r--r-- deps/v8/src/ia32/frames-ia32.h | 10
-rw-r--r-- deps/v8/src/ia32/full-codegen-ia32.cc | 1138
-rw-r--r-- deps/v8/src/ia32/ic-ia32.cc | 776
-rw-r--r-- deps/v8/src/ia32/jump-target-ia32.cc | 437
-rw-r--r-- deps/v8/src/ia32/lithium-codegen-ia32.cc | 1639
-rw-r--r-- deps/v8/src/ia32/lithium-codegen-ia32.h | 38
-rw-r--r-- deps/v8/src/ia32/lithium-gap-resolver-ia32.cc | 33
-rw-r--r-- deps/v8/src/ia32/lithium-ia32.cc | 615
-rw-r--r-- deps/v8/src/ia32/lithium-ia32.h | 550
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.cc | 540
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.h | 172
-rw-r--r-- deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 66
-rw-r--r-- deps/v8/src/ia32/regexp-macro-assembler-ia32.h | 5
-rw-r--r-- deps/v8/src/ia32/register-allocator-ia32-inl.h | 82
-rw-r--r-- deps/v8/src/ia32/register-allocator-ia32.cc | 157
-rw-r--r-- deps/v8/src/ia32/simulator-ia32.h | 12
-rw-r--r-- deps/v8/src/ia32/stub-cache-ia32.cc | 1439
-rw-r--r-- deps/v8/src/ia32/virtual-frame-ia32.cc | 1360
-rw-r--r-- deps/v8/src/ia32/virtual-frame-ia32.h | 646
-rw-r--r-- deps/v8/src/ic-inl.h | 5
-rw-r--r-- deps/v8/src/ic.cc | 1383
-rw-r--r-- deps/v8/src/ic.h | 365
-rw-r--r-- deps/v8/src/interpreter-irregexp.cc | 46
-rw-r--r-- deps/v8/src/interpreter-irregexp.h | 3
-rw-r--r-- deps/v8/src/isolate-inl.h (renamed from deps/v8/src/frame-element.cc) | 22
-rw-r--r-- deps/v8/src/isolate.cc | 1871
-rw-r--r-- deps/v8/src/isolate.h | 1363
-rw-r--r-- deps/v8/src/json-parser.h | 598
-rw-r--r-- deps/v8/src/json.js | 69
-rw-r--r-- deps/v8/src/jsregexp.cc | 408
-rw-r--r-- deps/v8/src/jsregexp.h | 24
-rw-r--r-- deps/v8/src/jump-target-heavy-inl.h | 51
-rw-r--r-- deps/v8/src/jump-target-heavy.cc | 430
-rw-r--r-- deps/v8/src/jump-target-heavy.h | 244
-rw-r--r-- deps/v8/src/jump-target-light-inl.h | 56
-rw-r--r-- deps/v8/src/jump-target-light.cc | 111
-rw-r--r-- deps/v8/src/jump-target-light.h | 193
-rw-r--r-- deps/v8/src/jump-target.h | 90
-rw-r--r-- deps/v8/src/list-inl.h | 12
-rw-r--r-- deps/v8/src/list.h | 14
-rw-r--r-- deps/v8/src/lithium-allocator-inl.h | 58
-rw-r--r-- deps/v8/src/lithium-allocator.cc | 139
-rw-r--r-- deps/v8/src/lithium-allocator.h | 50
-rw-r--r-- deps/v8/src/lithium.cc | 27
-rw-r--r-- deps/v8/src/lithium.h | 65
-rw-r--r-- deps/v8/src/liveedit.cc | 395
-rw-r--r-- deps/v8/src/liveedit.h | 17
-rw-r--r-- deps/v8/src/liveobjectlist.cc | 141
-rw-r--r-- deps/v8/src/liveobjectlist.h | 18
-rw-r--r-- deps/v8/src/log-utils.cc | 184
-rw-r--r-- deps/v8/src/log-utils.h | 73
-rw-r--r-- deps/v8/src/log.cc | 993
-rw-r--r-- deps/v8/src/log.h | 402
-rw-r--r-- deps/v8/src/macros.py | 16
-rw-r--r-- deps/v8/src/mark-compact.cc | 1199
-rw-r--r-- deps/v8/src/mark-compact.h | 233
-rw-r--r-- deps/v8/src/messages.cc | 61
-rw-r--r-- deps/v8/src/messages.h | 7
-rw-r--r-- deps/v8/src/messages.js | 85
-rw-r--r-- deps/v8/src/mips/assembler-mips-inl.h | 183
-rw-r--r-- deps/v8/src/mips/assembler-mips.cc | 1416
-rw-r--r-- deps/v8/src/mips/assembler-mips.h | 855
-rw-r--r-- deps/v8/src/mips/builtins-mips.cc | 1543
-rw-r--r-- deps/v8/src/mips/code-stubs-mips.cc | 6896
-rw-r--r-- deps/v8/src/mips/code-stubs-mips.h | 668
-rw-r--r-- deps/v8/src/mips/codegen-mips.cc | 1401
-rw-r--r-- deps/v8/src/mips/codegen-mips.h | 383
-rw-r--r-- deps/v8/src/mips/constants-mips.cc | 76
-rw-r--r-- deps/v8/src/mips/constants-mips.h | 360
-rw-r--r-- deps/v8/src/mips/cpu-mips.cc | 33
-rw-r--r-- deps/v8/src/mips/debug-mips.cc | 207
-rw-r--r-- deps/v8/src/mips/deoptimizer-mips.cc (renamed from deps/v8/src/arm/register-allocator-arm.cc) | 60
-rw-r--r-- deps/v8/src/mips/disasm-mips.cc | 609
-rw-r--r-- deps/v8/src/mips/frames-mips.cc | 53
-rw-r--r-- deps/v8/src/mips/frames-mips.h | 83
-rw-r--r-- deps/v8/src/mips/full-codegen-mips.cc | 4182
-rw-r--r-- deps/v8/src/mips/ic-mips.cc | 1654
-rw-r--r-- deps/v8/src/mips/jump-target-mips.cc | 175
-rw-r--r-- deps/v8/src/mips/lithium-codegen-mips.h (renamed from deps/v8/src/arm/register-allocator-arm.h) | 39
-rw-r--r-- deps/v8/src/mips/lithium-mips.h | 307
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.cc | 3814
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.h | 1112
-rw-r--r-- deps/v8/src/mips/regexp-macro-assembler-mips.cc | 1251
-rw-r--r-- deps/v8/src/mips/regexp-macro-assembler-mips.h | 252
-rw-r--r-- deps/v8/src/mips/register-allocator-mips-inl.h | 137
-rw-r--r-- deps/v8/src/mips/register-allocator-mips.cc | 63
-rw-r--r-- deps/v8/src/mips/simulator-mips.cc | 1796
-rw-r--r-- deps/v8/src/mips/simulator-mips.h | 210
-rw-r--r-- deps/v8/src/mips/stub-cache-mips.cc | 4203
-rw-r--r-- deps/v8/src/mips/virtual-frame-mips.cc | 319
-rw-r--r-- deps/v8/src/mips/virtual-frame-mips.h | 548
-rw-r--r-- deps/v8/src/mirror-debugger.js | 18
-rw-r--r-- deps/v8/src/misc-intrinsics.h (renamed from deps/v8/src/jump-target.cc) | 84
-rw-r--r-- deps/v8/src/mksnapshot.cc | 203
-rw-r--r-- deps/v8/src/natives.h | 10
-rw-r--r-- deps/v8/src/objects-debug.cc | 82
-rw-r--r-- deps/v8/src/objects-inl.h | 1063
-rw-r--r-- deps/v8/src/objects-printer.cc | 72
-rw-r--r-- deps/v8/src/objects-visiting.cc | 17
-rw-r--r-- deps/v8/src/objects-visiting.h | 57
-rw-r--r-- deps/v8/src/objects.cc | 3867
-rw-r--r-- deps/v8/src/objects.h | 1281
-rw-r--r-- deps/v8/src/parser.cc | 1315
-rw-r--r-- deps/v8/src/parser.h | 130
-rw-r--r-- deps/v8/src/platform-cygwin.cc | 323
-rw-r--r-- deps/v8/src/platform-freebsd.cc | 340
-rw-r--r-- deps/v8/src/platform-linux.cc | 461
-rw-r--r-- deps/v8/src/platform-macos.cc | 341
-rw-r--r-- deps/v8/src/platform-nullos.cc | 51
-rw-r--r-- deps/v8/src/platform-openbsd.cc | 371
-rw-r--r-- deps/v8/src/platform-posix.cc | 47
-rw-r--r-- deps/v8/src/platform-solaris.cc | 76
-rw-r--r-- deps/v8/src/platform-tls-mac.h (renamed from deps/v8/src/mips/codegen-mips-inl.h) | 52
-rw-r--r-- deps/v8/src/platform-tls-win32.h (renamed from deps/v8/src/codegen-inl.h) | 56
-rw-r--r-- deps/v8/src/platform-tls.h (renamed from deps/v8/src/virtual-frame-inl.h) | 29
-rw-r--r-- deps/v8/src/platform-win32.cc | 357
-rw-r--r-- deps/v8/src/platform.h | 129
-rw-r--r-- deps/v8/src/preparse-data-format.h (renamed from deps/v8/src/x64/register-allocator-x64.h) | 35
-rw-r--r-- deps/v8/src/preparse-data.cc | 12
-rw-r--r-- deps/v8/src/preparse-data.h | 46
-rw-r--r-- deps/v8/src/preparser-api.cc | 24
-rw-r--r-- deps/v8/src/preparser.cc | 564
-rw-r--r-- deps/v8/src/preparser.h | 274
-rw-r--r-- deps/v8/src/prettyprinter.cc | 220
-rw-r--r-- deps/v8/src/prettyprinter.h | 8
-rw-r--r-- deps/v8/src/profile-generator.cc | 1014
-rw-r--r-- deps/v8/src/profile-generator.h | 222
-rw-r--r-- deps/v8/src/property.cc | 9
-rw-r--r-- deps/v8/src/property.h | 56
-rw-r--r-- deps/v8/src/proxy.js | 137
-rw-r--r-- deps/v8/src/regexp-macro-assembler-irregexp.cc | 5
-rw-r--r-- deps/v8/src/regexp-macro-assembler-irregexp.h | 2
-rw-r--r-- deps/v8/src/regexp-macro-assembler-tracer.cc | 2
-rw-r--r-- deps/v8/src/regexp-macro-assembler-tracer.h | 2
-rw-r--r-- deps/v8/src/regexp-macro-assembler.cc | 54
-rw-r--r-- deps/v8/src/regexp-macro-assembler.h | 26
-rw-r--r-- deps/v8/src/regexp-stack.cc | 28
-rw-r--r-- deps/v8/src/regexp-stack.h | 66
-rw-r--r-- deps/v8/src/regexp.js | 6
-rw-r--r-- deps/v8/src/register-allocator-inl.h | 141
-rw-r--r-- deps/v8/src/register-allocator.cc | 104
-rw-r--r-- deps/v8/src/register-allocator.h | 320
-rw-r--r-- deps/v8/src/rewriter.cc | 804
-rw-r--r-- deps/v8/src/rewriter.h | 11
-rw-r--r-- deps/v8/src/runtime-profiler.cc | 262
-rw-r--r-- deps/v8/src/runtime-profiler.h | 156
-rw-r--r-- deps/v8/src/runtime.cc | 4652
-rw-r--r-- deps/v8/src/runtime.h | 103
-rw-r--r-- deps/v8/src/runtime.js | 52
-rw-r--r-- deps/v8/src/safepoint-table.cc | 3
-rw-r--r-- deps/v8/src/safepoint-table.h | 13
-rw-r--r-- deps/v8/src/scanner-base.cc | 179
-rw-r--r-- deps/v8/src/scanner-base.h | 123
-rwxr-xr-x deps/v8/src/scanner.cc | 261
-rw-r--r-- deps/v8/src/scanner.h | 65
-rw-r--r-- deps/v8/src/scopeinfo.cc | 52
-rw-r--r-- deps/v8/src/scopeinfo.h | 51
-rw-r--r-- deps/v8/src/scopes.cc | 284
-rw-r--r-- deps/v8/src/scopes.h | 117
-rw-r--r-- deps/v8/src/serialize.cc | 354
-rw-r--r-- deps/v8/src/serialize.h | 19
-rw-r--r-- deps/v8/src/small-pointer-list.h | 163
-rw-r--r-- deps/v8/src/snapshot-common.cc | 7
-rw-r--r-- deps/v8/src/snapshot-empty.cc | 4
-rw-r--r-- deps/v8/src/snapshot.h | 20
-rw-r--r-- deps/v8/src/spaces-inl.h | 55
-rw-r--r-- deps/v8/src/spaces.cc | 637
-rw-r--r-- deps/v8/src/spaces.h | 343
-rw-r--r-- deps/v8/src/splay-tree.h | 2
-rw-r--r-- deps/v8/src/string-search.cc | 7
-rw-r--r-- deps/v8/src/string-search.h | 41
-rw-r--r-- deps/v8/src/string-stream.cc | 56
-rw-r--r-- deps/v8/src/string.js | 93
-rw-r--r-- deps/v8/src/stub-cache.cc | 785
-rw-r--r-- deps/v8/src/stub-cache.h | 420
-rw-r--r-- deps/v8/src/token.cc | 6
-rw-r--r-- deps/v8/src/token.h | 10
-rw-r--r-- deps/v8/src/top.cc | 1153
-rw-r--r-- deps/v8/src/top.h | 608
-rw-r--r-- deps/v8/src/type-info.cc | 436
-rw-r--r-- deps/v8/src/type-info.h | 97
-rw-r--r-- deps/v8/src/unbound-queue.h | 2
-rw-r--r-- deps/v8/src/unicode.cc | 2
-rw-r--r-- deps/v8/src/unicode.h | 2
-rw-r--r-- deps/v8/src/uri.js | 10
-rw-r--r-- deps/v8/src/utils-inl.h (renamed from deps/v8/src/jump-target-inl.h) | 26
-rw-r--r-- deps/v8/src/utils.h | 58
-rw-r--r-- deps/v8/src/v8-counters.cc | 29
-rw-r--r-- deps/v8/src/v8-counters.h | 57
-rw-r--r-- deps/v8/src/v8.cc | 185
-rw-r--r-- deps/v8/src/v8.h | 23
-rw-r--r-- deps/v8/src/v8dll-main.cc | 11
-rw-r--r-- deps/v8/src/v8globals.h | 64
-rw-r--r-- deps/v8/src/v8memory.h (renamed from deps/v8/src/memory.h) | 0
-rw-r--r-- deps/v8/src/v8natives.js | 238
-rw-r--r-- deps/v8/src/v8threads.cc | 301
-rw-r--r-- deps/v8/src/v8threads.h | 93
-rw-r--r-- deps/v8/src/v8utils.h | 45
-rw-r--r-- deps/v8/src/variables.cc | 36
-rw-r--r-- deps/v8/src/variables.h | 58
-rw-r--r-- deps/v8/src/version.cc | 31
-rw-r--r-- deps/v8/src/version.h | 4
-rw-r--r-- deps/v8/src/virtual-frame-heavy-inl.h | 190
-rw-r--r-- deps/v8/src/virtual-frame-heavy.cc | 312
-rw-r--r-- deps/v8/src/virtual-frame-light-inl.h | 170
-rw-r--r-- deps/v8/src/virtual-frame-light.cc | 52
-rw-r--r-- deps/v8/src/virtual-frame.cc | 49
-rw-r--r-- deps/v8/src/virtual-frame.h | 59
-rw-r--r-- deps/v8/src/vm-state-inl.h | 38
-rw-r--r-- deps/v8/src/vm-state.h | 13
-rw-r--r-- deps/v8/src/win32-headers.h | 1
-rw-r--r-- deps/v8/src/x64/assembler-x64-inl.h | 31
-rw-r--r-- deps/v8/src/x64/assembler-x64.cc | 525
-rw-r--r-- deps/v8/src/x64/assembler-x64.h | 142
-rw-r--r-- deps/v8/src/x64/builtins-x64.cc | 187
-rw-r--r-- deps/v8/src/x64/code-stubs-x64.cc | 3128
-rw-r--r-- deps/v8/src/x64/code-stubs-x64.h | 328
-rw-r--r-- deps/v8/src/x64/codegen-x64.cc | 8695
-rw-r--r-- deps/v8/src/x64/codegen-x64.h | 674
-rw-r--r-- deps/v8/src/x64/cpu-x64.cc | 10
-rw-r--r-- deps/v8/src/x64/debug-x64.cc | 15
-rw-r--r-- deps/v8/src/x64/deoptimizer-x64.cc | 75
-rw-r--r-- deps/v8/src/x64/disasm-x64.cc | 148
-rw-r--r-- deps/v8/src/x64/frames-x64.h | 2
-rw-r--r-- deps/v8/src/x64/full-codegen-x64.cc | 1263
-rw-r--r-- deps/v8/src/x64/ic-x64.cc | 604
-rw-r--r-- deps/v8/src/x64/jump-target-x64.cc | 437
-rw-r--r-- deps/v8/src/x64/lithium-codegen-x64.cc | 1611
-rw-r--r-- deps/v8/src/x64/lithium-codegen-x64.h | 63
-rw-r--r-- deps/v8/src/x64/lithium-gap-resolver-x64.cc | 8
-rw-r--r-- deps/v8/src/x64/lithium-x64.cc | 576
-rw-r--r-- deps/v8/src/x64/lithium-x64.h | 541
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.cc | 1575
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.h | 1035
-rw-r--r-- deps/v8/src/x64/regexp-macro-assembler-x64.cc | 88
-rw-r--r-- deps/v8/src/x64/regexp-macro-assembler-x64.h | 12
-rw-r--r-- deps/v8/src/x64/register-allocator-x64-inl.h | 87
-rw-r--r-- deps/v8/src/x64/register-allocator-x64.cc | 91
-rw-r--r-- deps/v8/src/x64/simulator-x64.h | 11
-rw-r--r-- deps/v8/src/x64/stub-cache-x64.cc | 1137
-rw-r--r-- deps/v8/src/x64/virtual-frame-x64.cc | 1292
-rw-r--r-- deps/v8/src/x64/virtual-frame-x64.h | 593
-rw-r--r-- deps/v8/src/zone-inl.h | 63
-rw-r--r-- deps/v8/src/zone.cc | 83
-rw-r--r-- deps/v8/src/zone.h | 75
415 files changed, 94673 insertions, 88144 deletions
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index 598e4af56..5b2f272a5 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -68,7 +68,6 @@ SOURCES = {
execution.cc
factory.cc
flags.cc
- frame-element.cc
frames.cc
full-codegen.cc
func-name-inferrer.cc
@@ -85,8 +84,8 @@ SOURCES = {
ic.cc
inspector.cc
interpreter-irregexp.cc
+ isolate.cc
jsregexp.cc
- jump-target.cc
lithium-allocator.cc
lithium.cc
liveedit.cc
@@ -106,7 +105,6 @@ SOURCES = {
regexp-macro-assembler-irregexp.cc
regexp-macro-assembler.cc
regexp-stack.cc
- register-allocator.cc
rewriter.cc
runtime.cc
runtime-profiler.cc
@@ -123,7 +121,6 @@ SOURCES = {
strtod.cc
stub-cache.cc
token.cc
- top.cc
type-info.cc
unicode.cc
utils.cc
@@ -132,14 +129,11 @@ SOURCES = {
v8threads.cc
variables.cc
version.cc
- virtual-frame.cc
zone.cc
extensions/gc-extension.cc
extensions/externalize-string-extension.cc
"""),
'arch:arm': Split("""
- jump-target-light.cc
- virtual-frame-light.cc
arm/builtins-arm.cc
arm/code-stubs-arm.cc
arm/codegen-arm.cc
@@ -151,37 +145,32 @@ SOURCES = {
arm/frames-arm.cc
arm/full-codegen-arm.cc
arm/ic-arm.cc
- arm/jump-target-arm.cc
arm/lithium-arm.cc
arm/lithium-codegen-arm.cc
arm/lithium-gap-resolver-arm.cc
arm/macro-assembler-arm.cc
arm/regexp-macro-assembler-arm.cc
- arm/register-allocator-arm.cc
arm/stub-cache-arm.cc
- arm/virtual-frame-arm.cc
arm/assembler-arm.cc
"""),
'arch:mips': Split("""
mips/assembler-mips.cc
mips/builtins-mips.cc
+ mips/code-stubs-mips.cc
mips/codegen-mips.cc
mips/constants-mips.cc
mips/cpu-mips.cc
mips/debug-mips.cc
+ mips/deoptimizer-mips.cc
mips/disasm-mips.cc
- mips/full-codegen-mips.cc
mips/frames-mips.cc
+ mips/full-codegen-mips.cc
mips/ic-mips.cc
- mips/jump-target-mips.cc
mips/macro-assembler-mips.cc
- mips/register-allocator-mips.cc
+ mips/regexp-macro-assembler-mips.cc
mips/stub-cache-mips.cc
- mips/virtual-frame-mips.cc
"""),
'arch:ia32': Split("""
- jump-target-heavy.cc
- virtual-frame-heavy.cc
ia32/assembler-ia32.cc
ia32/builtins-ia32.cc
ia32/code-stubs-ia32.cc
@@ -193,19 +182,14 @@ SOURCES = {
ia32/frames-ia32.cc
ia32/full-codegen-ia32.cc
ia32/ic-ia32.cc
- ia32/jump-target-ia32.cc
ia32/lithium-codegen-ia32.cc
ia32/lithium-gap-resolver-ia32.cc
ia32/lithium-ia32.cc
ia32/macro-assembler-ia32.cc
ia32/regexp-macro-assembler-ia32.cc
- ia32/register-allocator-ia32.cc
ia32/stub-cache-ia32.cc
- ia32/virtual-frame-ia32.cc
"""),
'arch:x64': Split("""
- jump-target-heavy.cc
- virtual-frame-heavy.cc
x64/assembler-x64.cc
x64/builtins-x64.cc
x64/code-stubs-x64.cc
@@ -217,15 +201,12 @@ SOURCES = {
x64/frames-x64.cc
x64/full-codegen-x64.cc
x64/ic-x64.cc
- x64/jump-target-x64.cc
x64/lithium-codegen-x64.cc
x64/lithium-gap-resolver-x64.cc
x64/lithium-x64.cc
x64/macro-assembler-x64.cc
x64/regexp-macro-assembler-x64.cc
- x64/register-allocator-x64.cc
x64/stub-cache-x64.cc
- x64/virtual-frame-x64.cc
"""),
'simulator:arm': ['arm/simulator-arm.cc'],
'simulator:mips': ['mips/simulator-mips.cc'],
@@ -245,6 +226,20 @@ SOURCES = {
}
+PREPARSER_SOURCES = {
+ 'all': Split("""
+ allocation.cc
+ hashmap.cc
+ preparse-data.cc
+ preparser.cc
+ preparser-api.cc
+ scanner-base.cc
+ token.cc
+ unicode.cc
+ """)
+}
+
+
D8_FILES = {
'all': [
'd8.cc', 'd8-debug.cc'
@@ -300,6 +295,11 @@ debug-debugger.js
'''.split()
+EXPERIMENTAL_LIBRARY_FILES = '''
+proxy.js
+'''.split()
+
+
def Abort(message):
print message
sys.exit(1)
@@ -310,13 +310,22 @@ def ConfigureObjectFiles():
env.Replace(**context.flags['v8'])
context.ApplyEnvOverrides(env)
env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
- env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')
+ if 'ENABLE_LOGGING_AND_PROFILING' in env['CPPDEFINES']:
+ env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')
+ else:
+ env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET')
+
+ def BuildJS2CEnv(type):
+ js2c_env = { 'TYPE': type, 'COMPRESSION': 'off' }
+ if 'COMPRESS_STARTUP_DATA_BZ2' in env['CPPDEFINES']:
+ js2c_env['COMPRESSION'] = 'bz2'
+ return js2c_env
# Build the standard platform-independent source files.
source_files = context.GetRelevantSources(SOURCES)
d8_files = context.GetRelevantSources(D8_FILES)
- d8_js = env.JS2C('d8-js.cc', 'd8.js', TYPE='D8')
+ d8_js = env.JS2C('d8-js.cc', 'd8.js', **{'TYPE': 'D8', 'COMPRESSION': 'off'})
d8_js_obj = context.ConfigureObject(env, d8_js, CPPPATH=['.'])
d8_objs = [context.ConfigureObject(env, [d8_files]), d8_js_obj]
@@ -324,12 +333,25 @@ def ConfigureObjectFiles():
# compile it.
library_files = [s for s in LIBRARY_FILES]
library_files.append('macros.py')
- libraries_src, libraries_empty_src = env.JS2C(['libraries.cc', 'libraries-empty.cc'], library_files, TYPE='CORE')
+ libraries_src = env.JS2C(
+ ['libraries.cc'], library_files, **BuildJS2CEnv('CORE'))
libraries_obj = context.ConfigureObject(env, libraries_src, CPPPATH=['.'])
+ # Combine the experimental JavaScript library files into a C++ file
+ # and compile it.
+ experimental_library_files = [ s for s in EXPERIMENTAL_LIBRARY_FILES ]
+ experimental_library_files.append('macros.py')
+ experimental_libraries_src = env.JS2C(['experimental-libraries.cc'],
+ experimental_library_files,
+ **BuildJS2CEnv('EXPERIMENTAL'))
+ experimental_libraries_obj = context.ConfigureObject(env, experimental_libraries_src, CPPPATH=['.'])
+
source_objs = context.ConfigureObject(env, source_files)
non_snapshot_files = [source_objs]
+ preparser_source_files = context.GetRelevantSources(PREPARSER_SOURCES)
+ preparser_objs = context.ConfigureObject(env, preparser_source_files)
+
# Create snapshot if necessary. For cross compilation you should either
# do without snapshots and take the performance hit or you should build a
# host VM with the simulator=arm and snapshot=on options and then take the
@@ -340,7 +362,7 @@ def ConfigureObjectFiles():
mksnapshot_env = env.Copy()
mksnapshot_env.Replace(**context.flags['mksnapshot'])
mksnapshot_src = 'mksnapshot.cc'
- mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
+ mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, experimental_libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
if context.use_snapshot:
if context.build_snapshot:
snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
@@ -349,9 +371,9 @@ def ConfigureObjectFiles():
snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
else:
snapshot_obj = empty_snapshot_obj
- library_objs = [non_snapshot_files, libraries_obj, snapshot_obj]
- return (library_objs, d8_objs, [mksnapshot])
+ library_objs = [non_snapshot_files, libraries_obj, experimental_libraries_obj, snapshot_obj]
+ return (library_objs, d8_objs, [mksnapshot], preparser_objs)
-(library_objs, d8_objs, mksnapshot) = ConfigureObjectFiles()
-Return('library_objs d8_objs mksnapshot')
+(library_objs, d8_objs, mksnapshot, preparser_objs) = ConfigureObjectFiles()
+Return('library_objs d8_objs mksnapshot preparser_objs')
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 8cbdc09ed..806c679f4 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -32,9 +32,9 @@
#include "deoptimizer.h"
#include "execution.h"
#include "factory.h"
+#include "list-inl.h"
#include "safepoint-table.h"
#include "scopeinfo.h"
-#include "top.h"
namespace v8 {
namespace internal {
@@ -43,8 +43,9 @@ namespace internal {
template <class C>
static C* FindInPrototypeChain(Object* obj, bool* found_it) {
ASSERT(!*found_it);
+ Heap* heap = HEAP;
while (!Is<C>(obj)) {
- if (obj == Heap::null_value()) return NULL;
+ if (obj == heap->null_value()) return NULL;
obj = obj->GetPrototype();
}
*found_it = true;
@@ -90,24 +91,34 @@ MaybeObject* Accessors::ArrayGetLength(Object* object, void*) {
Object* Accessors::FlattenNumber(Object* value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
JSValue* wrapper = JSValue::cast(value);
- ASSERT(
- Top::context()->global_context()->number_function()->has_initial_map());
- Map* number_map =
- Top::context()->global_context()->number_function()->initial_map();
+ ASSERT(Isolate::Current()->context()->global_context()->number_function()->
+ has_initial_map());
+ Map* number_map = Isolate::Current()->context()->global_context()->
+ number_function()->initial_map();
if (wrapper->map() == number_map) return wrapper->value();
return value;
}
MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
+ Isolate* isolate = object->GetIsolate();
+
+ // This means one of the object's prototypes is a JSArray and the
+ // object does not have a 'length' property. Calling SetProperty
+ // causes an infinite loop.
+ if (!object->IsJSArray()) {
+ return object->SetLocalPropertyIgnoreAttributes(
+ isolate->heap()->length_symbol(), value, NONE);
+ }
+
value = FlattenNumber(value);
// Need to call methods that may trigger GC.
- HandleScope scope;
+ HandleScope scope(isolate);
// Protect raw pointers.
- Handle<JSObject> object_handle(object);
- Handle<Object> value_handle(value);
+ Handle<JSObject> object_handle(object, isolate);
+ Handle<Object> value_handle(value, isolate);
bool has_exception;
Handle<Object> uint32_v = Execution::ToUint32(value_handle, &has_exception);
@@ -115,23 +126,12 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
Handle<Object> number_v = Execution::ToNumber(value_handle, &has_exception);
if (has_exception) return Failure::Exception();
- // Restore raw pointers,
- object = *object_handle;
- value = *value_handle;
-
if (uint32_v->Number() == number_v->Number()) {
- if (object->IsJSArray()) {
- return JSArray::cast(object)->SetElementsLength(*uint32_v);
- } else {
- // This means one of the object's prototypes is a JSArray and
- // the object does not have a 'length' property.
- // Calling SetProperty causes an infinite loop.
- return object->SetLocalPropertyIgnoreAttributes(Heap::length_symbol(),
- value, NONE);
- }
+ return Handle<JSArray>::cast(object_handle)->SetElementsLength(*uint32_v);
}
- return Top::Throw(*Factory::NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
+ return isolate->Throw(
+ *isolate->factory()->NewRangeError("invalid_array_length",
+ HandleVector<Object>(NULL, 0)));
}
@@ -314,15 +314,18 @@ const AccessorDescriptor Accessors::ScriptCompilationType = {
MaybeObject* Accessors::ScriptGetLineEnds(Object* object, void*) {
- HandleScope scope;
- Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
+ JSValue* wrapper = JSValue::cast(object);
+ Isolate* isolate = wrapper->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Script> script(Script::cast(wrapper->value()), isolate);
InitScriptLineEnds(script);
ASSERT(script->line_ends()->IsFixedArray());
Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
// We do not want anyone to modify this array from JS.
- ASSERT(*line_ends == Heap::empty_fixed_array() ||
- line_ends->map() == Heap::fixed_cow_array_map());
- Handle<JSArray> js_array = Factory::NewJSArrayWithElements(line_ends);
+ ASSERT(*line_ends == isolate->heap()->empty_fixed_array() ||
+ line_ends->map() == isolate->heap()->fixed_cow_array_map());
+ Handle<JSArray> js_array =
+ isolate->factory()->NewJSArrayWithElements(line_ends);
return *js_array;
}
@@ -368,7 +371,7 @@ MaybeObject* Accessors::ScriptGetEvalFromScript(Object* object, void*) {
return *GetScriptWrapper(eval_from_script);
}
}
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
@@ -391,7 +394,7 @@ MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Object* object, void*) {
// If this is not a script compiled through eval there is no eval position.
int compilation_type = Smi::cast(script->compilation_type())->value();
if (compilation_type != Script::COMPILATION_TYPE_EVAL) {
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
// Get the function from where eval was called and find the source position
@@ -443,9 +446,10 @@ const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
+ Heap* heap = Isolate::Current()->heap();
bool found_it = false;
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return Heap::undefined_value();
+ if (!found_it) return heap->undefined_value();
while (!function->should_have_prototype()) {
found_it = false;
function = FindInPrototypeChain<JSFunction>(object->GetPrototype(),
@@ -456,7 +460,7 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
if (!function->has_prototype()) {
Object* prototype;
- { MaybeObject* maybe_prototype = Heap::AllocateFunctionPrototype(function);
+ { MaybeObject* maybe_prototype = heap->AllocateFunctionPrototype(function);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
}
Object* result;
@@ -471,12 +475,13 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
Object* value,
void*) {
+ Heap* heap = object->GetHeap();
bool found_it = false;
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return Heap::undefined_value();
+ if (!found_it) return heap->undefined_value();
if (!function->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
- return object->SetLocalPropertyIgnoreAttributes(Heap::prototype_symbol(),
+ return object->SetLocalPropertyIgnoreAttributes(heap->prototype_symbol(),
value,
NONE);
}
@@ -545,7 +550,7 @@ const AccessorDescriptor Accessors::FunctionLength = {
MaybeObject* Accessors::FunctionGetName(Object* object, void*) {
bool found_it = false;
JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return Heap::undefined_value();
+ if (!found_it) return HEAP->undefined_value();
return holder->shared()->name();
}
@@ -561,183 +566,20 @@ const AccessorDescriptor Accessors::FunctionName = {
// Accessors::FunctionArguments
//
-static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
- if (slot_index >= 0) {
- const int offset = JavaScriptFrameConstants::kLocal0Offset;
- return frame->fp() + offset - (slot_index * kPointerSize);
- } else {
- const int offset = JavaScriptFrameConstants::kSavedRegistersOffset;
- return frame->fp() + offset - ((slot_index + 1) * kPointerSize);
- }
-}
-
-
-// We can't intermix stack decoding and allocations because
-// deoptimization infrastracture is not GC safe.
-// Thus we build a temporary structure in malloced space.
-class SlotRef BASE_EMBEDDED {
- public:
- enum SlotRepresentation {
- UNKNOWN,
- TAGGED,
- INT32,
- DOUBLE,
- LITERAL
- };
-
- SlotRef()
- : addr_(NULL), representation_(UNKNOWN) { }
-
- SlotRef(Address addr, SlotRepresentation representation)
- : addr_(addr), representation_(representation) { }
-
- explicit SlotRef(Object* literal)
- : literal_(literal), representation_(LITERAL) { }
-
- Handle<Object> GetValue() {
- switch (representation_) {
- case TAGGED:
- return Handle<Object>(Memory::Object_at(addr_));
-
- case INT32: {
- int value = Memory::int32_at(addr_);
- if (Smi::IsValid(value)) {
- return Handle<Object>(Smi::FromInt(value));
- } else {
- return Factory::NewNumberFromInt(value);
- }
- }
-
- case DOUBLE: {
- double value = Memory::double_at(addr_);
- return Factory::NewNumber(value);
- }
-
- case LITERAL:
- return literal_;
-
- default:
- UNREACHABLE();
- return Handle<Object>::null();
- }
- }
-
- private:
- Address addr_;
- Handle<Object> literal_;
- SlotRepresentation representation_;
-};
-
-
-static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame) {
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
-
- switch (opcode) {
- case Translation::BEGIN:
- case Translation::FRAME:
- // Peeled off before getting here.
- break;
-
- case Translation::ARGUMENTS_OBJECT:
- // This can be only emitted for local slots not for argument slots.
- break;
-
- case Translation::REGISTER:
- case Translation::INT32_REGISTER:
- case Translation::DOUBLE_REGISTER:
- case Translation::DUPLICATE:
- // We are at safepoint which corresponds to call. All registers are
- // saved by caller so there would be no live registers at this
- // point. Thus these translation commands should not be used.
- break;
-
- case Translation::STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::TAGGED);
- }
-
- case Translation::INT32_STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::INT32);
- }
-
- case Translation::DOUBLE_STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::DOUBLE);
- }
-
- case Translation::LITERAL: {
- int literal_index = iterator->Next();
- return SlotRef(data->LiteralArray()->get(literal_index));
- }
- }
-
- UNREACHABLE();
- return SlotRef();
-}
-
-
-
-
-
-static void ComputeSlotMappingForArguments(JavaScriptFrame* frame,
- int inlined_frame_index,
- Vector<SlotRef>* args_slots) {
- AssertNoAllocation no_gc;
- int deopt_index = AstNode::kNoNumber;
- DeoptimizationInputData* data =
- static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
- TranslationIterator it(data->TranslationByteArray(),
- data->TranslationIndex(deopt_index)->value());
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- ASSERT(opcode == Translation::BEGIN);
- int frame_count = it.Next();
- USE(frame_count);
- ASSERT(frame_count > inlined_frame_index);
- int frames_to_skip = inlined_frame_index;
- while (true) {
- opcode = static_cast<Translation::Opcode>(it.Next());
- // Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- if (opcode == Translation::FRAME) {
- if (frames_to_skip == 0) {
- // We reached the frame corresponding to the inlined function
- // in question. Process the translation commands for the
- // arguments.
- //
- // Skip the translation command for the receiver.
- it.Skip(Translation::NumberOfOperandsFor(
- static_cast<Translation::Opcode>(it.Next())));
- // Compute slots for arguments.
- for (int i = 0; i < args_slots->length(); ++i) {
- (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
- }
- return;
- }
- frames_to_skip--;
- }
- }
-
- UNREACHABLE();
-}
-
static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
JavaScriptFrame* frame,
Handle<JSFunction> inlined_function,
int inlined_frame_index) {
+ Factory* factory = Isolate::Current()->factory();
int args_count = inlined_function->shared()->formal_parameter_count();
ScopedVector<SlotRef> args_slots(args_count);
- ComputeSlotMappingForArguments(frame, inlined_frame_index, &args_slots);
+ SlotRef::ComputeSlotMappingForArguments(frame,
+ inlined_frame_index,
+ &args_slots);
Handle<JSObject> arguments =
- Factory::NewArgumentsObject(inlined_function, args_count);
- Handle<FixedArray> array = Factory::NewFixedArray(args_count);
+ factory->NewArgumentsObject(inlined_function, args_count);
+ Handle<FixedArray> array = factory->NewFixedArray(args_count);
for (int i = 0; i < args_count; ++i) {
Handle<Object> value = args_slots[i].GetValue();
array->set(i, *value);
@@ -750,15 +592,16 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
- HandleScope scope;
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
bool found_it = false;
JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return Heap::undefined_value();
- Handle<JSFunction> function(holder);
+ if (!found_it) return isolate->heap()->undefined_value();
+ Handle<JSFunction> function(holder, isolate);
// Find the top invocation of the function by traversing frames.
List<JSFunction*> functions(2);
- for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
+ for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
frame->GetFunctions(&functions);
for (int i = functions.length() - 1; i >= 0; i--) {
@@ -776,9 +619,9 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
if (!frame->is_optimized()) {
// If there is an arguments variable in the stack, we return that.
Handle<SerializedScopeInfo> info(function->shared()->scope_info());
- int index = info->StackSlotIndex(Heap::arguments_symbol());
+ int index = info->StackSlotIndex(isolate->heap()->arguments_symbol());
if (index >= 0) {
- Handle<Object> arguments(frame->GetExpression(index));
+ Handle<Object> arguments(frame->GetExpression(index), isolate);
if (!arguments->IsArgumentsMarker()) return *arguments;
}
}
@@ -792,15 +635,13 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
// Get the number of arguments and construct an arguments object
// mirror for the right frame.
const int length = frame->ComputeParametersCount();
- Handle<JSObject> arguments = Factory::NewArgumentsObject(function,
- length);
- Handle<FixedArray> array = Factory::NewFixedArray(length);
+ Handle<JSObject> arguments = isolate->factory()->NewArgumentsObject(
+ function, length);
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
// Copy the parameters to the arguments object.
ASSERT(array->length() == length);
- for (int i = 0; i < length; i++) {
- array->set(i, frame->GetParameter(i));
- }
+ for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
arguments->set_elements(*array);
// Return the freshly allocated arguments object.
@@ -810,7 +651,7 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
}
// No frame corresponding to the given function found. Return null.
- return Heap::null_value();
+ return isolate->heap()->null_value();
}
@@ -826,16 +667,30 @@ const AccessorDescriptor Accessors::FunctionArguments = {
//
+static MaybeObject* CheckNonStrictCallerOrThrow(
+ Isolate* isolate,
+ JSFunction* caller) {
+ DisableAssertNoAllocation enable_allocation;
+ if (caller->shared()->strict_mode()) {
+ return isolate->Throw(
+ *isolate->factory()->NewTypeError("strict_caller",
+ HandleVector<Object>(NULL, 0)));
+ }
+ return caller;
+}
+
+
MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
- HandleScope scope;
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
AssertNoAllocation no_alloc;
bool found_it = false;
JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return Heap::undefined_value();
- Handle<JSFunction> function(holder);
+ if (!found_it) return isolate->heap()->undefined_value();
+ Handle<JSFunction> function(holder, isolate);
List<JSFunction*> functions(2);
- for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
+ for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
frame->GetFunctions(&functions);
for (int i = functions.length() - 1; i >= 0; i--) {
@@ -845,18 +700,18 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
// frames, e.g. frames for scripts not functions.
if (i > 0) {
ASSERT(!functions[i - 1]->shared()->is_toplevel());
- return functions[i - 1];
+ return CheckNonStrictCallerOrThrow(isolate, functions[i - 1]);
} else {
for (it.Advance(); !it.done(); it.Advance()) {
frame = it.frame();
functions.Rewind(0);
frame->GetFunctions(&functions);
if (!functions.last()->shared()->is_toplevel()) {
- return functions.last();
+ return CheckNonStrictCallerOrThrow(isolate, functions.last());
}
ASSERT(functions.length() == 1);
}
- if (it.done()) return Heap::null_value();
+ if (it.done()) return isolate->heap()->null_value();
break;
}
}
@@ -865,7 +720,7 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
}
// No frame corresponding to the given function found. Return null.
- return Heap::null_value();
+ return isolate->heap()->null_value();
}
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 14ccc8fb8..385536d22 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -28,6 +28,8 @@
#ifndef V8_ACCESSORS_H_
#define V8_ACCESSORS_H_
+#include "allocation.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/x64/codegen-x64-inl.h b/deps/v8/src/allocation-inl.h
index 53caf9197..04a3fe667 100644
--- a/deps/v8/src/x64/codegen-x64-inl.h
+++ b/deps/v8/src/allocation-inl.h
@@ -25,22 +25,25 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef V8_ALLOCATION_INL_H_
+#define V8_ALLOCATION_INL_H_
-#ifndef V8_X64_CODEGEN_X64_INL_H_
-#define V8_X64_CODEGEN_X64_INL_H_
+#include "allocation.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
-// Platform-specific inline functions.
+void* PreallocatedStorage::New(size_t size) {
+ return Isolate::Current()->PreallocatedStorageNew(size);
+}
-void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
-#undef __
+void PreallocatedStorage::Delete(void* p) {
+ return Isolate::Current()->PreallocatedStorageDelete(p);
+}
+
} } // namespace v8::internal
-#endif // V8_X64_CODEGEN_X64_INL_H_
+#endif // V8_ALLOCATION_INL_H_
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index d74c37cd7..119b087c1 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <stdlib.h>
-
#include "../include/v8stdint.h"
#include "globals.h"
#include "checks.h"
@@ -37,7 +35,6 @@ namespace v8 {
namespace internal {
void* Malloced::New(size_t size) {
- ASSERT(NativeAllocationChecker::allocation_allowed());
void* result = malloc(size);
if (result == NULL) {
v8::internal::FatalProcessOutOfMemory("Malloced operator new");
@@ -103,85 +100,6 @@ char* StrNDup(const char* str, int n) {
}
-int NativeAllocationChecker::allocation_disallowed_ = 0;
-
-
-PreallocatedStorage PreallocatedStorage::in_use_list_(0);
-PreallocatedStorage PreallocatedStorage::free_list_(0);
-bool PreallocatedStorage::preallocated_ = false;
-
-
-void PreallocatedStorage::Init(size_t size) {
- ASSERT(free_list_.next_ == &free_list_);
- ASSERT(free_list_.previous_ == &free_list_);
- PreallocatedStorage* free_chunk =
- reinterpret_cast<PreallocatedStorage*>(new char[size]);
- free_list_.next_ = free_list_.previous_ = free_chunk;
- free_chunk->next_ = free_chunk->previous_ = &free_list_;
- free_chunk->size_ = size - sizeof(PreallocatedStorage);
- preallocated_ = true;
-}
-
-
-void* PreallocatedStorage::New(size_t size) {
- if (!preallocated_) {
- return FreeStoreAllocationPolicy::New(size);
- }
- ASSERT(free_list_.next_ != &free_list_);
- ASSERT(free_list_.previous_ != &free_list_);
-
- size = (size + kPointerSize - 1) & ~(kPointerSize - 1);
- // Search for exact fit.
- for (PreallocatedStorage* storage = free_list_.next_;
- storage != &free_list_;
- storage = storage->next_) {
- if (storage->size_ == size) {
- storage->Unlink();
- storage->LinkTo(&in_use_list_);
- return reinterpret_cast<void*>(storage + 1);
- }
- }
- // Search for first fit.
- for (PreallocatedStorage* storage = free_list_.next_;
- storage != &free_list_;
- storage = storage->next_) {
- if (storage->size_ >= size + sizeof(PreallocatedStorage)) {
- storage->Unlink();
- storage->LinkTo(&in_use_list_);
- PreallocatedStorage* left_over =
- reinterpret_cast<PreallocatedStorage*>(
- reinterpret_cast<char*>(storage + 1) + size);
- left_over->size_ = storage->size_ - size - sizeof(PreallocatedStorage);
- ASSERT(size + left_over->size_ + sizeof(PreallocatedStorage) ==
- storage->size_);
- storage->size_ = size;
- left_over->LinkTo(&free_list_);
- return reinterpret_cast<void*>(storage + 1);
- }
- }
- // Allocation failure.
- ASSERT(false);
- return NULL;
-}
-
-
-// We don't attempt to coalesce.
-void PreallocatedStorage::Delete(void* p) {
- if (p == NULL) {
- return;
- }
- if (!preallocated_) {
- FreeStoreAllocationPolicy::Delete(p);
- return;
- }
- PreallocatedStorage* storage = reinterpret_cast<PreallocatedStorage*>(p) - 1;
- ASSERT(storage->next_->previous_ == storage);
- ASSERT(storage->previous_->next_ == storage);
- storage->Unlink();
- storage->LinkTo(&free_list_);
-}
-
-
void PreallocatedStorage::LinkTo(PreallocatedStorage* other) {
next_ = other->next_;
other->next_->previous_ = this;
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index 394366ea4..75aba35d8 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -39,38 +39,6 @@ namespace internal {
// processing.
void FatalProcessOutOfMemory(const char* message);
-// A class that controls whether allocation is allowed. This is for
-// the C++ heap only!
-class NativeAllocationChecker {
- public:
- typedef enum { ALLOW, DISALLOW } NativeAllocationAllowed;
- explicit inline NativeAllocationChecker(NativeAllocationAllowed allowed)
- : allowed_(allowed) {
-#ifdef DEBUG
- if (allowed == DISALLOW) {
- allocation_disallowed_++;
- }
-#endif
- }
- ~NativeAllocationChecker() {
-#ifdef DEBUG
- if (allowed_ == DISALLOW) {
- allocation_disallowed_--;
- }
-#endif
- ASSERT(allocation_disallowed_ >= 0);
- }
- static inline bool allocation_allowed() {
- return allocation_disallowed_ == 0;
- }
- private:
- // This static counter ensures that NativeAllocationCheckers can be nested.
- static int allocation_disallowed_;
- // This flag applies to this particular instance.
- NativeAllocationAllowed allowed_;
-};
-
-
// Superclass for classes managed with new & delete.
class Malloced {
public:
@@ -114,7 +82,6 @@ class AllStatic {
template <typename T>
static T* NewArray(int size) {
- ASSERT(NativeAllocationChecker::allocation_allowed());
T* result = new T[size];
if (result == NULL) Malloced::FatalProcessOutOfMemory();
return result;
@@ -146,27 +113,27 @@ class FreeStoreAllocationPolicy {
// Allocation policy for allocating in preallocated space.
// Used as an allocation policy for ScopeInfo when generating
// stack traces.
-class PreallocatedStorage : public AllStatic {
+class PreallocatedStorage {
public:
explicit PreallocatedStorage(size_t size);
size_t size() { return size_; }
- static void* New(size_t size);
- static void Delete(void* p);
- // Preallocate a set number of bytes.
- static void Init(size_t size);
+ // TODO(isolates): Get rid of these-- we'll have to change the allocator
+ // interface to include a pointer to an isolate to do this
+ // efficiently.
+ static inline void* New(size_t size);
+ static inline void Delete(void* p);
private:
size_t size_;
PreallocatedStorage* previous_;
PreallocatedStorage* next_;
- static bool preallocated_;
-
- static PreallocatedStorage in_use_list_;
- static PreallocatedStorage free_list_;
void LinkTo(PreallocatedStorage* other);
void Unlink();
+
+ friend class Isolate;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(PreallocatedStorage);
};
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index b77e450cf..b33e14c01 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -38,13 +38,13 @@
#include "global-handles.h"
#include "heap-profiler.h"
#include "messages.h"
+#include "natives.h"
#include "parser.h"
#include "platform.h"
#include "profile-generator-inl.h"
#include "runtime-profiler.h"
#include "serialize.h"
#include "snapshot.h"
-#include "top.h"
#include "v8threads.h"
#include "version.h"
#include "vm-state-inl.h"
@@ -52,81 +52,81 @@
#include "../include/v8-profiler.h"
#include "../include/v8-testing.h"
-#define LOG_API(expr) LOG(ApiEntryCall(expr))
+#define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
#ifdef ENABLE_VMSTATE_TRACKING
-#define ENTER_V8 ASSERT(i::V8::IsRunning()); i::VMState __state__(i::OTHER)
-#define LEAVE_V8 i::VMState __state__(i::EXTERNAL)
+#define ENTER_V8(isolate) \
+ ASSERT((isolate)->IsInitialized()); \
+ i::VMState __state__((isolate), i::OTHER)
+#define LEAVE_V8(isolate) \
+ i::VMState __state__((isolate), i::EXTERNAL)
#else
-#define ENTER_V8 ((void) 0)
-#define LEAVE_V8 ((void) 0)
+#define ENTER_V8(isolate) ((void) 0)
+#define LEAVE_V8(isolate) ((void) 0)
#endif
namespace v8 {
-#define ON_BAILOUT(location, code) \
- if (IsDeadCheck(location) || v8::V8::IsExecutionTerminating()) { \
+#define ON_BAILOUT(isolate, location, code) \
+ if (IsDeadCheck(isolate, location) || \
+ IsExecutionTerminatingCheck(isolate)) { \
code; \
UNREACHABLE(); \
}
-#define EXCEPTION_PREAMBLE() \
- thread_local.IncrementCallDepth(); \
- ASSERT(!i::Top::external_caught_exception()); \
+#define EXCEPTION_PREAMBLE(isolate) \
+ (isolate)->handle_scope_implementer()->IncrementCallDepth(); \
+ ASSERT(!(isolate)->external_caught_exception()); \
bool has_pending_exception = false
-#define EXCEPTION_BAILOUT_CHECK(value) \
+#define EXCEPTION_BAILOUT_CHECK(isolate, value) \
do { \
- thread_local.DecrementCallDepth(); \
+ i::HandleScopeImplementer* handle_scope_implementer = \
+ (isolate)->handle_scope_implementer(); \
+ handle_scope_implementer->DecrementCallDepth(); \
if (has_pending_exception) { \
- if (thread_local.CallDepthIsZero() && i::Top::is_out_of_memory()) { \
- if (!thread_local.ignore_out_of_memory()) \
+ if (handle_scope_implementer->CallDepthIsZero() && \
+ (isolate)->is_out_of_memory()) { \
+ if (!handle_scope_implementer->ignore_out_of_memory()) \
i::V8::FatalProcessOutOfMemory(NULL); \
} \
- bool call_depth_is_zero = thread_local.CallDepthIsZero(); \
- i::Top::OptionalRescheduleException(call_depth_is_zero); \
+ bool call_depth_is_zero = handle_scope_implementer->CallDepthIsZero(); \
+ (isolate)->OptionalRescheduleException(call_depth_is_zero); \
return value; \
} \
} while (false)
-#define API_ENTRY_CHECK(msg) \
+#define API_ENTRY_CHECK(isolate, msg) \
do { \
if (v8::Locker::IsActive()) { \
- ApiCheck(i::ThreadManager::IsLockedByCurrentThread(), \
+ ApiCheck(isolate->thread_manager()->IsLockedByCurrentThread(), \
msg, \
"Entering the V8 API without proper locking in place"); \
} \
} while (false)
-// --- D a t a t h a t i s s p e c i f i c t o a t h r e a d ---
-
-
-static i::HandleScopeImplementer thread_local;
-
-
// --- E x c e p t i o n B e h a v i o r ---
-static FatalErrorCallback exception_behavior = NULL;
-
static void DefaultFatalErrorHandler(const char* location,
const char* message) {
#ifdef ENABLE_VMSTATE_TRACKING
- i::VMState __state__(i::OTHER);
+ i::VMState __state__(i::Isolate::Current(), i::OTHER);
#endif
API_Fatal(location, message);
}
-static FatalErrorCallback& GetFatalErrorHandler() {
- if (exception_behavior == NULL) {
- exception_behavior = DefaultFatalErrorHandler;
+static FatalErrorCallback GetFatalErrorHandler() {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (isolate->exception_behavior() == NULL) {
+ isolate->set_exception_behavior(DefaultFatalErrorHandler);
}
- return exception_behavior;
+ return isolate->exception_behavior();
}
@@ -175,8 +175,8 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
heap_stats.pending_global_handle_count = &pending_global_handle_count;
int near_death_global_handle_count;
heap_stats.near_death_global_handle_count = &near_death_global_handle_count;
- int destroyed_global_handle_count;
- heap_stats.destroyed_global_handle_count = &destroyed_global_handle_count;
+ int free_global_handle_count;
+ heap_stats.free_global_handle_count = &free_global_handle_count;
intptr_t memory_allocator_size;
heap_stats.memory_allocator_size = &memory_allocator_size;
intptr_t memory_allocator_capacity;
@@ -189,11 +189,12 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
heap_stats.os_error = &os_error;
int end_marker;
heap_stats.end_marker = &end_marker;
- i::Heap::RecordStats(&heap_stats, take_snapshot);
+ i::Isolate* isolate = i::Isolate::Current();
+ isolate->heap()->RecordStats(&heap_stats, take_snapshot);
i::V8::SetFatalError();
FatalErrorCallback callback = GetFatalErrorHandler();
{
- LEAVE_V8;
+ LEAVE_V8(isolate);
callback(location, "Allocation failed - process out of memory");
}
// If the callback returns, we stop execution.
@@ -201,11 +202,6 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
}
-void V8::SetFatalErrorHandler(FatalErrorCallback that) {
- exception_behavior = that;
-}
-
-
bool Utils::ReportApiFailure(const char* location, const char* message) {
FatalErrorCallback callback = GetFatalErrorHandler();
callback(location, message);
@@ -252,12 +248,22 @@ static bool ReportEmptyHandle(const char* location) {
* advantage over ON_BAILOUT that it actually initializes the VM if this has not
* yet been done.
*/
-static inline bool IsDeadCheck(const char* location) {
- return !i::V8::IsRunning()
+static inline bool IsDeadCheck(i::Isolate* isolate, const char* location) {
+ return !isolate->IsInitialized()
&& i::V8::IsDead() ? ReportV8Dead(location) : false;
}
+static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
+ if (!isolate->IsInitialized()) return false;
+ if (isolate->has_scheduled_exception()) {
+ return isolate->scheduled_exception() ==
+ isolate->heap()->termination_exception();
+ }
+ return false;
+}
+
+
static inline bool EmptyCheck(const char* location, v8::Handle<v8::Data> obj) {
return obj.IsEmpty() ? ReportEmptyHandle(location) : false;
}
@@ -270,56 +276,185 @@ static inline bool EmptyCheck(const char* location, const v8::Data* obj) {
// --- S t a t i c s ---
-static i::StringInputBuffer write_input_buffer;
+static bool InitializeHelper() {
+ if (i::Snapshot::Initialize()) return true;
+ return i::V8::Initialize(NULL);
+}
-static inline bool EnsureInitialized(const char* location) {
- if (i::V8::IsRunning()) {
- return true;
+static inline bool EnsureInitializedForIsolate(i::Isolate* isolate,
+ const char* location) {
+ if (IsDeadCheck(isolate, location)) return false;
+ if (isolate != NULL) {
+ if (isolate->IsInitialized()) return true;
}
- if (IsDeadCheck(location)) {
- return false;
+ ASSERT(isolate == i::Isolate::Current());
+ return ApiCheck(InitializeHelper(), location, "Error initializing V8");
+}
+
+// Some initializing API functions are called early and may be
+// called on a thread different from the static-initializer thread.
+// If the Isolate API is used, Isolate::Enter() will initialize TLS
+// so that Isolate::Current() works. In the legacy case the thread
+// may not have TLS initialized yet. However, in initializing APIs
+// it may be too early to call EnsureInitialized() - some pre-init
+// parameters still have to be configured.
+static inline i::Isolate* EnterIsolateIfNeeded() {
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ if (isolate != NULL)
+ return isolate;
+
+ i::Isolate::EnterDefaultIsolate();
+ isolate = i::Isolate::Current();
+ return isolate;
+}
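// Embedder-side sketch of the two entry styles this helper reconciles: the
// explicit Isolate API below versus legacy code that relies on the default
// isolate being entered on demand (a sketch, not the only valid pattern):
#include <v8.h>

void RunInFreshIsolate() {
  v8::Isolate* isolate = v8::Isolate::New();
  {
    v8::Isolate::Scope isolate_scope(isolate);  // initializes TLS for Current()
    v8::HandleScope scope;
    // API calls here resolve i::Isolate::Current() to |isolate|.
  }
  isolate->Dispose();
}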
+
+
+StartupDataDecompressor::StartupDataDecompressor()
+ : raw_data(i::NewArray<char*>(V8::GetCompressedStartupDataCount())) {
+ for (int i = 0; i < V8::GetCompressedStartupDataCount(); ++i) {
+ raw_data[i] = NULL;
}
- return ApiCheck(v8::V8::Initialize(), location, "Error initializing V8");
}
-ImplementationUtilities::HandleScopeData*
- ImplementationUtilities::CurrentHandleScope() {
- return &i::HandleScope::current_;
+StartupDataDecompressor::~StartupDataDecompressor() {
+ for (int i = 0; i < V8::GetCompressedStartupDataCount(); ++i) {
+ i::DeleteArray(raw_data[i]);
+ }
+ i::DeleteArray(raw_data);
}
-#ifdef DEBUG
-void ImplementationUtilities::ZapHandleRange(i::Object** begin,
- i::Object** end) {
- i::HandleScope::ZapRange(begin, end);
+int StartupDataDecompressor::Decompress() {
+ int compressed_data_count = V8::GetCompressedStartupDataCount();
+ StartupData* compressed_data =
+ i::NewArray<StartupData>(compressed_data_count);
+ V8::GetCompressedStartupData(compressed_data);
+ for (int i = 0; i < compressed_data_count; ++i) {
+ char* decompressed = raw_data[i] =
+ i::NewArray<char>(compressed_data[i].raw_size);
+ if (compressed_data[i].compressed_size != 0) {
+ int result = DecompressData(decompressed,
+ &compressed_data[i].raw_size,
+ compressed_data[i].data,
+ compressed_data[i].compressed_size);
+ if (result != 0) return result;
+ } else {
+ ASSERT_EQ(0, compressed_data[i].raw_size);
+ }
+ compressed_data[i].data = decompressed;
+ }
+ V8::SetDecompressedStartupData(compressed_data);
+ return 0;
+}
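// Embedder-side sketch, assuming DecompressData() is the virtual hook the
// loop above dispatches to and a COMPRESS_STARTUP_DATA_BZ2 build with
// libbz2; the subclass name is illustrative, and the signature is dictated
// by the call in Decompress():
#include <bzlib.h>
#include <v8.h>

class BZip2Decompressor : public v8::StartupDataDecompressor {
 protected:
  virtual int DecompressData(char* raw_data,
                             int* raw_data_size,
                             const char* compressed_data,
                             int compressed_data_size) {
    unsigned int dest_len = static_cast<unsigned int>(*raw_data_size);
    int result = BZ2_bzBuffToBuffDecompress(
        raw_data, &dest_len, const_cast<char*>(compressed_data),
        compressed_data_size, 0 /* small */, 0 /* verbosity */);
    *raw_data_size = static_cast<int>(dest_len);
    return result;  // BZ_OK (0) on success, matching the check above.
  }
};
// Called once before V8 starts up:
//   BZip2Decompressor decompressor;
//   if (decompressor.Decompress() != 0) abort();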
+
+
+StartupData::CompressionAlgorithm V8::GetCompressedStartupDataAlgorithm() {
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+ return StartupData::kBZip2;
+#else
+ return StartupData::kUncompressed;
+#endif
}
+
+
+enum CompressedStartupDataItems {
+ kSnapshot = 0,
+ kSnapshotContext,
+ kLibraries,
+ kExperimentalLibraries,
+ kCompressedStartupDataCount
+};
+
+int V8::GetCompressedStartupDataCount() {
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+ return kCompressedStartupDataCount;
+#else
+ return 0;
+#endif
+}
+
+
+void V8::GetCompressedStartupData(StartupData* compressed_data) {
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+ compressed_data[kSnapshot].data =
+ reinterpret_cast<const char*>(i::Snapshot::data());
+ compressed_data[kSnapshot].compressed_size = i::Snapshot::size();
+ compressed_data[kSnapshot].raw_size = i::Snapshot::raw_size();
+
+ compressed_data[kSnapshotContext].data =
+ reinterpret_cast<const char*>(i::Snapshot::context_data());
+ compressed_data[kSnapshotContext].compressed_size =
+ i::Snapshot::context_size();
+ compressed_data[kSnapshotContext].raw_size = i::Snapshot::context_raw_size();
+
+ i::Vector<const i::byte> libraries_source = i::Natives::GetScriptsSource();
+ compressed_data[kLibraries].data =
+ reinterpret_cast<const char*>(libraries_source.start());
+ compressed_data[kLibraries].compressed_size = libraries_source.length();
+ compressed_data[kLibraries].raw_size = i::Natives::GetRawScriptsSize();
+
+ i::Vector<const i::byte> exp_libraries_source =
+ i::ExperimentalNatives::GetScriptsSource();
+ compressed_data[kExperimentalLibraries].data =
+ reinterpret_cast<const char*>(exp_libraries_source.start());
+ compressed_data[kExperimentalLibraries].compressed_size =
+ exp_libraries_source.length();
+ compressed_data[kExperimentalLibraries].raw_size =
+ i::ExperimentalNatives::GetRawScriptsSize();
#endif
+}
-v8::Handle<v8::Primitive> ImplementationUtilities::Undefined() {
- if (!EnsureInitialized("v8::Undefined()")) return v8::Handle<v8::Primitive>();
- return v8::Handle<Primitive>(ToApi<Primitive>(i::Factory::undefined_value()));
+void V8::SetDecompressedStartupData(StartupData* decompressed_data) {
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+ ASSERT_EQ(i::Snapshot::raw_size(), decompressed_data[kSnapshot].raw_size);
+ i::Snapshot::set_raw_data(
+ reinterpret_cast<const i::byte*>(decompressed_data[kSnapshot].data));
+
+ ASSERT_EQ(i::Snapshot::context_raw_size(),
+ decompressed_data[kSnapshotContext].raw_size);
+ i::Snapshot::set_context_raw_data(
+ reinterpret_cast<const i::byte*>(
+ decompressed_data[kSnapshotContext].data));
+
+ ASSERT_EQ(i::Natives::GetRawScriptsSize(),
+ decompressed_data[kLibraries].raw_size);
+ i::Vector<const char> libraries_source(
+ decompressed_data[kLibraries].data,
+ decompressed_data[kLibraries].raw_size);
+ i::Natives::SetRawScriptsSource(libraries_source);
+
+ ASSERT_EQ(i::ExperimentalNatives::GetRawScriptsSize(),
+ decompressed_data[kExperimentalLibraries].raw_size);
+ i::Vector<const char> exp_libraries_source(
+ decompressed_data[kExperimentalLibraries].data,
+ decompressed_data[kExperimentalLibraries].raw_size);
+ i::ExperimentalNatives::SetRawScriptsSource(exp_libraries_source);
+#endif
}
-v8::Handle<v8::Primitive> ImplementationUtilities::Null() {
- if (!EnsureInitialized("v8::Null()")) return v8::Handle<v8::Primitive>();
- return v8::Handle<Primitive>(ToApi<Primitive>(i::Factory::null_value()));
+void V8::SetFatalErrorHandler(FatalErrorCallback that) {
+ i::Isolate* isolate = EnterIsolateIfNeeded();
+ isolate->set_exception_behavior(that);
}
-v8::Handle<v8::Boolean> ImplementationUtilities::True() {
- if (!EnsureInitialized("v8::True()")) return v8::Handle<v8::Boolean>();
- return v8::Handle<v8::Boolean>(ToApi<Boolean>(i::Factory::true_value()));
+void V8::SetAllowCodeGenerationFromStringsCallback(
+ AllowCodeGenerationFromStringsCallback callback) {
+ i::Isolate* isolate = EnterIsolateIfNeeded();
+ isolate->set_allow_code_gen_callback(callback);
}
-v8::Handle<v8::Boolean> ImplementationUtilities::False() {
- if (!EnsureInitialized("v8::False()")) return v8::Handle<v8::Boolean>();
- return v8::Handle<v8::Boolean>(ToApi<Boolean>(i::Factory::false_value()));
+#ifdef DEBUG
+void ImplementationUtilities::ZapHandleRange(i::Object** begin,
+ i::Object** end) {
+ i::HandleScope::ZapRange(begin, end);
}
+#endif
void V8::SetFlagsFromString(const char* str, int length) {
@@ -333,14 +468,17 @@ void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) {
v8::Handle<Value> ThrowException(v8::Handle<v8::Value> value) {
- if (IsDeadCheck("v8::ThrowException()")) return v8::Handle<Value>();
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::ThrowException()")) {
+ return v8::Handle<Value>();
+ }
+ ENTER_V8(isolate);
// If we're passed an empty handle, we throw an undefined exception
// to deal more gracefully with out of memory situations.
if (value.IsEmpty()) {
- i::Top::ScheduleThrow(i::Heap::undefined_value());
+ isolate->ScheduleThrow(isolate->heap()->undefined_value());
} else {
- i::Top::ScheduleThrow(*Utils::OpenHandle(*value));
+ isolate->ScheduleThrow(*Utils::OpenHandle(*value));
}
return v8::Undefined();
}
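// Embedder-side sketch: a native callback failing through the
// scheduled-exception path wired up above (the error text is illustrative):
#include <v8.h>

static v8::Handle<v8::Value> Fail(const v8::Arguments& args) {
  return v8::ThrowException(
      v8::Exception::Error(v8::String::New("native failure")));
}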
@@ -354,8 +492,8 @@ RegisteredExtension::RegisteredExtension(Extension* extension)
void RegisteredExtension::Register(RegisteredExtension* that) {
- that->next_ = RegisteredExtension::first_extension_;
- RegisteredExtension::first_extension_ = that;
+ that->next_ = first_extension_;
+ first_extension_ = that;
}
@@ -377,26 +515,42 @@ Extension::Extension(const char* name,
v8::Handle<Primitive> Undefined() {
- LOG_API("Undefined");
- return ImplementationUtilities::Undefined();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!EnsureInitializedForIsolate(isolate, "v8::Undefined()")) {
+ return v8::Handle<v8::Primitive>();
+ }
+ return v8::Handle<Primitive>(ToApi<Primitive>(
+ isolate->factory()->undefined_value()));
}
v8::Handle<Primitive> Null() {
- LOG_API("Null");
- return ImplementationUtilities::Null();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!EnsureInitializedForIsolate(isolate, "v8::Null()")) {
+ return v8::Handle<v8::Primitive>();
+ }
+ return v8::Handle<Primitive>(
+ ToApi<Primitive>(isolate->factory()->null_value()));
}
v8::Handle<Boolean> True() {
- LOG_API("True");
- return ImplementationUtilities::True();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!EnsureInitializedForIsolate(isolate, "v8::True()")) {
+ return v8::Handle<Boolean>();
+ }
+ return v8::Handle<Boolean>(
+ ToApi<Boolean>(isolate->factory()->true_value()));
}
v8::Handle<Boolean> False() {
- LOG_API("False");
- return ImplementationUtilities::False();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!EnsureInitializedForIsolate(isolate, "v8::False()")) {
+ return v8::Handle<Boolean>();
+ }
+ return v8::Handle<Boolean>(
+ ToApi<Boolean>(isolate->factory()->false_value()));
}
@@ -408,74 +562,96 @@ ResourceConstraints::ResourceConstraints()
bool SetResourceConstraints(ResourceConstraints* constraints) {
+ i::Isolate* isolate = EnterIsolateIfNeeded();
+
int young_space_size = constraints->max_young_space_size();
int old_gen_size = constraints->max_old_space_size();
int max_executable_size = constraints->max_executable_size();
if (young_space_size != 0 || old_gen_size != 0 || max_executable_size != 0) {
- bool result = i::Heap::ConfigureHeap(young_space_size / 2,
- old_gen_size,
- max_executable_size);
+ // After initialization it's too late to change Heap constraints.
+ ASSERT(!isolate->IsInitialized());
+ bool result = isolate->heap()->ConfigureHeap(young_space_size / 2,
+ old_gen_size,
+ max_executable_size);
if (!result) return false;
}
if (constraints->stack_limit() != NULL) {
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
- i::StackGuard::SetStackLimit(limit);
+ isolate->stack_guard()->SetStackLimit(limit);
}
return true;
}
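// Embedder-side sketch of the intended ordering: per the ASSERT above, heap
// constraints must be set before the isolate is initialized. Assumes the
// uint32_t* stack_limit slot of this API generation; the 256 KB margin is
// illustrative.
#include <stdint.h>
#include <v8.h>

static void ConfigureStack() {
  v8::ResourceConstraints constraints;
  int marker;
  // Leave headroom below the current C++ stack position.
  uintptr_t limit = reinterpret_cast<uintptr_t>(&marker) - 256 * 1024;
  constraints.set_stack_limit(reinterpret_cast<uint32_t*>(limit));
  v8::SetResourceConstraints(&constraints);  // before any script runs
}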
i::Object** V8::GlobalizeReference(i::Object** obj) {
- if (IsDeadCheck("V8::Persistent::New")) return NULL;
- LOG_API("Persistent::New");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "V8::Persistent::New")) return NULL;
+ LOG_API(isolate, "Persistent::New");
i::Handle<i::Object> result =
- i::GlobalHandles::Create(*obj);
+ isolate->global_handles()->Create(*obj);
return result.location();
}
void V8::MakeWeak(i::Object** object, void* parameters,
WeakReferenceCallback callback) {
- LOG_API("MakeWeak");
- i::GlobalHandles::MakeWeak(object, parameters, callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "MakeWeak");
+ isolate->global_handles()->MakeWeak(object, parameters,
+ callback);
}
void V8::ClearWeak(i::Object** obj) {
- LOG_API("ClearWeak");
- i::GlobalHandles::ClearWeakness(obj);
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "ClearWeak");
+ isolate->global_handles()->ClearWeakness(obj);
+}
+
+
+void V8::MarkIndependent(i::Object** object) {
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "MakeIndependent");
+ isolate->global_handles()->MarkIndependent(object);
}
bool V8::IsGlobalNearDeath(i::Object** obj) {
- LOG_API("IsGlobalNearDeath");
- if (!i::V8::IsRunning()) return false;
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "IsGlobalNearDeath");
+ if (!isolate->IsInitialized()) return false;
return i::GlobalHandles::IsNearDeath(obj);
}
bool V8::IsGlobalWeak(i::Object** obj) {
- LOG_API("IsGlobalWeak");
- if (!i::V8::IsRunning()) return false;
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "IsGlobalWeak");
+ if (!isolate->IsInitialized()) return false;
return i::GlobalHandles::IsWeak(obj);
}
void V8::DisposeGlobal(i::Object** obj) {
- LOG_API("DisposeGlobal");
- if (!i::V8::IsRunning()) return;
- i::GlobalHandles::Destroy(obj);
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "DisposeGlobal");
+ if (!isolate->IsInitialized()) return;
+ isolate->global_handles()->Destroy(obj);
}
// --- H a n d l e s ---
-HandleScope::HandleScope()
- : prev_next_(i::HandleScope::current_.next),
- prev_limit_(i::HandleScope::current_.limit),
- is_closed_(false) {
- API_ENTRY_CHECK("HandleScope::HandleScope");
- i::HandleScope::current_.level++;
+HandleScope::HandleScope() {
+ i::Isolate* isolate = i::Isolate::Current();
+ API_ENTRY_CHECK(isolate, "HandleScope::HandleScope");
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate->handle_scope_data();
+ isolate_ = isolate;
+ prev_next_ = current->next;
+ prev_limit_ = current->limit;
+ is_closed_ = false;
+ current->level++;
}
@@ -487,12 +663,15 @@ HandleScope::~HandleScope() {
void HandleScope::Leave() {
- i::HandleScope::current_.level--;
- ASSERT(i::HandleScope::current_.level >= 0);
- i::HandleScope::current_.next = prev_next_;
- if (i::HandleScope::current_.limit != prev_limit_) {
- i::HandleScope::current_.limit = prev_limit_;
- i::HandleScope::DeleteExtensions();
+ ASSERT(isolate_ == i::Isolate::Current());
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate_->handle_scope_data();
+ current->level--;
+ ASSERT(current->level >= 0);
+ current->next = prev_next_;
+ if (current->limit != prev_limit_) {
+ current->limit = prev_limit_;
+ i::HandleScope::DeleteExtensions(isolate_);
}
#ifdef DEBUG
@@ -502,70 +681,83 @@ void HandleScope::Leave() {
int HandleScope::NumberOfHandles() {
+ EnsureInitializedForIsolate(
+ i::Isolate::Current(), "HandleScope::NumberOfHandles");
return i::HandleScope::NumberOfHandles();
}
-i::Object** v8::HandleScope::CreateHandle(i::Object* value) {
- return i::HandleScope::CreateHandle(value);
+i::Object** HandleScope::CreateHandle(i::Object* value) {
+ return i::HandleScope::CreateHandle(value, i::Isolate::Current());
+}
+
+
+i::Object** HandleScope::CreateHandle(i::HeapObject* value) {
+ ASSERT(value->IsHeapObject());
+ return reinterpret_cast<i::Object**>(
+ i::HandleScope::CreateHandle(value, value->GetIsolate()));
}
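// Embedder-side sketch: handles now come out of the creating isolate's
// handle_scope_data rather than a process-wide static (string contents
// illustrative):
#include <v8.h>

v8::Handle<v8::String> MakeGreeting() {
  v8::HandleScope scope;               // pushes a level on the isolate's data
  v8::Local<v8::String> s = v8::String::New("hello");
  return scope.Close(s);               // escapes exactly one handle outward
}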
void Context::Enter() {
- if (IsDeadCheck("v8::Context::Enter()")) return;
- ENTER_V8;
i::Handle<i::Context> env = Utils::OpenHandle(this);
- thread_local.EnterContext(env);
+ i::Isolate* isolate = env->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Context::Enter()")) return;
+ ENTER_V8(isolate);
+
+ isolate->handle_scope_implementer()->EnterContext(env);
- thread_local.SaveContext(i::Top::context());
- i::Top::set_context(*env);
+ isolate->handle_scope_implementer()->SaveContext(isolate->context());
+ isolate->set_context(*env);
}
void Context::Exit() {
- if (!i::V8::IsRunning()) return;
- if (!ApiCheck(thread_local.LeaveLastContext(),
+ // Exit is essentially a static function and doesn't use the
+ // receiver, so we have to get the current isolate from the thread
+ // local.
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!isolate->IsInitialized()) return;
+
+ if (!ApiCheck(isolate->handle_scope_implementer()->LeaveLastContext(),
"v8::Context::Exit()",
"Cannot exit non-entered context")) {
return;
}
// Content of 'last_context' could be NULL.
- i::Context* last_context = thread_local.RestoreContext();
- i::Top::set_context(last_context);
+ i::Context* last_context =
+ isolate->handle_scope_implementer()->RestoreContext();
+ isolate->set_context(last_context);
}
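// Embedder-side sketch of the Enter()/Exit() pairing the implementer above
// tracks; v8::Context::Scope is the usual RAII wrapper over the same pair:
#include <v8.h>

void RunInContext(v8::Persistent<v8::Context> context) {
  v8::HandleScope scope;
  v8::Context::Scope context_scope(context);  // Enter() now, Exit() on return
  // ... compile and run scripts against |context| here ...
}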
void Context::SetData(v8::Handle<String> data) {
- if (IsDeadCheck("v8::Context::SetData()")) return;
- ENTER_V8;
- {
- HandleScope scope;
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
- ASSERT(env->IsGlobalContext());
- if (env->IsGlobalContext()) {
- env->set_data(*raw_data);
- }
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+ i::Isolate* isolate = env->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Context::SetData()")) return;
+ i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
+ ASSERT(env->IsGlobalContext());
+ if (env->IsGlobalContext()) {
+ env->set_data(*raw_data);
}
}
v8::Local<v8::Value> Context::GetData() {
- if (IsDeadCheck("v8::Context::GetData()")) return v8::Local<Value>();
- ENTER_V8;
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+ i::Isolate* isolate = env->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Context::GetData()")) {
+ return v8::Local<Value>();
+ }
i::Object* raw_result = NULL;
- {
- HandleScope scope;
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- ASSERT(env->IsGlobalContext());
- if (env->IsGlobalContext()) {
- raw_result = env->data();
- } else {
- return Local<Value>();
- }
+ ASSERT(env->IsGlobalContext());
+ if (env->IsGlobalContext()) {
+ raw_result = env->data();
+ } else {
+ return Local<Value>();
}
- i::Handle<i::Object> result(raw_result);
+ i::Handle<i::Object> result(raw_result, isolate);
return Utils::ToLocal(result);
}
@@ -576,7 +768,7 @@ i::Object** v8::HandleScope::RawClose(i::Object** value) {
"Local scope has already been closed")) {
return 0;
}
- LOG_API("CloseHandleScope");
+ LOG_API(isolate_, "CloseHandleScope");
// Read the result before popping the handle block.
i::Object* result = NULL;
@@ -605,10 +797,11 @@ i::Object** v8::HandleScope::RawClose(i::Object** value) {
// NeanderObject constructor. When you add a new call site for the
// constructor, first make sure the VM is not dead.
NeanderObject::NeanderObject(int size) {
- EnsureInitialized("v8::Nowhere");
- ENTER_V8;
- value_ = i::Factory::NewNeanderObject();
- i::Handle<i::FixedArray> elements = i::Factory::NewFixedArray(size);
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Nowhere");
+ ENTER_V8(isolate);
+ value_ = isolate->factory()->NewNeanderObject();
+ i::Handle<i::FixedArray> elements = isolate->factory()->NewFixedArray(size);
value_->set_elements(*elements);
}
@@ -644,7 +837,7 @@ void NeanderArray::add(i::Handle<i::Object> value) {
int length = this->length();
int size = obj_.size();
if (length == size - 1) {
- i::Handle<i::FixedArray> new_elms = i::Factory::NewFixedArray(2 * size);
+ i::Handle<i::FixedArray> new_elms = FACTORY->NewFixedArray(2 * size);
for (int i = 0; i < length; i++)
new_elms->set(i + 1, get(i));
obj_.value()->set_elements(*new_elms);
@@ -670,9 +863,10 @@ static void InitializeTemplate(i::Handle<i::TemplateInfo> that, int type) {
void Template::Set(v8::Handle<String> name, v8::Handle<Data> value,
v8::PropertyAttribute attribute) {
- if (IsDeadCheck("v8::Template::Set()")) return;
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Template::Set()")) return;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list());
if (list->IsUndefined()) {
list = NeanderArray().value();
@@ -690,14 +884,16 @@ static void InitializeFunctionTemplate(
i::Handle<i::FunctionTemplateInfo> info) {
info->set_tag(i::Smi::FromInt(Consts::FUNCTION_TEMPLATE));
info->set_flag(0);
+ info->set_prototype_attributes(i::Smi::FromInt(v8::None));
}
Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
- if (IsDeadCheck("v8::FunctionTemplate::PrototypeTemplate()")) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::PrototypeTemplate()")) {
return Local<ObjectTemplate>();
}
- ENTER_V8;
+ ENTER_V8(isolate);
i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template());
if (result->IsUndefined()) {
result = Utils::OpenHandle(*ObjectTemplate::New());
@@ -708,28 +904,27 @@ Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
- if (IsDeadCheck("v8::FunctionTemplate::Inherit()")) return;
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::Inherit()")) return;
+ ENTER_V8(isolate);
Utils::OpenHandle(this)->set_parent_template(*Utils::OpenHandle(*value));
}
-// To distinguish the function templates, so that we can find them in the
-// function cache of the global context.
-static int next_serial_number = 0;
-
-
Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback,
v8::Handle<Value> data, v8::Handle<Signature> signature) {
- EnsureInitialized("v8::FunctionTemplate::New()");
- LOG_API("FunctionTemplate::New");
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()");
+ LOG_API(isolate, "FunctionTemplate::New");
+ ENTER_V8(isolate);
i::Handle<i::Struct> struct_obj =
- i::Factory::NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
+ isolate->factory()->NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
i::Handle<i::FunctionTemplateInfo> obj =
i::Handle<i::FunctionTemplateInfo>::cast(struct_obj);
InitializeFunctionTemplate(obj);
- obj->set_serial_number(i::Smi::FromInt(next_serial_number++));
+ int next_serial_number = isolate->next_serial_number();
+ isolate->set_next_serial_number(next_serial_number + 1);
+ obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (callback != 0) {
if (data.IsEmpty()) data = v8::Undefined();
Utils::ToLocal(obj)->SetCallHandler(callback, data);
@@ -745,16 +940,17 @@ Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback,
Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
int argc, Handle<FunctionTemplate> argv[]) {
- EnsureInitialized("v8::Signature::New()");
- LOG_API("Signature::New");
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Signature::New()");
+ LOG_API(isolate, "Signature::New");
+ ENTER_V8(isolate);
i::Handle<i::Struct> struct_obj =
- i::Factory::NewStruct(i::SIGNATURE_INFO_TYPE);
+ isolate->factory()->NewStruct(i::SIGNATURE_INFO_TYPE);
i::Handle<i::SignatureInfo> obj =
i::Handle<i::SignatureInfo>::cast(struct_obj);
if (!receiver.IsEmpty()) obj->set_receiver(*Utils::OpenHandle(*receiver));
if (argc > 0) {
- i::Handle<i::FixedArray> args = i::Factory::NewFixedArray(argc);
+ i::Handle<i::FixedArray> args = isolate->factory()->NewFixedArray(argc);
for (int i = 0; i < argc; i++) {
if (!argv[i].IsEmpty())
args->set(i, *Utils::OpenHandle(*argv[i]));
@@ -772,14 +968,15 @@ Local<TypeSwitch> TypeSwitch::New(Handle<FunctionTemplate> type) {
Local<TypeSwitch> TypeSwitch::New(int argc, Handle<FunctionTemplate> types[]) {
- EnsureInitialized("v8::TypeSwitch::New()");
- LOG_API("TypeSwitch::New");
- ENTER_V8;
- i::Handle<i::FixedArray> vector = i::Factory::NewFixedArray(argc);
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::TypeSwitch::New()");
+ LOG_API(isolate, "TypeSwitch::New");
+ ENTER_V8(isolate);
+ i::Handle<i::FixedArray> vector = isolate->factory()->NewFixedArray(argc);
for (int i = 0; i < argc; i++)
vector->set(i, *Utils::OpenHandle(*types[i]));
i::Handle<i::Struct> struct_obj =
- i::Factory::NewStruct(i::TYPE_SWITCH_INFO_TYPE);
+ isolate->factory()->NewStruct(i::TYPE_SWITCH_INFO_TYPE);
i::Handle<i::TypeSwitchInfo> obj =
i::Handle<i::TypeSwitchInfo>::cast(struct_obj);
obj->set_types(*vector);
@@ -788,7 +985,9 @@ Local<TypeSwitch> TypeSwitch::New(int argc, Handle<FunctionTemplate> types[]) {
int TypeSwitch::match(v8::Handle<Value> value) {
- LOG_API("TypeSwitch::match");
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "TypeSwitch::match");
+ USE(isolate);
i::Handle<i::Object> obj = Utils::OpenHandle(*value);
i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
i::FixedArray* types = i::FixedArray::cast(info->types());
@@ -800,19 +999,20 @@ int TypeSwitch::match(v8::Handle<Value> value) {
}
-#define SET_FIELD_WRAPPED(obj, setter, cdata) do { \
- i::Handle<i::Object> proxy = FromCData(cdata); \
- (obj)->setter(*proxy); \
+#define SET_FIELD_WRAPPED(obj, setter, cdata) do { \
+ i::Handle<i::Object> foreign = FromCData(cdata); \
+ (obj)->setter(*foreign); \
} while (false)
void FunctionTemplate::SetCallHandler(InvocationCallback callback,
v8::Handle<Value> data) {
- if (IsDeadCheck("v8::FunctionTemplate::SetCallHandler()")) return;
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetCallHandler()")) return;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::Struct> struct_obj =
- i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE);
+ isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
@@ -829,7 +1029,7 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
v8::Handle<Value> data,
v8::AccessControl settings,
v8::PropertyAttribute attributes) {
- i::Handle<i::AccessorInfo> obj = i::Factory::NewAccessorInfo();
+ i::Handle<i::AccessorInfo> obj = FACTORY->NewAccessorInfo();
ASSERT(getter != NULL);
SET_FIELD_WRAPPED(obj, set_getter, getter);
SET_FIELD_WRAPPED(obj, set_setter, setter);
@@ -851,11 +1051,13 @@ void FunctionTemplate::AddInstancePropertyAccessor(
v8::Handle<Value> data,
v8::AccessControl settings,
v8::PropertyAttribute attributes) {
- if (IsDeadCheck("v8::FunctionTemplate::AddInstancePropertyAccessor()")) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate,
+ "v8::FunctionTemplate::AddInstancePropertyAccessor()")) {
return;
}
- ENTER_V8;
- HandleScope scope;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name,
getter, setter, data,
@@ -871,10 +1073,11 @@ void FunctionTemplate::AddInstancePropertyAccessor(
Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
- if (IsDeadCheck("v8::FunctionTemplate::InstanceTemplate()")
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::InstanceTemplate()")
|| EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
return Local<ObjectTemplate>();
- ENTER_V8;
+ ENTER_V8(isolate);
if (Utils::OpenHandle(this)->instance_template()->IsUndefined()) {
Local<ObjectTemplate> templ =
ObjectTemplate::New(v8::Handle<FunctionTemplate>(this));
@@ -887,19 +1090,34 @@ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
void FunctionTemplate::SetClassName(Handle<String> name) {
- if (IsDeadCheck("v8::FunctionTemplate::SetClassName()")) return;
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetClassName()")) return;
+ ENTER_V8(isolate);
Utils::OpenHandle(this)->set_class_name(*Utils::OpenHandle(*name));
}
void FunctionTemplate::SetHiddenPrototype(bool value) {
- if (IsDeadCheck("v8::FunctionTemplate::SetHiddenPrototype()")) return;
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetHiddenPrototype()")) {
+ return;
+ }
+ ENTER_V8(isolate);
Utils::OpenHandle(this)->set_hidden_prototype(value);
}
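// Embedder-side sketch of the template plumbing these setters feed (the
// callback and property names are illustrative):
#include <v8.h>

static v8::Handle<v8::Value> Version(const v8::Arguments& args) {
  return v8::String::New(v8::V8::GetVersion());
}

v8::Handle<v8::ObjectTemplate> MakeGlobalTemplate() {
  v8::HandleScope scope;
  v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
  global->Set(v8::String::New("version"),
              v8::FunctionTemplate::New(Version));
  return scope.Close(global);
}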
+void FunctionTemplate::SetPrototypeAttributes(int attributes) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetPrototypeAttributes()")) {
+ return;
+ }
+ ENTER_V8(isolate);
+ Utils::OpenHandle(this)->set_prototype_attributes(
+ i::Smi::FromInt(attributes));
+}
+
+
void FunctionTemplate::SetNamedInstancePropertyHandler(
NamedPropertyGetter getter,
NamedPropertySetter setter,
@@ -907,13 +1125,15 @@ void FunctionTemplate::SetNamedInstancePropertyHandler(
NamedPropertyDeleter remover,
NamedPropertyEnumerator enumerator,
Handle<Value> data) {
- if (IsDeadCheck("v8::FunctionTemplate::SetNamedInstancePropertyHandler()")) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate,
+ "v8::FunctionTemplate::SetNamedInstancePropertyHandler()")) {
return;
}
- ENTER_V8;
- HandleScope scope;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::Struct> struct_obj =
- i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE);
+ isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
i::Handle<i::InterceptorInfo> obj =
i::Handle<i::InterceptorInfo>::cast(struct_obj);
@@ -936,14 +1156,15 @@ void FunctionTemplate::SetIndexedInstancePropertyHandler(
IndexedPropertyDeleter remover,
IndexedPropertyEnumerator enumerator,
Handle<Value> data) {
- if (IsDeadCheck(
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate,
"v8::FunctionTemplate::SetIndexedInstancePropertyHandler()")) {
return;
}
- ENTER_V8;
- HandleScope scope;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::Struct> struct_obj =
- i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE);
+ isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
i::Handle<i::InterceptorInfo> obj =
i::Handle<i::InterceptorInfo>::cast(struct_obj);
@@ -962,13 +1183,15 @@ void FunctionTemplate::SetIndexedInstancePropertyHandler(
void FunctionTemplate::SetInstanceCallAsFunctionHandler(
InvocationCallback callback,
Handle<Value> data) {
- if (IsDeadCheck("v8::FunctionTemplate::SetInstanceCallAsFunctionHandler()")) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate,
+ "v8::FunctionTemplate::SetInstanceCallAsFunctionHandler()")) {
return;
}
- ENTER_V8;
- HandleScope scope;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::Struct> struct_obj =
- i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE);
+ isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
@@ -988,12 +1211,15 @@ Local<ObjectTemplate> ObjectTemplate::New() {
Local<ObjectTemplate> ObjectTemplate::New(
v8::Handle<FunctionTemplate> constructor) {
- if (IsDeadCheck("v8::ObjectTemplate::New()")) return Local<ObjectTemplate>();
- EnsureInitialized("v8::ObjectTemplate::New()");
- LOG_API("ObjectTemplate::New");
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::ObjectTemplate::New()")) {
+ return Local<ObjectTemplate>();
+ }
+ EnsureInitializedForIsolate(isolate, "v8::ObjectTemplate::New()");
+ LOG_API(isolate, "ObjectTemplate::New");
+ ENTER_V8(isolate);
i::Handle<i::Struct> struct_obj =
- i::Factory::NewStruct(i::OBJECT_TEMPLATE_INFO_TYPE);
+ isolate->factory()->NewStruct(i::OBJECT_TEMPLATE_INFO_TYPE);
i::Handle<i::ObjectTemplateInfo> obj =
i::Handle<i::ObjectTemplateInfo>::cast(struct_obj);
InitializeTemplate(obj, Consts::OBJECT_TEMPLATE);
@@ -1022,9 +1248,10 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name,
v8::Handle<Value> data,
AccessControl settings,
PropertyAttribute attribute) {
- if (IsDeadCheck("v8::ObjectTemplate::SetAccessor()")) return;
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
EnsureConstructor(this);
i::FunctionTemplateInfo* constructor =
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
@@ -1044,9 +1271,12 @@ void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
NamedPropertyDeleter remover,
NamedPropertyEnumerator enumerator,
Handle<Value> data) {
- if (IsDeadCheck("v8::ObjectTemplate::SetNamedPropertyHandler()")) return;
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetNamedPropertyHandler()")) {
+ return;
+ }
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
EnsureConstructor(this);
i::FunctionTemplateInfo* constructor =
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
@@ -1061,9 +1291,10 @@ void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
void ObjectTemplate::MarkAsUndetectable() {
- if (IsDeadCheck("v8::ObjectTemplate::MarkAsUndetectable()")) return;
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::ObjectTemplate::MarkAsUndetectable()")) return;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
EnsureConstructor(this);
i::FunctionTemplateInfo* constructor =
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
@@ -1077,13 +1308,16 @@ void ObjectTemplate::SetAccessCheckCallbacks(
IndexedSecurityCallback indexed_callback,
Handle<Value> data,
bool turned_on_by_default) {
- if (IsDeadCheck("v8::ObjectTemplate::SetAccessCheckCallbacks()")) return;
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessCheckCallbacks()")) {
+ return;
+ }
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
EnsureConstructor(this);
i::Handle<i::Struct> struct_info =
- i::Factory::NewStruct(i::ACCESS_CHECK_INFO_TYPE);
+ isolate->factory()->NewStruct(i::ACCESS_CHECK_INFO_TYPE);
i::Handle<i::AccessCheckInfo> info =
i::Handle<i::AccessCheckInfo>::cast(struct_info);
@@ -1108,9 +1342,12 @@ void ObjectTemplate::SetIndexedPropertyHandler(
IndexedPropertyDeleter remover,
IndexedPropertyEnumerator enumerator,
Handle<Value> data) {
- if (IsDeadCheck("v8::ObjectTemplate::SetIndexedPropertyHandler()")) return;
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetIndexedPropertyHandler()")) {
+ return;
+ }
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
EnsureConstructor(this);
i::FunctionTemplateInfo* constructor =
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
@@ -1126,9 +1363,13 @@ void ObjectTemplate::SetIndexedPropertyHandler(
void ObjectTemplate::SetCallAsFunctionHandler(InvocationCallback callback,
Handle<Value> data) {
- if (IsDeadCheck("v8::ObjectTemplate::SetCallAsFunctionHandler()")) return;
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate,
+ "v8::ObjectTemplate::SetCallAsFunctionHandler()")) {
+ return;
+ }
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
EnsureConstructor(this);
i::FunctionTemplateInfo* constructor =
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
@@ -1138,7 +1379,8 @@ void ObjectTemplate::SetCallAsFunctionHandler(InvocationCallback callback,
int ObjectTemplate::InternalFieldCount() {
- if (IsDeadCheck("v8::ObjectTemplate::InternalFieldCount()")) {
+ if (IsDeadCheck(Utils::OpenHandle(this)->GetIsolate(),
+ "v8::ObjectTemplate::InternalFieldCount()")) {
return 0;
}
return i::Smi::cast(Utils::OpenHandle(this)->internal_field_count())->value();
@@ -1146,13 +1388,16 @@ int ObjectTemplate::InternalFieldCount() {
void ObjectTemplate::SetInternalFieldCount(int value) {
- if (IsDeadCheck("v8::ObjectTemplate::SetInternalFieldCount()")) return;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetInternalFieldCount()")) {
+ return;
+ }
if (!ApiCheck(i::Smi::IsValid(value),
"v8::ObjectTemplate::SetInternalFieldCount()",
"Invalid internal field count")) {
return;
}
- ENTER_V8;
+ ENTER_V8(isolate);
if (value > 0) {
// The internal field count is set by the constructor function's
// construct code, so we ensure that there is a constructor
@@ -1200,7 +1445,7 @@ ScriptData* ScriptData::New(const char* data, int length) {
}
// Copy the data to align it.
unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
- i::MemCopy(deserialized_data, data, length);
+ i::OS::MemCopy(deserialized_data, data, length);
return new i::ScriptDataImpl(
i::Vector<unsigned>(deserialized_data, deserialized_data_length));
@@ -1214,9 +1459,10 @@ Local<Script> Script::New(v8::Handle<String> source,
v8::ScriptOrigin* origin,
v8::ScriptData* pre_data,
v8::Handle<String> script_data) {
- ON_BAILOUT("v8::Script::New()", return Local<Script>());
- LOG_API("Script::New");
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>());
+ LOG_API(isolate, "Script::New");
+ ENTER_V8(isolate);
i::Handle<i::String> str = Utils::OpenHandle(*source);
i::Handle<i::Object> name_obj;
int line_offset = 0;
@@ -1232,7 +1478,7 @@ Local<Script> Script::New(v8::Handle<String> source,
column_offset = static_cast<int>(origin->ResourceColumnOffset()->Value());
}
}
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::ScriptDataImpl* pre_data_impl = static_cast<i::ScriptDataImpl*>(pre_data);
// We assert that the pre-data is sane; in release mode we can
// actually handle it even if it turns out not to be.
@@ -1251,7 +1497,7 @@ Local<Script> Script::New(v8::Handle<String> source,
Utils::OpenHandle(*script_data),
i::NOT_NATIVES_CODE);
has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(Local<Script>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
return Local<Script>(ToApi<Script>(result));
}
@@ -1267,9 +1513,10 @@ Local<Script> Script::Compile(v8::Handle<String> source,
v8::ScriptOrigin* origin,
v8::ScriptData* pre_data,
v8::Handle<String> script_data) {
- ON_BAILOUT("v8::Script::Compile()", return Local<Script>());
- LOG_API("Script::Compile");
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Script::Compile()", return Local<Script>());
+ LOG_API(isolate, "Script::Compile");
+ ENTER_V8(isolate);
Local<Script> generic = New(source, origin, pre_data, script_data);
if (generic.IsEmpty())
return generic;
@@ -1277,8 +1524,9 @@ Local<Script> Script::Compile(v8::Handle<String> source,
i::Handle<i::SharedFunctionInfo> function =
i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
i::Handle<i::JSFunction> result =
- i::Factory::NewFunctionFromSharedFunctionInfo(function,
- i::Top::global_context());
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ function,
+ isolate->global_context());
return Local<Script>(ToApi<Script>(result));
}
@@ -1292,30 +1540,32 @@ Local<Script> Script::Compile(v8::Handle<String> source,
Local<Value> Script::Run() {
- ON_BAILOUT("v8::Script::Run()", return Local<Value>());
- LOG_API("Script::Run");
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
+ LOG_API(isolate, "Script::Run");
+ ENTER_V8(isolate);
i::Object* raw_result = NULL;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSFunction> fun;
if (obj->IsSharedFunctionInfo()) {
i::Handle<i::SharedFunctionInfo>
- function_info(i::SharedFunctionInfo::cast(*obj));
- fun = i::Factory::NewFunctionFromSharedFunctionInfo(
- function_info, i::Top::global_context());
+ function_info(i::SharedFunctionInfo::cast(*obj), isolate);
+ fun = isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ function_info, isolate->global_context());
} else {
- fun = i::Handle<i::JSFunction>(i::JSFunction::cast(*obj));
+ fun = i::Handle<i::JSFunction>(i::JSFunction::cast(*obj), isolate);
}
- EXCEPTION_PREAMBLE();
- i::Handle<i::Object> receiver(i::Top::context()->global_proxy());
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> receiver(
+ isolate->context()->global_proxy(), isolate);
i::Handle<i::Object> result =
i::Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(Local<Value>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
raw_result = *result;
}
- i::Handle<i::Object> result(raw_result);
+ i::Handle<i::Object> result(raw_result, isolate);
return Utils::ToLocal(result);
}
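// Embedder-side sketch tying the entry points together: Compile() binds the
// shared function info produced by New() to the isolate's current global
// context, and Run() executes it (the helper name is illustrative):
#include <v8.h>

v8::Handle<v8::Value> Evaluate(v8::Handle<v8::String> source) {
  v8::HandleScope scope;
  v8::Local<v8::Script> script = v8::Script::Compile(source);
  if (script.IsEmpty()) return v8::Handle<v8::Value>();  // compile error
  return scope.Close(script->Run());
}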
@@ -1335,11 +1585,12 @@ static i::Handle<i::SharedFunctionInfo> OpenScript(Script* script) {
Local<Value> Script::Id() {
- ON_BAILOUT("v8::Script::Id()", return Local<Value>());
- LOG_API("Script::Id");
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Script::Id()", return Local<Value>());
+ LOG_API(isolate, "Script::Id");
i::Object* raw_id = NULL;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
i::Handle<i::Script> script(i::Script::cast(function_info->script()));
i::Handle<i::Object> id(script->id());
@@ -1351,10 +1602,11 @@ Local<Value> Script::Id() {
void Script::SetData(v8::Handle<String> data) {
- ON_BAILOUT("v8::Script::SetData()", return);
- LOG_API("Script::SetData");
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Script::SetData()", return);
+ LOG_API(isolate, "Script::SetData");
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
i::Handle<i::Script> script(i::Script::cast(function_info->script()));
@@ -1367,25 +1619,27 @@ void Script::SetData(v8::Handle<String> data) {
v8::TryCatch::TryCatch()
- : next_(i::Top::try_catch_handler_address()),
- exception_(i::Heap::the_hole_value()),
+ : isolate_(i::Isolate::Current()),
+ next_(isolate_->try_catch_handler_address()),
+ exception_(isolate_->heap()->the_hole_value()),
message_(i::Smi::FromInt(0)),
is_verbose_(false),
can_continue_(true),
capture_message_(true),
rethrow_(false) {
- i::Top::RegisterTryCatchHandler(this);
+ isolate_->RegisterTryCatchHandler(this);
}
v8::TryCatch::~TryCatch() {
+ ASSERT(isolate_ == i::Isolate::Current());
if (rethrow_) {
v8::HandleScope scope;
v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(Exception());
- i::Top::UnregisterTryCatchHandler(this);
+ isolate_->UnregisterTryCatchHandler(this);
v8::ThrowException(exc);
} else {
- i::Top::UnregisterTryCatchHandler(this);
+ isolate_->UnregisterTryCatchHandler(this);
}
}
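// Embedder-side sketch of the per-isolate TryCatch chain: the constructor
// links into try_catch_handler_address(), the destructor unlinks (and may
// rethrow, as above):
#include <cstdio>
#include <v8.h>

bool RunGuarded(v8::Handle<v8::Script> script) {
  v8::TryCatch try_catch;
  script->Run();
  if (try_catch.HasCaught()) {
    v8::String::Utf8Value text(try_catch.Exception());
    fprintf(stderr, "caught: %s\n", *text != NULL ? *text : "<unprintable>");
    return false;
  }
  return true;
}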
@@ -1408,10 +1662,11 @@ v8::Handle<v8::Value> v8::TryCatch::ReThrow() {
v8::Local<Value> v8::TryCatch::Exception() const {
+ ASSERT(isolate_ == i::Isolate::Current());
if (HasCaught()) {
// Check for out of memory exception.
i::Object* exception = reinterpret_cast<i::Object*>(exception_);
- return v8::Utils::ToLocal(i::Handle<i::Object>(exception));
+ return v8::Utils::ToLocal(i::Handle<i::Object>(exception, isolate_));
} else {
return v8::Local<Value>();
}
@@ -1419,15 +1674,15 @@ v8::Local<Value> v8::TryCatch::Exception() const {
v8::Local<Value> v8::TryCatch::StackTrace() const {
+ ASSERT(isolate_ == i::Isolate::Current());
if (HasCaught()) {
i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_);
if (!raw_obj->IsJSObject()) return v8::Local<Value>();
- v8::HandleScope scope;
- i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj));
- i::Handle<i::String> name = i::Factory::LookupAsciiSymbol("stack");
- if (!obj->HasProperty(*name))
- return v8::Local<Value>();
- return scope.Close(v8::Utils::ToLocal(i::GetProperty(obj, name)));
+ i::HandleScope scope(isolate_);
+ i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
+ i::Handle<i::String> name = isolate_->factory()->LookupAsciiSymbol("stack");
+ if (!obj->HasProperty(*name)) return v8::Local<Value>();
+ return v8::Utils::ToLocal(scope.CloseAndEscape(i::GetProperty(obj, name)));
} else {
return v8::Local<Value>();
}
@@ -1435,9 +1690,10 @@ v8::Local<Value> v8::TryCatch::StackTrace() const {
v8::Local<v8::Message> v8::TryCatch::Message() const {
+ ASSERT(isolate_ == i::Isolate::Current());
if (HasCaught() && message_ != i::Smi::FromInt(0)) {
i::Object* message = reinterpret_cast<i::Object*>(message_);
- return v8::Utils::MessageToLocal(i::Handle<i::Object>(message));
+ return v8::Utils::MessageToLocal(i::Handle<i::Object>(message, isolate_));
} else {
return v8::Local<v8::Message>();
}
@@ -1445,7 +1701,8 @@ v8::Local<v8::Message> v8::TryCatch::Message() const {
void v8::TryCatch::Reset() {
- exception_ = i::Heap::the_hole_value();
+ ASSERT(isolate_ == i::Isolate::Current());
+ exception_ = isolate_->heap()->the_hole_value();
message_ = i::Smi::FromInt(0);
}
@@ -1464,8 +1721,9 @@ void v8::TryCatch::SetCaptureMessage(bool value) {
Local<String> Message::Get() const {
- ON_BAILOUT("v8::Message::Get()", return Local<String>());
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Message::Get()", return Local<String>());
+ ENTER_V8(isolate);
HandleScope scope;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::String> raw_result = i::MessageHandler::GetMessage(obj);
@@ -1475,10 +1733,11 @@ Local<String> Message::Get() const {
v8::Handle<Value> Message::GetScriptResourceName() const {
- if (IsDeadCheck("v8::Message::GetScriptResourceName()")) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Message::GetScriptResourceName()")) {
return Local<String>();
}
- ENTER_V8;
+ ENTER_V8(isolate);
HandleScope scope;
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
@@ -1491,10 +1750,11 @@ v8::Handle<Value> Message::GetScriptResourceName() const {
v8::Handle<Value> Message::GetScriptData() const {
- if (IsDeadCheck("v8::Message::GetScriptResourceData()")) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Message::GetScriptResourceData()")) {
return Local<Value>();
}
- ENTER_V8;
+ ENTER_V8(isolate);
HandleScope scope;
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
@@ -1507,10 +1767,11 @@ v8::Handle<Value> Message::GetScriptData() const {
v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
- if (IsDeadCheck("v8::Message::GetStackTrace()")) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Message::GetStackTrace()")) {
return Local<v8::StackTrace>();
}
- ENTER_V8;
+ ENTER_V8(isolate);
HandleScope scope;
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
@@ -1527,9 +1788,10 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name,
int argc,
i::Object** argv[],
bool* has_pending_exception) {
- i::Handle<i::String> fmt_str = i::Factory::LookupAsciiSymbol(name);
+ i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name);
i::Object* object_fun =
- i::Top::builtins()->GetPropertyNoExceptionThrown(*fmt_str);
+ isolate->js_builtins_object()->GetPropertyNoExceptionThrown(*fmt_str);
i::Handle<i::JSFunction> fun =
i::Handle<i::JSFunction>(i::JSFunction::cast(object_fun));
i::Handle<i::Object> value =
@@ -1543,7 +1805,7 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name,
bool* has_pending_exception) {
i::Object** argv[1] = { data.location() };
return CallV8HeapFunction(name,
- i::Top::builtins(),
+ i::Isolate::Current()->js_builtins_object(),
1,
argv,
has_pending_exception);
@@ -1551,23 +1813,25 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name,
int Message::GetLineNumber() const {
- ON_BAILOUT("v8::Message::GetLineNumber()", return kNoLineNumberInfo);
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Message::GetLineNumber()", return kNoLineNumberInfo);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result = CallV8HeapFunction("GetLineNumber",
Utils::OpenHandle(this),
&has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(0);
+ EXCEPTION_BAILOUT_CHECK(isolate, 0);
return static_cast<int>(result->Number());
}
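// Embedder-side sketch: a message listener consuming the position accessors
// rewired here (registration shown in the trailing comment):
#include <cstdio>
#include <v8.h>

static void OnMessage(v8::Handle<v8::Message> message,
                      v8::Handle<v8::Value> data) {
  v8::String::Utf8Value text(message->Get());
  fprintf(stderr, "line %d: %s\n", message->GetLineNumber(),
          *text != NULL ? *text : "<unprintable>");
}
// At startup: v8::V8::AddMessageListener(OnMessage);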
int Message::GetStartPosition() const {
- if (IsDeadCheck("v8::Message::GetStartPosition()")) return 0;
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Message::GetStartPosition()")) return 0;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
return message->start_position();
@@ -1575,9 +1839,10 @@ int Message::GetStartPosition() const {
int Message::GetEndPosition() const {
- if (IsDeadCheck("v8::Message::GetEndPosition()")) return 0;
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Message::GetEndPosition()")) return 0;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
return message->end_position();
@@ -1585,31 +1850,35 @@ int Message::GetEndPosition() const {
int Message::GetStartColumn() const {
- if (IsDeadCheck("v8::Message::GetStartColumn()")) return kNoColumnInfo;
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Message::GetStartColumn()")) {
+ return kNoColumnInfo;
+ }
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> start_col_obj = CallV8HeapFunction(
"GetPositionInLine",
data_obj,
&has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(0);
+ EXCEPTION_BAILOUT_CHECK(isolate, 0);
return static_cast<int>(start_col_obj->Number());
}
int Message::GetEndColumn() const {
- if (IsDeadCheck("v8::Message::GetEndColumn()")) return kNoColumnInfo;
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Message::GetEndColumn()")) return kNoColumnInfo;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> start_col_obj = CallV8HeapFunction(
"GetPositionInLine",
data_obj,
&has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(0);
+ EXCEPTION_BAILOUT_CHECK(isolate, 0);
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(data_obj);
int start = message->start_position();
@@ -1619,14 +1888,15 @@ int Message::GetEndColumn() const {
Local<String> Message::GetSourceLine() const {
- ON_BAILOUT("v8::Message::GetSourceLine()", return Local<String>());
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Message::GetSourceLine()", return Local<String>());
+ ENTER_V8(isolate);
HandleScope scope;
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result = CallV8HeapFunction("GetSourceLine",
Utils::OpenHandle(this),
&has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(Local<v8::String>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::String>());
if (result->IsString()) {
return scope.Close(Utils::ToLocal(i::Handle<i::String>::cast(result)));
} else {
@@ -1636,17 +1906,21 @@ Local<String> Message::GetSourceLine() const {
void Message::PrintCurrentStackTrace(FILE* out) {
- if (IsDeadCheck("v8::Message::PrintCurrentStackTrace()")) return;
- ENTER_V8;
- i::Top::PrintCurrentStackTrace(out);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Message::PrintCurrentStackTrace()")) return;
+ ENTER_V8(isolate);
+ isolate->PrintCurrentStackTrace(out);
}
// --- S t a c k   T r a c e ---
Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
- if (IsDeadCheck("v8::StackTrace::GetFrame()")) return Local<StackFrame>();
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::StackTrace::GetFrame()")) {
+ return Local<StackFrame>();
+ }
+ ENTER_V8(isolate);
HandleScope scope;
i::Handle<i::JSArray> self = Utils::OpenHandle(this);
i::Object* raw_object = self->GetElementNoExceptionThrown(index);
@@ -1656,25 +1930,30 @@ Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
int StackTrace::GetFrameCount() const {
- if (IsDeadCheck("v8::StackTrace::GetFrameCount()")) return -1;
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::StackTrace::GetFrameCount()")) return -1;
+ ENTER_V8(isolate);
return i::Smi::cast(Utils::OpenHandle(this)->length())->value();
}
Local<Array> StackTrace::AsArray() {
- if (IsDeadCheck("v8::StackTrace::AsArray()")) Local<Array>();
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::StackTrace::AsArray()")) Local<Array>();
+ ENTER_V8(isolate);
return Utils::ToLocal(Utils::OpenHandle(this));
}
Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
StackTraceOptions options) {
- if (IsDeadCheck("v8::StackTrace::CurrentStackTrace()")) Local<StackTrace>();
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::StackTrace::CurrentStackTrace()")) {
+ return Local<StackTrace>();
+ }
+ ENTER_V8(isolate);
i::Handle<i::JSArray> stackTrace =
- i::Top::CaptureCurrentStackTrace(frame_limit, options);
+ isolate->CaptureCurrentStackTrace(frame_limit, options);
return Utils::StackTraceToLocal(stackTrace);
}
@@ -1682,11 +1961,12 @@ Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
// --- S t a c k   F r a m e ---
int StackFrame::GetLineNumber() const {
- if (IsDeadCheck("v8::StackFrame::GetLineNumber()")) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::StackFrame::GetLineNumber()")) {
return Message::kNoLineNumberInfo;
}
- ENTER_V8;
- i::HandleScope scope;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> line = GetProperty(self, "lineNumber");
if (!line->IsSmi()) {
@@ -1697,11 +1977,12 @@ int StackFrame::GetLineNumber() const {
int StackFrame::GetColumn() const {
- if (IsDeadCheck("v8::StackFrame::GetColumn()")) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::StackFrame::GetColumn()")) {
return Message::kNoColumnInfo;
}
- ENTER_V8;
- i::HandleScope scope;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> column = GetProperty(self, "column");
if (!column->IsSmi()) {
@@ -1712,8 +1993,11 @@ int StackFrame::GetColumn() const {
Local<String> StackFrame::GetScriptName() const {
- if (IsDeadCheck("v8::StackFrame::GetScriptName()")) return Local<String>();
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptName()")) {
+ return Local<String>();
+ }
+ ENTER_V8(isolate);
HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> name = GetProperty(self, "scriptName");
@@ -1725,10 +2009,11 @@ Local<String> StackFrame::GetScriptName() const {
Local<String> StackFrame::GetScriptNameOrSourceURL() const {
- if (IsDeadCheck("v8::StackFrame::GetScriptNameOrSourceURL()")) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptNameOrSourceURL()")) {
return Local<String>();
}
- ENTER_V8;
+ ENTER_V8(isolate);
HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> name = GetProperty(self, "scriptNameOrSourceURL");
@@ -1740,8 +2025,11 @@ Local<String> StackFrame::GetScriptNameOrSourceURL() const {
Local<String> StackFrame::GetFunctionName() const {
- if (IsDeadCheck("v8::StackFrame::GetFunctionName()")) return Local<String>();
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::StackFrame::GetFunctionName()")) {
+ return Local<String>();
+ }
+ ENTER_V8(isolate);
HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> name = GetProperty(self, "functionName");
@@ -1753,9 +2041,10 @@ Local<String> StackFrame::GetFunctionName() const {
bool StackFrame::IsEval() const {
- if (IsDeadCheck("v8::StackFrame::IsEval()")) return false;
- ENTER_V8;
- i::HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::StackFrame::IsEval()")) return false;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> is_eval = GetProperty(self, "isEval");
return is_eval->IsTrue();
@@ -1763,9 +2052,10 @@ bool StackFrame::IsEval() const {
bool StackFrame::IsConstructor() const {
- if (IsDeadCheck("v8::StackFrame::IsConstructor()")) return false;
- ENTER_V8;
- i::HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::StackFrame::IsConstructor()")) return false;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> is_constructor = GetProperty(self, "isConstructor");
return is_constructor->IsTrue();
@@ -1775,37 +2065,41 @@ bool StackFrame::IsConstructor() const {
// --- D a t a ---
bool Value::IsUndefined() const {
- if (IsDeadCheck("v8::Value::IsUndefined()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUndefined()")) {
+ return false;
+ }
return Utils::OpenHandle(this)->IsUndefined();
}
bool Value::IsNull() const {
- if (IsDeadCheck("v8::Value::IsNull()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNull()")) return false;
return Utils::OpenHandle(this)->IsNull();
}
bool Value::IsTrue() const {
- if (IsDeadCheck("v8::Value::IsTrue()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsTrue()")) return false;
return Utils::OpenHandle(this)->IsTrue();
}
bool Value::IsFalse() const {
- if (IsDeadCheck("v8::Value::IsFalse()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsFalse()")) return false;
return Utils::OpenHandle(this)->IsFalse();
}
bool Value::IsFunction() const {
- if (IsDeadCheck("v8::Value::IsFunction()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsFunction()")) {
+ return false;
+ }
return Utils::OpenHandle(this)->IsJSFunction();
}
bool Value::FullIsString() const {
- if (IsDeadCheck("v8::Value::IsString()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsString()")) return false;
bool result = Utils::OpenHandle(this)->IsString();
ASSERT_EQ(result, QuickIsString());
return result;
@@ -1813,37 +2107,41 @@ bool Value::FullIsString() const {
bool Value::IsArray() const {
- if (IsDeadCheck("v8::Value::IsArray()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArray()")) return false;
return Utils::OpenHandle(this)->IsJSArray();
}
bool Value::IsObject() const {
- if (IsDeadCheck("v8::Value::IsObject()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsObject()")) return false;
return Utils::OpenHandle(this)->IsJSObject();
}
bool Value::IsNumber() const {
- if (IsDeadCheck("v8::Value::IsNumber()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNumber()")) return false;
return Utils::OpenHandle(this)->IsNumber();
}
bool Value::IsBoolean() const {
- if (IsDeadCheck("v8::Value::IsBoolean()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsBoolean()")) {
+ return false;
+ }
return Utils::OpenHandle(this)->IsBoolean();
}
bool Value::IsExternal() const {
- if (IsDeadCheck("v8::Value::IsExternal()")) return false;
- return Utils::OpenHandle(this)->IsProxy();
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) {
+ return false;
+ }
+ return Utils::OpenHandle(this)->IsForeign();
}
bool Value::IsInt32() const {
- if (IsDeadCheck("v8::Value::IsInt32()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsInt32()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return true;
if (obj->IsNumber()) {
@@ -1855,7 +2153,7 @@ bool Value::IsInt32() const {
bool Value::IsUint32() const {
- if (IsDeadCheck("v8::Value::IsUint32()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUint32()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
if (obj->IsNumber()) {
@@ -1867,78 +2165,91 @@ bool Value::IsUint32() const {
bool Value::IsDate() const {
- if (IsDeadCheck("v8::Value::IsDate()")) return false;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::IsDate()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(i::Heap::Date_symbol());
+ return obj->HasSpecificClassOf(isolate->heap()->Date_symbol());
}
bool Value::IsRegExp() const {
- if (IsDeadCheck("v8::Value::IsRegExp()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsRegExp()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->IsJSRegExp();
}
Local<String> Value::ToString() const {
- if (IsDeadCheck("v8::Value::ToString()")) return Local<String>();
- LOG_API("ToString");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> str;
if (obj->IsString()) {
str = obj;
} else {
- ENTER_V8;
- EXCEPTION_PREAMBLE();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::ToString()")) {
+ return Local<String>();
+ }
+ LOG_API(isolate, "ToString");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
str = i::Execution::ToString(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(Local<String>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
}
return Local<String>(ToApi<String>(str));
}
Local<String> Value::ToDetailString() const {
- if (IsDeadCheck("v8::Value::ToDetailString()")) return Local<String>();
- LOG_API("ToDetailString");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> str;
if (obj->IsString()) {
str = obj;
} else {
- ENTER_V8;
- EXCEPTION_PREAMBLE();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::ToDetailString()")) {
+ return Local<String>();
+ }
+ LOG_API(isolate, "ToDetailString");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
str = i::Execution::ToDetailString(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(Local<String>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
}
return Local<String>(ToApi<String>(str));
}
Local<v8::Object> Value::ToObject() const {
- if (IsDeadCheck("v8::Value::ToObject()")) return Local<v8::Object>();
- LOG_API("ToObject");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> val;
if (obj->IsJSObject()) {
val = obj;
} else {
- ENTER_V8;
- EXCEPTION_PREAMBLE();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::ToObject()")) {
+ return Local<v8::Object>();
+ }
+ LOG_API(isolate, "ToObject");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
val = i::Execution::ToObject(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(Local<v8::Object>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
}
return Local<v8::Object>(ToApi<Object>(val));
}
Local<Boolean> Value::ToBoolean() const {
- if (IsDeadCheck("v8::Value::ToBoolean()")) return Local<Boolean>();
- LOG_API("ToBoolean");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsBoolean()) {
return Local<Boolean>(ToApi<Boolean>(obj));
} else {
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::ToBoolean()")) {
+ return Local<Boolean>();
+ }
+ LOG_API(isolate, "ToBoolean");
+ ENTER_V8(isolate);
i::Handle<i::Object> val = i::Execution::ToBoolean(obj);
return Local<Boolean>(ToApi<Boolean>(val));
}
@@ -1946,50 +2257,54 @@ Local<Boolean> Value::ToBoolean() const {
Local<Number> Value::ToNumber() const {
- if (IsDeadCheck("v8::Value::ToNumber()")) return Local<Number>();
- LOG_API("ToNumber");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsNumber()) {
num = obj;
} else {
- ENTER_V8;
- EXCEPTION_PREAMBLE();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::ToNumber()")) {
+ return Local<Number>();
+ }
+ LOG_API(isolate, "ToNumber");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
num = i::Execution::ToNumber(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(Local<Number>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Number>());
}
return Local<Number>(ToApi<Number>(num));
}
Local<Integer> Value::ToInteger() const {
- if (IsDeadCheck("v8::Value::ToInteger()")) return Local<Integer>();
- LOG_API("ToInteger");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsSmi()) {
num = obj;
} else {
- ENTER_V8;
- EXCEPTION_PREAMBLE();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::ToInteger()")) return Local<Integer>();
+ LOG_API(isolate, "ToInteger");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
num = i::Execution::ToInteger(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(Local<Integer>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Integer>());
}
return Local<Integer>(ToApi<Integer>(num));
}
void External::CheckCast(v8::Value* that) {
- if (IsDeadCheck("v8::External::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsProxy(),
+ ApiCheck(obj->IsForeign(),
"v8::External::Cast()",
"Could not convert to external");
}
void v8::Object::CheckCast(Value* that) {
- if (IsDeadCheck("v8::Object::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Object::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSObject(),
"v8::Object::Cast()",
@@ -1998,7 +2313,7 @@ void v8::Object::CheckCast(Value* that) {
void v8::Function::CheckCast(Value* that) {
- if (IsDeadCheck("v8::Function::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Function::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSFunction(),
"v8::Function::Cast()",
@@ -2007,7 +2322,7 @@ void v8::Function::CheckCast(Value* that) {
void v8::String::CheckCast(v8::Value* that) {
- if (IsDeadCheck("v8::String::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::String::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsString(),
"v8::String::Cast()",
@@ -2016,7 +2331,7 @@ void v8::String::CheckCast(v8::Value* that) {
void v8::Number::CheckCast(v8::Value* that) {
- if (IsDeadCheck("v8::Number::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsNumber(),
"v8::Number::Cast()",
@@ -2025,7 +2340,7 @@ void v8::Number::CheckCast(v8::Value* that) {
void v8::Integer::CheckCast(v8::Value* that) {
- if (IsDeadCheck("v8::Integer::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Integer::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsNumber(),
"v8::Integer::Cast()",
@@ -2034,7 +2349,7 @@ void v8::Integer::CheckCast(v8::Value* that) {
void v8::Array::CheckCast(Value* that) {
- if (IsDeadCheck("v8::Array::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Array::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSArray(),
"v8::Array::Cast()",
@@ -2043,16 +2358,17 @@ void v8::Array::CheckCast(Value* that) {
void v8::Date::CheckCast(v8::Value* that) {
- if (IsDeadCheck("v8::Date::Cast()")) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Date::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(i::Heap::Date_symbol()),
+ ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Date_symbol()),
"v8::Date::Cast()",
"Could not convert to date");
}
void v8::RegExp::CheckCast(v8::Value* that) {
- if (IsDeadCheck("v8::RegExp::Cast()")) return;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSRegExp(),
"v8::RegExp::Cast()",
@@ -2061,13 +2377,14 @@ void v8::RegExp::CheckCast(v8::Value* that) {
bool Value::BooleanValue() const {
- if (IsDeadCheck("v8::Value::BooleanValue()")) return false;
- LOG_API("BooleanValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsBoolean()) {
return obj->IsTrue();
} else {
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::BooleanValue()")) return false;
+ LOG_API(isolate, "BooleanValue");
+ ENTER_V8(isolate);
i::Handle<i::Object> value = i::Execution::ToBoolean(obj);
return value->IsTrue();
}
@@ -2075,34 +2392,38 @@ bool Value::BooleanValue() const {
double Value::NumberValue() const {
- if (IsDeadCheck("v8::Value::NumberValue()")) return i::OS::nan_value();
- LOG_API("NumberValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsNumber()) {
num = obj;
} else {
- ENTER_V8;
- EXCEPTION_PREAMBLE();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::NumberValue()")) {
+ return i::OS::nan_value();
+ }
+ LOG_API(isolate, "NumberValue");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
num = i::Execution::ToNumber(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(i::OS::nan_value());
+ EXCEPTION_BAILOUT_CHECK(isolate, i::OS::nan_value());
}
return num->Number();
}
int64_t Value::IntegerValue() const {
- if (IsDeadCheck("v8::Value::IntegerValue()")) return 0;
- LOG_API("IntegerValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsNumber()) {
num = obj;
} else {
- ENTER_V8;
- EXCEPTION_PREAMBLE();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::IntegerValue()")) return 0;
+ LOG_API(isolate, "IntegerValue");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
num = i::Execution::ToInteger(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(0);
+ EXCEPTION_BAILOUT_CHECK(isolate, 0);
}
if (num->IsSmi()) {
return i::Smi::cast(*num)->value();
@@ -2113,52 +2434,55 @@ int64_t Value::IntegerValue() const {
Local<Int32> Value::ToInt32() const {
- if (IsDeadCheck("v8::Value::ToInt32()")) return Local<Int32>();
- LOG_API("ToInt32");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsSmi()) {
num = obj;
} else {
- ENTER_V8;
- EXCEPTION_PREAMBLE();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::ToInt32()")) return Local<Int32>();
+ LOG_API(isolate, "ToInt32");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
num = i::Execution::ToInt32(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(Local<Int32>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Int32>());
}
return Local<Int32>(ToApi<Int32>(num));
}
Local<Uint32> Value::ToUint32() const {
- if (IsDeadCheck("v8::Value::ToUint32()")) return Local<Uint32>();
- LOG_API("ToUInt32");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsSmi()) {
num = obj;
} else {
- ENTER_V8;
- EXCEPTION_PREAMBLE();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::ToUint32()")) return Local<Uint32>();
+ LOG_API(isolate, "ToUInt32");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
num = i::Execution::ToUint32(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(Local<Uint32>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
}
return Local<Uint32>(ToApi<Uint32>(num));
}
Local<Uint32> Value::ToArrayIndex() const {
- if (IsDeadCheck("v8::Value::ToArrayIndex()")) return Local<Uint32>();
- LOG_API("ToArrayIndex");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
if (i::Smi::cast(*obj)->value() >= 0) return Utils::Uint32ToLocal(obj);
return Local<Uint32>();
}
- ENTER_V8;
- EXCEPTION_PREAMBLE();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::ToArrayIndex()")) return Local<Uint32>();
+ LOG_API(isolate, "ToArrayIndex");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> string_obj =
i::Execution::ToString(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(Local<Uint32>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
i::Handle<i::String> str = i::Handle<i::String>::cast(string_obj);
uint32_t index;
if (str->AsArrayIndex(&index)) {
@@ -2166,7 +2490,7 @@ Local<Uint32> Value::ToArrayIndex() const {
if (index <= static_cast<uint32_t>(i::Smi::kMaxValue)) {
value = i::Handle<i::Object>(i::Smi::FromInt(index));
} else {
- value = i::Factory::NewNumber(index);
+ value = isolate->factory()->NewNumber(index);
}
return Utils::Uint32ToLocal(value);
}
@@ -2175,18 +2499,18 @@ Local<Uint32> Value::ToArrayIndex() const {
int32_t Value::Int32Value() const {
- if (IsDeadCheck("v8::Value::Int32Value()")) return 0;
- LOG_API("Int32Value");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
} else {
- LOG_API("Int32Value (slow)");
- ENTER_V8;
- EXCEPTION_PREAMBLE();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::Int32Value()")) return 0;
+ LOG_API(isolate, "Int32Value (slow)");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> num =
i::Execution::ToInt32(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(0);
+ EXCEPTION_BAILOUT_CHECK(isolate, 0);
if (num->IsSmi()) {
return i::Smi::cast(*num)->value();
} else {
@@ -2197,13 +2521,14 @@ int32_t Value::Int32Value() const {
bool Value::Equals(Handle<Value> that) const {
- if (IsDeadCheck("v8::Value::Equals()")
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::Equals()")
|| EmptyCheck("v8::Value::Equals()", this)
|| EmptyCheck("v8::Value::Equals()", that)) {
return false;
}
- LOG_API("Equals");
- ENTER_V8;
+ LOG_API(isolate, "Equals");
+ ENTER_V8(isolate);
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> other = Utils::OpenHandle(*that);
// If both obj and other are JSObjects, we'd better compare by identity
@@ -2213,21 +2538,22 @@ bool Value::Equals(Handle<Value> that) const {
return *obj == *other;
}
i::Object** args[1] = { other.location() };
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result =
CallV8HeapFunction("EQUALS", obj, 1, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(false);
+ EXCEPTION_BAILOUT_CHECK(isolate, false);
return *result == i::Smi::FromInt(i::EQUAL);
}
bool Value::StrictEquals(Handle<Value> that) const {
- if (IsDeadCheck("v8::Value::StrictEquals()")
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::StrictEquals()")
|| EmptyCheck("v8::Value::StrictEquals()", this)
|| EmptyCheck("v8::Value::StrictEquals()", that)) {
return false;
}
- LOG_API("StrictEquals");
+ LOG_API(isolate, "StrictEquals");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> other = Utils::OpenHandle(*that);
// Must check HeapNumber first, since NaN !== NaN.
@@ -2253,17 +2579,18 @@ bool Value::StrictEquals(Handle<Value> that) const {
uint32_t Value::Uint32Value() const {
- if (IsDeadCheck("v8::Value::Uint32Value()")) return 0;
- LOG_API("Uint32Value");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
} else {
- ENTER_V8;
- EXCEPTION_PREAMBLE();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::Uint32Value()")) return 0;
+ LOG_API(isolate, "Uint32Value");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> num =
i::Execution::ToUint32(obj, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(0);
+ EXCEPTION_BAILOUT_CHECK(isolate, 0);
if (num->IsSmi()) {
return i::Smi::cast(*num)->value();
} else {
@@ -2275,13 +2602,14 @@ uint32_t Value::Uint32Value() const {
bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
v8::PropertyAttribute attribs) {
- ON_BAILOUT("v8::Object::Set()", return false);
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::Set()", return false);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::Object> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj = i::SetProperty(
self,
key_obj,
@@ -2289,24 +2617,26 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
static_cast<PropertyAttributes>(attribs),
i::kNonStrictMode);
has_pending_exception = obj.is_null();
- EXCEPTION_BAILOUT_CHECK(false);
+ EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
}
bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) {
- ON_BAILOUT("v8::Object::Set()", return false);
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::Set()", return false);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj = i::SetElement(
self,
index,
- value_obj);
+ value_obj,
+ i::kNonStrictMode);
has_pending_exception = obj.is_null();
- EXCEPTION_BAILOUT_CHECK(false);
+ EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
}
@@ -2314,28 +2644,30 @@ bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) {
bool v8::Object::ForceSet(v8::Handle<Value> key,
v8::Handle<Value> value,
v8::PropertyAttribute attribs) {
- ON_BAILOUT("v8::Object::ForceSet()", return false);
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::ForceSet()", return false);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj = i::ForceSetProperty(
self,
key_obj,
value_obj,
static_cast<PropertyAttributes>(attribs));
has_pending_exception = obj.is_null();
- EXCEPTION_BAILOUT_CHECK(false);
+ EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
}
bool v8::Object::ForceDelete(v8::Handle<Value> key) {
- ON_BAILOUT("v8::Object::ForceDelete()", return false);
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::ForceDelete()", return false);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
@@ -2343,42 +2675,46 @@ bool v8::Object::ForceDelete(v8::Handle<Value> key) {
// as optimized code does not always handle access checks.
i::Deoptimizer::DeoptimizeGlobalObject(*self);
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj = i::ForceDeleteProperty(self, key_obj);
has_pending_exception = obj.is_null();
- EXCEPTION_BAILOUT_CHECK(false);
+ EXCEPTION_BAILOUT_CHECK(isolate, false);
return obj->IsTrue();
}
Local<Value> v8::Object::Get(v8::Handle<Value> key) {
- ON_BAILOUT("v8::Object::Get()", return Local<v8::Value>());
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::Get()", return Local<v8::Value>());
+ ENTER_V8(isolate);
i::Handle<i::Object> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result = i::GetProperty(self, key_obj);
has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(Local<Value>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return Utils::ToLocal(result);
}
Local<Value> v8::Object::Get(uint32_t index) {
- ON_BAILOUT("v8::Object::Get()", return Local<v8::Value>());
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::Get()", return Local<v8::Value>());
+ ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result = i::GetElement(self, index);
has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(Local<Value>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return Utils::ToLocal(result);
}
Local<Value> v8::Object::GetPrototype() {
- ON_BAILOUT("v8::Object::GetPrototype()", return Local<v8::Value>());
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::GetPrototype()",
+ return Local<v8::Value>());
+ ENTER_V8(isolate);
i::Handle<i::Object> self = Utils::OpenHandle(this);
i::Handle<i::Object> result = i::GetPrototype(self);
return Utils::ToLocal(result);
@@ -2386,23 +2722,29 @@ Local<Value> v8::Object::GetPrototype() {
bool v8::Object::SetPrototype(Handle<Value> value) {
- ON_BAILOUT("v8::Object::SetPrototype()", return false);
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::SetPrototype()", return false);
+ ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE();
+ // We do not allow exceptions thrown while setting the prototype
+ // to propagate outside.
+ TryCatch try_catch;
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result = i::SetPrototype(self, value_obj);
has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(false);
+ EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
}
Local<Object> v8::Object::FindInstanceInPrototypeChain(
v8::Handle<FunctionTemplate> tmpl) {
- ON_BAILOUT("v8::Object::FindInstanceInPrototypeChain()",
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate,
+ "v8::Object::FindInstanceInPrototypeChain()",
return Local<v8::Object>());
- ENTER_V8;
+ ENTER_V8(isolate);
i::JSObject* object = *Utils::OpenHandle(this);
i::FunctionTemplateInfo* tmpl_info = *Utils::OpenHandle(*tmpl);
while (!object->IsInstanceOf(tmpl_info)) {
@@ -2415,24 +2757,48 @@ Local<Object> v8::Object::FindInstanceInPrototypeChain(
Local<Array> v8::Object::GetPropertyNames() {
- ON_BAILOUT("v8::Object::GetPropertyNames()", return Local<v8::Array>());
- ENTER_V8;
- v8::HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::GetPropertyNames()",
+ return Local<v8::Array>());
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::FixedArray> value =
i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS);
// Because we use caching to speed up enumeration it is important
// to never change the result of the basic enumeration function so
// we clone the result.
- i::Handle<i::FixedArray> elms = i::Factory::CopyFixedArray(value);
- i::Handle<i::JSArray> result = i::Factory::NewJSArrayWithElements(elms);
- return scope.Close(Utils::ToLocal(result));
+ i::Handle<i::FixedArray> elms = isolate->factory()->CopyFixedArray(value);
+ i::Handle<i::JSArray> result =
+ isolate->factory()->NewJSArrayWithElements(elms);
+ return Utils::ToLocal(scope.CloseAndEscape(result));
+}
+
+
+Local<Array> v8::Object::GetOwnPropertyNames() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::GetOwnPropertyNames()",
+ return Local<v8::Array>());
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::FixedArray> value =
+ i::GetKeysInFixedArrayFor(self, i::LOCAL_ONLY);
+ // Because we use caching to speed up enumeration it is important
+ // to never change the result of the basic enumeration function so
+ // we clone the result.
+ i::Handle<i::FixedArray> elms = isolate->factory()->CopyFixedArray(value);
+ i::Handle<i::JSArray> result =
+ isolate->factory()->NewJSArrayWithElements(elms);
+ return Utils::ToLocal(scope.CloseAndEscape(result));
}
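
// Hypothetical embedder-side contrast (not from this commit): the new
// GetOwnPropertyNames() mirrors GetPropertyNames() but enumerates with
// i::LOCAL_ONLY, so properties found on the prototype chain are skipped.
//
//   v8::Local<v8::Array> all = obj->GetPropertyNames();     // own + prototypes
//   v8::Local<v8::Array> own = obj->GetOwnPropertyNames();  // own only
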
Local<String> v8::Object::ObjectProtoToString() {
- ON_BAILOUT("v8::Object::ObjectProtoToString()", return Local<v8::String>());
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::ObjectProtoToString()",
+ return Local<v8::String>());
+ ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> name(self->class_name());
@@ -2483,8 +2849,10 @@ Local<String> v8::Object::ObjectProtoToString() {
Local<String> v8::Object::GetConstructorName() {
- ON_BAILOUT("v8::Object::GetConstructorName()", return Local<v8::String>());
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::GetConstructorName()",
+ return Local<v8::String>());
+ ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> name(self->constructor_name());
return Utils::ToLocal(name);
@@ -2492,9 +2860,10 @@ Local<String> v8::Object::GetConstructorName() {
bool v8::Object::Delete(v8::Handle<String> key) {
- ON_BAILOUT("v8::Object::Delete()", return false);
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::Delete()", return false);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
return i::DeleteProperty(self, key_obj)->IsTrue();
@@ -2502,8 +2871,9 @@ bool v8::Object::Delete(v8::Handle<String> key) {
bool v8::Object::Has(v8::Handle<String> key) {
- ON_BAILOUT("v8::Object::Has()", return false);
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::Has()", return false);
+ ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
return self->HasProperty(*key_obj);
@@ -2511,8 +2881,10 @@ bool v8::Object::Has(v8::Handle<String> key) {
bool v8::Object::Delete(uint32_t index) {
- ON_BAILOUT("v8::Object::DeleteProperty()", return false);
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::DeleteProperty()",
+ return false);
+ ENTER_V8(isolate);
HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
return i::DeleteElement(self, index)->IsTrue();
@@ -2520,7 +2892,8 @@ bool v8::Object::Delete(uint32_t index) {
bool v8::Object::Has(uint32_t index) {
- ON_BAILOUT("v8::Object::HasProperty()", return false);
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::HasProperty()", return false);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
return self->HasElement(index);
}
@@ -2532,9 +2905,10 @@ bool Object::SetAccessor(Handle<String> name,
v8::Handle<Value> data,
AccessControl settings,
PropertyAttribute attributes) {
- ON_BAILOUT("v8::Object::SetAccessor()", return false);
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name,
getter, setter, data,
settings, attributes);
@@ -2543,80 +2917,104 @@ bool Object::SetAccessor(Handle<String> name,
}
+bool v8::Object::HasOwnProperty(Handle<String> key) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::HasOwnProperty()",
+ return false);
+ return Utils::OpenHandle(this)->HasLocalProperty(
+ *Utils::OpenHandle(*key));
+}
+
+
bool v8::Object::HasRealNamedProperty(Handle<String> key) {
- ON_BAILOUT("v8::Object::HasRealNamedProperty()", return false);
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::HasRealNamedProperty()",
+ return false);
return Utils::OpenHandle(this)->HasRealNamedProperty(
*Utils::OpenHandle(*key));
}
bool v8::Object::HasRealIndexedProperty(uint32_t index) {
- ON_BAILOUT("v8::Object::HasRealIndexedProperty()", return false);
+ ON_BAILOUT(Utils::OpenHandle(this)->GetIsolate(),
+ "v8::Object::HasRealIndexedProperty()",
+ return false);
return Utils::OpenHandle(this)->HasRealElementProperty(index);
}
bool v8::Object::HasRealNamedCallbackProperty(Handle<String> key) {
- ON_BAILOUT("v8::Object::HasRealNamedCallbackProperty()", return false);
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate,
+ "v8::Object::HasRealNamedCallbackProperty()",
+ return false);
+ ENTER_V8(isolate);
return Utils::OpenHandle(this)->HasRealNamedCallbackProperty(
*Utils::OpenHandle(*key));
}
bool v8::Object::HasNamedLookupInterceptor() {
- ON_BAILOUT("v8::Object::HasNamedLookupInterceptor()", return false);
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::HasNamedLookupInterceptor()",
+ return false);
return Utils::OpenHandle(this)->HasNamedInterceptor();
}
bool v8::Object::HasIndexedLookupInterceptor() {
- ON_BAILOUT("v8::Object::HasIndexedLookupInterceptor()", return false);
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::HasIndexedLookupInterceptor()",
+ return false);
return Utils::OpenHandle(this)->HasIndexedInterceptor();
}
+static Local<Value> GetPropertyByLookup(i::Isolate* isolate,
+ i::Handle<i::JSObject> receiver,
+ i::Handle<i::String> name,
+ i::LookupResult* lookup) {
+ if (!lookup->IsProperty()) {
+ // No real property was found.
+ return Local<Value>();
+ }
+
+ // If the property being looked up is a callback, it can throw
+ // an exception.
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> result = i::GetProperty(receiver, name, lookup);
+ has_pending_exception = result.is_null();
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+
+ return Utils::ToLocal(result);
+}
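+
+// Why the preamble above is needed -- a minimal sketch (hypothetical
+// accessor, not from this commit): a native getter installed with
+// SetAccessor() may throw, so i::GetProperty() can return a null handle with
+// an exception pending, and the caller must bail out rather than use it.
+//
+//   static v8::Handle<v8::Value> ThrowingGetter(v8::Local<v8::String> name,
+//                                               const v8::AccessorInfo& info) {
+//     return v8::ThrowException(v8::String::New("getter failed"));
+//   }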
+
+
Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
Handle<String> key) {
- ON_BAILOUT("v8::Object::GetRealNamedPropertyInPrototypeChain()",
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate,
+ "v8::Object::GetRealNamedPropertyInPrototypeChain()",
return Local<Value>());
- ENTER_V8;
+ ENTER_V8(isolate);
i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::LookupResult lookup;
self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
- if (lookup.IsProperty()) {
- PropertyAttributes attributes;
- i::Object* property =
- self_obj->GetProperty(*self_obj,
- &lookup,
- *key_obj,
- &attributes)->ToObjectUnchecked();
- i::Handle<i::Object> result(property);
- return Utils::ToLocal(result);
- }
- return Local<Value>(); // No real property was found in prototype chain.
+ return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
}
Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
- ON_BAILOUT("v8::Object::GetRealNamedProperty()", return Local<Value>());
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::GetRealNamedProperty()",
+ return Local<Value>());
+ ENTER_V8(isolate);
i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::LookupResult lookup;
self_obj->LookupRealNamedProperty(*key_obj, &lookup);
- if (lookup.IsProperty()) {
- PropertyAttributes attributes;
- i::Object* property =
- self_obj->GetProperty(*self_obj,
- &lookup,
- *key_obj,
- &attributes)->ToObjectUnchecked();
- i::Handle<i::Object> result(property);
- return Utils::ToLocal(result);
- }
- return Local<Value>(); // No real property was found in prototype chain.
+ return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
}
@@ -2624,9 +3022,10 @@ Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
// Because the object gets a new map, existing inline cache caching
// the old map of this object will fail.
void v8::Object::TurnOnAccessCheck() {
- ON_BAILOUT("v8::Object::TurnOnAccessCheck()", return);
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::TurnOnAccessCheck()", return);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
// When turning on access checks for a global object deoptimize all functions
@@ -2634,7 +3033,7 @@ void v8::Object::TurnOnAccessCheck() {
i::Deoptimizer::DeoptimizeGlobalObject(*obj);
i::Handle<i::Map> new_map =
- i::Factory::CopyMapDropTransitions(i::Handle<i::Map>(obj->map()));
+ isolate->factory()->CopyMapDropTransitions(i::Handle<i::Map>(obj->map()));
new_map->set_is_access_check_needed(true);
obj->set_map(*new_map);
}
@@ -2646,21 +3045,50 @@ bool v8::Object::IsDirty() {
Local<v8::Object> v8::Object::Clone() {
- ON_BAILOUT("v8::Object::Clone()", return Local<Object>());
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::Clone()", return Local<Object>());
+ ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::JSObject> result = i::Copy(self);
has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(Local<Object>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
return Utils::ToLocal(result);
}
+static i::Context* GetCreationContext(i::JSObject* object) {
+ i::Object* constructor = object->map()->constructor();
+ i::JSFunction* function;
+ if (!constructor->IsJSFunction()) {
+ // API functions have null as a constructor,
+ // but any JSFunction knows its context immediately.
+ ASSERT(object->IsJSFunction() &&
+ i::JSFunction::cast(object)->shared()->IsApiFunction());
+ function = i::JSFunction::cast(object);
+ } else {
+ function = i::JSFunction::cast(constructor);
+ }
+ return function->context()->global_context();
+}
+
+
+Local<v8::Context> v8::Object::CreationContext() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate,
+ "v8::Object::CreationContext()", return Local<v8::Context>());
+ ENTER_V8(isolate);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Context* context = GetCreationContext(*self);
+ return Utils::ToLocal(i::Handle<i::Context>(context));
+}
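+
+// Hypothetical usage (assumed embedder code): CreationContext() recovers the
+// global context an object was created in, e.g. to run a callback against
+// the right set of globals.
+//
+//   v8::Local<v8::Context> ctx = obj->CreationContext();
+//   v8::Context::Scope context_scope(ctx);
+//   // ... call back into script; ctx's global object is now in effect ...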
+
+
int v8::Object::GetIdentityHash() {
- ON_BAILOUT("v8::Object::GetIdentityHash()", return 0);
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::GetIdentityHash()", return 0);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props_obj(i::GetHiddenProperties(self, true));
if (!hidden_props_obj->IsJSObject()) {
@@ -2671,7 +3099,7 @@ int v8::Object::GetIdentityHash() {
}
i::Handle<i::JSObject> hidden_props =
i::Handle<i::JSObject>::cast(hidden_props_obj);
- i::Handle<i::String> hash_symbol = i::Factory::identity_hash_symbol();
+ i::Handle<i::String> hash_symbol = isolate->factory()->identity_hash_symbol();
if (hidden_props->HasLocalProperty(*hash_symbol)) {
i::Handle<i::Object> hash = i::GetProperty(hidden_props, hash_symbol);
CHECK(!hash.is_null());
@@ -2684,7 +3112,7 @@ int v8::Object::GetIdentityHash() {
do {
// Generate a random 32-bit hash value but limit range to fit
// within a smi.
- hash_value = i::V8::Random() & i::Smi::kMaxValue;
+ hash_value = i::V8::Random(self->GetIsolate()) & i::Smi::kMaxValue;
attempts++;
} while (hash_value == 0 && attempts < 30);
hash_value = hash_value != 0 ? hash_value : 1; // never return 0
@@ -2700,14 +3128,15 @@ int v8::Object::GetIdentityHash() {
bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
v8::Handle<v8::Value> value) {
- ON_BAILOUT("v8::Object::SetHiddenValue()", return false);
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::SetHiddenValue()", return false);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj = i::SetProperty(
hidden_props,
key_obj,
@@ -2715,24 +3144,26 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
static_cast<PropertyAttributes>(None),
i::kNonStrictMode);
has_pending_exception = obj.is_null();
- EXCEPTION_BAILOUT_CHECK(false);
+ EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
}
v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
- ON_BAILOUT("v8::Object::GetHiddenValue()", return Local<v8::Value>());
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::GetHiddenValue()",
+ return Local<v8::Value>());
+ ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, false));
if (hidden_props->IsUndefined()) {
return v8::Local<v8::Value>();
}
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result = i::GetProperty(hidden_props, key_obj);
has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(v8::Local<v8::Value>());
+ EXCEPTION_BAILOUT_CHECK(isolate, v8::Local<v8::Value>());
if (result->IsUndefined()) {
return v8::Local<v8::Value>();
}
@@ -2741,9 +3172,10 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
- ON_BAILOUT("v8::DeleteHiddenValue()", return false);
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::DeleteHiddenValue()", return false);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, false));
if (hidden_props->IsUndefined()) {
@@ -2755,11 +3187,44 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
}
+namespace {
+
+void PrepareExternalArrayElements(i::Handle<i::JSObject> object,
+ void* data,
+ ExternalArrayType array_type,
+ int length) {
+ i::Isolate* isolate = object->GetIsolate();
+ i::Handle<i::ExternalArray> array =
+ isolate->factory()->NewExternalArray(length, array_type, data);
+
+ // If the object already has external elements, create a new, unique
+ // map if the element type is now changing, because assumptions about
+ // generated code based on the receiver's map will be invalid.
+ i::Handle<i::HeapObject> elements(object->elements());
+ bool cant_reuse_map =
+ elements->map()->IsUndefined() ||
+ !elements->map()->has_external_array_elements() ||
+ elements->map() != isolate->heap()->MapForExternalArrayType(array_type);
+ if (cant_reuse_map) {
+ i::Handle<i::Map> external_array_map =
+ isolate->factory()->GetExternalArrayElementsMap(
+ i::Handle<i::Map>(object->map()),
+ array_type,
+ object->HasFastProperties());
+ object->set_map(*external_array_map);
+ }
+ object->set_elements(*array);
+}
+
+} // namespace
+
+
void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
- ON_BAILOUT("v8::SetElementsToPixelData()", return);
- ENTER_V8;
- HandleScope scope;
- if (!ApiCheck(length <= i::PixelArray::kMaxLength,
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::SetElementsToPixelData()", return);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ if (!ApiCheck(length <= i::ExternalPixelArray::kMaxLength,
"v8::Object::SetIndexedPropertiesToPixelData()",
"length exceeds max acceptable value")) {
return;
@@ -2770,26 +3235,25 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
"JSArray is not supported")) {
return;
}
- i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data);
- i::Handle<i::Map> pixel_array_map =
- i::Factory::GetPixelArrayElementsMap(i::Handle<i::Map>(self->map()));
- self->set_map(*pixel_array_map);
- self->set_elements(*pixels);
+ PrepareExternalArrayElements(self, data, kExternalPixelArray, length);
}
bool v8::Object::HasIndexedPropertiesInPixelData() {
- ON_BAILOUT("v8::HasIndexedPropertiesInPixelData()", return false);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return self->HasPixelElements();
+ ON_BAILOUT(self->GetIsolate(), "v8::HasIndexedPropertiesInPixelData()",
+ return false);
+ return self->HasExternalPixelElements();
}
uint8_t* v8::Object::GetIndexedPropertiesPixelData() {
- ON_BAILOUT("v8::GetIndexedPropertiesPixelData()", return NULL);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (self->HasPixelElements()) {
- return i::PixelArray::cast(self->elements())->external_pointer();
+ ON_BAILOUT(self->GetIsolate(), "v8::GetIndexedPropertiesPixelData()",
+ return NULL);
+ if (self->HasExternalPixelElements()) {
+ return i::ExternalPixelArray::cast(self->elements())->
+ external_pixel_pointer();
} else {
return NULL;
}
@@ -2797,23 +3261,24 @@ uint8_t* v8::Object::GetIndexedPropertiesPixelData() {
int v8::Object::GetIndexedPropertiesPixelDataLength() {
- ON_BAILOUT("v8::GetIndexedPropertiesPixelDataLength()", return -1);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (self->HasPixelElements()) {
- return i::PixelArray::cast(self->elements())->length();
+ ON_BAILOUT(self->GetIsolate(), "v8::GetIndexedPropertiesPixelDataLength()",
+ return -1);
+ if (self->HasExternalPixelElements()) {
+ return i::ExternalPixelArray::cast(self->elements())->length();
} else {
return -1;
}
}
-
void v8::Object::SetIndexedPropertiesToExternalArrayData(
void* data,
ExternalArrayType array_type,
int length) {
- ON_BAILOUT("v8::SetIndexedPropertiesToExternalArrayData()", return);
- ENTER_V8;
- HandleScope scope;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::SetIndexedPropertiesToExternalArrayData()", return);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
if (!ApiCheck(length <= i::ExternalArray::kMaxLength,
"v8::Object::SetIndexedPropertiesToExternalArrayData()",
"length exceeds max acceptable value")) {
@@ -2825,25 +3290,24 @@ void v8::Object::SetIndexedPropertiesToExternalArrayData(
"JSArray is not supported")) {
return;
}
- i::Handle<i::ExternalArray> array =
- i::Factory::NewExternalArray(length, array_type, data);
- i::Handle<i::Map> slow_map =
- i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map()));
- self->set_map(*slow_map);
- self->set_elements(*array);
+ PrepareExternalArrayElements(self, data, array_type, length);
}
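
// Hedged sketch (buffer name and length are hypothetical): backing indexed
// properties with external data. The array is not copied, so the embedder
// must keep the buffer alive for as long as the object may touch it.
//
//   static float backing[256];
//   obj->SetIndexedPropertiesToExternalArrayData(
//       backing, v8::kExternalFloatArray, 256);
//   // Indexed reads such as obj->Get(0) now come straight from backing[0].
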
bool v8::Object::HasIndexedPropertiesInExternalArrayData() {
- ON_BAILOUT("v8::HasIndexedPropertiesInExternalArrayData()", return false);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ ON_BAILOUT(self->GetIsolate(),
+ "v8::HasIndexedPropertiesInExternalArrayData()",
+ return false);
return self->HasExternalArrayElements();
}
void* v8::Object::GetIndexedPropertiesExternalArrayData() {
- ON_BAILOUT("v8::GetIndexedPropertiesExternalArrayData()", return NULL);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ ON_BAILOUT(self->GetIsolate(),
+ "v8::GetIndexedPropertiesExternalArrayData()",
+ return NULL);
if (self->HasExternalArrayElements()) {
return i::ExternalArray::cast(self->elements())->external_pointer();
} else {
@@ -2853,9 +3317,10 @@ void* v8::Object::GetIndexedPropertiesExternalArrayData() {
ExternalArrayType v8::Object::GetIndexedPropertiesExternalArrayDataType() {
- ON_BAILOUT("v8::GetIndexedPropertiesExternalArrayDataType()",
- return static_cast<ExternalArrayType>(-1));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ ON_BAILOUT(self->GetIsolate(),
+ "v8::GetIndexedPropertiesExternalArrayDataType()",
+ return static_cast<ExternalArrayType>(-1));
switch (self->elements()->map()->instance_type()) {
case i::EXTERNAL_BYTE_ARRAY_TYPE:
return kExternalByteArray;
@@ -2871,6 +3336,10 @@ ExternalArrayType v8::Object::GetIndexedPropertiesExternalArrayDataType() {
return kExternalUnsignedIntArray;
case i::EXTERNAL_FLOAT_ARRAY_TYPE:
return kExternalFloatArray;
+ case i::EXTERNAL_DOUBLE_ARRAY_TYPE:
+ return kExternalDoubleArray;
+ case i::EXTERNAL_PIXEL_ARRAY_TYPE:
+ return kExternalPixelArray;
default:
return static_cast<ExternalArrayType>(-1);
}
@@ -2878,8 +3347,10 @@ ExternalArrayType v8::Object::GetIndexedPropertiesExternalArrayDataType() {
int v8::Object::GetIndexedPropertiesExternalArrayDataLength() {
- ON_BAILOUT("v8::GetIndexedPropertiesExternalArrayDataLength()", return 0);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ ON_BAILOUT(self->GetIsolate(),
+ "v8::GetIndexedPropertiesExternalArrayDataLength()",
+ return 0);
if (self->HasExternalArrayElements()) {
return i::ExternalArray::cast(self->elements())->length();
} else {
@@ -2888,6 +3359,85 @@ int v8::Object::GetIndexedPropertiesExternalArrayDataLength() {
}
+bool v8::Object::IsCallable() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::IsCallable()", return false);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ if (obj->IsJSFunction()) return true;
+ return i::Execution::GetFunctionDelegate(obj)->IsJSFunction();
+}
+
+
+Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, int argc,
+ v8::Handle<v8::Value> argv[]) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::CallAsFunction()",
+ return Local<v8::Value>());
+ LOG_API(isolate, "Object::CallAsFunction");
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
+ STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
+ i::Object*** args = reinterpret_cast<i::Object***>(argv);
+ i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>();
+ if (obj->IsJSFunction()) {
+ fun = i::Handle<i::JSFunction>::cast(obj);
+ } else {
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> delegate =
+ i::Execution::TryGetFunctionDelegate(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+ fun = i::Handle<i::JSFunction>::cast(delegate);
+ recv_obj = obj;
+ }
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> returned =
+ i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+ return Utils::ToLocal(scope.CloseAndEscape(returned));
+}
+
+
+Local<v8::Value> Object::CallAsConstructor(int argc,
+ v8::Handle<v8::Value> argv[]) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::CallAsConstructor()",
+ return Local<v8::Object>());
+ LOG_API(isolate, "Object::CallAsConstructor");
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
+ i::Object*** args = reinterpret_cast<i::Object***>(argv);
+ if (obj->IsJSFunction()) {
+ i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> returned =
+ i::Execution::New(fun, argc, args, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
+ return Utils::ToLocal(scope.CloseAndEscape(
+ i::Handle<i::JSObject>::cast(returned)));
+ }
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> delegate =
+ i::Execution::TryGetConstructorDelegate(obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
+ if (!delegate->IsUndefined()) {
+ i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(delegate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> returned =
+ i::Execution::Call(fun, obj, argc, args, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
+ ASSERT(!delegate->IsUndefined());
+ return Utils::ToLocal(scope.CloseAndEscape(returned));
+ }
+ return Local<v8::Object>();
+}
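+
+// Hypothetical embedder usage (not from this commit): the new entry points
+// let an object with a call handler be invoked like a function; `obj`,
+// `recv`, and the argument value are placeholders.
+//
+//   if (obj->IsCallable()) {
+//     v8::Handle<v8::Value> argv[] = { v8::Integer::New(42) };
+//     v8::Local<v8::Value> result = obj->CallAsFunction(recv, 1, argv);
+//   }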
+
+
Local<v8::Object> Function::NewInstance() const {
return NewInstance(0, NULL);
}
@@ -2895,37 +3445,40 @@ Local<v8::Object> Function::NewInstance() const {
Local<v8::Object> Function::NewInstance(int argc,
v8::Handle<v8::Value> argv[]) const {
- ON_BAILOUT("v8::Function::NewInstance()", return Local<v8::Object>());
- LOG_API("Function::NewInstance");
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Function::NewInstance()",
+ return Local<v8::Object>());
+ LOG_API(isolate, "Function::NewInstance");
+ ENTER_V8(isolate);
HandleScope scope;
i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Object*** args = reinterpret_cast<i::Object***>(argv);
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned =
i::Execution::New(function, argc, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(Local<v8::Object>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
return scope.Close(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned)));
}
Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
v8::Handle<v8::Value> argv[]) {
- ON_BAILOUT("v8::Function::Call()", return Local<v8::Value>());
- LOG_API("Function::Call");
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Function::Call()", return Local<v8::Value>());
+ LOG_API(isolate, "Function::Call");
+ ENTER_V8(isolate);
i::Object* raw_result = NULL;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Object*** args = reinterpret_cast<i::Object***>(argv);
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned =
i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(Local<Object>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
raw_result = *returned;
}
i::Handle<i::Object> result(raw_result);
@@ -2934,7 +3487,9 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
void Function::SetName(v8::Handle<v8::String> name) {
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ENTER_V8(isolate);
+ USE(isolate);
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
func->shared()->set_name(*Utils::OpenHandle(*name));
}
@@ -2973,81 +3528,17 @@ int Function::GetScriptLineNumber() const {
}
-namespace {
-
-// Tracks string usage to help make better decisions when
-// externalizing strings.
-//
-// Implementation note: internally this class only tracks fresh
-// strings and keeps a single use counter for them.
-class StringTracker {
- public:
- // Records that the given string's characters were copied to some
- // external buffer. If this happens often we should honor
- // externalization requests for the string.
- static void RecordWrite(i::Handle<i::String> string) {
- i::Address address = reinterpret_cast<i::Address>(*string);
- i::Address top = i::Heap::NewSpaceTop();
- if (IsFreshString(address, top)) {
- IncrementUseCount(top);
- }
- }
-
- // Estimates freshness and use frequency of the given string based
- // on how close it is to the new space top and the recorded usage
- // history.
- static inline bool IsFreshUnusedString(i::Handle<i::String> string) {
- i::Address address = reinterpret_cast<i::Address>(*string);
- i::Address top = i::Heap::NewSpaceTop();
- return IsFreshString(address, top) && IsUseCountLow(top);
- }
-
- private:
- static inline bool IsFreshString(i::Address string, i::Address top) {
- return top - kFreshnessLimit <= string && string <= top;
- }
-
- static inline bool IsUseCountLow(i::Address top) {
- if (last_top_ != top) return true;
- return use_count_ < kUseLimit;
- }
-
- static inline void IncrementUseCount(i::Address top) {
- if (last_top_ != top) {
- use_count_ = 0;
- last_top_ = top;
- }
- ++use_count_;
- }
-
- // How close to the new space top a fresh string has to be.
- static const int kFreshnessLimit = 1024;
-
- // The number of uses required to consider a string useful.
- static const int kUseLimit = 32;
-
- // Single use counter shared by all fresh strings.
- static int use_count_;
-
- // Last new space top when the use count above was valid.
- static i::Address last_top_;
-};
-
-int StringTracker::use_count_ = 0;
-i::Address StringTracker::last_top_ = NULL;
-
-} // namespace
-
-
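
The StringTracker removed here kept one process-wide use counter in statics. The call sites below now go through isolate->string_tracker(), so the heuristic becomes per-isolate state. A minimal sketch of the implied instance shape, assuming the freshness logic is unchanged (inferred from the call sites, not the actual src/api.h declaration):

    class StringTracker {
     public:
      void RecordWrite(i::Handle<i::String> string);
      bool IsFreshUnusedString(i::Handle<i::String> string);
     private:
      explicit StringTracker(i::Isolate* isolate)
          : use_count_(0), last_top_(NULL), isolate_(isolate) {}
      static const int kFreshnessLimit = 1024;  // distance from new-space top
      static const int kUseLimit = 32;          // uses before "useful"
      int use_count_;        // previously static, shared process-wide
      i::Address last_top_;  // previously static
      i::Isolate* isolate_;
      friend class i::Isolate;
    };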
int String::Length() const {
- if (IsDeadCheck("v8::String::Length()")) return 0;
- return Utils::OpenHandle(this)->length();
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ if (IsDeadCheck(str->GetIsolate(), "v8::String::Length()")) return 0;
+ return str->length();
}
int String::Utf8Length() const {
- if (IsDeadCheck("v8::String::Utf8Length()")) return 0;
- return Utils::OpenHandle(this)->Utf8Length();
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ if (IsDeadCheck(str->GetIsolate(), "v8::String::Utf8Length()")) return 0;
+ return str->Utf8Length();
}
@@ -3055,11 +3546,13 @@ int String::WriteUtf8(char* buffer,
int capacity,
int* nchars_ref,
WriteHints hints) const {
- if (IsDeadCheck("v8::String::WriteUtf8()")) return 0;
- LOG_API("String::WriteUtf8");
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::String::WriteUtf8()")) return 0;
+ LOG_API(isolate, "String::WriteUtf8");
+ ENTER_V8(isolate);
+ i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
i::Handle<i::String> str = Utils::OpenHandle(this);
- StringTracker::RecordWrite(str);
+ isolate->string_tracker()->RecordWrite(str);
if (hints & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
@@ -3110,12 +3603,14 @@ int String::WriteAscii(char* buffer,
int start,
int length,
WriteHints hints) const {
- if (IsDeadCheck("v8::String::WriteAscii()")) return 0;
- LOG_API("String::WriteAscii");
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::String::WriteAscii()")) return 0;
+ LOG_API(isolate, "String::WriteAscii");
+ ENTER_V8(isolate);
+ i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
- StringTracker::RecordWrite(str);
+ isolate->string_tracker()->RecordWrite(str);
if (hints & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
@@ -3142,12 +3637,13 @@ int String::Write(uint16_t* buffer,
int start,
int length,
WriteHints hints) const {
- if (IsDeadCheck("v8::String::Write()")) return 0;
- LOG_API("String::Write");
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::String::Write()")) return 0;
+ LOG_API(isolate, "String::Write");
+ ENTER_V8(isolate);
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
- StringTracker::RecordWrite(str);
+ isolate->string_tracker()->RecordWrite(str);
if (hints & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
@@ -3166,15 +3662,20 @@ int String::Write(uint16_t* buffer,
bool v8::String::IsExternal() const {
- EnsureInitialized("v8::String::IsExternal()");
i::Handle<i::String> str = Utils::OpenHandle(this);
+ if (IsDeadCheck(str->GetIsolate(), "v8::String::IsExternal()")) {
+ return false;
+ }
+ EnsureInitializedForIsolate(str->GetIsolate(), "v8::String::IsExternal()");
return i::StringShape(*str).IsExternalTwoByte();
}
bool v8::String::IsExternalAscii() const {
- EnsureInitialized("v8::String::IsExternalAscii()");
i::Handle<i::String> str = Utils::OpenHandle(this);
+ if (IsDeadCheck(str->GetIsolate(), "v8::String::IsExternalAscii()")) {
+ return false;
+ }
return i::StringShape(*str).IsExternalAscii();
}
@@ -3195,8 +3696,11 @@ void v8::String::VerifyExternalStringResource(
v8::String::ExternalAsciiStringResource*
v8::String::GetExternalAsciiStringResource() const {
- EnsureInitialized("v8::String::GetExternalAsciiStringResource()");
i::Handle<i::String> str = Utils::OpenHandle(this);
+ if (IsDeadCheck(str->GetIsolate(),
+ "v8::String::GetExternalAsciiStringResource()")) {
+ return NULL;
+ }
if (i::StringShape(*str).IsExternalAscii()) {
void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
return reinterpret_cast<ExternalAsciiStringResource*>(resource);
@@ -3207,21 +3711,21 @@ v8::String::ExternalAsciiStringResource*
double Number::Value() const {
- if (IsDeadCheck("v8::Number::Value()")) return 0;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->Number();
}
bool Boolean::Value() const {
- if (IsDeadCheck("v8::Boolean::Value()")) return false;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Boolean::Value()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->IsTrue();
}
int64_t Integer::Value() const {
- if (IsDeadCheck("v8::Integer::Value()")) return 0;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Integer::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
@@ -3232,7 +3736,7 @@ int64_t Integer::Value() const {
int32_t Int32::Value() const {
- if (IsDeadCheck("v8::Int32::Value()")) return 0;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Int32::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
@@ -3243,7 +3747,7 @@ int32_t Int32::Value() const {
uint32_t Uint32::Value() const {
- if (IsDeadCheck("v8::Uint32::Value()")) return 0;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Uint32::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
@@ -3254,15 +3758,19 @@ uint32_t Uint32::Value() const {
int v8::Object::InternalFieldCount() {
- if (IsDeadCheck("v8::Object::InternalFieldCount()")) return 0;
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ if (IsDeadCheck(obj->GetIsolate(), "v8::Object::InternalFieldCount()")) {
+ return 0;
+ }
return obj->GetInternalFieldCount();
}
Local<Value> v8::Object::CheckedGetInternalField(int index) {
- if (IsDeadCheck("v8::Object::GetInternalField()")) return Local<Value>();
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ if (IsDeadCheck(obj->GetIsolate(), "v8::Object::GetInternalField()")) {
+ return Local<Value>();
+ }
if (!ApiCheck(index < obj->GetInternalFieldCount(),
"v8::Object::GetInternalField()",
"Reading internal field out of bounds")) {
@@ -3279,14 +3787,17 @@ Local<Value> v8::Object::CheckedGetInternalField(int index) {
void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
- if (IsDeadCheck("v8::Object::SetInternalField()")) return;
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ i::Isolate* isolate = obj->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Object::SetInternalField()")) {
+ return;
+ }
if (!ApiCheck(index < obj->GetInternalFieldCount(),
"v8::Object::SetInternalField()",
"Writing internal field out of bounds")) {
return;
}
- ENTER_V8;
+ ENTER_V8(isolate);
i::Handle<i::Object> val = Utils::OpenHandle(*value);
obj->SetInternalField(index, *val);
}
@@ -3310,15 +3821,17 @@ static i::Smi* EncodeAsSmi(void* ptr) {
void v8::Object::SetPointerInInternalField(int index, void* value) {
- ENTER_V8;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ENTER_V8(isolate);
if (CanBeEncodedAsSmi(value)) {
Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value));
} else {
HandleScope scope;
- i::Handle<i::Proxy> proxy =
- i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
- if (!proxy.is_null())
- Utils::OpenHandle(this)->SetInternalField(index, *proxy);
+ i::Handle<i::Foreign> foreign =
+ isolate->factory()->NewForeign(
+ reinterpret_cast<i::Address>(value), i::TENURED);
+ if (!foreign.is_null())
+ Utils::OpenHandle(this)->SetInternalField(index, *foreign);
}
ASSERT_EQ(value, GetPointerFromInternalField(index));
}
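
Two storage paths meet here: a suitably aligned pointer is stored directly as a Smi (no heap allocation), and everything else is boxed in a Foreign, this upgrade's new name for the old Proxy heap object. An illustrative sketch of the tagging test behind CanBeEncodedAsSmi (the exact mask is platform-defined in V8; the constant below is an assumption):

    // A Smi carries its payload above a zero tag bit, so a pointer can ride
    // in one iff the bits consumed by the tag shift are zero to begin with.
    static bool CanBeEncodedAsSmiSketch(void* ptr) {
      const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
      const uintptr_t kIllustrativeMask = 0x3;  // assumed tag + shift bits
      return (address & kIllustrativeMask) == 0;
    }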
@@ -3326,15 +3839,23 @@ void v8::Object::SetPointerInInternalField(int index, void* value) {
// --- E n v i r o n m e n t ---
+
bool v8::V8::Initialize() {
- if (i::V8::IsRunning()) return true;
- HandleScope scope;
- if (i::Snapshot::Initialize()) return true;
- return i::V8::Initialize(NULL);
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ if (isolate != NULL && isolate->IsInitialized()) {
+ return true;
+ }
+ return InitializeHelper();
}
bool v8::V8::Dispose() {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!ApiCheck(isolate != NULL && isolate->IsDefaultIsolate(),
+ "v8::V8::Dispose()",
+ "Use v8::Isolate::Dispose() for a non-default isolate.")) {
+ return false;
+ }
i::V8::TearDown();
return true;
}
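
Dispose() now refuses to tear down anything except the default isolate and points embedders at v8::Isolate::Dispose() otherwise. The default-isolate lifecycle it still covers, sketched against the public API:

    int main() {
      v8::V8::Initialize();  // lazily creates the default isolate
      {
        v8::HandleScope scope;
        v8::Persistent<v8::Context> context = v8::Context::New();
        v8::Context::Scope context_scope(context);
        // ... compile and run scripts ...
        context.Dispose();
      }
      return v8::V8::Dispose() ? 0 : 1;  // false for a non-default isolate
    }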
@@ -3347,38 +3868,39 @@ HeapStatistics::HeapStatistics(): total_heap_size_(0),
void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
- heap_statistics->set_total_heap_size(i::Heap::CommittedMemory());
+ i::Heap* heap = i::Isolate::Current()->heap();
+ heap_statistics->set_total_heap_size(heap->CommittedMemory());
heap_statistics->set_total_heap_size_executable(
- i::Heap::CommittedMemoryExecutable());
- heap_statistics->set_used_heap_size(i::Heap::SizeOfObjects());
- heap_statistics->set_heap_size_limit(i::Heap::MaxReserved());
+ heap->CommittedMemoryExecutable());
+ heap_statistics->set_used_heap_size(heap->SizeOfObjects());
+ heap_statistics->set_heap_size_limit(heap->MaxReserved());
}
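
Embedder usage is unchanged; only the backing store moved from static Heap members to the current isolate's heap. A quick sketch (all counters are bytes):

    v8::HeapStatistics stats;
    v8::V8::GetHeapStatistics(&stats);
    printf("used %zu / committed %zu (exec %zu), limit %zu\n",
           stats.used_heap_size(), stats.total_heap_size(),
           stats.total_heap_size_executable(), stats.heap_size_limit());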
bool v8::V8::IdleNotification() {
// Returning true tells the caller that it need not
// continue to call IdleNotification.
- if (!i::V8::IsRunning()) return true;
+ if (!i::Isolate::Current()->IsInitialized()) return true;
return i::V8::IdleNotification();
}
void v8::V8::LowMemoryNotification() {
- if (!i::V8::IsRunning()) return;
- i::Heap::CollectAllGarbage(true);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!isolate->IsInitialized()) return;
+ isolate->heap()->CollectAllGarbage(true);
}
int v8::V8::ContextDisposedNotification() {
- if (!i::V8::IsRunning()) return 0;
- return i::Heap::NotifyContextDisposed();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!isolate->IsInitialized()) return 0;
+ return isolate->heap()->NotifyContextDisposed();
}
const char* v8::V8::GetVersion() {
- static v8::internal::EmbeddedVector<char, 128> buffer;
- v8::internal::Version::GetString(buffer);
- return buffer.start();
+ return i::Version::GetVersion();
}
@@ -3398,14 +3920,15 @@ Persistent<Context> v8::Context::New(
v8::ExtensionConfiguration* extensions,
v8::Handle<ObjectTemplate> global_template,
v8::Handle<Value> global_object) {
- EnsureInitialized("v8::Context::New()");
- LOG_API("Context::New");
- ON_BAILOUT("v8::Context::New()", return Persistent<Context>());
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Context::New()");
+ LOG_API(isolate, "Context::New");
+ ON_BAILOUT(isolate, "v8::Context::New()", return Persistent<Context>());
// Enter V8 via an ENTER_V8 scope.
i::Handle<i::Context> env;
{
- ENTER_V8;
+ ENTER_V8(isolate);
v8::Handle<ObjectTemplate> proxy_template = global_template;
i::Handle<i::FunctionTemplateInfo> proxy_constructor;
i::Handle<i::FunctionTemplateInfo> global_constructor;
@@ -3434,12 +3957,14 @@ Persistent<Context> v8::Context::New(
proxy_constructor->set_needs_access_check(
global_constructor->needs_access_check());
global_constructor->set_needs_access_check(false);
- global_constructor->set_access_check_info(i::Heap::undefined_value());
+ global_constructor->set_access_check_info(
+ isolate->heap()->undefined_value());
}
}
// Create the environment.
- env = i::Bootstrapper::CreateEnvironment(
+ env = isolate->bootstrapper()->CreateEnvironment(
+ isolate,
Utils::OpenHandle(*global_object),
proxy_template,
extensions);
@@ -3453,7 +3978,7 @@ Persistent<Context> v8::Context::New(
global_constructor->set_needs_access_check(
proxy_constructor->needs_access_check());
}
- i::RuntimeProfiler::Reset();
+ isolate->runtime_profiler()->Reset();
}
// Leave V8.
@@ -3464,8 +3989,11 @@ Persistent<Context> v8::Context::New(
void v8::Context::SetSecurityToken(Handle<Value> token) {
- if (IsDeadCheck("v8::Context::SetSecurityToken()")) return;
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Context::SetSecurityToken()")) {
+ return;
+ }
+ ENTER_V8(isolate);
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Handle<i::Object> token_handle = Utils::OpenHandle(*token);
env->set_security_token(*token_handle);
@@ -3473,15 +4001,22 @@ void v8::Context::SetSecurityToken(Handle<Value> token) {
void v8::Context::UseDefaultSecurityToken() {
- if (IsDeadCheck("v8::Context::UseDefaultSecurityToken()")) return;
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate, "v8::Context::UseDefaultSecurityToken()")) {
+ return;
+ }
+ ENTER_V8(isolate);
i::Handle<i::Context> env = Utils::OpenHandle(this);
env->set_security_token(env->global());
}
Handle<Value> v8::Context::GetSecurityToken() {
- if (IsDeadCheck("v8::Context::GetSecurityToken()")) return Handle<Value>();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Context::GetSecurityToken()")) {
+ return Handle<Value>();
+ }
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Object* security_token = env->security_token();
i::Handle<i::Object> token_handle(security_token);
@@ -3496,13 +4031,17 @@ bool Context::HasOutOfMemoryException() {
bool Context::InContext() {
- return i::Top::context() != NULL;
+ return i::Isolate::Current()->context() != NULL;
}
v8::Local<v8::Context> Context::GetEntered() {
- if (IsDeadCheck("v8::Context::GetEntered()")) return Local<Context>();
- i::Handle<i::Object> last = thread_local.LastEnteredContext();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Context::GetEntered()")) {
+ return Local<Context>();
+ }
+ i::Handle<i::Object> last =
+ isolate->handle_scope_implementer()->LastEnteredContext();
if (last.is_null()) return Local<Context>();
i::Handle<i::Context> context = i::Handle<i::Context>::cast(last);
return Utils::ToLocal(context);
@@ -3510,8 +4049,11 @@ v8::Local<v8::Context> Context::GetEntered() {
v8::Local<v8::Context> Context::GetCurrent() {
- if (IsDeadCheck("v8::Context::GetCurrent()")) return Local<Context>();
- i::Handle<i::Object> current = i::Top::global_context();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Context::GetCurrent()")) {
+ return Local<Context>();
+ }
+ i::Handle<i::Object> current = isolate->global_context();
if (current.is_null()) return Local<Context>();
i::Handle<i::Context> context = i::Handle<i::Context>::cast(current);
return Utils::ToLocal(context);
@@ -3519,8 +4061,12 @@ v8::Local<v8::Context> Context::GetCurrent() {
v8::Local<v8::Context> Context::GetCalling() {
- if (IsDeadCheck("v8::Context::GetCalling()")) return Local<Context>();
- i::Handle<i::Object> calling = i::Top::GetCallingGlobalContext();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Context::GetCalling()")) {
+ return Local<Context>();
+ }
+  i::Handle<i::Object> calling = isolate->GetCallingGlobalContext();
if (calling.is_null()) return Local<Context>();
i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
return Utils::ToLocal(context);
@@ -3528,7 +4074,9 @@ v8::Local<v8::Context> Context::GetCalling() {
v8::Local<v8::Object> Context::Global() {
- if (IsDeadCheck("v8::Context::Global()")) return Local<v8::Object>();
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Context::Global()")) {
+ return Local<v8::Object>();
+ }
i::Object** ctx = reinterpret_cast<i::Object**>(this);
i::Handle<i::Context> context =
i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
@@ -3538,73 +4086,101 @@ v8::Local<v8::Object> Context::Global() {
void Context::DetachGlobal() {
- if (IsDeadCheck("v8::Context::DetachGlobal()")) return;
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Context::DetachGlobal()")) return;
+ ENTER_V8(isolate);
i::Object** ctx = reinterpret_cast<i::Object**>(this);
i::Handle<i::Context> context =
i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- i::Bootstrapper::DetachGlobal(context);
+ isolate->bootstrapper()->DetachGlobal(context);
}
void Context::ReattachGlobal(Handle<Object> global_object) {
- if (IsDeadCheck("v8::Context::ReattachGlobal()")) return;
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Context::ReattachGlobal()")) return;
+ ENTER_V8(isolate);
+ i::Object** ctx = reinterpret_cast<i::Object**>(this);
+ i::Handle<i::Context> context =
+ i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+ isolate->bootstrapper()->ReattachGlobal(
+ context,
+ Utils::OpenHandle(*global_object));
+}
+
+
+void Context::AllowCodeGenerationFromStrings(bool allow) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Context::AllowCodeGenerationFromStrings()")) {
+ return;
+ }
+ ENTER_V8(isolate);
i::Object** ctx = reinterpret_cast<i::Object**>(this);
i::Handle<i::Context> context =
i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- i::Bootstrapper::ReattachGlobal(context, Utils::OpenHandle(*global_object));
+ context->set_allow_code_gen_from_strings(
+ allow ? isolate->heap()->true_value() : isolate->heap()->false_value());
+}
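+
+// AllowCodeGenerationFromStrings is new in this upgrade: it parks a heap
+// boolean on the context that the runtime consults before compiling source
+// strings. A hedged usage sketch for a sandboxing embedder:
+//
+//   v8::Persistent<v8::Context> sandbox = v8::Context::New();
+//   sandbox->AllowCodeGenerationFromStrings(false);
+//   // Embedder-driven Script::Compile still works; eval() and
+//   // new Function() inside the context are rejected at runtime.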
+
+
+void V8::SetWrapperClassId(i::Object** global_handle, uint16_t class_id) {
+ i::GlobalHandles::SetWrapperClassId(global_handle, class_id);
}
Local<v8::Object> ObjectTemplate::NewInstance() {
- ON_BAILOUT("v8::ObjectTemplate::NewInstance()", return Local<v8::Object>());
- LOG_API("ObjectTemplate::NewInstance");
- ENTER_V8;
- EXCEPTION_PREAMBLE();
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::ObjectTemplate::NewInstance()",
+ return Local<v8::Object>());
+ LOG_API(isolate, "ObjectTemplate::NewInstance");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj =
i::Execution::InstantiateObject(Utils::OpenHandle(this),
&has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(Local<v8::Object>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
return Utils::ToLocal(i::Handle<i::JSObject>::cast(obj));
}
Local<v8::Function> FunctionTemplate::GetFunction() {
- ON_BAILOUT("v8::FunctionTemplate::GetFunction()",
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::FunctionTemplate::GetFunction()",
return Local<v8::Function>());
- LOG_API("FunctionTemplate::GetFunction");
- ENTER_V8;
- EXCEPTION_PREAMBLE();
+ LOG_API(isolate, "FunctionTemplate::GetFunction");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj =
i::Execution::InstantiateFunction(Utils::OpenHandle(this),
&has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(Local<v8::Function>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Function>());
return Utils::ToLocal(i::Handle<i::JSFunction>::cast(obj));
}
bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
- ON_BAILOUT("v8::FunctionTemplate::HasInstanceOf()", return false);
+ ON_BAILOUT(i::Isolate::Current(), "v8::FunctionTemplate::HasInstanceOf()",
+ return false);
i::Object* obj = *Utils::OpenHandle(*value);
return obj->IsInstanceOf(*Utils::OpenHandle(this));
}
static Local<External> ExternalNewImpl(void* data) {
- return Utils::ToLocal(i::Factory::NewProxy(static_cast<i::Address>(data)));
+ return Utils::ToLocal(FACTORY->NewForeign(static_cast<i::Address>(data)));
}
static void* ExternalValueImpl(i::Handle<i::Object> obj) {
- return reinterpret_cast<void*>(i::Proxy::cast(*obj)->proxy());
+ return reinterpret_cast<void*>(i::Foreign::cast(*obj)->address());
}
Local<Value> v8::External::Wrap(void* data) {
+ i::Isolate* isolate = i::Isolate::Current();
STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
- LOG_API("External::Wrap");
- EnsureInitialized("v8::External::Wrap()");
- ENTER_V8;
+ LOG_API(isolate, "External::Wrap");
+ EnsureInitializedForIsolate(isolate, "v8::External::Wrap()");
+ ENTER_V8(isolate);
v8::Local<v8::Value> result = CanBeEncodedAsSmi(data)
? Utils::ToLocal(i::Handle<i::Object>(EncodeAsSmi(data)))
@@ -3620,8 +4196,8 @@ void* v8::Object::SlowGetPointerFromInternalField(int index) {
i::Object* value = obj->GetInternalField(index);
if (value->IsSmi()) {
return i::Internals::GetExternalPointerFromSmi(value);
- } else if (value->IsProxy()) {
- return reinterpret_cast<void*>(i::Proxy::cast(value)->proxy());
+ } else if (value->IsForeign()) {
+ return reinterpret_cast<void*>(i::Foreign::cast(value)->address());
} else {
return NULL;
}
@@ -3629,12 +4205,12 @@ void* v8::Object::SlowGetPointerFromInternalField(int index) {
void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
- if (IsDeadCheck("v8::External::Unwrap()")) return 0;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::External::Unwrap()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
void* result;
if (obj->IsSmi()) {
result = i::Internals::GetExternalPointerFromSmi(*obj);
- } else if (obj->IsProxy()) {
+ } else if (obj->IsForeign()) {
result = ExternalValueImpl(obj);
} else {
result = NULL;
@@ -3646,58 +4222,65 @@ void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
Local<External> v8::External::New(void* data) {
STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
- LOG_API("External::New");
- EnsureInitialized("v8::External::New()");
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "External::New");
+ EnsureInitializedForIsolate(isolate, "v8::External::New()");
+ ENTER_V8(isolate);
return ExternalNewImpl(data);
}
void* External::Value() const {
- if (IsDeadCheck("v8::External::Value()")) return 0;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return ExternalValueImpl(obj);
}
Local<String> v8::String::Empty() {
- EnsureInitialized("v8::String::Empty()");
- LOG_API("String::Empty()");
- return Utils::ToLocal(i::Factory::empty_symbol());
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::Empty()");
+ LOG_API(isolate, "String::Empty()");
+ return Utils::ToLocal(isolate->factory()->empty_symbol());
}
Local<String> v8::String::New(const char* data, int length) {
- EnsureInitialized("v8::String::New()");
- LOG_API("String::New(char)");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::New()");
+ LOG_API(isolate, "String::New(char)");
if (length == 0) return Empty();
- ENTER_V8;
+ ENTER_V8(isolate);
if (length == -1) length = i::StrLength(data);
i::Handle<i::String> result =
- i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length));
+ isolate->factory()->NewStringFromUtf8(
+ i::Vector<const char>(data, length));
return Utils::ToLocal(result);
}
Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
- EnsureInitialized("v8::String::New()");
- LOG_API("String::New(char)");
- ENTER_V8;
i::Handle<i::String> left_string = Utils::OpenHandle(*left);
+ i::Isolate* isolate = left_string->GetIsolate();
+ EnsureInitializedForIsolate(isolate, "v8::String::New()");
+ LOG_API(isolate, "String::New(char)");
+ ENTER_V8(isolate);
i::Handle<i::String> right_string = Utils::OpenHandle(*right);
- i::Handle<i::String> result = i::Factory::NewConsString(left_string,
- right_string);
+ i::Handle<i::String> result = isolate->factory()->NewConsString(left_string,
+ right_string);
return Utils::ToLocal(result);
}
Local<String> v8::String::NewUndetectable(const char* data, int length) {
- EnsureInitialized("v8::String::NewUndetectable()");
- LOG_API("String::NewUndetectable(char)");
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::NewUndetectable()");
+ LOG_API(isolate, "String::NewUndetectable(char)");
+ ENTER_V8(isolate);
if (length == -1) length = i::StrLength(data);
i::Handle<i::String> result =
- i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length));
+ isolate->factory()->NewStringFromUtf8(
+ i::Vector<const char>(data, length));
result->MarkAsUndetectable();
return Utils::ToLocal(result);
}
@@ -3711,65 +4294,78 @@ static int TwoByteStringLength(const uint16_t* data) {
Local<String> v8::String::New(const uint16_t* data, int length) {
- EnsureInitialized("v8::String::New()");
- LOG_API("String::New(uint16_)");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::New()");
+ LOG_API(isolate, "String::New(uint16_)");
if (length == 0) return Empty();
- ENTER_V8;
+ ENTER_V8(isolate);
if (length == -1) length = TwoByteStringLength(data);
i::Handle<i::String> result =
- i::Factory::NewStringFromTwoByte(i::Vector<const uint16_t>(data, length));
+ isolate->factory()->NewStringFromTwoByte(
+ i::Vector<const uint16_t>(data, length));
return Utils::ToLocal(result);
}
Local<String> v8::String::NewUndetectable(const uint16_t* data, int length) {
- EnsureInitialized("v8::String::NewUndetectable()");
- LOG_API("String::NewUndetectable(uint16_)");
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::NewUndetectable()");
+ LOG_API(isolate, "String::NewUndetectable(uint16_)");
+ ENTER_V8(isolate);
if (length == -1) length = TwoByteStringLength(data);
i::Handle<i::String> result =
- i::Factory::NewStringFromTwoByte(i::Vector<const uint16_t>(data, length));
+ isolate->factory()->NewStringFromTwoByte(
+ i::Vector<const uint16_t>(data, length));
result->MarkAsUndetectable();
return Utils::ToLocal(result);
}
-i::Handle<i::String> NewExternalStringHandle(
+i::Handle<i::String> NewExternalStringHandle(i::Isolate* isolate,
v8::String::ExternalStringResource* resource) {
i::Handle<i::String> result =
- i::Factory::NewExternalStringFromTwoByte(resource);
+ isolate->factory()->NewExternalStringFromTwoByte(resource);
return result;
}
-i::Handle<i::String> NewExternalAsciiStringHandle(
+i::Handle<i::String> NewExternalAsciiStringHandle(i::Isolate* isolate,
v8::String::ExternalAsciiStringResource* resource) {
i::Handle<i::String> result =
- i::Factory::NewExternalStringFromAscii(resource);
+ isolate->factory()->NewExternalStringFromAscii(resource);
return result;
}
Local<String> v8::String::NewExternal(
v8::String::ExternalStringResource* resource) {
- EnsureInitialized("v8::String::NewExternal()");
- LOG_API("String::NewExternal");
- ENTER_V8;
- i::Handle<i::String> result = NewExternalStringHandle(resource);
- i::ExternalStringTable::AddString(*result);
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
+ LOG_API(isolate, "String::NewExternal");
+ ENTER_V8(isolate);
+ i::Handle<i::String> result = NewExternalStringHandle(isolate, resource);
+ isolate->heap()->external_string_table()->AddString(*result);
return Utils::ToLocal(result);
}
bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
- if (IsDeadCheck("v8::String::MakeExternal()")) return false;
- if (this->IsExternal()) return false; // Already an external string.
- ENTER_V8;
i::Handle<i::String> obj = Utils::OpenHandle(this);
- if (StringTracker::IsFreshUnusedString(obj)) return false;
+ i::Isolate* isolate = obj->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::String::MakeExternal()")) return false;
+ if (i::StringShape(*obj).IsExternalTwoByte()) {
+ return false; // Already an external string.
+ }
+ ENTER_V8(isolate);
+ if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
+ return false;
+ }
+ if (isolate->heap()->IsInGCPostProcessing()) {
+ return false;
+ }
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
- i::ExternalStringTable::AddString(*obj);
+ isolate->heap()->external_string_table()->AddString(*obj);
}
return result;
}
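
MakeExternal now also bails out for strings the tracker considers fresh-and-unused and during GC post-processing. On the embedder side it consumes a v8::String::ExternalStringResource; a sketch (the class and function names here are hypothetical):

    class BufferResource : public v8::String::ExternalStringResource {
     public:
      BufferResource(const uint16_t* data, size_t length)
          : data_(data), length_(length) {}
      virtual const uint16_t* data() const { return data_; }
      virtual size_t length() const { return length_; }
     private:
      const uint16_t* data_;  // must outlive the string it backs
      size_t length_;
    };

    bool Externalize(v8::Handle<v8::String> s, BufferResource* resource) {
      return s->MakeExternal(resource);  // false on any bail-out above
    }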
@@ -3777,34 +4373,46 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
Local<String> v8::String::NewExternal(
v8::String::ExternalAsciiStringResource* resource) {
- EnsureInitialized("v8::String::NewExternal()");
- LOG_API("String::NewExternal");
- ENTER_V8;
- i::Handle<i::String> result = NewExternalAsciiStringHandle(resource);
- i::ExternalStringTable::AddString(*result);
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
+ LOG_API(isolate, "String::NewExternal");
+ ENTER_V8(isolate);
+ i::Handle<i::String> result = NewExternalAsciiStringHandle(isolate, resource);
+ isolate->heap()->external_string_table()->AddString(*result);
return Utils::ToLocal(result);
}
bool v8::String::MakeExternal(
v8::String::ExternalAsciiStringResource* resource) {
- if (IsDeadCheck("v8::String::MakeExternal()")) return false;
- if (this->IsExternal()) return false; // Already an external string.
- ENTER_V8;
i::Handle<i::String> obj = Utils::OpenHandle(this);
- if (StringTracker::IsFreshUnusedString(obj)) return false;
+ i::Isolate* isolate = obj->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::String::MakeExternal()")) return false;
+ if (i::StringShape(*obj).IsExternalTwoByte()) {
+ return false; // Already an external string.
+ }
+ ENTER_V8(isolate);
+ if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
+ return false;
+ }
+ if (isolate->heap()->IsInGCPostProcessing()) {
+ return false;
+ }
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
- i::ExternalStringTable::AddString(*obj);
+ isolate->heap()->external_string_table()->AddString(*obj);
}
return result;
}
bool v8::String::CanMakeExternal() {
- if (IsDeadCheck("v8::String::CanMakeExternal()")) return false;
i::Handle<i::String> obj = Utils::OpenHandle(this);
- if (StringTracker::IsFreshUnusedString(obj)) return false;
+ i::Isolate* isolate = obj->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::String::CanMakeExternal()")) return false;
+ if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
+ return false;
+ }
int size = obj->Size(); // Byte size of the original string.
if (size < i::ExternalString::kSize)
return false;
@@ -3814,34 +4422,37 @@ bool v8::String::CanMakeExternal() {
Local<v8::Object> v8::Object::New() {
- EnsureInitialized("v8::Object::New()");
- LOG_API("Object::New");
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Object::New()");
+ LOG_API(isolate, "Object::New");
+ ENTER_V8(isolate);
i::Handle<i::JSObject> obj =
- i::Factory::NewJSObject(i::Top::object_function());
+ isolate->factory()->NewJSObject(isolate->object_function());
return Utils::ToLocal(obj);
}
Local<v8::Value> v8::Date::New(double time) {
- EnsureInitialized("v8::Date::New()");
- LOG_API("Date::New");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Date::New()");
+ LOG_API(isolate, "Date::New");
if (isnan(time)) {
    // Introduce only the canonical NaN value to avoid signaling NaNs.
time = i::OS::nan_value();
}
- ENTER_V8;
- EXCEPTION_PREAMBLE();
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj =
i::Execution::NewDate(time, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(Local<v8::Value>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Value>());
return Utils::ToLocal(obj);
}
double v8::Date::NumberValue() const {
- if (IsDeadCheck("v8::Date::NumberValue()")) return 0;
- LOG_API("Date::NumberValue");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Date::NumberValue()")) return 0;
+ LOG_API(isolate, "Date::NumberValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
return jsvalue->value()->Number();
@@ -3849,16 +4460,18 @@ double v8::Date::NumberValue() const {
void v8::Date::DateTimeConfigurationChangeNotification() {
- ON_BAILOUT("v8::Date::DateTimeConfigurationChangeNotification()", return);
- LOG_API("Date::DateTimeConfigurationChangeNotification");
- ENTER_V8;
-
- HandleScope scope;
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Date::DateTimeConfigurationChangeNotification()",
+ return);
+ LOG_API(isolate, "Date::DateTimeConfigurationChangeNotification");
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
  // Get the function ResetDateCache (defined in date.js).
i::Handle<i::String> func_name_str =
- i::Factory::LookupAsciiSymbol("ResetDateCache");
- i::MaybeObject* result = i::Top::builtins()->GetProperty(*func_name_str);
+ isolate->factory()->LookupAsciiSymbol("ResetDateCache");
+ i::MaybeObject* result =
+ isolate->js_builtins_object()->GetProperty(*func_name_str);
i::Object* object_func;
if (!result->ToObject(&object_func)) {
return;
@@ -3870,9 +4483,11 @@ void v8::Date::DateTimeConfigurationChangeNotification() {
  // Call ResetDateCache() but expect no exceptions:
bool caught_exception = false;
- i::Handle<i::Object> result =
- i::Execution::TryCall(func, i::Top::builtins(), 0, NULL,
- &caught_exception);
+ i::Execution::TryCall(func,
+ isolate->js_builtins_object(),
+ 0,
+ NULL,
+ &caught_exception);
}
}
@@ -3884,28 +4499,32 @@ static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) {
if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm';
if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i';
ASSERT(num_flags <= static_cast<int>(ARRAY_SIZE(flags_buf)));
- return i::Factory::LookupSymbol(
+ return FACTORY->LookupSymbol(
i::Vector<const char>(flags_buf, num_flags));
}
Local<v8::RegExp> v8::RegExp::New(Handle<String> pattern,
Flags flags) {
- EnsureInitialized("v8::RegExp::New()");
- LOG_API("RegExp::New");
- ENTER_V8;
- EXCEPTION_PREAMBLE();
+ i::Isolate* isolate = Utils::OpenHandle(*pattern)->GetIsolate();
+ EnsureInitializedForIsolate(isolate, "v8::RegExp::New()");
+ LOG_API(isolate, "RegExp::New");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
i::Handle<i::JSRegExp> obj = i::Execution::NewJSRegExp(
Utils::OpenHandle(*pattern),
RegExpFlagsToString(flags),
&has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(Local<v8::RegExp>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::RegExp>());
return Utils::ToLocal(i::Handle<i::JSRegExp>::cast(obj));
}
Local<v8::String> v8::RegExp::GetSource() const {
- if (IsDeadCheck("v8::RegExp::GetSource()")) return Local<v8::String>();
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::RegExp::GetSource()")) {
+ return Local<v8::String>();
+ }
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
return Utils::ToLocal(i::Handle<i::String>(obj->Pattern()));
}
@@ -3922,23 +4541,31 @@ REGEXP_FLAG_ASSERT_EQ(kMultiline, MULTILINE);
#undef REGEXP_FLAG_ASSERT_EQ
v8::RegExp::Flags v8::RegExp::GetFlags() const {
- if (IsDeadCheck("v8::RegExp::GetFlags()")) return v8::RegExp::kNone;
+ if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::GetFlags()")) {
+ return v8::RegExp::kNone;
+ }
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
return static_cast<RegExp::Flags>(obj->GetFlags().value());
}
Local<v8::Array> v8::Array::New(int length) {
- EnsureInitialized("v8::Array::New()");
- LOG_API("Array::New");
- ENTER_V8;
- i::Handle<i::JSArray> obj = i::Factory::NewJSArray(length);
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Array::New()");
+ LOG_API(isolate, "Array::New");
+ ENTER_V8(isolate);
+ int real_length = length > 0 ? length : 0;
+ i::Handle<i::JSArray> obj = isolate->factory()->NewJSArray(real_length);
+ i::Handle<i::Object> length_obj =
+ isolate->factory()->NewNumberFromInt(real_length);
+ obj->set_length(*length_obj);
return Utils::ToLocal(obj);
}
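
Besides the isolate plumbing, the rewrite clamps negative lengths to zero and writes the length back explicitly instead of trusting the factory default. Sketch:

    v8::HandleScope scope;
    v8::Local<v8::Array> a = v8::Array::New(3);   // a->Length() == 3
    v8::Local<v8::Array> b = v8::Array::New(-7);  // clamped: b->Length() == 0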
uint32_t v8::Array::Length() const {
- if (IsDeadCheck("v8::Array::Length()")) return 0;
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Array::Length()")) return 0;
i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
i::Object* length = obj->length();
if (length->IsSmi()) {
@@ -3950,7 +4577,8 @@ uint32_t v8::Array::Length() const {
Local<Object> Array::CloneElementAt(uint32_t index) {
- ON_BAILOUT("v8::Array::CloneElementAt()", return Local<Object>());
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Array::CloneElementAt()", return Local<Object>());
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
if (!self->HasFastElements()) {
return Local<Object>();
@@ -3961,45 +4589,49 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
return Local<Object>();
}
i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
- EXCEPTION_PREAMBLE();
- ENTER_V8;
+ EXCEPTION_PREAMBLE(isolate);
+ ENTER_V8(isolate);
i::Handle<i::JSObject> result = i::Copy(paragon_handle);
has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(Local<Object>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
return Utils::ToLocal(result);
}
Local<String> v8::String::NewSymbol(const char* data, int length) {
- EnsureInitialized("v8::String::NewSymbol()");
- LOG_API("String::NewSymbol(char)");
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::String::NewSymbol()");
+ LOG_API(isolate, "String::NewSymbol(char)");
+ ENTER_V8(isolate);
if (length == -1) length = i::StrLength(data);
i::Handle<i::String> result =
- i::Factory::LookupSymbol(i::Vector<const char>(data, length));
+ isolate->factory()->LookupSymbol(i::Vector<const char>(data, length));
return Utils::ToLocal(result);
}
Local<Number> v8::Number::New(double value) {
- EnsureInitialized("v8::Number::New()");
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Number::New()");
if (isnan(value)) {
    // Introduce only the canonical NaN value to avoid signaling NaNs.
value = i::OS::nan_value();
}
- ENTER_V8;
- i::Handle<i::Object> result = i::Factory::NewNumber(value);
+ ENTER_V8(isolate);
+ i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
return Utils::NumberToLocal(result);
}
Local<Integer> v8::Integer::New(int32_t value) {
- EnsureInitialized("v8::Integer::New()");
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ EnsureInitializedForIsolate(isolate, "v8::Integer::New()");
if (i::Smi::IsValid(value)) {
- return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value)));
+ return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
+ isolate));
}
- ENTER_V8;
- i::Handle<i::Object> result = i::Factory::NewNumber(value);
+ ENTER_V8(isolate);
+ i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
return Utils::IntegerToLocal(result);
}
@@ -4009,27 +4641,30 @@ Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
if (fits_into_int32_t) {
return Integer::New(static_cast<int32_t>(value));
}
- ENTER_V8;
- i::Handle<i::Object> result = i::Factory::NewNumber(value);
+ i::Isolate* isolate = i::Isolate::Current();
+ ENTER_V8(isolate);
+ i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
return Utils::IntegerToLocal(result);
}
void V8::IgnoreOutOfMemoryException() {
- thread_local.set_ignore_out_of_memory(true);
+ EnterIsolateIfNeeded()->handle_scope_implementer()->set_ignore_out_of_memory(
+ true);
}
bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
- EnsureInitialized("v8::V8::AddMessageListener()");
- ON_BAILOUT("v8::V8::AddMessageListener()", return false);
- ENTER_V8;
- HandleScope scope;
- NeanderArray listeners(i::Factory::message_listeners());
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::V8::AddMessageListener()");
+ ON_BAILOUT(isolate, "v8::V8::AddMessageListener()", return false);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ NeanderArray listeners(isolate->factory()->message_listeners());
NeanderObject obj(2);
- obj.set(0, *i::Factory::NewProxy(FUNCTION_ADDR(that)));
+ obj.set(0, *isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
obj.set(1, data.IsEmpty() ?
- i::Heap::undefined_value() :
+ isolate->heap()->undefined_value() :
*Utils::OpenHandle(*data));
listeners.add(obj.value());
return true;
@@ -4037,18 +4672,19 @@ bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
void V8::RemoveMessageListeners(MessageCallback that) {
- EnsureInitialized("v8::V8::RemoveMessageListener()");
- ON_BAILOUT("v8::V8::RemoveMessageListeners()", return);
- ENTER_V8;
- HandleScope scope;
- NeanderArray listeners(i::Factory::message_listeners());
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::V8::RemoveMessageListener()");
+ ON_BAILOUT(isolate, "v8::V8::RemoveMessageListeners()", return);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ NeanderArray listeners(isolate->factory()->message_listeners());
for (int i = 0; i < listeners.length(); i++) {
if (listeners.get(i)->IsUndefined()) continue; // skip deleted ones
NeanderObject listener(i::JSObject::cast(listeners.get(i)));
- i::Handle<i::Proxy> callback_obj(i::Proxy::cast(listener.get(0)));
- if (callback_obj->proxy() == FUNCTION_ADDR(that)) {
- listeners.set(i, i::Heap::undefined_value());
+ i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
+ if (callback_obj->address() == FUNCTION_ADDR(that)) {
+ listeners.set(i, isolate->heap()->undefined_value());
}
}
}
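
The listener list is now a per-isolate NeanderArray of Foreign-wrapped callback addresses rather than a global. Embedder usage is unchanged; a sketch:

    static void OnMessage(v8::Handle<v8::Message> message,
                          v8::Handle<v8::Value> data) {
      v8::String::Utf8Value text(message->Get());
      fprintf(stderr, "uncaught: %s\n", *text);
    }

    void InstallListener() {
      v8::V8::AddMessageListener(OnMessage);      // data defaults to empty
      // ...
      v8::V8::RemoveMessageListeners(OnMessage);  // drops every match
    }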
@@ -4058,7 +4694,7 @@ void V8::SetCaptureStackTraceForUncaughtExceptions(
bool capture,
int frame_limit,
StackTrace::StackTraceOptions options) {
- i::Top::SetCaptureStackTraceForUncaughtExceptions(
+ i::Isolate::Current()->SetCaptureStackTraceForUncaughtExceptions(
capture,
frame_limit,
options);
@@ -4066,95 +4702,130 @@ void V8::SetCaptureStackTraceForUncaughtExceptions(
void V8::SetCounterFunction(CounterLookupCallback callback) {
- if (IsDeadCheck("v8::V8::SetCounterFunction()")) return;
- i::StatsTable::SetCounterFunction(callback);
+ i::Isolate* isolate = EnterIsolateIfNeeded();
+ if (IsDeadCheck(isolate, "v8::V8::SetCounterFunction()")) return;
+ isolate->stats_table()->SetCounterFunction(callback);
}
void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
- if (IsDeadCheck("v8::V8::SetCreateHistogramFunction()")) return;
- i::StatsTable::SetCreateHistogramFunction(callback);
+ i::Isolate* isolate = EnterIsolateIfNeeded();
+ if (IsDeadCheck(isolate, "v8::V8::SetCreateHistogramFunction()")) return;
+ isolate->stats_table()->SetCreateHistogramFunction(callback);
}
void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
- if (IsDeadCheck("v8::V8::SetAddHistogramSampleFunction()")) return;
- i::StatsTable::SetAddHistogramSampleFunction(callback);
+ i::Isolate* isolate = EnterIsolateIfNeeded();
+ if (IsDeadCheck(isolate, "v8::V8::SetAddHistogramSampleFunction()")) return;
+  isolate->stats_table()->SetAddHistogramSampleFunction(callback);
}
void V8::EnableSlidingStateWindow() {
- if (IsDeadCheck("v8::V8::EnableSlidingStateWindow()")) return;
- i::Logger::EnableSlidingStateWindow();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::EnableSlidingStateWindow()")) return;
+ isolate->logger()->EnableSlidingStateWindow();
}
void V8::SetFailedAccessCheckCallbackFunction(
FailedAccessCheckCallback callback) {
- if (IsDeadCheck("v8::V8::SetFailedAccessCheckCallbackFunction()")) return;
- i::Top::SetFailedAccessCheckCallback(callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::SetFailedAccessCheckCallbackFunction()")) {
+ return;
+ }
+ isolate->SetFailedAccessCheckCallback(callback);
+}
+
+void V8::AddObjectGroup(Persistent<Value>* objects,
+ size_t length,
+ RetainedObjectInfo* info) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return;
+ STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
+ isolate->global_handles()->AddObjectGroup(
+ reinterpret_cast<i::Object***>(objects), length, info);
}
-void V8::AddObjectGroup(Persistent<Value>* objects, size_t length) {
- if (IsDeadCheck("v8::V8::AddObjectGroup()")) return;
+void V8::AddImplicitReferences(Persistent<Object> parent,
+ Persistent<Value>* children,
+ size_t length) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::AddImplicitReferences()")) return;
STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
- i::GlobalHandles::AddGroup(reinterpret_cast<i::Object***>(objects), length);
+ isolate->global_handles()->AddImplicitReferences(
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(*parent)).location(),
+ reinterpret_cast<i::Object***>(children), length);
}
int V8::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
- if (IsDeadCheck("v8::V8::AdjustAmountOfExternalAllocatedMemory()")) return 0;
- return i::Heap::AdjustAmountOfExternalAllocatedMemory(change_in_bytes);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
+ return 0;
+ }
+ return isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
+ change_in_bytes);
}
void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
- if (IsDeadCheck("v8::V8::SetGlobalGCPrologueCallback()")) return;
- i::Heap::SetGlobalGCPrologueCallback(callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCPrologueCallback()")) return;
+ isolate->heap()->SetGlobalGCPrologueCallback(callback);
}
void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
- if (IsDeadCheck("v8::V8::SetGlobalGCEpilogueCallback()")) return;
- i::Heap::SetGlobalGCEpilogueCallback(callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCEpilogueCallback()")) return;
+ isolate->heap()->SetGlobalGCEpilogueCallback(callback);
}
void V8::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
- if (IsDeadCheck("v8::V8::AddGCPrologueCallback()")) return;
- i::Heap::AddGCPrologueCallback(callback, gc_type);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::AddGCPrologueCallback()")) return;
+ isolate->heap()->AddGCPrologueCallback(callback, gc_type);
}
void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
- if (IsDeadCheck("v8::V8::RemoveGCPrologueCallback()")) return;
- i::Heap::RemoveGCPrologueCallback(callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::RemoveGCPrologueCallback()")) return;
+ isolate->heap()->RemoveGCPrologueCallback(callback);
}
void V8::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
- if (IsDeadCheck("v8::V8::AddGCEpilogueCallback()")) return;
- i::Heap::AddGCEpilogueCallback(callback, gc_type);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::AddGCEpilogueCallback()")) return;
+ isolate->heap()->AddGCEpilogueCallback(callback, gc_type);
}
void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
- if (IsDeadCheck("v8::V8::RemoveGCEpilogueCallback()")) return;
- i::Heap::RemoveGCEpilogueCallback(callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::RemoveGCEpilogueCallback()")) return;
+ isolate->heap()->RemoveGCEpilogueCallback(callback);
}
void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
ObjectSpace space,
AllocationAction action) {
- if (IsDeadCheck("v8::V8::AddMemoryAllocationCallback()")) return;
- i::MemoryAllocator::AddMemoryAllocationCallback(callback,
- space,
- action);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::AddMemoryAllocationCallback()")) return;
+ isolate->memory_allocator()->AddMemoryAllocationCallback(
+ callback, space, action);
}
void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
- if (IsDeadCheck("v8::V8::RemoveMemoryAllocationCallback()")) return;
- i::MemoryAllocator::RemoveMemoryAllocationCallback(callback);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::RemoveMemoryAllocationCallback()")) return;
+  isolate->memory_allocator()->RemoveMemoryAllocationCallback(callback);
}
@@ -4174,7 +4845,7 @@ void V8::ResumeProfiler() {
bool V8::IsProfilerPaused() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- return i::Logger::GetActiveProfilerModules() & PROFILER_MODULE_CPU;
+ return LOGGER->GetActiveProfilerModules() & PROFILER_MODULE_CPU;
#else
return true;
#endif
@@ -4183,36 +4854,22 @@ bool V8::IsProfilerPaused() {
void V8::ResumeProfilerEx(int flags, int tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (flags & PROFILER_MODULE_HEAP_SNAPSHOT) {
- // Snapshot mode: resume modules, perform GC, then pause only
- // those modules which haven't been started prior to making a
- // snapshot.
-
- // Make a GC prior to taking a snapshot.
- i::Heap::CollectAllGarbage(false);
- // Reset snapshot flag and CPU module flags.
- flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU);
- const int current_flags = i::Logger::GetActiveProfilerModules();
- i::Logger::ResumeProfiler(flags, tag);
- i::Heap::CollectAllGarbage(false);
- i::Logger::PauseProfiler(~current_flags & flags, tag);
- } else {
- i::Logger::ResumeProfiler(flags, tag);
- }
+ i::Isolate* isolate = i::Isolate::Current();
+ isolate->logger()->ResumeProfiler(flags, tag);
#endif
}
void V8::PauseProfilerEx(int flags, int tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- i::Logger::PauseProfiler(flags, tag);
+ LOGGER->PauseProfiler(flags, tag);
#endif
}
int V8::GetActiveProfilerModules() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- return i::Logger::GetActiveProfilerModules();
+ return LOGGER->GetActiveProfilerModules();
#else
return PROFILER_MODULE_NONE;
#endif
@@ -4222,67 +4879,111 @@ int V8::GetActiveProfilerModules() {
int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
ASSERT(max_size >= kMinimumSizeForLogLinesBuffer);
- return i::Logger::GetLogLines(from_pos, dest_buf, max_size);
+ return LOGGER->GetLogLines(from_pos, dest_buf, max_size);
#endif
return 0;
}
int V8::GetCurrentThreadId() {
- API_ENTRY_CHECK("V8::GetCurrentThreadId()");
- EnsureInitialized("V8::GetCurrentThreadId()");
- return i::Top::thread_id();
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "V8::GetCurrentThreadId()");
+ return isolate->thread_id().ToInteger();
}
void V8::TerminateExecution(int thread_id) {
- if (!i::V8::IsRunning()) return;
- API_ENTRY_CHECK("V8::GetCurrentThreadId()");
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!isolate->IsInitialized()) return;
+ API_ENTRY_CHECK(isolate, "V8::TerminateExecution()");
  // If the thread_id identifies the current thread, just terminate
  // execution right away. Otherwise, ask the thread manager to
  // terminate the thread with the given id, if any.
- if (thread_id == i::Top::thread_id()) {
- i::StackGuard::TerminateExecution();
+ i::ThreadId internal_tid = i::ThreadId::FromInteger(thread_id);
+ if (isolate->thread_id().Equals(internal_tid)) {
+ isolate->stack_guard()->TerminateExecution();
} else {
- i::ThreadManager::TerminateExecution(thread_id);
+ isolate->thread_manager()->TerminateExecution(internal_tid);
}
}
-void V8::TerminateExecution() {
- if (!i::V8::IsRunning()) return;
- i::StackGuard::TerminateExecution();
+void V8::TerminateExecution(Isolate* isolate) {
+ // If no isolate is supplied, use the default isolate.
+ if (isolate != NULL) {
+ reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->TerminateExecution();
+ } else {
+ i::Isolate::GetDefaultIsolateStackGuard()->TerminateExecution();
+ }
}
-bool V8::IsExecutionTerminating() {
- if (!i::V8::IsRunning()) return false;
- if (i::Top::has_scheduled_exception()) {
- return i::Top::scheduled_exception() == i::Heap::termination_exception();
- }
- return false;
+bool V8::IsExecutionTerminating(Isolate* isolate) {
+ i::Isolate* i_isolate = isolate != NULL ?
+ reinterpret_cast<i::Isolate*>(isolate) : i::Isolate::Current();
+ return IsExecutionTerminatingCheck(i_isolate);
}
-String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj) {
- EnsureInitialized("v8::String::Utf8Value::Utf8Value()");
- if (obj.IsEmpty()) {
- str_ = NULL;
- length_ = 0;
+Isolate* Isolate::GetCurrent() {
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ return reinterpret_cast<Isolate*>(isolate);
+}
+
+
+Isolate* Isolate::New() {
+ i::Isolate* isolate = new i::Isolate();
+ return reinterpret_cast<Isolate*>(isolate);
+}
+
+
+void Isolate::Dispose() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ if (!ApiCheck(!isolate->IsInUse(),
+ "v8::Isolate::Dispose()",
+ "Disposing the isolate that is entered by a thread.")) {
return;
}
- ENTER_V8;
- HandleScope scope;
+ isolate->TearDown();
+}
+
+
+void Isolate::Enter() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->Enter();
+}
+
+
+void Isolate::Exit() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->Exit();
+}
+
+
+void Isolate::SetData(void* data) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->SetData(data);
+}
+
+void* Isolate::GetData() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return isolate->GetData();
+}
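
These thin wrappers are the public face of the multi-isolate API. An end-to-end sketch, ordered so the ApiCheck in Dispose() above is satisfied (the isolate must no longer be entered when disposed):

    struct PerIsolateState { int scripts_run; };  // embedder-defined

    void RunInFreshIsolate() {
      v8::Isolate* isolate = v8::Isolate::New();
      isolate->Enter();
      isolate->SetData(new PerIsolateState());
      {
        v8::HandleScope scope;
        v8::Persistent<v8::Context> context = v8::Context::New();
        v8::Context::Scope context_scope(context);
        // ... isolate->GetData() recovers PerIsolateState anywhere here ...
        context.Dispose();
      }
      delete static_cast<PerIsolateState*>(isolate->GetData());
      isolate->Exit();
      isolate->Dispose();
    }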
+
+
+String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
+ : str_(NULL), length_(0) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::String::Utf8Value::Utf8Value()")) return;
+ if (obj.IsEmpty()) return;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
TryCatch try_catch;
Handle<String> str = obj->ToString();
- if (str.IsEmpty()) {
- str_ = NULL;
- length_ = 0;
- } else {
- length_ = str->Utf8Length();
- str_ = i::NewArray<char>(length_ + 1);
- str->WriteUtf8(str_);
- }
+ if (str.IsEmpty()) return;
+ length_ = str->Utf8Length();
+ str_ = i::NewArray<char>(length_ + 1);
+ str->WriteUtf8(str_);
}
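
With str_ and length_ zeroed in the initializer list, every early return leaves the converter in one observable state: *value == NULL and length() == 0. Callers can therefore test the pointer alone:

    v8::String::Utf8Value utf8(result);  // `result` is some Handle<Value>
    if (*utf8 != NULL) {
      printf("%.*s\n", utf8.length(), *utf8);
    }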
@@ -4291,25 +4992,19 @@ String::Utf8Value::~Utf8Value() {
}
-String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj) {
- EnsureInitialized("v8::String::AsciiValue::AsciiValue()");
- if (obj.IsEmpty()) {
- str_ = NULL;
- length_ = 0;
- return;
- }
- ENTER_V8;
- HandleScope scope;
+String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj)
+ : str_(NULL), length_(0) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::String::AsciiValue::AsciiValue()")) return;
+ if (obj.IsEmpty()) return;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
TryCatch try_catch;
Handle<String> str = obj->ToString();
- if (str.IsEmpty()) {
- str_ = NULL;
- length_ = 0;
- } else {
- length_ = str->Length();
- str_ = i::NewArray<char>(length_ + 1);
- str->WriteAscii(str_);
- }
+ if (str.IsEmpty()) return;
+ length_ = str->Length();
+ str_ = i::NewArray<char>(length_ + 1);
+ str->WriteAscii(str_);
}
@@ -4318,25 +5013,19 @@ String::AsciiValue::~AsciiValue() {
}
-String::Value::Value(v8::Handle<v8::Value> obj) {
- EnsureInitialized("v8::String::Value::Value()");
- if (obj.IsEmpty()) {
- str_ = NULL;
- length_ = 0;
- return;
- }
- ENTER_V8;
- HandleScope scope;
+String::Value::Value(v8::Handle<v8::Value> obj)
+ : str_(NULL), length_(0) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::String::Value::Value()")) return;
+ if (obj.IsEmpty()) return;
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
TryCatch try_catch;
Handle<String> str = obj->ToString();
- if (str.IsEmpty()) {
- str_ = NULL;
- length_ = 0;
- } else {
- length_ = str->Length();
- str_ = i::NewArray<uint16_t>(length_ + 1);
- str->Write(str_);
- }
+ if (str.IsEmpty()) return;
+ length_ = str->Length();
+ str_ = i::NewArray<uint16_t>(length_ + 1);
+ str->Write(str_);
}
@@ -4345,14 +5034,15 @@ String::Value::~Value() {
}
Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
- LOG_API("RangeError");
- ON_BAILOUT("v8::Exception::RangeError()", return Local<Value>());
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "RangeError");
+ ON_BAILOUT(isolate, "v8::Exception::RangeError()", return Local<Value>());
+ ENTER_V8(isolate);
i::Object* error;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = i::Factory::NewRangeError(message);
+ i::Handle<i::Object> result = isolate->factory()->NewRangeError(message);
error = *result;
}
i::Handle<i::Object> result(error);
@@ -4360,14 +5050,16 @@ Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
}
Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
- LOG_API("ReferenceError");
- ON_BAILOUT("v8::Exception::ReferenceError()", return Local<Value>());
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "ReferenceError");
+ ON_BAILOUT(isolate, "v8::Exception::ReferenceError()", return Local<Value>());
+ ENTER_V8(isolate);
i::Object* error;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = i::Factory::NewReferenceError(message);
+ i::Handle<i::Object> result =
+ isolate->factory()->NewReferenceError(message);
error = *result;
}
i::Handle<i::Object> result(error);
@@ -4375,14 +5067,15 @@ Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
}
Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
- LOG_API("SyntaxError");
- ON_BAILOUT("v8::Exception::SyntaxError()", return Local<Value>());
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "SyntaxError");
+ ON_BAILOUT(isolate, "v8::Exception::SyntaxError()", return Local<Value>());
+ ENTER_V8(isolate);
i::Object* error;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = i::Factory::NewSyntaxError(message);
+ i::Handle<i::Object> result = isolate->factory()->NewSyntaxError(message);
error = *result;
}
i::Handle<i::Object> result(error);
@@ -4390,14 +5083,15 @@ Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
}
Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
- LOG_API("TypeError");
- ON_BAILOUT("v8::Exception::TypeError()", return Local<Value>());
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "TypeError");
+ ON_BAILOUT(isolate, "v8::Exception::TypeError()", return Local<Value>());
+ ENTER_V8(isolate);
i::Object* error;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = i::Factory::NewTypeError(message);
+ i::Handle<i::Object> result = isolate->factory()->NewTypeError(message);
error = *result;
}
i::Handle<i::Object> result(error);
@@ -4405,14 +5099,15 @@ Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
}
Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
- LOG_API("Error");
- ON_BAILOUT("v8::Exception::Error()", return Local<Value>());
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "Error");
+ ON_BAILOUT(isolate, "v8::Exception::Error()", return Local<Value>());
+ ENTER_V8(isolate);
i::Object* error;
{
- HandleScope scope;
+ i::HandleScope scope(isolate);
i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
- i::Handle<i::Object> result = i::Factory::NewError(message);
+ i::Handle<i::Object> result = isolate->factory()->NewError(message);
error = *result;
}
i::Handle<i::Object> result(error);
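
All five Exception factories now follow the same shape: fetch the current isolate, log, bail out on a dead VM, and allocate the error through the isolate's factory rather than the old static i::Factory. In embedder code they are normally paired with v8::ThrowException; a sketch (the Sqrt callback is hypothetical; assumes <math.h>):

    v8::Handle<v8::Value> Sqrt(const v8::Arguments& args) {
      if (args.Length() < 1 || !args[0]->IsNumber()) {
        return v8::ThrowException(v8::Exception::TypeError(
            v8::String::New("sqrt() expects a number")));
      }
      return v8::Number::New(sqrt(args[0]->NumberValue()));
    }
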
@@ -4424,180 +5119,216 @@ Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
#ifdef ENABLE_DEBUGGER_SUPPORT
-static v8::Debug::EventCallback event_callback = NULL;
-
static void EventCallbackWrapper(const v8::Debug::EventDetails& event_details) {
- if (event_callback) {
- event_callback(event_details.GetEvent(),
- event_details.GetExecutionState(),
- event_details.GetEventData(),
- event_details.GetCallbackData());
+ i::Isolate* isolate = i::Isolate::Current();
+ if (isolate->debug_event_callback() != NULL) {
+ isolate->debug_event_callback()(event_details.GetEvent(),
+ event_details.GetExecutionState(),
+ event_details.GetEventData(),
+ event_details.GetCallbackData());
}
}
bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
- EnsureInitialized("v8::Debug::SetDebugEventListener()");
- ON_BAILOUT("v8::Debug::SetDebugEventListener()", return false);
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener()");
+ ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
+ ENTER_V8(isolate);
- event_callback = that;
+ isolate->set_debug_event_callback(that);
- HandleScope scope;
- i::Handle<i::Object> proxy = i::Factory::undefined_value();
+ i::HandleScope scope(isolate);
+ i::Handle<i::Object> foreign = isolate->factory()->undefined_value();
if (that != NULL) {
- proxy = i::Factory::NewProxy(FUNCTION_ADDR(EventCallbackWrapper));
+ foreign =
+ isolate->factory()->NewForeign(FUNCTION_ADDR(EventCallbackWrapper));
}
- i::Debugger::SetEventListener(proxy, Utils::OpenHandle(*data));
+ isolate->debugger()->SetEventListener(foreign, Utils::OpenHandle(*data));
return true;
}
bool Debug::SetDebugEventListener2(EventCallback2 that, Handle<Value> data) {
- EnsureInitialized("v8::Debug::SetDebugEventListener2()");
- ON_BAILOUT("v8::Debug::SetDebugEventListener2()", return false);
- ENTER_V8;
- HandleScope scope;
- i::Handle<i::Object> proxy = i::Factory::undefined_value();
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener2()");
+ ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener2()", return false);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::Object> foreign = isolate->factory()->undefined_value();
if (that != NULL) {
- proxy = i::Factory::NewProxy(FUNCTION_ADDR(that));
+ foreign = isolate->factory()->NewForeign(FUNCTION_ADDR(that));
}
- i::Debugger::SetEventListener(proxy, Utils::OpenHandle(*data));
+ isolate->debugger()->SetEventListener(foreign, Utils::OpenHandle(*data));
return true;
}
bool Debug::SetDebugEventListener(v8::Handle<v8::Object> that,
Handle<Value> data) {
- ON_BAILOUT("v8::Debug::SetDebugEventListener()", return false);
- ENTER_V8;
- i::Debugger::SetEventListener(Utils::OpenHandle(*that),
- Utils::OpenHandle(*data));
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
+ ENTER_V8(isolate);
+ isolate->debugger()->SetEventListener(Utils::OpenHandle(*that),
+ Utils::OpenHandle(*data));
return true;
}
-void Debug::DebugBreak() {
- if (!i::V8::IsRunning()) return;
- i::StackGuard::DebugBreak();
+void Debug::DebugBreak(Isolate* isolate) {
+ // If no isolate is supplied, use the default isolate.
+ if (isolate != NULL) {
+ reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->DebugBreak();
+ } else {
+ i::Isolate::GetDefaultIsolateStackGuard()->DebugBreak();
+ }
}
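
DebugBreak, CancelDebugBreak, DebugBreakForCommand and SendCommand all grow an optional trailing Isolate*; passing NULL keeps the old single-isolate behaviour by routing to the default isolate. A sketch of a watchdog thread interrupting a specific isolate (assumes debugger support is compiled in):

    // 'isolate' was captured when the worker thread started.
    void InterruptForDebugging(v8::Isolate* isolate) {
      v8::Debug::DebugBreak(isolate);  // arms that isolate's stack guard
      v8::Debug::DebugBreak();         // NULL/omitted: the default isolate
    }
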
-void Debug::CancelDebugBreak() {
- i::StackGuard::Continue(i::DEBUGBREAK);
+void Debug::CancelDebugBreak(Isolate* isolate) {
+ // If no isolate is supplied, use the default isolate.
+ if (isolate != NULL) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->stack_guard()->Continue(i::DEBUGBREAK);
+ } else {
+ i::Isolate::GetDefaultIsolateStackGuard()->Continue(i::DEBUGBREAK);
+ }
}
-void Debug::DebugBreakForCommand(ClientData* data) {
- if (!i::V8::IsRunning()) return;
- i::Debugger::EnqueueDebugCommand(data);
+void Debug::DebugBreakForCommand(ClientData* data, Isolate* isolate) {
+ // If no isolate is supplied, use the default isolate.
+ if (isolate != NULL) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->debugger()->EnqueueDebugCommand(data);
+ } else {
+ i::Isolate::GetDefaultIsolateDebugger()->EnqueueDebugCommand(data);
+ }
}
-static v8::Debug::MessageHandler message_handler = NULL;
-
static void MessageHandlerWrapper(const v8::Debug::Message& message) {
- if (message_handler) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (isolate->message_handler()) {
v8::String::Value json(message.GetJSON());
- message_handler(*json, json.length(), message.GetClientData());
+ (isolate->message_handler())(*json, json.length(), message.GetClientData());
}
}
void Debug::SetMessageHandler(v8::Debug::MessageHandler handler,
bool message_handler_thread) {
- EnsureInitialized("v8::Debug::SetMessageHandler");
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Debug::SetMessageHandler");
+ ENTER_V8(isolate);
+
// Message handler thread not supported any more. Parameter temporarily left in
- // the API for client compatability reasons.
+ // the API for client compatibility reasons.
CHECK(!message_handler_thread);
// TODO(sgjesse) support the old message handler API through a simple wrapper.
- message_handler = handler;
- if (message_handler != NULL) {
- i::Debugger::SetMessageHandler(MessageHandlerWrapper);
+ isolate->set_message_handler(handler);
+ if (handler != NULL) {
+ isolate->debugger()->SetMessageHandler(MessageHandlerWrapper);
} else {
- i::Debugger::SetMessageHandler(NULL);
+ isolate->debugger()->SetMessageHandler(NULL);
}
}
void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
- EnsureInitialized("v8::Debug::SetMessageHandler");
- ENTER_V8;
- i::Debugger::SetMessageHandler(handler);
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Debug::SetMessageHandler");
+ ENTER_V8(isolate);
+ isolate->debugger()->SetMessageHandler(handler);
}
void Debug::SendCommand(const uint16_t* command, int length,
- ClientData* client_data) {
- if (!i::V8::IsRunning()) return;
- i::Debugger::ProcessCommand(i::Vector<const uint16_t>(command, length),
- client_data);
+ ClientData* client_data,
+ Isolate* isolate) {
+ // If no isolate is supplied, use the default isolate.
+ if (isolate != NULL) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->debugger()->ProcessCommand(
+ i::Vector<const uint16_t>(command, length), client_data);
+ } else {
+ i::Isolate::GetDefaultIsolateDebugger()->ProcessCommand(
+ i::Vector<const uint16_t>(command, length), client_data);
+ }
}
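
SendCommand takes the command as a UTF-16 buffer holding a debugger-protocol JSON message. A hedged sketch of enqueueing a 'continue' request (the message text is illustrative; assumes <vector> and <string.h>):

    void SendContinue(v8::Isolate* isolate) {
      const char* json =
          "{\"seq\":1,\"type\":\"request\",\"command\":\"continue\"}";
      // The protocol wants UTF-16; widen the ASCII message.
      std::vector<uint16_t> buf(json, json + strlen(json));
      v8::Debug::SendCommand(&buf[0], static_cast<int>(buf.size()),
                             NULL,       // no ClientData
                             isolate);   // NULL would target the default isolate
    }
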
void Debug::SetHostDispatchHandler(HostDispatchHandler handler,
int period) {
- EnsureInitialized("v8::Debug::SetHostDispatchHandler");
- ENTER_V8;
- i::Debugger::SetHostDispatchHandler(handler, period);
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Debug::SetHostDispatchHandler");
+ ENTER_V8(isolate);
+ isolate->debugger()->SetHostDispatchHandler(handler, period);
}
void Debug::SetDebugMessageDispatchHandler(
DebugMessageDispatchHandler handler, bool provide_locker) {
- EnsureInitialized("v8::Debug::SetDebugMessageDispatchHandler");
- ENTER_V8;
- i::Debugger::SetDebugMessageDispatchHandler(handler, provide_locker);
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate,
+ "v8::Debug::SetDebugMessageDispatchHandler");
+ ENTER_V8(isolate);
+ isolate->debugger()->SetDebugMessageDispatchHandler(
+ handler, provide_locker);
}
Local<Value> Debug::Call(v8::Handle<v8::Function> fun,
v8::Handle<v8::Value> data) {
- if (!i::V8::IsRunning()) return Local<Value>();
- ON_BAILOUT("v8::Debug::Call()", return Local<Value>());
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!isolate->IsInitialized()) return Local<Value>();
+ ON_BAILOUT(isolate, "v8::Debug::Call()", return Local<Value>());
+ ENTER_V8(isolate);
i::Handle<i::Object> result;
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
if (data.IsEmpty()) {
- result = i::Debugger::Call(Utils::OpenHandle(*fun),
- i::Factory::undefined_value(),
- &has_pending_exception);
+ result = isolate->debugger()->Call(Utils::OpenHandle(*fun),
+ isolate->factory()->undefined_value(),
+ &has_pending_exception);
} else {
- result = i::Debugger::Call(Utils::OpenHandle(*fun),
- Utils::OpenHandle(*data),
- &has_pending_exception);
+ result = isolate->debugger()->Call(Utils::OpenHandle(*fun),
+ Utils::OpenHandle(*data),
+ &has_pending_exception);
}
- EXCEPTION_BAILOUT_CHECK(Local<Value>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return Utils::ToLocal(result);
}
Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
- if (!i::V8::IsRunning()) return Local<Value>();
- ON_BAILOUT("v8::Debug::GetMirror()", return Local<Value>());
- ENTER_V8;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!isolate->IsInitialized()) return Local<Value>();
+ ON_BAILOUT(isolate, "v8::Debug::GetMirror()", return Local<Value>());
+ ENTER_V8(isolate);
v8::HandleScope scope;
- i::Debug::Load();
- i::Handle<i::JSObject> debug(i::Debug::debug_context()->global());
- i::Handle<i::String> name = i::Factory::LookupAsciiSymbol("MakeMirror");
+ i::Debug* isolate_debug = isolate->debug();
+ isolate_debug->Load();
+ i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global());
+ i::Handle<i::String> name =
+ isolate->factory()->LookupAsciiSymbol("MakeMirror");
i::Handle<i::Object> fun_obj = i::GetProperty(debug, name);
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj);
v8::Handle<v8::Function> v8_fun = Utils::ToLocal(fun);
const int kArgc = 1;
v8::Handle<v8::Value> argv[kArgc] = { obj };
- EXCEPTION_PREAMBLE();
+ EXCEPTION_PREAMBLE(isolate);
v8::Handle<v8::Value> result = v8_fun->Call(Utils::ToLocal(debug),
kArgc,
argv);
- EXCEPTION_BAILOUT_CHECK(Local<Value>());
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return scope.Close(result);
}
bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) {
- return i::Debugger::StartAgent(name, port, wait_for_connection);
+ return i::Isolate::Current()->debugger()->StartAgent(name, port,
+ wait_for_connection);
}
void Debug::ProcessDebugMessages() {
@@ -4605,389 +5336,568 @@ void Debug::ProcessDebugMessages() {
}
Local<Context> Debug::GetDebugContext() {
- EnsureInitialized("v8::Debug::GetDebugContext()");
- ENTER_V8;
- return Utils::ToLocal(i::Debugger::GetDebugContext());
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Debug::GetDebugContext()");
+ ENTER_V8(isolate);
+ return Utils::ToLocal(isolate->debugger()->GetDebugContext());
}
#endif // ENABLE_DEBUGGER_SUPPORT
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
Handle<String> CpuProfileNode::GetFunctionName() const {
- IsDeadCheck("v8::CpuProfileNode::GetFunctionName");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetFunctionName");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
const i::CodeEntry* entry = node->entry();
if (!entry->has_name_prefix()) {
return Handle<String>(ToApi<String>(
- i::Factory::LookupAsciiSymbol(entry->name())));
+ isolate->factory()->LookupAsciiSymbol(entry->name())));
} else {
- return Handle<String>(ToApi<String>(i::Factory::NewConsString(
- i::Factory::LookupAsciiSymbol(entry->name_prefix()),
- i::Factory::LookupAsciiSymbol(entry->name()))));
+ return Handle<String>(ToApi<String>(isolate->factory()->NewConsString(
+ isolate->factory()->LookupAsciiSymbol(entry->name_prefix()),
+ isolate->factory()->LookupAsciiSymbol(entry->name()))));
}
+#else
+ return v8::String::Empty();
+#endif
}
Handle<String> CpuProfileNode::GetScriptResourceName() const {
- IsDeadCheck("v8::CpuProfileNode::GetScriptResourceName");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptResourceName");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
- return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
+ return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
node->entry()->resource_name())));
+#else
+ return v8::String::Empty();
+#endif
}
int CpuProfileNode::GetLineNumber() const {
- IsDeadCheck("v8::CpuProfileNode::GetLineNumber");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetLineNumber");
return reinterpret_cast<const i::ProfileNode*>(this)->entry()->line_number();
+#else
+ return 0;
+#endif
}
double CpuProfileNode::GetTotalTime() const {
- IsDeadCheck("v8::CpuProfileNode::GetTotalTime");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalTime");
return reinterpret_cast<const i::ProfileNode*>(this)->GetTotalMillis();
+#else
+ return 0.0;
+#endif
}
double CpuProfileNode::GetSelfTime() const {
- IsDeadCheck("v8::CpuProfileNode::GetSelfTime");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfTime");
return reinterpret_cast<const i::ProfileNode*>(this)->GetSelfMillis();
+#else
+ return 0.0;
+#endif
}
double CpuProfileNode::GetTotalSamplesCount() const {
- IsDeadCheck("v8::CpuProfileNode::GetTotalSamplesCount");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalSamplesCount");
return reinterpret_cast<const i::ProfileNode*>(this)->total_ticks();
+#else
+ return 0.0;
+#endif
}
double CpuProfileNode::GetSelfSamplesCount() const {
- IsDeadCheck("v8::CpuProfileNode::GetSelfSamplesCount");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfSamplesCount");
return reinterpret_cast<const i::ProfileNode*>(this)->self_ticks();
+#else
+ return 0.0;
+#endif
}
unsigned CpuProfileNode::GetCallUid() const {
- IsDeadCheck("v8::CpuProfileNode::GetCallUid");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetCallUid");
return reinterpret_cast<const i::ProfileNode*>(this)->entry()->GetCallUid();
+#else
+ return 0;
+#endif
}
int CpuProfileNode::GetChildrenCount() const {
- IsDeadCheck("v8::CpuProfileNode::GetChildrenCount");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetChildrenCount");
return reinterpret_cast<const i::ProfileNode*>(this)->children()->length();
+#else
+ return 0;
+#endif
}
const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
- IsDeadCheck("v8::CpuProfileNode::GetChild");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetChild");
const i::ProfileNode* child =
reinterpret_cast<const i::ProfileNode*>(this)->children()->at(index);
return reinterpret_cast<const CpuProfileNode*>(child);
+#else
+ return NULL;
+#endif
+}
+
+
+void CpuProfile::Delete() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfile::Delete");
+ i::CpuProfiler::DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
+ if (i::CpuProfiler::GetProfilesCount() == 0 &&
+ !i::CpuProfiler::HasDetachedProfiles()) {
+ // If this was the last profile, clean up all accessory data as well.
+ i::CpuProfiler::DeleteAllProfiles();
+ }
+#endif
}
unsigned CpuProfile::GetUid() const {
- IsDeadCheck("v8::CpuProfile::GetUid");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfile::GetUid");
return reinterpret_cast<const i::CpuProfile*>(this)->uid();
+#else
+ return 0;
+#endif
}
Handle<String> CpuProfile::GetTitle() const {
- IsDeadCheck("v8::CpuProfile::GetTitle");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfile::GetTitle");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
+ return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
profile->title())));
+#else
+ return v8::String::Empty();
+#endif
}
const CpuProfileNode* CpuProfile::GetBottomUpRoot() const {
- IsDeadCheck("v8::CpuProfile::GetBottomUpRoot");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfile::GetBottomUpRoot");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return reinterpret_cast<const CpuProfileNode*>(profile->bottom_up()->root());
+#else
+ return NULL;
+#endif
}
const CpuProfileNode* CpuProfile::GetTopDownRoot() const {
- IsDeadCheck("v8::CpuProfile::GetTopDownRoot");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfile::GetTopDownRoot");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return reinterpret_cast<const CpuProfileNode*>(profile->top_down()->root());
+#else
+ return NULL;
+#endif
}
int CpuProfiler::GetProfilesCount() {
- IsDeadCheck("v8::CpuProfiler::GetProfilesCount");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfiler::GetProfilesCount");
return i::CpuProfiler::GetProfilesCount();
+#else
+ return 0;
+#endif
}
const CpuProfile* CpuProfiler::GetProfile(int index,
Handle<Value> security_token) {
- IsDeadCheck("v8::CpuProfiler::GetProfile");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfiler::GetProfile");
return reinterpret_cast<const CpuProfile*>(
i::CpuProfiler::GetProfile(
security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
index));
+#else
+ return NULL;
+#endif
}
const CpuProfile* CpuProfiler::FindProfile(unsigned uid,
Handle<Value> security_token) {
- IsDeadCheck("v8::CpuProfiler::FindProfile");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfiler::FindProfile");
return reinterpret_cast<const CpuProfile*>(
i::CpuProfiler::FindProfile(
security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
uid));
+#else
+ return NULL;
+#endif
}
void CpuProfiler::StartProfiling(Handle<String> title) {
- IsDeadCheck("v8::CpuProfiler::StartProfiling");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfiler::StartProfiling");
i::CpuProfiler::StartProfiling(*Utils::OpenHandle(*title));
+#endif
}
const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
Handle<Value> security_token) {
- IsDeadCheck("v8::CpuProfiler::StopProfiling");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfiler::StopProfiling");
return reinterpret_cast<const CpuProfile*>(
i::CpuProfiler::StopProfiling(
security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
*Utils::OpenHandle(*title)));
+#else
+ return NULL;
+#endif
}
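
StartProfiling/StopProfiling bracket a sampling run keyed by title, and with profiling compiled out they degrade to the no-op/NULL stubs in the #else branches. A usage sketch (assumes an entered context and <stdio.h>):

    void ProfileRequest() {
      v8::HandleScope scope;
      v8::Handle<v8::String> title = v8::String::New("request");
      v8::CpuProfiler::StartProfiling(title);
      // ... run the JavaScript being measured ...
      const v8::CpuProfile* profile = v8::CpuProfiler::StopProfiling(title);
      if (profile != NULL) {
        const v8::CpuProfileNode* root = profile->GetTopDownRoot();
        for (int i = 0; i < root->GetChildrenCount(); i++) {
          v8::String::Utf8Value name(root->GetChild(i)->GetFunctionName());
          printf("%s: %.1fms self\n", *name, root->GetChild(i)->GetSelfTime());
        }
        const_cast<v8::CpuProfile*>(profile)->Delete();  // new in this version
      }
    }
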
+void CpuProfiler::DeleteAllProfiles() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::CpuProfiler::DeleteAllProfiles");
+ i::CpuProfiler::DeleteAllProfiles();
+#endif
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) {
return const_cast<i::HeapGraphEdge*>(
reinterpret_cast<const i::HeapGraphEdge*>(edge));
}
+#endif
+
HeapGraphEdge::Type HeapGraphEdge::GetType() const {
- IsDeadCheck("v8::HeapGraphEdge::GetType");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphEdge::GetType");
return static_cast<HeapGraphEdge::Type>(ToInternal(this)->type());
+#else
+ return static_cast<HeapGraphEdge::Type>(0);
+#endif
}
Handle<Value> HeapGraphEdge::GetName() const {
- IsDeadCheck("v8::HeapGraphEdge::GetName");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphEdge::GetName");
i::HeapGraphEdge* edge = ToInternal(this);
switch (edge->type()) {
case i::HeapGraphEdge::kContextVariable:
case i::HeapGraphEdge::kInternal:
case i::HeapGraphEdge::kProperty:
case i::HeapGraphEdge::kShortcut:
- return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
+ return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
edge->name())));
case i::HeapGraphEdge::kElement:
case i::HeapGraphEdge::kHidden:
- return Handle<Number>(ToApi<Number>(i::Factory::NewNumberFromInt(
+ return Handle<Number>(ToApi<Number>(isolate->factory()->NewNumberFromInt(
edge->index())));
default: UNREACHABLE();
}
- return ImplementationUtilities::Undefined();
+#endif
+ return v8::Undefined();
}
const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
- IsDeadCheck("v8::HeapGraphEdge::GetFromNode");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphEdge::GetFromNode");
const i::HeapEntry* from = ToInternal(this)->From();
return reinterpret_cast<const HeapGraphNode*>(from);
+#else
+ return NULL;
+#endif
}
const HeapGraphNode* HeapGraphEdge::GetToNode() const {
- IsDeadCheck("v8::HeapGraphEdge::GetToNode");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphEdge::GetToNode");
const i::HeapEntry* to = ToInternal(this)->to();
return reinterpret_cast<const HeapGraphNode*>(to);
+#else
+ return NULL;
+#endif
}
-static i::HeapGraphPath* ToInternal(const HeapGraphPath* path) {
- return const_cast<i::HeapGraphPath*>(
- reinterpret_cast<const i::HeapGraphPath*>(path));
-}
-
-
-int HeapGraphPath::GetEdgesCount() const {
- return ToInternal(this)->path()->length();
-}
-
-
-const HeapGraphEdge* HeapGraphPath::GetEdge(int index) const {
- return reinterpret_cast<const HeapGraphEdge*>(
- ToInternal(this)->path()->at(index));
-}
-
-
-const HeapGraphNode* HeapGraphPath::GetFromNode() const {
- return GetEdgesCount() > 0 ? GetEdge(0)->GetFromNode() : NULL;
-}
-
-
-const HeapGraphNode* HeapGraphPath::GetToNode() const {
- const int count = GetEdgesCount();
- return count > 0 ? GetEdge(count - 1)->GetToNode() : NULL;
-}
-
-
+#ifdef ENABLE_LOGGING_AND_PROFILING
static i::HeapEntry* ToInternal(const HeapGraphNode* entry) {
return const_cast<i::HeapEntry*>(
reinterpret_cast<const i::HeapEntry*>(entry));
}
+#endif
HeapGraphNode::Type HeapGraphNode::GetType() const {
- IsDeadCheck("v8::HeapGraphNode::GetType");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphNode::GetType");
return static_cast<HeapGraphNode::Type>(ToInternal(this)->type());
+#else
+ return static_cast<HeapGraphNode::Type>(0);
+#endif
}
Handle<String> HeapGraphNode::GetName() const {
- IsDeadCheck("v8::HeapGraphNode::GetName");
- return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphNode::GetName");
+ return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
ToInternal(this)->name())));
+#else
+ return v8::String::Empty();
+#endif
}
uint64_t HeapGraphNode::GetId() const {
- IsDeadCheck("v8::HeapGraphNode::GetId");
- ASSERT(ToInternal(this)->snapshot()->type() != i::HeapSnapshot::kAggregated);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphNode::GetId");
return ToInternal(this)->id();
-}
-
-
-int HeapGraphNode::GetInstancesCount() const {
- IsDeadCheck("v8::HeapGraphNode::GetInstancesCount");
- ASSERT(ToInternal(this)->snapshot()->type() == i::HeapSnapshot::kAggregated);
- return static_cast<int>(ToInternal(this)->id());
+#else
+ return 0;
+#endif
}
int HeapGraphNode::GetSelfSize() const {
- IsDeadCheck("v8::HeapGraphNode::GetSelfSize");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapGraphNode::GetSelfSize");
return ToInternal(this)->self_size();
+#else
+ return 0;
+#endif
}
int HeapGraphNode::GetRetainedSize(bool exact) const {
- IsDeadCheck("v8::HeapSnapshot::GetRetainedSize");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize");
return ToInternal(this)->RetainedSize(exact);
+#else
+ return 0;
+#endif
}
int HeapGraphNode::GetChildrenCount() const {
- IsDeadCheck("v8::HeapSnapshot::GetChildrenCount");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
return ToInternal(this)->children().length();
+#else
+ return 0;
+#endif
}
const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
- IsDeadCheck("v8::HeapSnapshot::GetChild");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
return reinterpret_cast<const HeapGraphEdge*>(
&ToInternal(this)->children()[index]);
+#else
+ return NULL;
+#endif
}
int HeapGraphNode::GetRetainersCount() const {
- IsDeadCheck("v8::HeapSnapshot::GetRetainersCount");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainersCount");
return ToInternal(this)->retainers().length();
+#else
+ return 0;
+#endif
}
const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
- IsDeadCheck("v8::HeapSnapshot::GetRetainer");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainer");
return reinterpret_cast<const HeapGraphEdge*>(
ToInternal(this)->retainers()[index]);
-}
-
-
-int HeapGraphNode::GetRetainingPathsCount() const {
- IsDeadCheck("v8::HeapSnapshot::GetRetainingPathsCount");
- return ToInternal(this)->GetRetainingPaths()->length();
-}
-
-
-const HeapGraphPath* HeapGraphNode::GetRetainingPath(int index) const {
- IsDeadCheck("v8::HeapSnapshot::GetRetainingPath");
- return reinterpret_cast<const HeapGraphPath*>(
- ToInternal(this)->GetRetainingPaths()->at(index));
+#else
+ return NULL;
+#endif
}
const HeapGraphNode* HeapGraphNode::GetDominatorNode() const {
- IsDeadCheck("v8::HeapSnapshot::GetDominatorNode");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetDominatorNode");
return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->dominator());
+#else
+ return NULL;
+#endif
}
-const HeapGraphNode* HeapSnapshotsDiff::GetAdditionsRoot() const {
- IsDeadCheck("v8::HeapSnapshotsDiff::GetAdditionsRoot");
- i::HeapSnapshotsDiff* diff =
- const_cast<i::HeapSnapshotsDiff*>(
- reinterpret_cast<const i::HeapSnapshotsDiff*>(this));
- return reinterpret_cast<const HeapGraphNode*>(diff->additions_root());
-}
-
-
-const HeapGraphNode* HeapSnapshotsDiff::GetDeletionsRoot() const {
- IsDeadCheck("v8::HeapSnapshotsDiff::GetDeletionsRoot");
- i::HeapSnapshotsDiff* diff =
- const_cast<i::HeapSnapshotsDiff*>(
- reinterpret_cast<const i::HeapSnapshotsDiff*>(this));
- return reinterpret_cast<const HeapGraphNode*>(diff->deletions_root());
-}
-
-
+#ifdef ENABLE_LOGGING_AND_PROFILING
static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
return const_cast<i::HeapSnapshot*>(
reinterpret_cast<const i::HeapSnapshot*>(snapshot));
}
+#endif
+
+
+void HeapSnapshot::Delete() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::Delete");
+ if (i::HeapProfiler::GetSnapshotsCount() > 1) {
+ ToInternal(this)->Delete();
+ } else {
+ // If this is the last snapshot, clean up all accessory data as well.
+ i::HeapProfiler::DeleteAllSnapshots();
+ }
+#endif
+}
HeapSnapshot::Type HeapSnapshot::GetType() const {
- IsDeadCheck("v8::HeapSnapshot::GetType");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetType");
return static_cast<HeapSnapshot::Type>(ToInternal(this)->type());
+#else
+ return static_cast<HeapSnapshot::Type>(0);
+#endif
}
unsigned HeapSnapshot::GetUid() const {
- IsDeadCheck("v8::HeapSnapshot::GetUid");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetUid");
return ToInternal(this)->uid();
+#else
+ return 0;
+#endif
}
Handle<String> HeapSnapshot::GetTitle() const {
- IsDeadCheck("v8::HeapSnapshot::GetTitle");
- return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetTitle");
+ return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
ToInternal(this)->title())));
+#else
+ return v8::String::Empty();
+#endif
}
const HeapGraphNode* HeapSnapshot::GetRoot() const {
- IsDeadCheck("v8::HeapSnapshot::GetHead");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetHead");
return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->root());
+#else
+ return NULL;
+#endif
}
const HeapGraphNode* HeapSnapshot::GetNodeById(uint64_t id) const {
- IsDeadCheck("v8::HeapSnapshot::GetNodeById");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById");
return reinterpret_cast<const HeapGraphNode*>(
ToInternal(this)->GetEntryById(id));
+#else
+ return NULL;
+#endif
}
-const HeapSnapshotsDiff* HeapSnapshot::CompareWith(
- const HeapSnapshot* snapshot) const {
- IsDeadCheck("v8::HeapSnapshot::CompareWith");
- return reinterpret_cast<const HeapSnapshotsDiff*>(
- ToInternal(this)->CompareWith(ToInternal(snapshot)));
+int HeapSnapshot::GetNodesCount() const {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
+ return ToInternal(this)->entries()->length();
+#else
+ return 0;
+#endif
+}
+
+
+const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
+ return reinterpret_cast<const HeapGraphNode*>(
+ ToInternal(this)->entries()->at(index));
+#else
+ return NULL;
+#endif
}
void HeapSnapshot::Serialize(OutputStream* stream,
HeapSnapshot::SerializationFormat format) const {
- IsDeadCheck("v8::HeapSnapshot::Serialize");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::Serialize");
ApiCheck(format == kJSON,
"v8::HeapSnapshot::Serialize",
"Unknown serialization format");
@@ -4999,50 +5909,85 @@ void HeapSnapshot::Serialize(OutputStream* stream,
"Invalid stream chunk size");
i::HeapSnapshotJSONSerializer serializer(ToInternal(this));
serializer.Serialize(stream);
+#endif
}
int HeapProfiler::GetSnapshotsCount() {
- IsDeadCheck("v8::HeapProfiler::GetSnapshotsCount");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotsCount");
return i::HeapProfiler::GetSnapshotsCount();
+#else
+ return 0;
+#endif
}
const HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
- IsDeadCheck("v8::HeapProfiler::GetSnapshot");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshot");
return reinterpret_cast<const HeapSnapshot*>(
i::HeapProfiler::GetSnapshot(index));
+#else
+ return NULL;
+#endif
}
const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
- IsDeadCheck("v8::HeapProfiler::FindSnapshot");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapProfiler::FindSnapshot");
return reinterpret_cast<const HeapSnapshot*>(
i::HeapProfiler::FindSnapshot(uid));
+#else
+ return NULL;
+#endif
}
const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
HeapSnapshot::Type type,
ActivityControl* control) {
- IsDeadCheck("v8::HeapProfiler::TakeSnapshot");
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot");
i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
switch (type) {
case HeapSnapshot::kFull:
internal_type = i::HeapSnapshot::kFull;
break;
- case HeapSnapshot::kAggregated:
- internal_type = i::HeapSnapshot::kAggregated;
- break;
default:
UNREACHABLE();
}
return reinterpret_cast<const HeapSnapshot*>(
i::HeapProfiler::TakeSnapshot(
*Utils::OpenHandle(*title), internal_type, control));
+#else
+ return NULL;
+#endif
+}
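
TakeSnapshot pairs naturally with Serialize: an embedder-supplied OutputStream receives the snapshot as JSON chunks. A minimal sketch (the StdoutStream class is hypothetical; assumes <stdio.h>):

    class StdoutStream : public v8::OutputStream {
     public:
      virtual void EndOfStream() { }
      virtual WriteResult WriteAsciiChunk(char* data, int size) {
        fwrite(data, 1, size, stdout);
        return kContinue;
      }
    };

    void DumpHeap() {
      const v8::HeapSnapshot* snapshot = v8::HeapProfiler::TakeSnapshot(
          v8::String::New("dump"), v8::HeapSnapshot::kFull);
      if (snapshot != NULL) {
        StdoutStream stream;
        snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
      }
    }
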
+
+
+void HeapProfiler::DeleteAllSnapshots() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapProfiler::DeleteAllSnapshots");
+ i::HeapProfiler::DeleteAllSnapshots();
+#endif
+}
+
+
+void HeapProfiler::DefineWrapperClass(uint16_t class_id,
+ WrapperInfoCallback callback) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate::Current()->heap_profiler()->DefineWrapperClass(class_id,
+ callback);
+#endif
}
-#endif // ENABLE_LOGGING_AND_PROFILING
v8::Testing::StressType internal::Testing::stress_type_ =
@@ -5106,27 +6051,22 @@ void Testing::PrepareStressRun(int run) {
}
-namespace internal {
+void Testing::DeoptimizeAll() {
+ internal::Deoptimizer::DeoptimizeAll();
+}
-HandleScopeImplementer* HandleScopeImplementer::instance() {
- return &thread_local;
-}
+namespace internal {
void HandleScopeImplementer::FreeThreadResources() {
- thread_local.Free();
+ Free();
}
char* HandleScopeImplementer::ArchiveThread(char* storage) {
- return thread_local.ArchiveThreadHelper(storage);
-}
-
-
-char* HandleScopeImplementer::ArchiveThreadHelper(char* storage) {
v8::ImplementationUtilities::HandleScopeData* current =
- v8::ImplementationUtilities::CurrentHandleScope();
+ isolate_->handle_scope_data();
handle_scope_data_ = *current;
memcpy(storage, this, sizeof(*this));
@@ -5138,18 +6078,13 @@ char* HandleScopeImplementer::ArchiveThreadHelper(char* storage) {
int HandleScopeImplementer::ArchiveSpacePerThread() {
- return sizeof(thread_local);
+ return sizeof(HandleScopeImplementer);
}
char* HandleScopeImplementer::RestoreThread(char* storage) {
- return thread_local.RestoreThreadHelper(storage);
-}
-
-
-char* HandleScopeImplementer::RestoreThreadHelper(char* storage) {
memcpy(this, storage, sizeof(*this));
- *v8::ImplementationUtilities::CurrentHandleScope() = handle_scope_data_;
+ *isolate_->handle_scope_data() = handle_scope_data_;
return storage + ArchiveSpacePerThread();
}
@@ -5175,16 +6110,16 @@ void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
void HandleScopeImplementer::Iterate(ObjectVisitor* v) {
v8::ImplementationUtilities::HandleScopeData* current =
- v8::ImplementationUtilities::CurrentHandleScope();
- thread_local.handle_scope_data_ = *current;
- thread_local.IterateThis(v);
+ isolate_->handle_scope_data();
+ handle_scope_data_ = *current;
+ IterateThis(v);
}
char* HandleScopeImplementer::Iterate(ObjectVisitor* v, char* storage) {
- HandleScopeImplementer* thread_local =
+ HandleScopeImplementer* scope_implementer =
reinterpret_cast<HandleScopeImplementer*>(storage);
- thread_local->IterateThis(v);
+ scope_implementer->IterateThis(v);
return storage + ArchiveSpacePerThread();
}
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index d07d75b91..8d2e77889 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -53,8 +53,8 @@ class Consts {
class NeanderObject {
public:
explicit NeanderObject(int size);
- inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
- inline NeanderObject(v8::internal::Object* obj);
+ explicit inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
+ explicit inline NeanderObject(v8::internal::Object* obj);
inline v8::internal::Object* get(int index);
inline void set(int index, v8::internal::Object* value);
inline v8::internal::Handle<v8::internal::JSObject> value() { return value_; }
@@ -69,7 +69,7 @@ class NeanderObject {
class NeanderArray {
public:
NeanderArray();
- inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
+ explicit inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
inline v8::internal::Handle<v8::internal::JSObject> value() {
return obj_.value();
}
@@ -115,14 +115,14 @@ void NeanderObject::set(int offset, v8::internal::Object* value) {
template <typename T> static inline T ToCData(v8::internal::Object* obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
return reinterpret_cast<T>(
- reinterpret_cast<intptr_t>(v8::internal::Proxy::cast(obj)->proxy()));
+ reinterpret_cast<intptr_t>(v8::internal::Foreign::cast(obj)->address()));
}
template <typename T>
static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
- return v8::internal::Factory::NewProxy(
+ return FACTORY->NewForeign(
reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
}
@@ -157,7 +157,6 @@ class RegisteredExtension {
RegisteredExtension* next_auto_;
ExtensionTraversalState state_;
static RegisteredExtension* first_extension_;
- static RegisteredExtension* first_auto_extension_;
};
@@ -183,7 +182,7 @@ class Utils {
static inline Local<Array> ToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
static inline Local<External> ToLocal(
- v8::internal::Handle<v8::internal::Proxy> obj);
+ v8::internal::Handle<v8::internal::Foreign> obj);
static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<StackTrace> StackTraceToLocal(
@@ -237,7 +236,7 @@ class Utils {
OpenHandle(const v8::Signature* sig);
static inline v8::internal::Handle<v8::internal::TypeSwitchInfo>
OpenHandle(const v8::TypeSwitch* that);
- static inline v8::internal::Handle<v8::internal::Proxy>
+ static inline v8::internal::Handle<v8::internal::Foreign>
OpenHandle(const v8::External* that);
};
@@ -274,7 +273,7 @@ MAKE_TO_LOCAL(ToLocal, String, String)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
-MAKE_TO_LOCAL(ToLocal, Proxy, External)
+MAKE_TO_LOCAL(ToLocal, Foreign, External)
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
@@ -312,7 +311,7 @@ MAKE_OPEN_HANDLE(Script, Object)
MAKE_OPEN_HANDLE(Function, JSFunction)
MAKE_OPEN_HANDLE(Message, JSObject)
MAKE_OPEN_HANDLE(Context, Context)
-MAKE_OPEN_HANDLE(External, Proxy)
+MAKE_OPEN_HANDLE(External, Foreign)
MAKE_OPEN_HANDLE(StackTrace, JSArray)
MAKE_OPEN_HANDLE(StackFrame, JSObject)
@@ -321,36 +320,101 @@ MAKE_OPEN_HANDLE(StackFrame, JSObject)
namespace internal {
+// Tracks string usage to help make better decisions when
+// externalizing strings.
+//
+// Implementation note: internally this class only tracks fresh
+// strings and keeps a single use counter for them.
+class StringTracker {
+ public:
+ // Records that the given string's characters were copied to some
+ // external buffer. If this happens often we should honor
+ // externalization requests for the string.
+ void RecordWrite(Handle<String> string) {
+ Address address = reinterpret_cast<Address>(*string);
+ Address top = isolate_->heap()->NewSpaceTop();
+ if (IsFreshString(address, top)) {
+ IncrementUseCount(top);
+ }
+ }
+
+ // Estimates freshness and use frequency of the given string based
+ // on how close it is to the new space top and the recorded usage
+ // history.
+ inline bool IsFreshUnusedString(Handle<String> string) {
+ Address address = reinterpret_cast<Address>(*string);
+ Address top = isolate_->heap()->NewSpaceTop();
+ return IsFreshString(address, top) && IsUseCountLow(top);
+ }
+
+ private:
+ StringTracker() : use_count_(0), last_top_(NULL), isolate_(NULL) { }
+
+ static inline bool IsFreshString(Address string, Address top) {
+ return top - kFreshnessLimit <= string && string <= top;
+ }
+
+ inline bool IsUseCountLow(Address top) {
+ if (last_top_ != top) return true;
+ return use_count_ < kUseLimit;
+ }
+
+ inline void IncrementUseCount(Address top) {
+ if (last_top_ != top) {
+ use_count_ = 0;
+ last_top_ = top;
+ }
+ ++use_count_;
+ }
+
+ // Single use counter shared by all fresh strings.
+ int use_count_;
+
+ // Last new space top when the use count above was valid.
+ Address last_top_;
+
+ Isolate* isolate_;
+
+ // How close to the new space top a fresh string has to be.
+ static const int kFreshnessLimit = 1024;
+
+ // The number of uses required to consider a string useful.
+ static const int kUseLimit = 32;
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(StringTracker);
+};
+
+
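The freshness heuristic is just an address-window test against the new-space allocation top: anything allocated in the last kFreshnessLimit (1024) bytes counts as fresh, and a single shared counter tracks how often such strings get copied out. A standalone illustration of the window check (not V8 code):

    // With top == 0x10000 and a 1024-byte window, addresses
    // 0xFC00..0x10000 are considered fresh.
    bool IsFresh(uintptr_t addr, uintptr_t top) {
      return top - 1024 <= addr && addr <= top;
    }
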
// This class is here in order to be able to declare it a friend of
// HandleScope. Moving these methods to be members of HandleScope would be
-// neat in some ways, but it would expose external implementation details in
+// neat in some ways, but it would expose internal implementation details in
// our public header file, which is undesirable.
//
-// There is a singleton instance of this class to hold the per-thread data.
-// For multithreaded V8 programs this data is copied in and out of storage
+// An isolate has a single instance of this class to hold the current thread's
+// data. In multithreaded V8 programs this data is copied in and out of storage
// so that the currently executing thread always has its own copy of this
// data.
class HandleScopeImplementer {
public:
-
- HandleScopeImplementer()
- : blocks_(0),
+ explicit HandleScopeImplementer(Isolate* isolate)
+ : isolate_(isolate),
+ blocks_(0),
entered_contexts_(0),
saved_contexts_(0),
spare_(NULL),
ignore_out_of_memory_(false),
call_depth_(0) { }
- static HandleScopeImplementer* instance();
-
// Threading support for handle data.
static int ArchiveSpacePerThread();
- static char* RestoreThread(char* from);
- static char* ArchiveThread(char* to);
- static void FreeThreadResources();
+ char* RestoreThread(char* from);
+ char* ArchiveThread(char* to);
+ void FreeThreadResources();
// Garbage collection support.
- static void Iterate(v8::internal::ObjectVisitor* v);
+ void Iterate(v8::internal::ObjectVisitor* v);
static char* Iterate(v8::internal::ObjectVisitor* v, char* data);
@@ -402,6 +466,7 @@ class HandleScopeImplementer {
ASSERT(call_depth_ == 0);
}
+ Isolate* isolate_;
List<internal::Object**> blocks_;
// Used as a stack to keep track of entered contexts.
List<Handle<Object> > entered_contexts_;
diff --git a/deps/v8/src/apinatives.js b/deps/v8/src/apinatives.js
index ca2bbf5c5..193863f5c 100644
--- a/deps/v8/src/apinatives.js
+++ b/deps/v8/src/apinatives.js
@@ -73,7 +73,15 @@ function InstantiateFunction(data, name) {
if (name) %FunctionSetName(fun, name);
cache[serialNumber] = fun;
var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
- fun.prototype = prototype ? Instantiate(prototype) : {};
+ var attributes = %GetTemplateField(data, kApiPrototypeAttributesOffset);
+ if (attributes != NONE) {
+ %IgnoreAttributesAndSetProperty(
+ fun, "prototype",
+ prototype ? Instantiate(prototype) : {},
+ attributes);
+ } else {
+ fun.prototype = prototype ? Instantiate(prototype) : {};
+ }
%SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
var parent = %GetTemplateField(data, kApiParentTemplateOffset);
if (parent) {
diff --git a/deps/v8/src/apiutils.h b/deps/v8/src/apiutils.h
index 9683aa43b..68579af1b 100644
--- a/deps/v8/src/apiutils.h
+++ b/deps/v8/src/apiutils.h
@@ -31,11 +31,6 @@
namespace v8 {
class ImplementationUtilities {
public:
- static v8::Handle<v8::Primitive> Undefined();
- static v8::Handle<v8::Primitive> Null();
- static v8::Handle<v8::Boolean> True();
- static v8::Handle<v8::Boolean> False();
-
static int GetNameCount(ExtensionConfiguration* that) {
return that->name_count_;
}
@@ -68,8 +63,6 @@ class ImplementationUtilities {
// to access the HandleScope data.
typedef v8::HandleScope::Data HandleScopeData;
- static HandleScopeData* CurrentHandleScope();
-
#ifdef DEBUG
static void ZapHandleRange(internal::Object** begin, internal::Object** end);
#endif
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index 5cf8deaa5..72bbe1dd1 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -28,6 +28,8 @@
#ifndef V8_ARGUMENTS_H_
#define V8_ARGUMENTS_H_
+#include "allocation.h"
+
namespace v8 {
namespace internal {
@@ -61,11 +63,18 @@ class Arguments BASE_EMBEDDED {
return Handle<S>(reinterpret_cast<S**>(value));
}
+ int smi_at(int index) {
+ return Smi::cast((*this)[index])->value();
+ }
+
+ double number_at(int index) {
+ return (*this)[index]->Number();
+ }
+
// Get the total number of arguments including the receiver.
int length() const { return length_; }
Object** arguments() { return arguments_; }
-
private:
int length_;
Object** arguments_;
@@ -77,15 +86,16 @@ class Arguments BASE_EMBEDDED {
// can.
class CustomArguments : public Relocatable {
public:
- inline CustomArguments(Object* data,
+ inline CustomArguments(Isolate* isolate,
+ Object* data,
Object* self,
- JSObject* holder) {
+ JSObject* holder) : Relocatable(isolate) {
values_[2] = self;
values_[1] = holder;
values_[0] = data;
}
- inline CustomArguments() {
+ inline explicit CustomArguments(Isolate* isolate) : Relocatable(isolate) {
#ifdef DEBUG
for (size_t i = 0; i < ARRAY_SIZE(values_); i++) {
values_[i] = reinterpret_cast<Object*>(kZapValue);
@@ -100,6 +110,17 @@ class CustomArguments : public Relocatable {
};
+#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
+Type Name(Arguments args, Isolate* isolate)
+
+
+#define RUNTIME_FUNCTION(Type, Name) \
+Type Name(Arguments args, Isolate* isolate)
+
+
+#define RUNTIME_ARGUMENTS(isolate, args) args, isolate
+
+
} } // namespace v8::internal
#endif // V8_ARGUMENTS_H_
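
The new macros thread the isolate through every runtime function, and smi_at/number_at replace hand-written Smi::cast boilerplate at call sites. A sketch of a runtime function body under these macros (Runtime_AddSmis is hypothetical; v8::internal namespace assumed):

    RUNTIME_FUNCTION(MaybeObject*, Runtime_AddSmis) {
      ASSERT(args.length() == 2);
      int a = args.smi_at(0);      // Smi::cast((*this)[0])->value()
      int b = args.smi_at(1);
      return Smi::FromInt(a + b);  // Smis need no heap allocation
    }
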
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 3b811021b..3e19a4538 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -203,11 +203,12 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (Debug::has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
+ // TODO(isolates): Get a cached isolate below.
+ } else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ IsPatchedDebugBreakSlotSequence())) &&
+ Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
@@ -217,23 +218,23 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
template<typename StaticVisitor>
-void RelocInfo::Visit() {
+void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(target_object_address());
+ StaticVisitor::VisitPointer(heap, target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(this);
+ StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(this);
+ StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (Debug::has_break_points() &&
+ } else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(this);
+ StaticVisitor::VisitDebugTarget(heap, this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
StaticVisitor::VisitRuntimeEntry(this);
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index c91d4ba2b..efa252dba 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -32,7 +32,7 @@
// The original source code covered by the above license has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -44,62 +44,80 @@
namespace v8 {
namespace internal {
-// Safe default is no features.
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::enabled_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_ = 0;
-#ifdef __arm__
+// Get the CPU features enabled by the build. For cross compilation the
+// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP_INSTRUCTIONS
+// can be defined to enable ARMv7 and VFPv3 instructions when building the
+// snapshot.
static uint64_t CpuFeaturesImpliedByCompiler() {
uint64_t answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
answer |= 1u << ARMv7;
#endif // def CAN_USE_ARMV7_INSTRUCTIONS
+#ifdef CAN_USE_VFP_INSTRUCTIONS
+ answer |= 1u << VFP3 | 1u << ARMv7;
+#endif // def CAN_USE_VFP_INSTRUCTIONS
+
+#ifdef __arm__
// If the compiler is allowed to use VFP then we can use VFP too in our code
// generation even when generating snapshots. This won't work for cross
- // compilation.
+ // compilation. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
- answer |= 1u << VFP3;
+ answer |= 1u << VFP3 | 1u << ARMv7;
#endif // defined(__VFP_FP__) && !defined(__SOFTFP__)
-#ifdef CAN_USE_VFP_INSTRUCTIONS
- answer |= 1u << VFP3;
-#endif // def CAN_USE_VFP_INSTRUCTIONS
+#endif // def __arm__
+
return answer;
}
-#endif // def __arm__
-void CpuFeatures::Probe(bool portable) {
+void CpuFeatures::Probe() {
+ ASSERT(!initialized_);
+#ifdef DEBUG
+ initialized_ = true;
+#endif
+
+ // Get the features implied by the OS and the compiler settings. This is the
+ // minimal set of features which is also allowed for generated code in the
+ // snapshot.
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+ supported_ |= CpuFeaturesImpliedByCompiler();
+
+ if (Serializer::enabled()) {
+ // No probing for features if we might serialize (generate snapshot).
+ return;
+ }
+
#ifndef __arm__
- // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
+ // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
+ // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
if (FLAG_enable_vfp3) {
- supported_ |= 1u << VFP3;
+ supported_ |= 1u << VFP3 | 1u << ARMv7;
}
// For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
if (FLAG_enable_armv7) {
supported_ |= 1u << ARMv7;
}
#else // def __arm__
- if (portable && Serializer::enabled()) {
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- supported_ |= CpuFeaturesImpliedByCompiler();
- return; // No features if we might serialize.
+ // Probe for additional features not already known to be available.
+ if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
+ // This implementation also sets the VFP flags if runtime
+ // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
+ // 0406B, page A1-6.
+ supported_ |= 1u << VFP3 | 1u << ARMv7;
+ found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7;
}
- if (OS::ArmCpuHasFeature(VFP3)) {
- // This implementation also sets the VFP flags if
- // runtime detection of VFP returns true.
- supported_ |= 1u << VFP3;
- found_by_runtime_probing_ |= 1u << VFP3;
- }
-
- if (OS::ArmCpuHasFeature(ARMv7)) {
+ if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
supported_ |= 1u << ARMv7;
found_by_runtime_probing_ |= 1u << ARMv7;
}
-
- if (!portable) found_by_runtime_probing_ = 0;
#endif
}
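
Probe() now runs at most once (the DEBUG-only initialized_ flag asserts this) and skips runtime probing entirely when a snapshot may be generated, so snapshot code relies only on build-time features. Generated code then gates optional instructions on the probed set, roughly in this pattern (a sketch of the usual idiom in the ARM code generators):

    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);  // asserts VFP3 stays enabled in this region
      // ... emit VFP instructions (vadd, vmul, ...) ...
    } else {
      // ... emit an integer or runtime-call fallback ...
    }
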
@@ -148,7 +166,7 @@ Operand::Operand(Handle<Object> handle) {
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!Heap::InNewSpace(obj));
+ ASSERT(!HEAP->InNewSpace(obj));
if (obj->IsHeapObject()) {
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
@@ -266,21 +284,20 @@ const Instr kLdrStrOffsetMask = 0x00000fff;
// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
-static byte* spare_buffer_ = NULL;
-Assembler::Assembler(void* buffer, int buffer_size)
- : positions_recorder_(this),
- allow_peephole_optimization_(false) {
- allow_peephole_optimization_ = FLAG_peephole_optimization;
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+ : AssemblerBase(arg_isolate),
+ positions_recorder_(this),
+ emit_debug_code_(FLAG_debug_code) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
- if (spare_buffer_ != NULL) {
- buffer = spare_buffer_;
- spare_buffer_ = NULL;
+ if (isolate()->assembler_spare_buffer() != NULL) {
+ buffer = isolate()->assembler_spare_buffer();
+ isolate()->set_assembler_spare_buffer(NULL);
}
}
if (buffer == NULL) {
@@ -303,20 +320,22 @@ Assembler::Assembler(void* buffer, int buffer_size)
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
- num_prinfo_ = 0;
+ num_pending_reloc_info_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
- last_const_pool_end_ = 0;
+ first_const_pool_use_ = -1;
last_bound_pos_ = 0;
+ ast_id_for_reloc_info_ = kNoASTId;
}
Assembler::~Assembler() {
ASSERT(const_pool_blocked_nesting_ == 0);
if (own_buffer_) {
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
+ if (isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
@@ -327,7 +346,7 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
- ASSERT(num_prinfo_ == 0);
+ ASSERT(num_pending_reloc_info_ == 0);
// Setup code descriptor.
desc->buffer = buffer_;
@@ -767,11 +786,36 @@ bool Operand::must_use_constant_pool() const {
}
-bool Operand::is_single_instruction() const {
+bool Operand::is_single_instruction(Instr instr) const {
if (rm_.is_valid()) return true;
- if (must_use_constant_pool()) return false;
uint32_t dummy1, dummy2;
- return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
+ if (must_use_constant_pool() ||
+ !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
+ // The immediate operand cannot be encoded as a shifter operand, or the
+ // constant pool must be used. For a mov instruction that does not set the
+ // condition code, alternative encodings can be used.
+ if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
+ if (must_use_constant_pool() ||
+ !CpuFeatures::IsSupported(ARMv7)) {
+ // mov instruction will be an ldr from constant pool (one instruction).
+ return true;
+ } else {
+ // mov instruction will be a mov or movw followed by movt (two
+ // instructions).
+ return false;
+ }
+ } else {
+ // If this is not a mov or mvn instruction there will always be an
+ // additional instruction - either a mov or an ldr. The mov might itself
+ // be two instructions (movw followed by movt), so, including the actual
+ // instruction, two or three instructions will be generated.
+ return false;
+ }
+ } else {
+ // No use of constant pool and the immediate operand can be encoded as a
+ // shifter operand.
+ return true;
+ }
}
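To make the counting rule concrete, a hedged standalone sketch (ignoring the must_use_constant_pool case) of how many instructions a 'mov rd, #imm' costs; fits_shifter_sketch approximates the real fits_shifter, which looks for an 8-bit value rotated right by an even amount:

#include <cstdint>

// Can imm be encoded as imm8 ROR (2 * n)? Approximation of fits_shifter.
static bool fits_shifter_sketch(uint32_t imm) {
  for (int rot = 0; rot < 32; rot += 2) {
    uint32_t v = (rot == 0) ? imm : ((imm << rot) | (imm >> (32 - rot)));
    if (v <= 0xFF) return true;
  }
  return false;
}

// Instruction count for 'mov rd, #imm' under the rules described above.
static int MovInstructionCount(uint32_t imm, bool have_armv7) {
  if (fits_shifter_sketch(imm)) return 1;  // encodable: a single mov
  if (!have_armv7) return 1;               // a single ldr from the constant pool
  return 2;                                // movw followed by movt
}

// Example: MovInstructionCount(0xFF, true) == 1,
//          MovInstructionCount(0x12345678, true) == 2.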
@@ -794,7 +838,8 @@ void Assembler::addrmod1(Instr instr,
CHECK(!rn.is(ip)); // rn should never be ip, or it will be trashed
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) {
+ if (x.must_use_constant_pool() ||
+ !CpuFeatures::IsSupported(ARMv7)) {
RecordRelocInfo(x.rmode_, x.imm32_);
ldr(rd, MemOperand(pc, 0), cond);
} else {
@@ -828,7 +873,7 @@ void Assembler::addrmod1(Instr instr,
emit(instr | rn.code()*B16 | rd.code()*B12);
if (rn.is(pc) || x.rm_.is(pc)) {
// Block constant pool emission for one instruction after reading pc.
- BlockConstPoolBefore(pc_offset() + kInstrSize);
+ BlockConstPoolFor(1);
}
}
@@ -952,7 +997,7 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
// Block the emission of the constant pool, since the branch instruction must
// be emitted at the pc offset recorded by the label.
- BlockConstPoolBefore(pc_offset() + kInstrSize);
+ BlockConstPoolFor(1);
return target_pos - (pc_offset() + kPcLoadDelta);
}
@@ -1049,20 +1094,6 @@ void Assembler::rsb(Register dst, Register src1, const Operand& src2,
void Assembler::add(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | ADD | s, src1, dst, src2);
-
- // Eliminate pattern: push(r), pop()
- // str(src, MemOperand(sp, 4, NegPreIndex), al);
- // add(sp, sp, Operand(kPointerSize));
- // Both instructions can be eliminated.
- if (can_peephole_optimize(2) &&
- // Pattern.
- instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
- (instr_at(pc_ - 2 * kInstrSize) & ~kRdMask) == kPushRegPattern) {
- pc_ -= 2 * kInstrSize;
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
- }
- }
}
@@ -1367,195 +1398,11 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
positions_recorder()->WriteRecordedPositions();
}
addrmod2(cond | B26 | L, dst, src);
-
- // Eliminate pattern: push(ry), pop(rx)
- // str(ry, MemOperand(sp, 4, NegPreIndex), al)
- // ldr(rx, MemOperand(sp, 4, PostIndex), al)
- // Both instructions can be eliminated if ry = rx.
- // If ry != rx, a register copy from ry to rx is inserted
- // after eliminating the push and the pop instructions.
- if (can_peephole_optimize(2)) {
- Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
- Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
-
- if (IsPush(push_instr) && IsPop(pop_instr)) {
- if (Instruction::RdValue(pop_instr) != Instruction::RdValue(push_instr)) {
- // For consecutive push and pop on different registers,
- // we delete both the push & pop and insert a register move.
- // push ry, pop rx --> mov rx, ry
- Register reg_pushed, reg_popped;
- reg_pushed = GetRd(push_instr);
- reg_popped = GetRd(pop_instr);
- pc_ -= 2 * kInstrSize;
- // Insert a mov instruction, which is better than a pair of push & pop
- mov(reg_popped, reg_pushed);
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop (diff reg) replaced by a reg move\n",
- pc_offset());
- }
- } else {
- // For consecutive push and pop on the same register,
- // both the push and the pop can be deleted.
- pc_ -= 2 * kInstrSize;
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
- }
- }
- }
- }
-
- if (can_peephole_optimize(2)) {
- Instr str_instr = instr_at(pc_ - 2 * kInstrSize);
- Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize);
-
- if ((IsStrRegFpOffset(str_instr) &&
- IsLdrRegFpOffset(ldr_instr)) ||
- (IsStrRegFpNegOffset(str_instr) &&
- IsLdrRegFpNegOffset(ldr_instr))) {
- if ((ldr_instr & kLdrStrInstrArgumentMask) ==
- (str_instr & kLdrStrInstrArgumentMask)) {
- // Pattern: Ldr/str same fp+offset, same register.
- //
- // The following:
- // str rx, [fp, #-12]
- // ldr rx, [fp, #-12]
- //
- // Becomes:
- // str rx, [fp, #-12]
-
- pc_ -= 1 * kInstrSize;
- if (FLAG_print_peephole_optimization) {
- PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset());
- }
- } else if ((ldr_instr & kLdrStrOffsetMask) ==
- (str_instr & kLdrStrOffsetMask)) {
- // Pattern: Ldr/str same fp+offset, different register.
- //
- // The following:
- // str rx, [fp, #-12]
- // ldr ry, [fp, #-12]
- //
- // Becomes:
- // str rx, [fp, #-12]
- // mov ry, rx
-
- Register reg_stored, reg_loaded;
- reg_stored = GetRd(str_instr);
- reg_loaded = GetRd(ldr_instr);
- pc_ -= 1 * kInstrSize;
- // Insert a mov instruction, which is better than ldr.
- mov(reg_loaded, reg_stored);
- if (FLAG_print_peephole_optimization) {
- PrintF("%x str/ldr (fp + same offset), diff reg \n", pc_offset());
- }
- }
- }
- }
-
- if (can_peephole_optimize(3)) {
- Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
- Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
- Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
- if (IsPush(mem_write_instr) &&
- IsPop(mem_read_instr)) {
- if ((IsLdrRegFpOffset(ldr_instr) ||
- IsLdrRegFpNegOffset(ldr_instr))) {
- if (Instruction::RdValue(mem_write_instr) ==
- Instruction::RdValue(mem_read_instr)) {
- // Pattern: push & pop from/to same register,
- // with a fp+offset ldr in between
- //
- // The following:
- // str rx, [sp, #-4]!
- // ldr rz, [fp, #-24]
- // ldr rx, [sp], #+4
- //
- // Becomes:
- // if(rx == rz)
- // delete all
- // else
- // ldr rz, [fp, #-24]
-
- if (Instruction::RdValue(mem_write_instr) ==
- Instruction::RdValue(ldr_instr)) {
- pc_ -= 3 * kInstrSize;
- } else {
- pc_ -= 3 * kInstrSize;
- // Reinsert back the ldr rz.
- emit(ldr_instr);
- }
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
- }
- } else {
- // Pattern: push & pop from/to different registers
- // with a fp+offset ldr in between
- //
- // The following:
- // str rx, [sp, #-4]!
- // ldr rz, [fp, #-24]
- // ldr ry, [sp], #+4
- //
- // Becomes:
- // if(ry == rz)
- // mov ry, rx;
- // else if(rx != rz)
- // ldr rz, [fp, #-24]
- // mov ry, rx
- // else if((ry != rz) || (rx == rz)) becomes:
- // mov ry, rx
- // ldr rz, [fp, #-24]
-
- Register reg_pushed, reg_popped;
- if (Instruction::RdValue(mem_read_instr) ==
- Instruction::RdValue(ldr_instr)) {
- reg_pushed = GetRd(mem_write_instr);
- reg_popped = GetRd(mem_read_instr);
- pc_ -= 3 * kInstrSize;
- mov(reg_popped, reg_pushed);
- } else if (Instruction::RdValue(mem_write_instr) !=
- Instruction::RdValue(ldr_instr)) {
- reg_pushed = GetRd(mem_write_instr);
- reg_popped = GetRd(mem_read_instr);
- pc_ -= 3 * kInstrSize;
- emit(ldr_instr);
- mov(reg_popped, reg_pushed);
- } else if ((Instruction::RdValue(mem_read_instr) !=
- Instruction::RdValue(ldr_instr)) ||
- (Instruction::RdValue(mem_write_instr) ==
- Instruction::RdValue(ldr_instr))) {
- reg_pushed = GetRd(mem_write_instr);
- reg_popped = GetRd(mem_read_instr);
- pc_ -= 3 * kInstrSize;
- mov(reg_popped, reg_pushed);
- emit(ldr_instr);
- }
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
- }
- }
- }
- }
- }
}
void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
addrmod2(cond | B26, src, dst);
-
- // Eliminate pattern: pop(), push(r)
- // add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
- // -> str r, [sp, 0], al
- if (can_peephole_optimize(2) &&
- // Pattern.
- instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
- instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
- pc_ -= 2 * kInstrSize;
- emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
- if (FLAG_print_peephole_optimization) {
- PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
- }
- }
}
@@ -1646,15 +1493,17 @@ void Assembler::stm(BlockAddrMode am,
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
ASSERT(code >= kDefaultStopCode);
- // The Simulator will handle the stop instruction and get the message address.
- // It expects to find the address just after the svc instruction.
- BlockConstPoolFor(2);
- if (code >= 0) {
- svc(kStopCode + code, cond);
- } else {
- svc(kStopCode + kMaxStopCode, cond);
+ {
+ // The Simulator will handle the stop instruction and get the message
+ // address. It expects to find the address just after the svc instruction.
+ BlockConstPoolScope block_const_pool(this);
+ if (code >= 0) {
+ svc(kStopCode + code, cond);
+ } else {
+ svc(kStopCode + kMaxStopCode, cond);
+ }
+ emit(reinterpret_cast<Instr>(msg));
}
- emit(reinterpret_cast<Instr>(msg));
#else // def __arm__
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
if (cond != al) {
@@ -1793,45 +1642,6 @@ void Assembler::ldc2(Coprocessor coproc,
}
-void Assembler::stc(Coprocessor coproc,
- CRegister crd,
- const MemOperand& dst,
- LFlag l,
- Condition cond) {
- addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
-}
-
-
-void Assembler::stc(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l,
- Condition cond) {
- // Unindexed addressing.
- ASSERT(is_uint8(option));
- emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
- coproc*B8 | (option & 255));
-}
-
-
-void Assembler::stc2(Coprocessor
- coproc, CRegister crd,
- const MemOperand& dst,
- LFlag l) { // v5 and above
- stc(coproc, crd, dst, l, kSpecialCondition);
-}
-
-
-void Assembler::stc2(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l) { // v5 and above
- stc(coproc, crd, rn, option, l, kSpecialCondition);
-}
-
-
// Support for VFP.
void Assembler::vldr(const DwVfpRegister dst,
@@ -2004,6 +1814,88 @@ void Assembler::vstr(const SwVfpRegister src,
}
+void Assembler::vldm(BlockAddrMode am,
+ Register base,
+ DwVfpRegister first,
+ DwVfpRegister last,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406A, A8-626.
+ // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
+ // first(15-12) | 1011(11-8) | (count * 2)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT_LE(first.code(), last.code());
+ ASSERT(am == ia || am == ia_w || am == db_w);
+ ASSERT(!base.is(pc));
+
+ int sd, d;
+ first.split_code(&sd, &d);
+ int count = last.code() - first.code() + 1;
+ emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
+ 0xB*B8 | count*2);
+}
+
+
+void Assembler::vstm(BlockAddrMode am,
+ Register base,
+ DwVfpRegister first,
+ DwVfpRegister last,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406A, A8-784.
+ // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
+ // first(15-12) | 1011(11-8) | (count * 2)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT_LE(first.code(), last.code());
+ ASSERT(am == ia || am == ia_w || am == db_w);
+ ASSERT(!base.is(pc));
+
+ int sd, d;
+ first.split_code(&sd, &d);
+ int count = last.code() - first.code() + 1;
+ emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
+ 0xB*B8 | count*2);
+}
+
+void Assembler::vldm(BlockAddrMode am,
+ Register base,
+ SwVfpRegister first,
+ SwVfpRegister last,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406A, A8-626.
+ // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
+ // first(15-12) | 1010(11-8) | (count)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT_LE(first.code(), last.code());
+ ASSERT(am == ia || am == ia_w || am == db_w);
+ ASSERT(!base.is(pc));
+
+ int sd, d;
+ first.split_code(&sd, &d);
+ int count = last.code() - first.code() + 1;
+ emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
+ 0xA*B8 | count);
+}
+
+
+void Assembler::vstm(BlockAddrMode am,
+ Register base,
+ SwVfpRegister first,
+ SwVfpRegister last,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406A, A8-784.
+ // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
+ // first(15-12) | 1010(11-8) | (count)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT_LE(first.code(), last.code());
+ ASSERT(am == ia || am == ia_w || am == db_w);
+ ASSERT(!base.is(pc));
+
+ int sd, d;
+ first.split_code(&sd, &d);
+ int count = last.code() - first.code() + 1;
+ emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
+ 0xA*B8 | count);
+}
+
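A hedged usage sketch for the new block transfers, written with the `__` masm shorthand used elsewhere in this patch (the callee-saved register aliases are defined in the header part of this diff): spilling and reloading d8..d15 costs one instruction each way.

__ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
// ... code that may clobber d8..d15 ...
__ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);

The db_w/ia_w pair keeps sp balanced: the store decrements before writing, the load increments after reading.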
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
@@ -2360,6 +2252,14 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
}
+void Assembler::vneg(const DwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond) {
+ emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
+ 0x5*B9 | B8 | B6 | src.code());
+}
+
+
void Assembler::vabs(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
@@ -2508,11 +2408,6 @@ bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
}
-void Assembler::BlockConstPoolFor(int instructions) {
- BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
-}
-
-
// Debugging.
void Assembler::RecordJSReturn() {
positions_recorder()->WriteRecordedPositions();
@@ -2576,8 +2471,8 @@ void Assembler::GrowBuffer() {
// to relocate any emitted relocation entries.
// Relocate pending relocation entries.
- for (int i = 0; i < num_prinfo_; i++) {
- RelocInfo& rinfo = prinfo_[i];
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION);
if (rinfo.rmode() != RelocInfo::JS_RETURN) {
@@ -2591,7 +2486,7 @@ void Assembler::db(uint8_t data) {
// No relocation info should be pending while using db. db is used
// to write pure data with no pointers and the constant pool should
// be emitted before using db.
- ASSERT(num_prinfo_ == 0);
+ ASSERT(num_pending_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@@ -2602,7 +2497,7 @@ void Assembler::dd(uint32_t data) {
// No relocation info should be pending while using dd. dd is used
// to write pure data with no pointers and the constant pool should
// be emitted before using dd.
- ASSERT(num_prinfo_ == 0);
+ ASSERT(num_pending_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
@@ -2619,11 +2514,14 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
|| RelocInfo::IsPosition(rmode));
// These modes do not need an entry in the constant pool.
} else {
- ASSERT(num_prinfo_ < kMaxNumPRInfo);
- prinfo_[num_prinfo_++] = rinfo;
+ ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
+ if (num_pending_reloc_info_ == 0) {
+ first_const_pool_use_ = pc_offset();
+ }
+ pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
- BlockConstPoolBefore(pc_offset() + kInstrSize);
+ BlockConstPoolFor(1);
}
if (rinfo.rmode() != RelocInfo::NONE) {
// Don't record external references unless the heap will be serialized.
@@ -2633,121 +2531,129 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
Serializer::TooLateToEnableNow();
}
#endif
- if (!Serializer::enabled() && !FLAG_debug_code) {
+ if (!Serializer::enabled() && !emit_debug_code()) {
return;
}
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- reloc_info_writer.Write(&rinfo);
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+ ASSERT(ast_id_for_reloc_info_ != kNoASTId);
+ RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_);
+ ast_id_for_reloc_info_ = kNoASTId;
+ reloc_info_writer.Write(&reloc_info_with_ast_id);
+ } else {
+ reloc_info_writer.Write(&rinfo);
+ }
}
}
-void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
- // Calculate the offset of the next check. It will be overwritten
- // when a const pool is generated or when const pools are being
- // blocked for a specific range.
- next_buffer_check_ = pc_offset() + kCheckConstInterval;
-
- // There is nothing to do if there are no pending relocation info entries.
- if (num_prinfo_ == 0) return;
-
- // We emit a constant pool at regular intervals of about kDistBetweenPools
- // or when requested by parameter force_emit (e.g. after each function).
- // We prefer not to emit a jump unless the max distance is reached or if we
- // are running low on slots, which can happen if a lot of constants are being
- // emitted (e.g. --debug-code and many static references).
- int dist = pc_offset() - last_const_pool_end_;
- if (!force_emit && dist < kMaxDistBetweenPools &&
- (require_jump || dist < kDistBetweenPools) &&
- // TODO(1236125): Cleanup the "magic" number below. We know that
- // the code generation will test every kCheckConstIntervalInst.
- // Thus we are safe as long as we generate less than 7 constant
- // entries per instruction.
- (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
- return;
+void Assembler::BlockConstPoolFor(int instructions) {
+ int pc_limit = pc_offset() + instructions * kInstrSize;
+ if (no_const_pool_before_ < pc_limit) {
+ // If there are some pending entries, the constant pool cannot be blocked
+ // further than first_const_pool_use_ + kMaxDistToPool.
+ ASSERT((num_pending_reloc_info_ == 0) ||
+ (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
+ no_const_pool_before_ = pc_limit;
}
- // If we did not return by now, we need to emit the constant pool soon.
+ if (next_buffer_check_ < no_const_pool_before_) {
+ next_buffer_check_ = no_const_pool_before_;
+ }
+}
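A worked example of the window this opens, assuming kInstrSize == 4:

// pc_offset() == 100; BlockConstPoolFor(2)
//   => pc_limit = 100 + 2 * 4 = 108
//   => no_const_pool_before_ = 108 and next_buffer_check_ >= 108,
//      so no pool can be emitted between the two protected instructions.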
- // However, some small sequences of instructions must not be broken up by the
- // insertion of a constant pool; such sequences are protected by setting
- // either const_pool_blocked_nesting_ or no_const_pool_before_, which are
- // both checked here. Also, recursive calls to CheckConstPool are blocked by
- // no_const_pool_before_.
- if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
- // Emission is currently blocked; make sure we try again as soon as
- // possible.
- if (const_pool_blocked_nesting_ > 0) {
- next_buffer_check_ = pc_offset() + kInstrSize;
- } else {
- next_buffer_check_ = no_const_pool_before_;
- }
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+ // Some short sequences of instructions must not be broken up by constant
+ // pool emission; such sequences are protected by calls to BlockConstPoolFor
+ // or by a BlockConstPoolScope.
+ if (is_const_pool_blocked()) {
// Something is wrong if emission is forced and blocked at the same time.
ASSERT(!force_emit);
return;
}
- int jump_instr = require_jump ? kInstrSize : 0;
+ // There is nothing to do if there are no pending constant pool entries.
+ if (num_pending_reloc_info_ == 0) {
+ // Calculate the offset of the next check.
+ next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+ return;
+ }
+
+ // We emit a constant pool when:
+ // * requested to do so by parameter force_emit (e.g. after each function).
+ // * the distance to the first instruction accessing the constant pool is
+ // kAvgDistToPool or more.
+ // * no jump is required and the distance to the first instruction accessing
+ // the constant pool is at least kMaxDistToPool / 2.
+ ASSERT(first_const_pool_use_ >= 0);
+ int dist = pc_offset() - first_const_pool_use_;
+ if (!force_emit && dist < kAvgDistToPool &&
+ (require_jump || (dist < (kMaxDistToPool / 2)))) {
+ return;
+ }
// Check that the code buffer is large enough before emitting the constant
- // pool and relocation information (include the jump over the pool and the
- // constant pool marker).
- int max_needed_space =
- jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
- while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
-
- // Block recursive calls to CheckConstPool.
- BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
- num_prinfo_*kInstrSize);
- // Don't bother to check for the emit calls below.
- next_buffer_check_ = no_const_pool_before_;
-
- // Emit jump over constant pool if necessary.
- Label after_pool;
- if (require_jump) b(&after_pool);
-
- RecordComment("[ Constant Pool");
-
- // Put down constant pool marker "Undefined instruction" as specified by
- // A3.1 Instruction set encoding.
- emit(0x03000000 | num_prinfo_);
-
- // Emit constant pool entries.
- for (int i = 0; i < num_prinfo_; i++) {
- RelocInfo& rinfo = prinfo_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION &&
- rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
- Instr instr = instr_at(rinfo.pc());
-
- // Instruction to patch must be a ldr/str [pc, #offset].
- // P and U set, B and W clear, Rn == pc, offset12 still 0.
- ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
- (2*B25 | P | U | pc.code()*B16));
- int delta = pc_ - rinfo.pc() - 8;
- ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
- if (delta < 0) {
- instr &= ~U;
- delta = -delta;
+ // pool (this includes the jump over the pool, the constant pool marker and
+ // the gap to the relocation information).
+ int jump_instr = require_jump ? kInstrSize : 0;
+ int needed_space = jump_instr + kInstrSize +
+ num_pending_reloc_info_ * kInstrSize + kGap;
+ while (buffer_space() <= needed_space) GrowBuffer();
+
+ {
+ // Block recursive calls to CheckConstPool.
+ BlockConstPoolScope block_const_pool(this);
+
+ // Emit jump over constant pool if necessary.
+ Label after_pool;
+ if (require_jump) {
+ b(&after_pool);
}
- ASSERT(is_uint12(delta));
- instr_at_put(rinfo.pc(), instr + delta);
- emit(rinfo.data());
- }
- num_prinfo_ = 0;
- last_const_pool_end_ = pc_offset();
- RecordComment("]");
+ RecordComment("[ Constant Pool");
+
+ // Put down constant pool marker "Undefined instruction" as specified by
+ // A5.6 (ARMv7) Instruction set encoding.
+ emit(kConstantPoolMarker | num_pending_reloc_info_);
+
+ // Emit constant pool entries.
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION &&
+ rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
+
+ Instr instr = instr_at(rinfo.pc());
+ // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+ ASSERT(IsLdrPcImmediateOffset(instr) &&
+ GetLdrRegisterImmediateOffset(instr) == 0);
+
+ int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+ // 0 is the smallest delta:
+ // ldr rd, [pc, #0]
+ // constant pool marker
+ // data
+ ASSERT(is_uint12(delta));
+
+ instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+ emit(rinfo.data());
+ }
+
+ num_pending_reloc_info_ = 0;
+ first_const_pool_use_ = -1;
- if (after_pool.is_linked()) {
- bind(&after_pool);
+ RecordComment("]");
+
+ if (after_pool.is_linked()) {
+ bind(&after_pool);
+ }
}
// Since a constant pool was just emitted, move the check offset forward by
// the standard interval.
- next_buffer_check_ = pc_offset() + kCheckConstInterval;
+ next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}
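For concreteness, a comment-style sketch of the code stream right after a one-entry pool is emitted with require_jump set (offsets illustrative; kPcLoadDelta is 8 because ARM reads pc as the current instruction plus 8):

// 0x00: ldr r0, [pc, #4]   ; patched: delta = 0x0c - 0x00 - 8 = 4
// 0x04: b after_pool       ; jump over the pool
// 0x08: <marker>           ; kConstantPoolMarker | 1
// 0x0c: .word <constant>   ; pool entry loaded by the ldr at 0x00
// after_pool: ...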
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index f5eb5075f..a97cf6b9c 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -32,7 +32,7 @@
// The original source code covered by the above license has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// A light-weight ARM Assembler
// Generates user mode instructions for the ARM architecture up to version 5
@@ -72,6 +72,7 @@ namespace internal {
struct Register {
static const int kNumRegisters = 16;
static const int kNumAllocatableRegisters = 8;
+ static const int kSizeInBytes = 4;
static int ToAllocationIndex(Register reg) {
ASSERT(reg.code() < kNumAllocatableRegisters);
@@ -166,13 +167,14 @@ struct SwVfpRegister {
// Double word VFP register.
struct DwVfpRegister {
- // d0 has been excluded from allocation. This is following ia32
- // where xmm0 is excluded. This should be revisited.
- // Currently d0 is used as a scratch register.
- // d1 has also been excluded from allocation to be used as a scratch
- // register as well.
static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 15;
+ // A few double registers are reserved: one as a scratch register and one to
+ // hold 0.0, which does not fit in the immediate field of vmov instructions.
+ // d14: 0.0
+ // d15: scratch register.
+ static const int kNumReservedRegisters = 2;
+ static const int kNumAllocatableRegisters = kNumRegisters -
+ kNumReservedRegisters;
static int ToAllocationIndex(DwVfpRegister reg) {
ASSERT(reg.code() != 0);
@@ -187,6 +189,7 @@ struct DwVfpRegister {
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
+ "d0",
"d1",
"d2",
"d3",
@@ -199,9 +202,7 @@ struct DwVfpRegister {
"d10",
"d11",
"d12",
- "d13",
- "d14",
- "d15"
+ "d13"
};
return names[index];
}
@@ -302,6 +303,11 @@ const DwVfpRegister d13 = { 13 };
const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
+// Aliases for double registers.
+const DwVfpRegister kFirstCalleeSavedDoubleReg = d8;
+const DwVfpRegister kLastCalleeSavedDoubleReg = d15;
+const DwVfpRegister kDoubleRegZero = d14;
+
// Coprocessor register
struct CRegister {
@@ -389,8 +395,11 @@ class Operand BASE_EMBEDDED {
INLINE(bool is_reg() const);
// Return true if this operand fits in one instruction so that no
- // 2-instruction solution with a load into the ip register is necessary.
- bool is_single_instruction() const;
+ // 2-instruction solution with a load into the ip register is necessary. If
+ // the instruction this operand is used for is a MOV or MVN instruction, the
+ // actual instruction must be passed in for this calculation. For other
+ // instructions instr is ignored.
+ bool is_single_instruction(Instr instr = 0) const;
bool must_use_constant_pool() const;
inline int32_t immediate() const {
@@ -447,6 +456,7 @@ class MemOperand BASE_EMBEDDED {
Register rn() const { return rn_; }
Register rm() const { return rm_; }
+ AddrMode am() const { return am_; }
bool OffsetIsUint12Encodable() const {
return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
@@ -469,43 +479,98 @@ class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
- static void Probe(bool portable);
+ static void Probe();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
+ ASSERT(initialized_);
if (f == VFP3 && !FLAG_enable_vfp3) return false;
return (supported_ & (1u << f)) != 0;
}
+#ifdef DEBUG
// Check whether a feature is currently enabled.
static bool IsEnabled(CpuFeature f) {
- return (enabled_ & (1u << f)) != 0;
+ ASSERT(initialized_);
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL) {
+ // When no isolate is available, work as if we're running in
+ // release mode.
+ return IsSupported(f);
+ }
+ unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
+ return (enabled & (1u << f)) != 0;
}
+#endif
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
+
public:
explicit Scope(CpuFeature f) {
+ unsigned mask = 1u << f;
ASSERT(CpuFeatures::IsSupported(f));
ASSERT(!Serializer::enabled() ||
- (found_by_runtime_probing_ & (1u << f)) == 0);
- old_enabled_ = CpuFeatures::enabled_;
- CpuFeatures::enabled_ |= 1u << f;
+ (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+ isolate_ = Isolate::UncheckedCurrent();
+ old_enabled_ = 0;
+ if (isolate_ != NULL) {
+ old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
+ isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+ }
}
- ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
+ ~Scope() {
+ ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+ if (isolate_ != NULL) {
+ isolate_->set_enabled_cpu_features(old_enabled_);
+ }
+ }
+
private:
+ Isolate* isolate_;
unsigned old_enabled_;
#else
+
public:
explicit Scope(CpuFeature f) {}
#endif
};
+ class TryForceFeatureScope BASE_EMBEDDED {
+ public:
+ explicit TryForceFeatureScope(CpuFeature f)
+ : old_supported_(CpuFeatures::supported_) {
+ if (CanForce()) {
+ CpuFeatures::supported_ |= (1u << f);
+ }
+ }
+
+ ~TryForceFeatureScope() {
+ if (CanForce()) {
+ CpuFeatures::supported_ = old_supported_;
+ }
+ }
+
+ private:
+ static bool CanForce() {
+ // It's only safe to temporarily force support of CPU features
+ // when there's only a single isolate, which is guaranteed when
+ // the serializer is enabled.
+ return Serializer::enabled();
+ }
+
+ const unsigned old_supported_;
+ };
+
private:
+#ifdef DEBUG
+ static bool initialized_;
+#endif
static unsigned supported_;
- static unsigned enabled_;
static unsigned found_by_runtime_probing_;
+
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
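A hedged sketch of how Scope is meant to guard optional VFP code (the `__` shorthand and the vmov signature are from this patch; in release builds the scope compiles to nothing):

if (CpuFeatures::IsSupported(VFP3)) {
  CpuFeatures::Scope scope(VFP3);  // Debug-only: marks VFP3 as enabled
  __ vmov(d0, 1.0);                // VFP instructions are legitimate here
} else {
  // integer-only fallback path
}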
@@ -533,7 +598,7 @@ extern const Instr kAndBicFlip;
-class Assembler : public Malloced {
+class Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
@@ -548,9 +613,12 @@ class Assembler : public Malloced {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
- Assembler(void* buffer, int buffer_size);
+ Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
+ // Overrides the default provided by FLAG_debug_code.
+ void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -889,16 +957,6 @@ class Assembler : public Malloced {
void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
LFlag l = Short); // v5 and above
- void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
- LFlag l = Short, Condition cond = al);
- void stc(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short, Condition cond = al);
-
- void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
- LFlag l = Short); // v5 and above
- void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short); // v5 and above
-
// Support for VFP.
// All these APIs support S0 to S31 and D0 to D15.
// Currently these APIs do not support extended D registers, i.e, D16 to D31.
@@ -937,6 +995,30 @@ class Assembler : public Malloced {
const MemOperand& dst,
const Condition cond = al);
+ void vldm(BlockAddrMode am,
+ Register base,
+ DwVfpRegister first,
+ DwVfpRegister last,
+ Condition cond = al);
+
+ void vstm(BlockAddrMode am,
+ Register base,
+ DwVfpRegister first,
+ DwVfpRegister last,
+ Condition cond = al);
+
+ void vldm(BlockAddrMode am,
+ Register base,
+ SwVfpRegister first,
+ SwVfpRegister last,
+ Condition cond = al);
+
+ void vstm(BlockAddrMode am,
+ Register base,
+ SwVfpRegister first,
+ SwVfpRegister last,
+ Condition cond = al);
+
void vmov(const DwVfpRegister dst,
double imm,
const Condition cond = al);
@@ -989,6 +1071,9 @@ class Assembler : public Malloced {
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
+ void vneg(const DwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond = al);
void vabs(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
@@ -1079,10 +1164,6 @@ class Assembler : public Malloced {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
};
- // Postpone the generation of the constant pool for the specified number of
- // instructions.
- void BlockConstPoolFor(int instructions);
-
// Debugging
// Mark address of the ExitJSFrame code.
@@ -1091,6 +1172,10 @@ class Assembler : public Malloced {
// Mark address of a debug break slot.
void RecordDebugBreakSlot();
+ // Record the AST id of the CallIC being compiled, so that it can be placed
+ // in the relocation information.
+ void RecordAstId(unsigned ast_id) { ast_id_for_reloc_info_ = ast_id; }
+
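A hedged sketch of the intended call-site pairing (expr->id() stands in for whatever AST id the caller has; RecordRelocInfo consumes the recorded id and resets it to kNoASTId):

__ RecordAstId(expr->id());  // stash the id for the next reloc entry
__ Call(ic, RelocInfo::CODE_TARGET_WITH_ID);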
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
@@ -1106,12 +1191,6 @@ class Assembler : public Malloced {
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
- bool can_peephole_optimize(int instructions) {
- if (!allow_peephole_optimization_) return false;
- if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
- return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
- }
-
// Read/patch instructions
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
static void instr_at_put(byte* pc, Instr instr) {
@@ -1144,10 +1223,27 @@ class Assembler : public Malloced {
static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
- // Check if is time to emit a constant pool for pending reloc info entries
+ // Constants in pools are accessed via pc relative addressing, which can
+ // reach +/-4KB thereby defining a maximum distance between the instruction
+ // and the accessed constant.
+ static const int kMaxDistToPool = 4*KB;
+ static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize;
+
+ // Postpone the generation of the constant pool for the specified number of
+ // instructions.
+ void BlockConstPoolFor(int instructions);
+
+ // Check whether it is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
protected:
+ // Relocation for a type-recording IC has the AST id added to it. This
+ // member variable is a way to pass the information from the call site to
+ // the relocation info.
+ unsigned ast_id_for_reloc_info_;
+
+ bool emit_debug_code() const { return emit_debug_code_; }
+
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Read/patch instructions
@@ -1162,18 +1258,37 @@ class Assembler : public Malloced {
// Patch branch instruction at pos to branch to given branch target pos
void target_at_put(int pos, int target_pos);
- // Block the emission of the constant pool before pc_offset
- void BlockConstPoolBefore(int pc_offset) {
- if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
- }
-
+ // Prevent constant pool emission until EndBlockConstPool is called.
+ // Calls to this function can be nested but must be followed by an equal
+ // number of calls to EndBlockConstPool.
void StartBlockConstPool() {
- const_pool_blocked_nesting_++;
+ if (const_pool_blocked_nesting_++ == 0) {
+ // Prevent constant pool checks happening by setting the next check to
+ // the biggest possible offset.
+ next_buffer_check_ = kMaxInt;
+ }
}
+
+ // Resume constant pool emission. Needs to be called as many times as
+ // StartBlockConstPool to have an effect.
void EndBlockConstPool() {
- const_pool_blocked_nesting_--;
+ if (--const_pool_blocked_nesting_ == 0) {
+ // Check the constant pool hasn't been blocked for too long.
+ ASSERT((num_pending_reloc_info_ == 0) ||
+ (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
+ // Two cases:
+ // * no_const_pool_before_ >= next_buffer_check_ and the emission is
+ // still blocked
+ // * no_const_pool_before_ < next_buffer_check_ and the next emit will
+ // trigger a check.
+ next_buffer_check_ = no_const_pool_before_;
+ }
+ }
+
+ bool is_const_pool_blocked() const {
+ return (const_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_const_pool_before_);
}
- bool is_const_pool_blocked() const { return const_pool_blocked_nesting_ > 0; }
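The nesting contract, sketched (calls must balance; the BlockConstPoolScope used throughout this patch is the RAII wrapper that guarantees it):

StartBlockConstPool();    // nesting 0 -> 1: next_buffer_check_ = kMaxInt
StartBlockConstPool();    // nesting 1 -> 2: no further effect
EndBlockConstPool();      // nesting 2 -> 1: still blocked
EndBlockConstPool();      // nesting 1 -> 0: next_buffer_check_ re-armed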
private:
// Code buffer:
@@ -1183,9 +1298,6 @@ class Assembler : public Malloced {
// True if the assembler owns the buffer, false if buffer is external.
bool own_buffer_;
- // Buffer size and constant pool distance are checked together at regular
- // intervals of kBufferCheckInterval emitted bytes
- static const int kBufferCheckInterval = 1*KB/2;
int next_buffer_check_; // pc offset of next buffer check
// Code generation
@@ -1210,40 +1322,41 @@ class Assembler : public Malloced {
// expensive. By default we only check again once a number of instructions
// has been generated. That also means that the sizing of the buffers is not
// an exact science, and that we rely on some slop to not overrun buffers.
- static const int kCheckConstIntervalInst = 32;
- static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
-
+ static const int kCheckPoolIntervalInst = 32;
+ static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
- // Pools are emitted after function return and in dead code at (more or less)
- // regular intervals of kDistBetweenPools bytes
- static const int kDistBetweenPools = 1*KB;
- // Constants in pools are accessed via pc relative addressing, which can
- // reach +/-4KB thereby defining a maximum distance between the instruction
- // and the accessed constant. We satisfy this constraint by limiting the
- // distance between pools.
- static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
+ // Average distance between a constant pool and the first instruction
+ // accessing the constant pool. Longer distance should result in less I-cache
+ // pollution.
+ // In practice the distance will be smaller since constant pool emission is
+ // forced after function return and sometimes after unconditional branches.
+ static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
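Plugging in the constants above: kCheckPoolInterval = 32 * 4 = 128 bytes and kMaxDistToPool = 4 * 1024 = 4096 bytes, so kAvgDistToPool = 4096 - 128 = 3968 bytes.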
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
- // Keep track of the last emitted pool to guarantee a maximal distance
- int last_const_pool_end_; // pc offset following the last constant pool
+ // Keep track of the first instruction requiring a constant pool entry
+ // since the previous constant pool was emitted.
+ int first_const_pool_use_;
// Relocation info generation
// Each relocation is encoded as a variable size value
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
+
// Relocation info records are also used during code generation as temporary
// containers for constants and code target addresses until they are emitted
// to the constant pool. These pending relocation info records are temporarily
// stored in a separate buffer until a constant pool is emitted.
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
- static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
- RelocInfo prinfo_[kMaxNumPRInfo]; // the buffer of pending relocation info
- int num_prinfo_; // number of pending reloc info entries in the buffer
+
+ // The buffer of pending relocation info.
+ RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
+ // Number of pending reloc info entries in the buffer.
+ int num_pending_reloc_info_;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
@@ -1275,7 +1388,7 @@ class Assembler : public Malloced {
friend class BlockConstPoolScope;
PositionsRecorder positions_recorder_;
- bool allow_peephole_optimization_;
+ bool emit_debug_code_;
friend class PositionsRecorder;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 6e8fe28a2..f87fd8383 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,7 +29,7 @@
#if defined(V8_TARGET_ARCH_ARM)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
#include "deoptimizer.h"
#include "full-codegen.h"
@@ -68,7 +68,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// JumpToExternalReference expects r0 to contain the number of arguments
// including the receiver and the extra arguments.
__ add(r0, r0, Operand(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id));
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
@@ -310,6 +310,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// construct call and normal call.
static void ArrayNativeCode(MacroAssembler* masm,
Label* call_generic_code) {
+ Counters* counters = masm->isolate()->counters();
Label argc_one_or_more, argc_two_or_more;
// Check for array construction with zero arguments or one.
@@ -325,7 +326,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
r5,
JSArray::kPreallocatedArrayElements,
call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1, r3, r4);
+ __ IncrementCounter(counters->array_function_native(), 1, r3, r4);
// Setup return value, remove receiver from stack and return.
__ mov(r0, r2);
__ add(sp, sp, Operand(kPointerSize));
@@ -361,7 +362,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
r7,
true,
call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1, r2, r4);
+ __ IncrementCounter(counters->array_function_native(), 1, r2, r4);
// Setup return value, remove receiver and argument from stack and return.
__ mov(r0, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
@@ -385,7 +386,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
r7,
false,
call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1, r2, r6);
+ __ IncrementCounter(counters->array_function_native(), 1, r2, r6);
// Fill arguments as array elements. Copy from the top of the stack (last
// element) to the array backing store filling it backwards. Note:
@@ -428,7 +429,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
GenerateLoadArrayFunction(masm, r1);
if (FLAG_debug_code) {
- // Initial map for the builtin Array function shoud be a map.
+ // Initial map for the builtin Array functions should be maps.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ tst(r2, Operand(kSmiTagMask));
__ Assert(ne, "Unexpected initial map for Array function");
@@ -442,8 +443,9 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Jump to the generic array code if the specialized code cannot handle
// the construction.
__ bind(&generic_array_code);
- Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
- Handle<Code> array_code(code);
+
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->ArrayCodeGeneric();
__ Jump(array_code, RelocInfo::CODE_TARGET);
}
@@ -458,11 +460,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
Label generic_constructor;
if (FLAG_debug_code) {
- // The array construct code is only set for the builtin Array function which
- // always have a map.
- GenerateLoadArrayFunction(masm, r2);
- __ cmp(r1, r2);
- __ Assert(eq, "Unexpected Array function");
+ // The array construct code is only set for the builtin and internal
+ // Array functions which always have a map.
// Initial map for the builtin Array function should be a map.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ tst(r2, Operand(kSmiTagMask));
@@ -477,8 +476,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
- Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
- Handle<Code> generic_construct_stub(code);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
}
@@ -491,7 +490,8 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- __ IncrementCounter(&Counters::string_ctor_calls, 1, r2, r3);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_ctor_calls(), 1, r2, r3);
Register function = r1;
if (FLAG_debug_code) {
@@ -521,7 +521,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
r5, // Scratch.
false, // Is it a Smi?
&not_cached);
- __ IncrementCounter(&Counters::string_ctor_cached_number, 1, r3, r4);
+ __ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4);
__ bind(&argument_is_string);
// ----------- S t a t e -------------
@@ -575,16 +575,16 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ tst(r3, Operand(kIsNotStringMask));
__ b(ne, &convert_argument);
__ mov(argument, r0);
- __ IncrementCounter(&Counters::string_ctor_conversions, 1, r3, r4);
+ __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
__ b(&argument_is_string);
// Invoke the conversion builtin and put the result into r2.
__ bind(&convert_argument);
__ push(function); // Preserve the function.
- __ IncrementCounter(&Counters::string_ctor_conversions, 1, r3, r4);
+ __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
__ EnterInternalFrame();
__ push(r0);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_JS);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
__ LeaveInternalFrame();
__ pop(function);
__ mov(argument, r0);
@@ -600,7 +600,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// At this point the argument is already a string. Call runtime to
// create a string wrapper.
__ bind(&gc_required);
- __ IncrementCounter(&Counters::string_ctor_gc_required, 1, r3, r4);
+ __ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
__ EnterInternalFrame();
__ push(argument);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
@@ -619,8 +619,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
Label non_function_call;
// Check that the function is not a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &non_function_call);
+ __ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &non_function_call);
@@ -636,7 +635,8 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ __ SetCallKind(r5, CALL_AS_METHOD);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
@@ -647,6 +647,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
+ Isolate* isolate = masm->isolate();
+
// Enter a construct frame.
__ EnterConstructFrame();
@@ -662,7 +664,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address();
+ ExternalReference::debug_step_in_fp_address(isolate);
__ mov(r2, Operand(debug_step_in_fp));
__ ldr(r2, MemOperand(r2));
__ tst(r2, r2);
@@ -672,8 +674,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Load the initial map and verify that it is in fact a map.
// r1: constructor function
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &rt_call);
+ __ JumpIfSmi(r2, &rt_call);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
__ b(ne, &rt_call);
@@ -908,14 +909,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function
if (is_api_function) {
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- Handle<Code> code = Handle<Code>(
- Builtins::builtin(Builtins::HandleApiCallConstruct));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
ParameterCount expected(0);
__ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION);
+ RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
} else {
ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
// Pop the function from the stack.
@@ -942,12 +944,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// sp[0]: receiver (newly allocated object)
// sp[1]: constructor function
// sp[2]: number of arguments (smi-tagged)
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &use_receiver);
+ __ JumpIfSmi(r0, &use_receiver);
// If the type of the result (stored in its map) is less than
- // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r3, r3, FIRST_JS_OBJECT_TYPE);
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &exit);
// Throw away the result of the constructor invocation and use the
@@ -966,7 +967,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ LeaveConstructFrame();
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize));
- __ IncrementCounter(&Counters::constructed_objects, 1, r1, r2);
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
__ Jump(lr);
}
@@ -1006,7 +1007,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Set up the roots register.
- ExternalReference roots_address = ExternalReference::roots_address();
+ ExternalReference roots_address =
+ ExternalReference::roots_address(masm->isolate());
__ mov(r10, Operand(roots_address));
// Push the function and the receiver onto the stack.
@@ -1042,11 +1044,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code and pass argc as r0.
__ mov(r0, Operand(r3));
if (is_construct) {
- __ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+ __ Call(masm->isolate()->builtins()->JSConstructCall(),
RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
// Exit the JS frame and remove the parameters (except function), and return.
@@ -1074,12 +1077,17 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Preserve the function.
__ push(r1);
+ // Push call kind information.
+ __ push(r5);
// Push the function on the stack as the argument to the runtime function.
__ push(r1);
__ CallRuntime(Runtime::kLazyCompile, 1);
// Calculate the entry point.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Restore call kind information.
+ __ pop(r5);
// Restore saved function.
__ pop(r1);
@@ -1097,12 +1105,17 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Preserve the function.
__ push(r1);
+ // Push call kind information.
+ __ push(r5);
// Push the function on the stack as the argument to the runtime function.
__ push(r1);
__ CallRuntime(Runtime::kLazyRecompile, 1);
// Calculate the entry point.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Restore call kind information.
+ __ pop(r5);
// Restore saved function.
__ pop(r1);
@@ -1170,9 +1183,11 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- // Probe the CPU to set the supported features, because this builtin
- // may be called before the initialization performs CPU setup.
- CpuFeatures::Probe(false);
+ CpuFeatures::TryForceFeatureScope scope(VFP3);
+ if (!CpuFeatures::IsSupported(VFP3)) {
+ __ Abort("Unreachable code: Cannot optimize without VFP3 support.");
+ return;
+ }
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
@@ -1218,8 +1233,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// r0: actual number of arguments
Label non_function;
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &non_function);
+ __ JumpIfSmi(r1, &non_function);
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &non_function);
@@ -1233,31 +1247,33 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Do not transform the receiver for strict mode functions.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
kSmiTagSize)));
__ b(ne, &shift_arguments);
+ // Do not transform the receiver for native (Compilerhints already in r3).
+ __ tst(r3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, &shift_arguments);
+
// Compute the receiver in non-strict mode.
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ ldr(r2, MemOperand(r2, -kPointerSize));
// r0: actual number of arguments
// r1: function
// r2: first argument
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &convert_to_object);
+ __ JumpIfSmi(r2, &convert_to_object);
- __ LoadRoot(r3, Heap::kNullValueRootIndex);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ cmp(r2, r3);
__ b(eq, &use_global_receiver);
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r3, Heap::kNullValueRootIndex);
__ cmp(r2, r3);
__ b(eq, &use_global_receiver);
- __ CompareObjectType(r2, r3, r3, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &convert_to_object);
- __ cmp(r3, Operand(LAST_JS_OBJECT_TYPE));
- __ b(le, &shift_arguments);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(r2, r3, r3, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, &shift_arguments);
__ bind(&convert_to_object);
__ EnterInternalFrame(); // In order to preserve argument count.
@@ -1265,7 +1281,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ push(r0);
__ push(r2);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ mov(r2, r0);
__ pop(r0);
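
Taken together, this hunk implements the receiver fix-up rules for non-strict calls: strict-mode and native functions keep their receiver untouched, undefined and null are replaced by the global receiver, and other primitives are boxed via TO_OBJECT. A compilable sketch of that ladder over a deliberately simplified value model (all types and helpers below are illustrative, not V8's API):

// Hypothetical, simplified model; only the branch structure mirrors the
// assembly above.
enum class Kind { kUndefined, kNull, kSmi, kOtherPrimitive, kSpecObject };

struct Value { Kind kind; };
struct Function { bool strict_mode; bool native; };

Value GlobalReceiver() { return Value{Kind::kSpecObject}; }  // stand-in
Value ToObject(Value) { return Value{Kind::kSpecObject}; }   // wraps primitives

Value ComputeReceiver(const Function& fn, Value receiver) {
  if (fn.strict_mode || fn.native) return receiver;          // shift_arguments
  if (receiver.kind == Kind::kUndefined || receiver.kind == Kind::kNull)
    return GlobalReceiver();                                 // use_global_receiver
  if (receiver.kind == Kind::kSpecObject) return receiver;   // already an object
  return ToObject(receiver);                                 // convert_to_object
}
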
@@ -1335,8 +1351,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Expected number of arguments is 0 for CALL_NON_FUNCTION.
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
- __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
+ __ SetCallKind(r5, CALL_AS_METHOD);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
__ bind(&function);
}
@@ -1350,12 +1367,15 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(r2, Operand(r2, ASR, kSmiTagSize));
__ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ SetCallKind(r5, CALL_AS_METHOD);
__ cmp(r2, r0); // Check formal and actual parameter counts.
- __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET, ne);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET,
+ ne);
ParameterCount expected(0);
- __ InvokeCode(r3, expected, expected, JUMP_FUNCTION);
+ __ InvokeCode(r3, expected, expected, JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
@@ -1372,7 +1392,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(r0);
__ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array
__ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS);
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
  // Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -1390,7 +1410,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ push(r1);
__ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_JS);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
// End of stack check.
// Push current limit and index.
@@ -1410,14 +1430,17 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ ldr(r0, MemOperand(fp, kRecvOffset));
// Do not transform the receiver for strict mode functions.
- __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ __ ldr(r2, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
kSmiTagSize)));
__ b(ne, &push_receiver);
+  // Do not transform the receiver for native (compiler hints already in r2).
+ __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, &push_receiver);
+
// Compute the receiver in non-strict mode.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &call_to_object);
+ __ JumpIfSmi(r0, &call_to_object);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ cmp(r0, r1);
__ b(eq, &use_global_receiver);
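
Note that every compiler-hint test above offsets the bit index by kSmiTagSize. The hints word is stored as a smi, so each hint bit sits one position higher than its untagged index. A minimal illustration (the one-bit tag of 32-bit V8 is an assumption of this sketch):

#include <cstdint>

const int kSmiTagSize = 1;  // assumption: 32-bit V8's one-bit smi tag

bool TestSmiTaggedHint(uint32_t tagged_hints, int hint_bit_index) {
  // Bit k of the untagged hints word lives at k + kSmiTagSize once tagged.
  return (tagged_hints & (1u << (hint_bit_index + kSmiTagSize))) != 0;
}
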
@@ -1427,16 +1450,15 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Check if the receiver is already a JavaScript object.
// r0: receiver
- __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &call_to_object);
- __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
- __ b(le, &push_receiver);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, &push_receiver);
// Convert the receiver to a regular object.
// r0: receiver
__ bind(&call_to_object);
__ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ b(&push_receiver);
// Use the current global receiver object as the receiver.
@@ -1486,7 +1508,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
ParameterCount actual(r0);
__ mov(r0, Operand(r0, ASR, kSmiTagSize));
__ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ InvokeFunction(r1, actual, CALL_FUNCTION);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
// Tear down the internal frame and remove function, receiver and args.
__ LeaveInternalFrame();
@@ -1523,6 +1546,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r1 : function (passed through to callee)
// -- r2 : expected number of arguments
// -- r3 : code entry to call
+ // -- r5 : call kind information
// -----------------------------------
Label invoke, dont_adapt_arguments;
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index e8f217d27..452e08cad 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -55,23 +55,30 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register rhs);
+// Check if the operand is a heap number.
+static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
+ Register scratch1, Register scratch2,
+ Label* not_a_heap_number) {
+ __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
+ __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch1, scratch2);
+ __ b(ne, not_a_heap_number);
+}
+
+
void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in r0.
Label check_heap_number, call_builtin;
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &check_heap_number);
+ __ JumpIfNotSmi(r0, &check_heap_number);
__ Ret();
__ bind(&check_heap_number);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &call_builtin);
+ EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin);
__ Ret();
__ bind(&call_builtin);
__ push(r0);
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_JS);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}
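
In outline, the rewritten stub keeps ToNumber's fast path as a pure identity check; a sketch with hypothetical helpers (the declarations below are not V8's):

struct Object;
bool IsSmi(const Object* x);             // assumed predicate
bool IsHeapNumber(const Object* x);      // assumed predicate
Object* CallToNumberBuiltin(Object* x);  // assumed slow path

Object* ToNumberFastPath(Object* x) {
  if (IsSmi(x) || IsHeapNumber(x)) return x;  // already a number: return it
  return CallToNumberBuiltin(x);              // defer to Builtins::TO_NUMBER
}
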
@@ -91,11 +98,15 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
&gc,
TAG_OBJECT);
+ int map_index = strict_mode_ == kStrictMode
+ ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
+ : Context::FUNCTION_MAP_INDEX;
+
// Compute the function map in the current global context and set that
// as the map of the allocated object.
__ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
- __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ ldr(r2, MemOperand(r2, Context::SlotOffset(map_index)));
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
// Initialize the rest of the function. We don't have to update the
@@ -146,7 +157,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ ldr(r3, MemOperand(sp, 0));
// Setup the object header.
- __ LoadRoot(r2, Heap::kContextMapRootIndex);
+ __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
@@ -154,11 +165,10 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the fixed slots.
__ mov(r1, Operand(Smi::FromInt(0)));
__ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- // Copy the global object from the surrounding context.
+ // Copy the global object from the previous context.
__ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -175,7 +185,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Need to collect. Call into runtime system.
__ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+ __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}
@@ -304,13 +314,9 @@ class ConvertToDoubleStub : public CodeStub {
void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-#ifndef BIG_ENDIAN_FLOATING_POINT
Register exponent = result1_;
Register mantissa = result2_;
-#else
- Register exponent = result2_;
- Register mantissa = result1_;
-#endif
+
Label not_special;
// Convert from Smi to integer.
__ mov(source_, Operand(source_, ASR, kSmiTagSize));
@@ -364,138 +370,6 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
}
-class FloatingPointHelper : public AllStatic {
- public:
-
- enum Destination {
- kVFPRegisters,
- kCoreRegisters
- };
-
-
- // Loads smis from r0 and r1 (right and left in binary operations) into
-  // floating point registers. Depending on the destination, the values end up
-  // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the
-  // destination is floating point registers, VFP3 must be supported. If core
-  // registers are requested when VFP3 is supported, d6 and d7 will be
-  // scratched.
- static void LoadSmis(MacroAssembler* masm,
- Destination destination,
- Register scratch1,
- Register scratch2);
-
- // Loads objects from r0 and r1 (right and left in binary operations) into
-  // floating point registers. Depending on the destination, the values end up
-  // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the
-  // destination is floating point registers, VFP3 must be supported. If core
-  // registers are requested when VFP3 is supported, d6 and d7 will still be
-  // scratched. If either r0 or r1 is not a number (neither a smi nor a heap
-  // number object), the not_number label is jumped to with r0 and r1 intact.
- static void LoadOperands(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-
- // Loads the number from object into dst as a 32-bit integer if possible. If
-  // the object cannot be converted to a 32-bit integer, control continues at
-  // the label not_int32. If VFP is supported, double_scratch is used
-  // but not scratch2.
-  // Floating point values in the 32-bit integer range will be rounded
-  // to an integer.
- static void LoadNumberAsInteger(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- DwVfpRegister double_scratch,
- Label* not_int32);
-
- // Load the number from object into double_dst in the double format.
- // Control will jump to not_int32 if the value cannot be exactly represented
- // by a 32-bit integer.
-  // Floating point values in the 32-bit integer range that are not exact
-  // integers won't be loaded.
- static void LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- SwVfpRegister single_scratch,
- Label* not_int32);
-
- // Loads the number from object into dst as a 32-bit integer.
- // Control will jump to not_int32 if the object cannot be exactly represented
- // by a 32-bit integer.
-  // Floating point values in the 32-bit integer range that are not exact
-  // integers won't be converted.
- // scratch3 is not used when VFP3 is supported.
- static void LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch,
- Label* not_int32);
-
-  // Generate non-VFP3 code to check if a double can be exactly represented by a
- // 32-bit integer. This does not check for 0 or -0, which need
- // to be checked for separately.
- // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
- // through otherwise.
-  // src1 and src2 will be clobbered.
- //
- // Expected input:
- // - src1: higher (exponent) part of the double value.
- // - src2: lower (mantissa) part of the double value.
- // Output status:
-  // - dst: the higher 32 bits of the mantissa (mantissa[51:20]).
- // - src2: contains 1.
- // - other registers are clobbered.
- static void DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
- Register dst,
- Register scratch,
- Label* not_int32);
-
- // Generates code to call a C function to do a double operation using core
- // registers. (Used when VFP3 is not supported.)
- // This code never falls through, but returns with a heap number containing
- // the result in r0.
- // Register heapnumber_result must be a heap number in which the
- // result of the operation will be stored.
- // Requires the following layout on entry:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch);
-
- private:
- static void LoadNumber(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register object,
- DwVfpRegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-};
-
-
void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register scratch1,
@@ -519,7 +393,7 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
__ push(lr);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Write Smi from r1 to r1 and r0 in double format. r9 is scratch.
+ // Write Smi from r1 to r1 and r0 in double format.
__ mov(scratch1, Operand(r1));
ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
@@ -568,7 +442,8 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
// Handle loading a double from a heap number.
- if (CpuFeatures::IsSupported(VFP3) && destination == kVFPRegisters) {
+ if (CpuFeatures::IsSupported(VFP3) &&
+ destination == kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
// Load the double from tagged HeapNumber to double register.
__ sub(scratch1, object, Operand(kHeapObjectTag));
@@ -606,57 +481,69 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
}
-void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- DwVfpRegister double_scratch,
- Label* not_int32) {
+void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ DwVfpRegister double_scratch,
+ Label* not_number) {
if (FLAG_debug_code) {
__ AbortIfNotRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
}
- Label is_smi, done;
+ Label is_smi;
+ Label done;
+ Label not_in_int32_range;
+
__ JumpIfSmi(object, &is_smi);
__ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
__ cmp(scratch1, heap_number_map);
- __ b(ne, not_int32);
- __ ConvertToInt32(
- object, dst, scratch1, scratch2, double_scratch, not_int32);
+ __ b(ne, not_number);
+ __ ConvertToInt32(object,
+ dst,
+ scratch1,
+ scratch2,
+ double_scratch,
+ &not_in_int32_range);
+ __ jmp(&done);
+
+ __ bind(&not_in_int32_range);
+ __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+ __ EmitOutOfInt32RangeTruncate(dst,
+ scratch1,
+ scratch2,
+ scratch3);
__ jmp(&done);
+
__ bind(&is_smi);
__ SmiUntag(dst, object);
__ bind(&done);
}
-void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- SwVfpRegister single_scratch,
- Label* not_int32) {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!heap_number_map.is(object) &&
- !heap_number_map.is(scratch1) &&
- !heap_number_map.is(scratch2));
+void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
+ Register int_scratch,
+ Destination destination,
+ DwVfpRegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register scratch2,
+ SwVfpRegister single_scratch) {
+ ASSERT(!int_scratch.is(scratch2));
+ ASSERT(!int_scratch.is(dst1));
+ ASSERT(!int_scratch.is(dst2));
- Label done, obj_is_not_smi;
+ Label done;
- __ JumpIfNotSmi(object, &obj_is_not_smi);
- __ SmiUntag(scratch1, object);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
- __ vmov(single_scratch, scratch1);
+ __ vmov(single_scratch, int_scratch);
__ vcvt_f64_s32(double_dst, single_scratch);
if (destination == kCoreRegisters) {
__ vmov(dst1, dst2, double_dst);
@@ -664,53 +551,79 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
} else {
Label fewer_than_20_useful_bits;
// Expected output:
- // | dst1 | dst2 |
+ // | dst2 | dst1 |
// | s | exp | mantissa |
// Check for zero.
- __ cmp(scratch1, Operand(0));
- __ mov(dst1, scratch1);
- __ mov(dst2, scratch1);
+ __ cmp(int_scratch, Operand(0));
+ __ mov(dst2, int_scratch);
+ __ mov(dst1, int_scratch);
__ b(eq, &done);
// Preload the sign of the value.
- __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC);
+ __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
// Get the absolute value of the object (as an unsigned integer).
- __ rsb(scratch1, scratch1, Operand(0), SetCC, mi);
+ __ rsb(int_scratch, int_scratch, Operand(0), SetCC, mi);
    // Get mantissa[51:20].
// Get the position of the first set bit.
- __ CountLeadingZeros(dst2, scratch1, scratch2);
- __ rsb(dst2, dst2, Operand(31));
+ __ CountLeadingZeros(dst1, int_scratch, scratch2);
+ __ rsb(dst1, dst1, Operand(31));
// Set the exponent.
- __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias));
- __ Bfi(dst1, scratch2, scratch2,
+ __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
+ __ Bfi(dst2, scratch2, scratch2,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
    // Clear the topmost set bit (it becomes the double's implicit bit).
__ mov(scratch2, Operand(1));
- __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2));
+ __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1));
- __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
// Get the number of bits to set in the lower part of the mantissa.
- __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+ __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
__ b(mi, &fewer_than_20_useful_bits);
// Set the higher 20 bits of the mantissa.
- __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2));
+ __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2));
__ rsb(scratch2, scratch2, Operand(32));
- __ mov(dst2, Operand(scratch1, LSL, scratch2));
+ __ mov(dst1, Operand(int_scratch, LSL, scratch2));
__ b(&done);
__ bind(&fewer_than_20_useful_bits);
- __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
- __ mov(scratch2, Operand(scratch1, LSL, scratch2));
- __ orr(dst1, dst1, scratch2);
- // Set dst2 to 0.
- __ mov(dst2, Operand(0));
+ __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ mov(scratch2, Operand(int_scratch, LSL, scratch2));
+ __ orr(dst2, dst2, scratch2);
+ // Set dst1 to 0.
+ __ mov(dst1, Operand(0));
}
+ __ bind(&done);
+}
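
The core-register branch of ConvertIntToDouble assembles the IEEE 754 encoding by hand: sign from the input, exponent from the position of the leading set bit (via CountLeadingZeros), and the mantissa split across two words after the implicit bit is dropped. The same computation in portable C++ (a sketch; __builtin_clz assumes GCC or Clang):

#include <cstdint>

uint64_t Int32ToDoubleBits(int32_t value) {
  if (value == 0) return 0;  // +0.0, matching the early-out above
  uint64_t sign = value < 0 ? (1ULL << 63) : 0;
  uint32_t mag = value < 0 ? 0u - static_cast<uint32_t>(value)
                           : static_cast<uint32_t>(value);
  int top_bit = 31 - __builtin_clz(mag);       // CountLeadingZeros analogue
  uint64_t exponent = static_cast<uint64_t>(top_bit) + 1023;  // add the bias
  // Shift the leading 1 up to bit 52, then mask it off (it is implicit).
  uint64_t mantissa =
      (static_cast<uint64_t>(mag) << (52 - top_bit)) & ((1ULL << 52) - 1);
  return sign | (exponent << 52) | mantissa;
}
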
+
+
+void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
+ Register object,
+ Destination destination,
+ DwVfpRegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ SwVfpRegister single_scratch,
+ Label* not_int32) {
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!heap_number_map.is(object) &&
+ !heap_number_map.is(scratch1) &&
+ !heap_number_map.is(scratch2));
+ Label done, obj_is_not_smi;
+
+ __ JumpIfNotSmi(object, &obj_is_not_smi);
+ __ SmiUntag(scratch1, object);
+ ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
+ scratch2, single_scratch);
__ b(&done);
__ bind(&obj_is_not_smi);
@@ -872,12 +785,11 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// Exponent greater than 31 cannot yield 32-bit integers.
// Also, a positive value with an exponent equal to 31 is outside of the
// signed 32-bit integer range.
- __ tst(src1, Operand(HeapNumber::kSignMask));
- __ cmp(scratch, Operand(30), eq); // Executed for positive. If exponent is 30
- // the gt condition will be "correct" and
- // the next instruction will be skipped.
- __ cmp(scratch, Operand(31), ne); // Executed for negative and positive where
- // exponent is not 30.
+ // Another way to put it is that if (exponent - signbit) > 30 then the
+ // number cannot be represented as an int32.
+ Register tmp = dst;
+ __ sub(tmp, scratch, Operand(src1, LSR, 31));
+ __ cmp(tmp, Operand(30));
__ b(gt, not_int32);
  // - Bits [21:0] in the mantissa are not all zero.
__ tst(src2, Operand(0x3fffff));
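
The new form of the range check leans on one observation: int32 covers [-2^31, 2^31 - 1], so a positive double needs an unbiased exponent of at most 30, while exponent 31 is admissible only for the single negative value -2^31. Folding the sign bit into the comparison yields the one subtract-and-compare above. A sketch of the predicate (the mantissa checks that follow are still required to rule out inexact values):

// Assumed inputs: the double's unbiased exponent and its sign.
bool ExponentPermitsInt32(int unbiased_exponent, bool is_negative) {
  // Positive values: exponent <= 30. Negative values: exponent <= 31,
  // which only -2^31 itself can satisfy exactly.
  return unbiased_exponent - (is_negative ? 1 : 0) <= 30;
}
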
@@ -926,21 +838,25 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
// Push the current return address before the C call. Return will be
// through pop(pc) below.
__ push(lr);
- __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
+ __ PrepareCallCFunction(0, 2, scratch);
+ if (masm->use_eabi_hardfloat()) {
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(d0, r0, r1);
+ __ vmov(d1, r2, r3);
+ }
// Call C routine that may not cause GC or other trouble.
- __ CallCFunction(ExternalReference::double_fp_operation(op), 4);
- // Store answer in the overwritable heap number.
-#if !defined(USE_ARM_EABI)
- // Double returned in fp coprocessor register 0 and 1, encoded as
- // register cr8. Offsets must be divisible by 4 for coprocessor so we
-  // need to subtract the tag from heap_number_result.
- __ sub(scratch, heap_number_result, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset));
-#else
- // Double returned in registers 0 and 1.
- __ Strd(r0, r1, FieldMemOperand(heap_number_result,
- HeapNumber::kValueOffset));
-#endif
+ __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
+ 0, 2);
+ // Store answer in the overwritable heap number. Double returned in
+ // registers r0 and r1 or in d0.
+ if (masm->use_eabi_hardfloat()) {
+ CpuFeatures::Scope scope(VFP3);
+ __ vstr(d0,
+ FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+ } else {
+ __ Strd(r0, r1, FieldMemOperand(heap_number_result,
+ HeapNumber::kValueOffset));
+ }
// Place heap_number_result in r0 and return to the pushed return address.
__ mov(r0, Operand(heap_number_result));
__ pop(pc);
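
The use_eabi_hardfloat() branch exists because the two ARM EABI variants pass doubles differently: softfp splits each double across a core-register pair, which vmov(d0, r0, r1) reassembles, while hardfp passes it directly in d0/d1 and returns it in d0. What a softfp word pair encodes, shown in host C++ (illustrative only):

#include <cstdint>
#include <cstring>

double DoubleFromWordPair(uint32_t lo_mantissa, uint32_t hi_sign_exponent) {
  // Matches the register layout in the comments above: r0/r2 hold the low
  // mantissa word, r1/r3 hold the sign, exponent, and top of the mantissa.
  uint64_t bits =
      (static_cast<uint64_t>(hi_sign_exponent) << 32) | lo_mantissa;
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  return d;
}
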
@@ -1008,19 +924,19 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// The two objects are identical. If we know that one of them isn't NaN then
// we now know they test equal.
if (cond != eq || !never_nan_nan) {
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
// so we do the second best thing - test it ourselves.
  // They are both equal and they are not both Smis; since they are identical,
  // neither of them is a Smi. If it's not a heap number, then return equal.
if (cond == lt || cond == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, slow);
} else {
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(eq, &heap_number);
// Comparing JS objects with <=, >= is complicated.
if (cond != eq) {
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(ge, slow);
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
@@ -1111,8 +1027,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
(lhs.is(r1) && rhs.is(r0)));
Label rhs_is_smi;
- __ tst(rhs, Operand(kSmiTagMask));
- __ b(eq, &rhs_is_smi);
+ __ JumpIfSmi(rhs, &rhs_is_smi);
// Lhs is a Smi. Check whether the rhs is a heap number.
__ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
@@ -1282,8 +1197,14 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
// Call a native function to do a comparison between two non-NaNs.
// Call C routine that may not cause GC or other trouble.
__ push(lr);
- __ PrepareCallCFunction(4, r5); // Two doubles count as 4 arguments.
- __ CallCFunction(ExternalReference::compare_doubles(), 4);
+ __ PrepareCallCFunction(0, 2, r5);
+ if (masm->use_eabi_hardfloat()) {
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(d0, r0, r1);
+ __ vmov(d1, r2, r3);
+ }
+ __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
+ 0, 2);
__ pop(pc); // Return.
}
}
@@ -1296,14 +1217,14 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
ASSERT((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));
- // If either operand is a JSObject or an oddball value, then they are
+ // If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
- // FIRST_JS_OBJECT_TYPE.
- __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE);
+ // FIRST_SPEC_OBJECT_TYPE.
+ __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, &first_non_object);
// Return non-zero (r0 is not zero)
@@ -1316,7 +1237,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
__ cmp(r2, Operand(ODDBALL_TYPE));
__ b(eq, &return_not_equal);
- __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -1393,9 +1314,9 @@ static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
__ Ret();
__ bind(&object_test);
- __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(lt, not_both_strings);
- __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, not_both_strings);
// If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
@@ -1436,6 +1357,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// number string cache for smis is just the smi value, and the hash for
// doubles is the xor of the upper and lower words. See
// Heap::GetNumberStringCache.
+ Isolate* isolate = masm->isolate();
Label is_smi;
Label load_result_from_cache;
if (!object_is_smi) {
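
The hash described in the comment above, written out in C++ (a sketch; V8 additionally masks the result with the cache size, which is omitted here):

#include <cstdint>
#include <cstring>

uint32_t NumberStringCacheHash(double value) {
  uint32_t words[2];                          // kDoubleSize == 8 assumed
  std::memcpy(words, &value, sizeof(words));
  return words[0] ^ words[1];                 // xor of upper and lower words
}
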
@@ -1446,7 +1368,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
scratch1,
Heap::kHeapNumberMapRootIndex,
not_found,
- true);
+ DONT_DO_SMI_CHECK);
STATIC_ASSERT(8 == kDoubleSize);
__ add(scratch1,
@@ -1497,7 +1419,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ bind(&load_result_from_cache);
__ ldr(result,
FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(&Counters::number_to_string_native,
+ __ IncrementCounter(isolate->counters()->number_to_string_native(),
1,
scratch1,
scratch2);
@@ -1533,8 +1455,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
if (include_smi_compare_) {
Label not_two_smis, smi_done;
__ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &not_two_smis);
+ __ JumpIfNotSmi(r2, &not_two_smis);
__ mov(r1, Operand(r1, ASR, 1));
__ sub(r0, r1, Operand(r0, ASR, 1));
__ Ret();
@@ -1557,8 +1478,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
__ and_(r2, lhs_, Operand(rhs_));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &not_smis);
+ __ JumpIfNotSmi(r2, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// 1) Return the answer.
// 2) Go to slow.
@@ -1573,6 +1493,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&both_loaded_as_doubles);
// The arguments have been converted to doubles and stored in d6 and d7, if
// VFP3 is supported, or in r0, r1, r2, and r3.
+ Isolate* isolate = masm->isolate();
if (CpuFeatures::IsSupported(VFP3)) {
__ bind(&lhs_not_nan);
CpuFeatures::Scope scope(VFP3);
@@ -1643,14 +1564,23 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
- __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
+ if (cc_ == eq) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(masm,
lhs_,
rhs_,
r2,
r3,
- r4,
- r5);
+ r4);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ lhs_,
+ rhs_,
+ r2,
+ r3,
+ r4,
+ r5);
+ }
// Never falls through to here.
__ bind(&slow);
@@ -1675,32 +1605,72 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_JS);
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
}
-// This stub does not handle the inlined cases (Smis, Booleans, undefined).
// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub uses VFP3 instructions.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ CpuFeatures::Scope scope(VFP3);
+
+ Label false_result, true_result, not_string;
+ const Register map = r9.is(tos_) ? r7 : r9;
+
+ // undefined -> false
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(tos_, ip);
+ __ b(eq, &false_result);
+
+ // Boolean -> its value
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(tos_, ip);
+ __ b(eq, &false_result);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(tos_, ip);
+ // "tos_" is a register and contains a non-zero value. Hence we implicitly
+ // return true if the equal condition is satisfied.
+ __ Ret(eq);
- Label false_result;
- Label not_heap_number;
- Register scratch = r9.is(tos_) ? r7 : r9;
+ // Smis: 0 -> false, all other -> true
+ __ tst(tos_, tos_);
+ __ b(eq, &false_result);
+ __ tst(tos_, Operand(kSmiTagMask));
+ // "tos_" is a register and contains a non-zero value. Hence we implicitly
+  // return true if the equal condition is satisfied.
+ __ Ret(eq);
+ // 'null' -> false
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(tos_, ip);
__ b(eq, &false_result);
- // HeapNumber => false iff +0, -0, or NaN.
- __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, ip);
- __ b(&not_heap_number, ne);
+ // Get the map of the heap object.
+ __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
+
+ // Undetectable -> false.
+ __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ tst(ip, Operand(1 << Map::kIsUndetectable));
+ __ b(&false_result, ne);
+
+ // JavaScript object -> true.
+ __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
+ // "tos_" is a register and contains a non-zero value. Hence we implicitly
+  // return true if the greater-than-or-equal condition is satisfied.
+ __ Ret(ge);
+
+ // String value -> false iff empty.
+ __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
+ __ b(&not_string, ge);
+ __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+ // Return string length as boolean value, i.e. return false iff length is 0.
+ __ Ret();
- __ sub(ip, tos_, Operand(kHeapObjectTag));
- __ vldr(d1, ip, HeapNumber::kValueOffset);
+ __ bind(&not_string);
+ // HeapNumber -> false iff +0, -0, or NaN.
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ __ b(&true_result, ne);
+ __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
__ VFPCompareAndSetFlags(d1, 0.0);
// "tos_" is a register, and contains a non zero value by default.
// Hence we only need to overwrite "tos_" with zero to return false for
@@ -1709,542 +1679,143 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
__ Ret();
- __ bind(&not_heap_number);
-
- // Check if the value is 'null'.
- // 'null' => false.
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(tos_, ip);
- __ b(&false_result, eq);
-
- // It can be an undetectable object.
- // Undetectable => false.
- __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(ip, Map::kBitFieldOffset));
- __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
- __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
- __ b(&false_result, eq);
-
- // JavaScript object => true.
- __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(gt);
-
- // Check for string
- __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(gt);
-
- // String value => false iff empty, i.e., length is zero
- __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
- // If length is zero, "tos_" contains zero ==> false.
- // If length is not zero, "tos_" contains a non-zero value ==> true.
+ // Return 1/0 for true/false in tos_.
+ __ bind(&true_result);
+ __ mov(tos_, Operand(1, RelocInfo::NONE));
__ Ret();
-
-  // Return 0 in "tos_" for false.
__ bind(&false_result);
__ mov(tos_, Operand(0, RelocInfo::NONE));
__ Ret();
}
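
The stub now inlines the full truthiness ladder rather than bailing out on the primitive cases. Its order, restated as a compilable sketch over a deliberately flattened value model (every field below is an assumption of this sketch, not V8's layout):

#include <cmath>

struct Value {
  bool is_undefined, is_true, is_false, is_null, is_smi;
  bool is_undetectable, is_spec_object, is_string, is_heap_number;
  int smi_value;
  int string_length;
  double number_value;
};

bool ToBoolean(const Value& v) {
  if (v.is_undefined || v.is_false) return false;
  if (v.is_true) return true;
  if (v.is_smi) return v.smi_value != 0;          // 0 -> false, rest -> true
  if (v.is_null) return false;
  if (v.is_undetectable) return false;
  if (v.is_spec_object) return true;              // JS objects are truthy
  if (v.is_string) return v.string_length != 0;   // "" is the only falsy string
  // Heap number: false iff +0, -0, or NaN.
  return v.number_value != 0.0 && !std::isnan(v.number_value);
}
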
-// We fall into this code if the operands were Smis, but the result was
-// not (e.g. overflow). We branch into this code (to the not_smi label) if
-// the operands were not both Smi. The operands are in r0 and r1. In order
-// to call the C-implemented binary fp operation routines we need to end up
-// with the double precision floating point operands in r0 and r1 (for the
-// value in r1) and r2 and r3 (for the value in r0).
-void GenericBinaryOpStub::HandleBinaryOpSlowCases(
- MacroAssembler* masm,
- Label* not_smi,
- Register lhs,
- Register rhs,
- const Builtins::JavaScript& builtin) {
- Label slow, slow_reverse, do_the_call;
- bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
-
- ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
- Register heap_number_map = r6;
-
- if (ShouldGenerateSmiCode()) {
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- // Smi-smi case (overflow).
- // Since both are Smis there is no heap number to overwrite, so allocate.
- // The new heap number is in r5. r3 and r7 are scratch.
- __ AllocateHeapNumber(
- r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
-
- // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
- // using registers d7 and d6 for the double values.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
- __ vmov(s15, r7);
- __ vcvt_f64_s32(d7, s15);
- __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
- __ vmov(s13, r7);
- __ vcvt_f64_s32(d6, s13);
- if (!use_fp_registers) {
- __ vmov(r2, r3, d7);
- __ vmov(r0, r1, d6);
- }
- } else {
- // Write Smi from rhs to r3 and r2 in double format. r9 is scratch.
- __ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r9);
- __ push(lr);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Write Smi from lhs to r1 and r0 in double format. r9 is scratch.
- __ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r9);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
- __ jmp(&do_the_call); // Tail call. No return.
- }
-
- // We branch here if at least one of r0 and r1 is not a Smi.
- __ bind(not_smi);
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- // After this point we have the left hand side in r1 and the right hand side
- // in r0.
- if (lhs.is(r0)) {
- __ Swap(r0, r1, ip);
+const char* UnaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name = NULL; // Make g++ happy.
+ switch (mode_) {
+ case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
}
- // The type transition also calculates the answer.
- bool generate_code_to_calculate_answer = true;
-
- if (ShouldGenerateFPCode()) {
- // DIV has neither SmiSmi fast code nor specialized slow code.
- // So don't try to patch a DIV Stub.
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- GenerateTypeTransition(masm); // Tail call.
- generate_code_to_calculate_answer = false;
- break;
-
- case Token::DIV:
- // DIV has neither SmiSmi fast code nor specialized slow code.
- // So don't try to patch a DIV Stub.
- break;
-
- default:
- break;
- }
- }
-
- if (generate_code_to_calculate_answer) {
- Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
- if (mode_ == NO_OVERWRITE) {
- // In the case where there is no chance of an overwritable float we may
- // as well do the allocation immediately while r0 and r1 are untouched.
- __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
- }
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "UnaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ UnaryOpIC::GetName(operand_type_));
+ return name_;
+}
- // Move r0 to a double in r2-r3.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- if (mode_ == OVERWRITE_RIGHT) {
- __ mov(r5, Operand(r0)); // Overwrite this heap number.
- }
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from tagged HeapNumber r0 to d7.
- __ sub(r7, r0, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- } else {
- // Calling convention says that second double is in r2 and r3.
- __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
- }
- __ jmp(&finished_loading_r0);
- __ bind(&r0_is_smi);
- if (mode_ == OVERWRITE_RIGHT) {
- // We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Convert smi in r0 to double in d7.
- __ mov(r7, Operand(r0, ASR, kSmiTagSize));
- __ vmov(s15, r7);
- __ vcvt_f64_s32(d7, s15);
- if (!use_fp_registers) {
- __ vmov(r2, r3, d7);
- }
- } else {
- // Write Smi from r0 to r3 and r2 in double format.
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub3(r3, r2, r7, r4);
- __ push(lr);
- __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::Generate(MacroAssembler* masm) {
+ switch (operand_type_) {
+ case UnaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case UnaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case UnaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case UnaryOpIC::GENERIC:
+ GenerateGenericStub(masm);
+ break;
+ }
+}
- // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
- // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
- Label r1_is_not_smi;
- if ((runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) &&
- HasSmiSmiFastPath()) {
- __ tst(r1, Operand(kSmiTagMask));
- __ b(ne, &r1_is_not_smi);
- GenerateTypeTransition(masm); // Tail call.
- }
- __ bind(&finished_loading_r0);
-
- // Move r1 to a double in r0-r1.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
- __ bind(&r1_is_not_smi);
- __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- if (mode_ == OVERWRITE_LEFT) {
- __ mov(r5, Operand(r1)); // Overwrite this heap number.
- }
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from tagged HeapNumber r1 to d6.
- __ sub(r7, r1, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- } else {
- // Calling convention says that first double is in r0 and r1.
- __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
- }
- __ jmp(&finished_loading_r1);
- __ bind(&r1_is_smi);
- if (mode_ == OVERWRITE_LEFT) {
- // We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
+void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ // Prepare to push argument.
+ __ mov(r3, Operand(r0));
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Convert smi in r1 to double in d6.
- __ mov(r7, Operand(r1, ASR, kSmiTagSize));
- __ vmov(s13, r7);
- __ vcvt_f64_s32(d6, s13);
- if (!use_fp_registers) {
- __ vmov(r0, r1, d6);
- }
- } else {
- // Write Smi from r1 to r1 and r0 in double format.
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub4(r1, r0, r7, r9);
- __ push(lr);
- __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
+ __ mov(r2, Operand(Smi::FromInt(MinorKey())));
+ __ mov(r1, Operand(Smi::FromInt(op_)));
+ __ mov(r0, Operand(Smi::FromInt(operand_type_)));
- __ bind(&finished_loading_r1);
- }
+ __ Push(r3, r2, r1, r0);
- if (generate_code_to_calculate_answer || do_the_call.is_linked()) {
- __ bind(&do_the_call);
- // If we are inlining the operation using VFP3 instructions for
- // add, subtract, multiply, or divide, the arguments are in d6 and d7.
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // ARMv7 VFP3 instructions to implement
- // double precision, add, subtract, multiply, divide.
-
- if (Token::MUL == op_) {
- __ vmul(d5, d6, d7);
- } else if (Token::DIV == op_) {
- __ vdiv(d5, d6, d7);
- } else if (Token::ADD == op_) {
- __ vadd(d5, d6, d7);
- } else if (Token::SUB == op_) {
- __ vsub(d5, d6, d7);
- } else {
- UNREACHABLE();
- }
- __ sub(r0, r5, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // If we did not inline the operation, then the arguments are in:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- // r5: Address of heap number for result.
-
- __ push(lr); // For later.
- __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments.
- // Call C routine that may not cause GC or other trouble. r5 is callee
- // save.
- __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
- // Store answer in the overwritable heap number.
- #if !defined(USE_ARM_EABI)
- // Double returned in fp coprocessor register 0 and 1, encoded as
- // register cr8. Offsets must be divisible by 4 for coprocessor so we
-  // need to subtract the tag from r5.
- __ sub(r4, r5, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
- #else
- // Double returned in registers 0 and 1.
- __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
- #endif
- __ mov(r0, Operand(r5));
- // And we are done.
- __ pop(pc);
- }
- }
- }
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
+ masm->isolate()),
+ 4,
+ 1);
+}
- if (!generate_code_to_calculate_answer &&
- !slow_reverse.is_linked() &&
- !slow.is_linked()) {
- return;
- }
- if (lhs.is(r0)) {
- __ b(&slow);
- __ bind(&slow_reverse);
- __ Swap(r0, r1, ip);
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateSmiStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateSmiStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
}
+}
- heap_number_map = no_reg; // Don't use this any more from here on.
- // We jump to here if something goes wrong (one param is not a number of any
- // sort or new-space allocation fails).
+void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow);
+ __ bind(&non_smi);
__ bind(&slow);
-
- // Push arguments to the stack
- __ Push(r1, r0);
-
- if (Token::ADD == op_) {
- // Test for string arguments before calling runtime.
- // r1 : first argument
- // r0 : second argument
- // sp[0] : second argument
- // sp[4] : first argument
-
- Label not_strings, not_string1, string1, string1_smi2;
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &not_string1);
- __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &not_string1);
-
-  // First argument is a string, test second.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &string1_smi2);
- __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &string1);
-
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, r0, r2, r4, r5, r6, true, &string1);
-
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ str(r2, MemOperand(sp, 0));
- __ TailCallStub(&string_add_stub);
-
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
-
- // First argument was not a string, test second.
- __ bind(&not_string1);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &not_strings);
- __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &not_strings);
-
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
-
- __ bind(&not_strings);
- }
-
- __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
+ GenerateTypeTransition(masm);
}
-// For bitwise ops where the inputs are not both Smis, we try to determine
-// whether both inputs are either Smis or at least heap numbers that can be
-// represented by a 32-bit signed value. We truncate towards zero as required
-// by the ES spec. If this is the case we do the bitwise op and see if the
-// result is a Smi. If so, great, otherwise we try to find a heap number to
-// write the answer into (either by allocating or by overwriting).
-// On entry the operands are in lhs and rhs. On exit the answer is in r0.
-void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
- Register lhs,
- Register rhs) {
- Label slow, result_not_a_smi;
- Label rhs_is_smi, lhs_is_smi;
- Label done_checking_rhs, done_checking_lhs;
+void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+ Label non_smi;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateTypeTransition(masm);
+}
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ tst(lhs, Operand(kSmiTagMask));
- __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
- __ jmp(&done_checking_lhs);
- __ bind(&lhs_is_smi);
- __ mov(r3, Operand(lhs, ASR, 1));
- __ bind(&done_checking_lhs);
-
- __ tst(rhs, Operand(kSmiTagMask));
- __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
- __ jmp(&done_checking_rhs);
- __ bind(&rhs_is_smi);
- __ mov(r2, Operand(rhs, ASR, 1));
- __ bind(&done_checking_rhs);
+void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+ Label* non_smi,
+ Label* slow) {
+ __ JumpIfNotSmi(r0, non_smi);
- ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
+ // The result of negating zero or the smallest negative smi is not a smi.
+ __ bic(ip, r0, Operand(0x80000000), SetCC);
+ __ b(eq, slow);
- // r0 and r1: Original operands (Smi or heap numbers).
- // r2 and r3: Signed int32 operands.
- switch (op_) {
- case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
- case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
- case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of writing
- // the register as an unsigned int so we go to slow case if we hit this
- // case.
- if (CpuFeatures::IsSupported(VFP3)) {
- __ b(mi, &result_not_a_smi);
- } else {
- __ b(mi, &slow);
- }
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default: UNREACHABLE();
- }
- // check that the *signed* result fits in a smi
- __ add(r3, r2, Operand(0x40000000), SetCC);
- __ b(mi, &result_not_a_smi);
- __ mov(r0, Operand(r2, LSL, kSmiTagSize));
+ // Return '0 - value'.
+ __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
__ Ret();
+}
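
The bic-with-SetCC guard is compact but subtle: clearing bit 31 of the tagged input produces zero for exactly the two values whose negation is not a smi. In plain C++ (one-bit smi tag assumed):

#include <cstdint>

bool NegationStaysSmi(uint32_t tagged_smi) {
  // Tagged 0x00000000 is the smi 0, which negates to -0 (a heap number);
  // tagged 0x80000000 is the most negative smi, which overflows on negation.
  return (tagged_smi & ~0x80000000u) != 0;
}
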
- Label have_to_allocate, got_a_heap_number;
- __ bind(&result_not_a_smi);
- switch (mode_) {
- case OVERWRITE_RIGHT: {
- __ tst(rhs, Operand(kSmiTagMask));
- __ b(eq, &have_to_allocate);
- __ mov(r5, Operand(rhs));
- break;
- }
- case OVERWRITE_LEFT: {
- __ tst(lhs, Operand(kSmiTagMask));
- __ b(eq, &have_to_allocate);
- __ mov(r5, Operand(lhs));
- break;
- }
- case NO_OVERWRITE: {
- // Get a new heap number in r5. r4 and r7 are scratch.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
- default: break;
- }
- __ bind(&got_a_heap_number);
- // r2: Answer as signed int32.
- // r5: Heap number to write answer into.
- // Nothing can go wrong now, so move the heap number to r0, which is the
- // result.
- __ mov(r0, Operand(r5));
+void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+ Label* non_smi) {
+ __ JumpIfNotSmi(r0, non_smi);
- if (CpuFeatures::IsSupported(VFP3)) {
- // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, r2);
- if (op_ == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
- }
+  // Flip all the bits, then clear the inverted smi tag bit.
+ __ mvn(r0, Operand(r0));
+ __ bic(r0, r0, Operand(kSmiTagMask));
+ __ Ret();
+}
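
The mvn/bic pair relies on the tagging identity ~(x << 1) == (~x << 1) | 1: inverting all bits also inverts the smi tag, and clearing the low bit restores a correctly tagged ~x. In C++:

#include <cstdint>

int32_t SmiBitNot(int32_t tagged_smi) {
  // kSmiTagMask == 1 assumed; the result is ~value, still smi-tagged.
  return ~tagged_smi & ~1;
}
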
- if (mode_ != NO_OVERWRITE) {
- __ bind(&have_to_allocate);
- // Get a new heap number in r5. r4 and r7 are scratch.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- __ jmp(&got_a_heap_number);
- }
- // If all else failed then we go to the runtime system.
- __ bind(&slow);
- __ Push(lhs, rhs); // Restore stack.
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
switch (op_) {
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
+ case Token::SUB:
+ GenerateHeapNumberStubSub(masm);
break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
+ case Token::BIT_NOT:
+ GenerateHeapNumberStubBitNot(masm);
break;
default:
UNREACHABLE();
@@ -2252,574 +1823,179 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
}
-
-
-// This function multiplies by a known int, taking the int in a register for
-// the cases where it doesn't know a good shift/add trick, and may deliver
-// a result that still needs shifting.
-static void MultiplyByKnownIntInStub(
- MacroAssembler* masm,
- Register result,
- Register source,
- Register known_int_register, // Smi tagged.
- int known_int,
- int* required_shift) { // Including Smi tag shift
- switch (known_int) {
- case 3:
- __ add(result, source, Operand(source, LSL, 1));
- *required_shift = 1;
- break;
- case 5:
- __ add(result, source, Operand(source, LSL, 2));
- *required_shift = 1;
- break;
- case 6:
- __ add(result, source, Operand(source, LSL, 1));
- *required_shift = 2;
- break;
- case 7:
- __ rsb(result, source, Operand(source, LSL, 3));
- *required_shift = 1;
- break;
- case 9:
- __ add(result, source, Operand(source, LSL, 3));
- *required_shift = 1;
- break;
- case 10:
- __ add(result, source, Operand(source, LSL, 2));
- *required_shift = 2;
- break;
- default:
- ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
- __ mul(result, source, known_int_register);
- *required_shift = 0;
- }
-}
-
-
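
For reference, the helper deleted above strength-reduced multiplications by small odd constants into shift/add or shift/subtract pairs rather than issuing a mul. The identities it encoded, in plain C++ (the required_shift bookkeeping, which also folds in the smi tag shift, is left out):

#include <cstdint>

int32_t TimesThree(int32_t x) { return x + (x << 1); }         // 3x = x + 2x
int32_t TimesSeven(int32_t x) { return (x << 3) - x; }         // 7x = 8x - x, the rsb case
int32_t TimesTen(int32_t x)   { return (x + (x << 2)) << 1; }  // 10x = (x + 4x) * 2
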
-// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3
-// trick. See http://en.wikipedia.org/wiki/Divisibility_rule
-// Takes the sum of the digits in base (mask + 1) repeatedly until we have a
-// number from 0 to mask. On exit the 'eq' condition flags are set if the
-// answer is exactly the mask.
-void IntegerModStub::DigitSum(MacroAssembler* masm,
- Register lhs,
- int mask,
- int shift,
- Label* entry) {
- ASSERT(mask > 0);
- ASSERT(mask <= 0xff); // This ensures we don't need ip to use it.
- Label loop;
- __ bind(&loop);
- __ and_(ip, lhs, Operand(mask));
- __ add(lhs, ip, Operand(lhs, LSR, shift));
- __ bind(entry);
- __ cmp(lhs, Operand(mask));
- __ b(gt, &loop);
+void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+ Label non_smi, slow, call_builtin;
+ GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+ __ bind(&call_builtin);
+ GenerateGenericCodeFallback(masm);
}
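
The DigitSum helpers being deleted implement the casting-out-nines family of tricks: since (mask + 1) is congruent to 1 modulo mask, summing base-(mask + 1) digits preserves the value modulo mask. A direct C++ analogue of the emitted loop (mask must be 2^shift - 1, as the stub asserts):

#include <cstdint>

uint32_t DigitSumMod(uint32_t lhs, uint32_t mask, int shift) {
  // Precondition, as in the stub: mask == (1u << shift) - 1 and mask <= 0xff.
  while (lhs > mask) {
    lhs = (lhs & mask) + (lhs >> shift);  // sum of base-(mask + 1) digits
  }
  // The loop exits with lhs in [0, mask]; a result of exactly mask means the
  // value was divisible (the stub signals this through the 'eq' flags).
  return lhs == mask ? 0 : lhs;
}
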
-void IntegerModStub::DigitSum(MacroAssembler* masm,
- Register lhs,
- Register scratch,
- int mask,
- int shift1,
- int shift2,
- Label* entry) {
- ASSERT(mask > 0);
- ASSERT(mask <= 0xff); // Ensures the mask fits in an immediate, so we never need ip to hold it.
- Label loop;
- __ bind(&loop);
- __ bic(scratch, lhs, Operand(mask));
- __ and_(ip, lhs, Operand(mask));
- __ add(lhs, ip, Operand(lhs, LSR, shift1));
- __ add(lhs, lhs, Operand(scratch, LSR, shift2));
- __ bind(entry);
- __ cmp(lhs, Operand(mask));
- __ b(gt, &loop);
+void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
}
+void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+ Label* slow) {
+ EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
+ // r0 is a heap number. Get a new heap number in r1.
+ if (mode_ == UNARY_OVERWRITE) {
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ } else {
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
-// Splits the number into two halves (bottom half has shift bits). The top
-// half is subtracted from the bottom half. If the result is negative then
-// rhs is added.
-void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm,
- Register lhs,
- int shift,
- int rhs) {
- int mask = (1 << shift) - 1;
- __ and_(ip, lhs, Operand(mask));
- __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC);
- __ add(lhs, lhs, Operand(rhs), LeaveCC, mi);
-}
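A worked instance of the subtraction trick (editorial example): for denominator 5, DigitSum first leaves a value v in [0, 15], and since 4 ≡ -1 (mod 5), writing v = 4t + b gives v ≡ b - t (mod 5). For v = 13 == 4 * 3 + 1 the split yields 1 - 3 == -2, and adding back the 5 gives 3 == 13 % 5.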
-
+ __ bind(&slow_allocate_heapnumber);
+ __ EnterInternalFrame();
+ __ push(r0);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r1, Operand(r0));
+ __ pop(r0);
+ __ LeaveInternalFrame();
-void IntegerModStub::ModReduce(MacroAssembler* masm,
- Register lhs,
- int max,
- int denominator) {
- int limit = denominator;
- while (limit * 2 <= max) limit *= 2;
- while (limit >= denominator) {
- __ cmp(lhs, Operand(limit));
- __ sub(lhs, lhs, Operand(limit), LeaveCC, ge);
- limit >>= 1;
+ __ bind(&heapnumber_allocated);
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
+ __ mov(r0, Operand(r1));
}
-}
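ModReduce, removed above, is a classic restoring reduction; a host-side sketch under the same precondition lhs <= max (illustrative):
static int ModReduce(int lhs, int max, int d) {
  int limit = d;
  while (limit * 2 <= max) limit *= 2; // largest d * 2^k not above max
  for (; limit >= d; limit >>= 1) {
    if (lhs >= limit) lhs -= limit; // invariant afterwards: lhs < limit
  }
  return lhs; // lhs mod d, now in [0, d)
}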
-
-
-void IntegerModStub::ModAnswer(MacroAssembler* masm,
- Register result,
- Register shift_distance,
- Register mask_bits,
- Register sum_of_digits) {
- __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance));
__ Ret();
}
-// See comment for class.
-void IntegerModStub::Generate(MacroAssembler* masm) {
- __ mov(lhs_, Operand(lhs_, LSR, shift_distance_));
- __ bic(odd_number_, odd_number_, Operand(1));
- __ mov(odd_number_, Operand(odd_number_, LSL, 1));
- // We now have (odd_number_ - 1) * 2 in the register.
- // Build a switch out of branches instead of data because it avoids
- // having to teach the assembler about intra-code-object pointers
- // that are not in relative branch instructions.
- Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19;
- Label mod21, mod23, mod25;
- { Assembler::BlockConstPoolScope block_const_pool(masm);
- __ add(pc, pc, Operand(odd_number_));
- // When you read pc it is always 8 ahead, but when you write it you always
- // write the actual value. So we put in two nops to take up the slack.
- __ nop();
- __ nop();
- __ b(&mod3);
- __ b(&mod5);
- __ b(&mod7);
- __ b(&mod9);
- __ b(&mod11);
- __ b(&mod13);
- __ b(&mod15);
- __ b(&mod17);
- __ b(&mod19);
- __ b(&mod21);
- __ b(&mod23);
- __ b(&mod25);
- }
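A worked example of the computed jump (editorial note): for odd_number 5 the register holds (5 - 1) * 2 == 8. The pc operand reads 8 bytes past the add instruction, so the target is add_address + 8 + 8 == add_address + 16; the two nops occupy +4 and +8, the branch table starts at +12, and +16 is therefore the second slot, b(&mod5).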
+void UnaryOpStub::GenerateHeapNumberCodeBitNot(
+ MacroAssembler* masm, Label* slow) {
+ Label impossible;
- // For each denominator we find a multiple that is almost only ones
- // when expressed in binary. Then we do the sum-of-digits trick for
- // that number. If the multiple is not 1 then we have to do a little
- // more work afterwards to get the answer into the 0-denominator-1
- // range.
- DigitSum(masm, lhs_, 3, 2, &mod3); // 3 = b11.
- __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0xf, 4, &mod5); // 5 * 3 = b1111.
- ModGetInRangeBySubtraction(masm, lhs_, 2, 5);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 7, 3, &mod7); // 7 = b111.
- __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0x3f, 6, &mod9); // 7 * 9 = b111111.
- ModGetInRangeBySubtraction(masm, lhs_, 3, 9);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11); // 5 * 11 = b110111.
- ModReduce(masm, lhs_, 0x3f, 11);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13); // 19 * 13 = b11110111.
- ModReduce(masm, lhs_, 0xff, 13);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0xf, 4, &mod15); // 15 = b1111.
- __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0xff, 8, &mod17); // 15 * 17 = b11111111.
- ModGetInRangeBySubtraction(masm, lhs_, 4, 17);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19); // 13 * 19 = b11110111.
- ModReduce(masm, lhs_, 0xff, 19);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0x3f, 6, &mod21); // 3 * 21 = b111111.
- ModReduce(masm, lhs_, 0x3f, 21);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23); // 11 * 23 = b11111101.
- ModReduce(masm, lhs_, 0xff, 23);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25); // 5 * 25 = b1111101.
- ModReduce(masm, lhs_, 0x7f, 25);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-}
+ EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
+ // Convert the heap number in r0 to an untagged integer in r1.
+ __ ConvertToInt32(r0, r1, r2, r3, d0, slow);
+ // Do the bitwise operation and check if the result fits in a smi.
+ Label try_float;
+ __ mvn(r1, Operand(r1));
+ __ add(r2, r1, Operand(0x40000000), SetCC);
+ __ b(mi, &try_float);
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- // lhs_ : x
- // rhs_ : y
- // r0 : result
+ // Tag the result as a smi and we're done.
+ __ mov(r0, Operand(r1, LSL, kSmiTagSize));
+ __ Ret();
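Why the 0x40000000 test works (editorial note): Smi-representable values lie in [-2^30, 2^30 - 1], and adding 0x40000000 maps exactly that interval onto [0, 0x7fffffff], so the add sets the N flag (taken by the b(mi, ...) above) precisely when the untagged result in r1 does not fit in a smi.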
- Register result = r0;
- Register lhs = lhs_;
- Register rhs = rhs_;
+ // Try to store the result in a heap number.
+ __ bind(&try_float);
+ if (mode_ == UNARY_NO_OVERWRITE) {
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ // Allocate a new heap number without zapping r0, which we need if it fails.
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
- // This code can't cope with other register allocations yet.
- ASSERT(result.is(r0) &&
- ((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0))));
+ __ bind(&slow_allocate_heapnumber);
+ __ EnterInternalFrame();
+ __ push(r0); // Push the heap number, not the untagged int32.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r2, r0); // Move the new heap number into r2.
+ // Get the heap number into r0, now that the new heap number is in r2.
+ __ pop(r0);
+ __ LeaveInternalFrame();
- Register smi_test_reg = r7;
- Register scratch = r9;
+ // Convert the heap number in r0 to an untagged integer in r1.
+ // This can't go slow-case because it's the same number we already
+ // converted once.
+ __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible);
+ __ mvn(r1, Operand(r1));
- // All ops need to know whether we are dealing with two Smis. Set up
- // smi_test_reg to tell us that.
- if (ShouldGenerateSmiCode()) {
- __ orr(smi_test_reg, lhs, Operand(rhs));
+ __ bind(&heapnumber_allocated);
+ __ mov(r0, r2); // Move newly allocated heap number to r0.
}
- switch (op_) {
- case Token::ADD: {
- Label not_smi;
- // Fast path.
- if (ShouldGenerateSmiCode()) {
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- __ b(ne, &not_smi);
- __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
- // Return if no overflow.
- __ Ret(vc);
- __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
- }
- HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
- break;
- }
-
- case Token::SUB: {
- Label not_smi;
- // Fast path.
- if (ShouldGenerateSmiCode()) {
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- __ b(ne, &not_smi);
- if (lhs.is(r1)) {
- __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
- // Return if no overflow.
- __ Ret(vc);
- __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
- } else {
- __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically.
- // Return if no overflow.
- __ Ret(vc);
- __ add(r0, r0, Operand(r1)); // Revert optimistic subtract.
- }
- }
- HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
- break;
- }
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, r1);
+ __ vcvt_f64_s32(d0, s0);
+ __ sub(r2, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
+ __ Ret();
+ } else {
+ // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+ // have to set up a frame.
+ WriteInt32ToHeapNumberStub stub(r1, r0, r2);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ }
- case Token::MUL: {
- Label not_smi, slow;
- if (ShouldGenerateSmiCode()) {
- STATIC_ASSERT(kSmiTag == 0); // adjust code below
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- Register scratch2 = smi_test_reg;
- smi_test_reg = no_reg;
- __ b(ne, &not_smi);
- // Remove tag from one operand (but keep sign), so that result is Smi.
- __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
- // Do multiplication
- // scratch = lower 32 bits of ip * lhs.
- __ smull(scratch, scratch2, lhs, ip);
- // Go slow on overflows (overflow bit is not set).
- __ mov(ip, Operand(scratch, ASR, 31));
- // No overflow if higher 33 bits are identical.
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &slow);
- // Go slow on zero result to handle -0.
- __ tst(scratch, Operand(scratch));
- __ mov(result, Operand(scratch), LeaveCC, ne);
- __ Ret(ne);
- // We need -0 if we were multiplying a negative number with 0 to get 0.
- // We know one of them was zero.
- __ add(scratch2, rhs, Operand(lhs), SetCC);
- __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
- // Slow case. We fall through here if we multiplied a negative number
- // with 0, because that would mean we should produce -0.
- __ bind(&slow);
- }
- HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
- break;
- }
+ __ bind(&impossible);
+ if (FLAG_debug_code) {
+ __ stop("Incorrect assumption in bit-not stub");
+ }
+}
- case Token::DIV:
- case Token::MOD: {
- Label not_smi;
- if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
- Label lhs_is_unsuitable;
- __ JumpIfNotSmi(lhs, &not_smi);
- if (IsPowerOf2(constant_rhs_)) {
- if (op_ == Token::MOD) {
- __ and_(rhs,
- lhs,
- Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
- SetCC);
- // We now have the answer, but if the input was negative we also
- // have the sign bit. Our work is done if the result is
- // positive or zero:
- if (!rhs.is(r0)) {
- __ mov(r0, rhs, LeaveCC, pl);
- }
- __ Ret(pl);
- // A mod of a negative left hand side must return a negative number.
- // Unfortunately if the answer is 0 then we must return -0. And we
- // already optimistically trashed rhs so we may need to restore it.
- __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
- // Next two instructions are conditional on the answer being -0.
- __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
- __ b(eq, &lhs_is_unsuitable);
- // We need to subtract the dividend. Eg. -3 % 4 == -3.
- __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
- } else {
- ASSERT(op_ == Token::DIV);
- __ tst(lhs,
- Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
- __ b(ne, &lhs_is_unsuitable); // Go slow on negative or remainder.
- int shift = 0;
- int d = constant_rhs_;
- while ((d & 1) == 0) {
- d >>= 1;
- shift++;
- }
- __ mov(r0, Operand(lhs, LSR, shift));
- __ bic(r0, r0, Operand(kSmiTagMask));
- }
- } else {
- // Not a power of 2.
- __ tst(lhs, Operand(0x80000000u));
- __ b(ne, &lhs_is_unsuitable);
- // Find a fixed point reciprocal of the divisor so we can divide by
- // multiplying.
- double divisor = 1.0 / constant_rhs_;
- int shift = 32;
- double scale = 4294967296.0; // 1 << 32.
- uint32_t mul;
- // Maximise the precision of the fixed point reciprocal.
- while (true) {
- mul = static_cast<uint32_t>(scale * divisor);
- if (mul >= 0x7fffffff) break;
- scale *= 2.0;
- shift++;
- }
- mul++;
- Register scratch2 = smi_test_reg;
- smi_test_reg = no_reg;
- __ mov(scratch2, Operand(mul));
- __ umull(scratch, scratch2, scratch2, lhs);
- __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
- // scratch2 is lhs / rhs. scratch2 is not Smi tagged.
- // rhs is still the known rhs. rhs is Smi tagged.
- // lhs is still the unknown lhs. lhs is Smi tagged.
- int required_scratch_shift = 0; // Including the Smi tag shift of 1.
- // scratch = scratch2 * rhs.
- MultiplyByKnownIntInStub(masm,
- scratch,
- scratch2,
- rhs,
- constant_rhs_,
- &required_scratch_shift);
- // scratch << required_scratch_shift is now the Smi tagged rhs *
- // (lhs / rhs) where / indicates integer division.
- if (op_ == Token::DIV) {
- __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
- __ b(ne, &lhs_is_unsuitable); // There was a remainder.
- __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
- } else {
- ASSERT(op_ == Token::MOD);
- __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
- }
- }
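The reciprocal search in the removed block deserves a host-side restatement (illustrative sketch of the loop beginning at `double divisor = 1.0 / constant_rhs_`):
static void FixedPointReciprocal(int d, uint32_t* mul, int* shift) {
  double divisor = 1.0 / d;
  double scale = 4294967296.0; // 1 << 32
  *shift = 32;
  uint32_t m;
  while (true) {
    m = static_cast<uint32_t>(scale * divisor);
    if (m >= 0x7fffffff) break; // keep as many reciprocal bits as possible
    scale *= 2.0;
    (*shift)++;
  }
  *mul = m + 1; // round up so the truncated product never undershoots
}
// Example: d == 7 yields mul == 0x92492493 and shift == 34, and
// (x * 0x92492493ULL) >> 34 == x / 7 across the positive Smi range.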
- __ Ret();
- __ bind(&lhs_is_unsuitable);
- } else if (op_ == Token::MOD &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS) {
- // Do generate a bit of smi code for modulus even though the default for
- // modulus is not to do it, but as the ARM processor has no coprocessor
- // support for modulus, checking for smis makes sense. We can handle
- // 1 to 25 times any power of 2. This covers over half the numbers from
- // 1 to 100 including all of the first 25. (Actually the constants < 10
- // are handled above by reciprocal multiplication. We only get here for
- // those cases if the right hand side is not a constant or for cases
- // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod
- // stub.)
- Label slow;
- Label not_power_of_2;
- ASSERT(!ShouldGenerateSmiCode());
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- // Check for two positive smis.
- __ orr(smi_test_reg, lhs, Operand(rhs));
- __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
- __ b(ne, &slow);
- // Check that rhs is a power of two and not zero.
- Register mask_bits = r3;
- __ sub(scratch, rhs, Operand(1), SetCC);
- __ b(mi, &slow);
- __ and_(mask_bits, rhs, Operand(scratch), SetCC);
- __ b(ne, &not_power_of_2);
- // Calculate power of two modulus.
- __ and_(result, lhs, Operand(scratch));
- __ Ret();
- __ bind(&not_power_of_2);
- __ eor(scratch, scratch, Operand(mask_bits));
- // At least two bits are set in the modulus. The high one(s) are in
- // mask_bits and the low one is scratch + 1.
- __ and_(mask_bits, scratch, Operand(lhs));
- Register shift_distance = scratch;
- scratch = no_reg;
-
- // The rhs consists of a power of 2 multiplied by some odd number.
- // The power-of-2 part we handle by putting the corresponding bits
- // from the lhs in the mask_bits register, and the power in the
- // shift_distance register. Shift distance is never 0 due to Smi
- // tagging.
- __ CountLeadingZeros(r4, shift_distance, shift_distance);
- __ rsb(shift_distance, r4, Operand(32));
-
- // Now we need to find out what the odd number is. The last bit is
- // always 1.
- Register odd_number = r4;
- __ mov(odd_number, Operand(rhs, LSR, shift_distance));
- __ cmp(odd_number, Operand(25));
- __ b(gt, &slow);
-
- IntegerModStub stub(
- result, shift_distance, odd_number, mask_bits, lhs, r5);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); // Tail call.
-
- __ bind(&slow);
- }
- HandleBinaryOpSlowCases(
- masm,
- &not_smi,
- lhs,
- rhs,
- op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateGenericStubSub(masm);
break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label slow;
- STATIC_ASSERT(kSmiTag == 0); // adjust code below
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- __ b(ne, &slow);
- Register scratch2 = smi_test_reg;
- smi_test_reg = no_reg;
- switch (op_) {
- case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break;
- case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
- case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch2, rhs, 5);
- __ mov(result, Operand(lhs, ASR, scratch2));
- // Smi tag result.
- __ bic(result, result, Operand(kSmiTagMask));
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
- __ GetLeastBitsFromSmi(scratch2, rhs, 5);
- __ mov(scratch, Operand(scratch, LSR, scratch2));
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ tst(scratch, Operand(0xc0000000));
- __ b(ne, &slow);
- // Smi tag result.
- __ mov(result, Operand(scratch, LSL, kSmiTagSize));
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
- __ GetLeastBitsFromSmi(scratch2, rhs, 5);
- __ mov(scratch, Operand(scratch, LSL, scratch2));
- // Check that the signed result fits in a Smi.
- __ add(scratch2, scratch, Operand(0x40000000), SetCC);
- __ b(mi, &slow);
- __ mov(result, Operand(scratch, LSL, kSmiTagSize));
- break;
- default: UNREACHABLE();
- }
- __ Ret();
- __ bind(&slow);
- HandleNonSmiBitwiseOp(masm, lhs, rhs);
+ case Token::BIT_NOT:
+ GenerateGenericStubBitNot(masm);
break;
- }
-
- default: UNREACHABLE();
- }
- // This code should be unreachable.
- __ stop("Unreachable");
-
- // Generate an unreachable reference to the DEFAULT stub so that it can be
- // found at the end of this stub when clearing ICs at GC.
- // TODO(kaznacheev): Check performance impact and get rid of this.
- if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
- GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
- __ CallStub(&uninit);
+ default:
+ UNREACHABLE();
}
}
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(r1, r0);
-
- __ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ mov(r1, Operand(Smi::FromInt(op_)));
- __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
- __ Push(r2, r1, r0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
- 5,
- 1);
+void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
}
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
+void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
}
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
- TRBinaryOpIC::TypeInfo type_info,
- TRBinaryOpIC::TypeInfo result_type_info) {
- TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
- return stub.GetCode();
+void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ push(r0);
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
__ Push(r1, r0);
@@ -2830,39 +2006,43 @@ void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ Push(r2, r1, r0);
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
+ masm->isolate()),
5,
1);
}
-void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
MacroAssembler* masm) {
UNIMPLEMENTED();
}
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+void BinaryOpStub::Generate(MacroAssembler* masm) {
switch (operands_type_) {
- case TRBinaryOpIC::UNINITIALIZED:
+ case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
break;
- case TRBinaryOpIC::SMI:
+ case BinaryOpIC::SMI:
GenerateSmiStub(masm);
break;
- case TRBinaryOpIC::INT32:
+ case BinaryOpIC::INT32:
GenerateInt32Stub(masm);
break;
- case TRBinaryOpIC::HEAP_NUMBER:
+ case BinaryOpIC::HEAP_NUMBER:
GenerateHeapNumberStub(masm);
break;
- case TRBinaryOpIC::ODDBALL:
+ case BinaryOpIC::ODDBALL:
GenerateOddballStub(masm);
break;
- case TRBinaryOpIC::STRING:
+ case BinaryOpIC::BOTH_STRING:
+ GenerateBothStringStub(masm);
+ break;
+ case BinaryOpIC::STRING:
GenerateStringStub(masm);
break;
- case TRBinaryOpIC::GENERIC:
+ case BinaryOpIC::GENERIC:
GenerateGeneric(masm);
break;
default:
@@ -2871,10 +2051,11 @@ void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
}
-const char* TypeRecordingBinaryOpStub::GetName() {
+const char* BinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* op_name = Token::Name(op_);
const char* overwrite_name;
@@ -2886,16 +2067,15 @@ const char* TypeRecordingBinaryOpStub::GetName() {
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "TypeRecordingBinaryOpStub_%s_%s_%s",
+ "BinaryOpStub_%s_%s_%s",
op_name,
overwrite_name,
- TRBinaryOpIC::GetName(operands_type_));
+ BinaryOpIC::GetName(operands_type_));
return name_;
}
-void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
- MacroAssembler* masm) {
+void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
Register left = r1;
Register right = r0;
Register scratch1 = r7;
@@ -3020,14 +2200,15 @@ void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
}
-void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required) {
+void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required) {
Register left = r1;
Register right = r0;
Register scratch1 = r7;
Register scratch2 = r9;
+ Register scratch3 = r4;
ASSERT(smi_operands || (not_numbers != NULL));
if (smi_operands && FLAG_debug_code) {
@@ -3047,7 +2228,8 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Load left and right operands into d6 and d7 or r0/r1 and r2/r3
// depending on whether VFP3 is available or not.
FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
+ CpuFeatures::IsSupported(VFP3) &&
+ op_ != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
@@ -3101,6 +2283,9 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
op_,
result,
scratch1);
+ if (FLAG_debug_code) {
+ __ stop("Unreachable code.");
+ }
}
break;
}
@@ -3115,22 +2300,24 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
__ SmiUntag(r2, right);
} else {
// Convert operands to 32-bit integers. Right in r2 and left in r3.
- FloatingPointHelper::LoadNumberAsInteger(masm,
- left,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- d0,
- not_numbers);
- FloatingPointHelper::LoadNumberAsInteger(masm,
- right,
- r2,
- heap_number_map,
- scratch1,
- scratch2,
- d0,
- not_numbers);
+ FloatingPointHelper::ConvertNumberToInt32(masm,
+ left,
+ r3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ d0,
+ not_numbers);
+ FloatingPointHelper::ConvertNumberToInt32(masm,
+ right,
+ r2,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ d0,
+ not_numbers);
}
Label result_not_a_smi;
@@ -3227,7 +2414,9 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// generated. If the result is not a smi and heap number allocation is not
// requested the code falls through. If number allocation is requested but a
// heap number cannot be allocated the code jumps to the label gc_required.
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+void BinaryOpStub::GenerateSmiCode(
+ MacroAssembler* masm,
+ Label* use_runtime,
Label* gc_required,
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
Label not_smis;
@@ -3240,8 +2429,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
// Perform combined smi check on both operands.
__ orr(scratch1, left, Operand(right));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(scratch1, Operand(kSmiTagMask));
- __ b(ne, &not_smis);
+ __ JumpIfNotSmi(scratch1, &not_smis);
// If the smi-smi operation results in a smi return is generated.
GenerateSmiSmiOperation(masm);
@@ -3249,23 +2437,26 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
// If heap number results are possible generate the result in an allocated
// heap number.
if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
- GenerateFPOperation(masm, true, NULL, gc_required);
+ GenerateFPOperation(masm, true, use_runtime, gc_required);
}
__ bind(&not_smis);
}
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label not_smis, call_runtime;
- if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
- result_type_ == TRBinaryOpIC::SMI) {
+ if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+ result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
- GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
+ GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ GenerateSmiCode(masm,
+ &call_runtime,
+ &call_runtime,
+ ALLOW_HEAPNUMBER_RESULTS);
}
// Code falls through if the result is not returned as either a smi or heap
@@ -3277,18 +2468,48 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// Try to add arguments as strings, otherwise, transition to the generic
- // TRBinaryOpIC type.
+ // BinaryOpIC type.
GenerateAddStrings(masm);
GenerateTypeTransition(masm);
}
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(operands_type_ == TRBinaryOpIC::INT32);
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+ Label call_runtime;
+ ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(op_ == Token::ADD);
+ // If both arguments are strings, call the string add stub.
+ // Otherwise, do a transition.
+
+ // Registers containing left and right operands respectively.
+ Register left = r1;
+ Register right = r0;
+
+ // Test if left operand is a string.
+ __ JumpIfSmi(left, &call_runtime);
+ __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &call_runtime);
+
+ // Test if right operand is a string.
+ __ JumpIfSmi(right, &call_runtime);
+ __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &call_runtime);
+
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&call_runtime);
+ GenerateTypeTransition(masm);
+}
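The ge branches above lean on V8's instance-type layout (assumed here, as elsewhere in this file): every string type sorts below FIRST_NONSTRING_TYPE, so the test is effectively !IsSmi(obj) && instance_type(obj) < FIRST_NONSTRING_TYPE, with CompareObjectType leaving the comparison in the flags.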
+
+
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == BinaryOpIC::INT32);
Register left = r1;
Register right = r0;
@@ -3321,36 +2542,36 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::MUL:
case Token::DIV:
case Token::MOD: {
- // Load both operands and check that they are 32-bit integer.
- // Jump to type transition if they are not. The registers r0 and r1 (right
- // and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
- FloatingPointHelper::kVFPRegisters :
- FloatingPointHelper::kCoreRegisters;
-
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- right,
- destination,
- d7,
- r2,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- s0,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- left,
- destination,
- d6,
- r4,
- r5,
- heap_number_map,
- scratch1,
- scratch2,
- s0,
- &transition);
+ // Load both operands and check that they are 32-bit integer.
+ // Jump to type transition if they are not. The registers r0 and r1 (right
+ // and left) are preserved for the runtime call.
+ FloatingPointHelper::Destination destination =
+ (CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD)
+ ? FloatingPointHelper::kVFPRegisters
+ : FloatingPointHelper::kCoreRegisters;
+
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ right,
+ destination,
+ d7,
+ r2,
+ r3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ s0,
+ &transition);
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ left,
+ destination,
+ d6,
+ r4,
+ r5,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ s0,
+ &transition);
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
@@ -3384,7 +2605,7 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch1,
scratch2);
- if (result_type_ <= TRBinaryOpIC::INT32) {
+ if (result_type_ <= BinaryOpIC::INT32) {
// If the ne condition is set, result does
// not fit in a 32-bit integer.
__ b(ne, &transition);
@@ -3395,14 +2616,27 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ add(scratch2, scratch1, Operand(0x40000000), SetCC);
// If not try to return a heap number.
__ b(mi, &return_heap_number);
+ // Check for minus zero. Return heap number for minus zero.
+ Label not_zero;
+ __ cmp(scratch1, Operand(0));
+ __ b(ne, &not_zero);
+ __ vmov(scratch2, d5.high());
+ __ tst(scratch2, Operand(HeapNumber::kSignMask));
+ __ b(ne, &return_heap_number);
+ __ bind(&not_zero);
+
// Tag the result and return.
__ SmiTag(r0, scratch1);
__ Ret();
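The minus-zero escape above exists because an int32 cannot carry the sign of -0.0 (editorial note): for example -1 * 0 must keep 1 / (-1 * 0) == -Infinity observable, which only a heap-number -0.0 preserves; d5.high() is the upper word of the double result, whose top bit is the sign.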
+ } else {
+ // DIV just falls through to allocating a heap number.
}
- if (result_type_ >= (op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER
- : TRBinaryOpIC::INT32) {
- __ bind(&return_heap_number);
+ __ bind(&return_heap_number);
+ // Return a heap number, or fall through to type transition or runtime
+ // call if we can't.
+ if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
+ : BinaryOpIC::INT32)) {
// We are using vfp registers so r5 is available.
heap_number_result = r5;
GenerateHeapResultAllocation(masm,
@@ -3442,6 +2676,9 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Call the C function to handle the double operation.
FloatingPointHelper::CallCCodeForDoubleOperation(
masm, op_, heap_number_result, scratch1);
+ if (FLAG_debug_code) {
+ __ stop("Unreachable code.");
+ }
__ bind(&pop_and_call_runtime);
__ Drop(2);
@@ -3507,12 +2744,13 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// The non vfp3 code does not support this special case, so jump to
// runtime if we don't support it.
if (CpuFeatures::IsSupported(VFP3)) {
- __ b(mi,
- (result_type_ <= TRBinaryOpIC::INT32) ? &transition
- : &return_heap_number);
+ __ b(mi, (result_type_ <= BinaryOpIC::INT32)
+ ? &transition
+ : &return_heap_number);
} else {
- __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? &transition
- : &call_runtime);
+ __ b(mi, (result_type_ <= BinaryOpIC::INT32)
+ ? &transition
+ : &call_runtime);
}
break;
case Token::SHL:
@@ -3532,16 +2770,16 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ Ret();
__ bind(&return_heap_number);
+ heap_number_result = r5;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
+
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
- heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
-
if (op_ != Token::SHR) {
// Convert the result to a floating point value.
__ vmov(double_scratch.low(), r2);
@@ -3560,6 +2798,7 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
} else {
// Tail call that writes the int32 in r2 to the heap number in r0, using
// r3 as scratch. r0 is preserved and returned.
+ __ mov(r0, r5);
WriteInt32ToHeapNumberStub stub(r2, r0, r3);
__ TailCallStub(&stub);
}
@@ -3571,7 +2810,11 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
UNREACHABLE();
}
- if (transition.is_linked()) {
+ // We never expect DIV to yield an integer result, so we always generate
+ // type transition code for DIV operations expecting an integer result: the
+ // code will fall through to this type transition.
+ if (transition.is_linked() ||
+ ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
__ bind(&transition);
GenerateTypeTransition(masm);
}
@@ -3581,7 +2824,7 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
Label call_runtime;
if (op_ == Token::ADD) {
@@ -3592,8 +2835,7 @@ void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
// Convert oddball arguments to numbers.
Label check, done;
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r1, ip);
+ __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
__ b(ne, &check);
if (Token::IsBitOp(op_)) {
__ mov(r1, Operand(Smi::FromInt(0)));
@@ -3602,8 +2844,7 @@ void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
}
__ jmp(&done);
__ bind(&check);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
+ __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(ne, &done);
if (Token::IsBitOp(op_)) {
__ mov(r0, Operand(Smi::FromInt(0)));
@@ -3616,22 +2857,19 @@ void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label not_numbers, call_runtime;
- GenerateFPOperation(masm, false, &not_numbers, &call_runtime);
-
- __ bind(&not_numbers);
- GenerateTypeTransition(masm);
+void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+ Label call_runtime;
+ GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
__ bind(&call_runtime);
GenerateCallRuntime(masm);
}
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
Label call_runtime, call_string_add_or_runtime;
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
@@ -3645,7 +2883,7 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
Label left_not_string, call_runtime;
@@ -3676,41 +2914,41 @@ void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
GenerateRegisterArgsPush(masm);
switch (op_) {
case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_JS);
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
break;
case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_JS);
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
break;
case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_JS);
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
break;
case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_JS);
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_JS);
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
break;
case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
break;
case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
break;
case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
break;
case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
break;
case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
@@ -3718,14 +2956,12 @@ void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
- MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
-
+void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
// Code below will scratch result if allocation fails. To keep both arguments
// intact for the runtime call result cannot be one of these.
ASSERT(!result.is(r0) && !result.is(r1));
@@ -3752,38 +2988,53 @@ void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
}
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
__ Push(r1, r0);
}
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Argument is a number and is on stack and in r0.
- Label runtime_call;
+ // Untagged case: double input in d2, double result goes
+ // into d2.
+ // Tagged case: tagged input on top of stack and in r0,
+ // tagged result (heap number) goes into r0.
+
Label input_not_smi;
Label loaded;
+ Label calculate;
+ Label invalid_cache;
+ const Register scratch0 = r9;
+ const Register scratch1 = r7;
+ const Register cache_entry = r0;
+ const bool tagged = (argument_type_ == TAGGED);
if (CpuFeatures::IsSupported(VFP3)) {
- // Load argument and check if it is a smi.
- __ JumpIfNotSmi(r0, &input_not_smi);
-
CpuFeatures::Scope scope(VFP3);
- // Input is a smi. Convert to double and load the low and high words
- // of the double into r2, r3.
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
- __ b(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(r0,
- r1,
- Heap::kHeapNumberMapRootIndex,
- &runtime_call,
- true);
- // Input is a HeapNumber. Load it to a double register and store the
- // low and high words into r2, r3.
- __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
-
+ if (tagged) {
+ // Argument is a number and is on stack and in r0.
+ // Load argument and check if it is a smi.
+ __ JumpIfNotSmi(r0, &input_not_smi);
+
+ // Input is a smi. Convert to double and load the low and high words
+ // of the double into r2, r3.
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ __ b(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ CheckMap(r0,
+ r1,
+ Heap::kHeapNumberMapRootIndex,
+ &calculate,
+ DONT_DO_SMI_CHECK);
+ // Input is a HeapNumber. Load it to a double register and store the
+ // low and high words into r2, r3.
+ __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ vmov(r2, r3, d0);
+ } else {
+ // Input is untagged double in d2. Output goes to d2.
+ __ vmov(r2, r3, d2);
+ }
__ bind(&loaded);
// r2 = low 32 bits of double value
// r3 = high 32 bits of double value
@@ -3792,24 +3043,28 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ eor(r1, r2, Operand(r3));
__ eor(r1, r1, Operand(r1, ASR, 16));
__ eor(r1, r1, Operand(r1, ASR, 8));
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
+ ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+ __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
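A host-side sketch of the hash just computed (illustrative; the stub's ASR makes the shifts arithmetic, assumed likewise for the host's >> on int32_t):
static int CacheHash(uint32_t lo, uint32_t hi) {
  int32_t h = static_cast<int32_t>(lo ^ hi);
  h ^= h >> 16;
  h ^= h >> 8;
  return h & (TranscendentalCache::SubCache::kCacheSize - 1); // power of two
}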
// r2 = low 32 bits of double value.
// r3 = high 32 bits of double value.
// r1 = TranscendentalCache::hash(double value).
- __ mov(r0,
- Operand(ExternalReference::transcendental_cache_array_address()));
- // r0 points to cache array.
- __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0])));
+ Isolate* isolate = masm->isolate();
+ ExternalReference cache_array =
+ ExternalReference::transcendental_cache_array_address(isolate);
+ __ mov(cache_entry, Operand(cache_array));
+ // cache_entry points to cache array.
+ int cache_array_index
+ = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
+ __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
// r0 points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- __ b(eq, &runtime_call);
+ __ cmp(cache_entry, Operand(0, RelocInfo::NONE));
+ __ b(eq, &invalid_cache);
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
- { TranscendentalCache::Element test_elem[2];
+ { TranscendentalCache::SubCache::Element test_elem[2];
char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
@@ -3824,21 +3079,120 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
__ add(r1, r1, Operand(r1, LSL, 1));
- __ add(r0, r0, Operand(r1, LSL, 2));
+ __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
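The two adds implement &cache[hash * 12] (editorial note): each element is two uint32_t inputs plus one result pointer, 12 bytes on ARM, so hash is first scaled by 3 (the add with LSL 1) and the address add then scales by 4 (LSL 2), giving the 12-byte stride the DEBUG block below verifies.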
// Check if cache matches: Double value is stored in uint32_t[2] array.
- __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit());
+ __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
__ cmp(r2, r4);
- __ b(ne, &runtime_call);
+ __ b(ne, &calculate);
__ cmp(r3, r5);
- __ b(ne, &runtime_call);
- // Cache hit. Load result, pop argument and return.
- __ mov(r0, Operand(r6));
- __ pop();
+ __ b(ne, &calculate);
+ // Cache hit. Load result, clean up and return.
+ if (tagged) {
+ // Pop input value from stack and load result into r0.
+ __ pop();
+ __ mov(r0, Operand(r6));
+ } else {
+ // Load result into d2.
+ __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
+ }
+ __ Ret();
+ } // if (CpuFeatures::IsSupported(VFP3))
+
+ __ bind(&calculate);
+ if (tagged) {
+ __ bind(&invalid_cache);
+ ExternalReference runtime_function =
+ ExternalReference(RuntimeFunction(), masm->isolate());
+ __ TailCallExternalReference(runtime_function, 1, 1);
+ } else {
+ if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
+ CpuFeatures::Scope scope(VFP3);
+
+ Label no_update;
+ Label skip_cache;
+ const Register heap_number_map = r5;
+
+ // Call C function to calculate the result and update the cache.
+ // Register r0 holds precalculated cache entry address; preserve
+ // it on the stack and pop it into register cache_entry after the
+ // call.
+ __ push(cache_entry);
+ GenerateCallCFunction(masm, scratch0);
+ __ GetCFunctionDoubleResult(d2);
+
+ // Try to update the cache. If we cannot allocate a
+ // heap number, we return the result without updating.
+ __ pop(cache_entry);
+ __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
+ __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
+ __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
+ __ Ret();
+
+ __ bind(&invalid_cache);
+ // The cache is invalid. Call runtime which will recreate the
+ // cache.
+ __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
+ __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ EnterInternalFrame();
+ __ push(r0);
+ __ CallRuntime(RuntimeFunction(), 1);
+ __ LeaveInternalFrame();
+ __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ Ret();
+
+ __ bind(&skip_cache);
+ // Call C function to calculate the result and return the answer directly
+ // without updating the cache.
+ GenerateCallCFunction(masm, scratch0);
+ __ GetCFunctionDoubleResult(d2);
+ __ bind(&no_update);
+
+ // We return the value in d2 without adding it to the cache, but
+ // we cause a scavenging GC so that future allocations will succeed.
+ __ EnterInternalFrame();
+
+ // Allocate an aligned object larger than a HeapNumber.
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+ __ mov(scratch0, Operand(4 * kPointerSize));
+ __ push(scratch0);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ __ LeaveInternalFrame();
__ Ret();
}
+}
+
+
+void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
+ Register scratch) {
+ Isolate* isolate = masm->isolate();
- __ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+ __ push(lr);
+ __ PrepareCallCFunction(0, 1, scratch);
+ if (masm->use_eabi_hardfloat()) {
+ __ vmov(d0, d2);
+ } else {
+ __ vmov(r0, r1, d2);
+ }
+ switch (type_) {
+ case TranscendentalCache::SIN:
+ __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
+ 0, 1);
+ break;
+ case TranscendentalCache::COS:
+ __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
+ 0, 1);
+ break;
+ case TranscendentalCache::LOG:
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate),
+ 0, 1);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ __ pop(lr);
}
@@ -3860,138 +3214,110 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
}
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
-
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- if (op_ == Token::SUB) {
- if (include_smi_code_) {
- // Check whether the value is a smi.
- Label try_float;
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &try_float);
-
- // Go slow case if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- if (negative_zero_ == kStrictNegativeZero) {
- // If we have to check for zero, then we can check for the max negative
- // smi while we are at it.
- __ bic(ip, r0, Operand(0x80000000), SetCC);
- __ b(eq, &slow);
- __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
- __ Ret();
- } else {
- // The value of the expression is a smi and 0 is OK for -0. Try
- // optimistic subtraction '0 - value'.
- __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
- __ Ret(vc);
- // We don't have to reverse the optimistic neg since the only case
- // where we fall through is the minimum negative Smi, which is the case
- // where the neg leaves the register unchanged.
- __ jmp(&slow); // Go slow on max negative Smi.
- }
- __ bind(&try_float);
- } else if (FLAG_debug_code) {
- __ tst(r0, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected smi operand.");
- }
-
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r1, heap_number_map);
- __ b(ne, &slow);
- // r0 is a heap number. Get a new heap number in r1.
- if (overwrite_ == UNARY_OVERWRITE) {
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- } else {
- __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
- __ mov(r0, Operand(r1));
- }
- } else if (op_ == Token::BIT_NOT) {
- if (include_smi_code_) {
- Label non_smi;
- __ JumpIfNotSmi(r0, &non_smi);
- __ mvn(r0, Operand(r0));
- // Bit-clear inverted smi-tag.
- __ bic(r0, r0, Operand(kSmiTagMask));
- __ Ret();
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- __ tst(r0, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected smi operand.");
- }
-
- // Check if the operand is a heap number.
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r1, heap_number_map);
- __ b(ne, &slow);
-
- // Convert the heap number is r0 to an untagged integer in r1.
- __ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
-
- // Do the bitwise operation (move negated) and check if the result
- // fits in a smi.
- Label try_float;
- __ mvn(r1, Operand(r1));
- __ add(r2, r1, Operand(0x40000000), SetCC);
- __ b(mi, &try_float);
- __ mov(r0, Operand(r1, LSL, kSmiTagSize));
- __ b(&done);
+void MathPowStub::Generate(MacroAssembler* masm) {
+ Label call_runtime;
- __ bind(&try_float);
- if (!overwrite_ == UNARY_OVERWRITE) {
- // Allocate a fresh heap number, but don't overwrite r0 until
- // we're sure we can do it without going through the slow case
- // that needs the value in r0.
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
- __ mov(r0, Operand(r2));
- }
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
- if (CpuFeatures::IsSupported(VFP3)) {
- // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, r1);
- __ vcvt_f64_s32(d0, s0);
- __ sub(r2, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r2, HeapNumber::kValueOffset);
- } else {
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(r1, r0, r2);
- __ push(lr);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
- } else {
- UNIMPLEMENTED();
+ Label base_not_smi;
+ Label exponent_not_smi;
+ Label convert_exponent;
+
+ const Register base = r0;
+ const Register exponent = r1;
+ const Register heapnumbermap = r5;
+ const Register heapnumber = r6;
+ const DoubleRegister double_base = d0;
+ const DoubleRegister double_exponent = d1;
+ const DoubleRegister double_result = d2;
+ const SwVfpRegister single_scratch = s0;
+ const Register scratch = r9;
+ const Register scratch2 = r7;
+
+ __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+ __ ldr(base, MemOperand(sp, 1 * kPointerSize));
+ __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
+
+ // Convert base to double value and store it in d0.
+ __ JumpIfNotSmi(base, &base_not_smi);
+ // Base is a Smi. Untag and convert it.
+ __ SmiUntag(base);
+ __ vmov(single_scratch, base);
+ __ vcvt_f64_s32(double_base, single_scratch);
+ __ b(&convert_exponent);
+
+ __ bind(&base_not_smi);
+ __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
+ __ cmp(scratch, heapnumbermap);
+ __ b(ne, &call_runtime);
+ // Base is a heapnumber. Load it into double register.
+ __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+
+ __ bind(&convert_exponent);
+ __ JumpIfNotSmi(exponent, &exponent_not_smi);
+ __ SmiUntag(exponent);
+
+ // The base is in a double register and the exponent is
+ // an untagged smi. Allocate a heap number and call a
+ // C function for integer exponents. The register containing
+ // the heap number is callee-saved.
+ __ AllocateHeapNumber(heapnumber,
+ scratch,
+ scratch2,
+ heapnumbermap,
+ &call_runtime);
+ __ push(lr);
+ __ PrepareCallCFunction(1, 1, scratch);
+ __ SetCallCDoubleArguments(double_base, exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(masm->isolate()),
+ 1, 1);
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
+ __ vstr(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ __ mov(r0, heapnumber);
+ __ Ret(2 * kPointerSize);
+
+ __ bind(&exponent_not_smi);
+ __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+ __ cmp(scratch, heapnumbermap);
+ __ b(ne, &call_runtime);
+ // Exponent is a heapnumber. Load it into double register.
+ __ vldr(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
+
+ // The base and the exponent are in double registers.
+ // Allocate a heap number and call a C function for
+ // double exponents. The register containing
+ // the heap number is callee-saved.
+ __ AllocateHeapNumber(heapnumber,
+ scratch,
+ scratch2,
+ heapnumbermap,
+ &call_runtime);
+ __ push(lr);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
+ __ vstr(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ __ mov(r0, heapnumber);
+ __ Ret(2 * kPointerSize);
}
- __ bind(&done);
- __ Ret();
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+}
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ push(r0);
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
- break;
- default:
- UNREACHABLE();
- }
+
+bool CEntryStub::NeedsImmovableCode() {
+ return true;
}
@@ -4016,15 +3342,17 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// r4: number of arguments including receiver (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
// r6: pointer to the first argument (C callee-saved)
+ Isolate* isolate = masm->isolate();
if (do_gc) {
// Passing r0.
- __ PrepareCallCFunction(1, r1);
- __ CallCFunction(ExternalReference::perform_gc_function(), 1);
+ __ PrepareCallCFunction(1, 0, r1);
+ __ CallCFunction(ExternalReference::perform_gc_function(isolate),
+ 1, 0);
}
ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth();
+ ExternalReference::heap_always_allocate_scope_depth(isolate);
if (always_allocate) {
__ mov(r0, Operand(scope_depth));
__ ldr(r1, MemOperand(r0));
@@ -4053,14 +3381,12 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
}
#endif
- // TODO(1242173): To let the GC traverse the return address of the exit
- // frames, we need to know where the return address is. Right now,
- // we store it on the stack to be able to find it again, but we never
- // restore from it in case of changes, which makes it impossible to
- // support moving the C entry code stub. This should be fixed, but currently
- // this is OK because the CEntryStub gets generated so early in the V8 boot
- // sequence that it is not moving ever.
+ __ mov(r2, Operand(ExternalReference::isolate_address()));
+ // To let the GC traverse the return address of the exit frames, we need to
+ // know where the return address is. The CEntryStub is unmovable, so
+ // we can store the address on the stack to be able to find it again and
+ // we never have to restore it, because it will not change.
// Compute the return address in lr to return to after the jump below. Pc is
// already at '+ 8' from the current instruction but return is after three
// instructions so add another 4 to pc to get the return address.
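// (E.g. if the instruction that computes lr below sits at address A, pc
// reads as A + 8 while the return point is A + 12, hence the extra 4.)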
@@ -4106,15 +3432,16 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ b(eq, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
__ ldr(r3, MemOperand(ip));
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ isolate)));
__ ldr(r0, MemOperand(ip));
__ str(r3, MemOperand(ip));
// Special handling of termination exceptions which are uncatchable
// by javascript code.
- __ cmp(r0, Operand(Factory::termination_exception()));
+ __ cmp(r0, Operand(isolate->factory()->termination_exception()));
__ b(eq, throw_termination_exception);
// Handle normal exception.
@@ -4209,12 +3536,26 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Save callee-saved registers (incl. cp and fp), sp, and lr
__ stm(db_w, sp, kCalleeSaved | lr.bit());
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Save callee-saved vfp registers.
+ __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
+ // Set up the reserved register for 0.0.
+ __ vmov(kDoubleRegZero, 0.0);
+ }
+
// Get address of argv, see stm above.
// r0: code entry
// r1: function
// r2: receiver
// r3: argc
- __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv
+
+ // Setup argv in r4.
+ int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
+ if (CpuFeatures::IsSupported(VFP3)) {
+ offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
+ }
+ __ ldr(r4, MemOperand(sp, offset_to_argv));
// Push a frame with special values setup to mark it as an entry frame.
// r0: code entry
@@ -4222,11 +3563,13 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r2: receiver
// r3: argc
// r4: argv
+ Isolate* isolate = masm->isolate();
__ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
__ mov(r7, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
- __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ __ mov(r5,
+ Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate)));
__ ldr(r5, MemOperand(r5));
__ Push(r8, r7, r6, r5);
@@ -4235,11 +3578,20 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ Label non_outermost_js;
+ ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate);
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
__ ldr(r6, MemOperand(r5));
- __ cmp(r6, Operand(0, RelocInfo::NONE));
- __ str(fp, MemOperand(r5), eq);
+ __ cmp(r6, Operand(0));
+ __ b(ne, &non_outermost_js);
+ __ str(fp, MemOperand(r5));
+ __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ Label cont;
+ __ b(&cont);
+ __ bind(&non_outermost_js);
+ __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+ __ bind(&cont);
+ __ push(ip);
#endif
// Call a faked try-block that does the invoke.
@@ -4249,7 +3601,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// exception field in the JSEnv and return a failure sentinel.
// Coming in here the fp will be invalid because the PushTryHandler below
// sets it to 0 to signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ isolate)));
__ str(r0, MemOperand(ip));
__ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
__ b(&exit);
@@ -4264,9 +3617,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// saved values before returning a failure to C.
// Clear any pending exceptions.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
__ ldr(r5, MemOperand(ip));
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ isolate)));
__ str(r5, MemOperand(ip));
// Invoke the function by calling through JS entry trampoline builtin.
@@ -4280,10 +3634,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r3: argc
// r4: argv
if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+ isolate);
__ mov(ip, Operand(construct_entry));
} else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
+ ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
__ mov(ip, Operand(entry));
}
__ ldr(ip, MemOperand(ip)); // deref address
@@ -4294,30 +3649,26 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ mov(lr, Operand(pc));
masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Unlink this frame from the handler chain. When reading the
- // address of the next handler, there is no need to use the address
- // displacement since the current stack pointer (sp) points directly
- // to the stack handler.
- __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
- __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
- __ str(r3, MemOperand(ip));
- // No need to restore registers
- __ add(sp, sp, Operand(StackHandlerConstants::kSize));
+ // Unlink this frame from the handler chain.
+ __ PopTryHandler();
+ __ bind(&exit); // r0 holds result
#ifdef ENABLE_LOGGING_AND_PROFILING
- // If current FP value is the same as js_entry_sp value, it means that
- // the current function is the outermost.
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ pop(r5);
+ __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ b(ne, &non_outermost_js_2);
+ __ mov(r6, Operand(0));
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
- __ ldr(r6, MemOperand(r5));
- __ cmp(fp, Operand(r6));
- __ mov(r6, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ str(r6, MemOperand(r5), eq);
+ __ str(r6, MemOperand(r5));
+ __ bind(&non_outermost_js_2);
#endif
- __ bind(&exit); // r0 holds result
// Restore the top frame descriptors from the stack.
__ pop(r3);
- __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ __ mov(ip,
+ Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate)));
__ str(r3, MemOperand(ip));
// Reset the stack to the callee saved registers.
@@ -4329,6 +3680,13 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ mov(lr, Operand(pc));
}
#endif
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Restore callee-saved vfp registers.
+ __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
+ }
+
__ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}
@@ -4474,7 +3832,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ b(ne, &slow);
// Null is not instance of anything.
- __ cmp(scratch, Operand(Factory::null_value()));
+ __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
__ b(ne, &object_not_null);
__ mov(r0, Operand(Smi::FromInt(1)));
__ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -4497,11 +3855,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
if (HasArgsInRegisters()) {
__ Push(r0, r1);
}
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
__ EnterInternalFrame();
__ Push(r0, r1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
__ LeaveInternalFrame();
__ cmp(r0, Operand(0));
__ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
@@ -4568,12 +3926,233 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
// Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
+ __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(ne, &runtime);
+
+ // Patch the arguments.length and the parameters pointer in the current frame.
+ __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ str(r2, MemOperand(sp, 0 * kPointerSize));
+ __ add(r3, r3, Operand(r2, LSL, 1));
+ __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ str(r3, MemOperand(sp, 1 * kPointerSize));
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ // Stack layout:
+ // sp[0] : number of parameters (tagged)
+ // sp[4] : address of receiver argument
+ // sp[8] : function
+ // Registers used over whole function:
+ // r6 : allocated object (tagged)
+ // r9 : mapped parameter count (tagged)
+
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+ // r1 = parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
+ __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(eq, &adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+ __ mov(r2, r1);
+ __ b(&try_allocate);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ add(r3, r3, Operand(r2, LSL, 1));
+ __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ str(r3, MemOperand(sp, 1 * kPointerSize));
+
+ // r1 = parameter count (tagged)
+ // r2 = argument count (tagged)
+ // Compute the mapped parameter count = min(r1, r2) in r1.
+ __ cmp(r1, Operand(r2));
+ __ mov(r1, Operand(r2), LeaveCC, gt);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map: has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
+ __ cmp(r1, Operand(Smi::FromInt(0)));
+ __ mov(r9, Operand(0), LeaveCC, eq);
+ __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
+ __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
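+ // (r1 holds a tagged smi, so r1 << 1 is already count * kPointerSize:
+ // the smi tag accounts for one of the two shifts.)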
+
+ // 2. Backing store.
+ __ add(r9, r9, Operand(r2, LSL, 1));
+ __ add(r9, r9, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of all three objects in one go.
+ __ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT);
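+ // The three objects are contiguous: the arguments object comes first,
+ // followed by the (optional) parameter map and then the backing store.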
+
+ // r0 = address of new object(s) (tagged)
+ // r2 = argument count (tagged)
+ // Get the arguments boilerplate from the current (global) context into r4.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
+
+ __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+ __ cmp(r1, Operand(0));
+ __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
+ __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
+
+ // r0 = address of new object (tagged)
+ // r1 = mapped parameter count (tagged)
+ // r2 = argument count (tagged)
+ // r4 = address of boilerplate object (tagged)
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ ldr(r3, FieldMemOperand(r4, i));
+ __ str(r3, FieldMemOperand(r0, i));
+ }
+
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
+ const int kCalleeOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ str(r3, FieldMemOperand(r0, kCalleeOffset));
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ str(r2, FieldMemOperand(r0, kLengthOffset));
+
+ // Setup the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, r4 will point there, otherwise
+ // it will point to the backing store.
+ __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
+ __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+
+ // r0 = address of new object (tagged)
+ // r1 = mapped parameter count (tagged)
+ // r2 = argument count (tagged)
+ // r4 = address of parameter map or backing store (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ cmp(r1, Operand(Smi::FromInt(0)));
+ // Move backing store address to r3, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mov(r3, r4, LeaveCC, eq);
+ __ b(eq, &skip_parameter_map);
+
+ __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
+ __ add(r6, r1, Operand(Smi::FromInt(2)));
+ __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ add(r6, r4, Operand(r1, LSL, 1));
+ __ add(r6, r6, Operand(kParameterMapHeaderSize));
+ __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
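+ // E.g. with parameter_count == 4 and mapped_parameter_count == 2, the
+ // two map slots get context indices MIN_CONTEXT_SLOTS + 3 and
+ // MIN_CONTEXT_SLOTS + 2.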
+ Label parameters_loop, parameters_test;
+ __ mov(r6, r1);
+ __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
+ __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ sub(r9, r9, Operand(r1));
+ __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
+ __ add(r3, r4, Operand(r6, LSL, 1));
+ __ add(r3, r3, Operand(kParameterMapHeaderSize));
+
+ // r6 = loop variable (tagged)
+ // r1 = mapping index (tagged)
+ // r3 = address of backing store (tagged)
+ // r4 = address of parameter map (tagged)
+ // r5 = temporary scratch (e.g. for address calculation)
+ // r7 = the hole value
+ __ jmp(&parameters_test);
+
+ __ bind(&parameters_loop);
+ __ sub(r6, r6, Operand(Smi::FromInt(1)));
+ __ mov(r5, Operand(r6, LSL, 1));
+ __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ str(r9, MemOperand(r4, r5));
+ __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ str(r7, MemOperand(r3, r5));
+ __ add(r9, r9, Operand(Smi::FromInt(1)));
+ __ bind(&parameters_test);
+ __ cmp(r6, Operand(Smi::FromInt(0)));
+ __ b(ne, &parameters_loop);
+
+ __ bind(&skip_parameter_map);
+ // r2 = argument count (tagged)
+ // r3 = address of backing store (tagged)
+ // r5 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
+ __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
+ __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
+
+ Label arguments_loop, arguments_test;
+ __ mov(r9, r1);
+ __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
+ __ sub(r4, r4, Operand(r9, LSL, 1));
+ __ jmp(&arguments_test);
+
+ __ bind(&arguments_loop);
+ __ sub(r4, r4, Operand(kPointerSize));
+ __ ldr(r6, MemOperand(r4, 0));
+ __ add(r5, r3, Operand(r9, LSL, 1));
+ __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
+ __ add(r9, r9, Operand(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ cmp(r9, Operand(r2));
+ __ b(lt, &arguments_loop);
+
+ // Return and remove the on-stack parameters.
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ // r2 = argument count (tagged)
+ __ bind(&runtime);
+ __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // sp[0] : number of parameters
+ // sp[4] : receiver displacement
+ // sp[8] : function
+ // Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
@@ -4601,35 +4180,31 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ mov(r1, Operand(r1, LSR, kSmiTagSize));
__ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
+ __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
// Do the allocation of both objects in one go.
- __ AllocateInNewSpace(
- r1,
- r0,
- r2,
- r3,
- &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+ __ AllocateInNewSpace(r1,
+ r0,
+ r2,
+ r3,
+ &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT |
+ SIZE_IN_WORDS));
// Get the arguments boilerplate from the current (global) context.
- int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
- __ ldr(r4, MemOperand(r4, offset));
+ __ ldr(r4, MemOperand(r4, Context::SlotOffset(
+ Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
__ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
- // Setup the callee in-object property.
- STATIC_ASSERT(Heap::arguments_callee_index == 0);
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
-
// Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::arguments_length_index == 1);
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
- __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
+ __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
// If there are no actual arguments, we're done.
Label done;
@@ -4641,12 +4216,13 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
+ __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
__ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
__ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
__ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop.
+ // Untag the length for the loop.
+ __ mov(r1, Operand(r1, LSR, kSmiTagSize));
// Copy the fixed array slots.
Label loop;
@@ -4669,7 +4245,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
@@ -4708,10 +4284,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Register last_match_info_elements = r6;
// Ensure that a RegExp stack is allocated.
+ Isolate* isolate = masm->isolate();
ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference::address_of_regexp_stack_memory_address(isolate);
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
+ ExternalReference::address_of_regexp_stack_memory_size(isolate);
__ mov(r0, Operand(address_of_regexp_stack_memory_size));
__ ldr(r0, MemOperand(r0, 0));
__ tst(r0, Operand(r0));
@@ -4720,8 +4297,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the first argument is a JSRegExp object.
__ ldr(r0, MemOperand(sp, kJSRegExpOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &runtime);
+ __ JumpIfSmi(r0, &runtime);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
__ b(ne, &runtime);
@@ -4757,8 +4333,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// regexp_data: RegExp data (FixedArray)
// Check that the second argument is a string.
__ ldr(subject, MemOperand(sp, kSubjectOffset));
- __ tst(subject, Operand(kSmiTagMask));
- __ b(eq, &runtime);
+ __ JumpIfSmi(subject, &runtime);
Condition is_string = masm->IsObjectStringType(subject, r0);
__ b(NegateCondition(is_string), &runtime);
// Get the length of the string to r3.
@@ -4771,8 +4346,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the third argument is a positive smi less than the subject
// string length. A negative value will be greater (unsigned comparison).
__ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &runtime);
+ __ JumpIfNotSmi(r0, &runtime);
__ cmp(r3, Operand(r0));
__ b(ls, &runtime);
@@ -4781,8 +4355,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// regexp_data: RegExp data (FixedArray)
// Check that the fourth object is a JSArray object.
__ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &runtime);
+ __ JumpIfSmi(r0, &runtime);
__ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
__ b(ne, &runtime);
// Check that the JSArray is in fast case.
@@ -4852,7 +4425,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ CompareObjectType(r7, r0, r0, CODE_TYPE);
__ b(ne, &runtime);
- // r3: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
// r7: code
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
@@ -4862,20 +4435,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(r1, Operand(r1, ASR, kSmiTagSize));
// r1: previous index
- // r3: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
// r7: code
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
+ __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
- static const int kRegExpExecuteArguments = 7;
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ static const int kRegExpExecuteArguments = 8;
static const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
// Arguments are before that on the stack or in registers.
+ // Argument 8 (sp[16]): Pass current isolate address.
+ __ mov(r0, Operand(ExternalReference::isolate_address()));
+ __ str(r0, MemOperand(sp, 4 * kPointerSize));
+
// Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
__ mov(r0, Operand(1));
__ str(r0, MemOperand(sp, 3 * kPointerSize));
@@ -4889,7 +4467,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ str(r0, MemOperand(sp, 2 * kPointerSize));
// Argument 5 (sp[4]): static offsets vector buffer.
- __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
+ __ mov(r0,
+ Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
__ str(r0, MemOperand(sp, 1 * kPointerSize));
// For arguments 4 and 3 get string length, calculate start of string data and
@@ -4937,9 +4516,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ mov(r1, Operand(ExternalReference::the_hole_value_location()));
+ __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
__ ldr(r1, MemOperand(r1, 0));
- __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ isolate)));
__ ldr(r0, MemOperand(r2, 0));
__ cmp(r0, r1);
__ b(eq, &runtime);
@@ -4959,7 +4539,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&failure);
// For failure and exception return null.
- __ mov(r0, Operand(Factory::null_value()));
+ __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
@@ -4992,7 +4572,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector();
+ ExternalReference::address_of_static_offsets_vector(isolate);
__ mov(r2, Operand(address_of_static_offsets_vector));
// r1: number of capture registers
@@ -5030,11 +4610,12 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
const int kMaxInlineLength = 100;
Label slowcase;
Label done;
+ Factory* factory = masm->isolate()->factory();
+
__ ldr(r1, MemOperand(sp, kPointerSize * 2));
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- __ tst(r1, Operand(kSmiTagMask));
- __ b(ne, &slowcase);
+ __ JumpIfNotSmi(r1, &slowcase);
__ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
__ b(hi, &slowcase);
// Smi-tagging is equivalent to multiplying by 2.
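// (With kSmiTag == 0 and kSmiTagSize == 1, Smi::FromInt(n) is n << 1,
// e.g. Smi::FromInt(5) has the bit pattern of 10.)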
@@ -5064,7 +4645,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Interleave operations for better latency.
__ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
__ add(r3, r0, Operand(JSRegExpResult::kSize));
- __ mov(r4, Operand(Factory::empty_fixed_array()));
+ __ mov(r4, Operand(factory->empty_fixed_array()));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
__ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
__ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
@@ -5085,13 +4666,13 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// r5: Number of elements in array, untagged.
// Set map.
- __ mov(r2, Operand(Factory::fixed_array_map()));
+ __ mov(r2, Operand(factory->fixed_array_map()));
__ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
// Set FixedArray length.
__ mov(r6, Operand(r5, LSL, kSmiTagSize));
__ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
// Fill contents of fixed-array with the-hole.
- __ mov(r2, Operand(Factory::the_hole_value()));
+ __ mov(r2, Operand(factory->the_hole_value()));
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// Fill fixed array elements with hole.
// r0: JSArray, tagged.
@@ -5118,30 +4699,22 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow;
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
+ // The receiver might implicitly be the global object. This is
+ // indicated by passing the hole as the receiver to the call
+ // function stub.
+ if (ReceiverMightBeImplicit()) {
+ Label call;
// Get the receiver from the stack.
// function, receiver [, arguments]
- Label receiver_is_value, receiver_is_js_object;
- __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ JumpIfSmi(r1, &receiver_is_value);
-
- // Check if the receiver is a valid JS object.
- __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
- __ b(ge, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(r1);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
- __ LeaveInternalFrame();
- __ str(r0, MemOperand(sp, argc_ * kPointerSize));
-
- __ bind(&receiver_is_js_object);
+ __ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
+ // Call as function is indicated with the hole.
+ __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
+ __ b(ne, &call);
+ // Patch the receiver on the stack with the global receiver object.
+ __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ __ str(r1, MemOperand(sp, argc_ * kPointerSize));
+ __ bind(&call);
}
// Get the function to call from the stack.
@@ -5158,7 +4731,23 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Fast-case: Invoke the function now.
// r1: pushed function
ParameterCount actual(argc_);
- __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+
+ if (ReceiverMightBeImplicit()) {
+ Label call_as_function;
+ __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
+ __ b(eq, &call_as_function);
+ __ InvokeFunction(r1,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ CALL_AS_METHOD);
+ __ bind(&call_as_function);
+ }
+ __ InvokeFunction(r1,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ CALL_AS_FUNCTION);
// Slow-case: Non-function called.
__ bind(&slow);
@@ -5168,7 +4757,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ mov(r0, Operand(argc_)); // Setup the number of arguments.
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
- __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
@@ -5181,7 +4770,8 @@ const char* CompareStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* cc_name;
@@ -5340,7 +4930,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
scratch_,
Heap::kHeapNumberMapRootIndex,
index_not_number_,
- true);
+ DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
__ Push(object_, index_);
__ push(index_); // Consumed by runtime conversion function.
@@ -5394,7 +4984,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ b(ne, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged ascii char code.
+ // At this point code register contains smi tagged ASCII char code.
STATIC_ASSERT(kSmiTag == 0);
__ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
@@ -5726,7 +5316,6 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register symbol_table = c2;
__ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
- // Load undefined value
Register undefined = scratch4;
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
@@ -5747,6 +5336,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// mask: capacity mask
// first_symbol_table_element: address of the first element of
// the symbol table
+ // undefined: the undefined object
// scratch: -
// Perform a number of probes in the symbol table.
@@ -5774,20 +5364,32 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
kPointerSizeLog2));
// If entry is undefined no string with this hash can be found.
- __ cmp(candidate, undefined);
+ Label is_string;
+ __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
+ __ b(ne, &is_string);
+
+ __ cmp(undefined, candidate);
__ b(eq, not_found);
+ // Must be null (deleted entry).
+ if (FLAG_debug_code) {
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(ip, candidate);
+ __ Assert(eq, "oddball in symbol table is not undefined or null");
+ }
+ __ jmp(&next_probe[i]);
+
+ __ bind(&is_string);
+
+ // Check that the candidate is a non-external ASCII string. The instance
+ // type is still in the scratch register from the CompareObjectType
+ // operation.
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
// If length is not 2 the string is not a candidate.
__ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
__ cmp(scratch, Operand(Smi::FromInt(2)));
__ b(ne, &next_probe[i]);
- // Check that the candidate is a non-external ascii string.
- __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch,
- &next_probe[i]);
-
// Check if the two characters match.
// Assumes that word load is little endian.
__ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
@@ -5862,7 +5464,6 @@ void SubStringStub::Generate(MacroAssembler* masm) {
static const int kFromOffset = 1 * kPointerSize;
static const int kStringOffset = 2 * kPointerSize;
-
// Check bounds and smi-ness.
Register to = r6;
Register from = r7;
@@ -5895,8 +5496,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Make sure first argument is a sequential (or flat) string.
__ ldr(r5, MemOperand(sp, kStringOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(r5, Operand(kSmiTagMask));
- __ b(eq, &runtime);
+ __ JumpIfSmi(r5, &runtime);
Condition is_string = masm->IsObjectStringType(r5, r1);
__ b(NegateCondition(is_string), &runtime);
@@ -5943,7 +5543,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r3: from index (untagged smi)
// r5: string.
// r7 (a.k.a. from): from offset (smi)
- // Check for flat ascii string.
+ // Check for flat ASCII string.
Label non_ascii_flat;
__ tst(r1, Operand(kStringEncodingMask));
STATIC_ASSERT(kTwoByteStringTag == 0);
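// A non-zero encoding bit therefore means ASCII, as kTwoByteStringTag is 0.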
@@ -5963,7 +5563,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
Label make_two_character_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
@@ -5972,7 +5573,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&make_two_character_string);
__ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
__ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
@@ -5998,7 +5599,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
COPY_ASCII | DEST_ALWAYS_ALIGNED);
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
@@ -6030,7 +5631,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(
masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
@@ -6040,6 +5641,45 @@ void SubStringStub::Generate(MacroAssembler* masm) {
}
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ Register length = scratch1;
+
+ // Compare lengths.
+ Label strings_not_equal, check_zero_length;
+ __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
+ __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ cmp(length, scratch2);
+ __ b(eq, &check_zero_length);
+ __ bind(&strings_not_equal);
+ __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
+ __ Ret();
+
+ // Check if the length is zero.
+ Label compare_chars;
+ __ bind(&check_zero_length);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(length, Operand(length));
+ __ b(ne, &compare_chars);
+ __ mov(r0, Operand(Smi::FromInt(EQUAL)));
+ __ Ret();
+
+ // Compare characters.
+ __ bind(&compare_chars);
+ GenerateAsciiCharsCompareLoop(masm,
+ left, right, length, scratch2, scratch3,
+ &strings_not_equal);
+
+ // Characters are equal.
+ __ mov(r0, Operand(Smi::FromInt(EQUAL)));
+ __ Ret();
+}
+
+
void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
@@ -6047,7 +5687,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register scratch2,
Register scratch3,
Register scratch4) {
- Label compare_lengths;
+ Label result_not_equal, compare_lengths;
// Find minimum length and length difference.
__ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
__ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
@@ -6059,49 +5699,61 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ tst(min_length, Operand(min_length));
__ b(eq, &compare_lengths);
- // Untag smi.
- __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));
-
- // Setup registers so that we only need to increment one register
- // in the loop.
- __ add(scratch2, min_length,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ add(left, left, Operand(scratch2));
- __ add(right, right, Operand(scratch2));
- // Registers left and right points to the min_length character of strings.
- __ rsb(min_length, min_length, Operand(-1));
- Register index = min_length;
- // Index starts at -min_length.
+ // Compare loop.
+ GenerateAsciiCharsCompareLoop(masm,
+ left, right, min_length, scratch2, scratch4,
+ &result_not_equal);
- {
- // Compare loop.
- Label loop;
- __ bind(&loop);
- // Compare characters.
- __ add(index, index, Operand(1), SetCC);
- __ ldrb(scratch2, MemOperand(left, index), ne);
- __ ldrb(scratch4, MemOperand(right, index), ne);
- // Skip to compare lengths with eq condition true.
- __ b(eq, &compare_lengths);
- __ cmp(scratch2, scratch4);
- __ b(eq, &loop);
- // Fallthrough with eq condition false.
- }
- // Compare lengths - strings up to min-length are equal.
+ // Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
- // Use zero length_delta as result.
- __ mov(r0, Operand(length_delta), SetCC, eq);
- // Fall through to here if characters compare not-equal.
+ // Use length_delta as result if it's zero.
+ __ mov(r0, Operand(length_delta), SetCC);
+ __ bind(&result_not_equal);
+ // Conditionally update the result based on either length_delta or
+ // the last comparison performed in the loop above.
__ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
__ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
__ Ret();
}
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* chars_not_equal) {
+ // Change index to run from -length to -1 by adding length to string
+ // start. This means that loop ends when index reaches zero, which
+ // doesn't need an additional compare.
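+ // E.g. for length == 3 the index takes the values -3, -2, -1; the add
+ // in the loop sets the condition flags, so reaching zero ends the loop.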
+ __ SmiUntag(length);
+ __ add(scratch1, length,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(left, left, Operand(scratch1));
+ __ add(right, right, Operand(scratch1));
+ __ rsb(length, length, Operand(0));
+ Register index = length; // index = -length;
+
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ __ ldrb(scratch1, MemOperand(left, index));
+ __ ldrb(scratch2, MemOperand(right, index));
+ __ cmp(scratch1, scratch2);
+ __ b(ne, chars_not_equal);
+ __ add(index, index, Operand(1), SetCC);
+ __ b(ne, &loop);
+}
+
+
void StringCompareStub::Generate(MacroAssembler* masm) {
Label runtime;
+ Counters* counters = masm->isolate()->counters();
+
// Stack frame on entry.
// sp[0]: right string
// sp[4]: left string
@@ -6113,17 +5765,17 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
+ __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
__ bind(&not_same);
- // Check that both objects are sequential ascii strings.
+ // Check that both objects are sequential ASCII strings.
__ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
- // Compare flat ascii strings natively. Remove arguments from stack first.
- __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+ // Compare flat ASCII strings natively. Remove arguments from stack first.
+ __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
@@ -6138,6 +5790,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Label string_add_runtime, call_builtin;
Builtins::JavaScript builtin_id = Builtins::ADD;
+ Counters* counters = masm->isolate()->counters();
+
// Stack on entry:
// sp[0]: second argument (right).
// sp[4]: first argument (left).
@@ -6193,7 +5847,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ cmp(r3, Operand(Smi::FromInt(0)), ne);
__ b(ne, &strings_not_empty); // If either string was empty, return r0.
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -6214,12 +5868,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Adding two lengths can't overflow.
STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
__ add(r6, r2, Operand(r3));
- // Use the runtime system when adding two one character strings, as it
- // contains optimizations for this specific case using the symbol table.
+ // Use the symbol table when adding two one-character strings, as it
+ // helps later optimizations to return a symbol here.
__ cmp(r6, Operand(2));
__ b(ne, &longer_than_two);
- // Check that both strings are non-external ascii strings.
+ // Check that both strings are non-external ASCII strings.
if (flags_ != NO_STRING_ADD_FLAGS) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -6238,7 +5892,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Label make_two_character_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -6251,7 +5905,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(r6, Operand(2));
__ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
__ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -6267,7 +5921,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ b(hs, &string_add_runtime);
// If result is not supposed to be flat, allocate a cons string object.
- // If both strings are ascii the result is an ascii cons string.
+ // If both strings are ASCII the result is an ASCII cons string.
if (flags_ != NO_STRING_ADD_FLAGS) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -6288,13 +5942,13 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
__ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
__ mov(r0, Operand(r7));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
- // to contain only ascii characters.
+ // to contain only ASCII characters.
// r4: first instance type.
// r5: second instance type.
__ tst(r4, Operand(kAsciiDataHintMask));
@@ -6370,7 +6024,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r7: result string.
StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
__ mov(r0, Operand(r7));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -6411,7 +6065,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
__ mov(r0, Operand(r7));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -6421,7 +6075,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_JS);
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
}
}
@@ -6475,62 +6129,11 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
}
-void StringCharAtStub::Generate(MacroAssembler* masm) {
- // Expects two arguments (object, index) on the stack:
- // lr: return address
- // sp[0]: index
- // sp[4]: object
- Register object = r1;
- Register index = r0;
- Register scratch1 = r2;
- Register scratch2 = r3;
- Register result = r0;
-
- // Get object and index from the stack.
- __ pop(index);
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm);
- __ b(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kEmptyStringRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ mov(result, Operand(Smi::FromInt(0)));
- __ b(&done);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm, call_helper);
-
- __ bind(&done);
- __ Ret();
-}
-
-
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMIS);
Label miss;
__ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &miss);
+ __ JumpIfNotSmi(r2, &miss);
if (GetCondition() == eq) {
// For equality we do not care about the sign of the result.
@@ -6554,8 +6157,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
Label unordered;
Label miss;
__ and_(r2, r1, Operand(r0));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &generic_stub);
+ __ JumpIfSmi(r2, &generic_stub);
__ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
__ b(ne, &miss);
@@ -6597,12 +6199,114 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
+void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::SYMBOLS);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = r1;
+ Register right = r0;
+ Register tmp1 = r2;
+ Register tmp2 = r3;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are symbols.
+ __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ and_(tmp1, tmp1, Operand(tmp2));
+ __ tst(tmp1, Operand(kIsSymbolMask));
+ __ b(eq, &miss);
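+ // (ANDing the instance types keeps the symbol bit set only if both
+ // operands are symbols, which is why kSymbolTag != 0 is asserted.)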
+
+ // Symbols are compared by identity.
+ __ cmp(left, right);
+ // Make sure r0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(r0));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::STRINGS);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = r1;
+ Register right = r0;
+ Register tmp1 = r2;
+ Register tmp2 = r3;
+ Register tmp3 = r4;
+ Register tmp4 = r5;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are strings. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ orr(tmp3, tmp1, tmp2);
+ __ tst(tmp3, Operand(kIsNotStringMask));
+ __ b(ne, &miss);
+
+ // Fast check for identical strings.
+ __ cmp(left, right);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
+ __ Ret(eq);
+
+ // Handle not identical strings.
+
+ // Check that both strings are symbols. If they are, we're done
+ // because we already know they are not identical.
+ ASSERT(GetCondition() == eq);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ and_(tmp3, tmp1, Operand(tmp2));
+ __ tst(tmp3, Operand(kIsSymbolMask));
+ // Make sure r0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(r0));
+ __ Ret(ne);
+
+ // Check that both strings are sequential ASCII.
+ Label runtime;
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
+ &runtime);
+
+ // Compare flat ASCII strings. Returns when done.
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, left, right, tmp1, tmp2, tmp3);
+
+ // Handle more complex cases in runtime.
+ __ bind(&runtime);
+ __ Push(left, right);
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::OBJECTS);
Label miss;
__ and_(r2, r1, Operand(r0));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r2, &miss);
__ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
__ b(ne, &miss);
@@ -6623,7 +6327,8 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ push(lr);
// Call the runtime system in a fresh internal frame.
- ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
__ EnterInternalFrame();
__ Push(r1, r0);
__ mov(ip, Operand(Smi::FromInt(op_)));
@@ -6666,154 +6371,235 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
}
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements_map,
- Register elements,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* not_pixel_array,
- Label* key_not_smi,
- Label* out_of_range) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // elements - set to be the receiver's elements on exit.
- //
- // elements_map - set to be the map of the receiver's elements
- // on exit.
- //
- // result - holds the result of the pixel array load on exit,
- // tagged as a smi if successful.
- //
- // Scratch registers:
- //
- // scratch1 - used as a scratch register in map check, if map
- // check is successful, contains the length of the
- // pixel array, the pointer to external elements and
- // the untagged result.
- //
- // scratch2 - holds the untagged key.
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ String* name,
+ Register scratch0) {
+ // If names of slots in range from 1 to kProbes - 1 for the hash value are
+ // not equal to the name and kProbes-th slot is not used (its name is the
+ // undefined value), it guarantees the hash table doesn't contain the
+ // property. It's true even if some slots represent deleted properties
+ // (their names are the null value).
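+ // In other words, probing can stop at the first undefined entry: an
+ // insertion of the name would have used that slot or an earlier probe
+ // slot, so the name cannot occur at a later probe position.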
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // scratch0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = scratch0;
+ // Capacity is smi 2^n.
+ __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
+ __ sub(index, index, Operand(1));
+ __ and_(index, index, Operand(
+ Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
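+ // (Each StringDictionary entry spans three consecutive array slots:
+ // key, value and property details.)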
+
+ Register entity_name = scratch0;
+ // Having undefined at this place means the name is not contained.
+ ASSERT_EQ(kSmiTagSize, 1);
+ Register tmp = properties;
+ __ add(tmp, properties, Operand(index, LSL, 1));
+ __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+ ASSERT(!tmp.is(entity_name));
+ __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
+ __ cmp(entity_name, tmp);
+ __ b(eq, done);
+
+ if (i != kInlinedProbes - 1) {
+ // Stop if found the property.
+ __ cmp(entity_name, Operand(Handle<String>(name)));
+ __ b(eq, miss);
+
+ // Check if the entry name is not a symbol.
+ __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ ldrb(entity_name,
+ FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ tst(entity_name, Operand(kIsSymbolMask));
+ __ b(eq, miss);
+
+ // Restore the properties.
+ __ ldr(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ }
+ }
- // Some callers already have verified that the key is a smi. key_not_smi is
- // set to NULL as a sentinel for that case. Otherwise, an explicit check
- // to ensure the key is a smi must be added.
- if (key_not_smi != NULL) {
- __ JumpIfNotSmi(key, key_not_smi);
- } else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(key);
+ const int spill_mask =
+ (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
+ r2.bit() | r1.bit() | r0.bit());
+
+ __ stm(db_w, sp, spill_mask);
+ __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ mov(r1, Operand(Handle<String>(name)));
+ StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ MaybeObject* result = masm->TryCallStub(&stub);
+ if (result->IsFailure()) return result;
+ __ tst(r0, Operand(r0));
+ __ ldm(ia_w, sp, spill_mask);
+
+ __ b(eq, done);
+ __ b(ne, miss);
+ return result;
+}
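
The guarantee described at the top of GenerateNegativeLookup can be restated
in plain C++. The following is an illustrative sketch only, not part of this
patch: it models the dictionary as a vector of name slots, uses an empty
string as a stand-in for the undefined value, and follows the probe sequence
(hash + i + i * i) & mask given in the comments above.

#include <cstdint>
#include <string>
#include <vector>

// Returns true only when |name| is provably absent: every probed slot up
// to the first unused one differs from |name|. A mismatching, non-empty
// slot (including a deleted entry) just keeps the probe loop going.
bool DefinitelyAbsent(const std::vector<std::string>& slots,
                      const std::string& name,
                      uint32_t hash) {
  // |slots| must have power-of-two size, mirroring the smi 2^n capacity.
  uint32_t mask = static_cast<uint32_t>(slots.size()) - 1;
  for (uint32_t i = 0; i < slots.size(); i++) {
    uint32_t index = (hash + i + i * i) & mask;  // quadratic probing
    if (slots[index].empty()) return true;       // unused slot: absent
    if (slots[index] == name) return false;      // found the property
  }
  return false;  // probing saturated: cannot prove absence
}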
+
+
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+// If the lookup was successful, |scratch2| will be equal to
+// elements + 4 * index.
+void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2) {
+ // Assert that name contains a string.
+ if (FLAG_debug_code) __ AbortIfNotString(name);
+
+ // Compute the capacity mask.
+ __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
+ __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int
+ __ sub(scratch1, scratch1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up. Measurements done on Gmail indicate that 2 probes
+ // cover ~93% of loads from dictionaries.
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
+ if (i > 0) {
+ // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is
+      // right shifted as part of the following 'and' instruction.
+ ASSERT(StringDictionary::GetProbeOffset(i) <
+ 1 << (32 - String::kHashFieldOffset));
+ __ add(scratch2, scratch2, Operand(
+ StringDictionary::GetProbeOffset(i) << String::kHashShift));
}
+ __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ // scratch2 = scratch2 * 3.
+ __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
+
+ // Check if the key is identical to the name.
+ __ add(scratch2, elements, Operand(scratch2, LSL, 2));
+ __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
+ __ cmp(name, Operand(ip));
+ __ b(eq, done);
}
- __ SmiUntag(scratch2, key);
-
- // Verify that the receiver has pixel array elements.
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ CheckMap(elements, scratch1, Heap::kPixelArrayMapRootIndex,
- not_pixel_array, true);
-
- // Key must be in range of the pixel array.
- __ ldr(scratch1, FieldMemOperand(elements, PixelArray::kLengthOffset));
- __ cmp(scratch2, scratch1);
- __ b(hs, out_of_range); // unsigned check handles negative keys.
-
- // Perform the indexed load and tag the result as a smi.
- __ ldr(scratch1,
- FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
- __ ldrb(scratch1, MemOperand(scratch1, scratch2));
- __ SmiTag(r0, scratch1);
- __ Ret();
+
+ const int spill_mask =
+ (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
+ r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
+ ~(scratch1.bit() | scratch2.bit());
+
+ __ stm(db_w, sp, spill_mask);
+ __ Move(r0, elements);
+ __ Move(r1, name);
+ StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ __ CallStub(&stub);
+ __ tst(r0, Operand(r0));
+ __ mov(scratch2, Operand(r2));
+ __ ldm(ia_w, sp, spill_mask);
+
+ __ b(ne, done);
+ __ b(eq, miss);
}
-void GenerateFastPixelArrayStore(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register elements,
- Register elements_map,
- Register scratch1,
- Register scratch2,
- bool load_elements_from_receiver,
- bool load_elements_map_from_elements,
- Label* key_not_smi,
- Label* value_not_smi,
- Label* not_pixel_array,
- Label* out_of_range) {
- // Register use:
- // receiver - holds the receiver and is unchanged unless the
- // store succeeds.
- // key - holds the key (must be a smi) and is unchanged.
- // value - holds the value (must be a smi) and is unchanged.
- // elements - holds the element object of the receiver on entry if
- // load_elements_from_receiver is false, otherwise used
- // internally to store the pixel arrays elements and
- // external array pointer.
- // elements_map - holds the map of the element object if
- // load_elements_map_from_elements is false, otherwise
- // loaded with the element map.
- //
- Register external_pointer = elements;
- Register untagged_key = scratch1;
- Register untagged_value = scratch2;
+void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // Registers:
+  //  result (r0): holds the StringDictionary to probe on entry and the
+  //               lookup result on exit.
+  //  key (r1): holds the name to look up.
+  //  index (r2): will hold the index of the entry if the lookup succeeds;
+  //              may alias with result.
+  // Returns:
+  //  result is zero if the lookup failed, non-zero otherwise.
- if (load_elements_from_receiver) {
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- }
+ Register result = r0;
+ Register dictionary = r0;
+ Register key = r1;
+ Register index = r2;
+ Register mask = r3;
+ Register hash = r4;
+ Register undefined = r5;
+ Register entry_key = r6;
+
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+ __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
+ __ mov(mask, Operand(mask, ASR, kSmiTagSize));
+ __ sub(mask, mask, Operand(1));
+
+ __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
+
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
- // By passing NULL as not_pixel_array, callers signal that they have already
- // verified that the receiver has pixel array elements.
- if (not_pixel_array != NULL) {
- if (load_elements_map_from_elements) {
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+    // (The capacity is a smi and a power of 2, so mask = capacity - 1.)
+ if (i > 0) {
+ // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is
+      // right shifted as part of the following 'and' instruction.
+ ASSERT(StringDictionary::GetProbeOffset(i) <
+ 1 << (32 - String::kHashFieldOffset));
+ __ add(index, hash, Operand(
+ StringDictionary::GetProbeOffset(i) << String::kHashShift));
+ } else {
+ __ mov(index, Operand(hash));
}
- __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
- __ cmp(elements_map, ip);
- __ b(ne, not_pixel_array);
- } else {
- if (FLAG_debug_code) {
- // Map check should have already made sure that elements is a pixel array.
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
- __ cmp(elements_map, ip);
- __ Assert(eq, "Elements isn't a pixel array");
+ __ and_(index, mask, Operand(index, LSR, String::kHashShift));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
+
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ add(index, dictionary, Operand(index, LSL, 2));
+ __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+    // An undefined entry at this slot means the name is not in the table.
+ __ cmp(entry_key, Operand(undefined));
+ __ b(eq, &not_in_dictionary);
+
+    // Stop if we found the property.
+ __ cmp(entry_key, Operand(key));
+ __ b(eq, &in_dictionary);
+
+ if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ // Check if the entry name is not a symbol.
+ __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+ __ ldrb(entry_key,
+ FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+ __ tst(entry_key, Operand(kIsSymbolMask));
+ __ b(eq, &maybe_in_dictionary);
}
}
- // Some callers already have verified that the key is a smi. key_not_smi is
- // set to NULL as a sentinel for that case. Otherwise, add an explicit check
- // to ensure the key is a smi must be added.
- if (key_not_smi != NULL) {
- __ JumpIfNotSmi(key, key_not_smi);
- } else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(key);
- }
+ __ bind(&maybe_in_dictionary);
+  // If we are doing a negative lookup, then a probing failure should be
+  // treated as a lookup success. For a positive lookup, a probing failure
+  // should be treated as a lookup failure.
+ if (mode_ == POSITIVE_LOOKUP) {
+ __ mov(result, Operand(0));
+ __ Ret();
}
- __ SmiUntag(untagged_key, key);
-
- // Perform bounds check.
- __ ldr(scratch2, FieldMemOperand(elements, PixelArray::kLengthOffset));
- __ cmp(untagged_key, scratch2);
- __ b(hs, out_of_range); // unsigned check handles negative keys.
-
- __ JumpIfNotSmi(value, value_not_smi);
- __ SmiUntag(untagged_value, value);
+ __ bind(&in_dictionary);
+ __ mov(result, Operand(1));
+ __ Ret();
- // Clamp the value to [0..255].
- __ Usat(untagged_value, 8, Operand(untagged_value));
- // Get the pointer to the external array. This clobbers elements.
- __ ldr(external_pointer,
- FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
- __ strb(untagged_value, MemOperand(external_pointer, untagged_key));
+ __ bind(&not_in_dictionary);
+ __ mov(result, Operand(0));
__ Ret();
}
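
Putting the three routines together: the fast paths inline kInlinedProbes
probes and fall back to this stub, which continues up to kTotalProbes before
giving up. A rough C++ model of the overall decision, including the
maybe_in_dictionary policy (illustrative only, with simplified types):

#include <cstdint>
#include <string>
#include <vector>

enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

static const int kTotalProbes = 20;  // matches the stub's constant

// Model of one probe over a power-of-two table; an empty string stands in
// for the undefined value. Returns +1 for found, -1 for provably absent,
// 0 for "keep probing".
int Probe(const std::vector<std::string>& slots,
          const std::string& name, uint32_t hash, int i) {
  uint32_t mask = static_cast<uint32_t>(slots.size()) - 1;
  uint32_t index = (hash + i + i * i) & mask;
  if (slots[index].empty()) return -1;
  if (slots[index] == name) return +1;
  return 0;
}

// Returns the stub's r0 result: non-zero means "treat as found".
int Lookup(const std::vector<std::string>& slots, const std::string& name,
           uint32_t hash, LookupMode mode) {
  for (int i = 0; i < kTotalProbes; i++) {
    int outcome = Probe(slots, name, hash, i);
    if (outcome == +1) return 1;  // in_dictionary
    if (outcome == -1) return 0;  // not_in_dictionary
  }
  // maybe_in_dictionary: a negative lookup must be conservative and
  // report a possible hit; a positive lookup reports failure instead.
  return mode == NEGATIVE_LOOKUP ? 1 : 0;
}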
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index e0d05a3b8..8e3e9dc00 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -38,202 +38,128 @@ namespace internal {
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public CodeStub {
public:
- explicit TranscendentalCacheStub(TranscendentalCache::Type type)
- : type_(type) {}
+ enum ArgumentType {
+ TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
+ UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
+ };
+
+ TranscendentalCacheStub(TranscendentalCache::Type type,
+ ArgumentType argument_type)
+ : type_(type), argument_type_(argument_type) { }
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
+ ArgumentType argument_type_;
+ void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
+
Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_; }
+ int MinorKey() { return type_ | argument_type_; }
Runtime::FunctionId RuntimeFunction();
};
-class ToBooleanStub: public CodeStub {
- public:
- explicit ToBooleanStub(Register tos) : tos_(tos) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register tos_;
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return tos_.code(); }
-};
-
-
-class GenericBinaryOpStub : public CodeStub {
+class UnaryOpStub: public CodeStub {
public:
- static const int kUnknownIntValue = -1;
-
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- Register lhs,
- Register rhs,
- int constant_rhs = kUnknownIntValue)
+ UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
: op_(op),
mode_(mode),
- lhs_(lhs),
- rhs_(rhs),
- constant_rhs_(constant_rhs),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
- runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
- name_(NULL) { }
+ operand_type_(UnaryOpIC::UNINITIALIZED),
+ name_(NULL) {
+ }
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
+ UnaryOpStub(
+ int key,
+ UnaryOpIC::TypeInfo operand_type)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
- lhs_(LhsRegister(RegisterBits::decode(key))),
- rhs_(RhsRegister(RegisterBits::decode(key))),
- constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
- runtime_operands_type_(type_info),
- name_(NULL) { }
+ operand_type_(operand_type),
+ name_(NULL) {
+ }
private:
Token::Value op_;
- OverwriteMode mode_;
- Register lhs_;
- Register rhs_;
- int constant_rhs_;
- bool specialized_on_rhs_;
- BinaryOpIC::TypeInfo runtime_operands_type_;
- char* name_;
-
- static const int kMaxKnownRhs = 0x40000000;
- static const int kKnownRhsKeyBits = 6;
+ UnaryOverwriteMode mode_;
-  // Minor key encoding in 18 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 6> {};
- class TypeInfoBits: public BitField<int, 8, 3> {};
- class RegisterBits: public BitField<bool, 11, 1> {};
- class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
+ // Operand type information determined at runtime.
+ UnaryOpIC::TypeInfo operand_type_;
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- // Encode the parameters in a unique 18 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | KnownIntBits::encode(MinorKeyForKnownInt())
- | TypeInfoBits::encode(runtime_operands_type_)
- | RegisterBits::encode(lhs_.is(r0));
- }
+ char* name_;
- void Generate(MacroAssembler* masm);
- void HandleNonSmiBitwiseOp(MacroAssembler* masm,
- Register lhs,
- Register rhs);
- void HandleBinaryOpSlowCases(MacroAssembler* masm,
- Label* not_smi,
- Register lhs,
- Register rhs,
- const Builtins::JavaScript& builtin);
- void GenerateTypeTransition(MacroAssembler* masm);
+ const char* GetName();
- static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
- if (constant_rhs == kUnknownIntValue) return false;
- if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
- if (op == Token::MOD) {
- if (constant_rhs <= 1) return false;
- if (constant_rhs <= 10) return true;
- if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
- return false;
- }
- return false;
+#ifdef DEBUG
+ void Print() {
+ PrintF("UnaryOpStub %d (op %s), "
+ "(mode %d, runtime_type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ UnaryOpIC::GetName(operand_type_));
}
+#endif
- int MinorKeyForKnownInt() {
- if (!specialized_on_rhs_) return 0;
- if (constant_rhs_ <= 10) return constant_rhs_ + 1;
- ASSERT(IsPowerOf2(constant_rhs_));
- int key = 12;
- int d = constant_rhs_;
- while ((d & 1) == 0) {
- key++;
- d >>= 1;
- }
- ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
- return key;
- }
+ class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+ class OpBits: public BitField<Token::Value, 1, 7> {};
+ class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
- int KnownBitsForMinorKey(int key) {
- if (!key) return 0;
- if (key <= 11) return key - 1;
- int d = 1;
- while (key != 12) {
- key--;
- d <<= 1;
- }
- return d;
+ Major MajorKey() { return UnaryOp; }
+ int MinorKey() {
+ return ModeBits::encode(mode_)
+ | OpBits::encode(op_)
+ | OperandTypeInfoBits::encode(operand_type_);
}
- Register LhsRegister(bool lhs_is_r0) {
- return lhs_is_r0 ? r0 : r1;
- }
+  // Note: A lot of the helper functions below will vanish once we use
+  // virtual functions instead of switches more often.
+ void Generate(MacroAssembler* masm);
- Register RhsRegister(bool lhs_is_r0) {
- return lhs_is_r0 ? r1 : r0;
- }
+ void GenerateTypeTransition(MacroAssembler* masm);
- bool HasSmiSmiFastPath() {
- return op_ != Token::DIV;
- }
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateSmiStubSub(MacroAssembler* masm);
+ void GenerateSmiStubBitNot(MacroAssembler* masm);
+ void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
+ void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
- bool ShouldGenerateSmiCode() {
- return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateHeapNumberStubSub(MacroAssembler* masm);
+ void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+ void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+ void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateGenericStubSub(MacroAssembler* masm);
+ void GenerateGenericStubBitNot(MacroAssembler* masm);
+ void GenerateGenericCodeFallback(MacroAssembler* masm);
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+ virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
+ return UnaryOpIC::ToState(operand_type_);
}
- const char* GetName();
-
virtual void FinishCode(Code* code) {
- code->set_binary_op_type(runtime_operands_type_);
+ code->set_unary_op_type(operand_type_);
}
-
-#ifdef DEBUG
- void Print() {
- if (!specialized_on_rhs_) {
- PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
- } else {
- PrintF("GenericBinaryOpStub (%s by %d)\n",
- Token::String(op_),
- constant_rhs_);
- }
- }
-#endif
};
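
The MinorKey above packs the stub's parameters into disjoint bit ranges with
BitField. A minimal stand-in for that helper (an assumption about its shape,
sufficient for illustration) shows how the 1-bit mode, 7-bit op and 3-bit
type fields round-trip:

#include <cassert>
#include <cstdint>

// Minimal stand-in for v8::internal::BitField: packs a value of type T
// into |size| bits starting at bit |shift|.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> shift);
  }
};

// Mirrors the layout declared above: 1 mode bit, 7 op bits, 3 type bits.
typedef BitField<int, 0, 1> ModeBits;
typedef BitField<int, 1, 7> OpBits;
typedef BitField<int, 8, 3> OperandTypeInfoBits;

int main() {
  uint32_t key = ModeBits::encode(1) | OpBits::encode(42) |
                 OperandTypeInfoBits::encode(5);
  assert(ModeBits::decode(key) == 1);
  assert(OpBits::decode(key) == 42);
  assert(OperandTypeInfoBits::decode(key) == 5);
  return 0;
}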
-class TypeRecordingBinaryOpStub: public CodeStub {
+class BinaryOpStub: public CodeStub {
public:
- TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+ BinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
mode_(mode),
- operands_type_(TRBinaryOpIC::UNINITIALIZED),
- result_type_(TRBinaryOpIC::UNINITIALIZED),
+ operands_type_(BinaryOpIC::UNINITIALIZED),
+ result_type_(BinaryOpIC::UNINITIALIZED),
name_(NULL) {
use_vfp3_ = CpuFeatures::IsSupported(VFP3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
- TypeRecordingBinaryOpStub(
+ BinaryOpStub(
int key,
- TRBinaryOpIC::TypeInfo operands_type,
- TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+ BinaryOpIC::TypeInfo operands_type,
+ BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
use_vfp3_(VFP3Bits::decode(key)),
@@ -252,8 +178,8 @@ class TypeRecordingBinaryOpStub: public CodeStub {
bool use_vfp3_;
// Operand type information determined at runtime.
- TRBinaryOpIC::TypeInfo operands_type_;
- TRBinaryOpIC::TypeInfo result_type_;
+ BinaryOpIC::TypeInfo operands_type_;
+ BinaryOpIC::TypeInfo result_type_;
char* name_;
@@ -261,12 +187,12 @@ class TypeRecordingBinaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
- PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+ PrintF("BinaryOpStub %d (op %s), "
"(mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
- TRBinaryOpIC::GetName(operands_type_));
+ BinaryOpIC::GetName(operands_type_));
}
#endif
@@ -274,10 +200,10 @@ class TypeRecordingBinaryOpStub: public CodeStub {
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class VFP3Bits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+ class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+ class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
- Major MajorKey() { return TypeRecordingBinaryOp; }
+ Major MajorKey() { return BinaryOp; }
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
@@ -294,6 +220,7 @@ class TypeRecordingBinaryOpStub: public CodeStub {
Label* not_numbers,
Label* gc_required);
void GenerateSmiCode(MacroAssembler* masm,
+ Label* use_runtime,
Label* gc_required,
SmiCodeGenerateHeapNumberResults heapnumber_results);
void GenerateLoadArguments(MacroAssembler* masm);
@@ -304,6 +231,7 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateOddballStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateAddStrings(MacroAssembler* masm);
void GenerateCallRuntime(MacroAssembler* masm);
@@ -318,15 +246,15 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
- virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
- return TRBinaryOpIC::ToState(operands_type_);
+ return BinaryOpIC::ToState(operands_type_);
}
virtual void FinishCode(Code* code) {
- code->set_type_recording_binary_op_type(operands_type_);
- code->set_type_recording_binary_op_result_type(result_type_);
+ code->set_binary_op_type(operands_type_);
+ code->set_binary_op_result_type(result_type_);
}
friend class CodeGenerator;
@@ -386,8 +314,7 @@ class StringCompareStub: public CodeStub {
public:
StringCompareStub() { }
- // Compare two flat ASCII strings and returns result in r0.
- // Does not use the stack.
+ // Compares two flat ASCII strings and returns result in r0.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
@@ -396,107 +323,27 @@ class StringCompareStub: public CodeStub {
Register scratch3,
Register scratch4);
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-// This stub can do a fast mod operation without using fp.
-// It is tail called from the GenericBinaryOpStub and it always
-// returns an answer. It never causes GC so it doesn't need a real frame.
-//
-// The inputs are always positive Smis. This is never called
-// where the denominator is a power of 2. We handle that separately.
-//
-// If we consider the denominator as an odd number multiplied by a power of 2,
-// then:
-// * The exponent (power of 2) is in the shift_distance register.
-// * The odd number is in the odd_number register. It is always in the range
-// of 3 to 25.
-// * The bits from the numerator that are to be copied to the answer (there are
-// shift_distance of them) are in the mask_bits register.
-// * The other bits of the numerator have been shifted down and are in the lhs
-// register.
-class IntegerModStub : public CodeStub {
- public:
- IntegerModStub(Register result,
- Register shift_distance,
- Register odd_number,
- Register mask_bits,
- Register lhs,
- Register scratch)
- : result_(result),
- shift_distance_(shift_distance),
- odd_number_(odd_number),
- mask_bits_(mask_bits),
- lhs_(lhs),
- scratch_(scratch) {
- // We don't code these in the minor key, so they should always be the same.
- // We don't really want to fix that since this stub is rather large and we
- // don't want many copies of it.
- ASSERT(shift_distance_.is(r9));
- ASSERT(odd_number_.is(r4));
- ASSERT(mask_bits_.is(r3));
- ASSERT(scratch_.is(r5));
- }
+ // Compares two flat ASCII strings for equality and returns result
+ // in r0.
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
private:
- Register result_;
- Register shift_distance_;
- Register odd_number_;
- Register mask_bits_;
- Register lhs_;
- Register scratch_;
-
- // Minor key encoding in 16 bits.
- class ResultRegisterBits: public BitField<int, 0, 4> {};
- class LhsRegisterBits: public BitField<int, 4, 4> {};
-
- Major MajorKey() { return IntegerMod; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return ResultRegisterBits::encode(result_.code())
- | LhsRegisterBits::encode(lhs_.code());
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "IntegerModStub"; }
-
- // Utility functions.
- void DigitSum(MacroAssembler* masm,
- Register lhs,
- int mask,
- int shift,
- Label* entry);
- void DigitSum(MacroAssembler* masm,
- Register lhs,
- Register scratch,
- int mask,
- int shift1,
- int shift2,
- Label* entry);
- void ModGetInRangeBySubtraction(MacroAssembler* masm,
- Register lhs,
- int shift,
- int rhs);
- void ModReduce(MacroAssembler* masm,
- Register lhs,
- int max,
- int denominator);
- void ModAnswer(MacroAssembler* masm,
- Register result,
- Register shift_distance,
- Register mask_bits,
- Register sum_of_digits);
-
-
-#ifdef DEBUG
- void Print() { PrintF("IntegerModStub\n"); }
-#endif
+ virtual Major MajorKey() { return StringCompare; }
+ virtual int MinorKey() { return 0; }
+ virtual void Generate(MacroAssembler* masm);
+
+ static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* chars_not_equal);
};
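
The new GenerateFlatAsciiStringEquals/GenerateAsciiCharsCompareLoop pair
implements an equality check rather than an ordering compare. Its contract,
paraphrased as a C++ sketch of the semantics (not the generated assembly):

#include <cstring>
#include <string>

// Equality on flat ASCII strings: lengths must match, then a character
// compare that stops at the first mismatch (chars_not_equal in the stub).
bool FlatAsciiStringEquals(const std::string& left,
                           const std::string& right) {
  if (left.size() != right.size()) return false;
  return std::memcmp(left.data(), right.data(), left.size()) == 0;
}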
@@ -580,6 +427,9 @@ class RegExpCEntryStub: public CodeStub {
private:
Major MajorKey() { return RegExpCEntry; }
int MinorKey() { return 0; }
+
+ bool NeedsImmovableCode() { return true; }
+
const char* GetName() { return "RegExpCEntryStub"; }
};
@@ -599,59 +449,210 @@ class DirectCEntryStub: public CodeStub {
private:
Major MajorKey() { return DirectCEntry; }
int MinorKey() { return 0; }
+
+ bool NeedsImmovableCode() { return true; }
+
const char* GetName() { return "DirectCEntryStub"; }
};
-// Generate code to load an element from a pixel array. The receiver is assumed
-// to not be a smi and to have elements, the caller must guarantee this
-// precondition. If key is not a smi, then the generated code branches to
-// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
-// check has already been performed on key so that the smi check is not
-// generated. If key is not a valid index within the bounds of the pixel array,
-// the generated code jumps to out_of_range. receiver, key and elements are
-// unchanged throughout the generated code sequence.
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements_map,
- Register elements,
+class FloatingPointHelper : public AllStatic {
+ public:
+ enum Destination {
+ kVFPRegisters,
+ kCoreRegisters
+ };
+
+
+  // Loads smis from r0 and r1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the
+  // destination is floating point registers, VFP3 must be supported. If core
+  // registers are requested when VFP3 is supported, d6 and d7 will be
+  // scratched.
+ static void LoadSmis(MacroAssembler* masm,
+ Destination destination,
+ Register scratch1,
+ Register scratch2);
+
+  // Loads objects from r0 and r1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the
+  // destination is floating point registers, VFP3 must be supported. If core
+  // registers are requested when VFP3 is supported, d6 and d7 will still be
+  // scratched. If either r0 or r1 is not a number (neither a smi nor a heap
+  // number object), the not_number label is jumped to with r0 and r1 intact.
+ static void LoadOperands(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+
+  // Converts the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMAScript section 9.5: the value is
+  // truncated and brought into the range -2^31 .. +2^31 - 1.
+ static void ConvertNumberToInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ DwVfpRegister double_scratch,
+ Label* not_int32);
+
+ // Converts the integer (untagged smi) in |int_scratch| to a double, storing
+ // the result either in |double_dst| or |dst2:dst1|, depending on
+ // |destination|.
+ // Warning: The value in |int_scratch| will be changed in the process!
+ static void ConvertIntToDouble(MacroAssembler* masm,
+ Register int_scratch,
+ Destination destination,
+ DwVfpRegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register scratch2,
+ SwVfpRegister single_scratch);
+
+ // Load the number from object into double_dst in the double format.
+ // Control will jump to not_int32 if the value cannot be exactly represented
+ // by a 32-bit integer.
+ // Floating point value in the 32-bit integer range that are not exact integer
+ // won't be loaded.
+ static void LoadNumberAsInt32Double(MacroAssembler* masm,
+ Register object,
+ Destination destination,
+ DwVfpRegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ SwVfpRegister single_scratch,
+ Label* not_int32);
+
+ // Loads the number from object into dst as a 32-bit integer.
+ // Control will jump to not_int32 if the object cannot be exactly represented
+ // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be converted.
+ // scratch3 is not used when VFP3 is supported.
+ static void LoadNumberAsInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
Register scratch1,
Register scratch2,
- Register result,
- Label* not_pixel_array,
- Label* key_not_smi,
- Label* out_of_range);
-
-// Generate code to store an element into a pixel array, clamping values between
-// [0..255]. The receiver is assumed to not be a smi and to have elements, the
-// caller must guarantee this precondition. If key is not a smi, then the
-// generated code branches to key_not_smi. Callers can specify NULL for
-// key_not_smi to signal that a smi check has already been performed on key so
-// that the smi check is not generated. If value is not a smi, the generated
-// code will branch to value_not_smi. If the receiver doesn't have pixel array
-// elements, the generated code will branch to not_pixel_array, unless
-// not_pixel_array is NULL, in which case the caller must ensure that the
-// receiver has pixel array elements. If key is not a valid index within the
-// bounds of the pixel array, the generated code jumps to out_of_range. If
-// load_elements_from_receiver is true, then the elements of receiver is loaded
-// into elements, otherwise elements is assumed to already be the receiver's
-// elements. If load_elements_map_from_elements is true, elements_map is loaded
-// from elements, otherwise it is assumed to already contain the element map.
-void GenerateFastPixelArrayStore(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register elements,
- Register elements_map,
- Register scratch1,
- Register scratch2,
- bool load_elements_from_receiver,
- bool load_elements_map_from_elements,
- Label* key_not_smi,
- Label* value_not_smi,
- Label* not_pixel_array,
- Label* out_of_range);
+ Register scratch3,
+ DwVfpRegister double_scratch,
+ Label* not_int32);
+
+  // Generates non-VFP3 code to check if a double can be exactly represented
+  // by a 32-bit integer. This does not check for 0 or -0, which need to be
+  // checked for separately.
+  // Control jumps to not_int32 if the value is not a 32-bit integer, and
+  // falls through otherwise.
+  // src1 and src2 will be clobbered.
+ //
+ // Expected input:
+ // - src1: higher (exponent) part of the double value.
+ // - src2: lower (mantissa) part of the double value.
+ // Output status:
+  //    - dst: the 32 higher bits of the mantissa (mantissa[51:20]).
+ // - src2: contains 1.
+ // - other registers are clobbered.
+ static void DoubleIs32BitInteger(MacroAssembler* masm,
+ Register src1,
+ Register src2,
+ Register dst,
+ Register scratch,
+ Label* not_int32);
+
+ // Generates code to call a C function to do a double operation using core
+ // registers. (Used when VFP3 is not supported.)
+ // This code never falls through, but returns with a heap number containing
+ // the result in r0.
+  // Register heap_number_result must hold a heap number in which the
+  // result of the operation will be stored.
+ // Requires the following layout on entry:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+ static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch);
+
+ private:
+ static void LoadNumber(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register object,
+ DwVfpRegister dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+};
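
ConvertNumberToInt32 above cites ECMAScript's ToInt32, and
LoadNumberAsInt32Double/DoubleIs32BitInteger only accept doubles that are
exact 32-bit integers. A hedged sketch of both rules in portable C++; the
real stubs work on the raw exponent/mantissa words instead:

#include <cmath>
#include <cstdint>
#include <limits>

// ECMAScript ToInt32 (section 9.5): truncate toward zero, wrap mod 2^32,
// then map into [-2^31, 2^31 - 1]. NaN and infinities become 0.
int32_t ToInt32(double value) {
  if (!std::isfinite(value)) return 0;
  double truncated = std::trunc(value);
  // fmod keeps the sign of the dividend; fold into [0, 2^32) first.
  double wrapped = std::fmod(truncated, 4294967296.0);  // 2^32
  if (wrapped < 0) wrapped += 4294967296.0;
  uint32_t bits = static_cast<uint32_t>(wrapped);
  return static_cast<int32_t>(bits);
}

// The check behind LoadNumberAsInt32Double: a double is usable only if it
// is an exact 32-bit integer (0 and -0 are handled separately by the stubs).
bool DoubleIsExactInt32(double value) {
  return value == std::trunc(value) &&
         value >= std::numeric_limits<int32_t>::min() &&
         value <= static_cast<double>(std::numeric_limits<int32_t>::max());
}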
+
+
+class StringDictionaryLookupStub: public CodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+
+ void Generate(MacroAssembler* masm);
+
+ MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ String* name,
+ Register scratch0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1);
+
+ private:
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ static const int kCapacityOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("StringDictionaryLookupStub\n");
+ }
+#endif
+
+ Major MajorKey() { return StringDictionaryNegativeLookup; }
+
+ int MinorKey() {
+ return LookupModeBits::encode(mode_);
+ }
+
+ class LookupModeBits: public BitField<LookupMode, 0, 1> {};
+
+ LookupMode mode_;
+};
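
The kCapacityOffset and kElementsStartOffset constants above are byte
offsets into the dictionary's backing store, and kEntrySize = 3 explains the
index *= 3 scaling in the generated code. A toy illustration of the
arithmetic, using hypothetical header and index values (the real ones come
from StringDictionary and the target's pointer size):

#include <cstdio>

// Hypothetical stand-ins for illustration only.
static const int kPointerSize = 4;         // 32-bit ARM
static const int kHeaderSize = 8;          // assumed array header size
static const int kCapacityIndex = 0;       // assumed
static const int kElementsStartIndex = 3;  // assumed
static const int kEntrySize = 3;           // key, value, property details

// Byte offset of the capacity word, as in kCapacityOffset above.
int CapacityOffset() {
  return kHeaderSize + kCapacityIndex * kPointerSize;
}

// Byte offset of entry |i|'s key slot: the start of the elements area plus
// i * kEntrySize pointers, matching the index *= 3 scaling in the stub.
int EntryKeyOffset(int i) {
  return kHeaderSize +
         (kElementsStartIndex + i * kEntrySize) * kPointerSize;
}

int main() {
  printf("capacity at byte %d, entry 2 key at byte %d\n",
         CapacityOffset(), EntryKeyOffset(2));
  return 0;
}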
+
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/codegen-arm-inl.h b/deps/v8/src/arm/codegen-arm-inl.h
deleted file mode 100644
index 81ed2d043..000000000
--- a/deps/v8/src/arm/codegen-arm-inl.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_ARM_CODEGEN_ARM_INL_H_
-#define V8_ARM_CODEGEN_ARM_INL_H_
-
-#include "virtual-frame-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cond) { __ b(cond, &entry_label_); }
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_CODEGEN_ARM_INL_H_
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 1cd86d1da..bf748a9b6 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,56 +29,14 @@
#if defined(V8_TARGET_ARCH_ARM)
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "jsregexp.h"
-#include "jump-target-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
-#include "register-allocator-inl.h"
-#include "runtime.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-#include "virtual-frame-arm-inl.h"
+#include "codegen.h"
namespace v8 {
namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
- // On ARM you either have a completely spilled frame or you
- // handle it yourself, but at the moment there's no automation
- // of registers and deferred code.
-}
-
-
-void DeferredCode::RestoreRegisters() {
-}
-
-
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- frame_state_->frame()->AssertIsSpilled();
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-}
-
-
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterInternalFrame();
}
@@ -89,7320 +47,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
}
-// -------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
- : owner_(owner),
- previous_(owner->state()) {
- owner->set_state(this);
-}
-
-
-ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target)
- : CodeGenState(owner),
- true_target_(true_target),
- false_target_(false_target) {
- owner->set_state(this);
-}
-
-
-TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
- Slot* slot,
- TypeInfo type_info)
- : CodeGenState(owner),
- slot_(slot) {
- owner->set_state(this);
- old_type_info_ = owner->set_type_info(slot, type_info);
-}
-
-
-CodeGenState::~CodeGenState() {
- ASSERT(owner_->state() == this);
- owner_->set_state(previous_);
-}
-
-
-TypeInfoCodeGenState::~TypeInfoCodeGenState() {
- owner()->set_type_info(slot_, old_type_info_);
-}
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation
-
-int CodeGenerator::inlined_write_barrier_size_ = -1;
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
- : deferred_(8),
- masm_(masm),
- info_(NULL),
- frame_(NULL),
- allocator_(NULL),
- cc_reg_(al),
- state_(NULL),
- loop_nesting_(0),
- type_info_(NULL),
- function_return_(JumpTarget::BIDIRECTIONAL),
- function_return_is_shadowed_(false) {
-}
-
-
-// Calling conventions:
-// fp: caller's frame pointer
-// sp: stack pointer
-// r1: called JS function
-// cp: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
- // Record the position for debugging purposes.
- CodeForFunctionPosition(info->function());
- Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
-
- // Initialize state.
- info_ = info;
-
- int slots = scope()->num_parameters() + scope()->num_stack_slots();
- ScopedVector<TypeInfo> type_info_array(slots);
- for (int i = 0; i < slots; i++) {
- type_info_array[i] = TypeInfo::Unknown();
- }
- type_info_ = &type_info_array;
-
- ASSERT(allocator_ == NULL);
- RegisterAllocator register_allocator(this);
- allocator_ = &register_allocator;
- ASSERT(frame_ == NULL);
- frame_ = new VirtualFrame();
- cc_reg_ = al;
-
- // Adjust for function-level loop nesting.
- ASSERT_EQ(0, loop_nesting_);
- loop_nesting_ = info->is_in_loop() ? 1 : 0;
-
- {
- CodeGenState state(this);
-
- // Entry:
- // Stack: receiver, arguments
- // lr: return address
- // fp: caller's frame pointer
- // sp: stack pointer
- // r1: called JS function
- // cp: callee's context
- allocator_->Initialize();
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- frame_->SpillAll();
- __ stop("stop-at");
- }
-#endif
-
- frame_->Enter();
- // tos: code slot
-
- // Allocate space for locals and initialize them. This also checks
- // for stack overflow.
- frame_->AllocateStackSlots();
-
- frame_->AssertIsSpilled();
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- // Allocate local context.
- // Get outer context and create a new context based on it.
- __ ldr(r0, frame_->Function());
- frame_->EmitPush(r0);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kNewContext, 1);
- }
-
-#ifdef DEBUG
- JumpTarget verified_true;
- __ cmp(r0, cp);
- verified_true.Branch(eq);
- __ stop("NewContext: r0 is expected to be the same as cp");
- verified_true.Bind();
-#endif
- // Update context local.
- __ str(cp, frame_->Context());
- }
-
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- frame_->AssertIsSpilled();
- for (int i = 0; i < scope()->num_parameters(); i++) {
- Variable* par = scope()->parameter(i);
- Slot* slot = par->AsSlot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- ASSERT(!scope()->is_global_scope()); // No params in global scope.
- __ ldr(r1, frame_->ParameterAt(i));
- // Loads r2 with context; used below in RecordWrite.
- __ str(r1, SlotOperand(slot, r2));
- // Load the offset into r3.
- int slot_offset =
- FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ RecordWrite(r2, Operand(slot_offset), r3, r1);
- }
- }
- }
-
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
-
- // Initialize ThisFunction reference if present.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
- StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
- }
-
- // Initialize the function return target after the locals are set
- // up, because it needs the expected frame height from the frame.
- function_return_.SetExpectedHeight();
- function_return_is_shadowed_ = false;
-
- // Generate code to 'execute' declarations and initialize functions
- // (source elements). In case of an illegal redeclaration we need to
- // handle that instead of processing the declarations.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ illegal redeclarations");
- scope()->VisitIllegalRedeclaration(this);
- } else {
- Comment cmnt(masm_, "[ declarations");
- ProcessDeclarations(scope()->declarations());
- // Bail out if a stack-overflow exception occurred when processing
- // declarations.
- if (HasStackOverflow()) return;
- }
-
- if (FLAG_trace) {
- frame_->CallRuntime(Runtime::kTraceEnter, 0);
- // Ignore the return value.
- }
-
- // Compile the body of the function in a vanilla state. Don't
- // bother compiling all the code if the scope has an illegal
- // redeclaration.
- if (!scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
- bool is_builtin = Bootstrapper::IsActive();
- bool should_trace =
- is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
- if (should_trace) {
- frame_->CallRuntime(Runtime::kDebugTrace, 0);
- // Ignore the return value.
- }
-#endif
- VisitStatements(info->function()->body());
- }
- }
-
- // Handle the return from the function.
- if (has_valid_frame()) {
- // If there is a valid frame, control flow can fall off the end of
- // the body. In that case there is an implicit return statement.
- ASSERT(!function_return_is_shadowed_);
- frame_->PrepareForReturn();
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- if (function_return_.is_bound()) {
- function_return_.Jump();
- } else {
- function_return_.Bind();
- GenerateReturnSequence();
- }
- } else if (function_return_.is_linked()) {
- // If the return target has dangling jumps to it, then we have not
- // yet generated the return sequence. This can happen when (a)
- // control does not flow off the end of the body so we did not
- // compile an artificial return statement just above, and (b) there
- // are return statements in the body but (c) they are all shadowed.
- function_return_.Bind();
- GenerateReturnSequence();
- }
-
- // Adjust for function-level loop nesting.
- ASSERT(loop_nesting_ == info->is_in_loop()? 1 : 0);
- loop_nesting_ = 0;
-
- // Code generation state must be reset.
- ASSERT(!has_cc());
- ASSERT(state_ == NULL);
- ASSERT(loop_nesting() == 0);
- ASSERT(!function_return_is_shadowed_);
- function_return_.Unuse();
- DeleteFrame();
-
- // Process any deferred code using the register allocator.
- if (!HasStackOverflow()) {
- ProcessDeferred();
- }
-
- allocator_ = NULL;
- type_info_ = NULL;
-}
-
-
-int CodeGenerator::NumberOfSlot(Slot* slot) {
- if (slot == NULL) return kInvalidSlotNumber;
- switch (slot->type()) {
- case Slot::PARAMETER:
- return slot->index();
- case Slot::LOCAL:
- return slot->index() + scope()->num_parameters();
- default:
- break;
- }
- return kInvalidSlotNumber;
-}
-
-
-MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER:
- return frame_->ParameterAt(index);
-
- case Slot::LOCAL:
- return frame_->LocalAt(index);
-
- case Slot::CONTEXT: {
- // Follow the context chain if necessary.
- ASSERT(!tmp.is(cp)); // do not overwrite context register
- Register context = cp;
- int chain_length = scope()->ContextChainLength(slot->var()->scope());
- for (int i = 0; i < chain_length; i++) {
- // Load the closure.
- // (All contexts, even 'with' contexts, have a closure,
- // and it is the same for all contexts inside a function.
- // There is no need to go to the function context first.)
- __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- // We may have a 'with' context now. Get the function context.
- // (In fact this mov may never be the needed, since the scope analysis
- // may not permit a direct context access in this case and thus we are
- // always at a function context. However it is safe to dereference be-
- // cause the function context of a function context is itself. Before
- // deleting this mov we should try to create a counter-example first,
- // though...)
- __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
- }
-
- default:
- UNREACHABLE();
- return MemOperand(r0, 0);
- }
-}
-
-
-MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
- Slot* slot,
- Register tmp,
- Register tmp2,
- JumpTarget* slow) {
- ASSERT(slot->type() == Slot::CONTEXT);
- Register context = cp;
-
- for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(tmp2, tmp2);
- slow->Branch(ne);
- }
- __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- }
- // Check that last extension is NULL.
- __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(tmp2, tmp2);
- slow->Branch(ne);
- __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, slot->index());
-}
-
-
-// Loads a value on TOS. If it is a boolean value, the result may have been
-// (partially) translated into branches, or it may have set the condition
-// code register. If force_cc is set, the value is forced to set the
-// condition code register and no value is pushed. If the condition code
-// register was set, has_cc() is true and cc_reg_ contains the condition to
-// test for 'true'.
-void CodeGenerator::LoadCondition(Expression* x,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_cc) {
- ASSERT(!has_cc());
- int original_height = frame_->height();
-
- { ConditionCodeGenState new_state(this, true_target, false_target);
- Visit(x);
-
- // If we hit a stack overflow, we may not have actually visited
- // the expression. In that case, we ensure that we have a
- // valid-looking frame state because we will continue to generate
- // code as we unwind the C++ stack.
- //
- // It's possible to have both a stack overflow and a valid frame
- // state (eg, a subexpression overflowed, visiting it returned
- // with a dummied frame state, and visiting this expression
- // returned with a normal-looking state).
- if (HasStackOverflow() &&
- has_valid_frame() &&
- !has_cc() &&
- frame_->height() == original_height) {
- true_target->Jump();
- }
- }
- if (force_cc && frame_ != NULL && !has_cc()) {
- // Convert the TOS value to a boolean in the condition code register.
- ToBoolean(true_target, false_target);
- }
- ASSERT(!force_cc || !has_valid_frame() || has_cc());
- ASSERT(!has_valid_frame() ||
- (has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-void CodeGenerator::Load(Expression* expr) {
- // We generally assume that we are not in a spilled scope for most
- // of the code generator. A failure to ensure this caused issue 815
- // and this assert is designed to catch similar issues.
- frame_->AssertIsNotSpilled();
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- JumpTarget true_target;
- JumpTarget false_target;
- LoadCondition(expr, &true_target, &false_target, false);
-
- if (has_cc()) {
- // Convert cc_reg_ into a boolean value.
- JumpTarget loaded;
- JumpTarget materialize_true;
- materialize_true.Branch(cc_reg_);
- frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
- loaded.Jump();
- materialize_true.Bind();
- frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
- loaded.Bind();
- cc_reg_ = al;
- }
-
- if (true_target.is_linked() || false_target.is_linked()) {
- // We have at least one condition value that has been "translated"
- // into a branch, thus it needs to be loaded explicitly.
- JumpTarget loaded;
- if (frame_ != NULL) {
- loaded.Jump(); // Don't lose the current TOS.
- }
- bool both = true_target.is_linked() && false_target.is_linked();
- // Load "true" if necessary.
- if (true_target.is_linked()) {
- true_target.Bind();
- frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
- }
- // If both "true" and "false" need to be loaded jump across the code for
- // "false".
- if (both) {
- loaded.Jump();
- }
- // Load "false" if necessary.
- if (false_target.is_linked()) {
- false_target.Bind();
- frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
- }
- // A value is loaded on all paths reaching this point.
- loaded.Bind();
- }
- ASSERT(has_valid_frame());
- ASSERT(!has_cc());
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::LoadGlobal() {
- Register reg = frame_->GetTOSRegister();
- __ ldr(reg, GlobalObjectOperand());
- frame_->EmitPush(reg);
-}
-
-
-void CodeGenerator::LoadGlobalReceiver(Register scratch) {
- Register reg = frame_->GetTOSRegister();
- __ ldr(reg, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(reg,
- FieldMemOperand(reg, GlobalObject::kGlobalReceiverOffset));
- frame_->EmitPush(reg);
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
- if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
- ASSERT(scope()->arguments_shadow() != NULL);
- // We don't want to do lazy arguments allocation for functions that
-  // have heap-allocated contexts, because it interferes with the
- // uninitialized const tracking in the context objects.
- return (scope()->num_heap_slots() > 0)
- ? EAGER_ARGUMENTS_ALLOCATION
- : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-void CodeGenerator::StoreArgumentsObject(bool initial) {
- ArgumentsAllocationMode mode = ArgumentsMode();
- ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
- Comment cmnt(masm_, "[ store arguments object");
- if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
- // When using lazy arguments allocation, we store the hole value
- // as a sentinel indicating that the arguments object hasn't been
- // allocated yet.
- frame_->EmitPushRoot(Heap::kArgumentsMarkerRootIndex);
- } else {
- frame_->SpillAll();
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ ldr(r2, frame_->Function());
- // The receiver is below the arguments, the return address, and the
- // frame pointer on the stack.
- const int kReceiverDisplacement = 2 + scope()->num_parameters();
- __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
- __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
- frame_->Adjust(3);
- __ Push(r2, r1, r0);
- frame_->CallStub(&stub, 3);
- frame_->EmitPush(r0);
- }
-
- Variable* arguments = scope()->arguments();
- Variable* shadow = scope()->arguments_shadow();
- ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
- ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
- JumpTarget done;
- if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has
-    // already been written to. This can happen if a function
- // has a local variable named 'arguments'.
- LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
- Register arguments = frame_->PopToRegister();
- __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
- __ cmp(arguments, ip);
- done.Branch(ne);
- }
- StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
- if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
- StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
- // Special handling of identifiers as subexpressions of typeof.
- Variable* variable = expr->AsVariableProxy()->AsVariable();
- if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // For a global variable we build the property reference
- // <global>.<variable> and perform a (regular non-contextual) property
- // load to make sure we do not get reference errors.
- Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
- Literal key(variable->name());
- Property property(&global, &key, RelocInfo::kNoPosition);
- Reference ref(this, &property);
- ref.GetValue();
- } else if (variable != NULL && variable->AsSlot() != NULL) {
- // For a variable that rewrites to a slot, we signal it is the immediate
- // subexpression of a typeof.
- LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
- } else {
- // Anything else can be handled normally.
- Load(expr);
- }
-}
-
-
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- // We generally assume that we are not in a spilled scope for most
- // of the code generator. A failure to ensure this caused issue 815,
- // and this assert is designed to catch similar issues.
- cgen->frame()->AssertIsNotSpilled();
- cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
- Comment cmnt(masm_, "[ LoadReference");
- Expression* e = ref->expression();
- Property* property = e->AsProperty();
- Variable* var = e->AsVariableProxy()->AsVariable();
-
- if (property != NULL) {
- // The expression is either a property or a variable proxy that rewrites
- // to a property.
- Load(property->obj());
- if (property->key()->IsPropertyName()) {
- ref->set_type(Reference::NAMED);
- } else {
- Load(property->key());
- ref->set_type(Reference::KEYED);
- }
- } else if (var != NULL) {
- // The expression is a variable proxy that does not rewrite to a
- // property. Global variables are treated as named property references.
- if (var->is_global()) {
- LoadGlobal();
- ref->set_type(Reference::NAMED);
- } else {
- ASSERT(var->AsSlot() != NULL);
- ref->set_type(Reference::SLOT);
- }
- } else {
- // Anything else is a runtime error.
- Load(e);
- frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
- }
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
- int size = ref->size();
- ref->set_unloaded();
- if (size == 0) return;
-
- // Pop a reference from the stack while preserving TOS.
- VirtualFrame::RegisterAllocationScope scope(this);
- Comment cmnt(masm_, "[ UnloadReference");
- if (size > 0) {
- Register tos = frame_->PopToRegister();
- frame_->Drop(size);
- frame_->EmitPush(tos);
- }
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
-// register to a boolean in the condition code register. The code
-// may jump to 'false_target' in case the register converts to 'false'.
-void CodeGenerator::ToBoolean(JumpTarget* true_target,
- JumpTarget* false_target) {
- // Note: The generated code snippet does not change stack variables.
- // Only the condition code should be set.
- bool known_smi = frame_->KnownSmiAt(0);
- Register tos = frame_->PopToRegister();
-
- // Fast case checks
-
- // Check if the value is 'false'.
- if (!known_smi) {
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(tos, ip);
- false_target->Branch(eq);
-
- // Check if the value is 'true'.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(tos, ip);
- true_target->Branch(eq);
-
- // Check if the value is 'undefined'.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(tos, ip);
- false_target->Branch(eq);
- }
-
- // Check if the value is a smi.
- __ cmp(tos, Operand(Smi::FromInt(0)));
-
- if (!known_smi) {
- false_target->Branch(eq);
- __ tst(tos, Operand(kSmiTagMask));
- true_target->Branch(eq);
-
- // Slow case.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Implements the slow case by using ToBooleanStub.
- // The ToBooleanStub takes a single argument, and
- // returns a non-zero value for true, or zero for false.
- // Both the argument value and the return value use the
- // register assigned to tos_.
- ToBooleanStub stub(tos);
- frame_->CallStub(&stub, 0);
- // Convert the result in "tos" to a condition code.
- __ cmp(tos, Operand(0, RelocInfo::NONE));
- } else {
- // Implements slow case by calling the runtime.
- frame_->EmitPush(tos);
- frame_->CallRuntime(Runtime::kToBool, 1);
- // Convert the result (r0) to a condition code.
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r0, ip);
- }
- }
-
- cc_reg_ = ne;
-}
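-
-// A compact sketch of the ladder above (illustrative, not the emitted code):
-// 'false' and 'undefined' convert to false, 'true' converts to true, a smi
-// converts to (x != Smi::FromInt(0)), and anything else goes through
-// ToBooleanStub (with VFP3) or Runtime::kToBool.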
-
-
-void CodeGenerator::GenericBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode,
- GenerateInlineSmi inline_smi,
- int constant_rhs) {
- // top of virtual frame: y
- // 2nd element on virtual frame: x
- // result: top of virtual frame
-
- // Stub is entered with a call: 'return address' is in lr.
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- if (inline_smi) {
- JumpTarget done;
- Register rhs = frame_->PopToRegister();
- Register lhs = frame_->PopToRegister(rhs);
- Register scratch = VirtualFrame::scratch0();
- __ orr(scratch, rhs, Operand(lhs));
- // Check they are both small and positive.
- __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
- ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
- STATIC_ASSERT(kSmiTag == 0);
- if (op == Token::ADD) {
- __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
- } else {
- __ sub(r0, lhs, Operand(rhs), LeaveCC, eq);
- }
- done.Branch(eq);
- GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
- frame_->SpillAll();
- frame_->CallStub(&stub, 0);
- done.Bind();
- frame_->EmitPush(r0);
- break;
- } else {
- // Fall through!
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- if (inline_smi) {
- bool rhs_is_smi = frame_->KnownSmiAt(0);
- bool lhs_is_smi = frame_->KnownSmiAt(1);
- Register rhs = frame_->PopToRegister();
- Register lhs = frame_->PopToRegister(rhs);
- Register smi_test_reg;
- Condition cond;
- if (!rhs_is_smi || !lhs_is_smi) {
- if (rhs_is_smi) {
- smi_test_reg = lhs;
- } else if (lhs_is_smi) {
- smi_test_reg = rhs;
- } else {
- smi_test_reg = VirtualFrame::scratch0();
- __ orr(smi_test_reg, rhs, Operand(lhs));
- }
- // Check they are both Smis.
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- cond = eq;
- } else {
- cond = al;
- }
- ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
- if (op == Token::BIT_OR) {
- __ orr(r0, lhs, Operand(rhs), LeaveCC, cond);
- } else if (op == Token::BIT_AND) {
- __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
- } else {
- ASSERT(op == Token::BIT_XOR);
- STATIC_ASSERT(kSmiTag == 0);
- __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
- }
- if (cond != al) {
- JumpTarget done;
- done.Branch(cond);
- GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
- frame_->SpillAll();
- frame_->CallStub(&stub, 0);
- done.Bind();
- }
- frame_->EmitPush(r0);
- break;
- } else {
- // Fall through!
- }
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- Register rhs = frame_->PopToRegister();
- Register lhs = frame_->PopToRegister(rhs); // Don't pop to rhs register.
- GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
- frame_->SpillAll();
- frame_->CallStub(&stub, 0);
- frame_->EmitPush(r0);
- break;
- }
-
- case Token::COMMA: {
- Register scratch = frame_->PopToRegister();
- // Simply discard left value.
- frame_->Drop();
- frame_->EmitPush(scratch);
- break;
- }
-
- default:
- // Other cases should have been handled before this point.
- UNREACHABLE();
- break;
- }
-}
-
-
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
- DeferredInlineSmiOperation(Token::Value op,
- int value,
- bool reversed,
- OverwriteMode overwrite_mode,
- Register tos)
- : op_(op),
- value_(value),
- reversed_(reversed),
- overwrite_mode_(overwrite_mode),
- tos_register_(tos) {
- set_comment("[ DeferredInlinedSmiOperation");
- }
-
- virtual void Generate();
- // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
- // Exit(). Currently on ARM SaveRegisters() and RestoreRegisters() are empty
- // methods, so it is the responsibility of the deferred code to save and
- // restore registers.
- virtual bool AutoSaveAndRestore() { return false; }
-
- void JumpToNonSmiInput(Condition cond);
- void JumpToAnswerOutOfRange(Condition cond);
-
- private:
- void GenerateNonSmiInput();
- void GenerateAnswerOutOfRange();
- void WriteNonSmiAnswer(Register answer,
- Register heap_number,
- Register scratch);
-
- Token::Value op_;
- int value_;
- bool reversed_;
- OverwriteMode overwrite_mode_;
- Register tos_register_;
- Label non_smi_input_;
- Label answer_out_of_range_;
-};
-
-
-// For bit operations we try harder and handle the case where the input is
-// not a Smi but a 32-bit integer, without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond) {
- ASSERT(Token::IsBitOp(op_));
-
- __ b(cond, &non_smi_input_);
-}
-
-
-// For bit operations the result is always 32 bits, so we handle the case
-// where the result does not fit in a Smi without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) {
- ASSERT(Token::IsBitOp(op_));
-
- if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) {
- // >>> requires an unsigned to double conversion and the non VFP code
- // does not support this conversion.
- __ b(cond, entry_label());
- } else {
- __ b(cond, &answer_out_of_range_);
- }
-}
-
-
-// On entry the non-constant side of the binary operation is in tos_register_
-// and the constant smi side is not in any register. The tos_register_ is not
-// used by the virtual frame. On exit the answer is in the tos_register_ and
-// the virtual frame is unchanged.
-void DeferredInlineSmiOperation::Generate() {
- VirtualFrame copied_frame(*frame_state()->frame());
- copied_frame.SpillAll();
-
- Register lhs = r1;
- Register rhs = r0;
- switch (op_) {
- case Token::ADD: {
- // Revert optimistic add.
- if (reversed_) {
- __ sub(r0, tos_register_, Operand(Smi::FromInt(value_)));
- __ mov(r1, Operand(Smi::FromInt(value_)));
- } else {
- __ sub(r1, tos_register_, Operand(Smi::FromInt(value_)));
- __ mov(r0, Operand(Smi::FromInt(value_)));
- }
- break;
- }
-
- case Token::SUB: {
- // Revert optimistic sub.
- if (reversed_) {
- __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_)));
- __ mov(r1, Operand(Smi::FromInt(value_)));
- } else {
- __ add(r1, tos_register_, Operand(Smi::FromInt(value_)));
- __ mov(r0, Operand(Smi::FromInt(value_)));
- }
- break;
- }
-
- // For these operations there is no optimistic operation that needs to be
- // reverted.
- case Token::MUL:
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- if (tos_register_.is(r1)) {
- __ mov(r0, Operand(Smi::FromInt(value_)));
- } else {
- ASSERT(tos_register_.is(r0));
- __ mov(r1, Operand(Smi::FromInt(value_)));
- }
- if (reversed_ == tos_register_.is(r1)) {
- lhs = r0;
- rhs = r1;
- }
- break;
- }
-
- default:
- // Other cases should have been handled before this point.
- UNREACHABLE();
- break;
- }
-
- GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
- __ CallStub(&stub);
-
- // The generic stub returns its value in r0, but that's not
- // necessarily what we want. We want whatever the inlined code
- // expected, which is that the answer is in the same register as
- // the operand was.
- __ Move(tos_register_, r0);
-
- // The tos register was not in use for the virtual frame that we
- // came into this function with, so we can merge back to that frame
- // without trashing it.
- copied_frame.MergeTo(frame_state()->frame());
-
- Exit();
-
- if (non_smi_input_.is_linked()) {
- GenerateNonSmiInput();
- }
-
- if (answer_out_of_range_.is_linked()) {
- GenerateAnswerOutOfRange();
- }
-}
-
-
-// Convert and write the integer answer into heap_number.
-void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
- Register heap_number,
- Register scratch) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, answer);
- if (op_ == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
- __ sub(scratch, heap_number, Operand(kHeapObjectTag));
- __ vstr(d0, scratch, HeapNumber::kValueOffset);
- } else {
- WriteInt32ToHeapNumberStub stub(answer, heap_number, scratch);
- __ CallStub(&stub);
- }
-}
-
-
-void DeferredInlineSmiOperation::GenerateNonSmiInput() {
- // We know the left hand side is not a Smi and the right hand side is an
- // immediate value (value_) which can be represented as a Smi. We only
- // handle bit operations.
- ASSERT(Token::IsBitOp(op_));
-
- if (FLAG_debug_code) {
- __ Abort("Should not fall through!");
- }
-
- __ bind(&non_smi_input_);
- if (FLAG_debug_code) {
- __ AbortIfSmi(tos_register_);
- }
-
- // This routine uses the registers from r2 to r6. At the moment they are
- // not used by the register allocator, but when they are it should use
- // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
-
- Register heap_number_map = r7;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ ldr(r3, FieldMemOperand(tos_register_, HeapNumber::kMapOffset));
- __ cmp(r3, heap_number_map);
- // Not a number, fall back to the GenericBinaryOpStub.
- __ b(ne, entry_label());
-
- Register int32 = r2;
- // Not a 32-bit signed int, fall back to the GenericBinaryOpStub.
- __ ConvertToInt32(tos_register_, int32, r4, r5, d0, entry_label());
-
- // tos_register_ (r0 or r1): Original heap number.
- // int32: signed 32-bit int.
-
- Label result_not_a_smi;
- int shift_value = value_ & 0x1f;
- switch (op_) {
- case Token::BIT_OR: __ orr(int32, int32, Operand(value_)); break;
- case Token::BIT_XOR: __ eor(int32, int32, Operand(value_)); break;
- case Token::BIT_AND: __ and_(int32, int32, Operand(value_)); break;
- case Token::SAR:
- ASSERT(!reversed_);
- if (shift_value != 0) {
- __ mov(int32, Operand(int32, ASR, shift_value));
- }
- break;
- case Token::SHR:
- ASSERT(!reversed_);
- if (shift_value != 0) {
- __ mov(int32, Operand(int32, LSR, shift_value), SetCC);
- } else {
- // SHR is special because it is required to produce a positive answer.
- __ cmp(int32, Operand(0, RelocInfo::NONE));
- }
- if (CpuFeatures::IsSupported(VFP3)) {
- __ b(mi, &result_not_a_smi);
- } else {
- // Non VFP code cannot convert from unsigned to double, so fall back
- // to GenericBinaryOpStub.
- __ b(mi, entry_label());
- }
- break;
- case Token::SHL:
- ASSERT(!reversed_);
- if (shift_value != 0) {
- __ mov(int32, Operand(int32, LSL, shift_value));
- }
- break;
- default: UNREACHABLE();
- }
- // Check that the *signed* result fits in a smi. Not necessary for AND, SAR
- // if the shift is more than 0, or SHR if the shift is more than 1.
- if (!( (op_ == Token::AND && value_ >= 0) ||
- ((op_ == Token::SAR) && (shift_value > 0)) ||
- ((op_ == Token::SHR) && (shift_value > 1)))) {
- __ add(r3, int32, Operand(0x40000000), SetCC);
- __ b(mi, &result_not_a_smi);
- }
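- // Why adding 0x40000000 works: a value fits in a smi iff it lies in
- // [-2^30, 2^30), and adding 2^30 maps exactly that interval onto the
- // non-negative range [0, 2^31), so the N flag (mi) is set precisely
- // when the value is out of smi range.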
- __ mov(tos_register_, Operand(int32, LSL, kSmiTagSize));
- Exit();
-
- if (result_not_a_smi.is_linked()) {
- __ bind(&result_not_a_smi);
- if (overwrite_mode_ != OVERWRITE_LEFT) {
- ASSERT((overwrite_mode_ == NO_OVERWRITE) ||
- (overwrite_mode_ == OVERWRITE_RIGHT));
- // If the allocation fails, fall back to the GenericBinaryOpStub.
- __ AllocateHeapNumber(r4, r5, r6, heap_number_map, entry_label());
- // Nothing can go wrong now, so overwrite tos.
- __ mov(tos_register_, Operand(r4));
- }
-
- // int32: answer as signed 32bits integer.
- // tos_register_: Heap number to write the answer into.
- WriteNonSmiAnswer(int32, tos_register_, r3);
-
- Exit();
- }
-}
-
-
-void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
- // The inputs from a bitwise operation were Smis, but the result cannot fit
- // into a Smi, so we store it into a heap number. VirtualFrame::scratch0()
- // holds the untagged result to be converted. tos_register_ contains the
- // input. See the calls to JumpToAnswerOutOfRange to see how we got here.
- ASSERT(Token::IsBitOp(op_));
- ASSERT(!reversed_);
-
- Register untagged_result = VirtualFrame::scratch0();
-
- if (FLAG_debug_code) {
- __ Abort("Should not fall through!");
- }
-
- __ bind(&answer_out_of_range_);
- if (((value_ & 0x1f) == 0) && (op_ == Token::SHR)) {
- // >>> 0 is a special case where the untagged_result register is not set up
- // yet. We untag the input to get it.
- __ mov(untagged_result, Operand(tos_register_, ASR, kSmiTagSize));
- }
-
- // This routine uses the registers from r2 to r6. At the moment they are
- // not used by the register allocator, but when they are it should use
- // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
-
- // Allocate the result heap number.
- Register heap_number_map = VirtualFrame::scratch1();
- Register heap_number = r4;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- // If the allocation fails, fall back to the GenericBinaryOpStub.
- __ AllocateHeapNumber(heap_number, r5, r6, heap_number_map, entry_label());
- WriteNonSmiAnswer(untagged_result, heap_number, r3);
- __ mov(tos_register_, Operand(heap_number));
-
- Exit();
-}
-
-
-static bool PopCountLessThanEqual2(unsigned int x) {
- x &= x - 1;
- return (x & (x - 1)) == 0;
-}
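-
-// Worked example: 'x &= x - 1' clears the lowest set bit. For x = 0b10100
-// (two bits set) this leaves 0b10000, and (x & (x - 1)) == 0 then holds, so
-// the function returns true. For x = 0b10110 (three bits set) the clear
-// leaves 0b10100, which is not a power of two or zero, so it returns false.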
-
-
-// Returns the index of the lowest bit set.
-static int BitPosition(unsigned x) {
- int bit_posn = 0;
- while ((x & 0xf) == 0) {
- bit_posn += 4;
- x >>= 4;
- }
- while ((x & 1) == 0) {
- bit_posn++;
- x >>= 1;
- }
- return bit_posn;
-}
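-
-// Worked example: BitPosition(20) sees 0b10100, whose low nibble is
-// non-zero, so only the bit-at-a-time loop runs, returning 2. For 256
-// (0x100) the nibble loop first skips 8 bits. Note that x must be non-zero
-// or the loops never terminate.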
-
-
-// Can we multiply by x with at most two shifts and an add?
-// This answers yes for all integers from 2 to 10.
-static bool IsEasyToMultiplyBy(int x) {
- if (x < 2) return false; // Avoid special cases.
- if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
- if (IsPowerOf2(x)) return true; // Simple shift.
- if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
- if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
- return false;
-}
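-
-// For 2..10 this works out as follows: 2, 4 and 8 are powers of two (one
-// shift); 3, 5, 6, 9 and 10 have at most two bits set (a shift-add,
-// possibly followed by a shift); and 7 = 8 - 1 matches the
-// IsPowerOf2(x + 1) pattern (shift and reverse-subtract).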
-
-
-// Multiplies by any value for which IsEasyToMultiplyBy returns true.
-// Source and destination may be the same register. This routine does
-// not set carry and overflow the way a mul instruction would.
-static void InlineMultiplyByKnownInt(MacroAssembler* masm,
- Register source,
- Register destination,
- int known_int) {
- if (IsPowerOf2(known_int)) {
- masm->mov(destination, Operand(source, LSL, BitPosition(known_int)));
- } else if (PopCountLessThanEqual2(known_int)) {
- int first_bit = BitPosition(known_int);
- int second_bit = BitPosition(known_int ^ (1 << first_bit));
- masm->add(destination, source,
- Operand(source, LSL, second_bit - first_bit));
- if (first_bit != 0) {
- masm->mov(destination, Operand(destination, LSL, first_bit));
- }
- } else {
- ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
- int the_bit = BitPosition(known_int + 1);
- masm->rsb(destination, source, Operand(source, LSL, the_bit));
- }
-}
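-
-// Worked example, following the cases above: for known_int = 10 (0b1010),
-// first_bit = 1 and second_bit = 3, so the emitted sequence is
-// add(dst, src, Operand(src, LSL, 2)), giving 5 * src, followed by
-// mov(dst, Operand(dst, LSL, 1)), giving 10 * src. For known_int = 7 the
-// emitted instruction is rsb(dst, src, Operand(src, LSL, 3)), i.e.
-// 8 * src - src.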
-
-
-void CodeGenerator::SmiOperation(Token::Value op,
- Handle<Object> value,
- bool reversed,
- OverwriteMode mode) {
- int int_value = Smi::cast(*value)->value();
-
- bool both_sides_are_smi = frame_->KnownSmiAt(0);
-
- bool something_to_inline;
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::BIT_AND:
- case Token::BIT_OR:
- case Token::BIT_XOR: {
- something_to_inline = true;
- break;
- }
- case Token::SHL: {
- something_to_inline = (both_sides_are_smi || !reversed);
- break;
- }
- case Token::SHR:
- case Token::SAR: {
- if (reversed) {
- something_to_inline = false;
- } else {
- something_to_inline = true;
- }
- break;
- }
- case Token::MOD: {
- if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
- something_to_inline = false;
- } else {
- something_to_inline = true;
- }
- break;
- }
- case Token::MUL: {
- if (!IsEasyToMultiplyBy(int_value)) {
- something_to_inline = false;
- } else {
- something_to_inline = true;
- }
- break;
- }
- default: {
- something_to_inline = false;
- break;
- }
- }
-
- if (!something_to_inline) {
- if (!reversed) {
- // Push the rhs onto the virtual frame by putting it in a TOS register.
- Register rhs = frame_->GetTOSRegister();
- __ mov(rhs, Operand(value));
- frame_->EmitPush(rhs, TypeInfo::Smi());
- GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value);
- } else {
- // Pop the rhs, then push lhs and rhs in the right order. This performs
- // at most one pop; the rest takes place in TOS registers.
- Register lhs = frame_->GetTOSRegister(); // Get reg for pushing.
- Register rhs = frame_->PopToRegister(lhs); // Don't use lhs for this.
- __ mov(lhs, Operand(value));
- frame_->EmitPush(lhs, TypeInfo::Smi());
- TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
- frame_->EmitPush(rhs, t);
- GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI,
- GenericBinaryOpStub::kUnknownIntValue);
- }
- return;
- }
-
- // We move the top of stack to a register (normally no move is involved).
- Register tos = frame_->PopToRegister();
- switch (op) {
- case Token::ADD: {
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-
- __ add(tos, tos, Operand(value), SetCC);
- deferred->Branch(vs);
- if (!both_sides_are_smi) {
- __ tst(tos, Operand(kSmiTagMask));
- deferred->Branch(ne);
- }
- deferred->BindExit();
- frame_->EmitPush(tos);
- break;
- }
-
- case Token::SUB: {
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-
- if (reversed) {
- __ rsb(tos, tos, Operand(value), SetCC);
- } else {
- __ sub(tos, tos, Operand(value), SetCC);
- }
- deferred->Branch(vs);
- if (!both_sides_are_smi) {
- __ tst(tos, Operand(kSmiTagMask));
- deferred->Branch(ne);
- }
- deferred->BindExit();
- frame_->EmitPush(tos);
- break;
- }
-
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND: {
- if (both_sides_are_smi) {
- switch (op) {
- case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
- case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
- case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
- default: UNREACHABLE();
- }
- frame_->EmitPush(tos, TypeInfo::Smi());
- } else {
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
- __ tst(tos, Operand(kSmiTagMask));
- deferred->JumpToNonSmiInput(ne);
- switch (op) {
- case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
- case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
- case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
- default: UNREACHABLE();
- }
- deferred->BindExit();
- TypeInfo result_type = TypeInfo::Integer32();
- if (op == Token::BIT_AND && int_value >= 0) {
- result_type = TypeInfo::Smi();
- }
- frame_->EmitPush(tos, result_type);
- }
- break;
- }
-
- case Token::SHL:
- if (reversed) {
- ASSERT(both_sides_are_smi);
- int max_shift = 0;
- int max_result = int_value == 0 ? 1 : int_value;
- while (Smi::IsValid(max_result << 1)) {
- max_shift++;
- max_result <<= 1;
- }
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, true, mode, tos);
- // Mask off the last 5 bits of the shift operand (rhs). This is part
- // of the definition of shift in JS and we know we have a Smi so we
- // can safely do this. The masked version gets passed to the
- // deferred code, but that makes no difference.
- __ and_(tos, tos, Operand(Smi::FromInt(0x1f)));
- __ cmp(tos, Operand(Smi::FromInt(max_shift)));
- deferred->Branch(ge);
- Register scratch = VirtualFrame::scratch0();
- __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Untag.
- __ mov(tos, Operand(Smi::FromInt(int_value))); // Load constant.
- __ mov(tos, Operand(tos, LSL, scratch)); // Shift constant.
- deferred->BindExit();
- TypeInfo result = TypeInfo::Integer32();
- frame_->EmitPush(tos, result);
- break;
- }
- // Fall through!
- case Token::SHR:
- case Token::SAR: {
- ASSERT(!reversed);
- int shift_value = int_value & 0x1f;
- TypeInfo result = TypeInfo::Number();
-
- if (op == Token::SHR) {
- if (shift_value > 1) {
- result = TypeInfo::Smi();
- } else if (shift_value > 0) {
- result = TypeInfo::Integer32();
- }
- } else if (op == Token::SAR) {
- if (shift_value > 0) {
- result = TypeInfo::Smi();
- } else {
- result = TypeInfo::Integer32();
- }
- } else {
- ASSERT(op == Token::SHL);
- result = TypeInfo::Integer32();
- }
-
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
- if (!both_sides_are_smi) {
- __ tst(tos, Operand(kSmiTagMask));
- deferred->JumpToNonSmiInput(ne);
- }
- switch (op) {
- case Token::SHL: {
- if (shift_value != 0) {
- Register untagged_result = VirtualFrame::scratch0();
- Register scratch = VirtualFrame::scratch1();
- int adjusted_shift = shift_value - kSmiTagSize;
- ASSERT(adjusted_shift >= 0);
-
- if (adjusted_shift != 0) {
- __ mov(untagged_result, Operand(tos, LSL, adjusted_shift));
- } else {
- __ mov(untagged_result, Operand(tos));
- }
- // Check that the *signed* result fits in a smi.
- __ add(scratch, untagged_result, Operand(0x40000000), SetCC);
- deferred->JumpToAnswerOutOfRange(mi);
- __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize));
- }
- break;
- }
- case Token::SHR: {
- if (shift_value != 0) {
- Register untagged_result = VirtualFrame::scratch0();
- // Remove tag.
- __ mov(untagged_result, Operand(tos, ASR, kSmiTagSize));
- __ mov(untagged_result, Operand(untagged_result, LSR, shift_value));
- if (shift_value == 1) {
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging
- // - 0x40000000: this number would convert to negative when Smi
- // tagging.
- // These two cases can only happen with shifts by 0 or 1 when
- // handed a valid smi.
- __ tst(untagged_result, Operand(0xc0000000));
- deferred->JumpToAnswerOutOfRange(ne);
- }
- __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize));
- } else {
- __ cmp(tos, Operand(0, RelocInfo::NONE));
- deferred->JumpToAnswerOutOfRange(mi);
- }
- break;
- }
- case Token::SAR: {
- if (shift_value != 0) {
- // Do the shift and the tag removal in one operation. If the shift
- // is 31 bits (the highest possible value) then we emit the
- // instruction as a shift by 0 which in the ARM ISA means shift
- // arithmetically by 32.
- __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
- __ mov(tos, Operand(tos, LSL, kSmiTagSize));
- }
- break;
- }
- default: UNREACHABLE();
- }
- deferred->BindExit();
- frame_->EmitPush(tos, result);
- break;
- }
-
- case Token::MOD: {
- ASSERT(!reversed);
- ASSERT(int_value >= 2);
- ASSERT(IsPowerOf2(int_value));
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
- unsigned mask = (0x80000000u | kSmiTagMask);
- __ tst(tos, Operand(mask));
- deferred->Branch(ne); // Go to deferred code on non-Smis and negative.
- mask = (int_value << kSmiTagSize) - 1;
- __ and_(tos, tos, Operand(mask));
- deferred->BindExit();
- // Mod of positive power of 2 Smi gives a Smi if the lhs is an integer.
- frame_->EmitPush(
- tos,
- both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Number());
- break;
- }
-
- case Token::MUL: {
- ASSERT(IsEasyToMultiplyBy(int_value));
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
- unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
- max_smi_that_wont_overflow <<= kSmiTagSize;
- unsigned mask = 0x80000000u;
- while ((mask & max_smi_that_wont_overflow) == 0) {
- mask |= mask >> 1;
- }
- mask |= kSmiTagMask;
- // This does a single mask that checks for a too high value in a
- // conservative way and for a non-Smi. It also filters out negative
- // numbers, unfortunately, but since this code is inline we prefer
- // brevity to comprehensiveness.
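- // Worked example (assuming 31-bit smis, i.e. Smi::kMaxValue = 2^30 - 1):
- // for int_value = 3 the largest safe operand is 0x15555555 (0x2aaaaaaa
- // tagged), the loop builds mask = 0xe0000000 before or-ing in kSmiTagMask,
- // and the tst then accepts only tagged values below 0x20000000, a
- // conservative subset of the safe range.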
- __ tst(tos, Operand(mask));
- deferred->Branch(ne);
- InlineMultiplyByKnownInt(masm_, tos, tos, int_value);
- deferred->BindExit();
- frame_->EmitPush(tos);
- break;
- }
-
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void CodeGenerator::Comparison(Condition cond,
- Expression* left,
- Expression* right,
- bool strict) {
- VirtualFrame::RegisterAllocationScope scope(this);
-
- if (left != NULL) Load(left);
- if (right != NULL) Load(right);
-
- // sp[0] : y
- // sp[1] : x
- // result : cc register
-
- // Strict only makes sense for equality comparisons.
- ASSERT(!strict || cond == eq);
-
- Register lhs;
- Register rhs;
-
- bool lhs_is_smi;
- bool rhs_is_smi;
-
- // We load the top two stack positions into registers chosen by the virtual
- // frame. This should keep the register shuffling to a minimum.
- // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
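- // For example, 'x > y' is compiled as 'y < x': the operands are popped
- // into lhs/rhs in the opposite order and the condition is reversed, while
- // x and y were already loaded (and hence converted) left-to-right above.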
- if (cond == gt || cond == le) {
- cond = ReverseCondition(cond);
- lhs_is_smi = frame_->KnownSmiAt(0);
- rhs_is_smi = frame_->KnownSmiAt(1);
- lhs = frame_->PopToRegister();
- rhs = frame_->PopToRegister(lhs); // Don't pop to the same register again!
- } else {
- rhs_is_smi = frame_->KnownSmiAt(0);
- lhs_is_smi = frame_->KnownSmiAt(1);
- rhs = frame_->PopToRegister();
- lhs = frame_->PopToRegister(rhs); // Don't pop to the same register again!
- }
-
- bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi);
-
- ASSERT(rhs.is(r0) || rhs.is(r1));
- ASSERT(lhs.is(r0) || lhs.is(r1));
-
- JumpTarget exit;
-
- if (!both_sides_are_smi) {
- // Now we have the two sides in r0 and r1. We flush any other registers
- // because the stub doesn't know about register allocation.
- frame_->SpillAll();
- Register scratch = VirtualFrame::scratch0();
- Register smi_test_reg;
- if (lhs_is_smi) {
- smi_test_reg = rhs;
- } else if (rhs_is_smi) {
- smi_test_reg = lhs;
- } else {
- __ orr(scratch, lhs, Operand(rhs));
- smi_test_reg = scratch;
- }
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- JumpTarget smi;
- smi.Branch(eq);
-
- // Perform non-smi comparison by stub.
- // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
- // We call with 0 args because there are 0 on the stack.
- CompareStub stub(cond, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
- frame_->CallStub(&stub, 0);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- exit.Jump();
-
- smi.Bind();
- }
-
- // Do smi comparisons by pointer comparison.
- __ cmp(lhs, Operand(rhs));
-
- exit.Bind();
- cc_reg_ = cond;
-}
-
-
-// Call the function on the stack with the given arguments.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- CallFunctionFlags flags,
- int position) {
- // Push the arguments ("left-to-right") on the stack.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Record the position for debugging purposes.
- CodeForSourcePosition(position);
-
- // Use the shared code stub to call the function.
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, flags);
- frame_->CallStub(&call_function, arg_count + 1);
-
- // Restore context and pop function from the stack.
- __ ldr(cp, frame_->Context());
- frame_->Drop(); // discard the TOS
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position) {
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments).
- // If the arguments object of the scope has not been allocated,
- // and x.apply is Function.prototype.apply, this optimization
- // just copies y and the arguments of the current function on the
- // stack, as receiver and arguments, and calls x.
- // In the implementation comments, we call x the applicand
- // and y the receiver.
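- // For example (illustrative): in
- // function outer() { return inner.apply(this, arguments); }
- // no arguments object needs to be materialized; outer's actual parameters
- // are copied directly as the arguments of the call to inner.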
-
- ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
- ASSERT(arguments->IsArguments());
-
- // Load applicand.apply onto the stack. This will usually
- // give us a megamorphic load site. Not super, but it works.
- Load(applicand);
- Handle<String> name = Factory::LookupAsciiSymbol("apply");
- frame_->Dup();
- frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
- frame_->EmitPush(r0);
-
- // Load the receiver and the existing arguments object onto the
- // expression stack. Avoid allocating the arguments object here.
- Load(receiver);
- LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-
- // At this point the top two stack elements are probably in registers
- // since they were just loaded. Ensure they are in regs and get the
- // regs.
- Register receiver_reg = frame_->Peek2();
- Register arguments_reg = frame_->Peek();
-
- // From now on the frame is spilled.
- frame_->SpillAll();
-
- // Emit the source position information after having loaded the
- // receiver and the arguments.
- CodeForSourcePosition(position);
- // Contents of the stack at this point:
- // sp[0]: arguments object of the current function or the hole.
- // sp[1]: receiver
- // sp[2]: applicand.apply
- // sp[3]: applicand.
-
- // Check if the arguments object has been lazily allocated
- // already. If so, just use that instead of copying the arguments
- // from the stack. This also deals with cases where a local variable
- // named 'arguments' has been introduced.
- JumpTarget slow;
- Label done;
- __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
- __ cmp(ip, arguments_reg);
- slow.Branch(ne);
-
- Label build_args;
- // Get rid of the arguments object probe.
- frame_->Drop();
- // Stack now has 3 elements on it.
- // Contents of stack at this point:
- // sp[0]: receiver - in the receiver_reg register.
- // sp[1]: applicand.apply
- // sp[2]: applicand.
-
- // Check that the receiver really is a JavaScript object.
- __ JumpIfSmi(receiver_reg, &build_args);
- // We allow all JSObjects including JSFunctions. As long as
- // JS_FUNCTION_TYPE is the last instance type and it is right
- // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
- // bound.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &build_args);
-
- // Check that applicand.apply is Function.prototype.apply.
- __ ldr(r0, MemOperand(sp, kPointerSize));
- __ JumpIfSmi(r0, &build_args);
- __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
- __ b(ne, &build_args);
- Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
- __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ cmp(r1, Operand(apply_code));
- __ b(ne, &build_args);
-
- // Check that applicand is a function.
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ JumpIfSmi(r1, &build_args);
- __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
- __ b(ne, &build_args);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ mov(r0, Operand(scope()->num_parameters()));
- for (int i = 0; i < scope()->num_parameters(); i++) {
- __ ldr(r2, frame_->ParameterAt(i));
- __ push(r2);
- }
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but
- // limit the number of arguments copied so as not to overflow the stack.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(r0, Operand(r0, LSR, kSmiTagSize));
- __ mov(r3, r0);
- __ cmp(r0, Operand(kArgumentsLimit));
- __ b(gt, &build_args);
-
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- // r3 is a small non-negative integer, due to the test above.
- __ cmp(r3, Operand(0, RelocInfo::NONE));
- __ b(eq, &invoke);
- // Compute the address of the first argument.
- __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
- __ add(r2, r2, Operand(kPointerSize));
- __ bind(&loop);
- // Post-decrement argument address by kPointerSize on each iteration.
- __ ldr(r4, MemOperand(r2, kPointerSize, NegPostIndex));
- __ push(r4);
- __ sub(r3, r3, Operand(1), SetCC);
- __ b(gt, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION);
- // Drop applicand.apply and applicand from the stack, and push
- // the result of the function call, but leave the spilled frame
- // unchanged, with 3 elements, so it is correct when we compile the
- // slow-case code.
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ push(r0);
- // Stack now has 1 element:
- // sp[0]: result
- __ jmp(&done);
-
- // Slow-case: Allocate the arguments object since we know it isn't
- // there, and fall-through to the slow-case where we call
- // applicand.apply.
- __ bind(&build_args);
- // Stack now has 3 elements, because we jumped here from a point where:
- // sp[0]: receiver
- // sp[1]: applicand.apply
- // sp[2]: applicand.
- StoreArgumentsObject(false);
-
- // Stack and frame now have 4 elements.
- slow.Bind();
-
- // Generic computation of x.apply(y, args) with no special optimization.
- // Flip applicand.apply and applicand on the stack, so
- // applicand looks like the receiver of the applicand.apply call.
- // Then process it as a normal function call.
- __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize));
-
- CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
- frame_->CallStub(&call_function, 3);
- // The function and its two arguments have been dropped.
- frame_->Drop(); // Drop the receiver as well.
- frame_->EmitPush(r0);
- frame_->SpillAll(); // The other path to the done label carries a spilled frame.
- // Stack now has 1 element:
- // sp[0]: result
- __ bind(&done);
-
- // Restore the context register after a call.
- __ ldr(cp, frame_->Context());
-}
-
-
-void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
- ASSERT(has_cc());
- Condition cond = if_true ? cc_reg_ : NegateCondition(cc_reg_);
- target->Branch(cond);
- cc_reg_ = al;
-}
-
-
-void CodeGenerator::CheckStack() {
- frame_->SpillAll();
- Comment cmnt(masm_, "[ check stack");
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- masm_->cmp(sp, Operand(ip));
- StackCheckStub stub;
- // Call the stub if lower.
- masm_->mov(ip,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
- masm_->Call(ip, lo);
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
- Visit(statements->at(i));
- }
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Block");
- CodeForStatementPosition(node);
- node->break_target()->SetExpectedHeight();
- VisitStatements(node->statements());
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(pairs));
- frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
- frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-
- frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
- // The result is discarded.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Declaration");
- Variable* var = node->proxy()->var();
- ASSERT(var != NULL); // must have been resolved
- Slot* slot = var->AsSlot();
-
- // If it was not possible to allocate the variable at compile time,
- // we need to "declare" it at runtime to make sure it actually
- // exists in the local context.
- if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Variables with a "LOOKUP" slot were introduced as non-locals
- // during variable resolution and must have mode DYNAMIC.
- ASSERT(var->is_dynamic());
- // For now, just do a runtime call.
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(var->name()));
- // Declaration nodes are always declared in only two modes.
- ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
- PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
- frame_->EmitPush(Operand(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (node->mode() == Variable::CONST) {
- frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
- } else if (node->fun() != NULL) {
- Load(node->fun());
- } else {
- frame_->EmitPush(Operand(0, RelocInfo::NONE));
- }
-
- frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
- // Ignore the return value (declarations are statements).
-
- ASSERT(frame_->height() == original_height);
- return;
- }
-
- ASSERT(!var->is_global());
-
- // If we have a function or a constant, we need to initialize the variable.
- Expression* val = NULL;
- if (node->mode() == Variable::CONST) {
- val = new Literal(Factory::the_hole_value());
- } else {
- val = node->fun(); // NULL if we don't have a function
- }
-
-
- if (val != NULL) {
- WriteBarrierCharacter wb_info =
- val->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
- if (val->AsLiteral() != NULL) wb_info = NEVER_NEWSPACE;
- // Set initial value.
- Reference target(this, node->proxy());
- Load(val);
- target.SetValue(NOT_CONST_INIT, wb_info);
-
- // Get rid of the assigned value (declarations are statements).
- frame_->Drop();
- }
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ ExpressionStatement");
- CodeForStatementPosition(node);
- Expression* expression = node->expression();
- expression->MarkAsStatement();
- Load(expression);
- frame_->Drop();
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "// EmptyStatement");
- CodeForStatementPosition(node);
- // nothing to do
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ IfStatement");
- // Generate different code depending on which parts of the if statement
- // are present or not.
- bool has_then_stm = node->HasThenStatement();
- bool has_else_stm = node->HasElseStatement();
-
- CodeForStatementPosition(node);
-
- JumpTarget exit;
- if (has_then_stm && has_else_stm) {
- Comment cmnt(masm_, "[ IfThenElse");
- JumpTarget then;
- JumpTarget else_;
- // if (cond)
- LoadCondition(node->condition(), &then, &else_, true);
- if (frame_ != NULL) {
- Branch(false, &else_);
- }
- // then
- if (frame_ != NULL || then.is_linked()) {
- then.Bind();
- Visit(node->then_statement());
- }
- if (frame_ != NULL) {
- exit.Jump();
- }
- // else
- if (else_.is_linked()) {
- else_.Bind();
- Visit(node->else_statement());
- }
-
- } else if (has_then_stm) {
- Comment cmnt(masm_, "[ IfThen");
- ASSERT(!has_else_stm);
- JumpTarget then;
- // if (cond)
- LoadCondition(node->condition(), &then, &exit, true);
- if (frame_ != NULL) {
- Branch(false, &exit);
- }
- // then
- if (frame_ != NULL || then.is_linked()) {
- then.Bind();
- Visit(node->then_statement());
- }
-
- } else if (has_else_stm) {
- Comment cmnt(masm_, "[ IfElse");
- ASSERT(!has_then_stm);
- JumpTarget else_;
- // if (!cond)
- LoadCondition(node->condition(), &exit, &else_, true);
- if (frame_ != NULL) {
- Branch(true, &exit);
- }
- // else
- if (frame_ != NULL || else_.is_linked()) {
- else_.Bind();
- Visit(node->else_statement());
- }
-
- } else {
- Comment cmnt(masm_, "[ If");
- ASSERT(!has_then_stm && !has_else_stm);
- // if (cond)
- LoadCondition(node->condition(), &exit, &exit, false);
- if (frame_ != NULL) {
- if (has_cc()) {
- cc_reg_ = al;
- } else {
- frame_->Drop();
- }
- }
- }
-
- // end
- if (exit.is_linked()) {
- exit.Bind();
- }
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- Comment cmnt(masm_, "[ ContinueStatement");
- CodeForStatementPosition(node);
- node->target()->continue_target()->Jump();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- Comment cmnt(masm_, "[ BreakStatement");
- CodeForStatementPosition(node);
- node->target()->break_target()->Jump();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- Comment cmnt(masm_, "[ ReturnStatement");
-
- CodeForStatementPosition(node);
- Load(node->expression());
- frame_->PopToR0();
- frame_->PrepareForReturn();
- if (function_return_is_shadowed_) {
- function_return_.Jump();
- } else {
- // Pop the result from the frame and prepare the frame for
- // returning, thus making it easier to merge.
- if (function_return_.is_bound()) {
- // If the function return label is already bound we reuse the
- // code by jumping to the return site.
- function_return_.Jump();
- } else {
- function_return_.Bind();
- GenerateReturnSequence();
- }
- }
-}
-
-
-void CodeGenerator::GenerateReturnSequence() {
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns the parameter as it is.
- frame_->EmitPush(r0);
- frame_->CallRuntime(Runtime::kTraceExit, 1);
- }
-
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- // Make sure that the constant pool is not emitted inside of the return
- // sequence.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Tear down the frame which will restore the caller's frame pointer and
- // the link register.
- frame_->Exit();
-
- // Here we use masm_-> instead of the __ macro to prevent the code coverage
- // tool from instrumenting this code, as we rely on the code size here.
- int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
- masm_->add(sp, sp, Operand(sp_delta));
- masm_->Jump(lr);
- DeleteFrame();
-
-#ifdef DEBUG
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceInstructions <=
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
-#endif
- }
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ WithEnterStatement");
- CodeForStatementPosition(node);
- Load(node->expression());
- if (node->is_catch_block()) {
- frame_->CallRuntime(Runtime::kPushCatchContext, 1);
- } else {
- frame_->CallRuntime(Runtime::kPushContext, 1);
- }
-#ifdef DEBUG
- JumpTarget verified_true;
- __ cmp(r0, cp);
- verified_true.Branch(eq);
- __ stop("PushContext: r0 is expected to be the same as cp");
- verified_true.Bind();
-#endif
- // Update context local.
- __ str(cp, frame_->Context());
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ WithExitStatement");
- CodeForStatementPosition(node);
- // Pop context.
- __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
- // Update context local.
- __ str(cp, frame_->Context());
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ SwitchStatement");
- CodeForStatementPosition(node);
- node->break_target()->SetExpectedHeight();
-
- Load(node->tag());
-
- JumpTarget next_test;
- JumpTarget fall_through;
- JumpTarget default_entry;
- JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
- ZoneList<CaseClause*>* cases = node->cases();
- int length = cases->length();
- CaseClause* default_clause = NULL;
-
- for (int i = 0; i < length; i++) {
- CaseClause* clause = cases->at(i);
- if (clause->is_default()) {
- // Remember the default clause and compile it at the end.
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case clause");
- // Compile the test.
- next_test.Bind();
- next_test.Unuse();
- // Duplicate TOS.
- frame_->Dup();
- Comparison(eq, NULL, clause->label(), true);
- Branch(false, &next_test);
-
- // Before entering the body from the test, remove the switch value from
- // the stack.
- frame_->Drop();
-
- // Label the body so that fall through is enabled.
- if (i > 0 && cases->at(i - 1)->is_default()) {
- default_exit.Bind();
- } else {
- fall_through.Bind();
- fall_through.Unuse();
- }
- VisitStatements(clause->statements());
-
- // If control flow can fall through from the body, jump to the next body
- // or the end of the statement.
- if (frame_ != NULL) {
- if (i < length - 1 && cases->at(i + 1)->is_default()) {
- default_entry.Jump();
- } else {
- fall_through.Jump();
- }
- }
- }
-
- // The final "test" removes the switch value.
- next_test.Bind();
- frame_->Drop();
-
- // If there is a default clause, compile it.
- if (default_clause != NULL) {
- Comment cmnt(masm_, "[ Default clause");
- default_entry.Bind();
- VisitStatements(default_clause->statements());
- // If control flow can fall out of the default and there is a case after
- // it, jump to that case's body.
- if (frame_ != NULL && default_exit.is_bound()) {
- default_exit.Jump();
- }
- }
-
- if (fall_through.is_linked()) {
- fall_through.Bind();
- }
-
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ DoWhileStatement");
- CodeForStatementPosition(node);
- node->break_target()->SetExpectedHeight();
- JumpTarget body(JumpTarget::BIDIRECTIONAL);
- IncrementLoopNesting();
-
- // Label the top of the loop for the backward CFG edge. If the test
- // is always true we can use the continue target, and if the test is
- // always false there is no need.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- switch (info) {
- case ALWAYS_TRUE:
- node->continue_target()->SetExpectedHeight();
- node->continue_target()->Bind();
- break;
- case ALWAYS_FALSE:
- node->continue_target()->SetExpectedHeight();
- break;
- case DONT_KNOW:
- node->continue_target()->SetExpectedHeight();
- body.Bind();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Compile the test.
- switch (info) {
- case ALWAYS_TRUE:
- // If control can fall off the end of the body, jump back to the
- // top.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- break;
- case ALWAYS_FALSE:
- // If we have a continue in the body, we only have to bind its
- // jump target.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- break;
- case DONT_KNOW:
- // We have to compile the test expression if it can be reached by
- // control flow falling out of the body or via continue.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- Comment cmnt(masm_, "[ DoWhileCondition");
- CodeForDoWhileConditionPosition(node);
- LoadCondition(node->cond(), &body, node->break_target(), true);
- if (has_valid_frame()) {
- // An invalid frame here indicates that control did not
- // fall out of the test expression.
- Branch(true, &body);
- }
- }
- break;
- }
-
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ WhileStatement");
- CodeForStatementPosition(node);
-
- // If the test is never true and has no side effects there is no need
- // to compile the test or body.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- node->break_target()->SetExpectedHeight();
- IncrementLoopNesting();
-
- // Label the top of the loop with the continue target for the backward
- // CFG edge.
- node->continue_target()->SetExpectedHeight();
- node->continue_target()->Bind();
-
- if (info == DONT_KNOW) {
- JumpTarget body(JumpTarget::BIDIRECTIONAL);
- LoadCondition(node->cond(), &body, node->break_target(), true);
- if (has_valid_frame()) {
- // A NULL frame indicates that control did not fall out of the
- // test expression.
- Branch(false, node->break_target());
- }
- if (has_valid_frame() || body.is_linked()) {
- body.Bind();
- }
- }
-
- if (has_valid_frame()) {
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // If control flow can fall out of the body, jump back to the top.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ ForStatement");
- CodeForStatementPosition(node);
- if (node->init() != NULL) {
- Visit(node->init());
- }
-
- // If the test is never true there is no need to compile the test or
- // body.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- node->break_target()->SetExpectedHeight();
- IncrementLoopNesting();
-
- // We know that the loop index is a smi if it is not modified in the
- // loop body and it is checked against a constant limit in the loop
- // condition. In this case, we reset the static type information of the
- // loop index to smi before compiling the body, the update expression, and
- // the bottom check of the loop condition.
- TypeInfoCodeGenState type_info_scope(this,
- node->is_fast_smi_loop() ?
- node->loop_variable()->AsSlot() :
- NULL,
- TypeInfo::Smi());
-
- // If there is no update statement, label the top of the loop with the
- // continue target, otherwise with the loop target.
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- if (node->next() == NULL) {
- node->continue_target()->SetExpectedHeight();
- node->continue_target()->Bind();
- } else {
- node->continue_target()->SetExpectedHeight();
- loop.Bind();
- }
-
- // If the test is always true, there is no need to compile it.
- if (info == DONT_KNOW) {
- JumpTarget body;
- LoadCondition(node->cond(), &body, node->break_target(), true);
- if (has_valid_frame()) {
- Branch(false, node->break_target());
- }
- if (has_valid_frame() || body.is_linked()) {
- body.Bind();
- }
- }
-
- if (has_valid_frame()) {
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- if (node->next() == NULL) {
- // If there is no update statement and control flow can fall out
- // of the loop, jump directly to the continue label.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- } else {
- // If there is an update statement and control flow can reach it
- // via falling out of the body of the loop or continuing, we
- // compile the update statement.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- // Record the source position of the statement: this code comes after
- // the code for the body, but it belongs to the loop statement and not
- // the body.
- CodeForStatementPosition(node);
- Visit(node->next());
- loop.Jump();
- }
- }
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ ForInStatement");
- CodeForStatementPosition(node);
-
- JumpTarget primitive;
- JumpTarget jsobject;
- JumpTarget fixed_array;
- JumpTarget entry(JumpTarget::BIDIRECTIONAL);
- JumpTarget end_del_check;
- JumpTarget exit;
-
- // Get the object to enumerate over (converted to JSObject).
- Load(node->enumerable());
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
- // Both SpiderMonkey and kjs ignore null and undefined, in contrast
- // to the specification: section 12.6.4 mandates a call to ToObject.
- frame_->EmitPop(r0);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- exit.Branch(eq);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r0, ip);
- exit.Branch(eq);
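- // For illustration: 'for (var p in null) {}' and
- // 'for (var p in undefined) {}' take the early exit above and run
- // the body zero times, matching those engines rather than a strict
- // reading of section 12.6.4.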
-
- // Stack layout in body:
- // [iteration counter (Smi)]
- // [length of array]
- // [FixedArray]
- // [Map or 0]
- // [Object]
-
- // Check if enumerable is already a JSObject
- __ tst(r0, Operand(kSmiTagMask));
- primitive.Branch(eq);
- __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
- jsobject.Branch(hs);
-
- primitive.Bind();
- frame_->EmitPush(r0);
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
-
- jsobject.Bind();
- // Get the set of properties (as a FixedArray or Map).
- // r0: value to be iterated over
- frame_->EmitPush(r0); // Push the object being iterated over.
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
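- // Illustrative sketch of the check emitted below (pseudocode):
- //   for (o = obj; o != null; o = o.map.prototype) {
- //     if (o.elements != empty_fixed_array) goto call_runtime;
- //     if (o.map has no enum cache) goto call_runtime;
- //     if (o != obj && o's enum cache is non-empty) goto call_runtime;
- //   }
- //   // The receiver's enum cache is valid; iterate using it.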
- JumpTarget call_runtime;
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- JumpTarget check_prototype;
- JumpTarget use_cache;
- __ mov(r1, Operand(r0));
- loop.Bind();
- // Check that there are no elements.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
- __ cmp(r2, r4);
- call_runtime.Branch(ne);
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in r3 for the subsequent
- // prototype load.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
- __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
- __ cmp(r2, ip);
- call_runtime.Branch(eq);
- // Check that there is an enum cache in the non-empty instance
- // descriptors. This is the case if the next enumeration index
- // field does not contain a smi.
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
- __ tst(r2, Operand(kSmiTagMask));
- call_runtime.Branch(eq);
- // For all objects but the receiver, check that the cache is empty.
- // r4: empty fixed array root.
- __ cmp(r1, r0);
- check_prototype.Branch(eq);
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmp(r2, r4);
- call_runtime.Branch(ne);
- check_prototype.Bind();
- // Load the prototype from the map and loop if non-null.
- __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r1, ip);
- loop.Branch(ne);
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- use_cache.Jump();
-
- call_runtime.Bind();
- // Call the runtime to get the property names for the object.
- frame_->EmitPush(r0); // push the object (slot 4) for the runtime call
- frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- // r0: map or fixed array (result from call to
- // Runtime::kGetPropertyNamesFast)
- __ mov(r2, Operand(r0));
- __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r1, ip);
- fixed_array.Branch(ne);
-
- use_cache.Bind();
- // Get enum cache
- // r0: map (either the result from a call to
- // Runtime::kGetPropertyNamesFast or has been fetched directly from
- // the object)
- __ mov(r1, Operand(r0));
- __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
- __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
- __ ldr(r2,
- FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- frame_->EmitPush(r0); // map
- frame_->EmitPush(r2); // enum cache bridge cache
- __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
- frame_->EmitPush(r0);
- __ mov(r0, Operand(Smi::FromInt(0)));
- frame_->EmitPush(r0);
- entry.Jump();
-
- fixed_array.Bind();
- __ mov(r1, Operand(Smi::FromInt(0)));
- frame_->EmitPush(r1); // insert 0 in place of Map
- frame_->EmitPush(r0);
-
- // Push the length of the array and the initial index onto the stack.
- __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
- frame_->EmitPush(r0);
- __ mov(r0, Operand(Smi::FromInt(0))); // init index
- frame_->EmitPush(r0);
-
- // Condition.
- entry.Bind();
- // sp[0] : index
- // sp[1] : array/enum cache length
- // sp[2] : array or enum cache
- // sp[3] : 0 or map
- // sp[4] : enumerable
- // Grab the current frame's height for the break and continue
- // targets only after all the state is pushed on the frame.
- node->break_target()->SetExpectedHeight();
- node->continue_target()->SetExpectedHeight();
-
- // Load the current count to r0, load the length to r1.
- __ Ldrd(r0, r1, frame_->ElementAt(0));
- __ cmp(r0, r1); // compare to the array length
- node->break_target()->Branch(hs);
-
- // Get the i'th entry of the array.
- __ ldr(r2, frame_->ElementAt(2));
- __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Get Map or 0.
- __ ldr(r2, frame_->ElementAt(3));
- // Check if this (still) matches the map of the enumerable.
- // If not, we have to filter the key.
- __ ldr(r1, frame_->ElementAt(4));
- __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r1, Operand(r2));
- end_del_check.Branch(eq);
-
- // Convert the entry to a string (or null if it isn't a property anymore).
- __ ldr(r0, frame_->ElementAt(4)); // push enumerable
- frame_->EmitPush(r0);
- frame_->EmitPush(r3); // push entry
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
- __ mov(r3, Operand(r0), SetCC);
- // If the property has been removed while iterating, we just skip it.
- node->continue_target()->Branch(eq);
-
- end_del_check.Bind();
- // Store the entry in the 'each' expression and take another spin in the
- // loop. r3: i'th entry of the enum cache (or the string thereof).
- frame_->EmitPush(r3); // push entry
- { VirtualFrame::RegisterAllocationScope scope(this);
- Reference each(this, node->each());
- if (!each.is_illegal()) {
- if (each.size() > 0) {
- // Loading a reference may leave the frame in an unspilled state.
- frame_->SpillAll(); // Sync stack to memory.
- // Get the value (under the reference on the stack) from memory.
- __ ldr(r0, frame_->ElementAt(each.size()));
- frame_->EmitPush(r0);
- each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
- frame_->Drop(2); // The result of the set and the extra pushed value.
- } else {
- // If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (e.g., the entry pushed
- // above) is right on top of or right underneath a zero-sized
- // reference.
- each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
- frame_->Drop(1); // Drop the result of the set operation.
- }
- }
- }
- // Body.
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- { VirtualFrame::RegisterAllocationScope scope(this);
- Visit(node->body());
- }
-
- // Next. Reestablish a spilled frame in case we are coming here via
- // a continue in the body.
- node->continue_target()->Bind();
- frame_->SpillAll();
- frame_->EmitPop(r0);
- __ add(r0, r0, Operand(Smi::FromInt(1)));
- frame_->EmitPush(r0);
- entry.Jump();
-
- // Cleanup. No need to spill because VirtualFrame::Drop is safe for
- // any frame.
- node->break_target()->Bind();
- frame_->Drop(5);
-
- // Exit.
- exit.Bind();
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
- Comment cmnt(masm_, "[ TryCatchStatement");
- CodeForStatementPosition(node);
-
- JumpTarget try_block;
- JumpTarget exit;
-
- try_block.Call();
- // --- Catch block ---
- frame_->EmitPush(r0);
-
- // Store the caught exception in the catch variable.
- Variable* catch_var = node->catch_var()->var();
- ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
- StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
-
- // Remove the exception from the stack.
- frame_->Drop();
-
- { VirtualFrame::RegisterAllocationScope scope(this);
- VisitStatements(node->catch_block()->statements());
- }
- if (frame_ != NULL) {
- exit.Jump();
- }
-
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_CATCH_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the labels for all escapes from the try block, including
- // returns. During shadowing, the original label is hidden as the
- // LabelShadow and operations on the original actually affect the
- // shadowing label.
- //
- // We should probably try to unify the escaping labels and the return
- // label.
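- // For illustration, in
- //   try { if (c) return x; } catch (e) { }
- // the return escapes the try block; its target is shadowed so the
- // try handler can be unlinked before the return is actually taken.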
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- { VirtualFrame::RegisterAllocationScope scope(this);
- VisitStatements(node->try_block()->statements());
- }
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original labels are unshadowed and the
- // LabelShadows represent the formerly shadowing labels.
- bool has_unlinks = false;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- has_unlinks = has_unlinks || shadows[i]->is_linked();
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
-
- // If we can fall off the end of the try block, unlink from try chain.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame. Unlink from
- // the handler list and drop the rest of this handler from the
- // frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(r1); // r0 can contain the return value.
- __ mov(r3, Operand(handler_address));
- __ str(r1, MemOperand(r3));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- if (has_unlinks) {
- exit.Jump();
- }
- }
-
- // Generate unlink code for the (formerly) shadowing labels that have been
- // jumped to. Deallocate each shadow target.
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // Unlink from the try chain.
- shadows[i]->Bind();
- // Because we can be jumping here (to spilled code) from unspilled
- // code, we need to reestablish a spilled frame at this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that we
- // break from (e.g., for...in) may have left stuff on the stack.
- __ mov(r3, Operand(handler_address));
- __ ldr(sp, MemOperand(r3));
- frame_->Forget(frame_->height() - handler_height);
-
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(r1); // r0 can contain the return value.
- __ str(r1, MemOperand(r3));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
- frame_->PrepareForReturn();
- }
- shadows[i]->other_target()->Jump();
- }
- }
-
- exit.Bind();
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
- Comment cmnt(masm_, "[ TryFinallyStatement");
- CodeForStatementPosition(node);
-
- // State: Used to keep track of the reason for entering the finally
- // block. Should probably be extended to hold information for
- // break/continue from within the try block.
- enum { FALLING, THROWING, JUMPING };
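- // For illustration: normal completion of the try block enters the
- // finally block FALLING; an uncaught throw enters it THROWING; and
- // 'try { return v; } finally { ... }' enters it JUMPING via the
- // shadowed return target.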
-
- JumpTarget try_block;
- JumpTarget finally_block;
-
- try_block.Call();
-
- frame_->EmitPush(r0); // save exception object on the stack
- // In case of thrown exceptions, this is where we continue.
- __ mov(r2, Operand(Smi::FromInt(THROWING)));
- finally_block.Jump();
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_FINALLY_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the labels for all escapes from the try block, including
- // returns. Shadowing hides the original label as the LabelShadow and
- // operations on the original actually affect the shadowing label.
- //
- // We should probably try to unify the escaping labels and the return
- // label.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- { VirtualFrame::RegisterAllocationScope scope(this);
- VisitStatements(node->try_block()->statements());
- }
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original labels are unshadowed and the
- // LabelShadows represent the formerly shadowing labels.
- int nof_unlinks = 0;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- if (shadows[i]->is_linked()) nof_unlinks++;
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
-
- // If we can fall off the end of the try block, unlink from the try
- // chain and set the state on the frame to FALLING.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(r1);
- __ mov(r3, Operand(handler_address));
- __ str(r1, MemOperand(r3));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- // Fake a top of stack value (unneeded when FALLING) and set the
- // state in r2, then jump around the unlink blocks if any.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(r0);
- __ mov(r2, Operand(Smi::FromInt(FALLING)));
- if (nof_unlinks > 0) {
- finally_block.Jump();
- }
- }
-
- // Generate code to unlink and set the state for the (formerly)
- // shadowing targets that have been jumped to.
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // If we have come from the shadowed return, the return value is
- // in (a non-refcounted reference to) r0. We must preserve it
- // until it is pushed.
- //
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- shadows[i]->Bind();
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that
- // we break from (e.g., for...in) may have left stuff on the
- // stack.
- __ mov(r3, Operand(handler_address));
- __ ldr(sp, MemOperand(r3));
- frame_->Forget(frame_->height() - handler_height);
-
- // Unlink this handler and drop it from the frame. The next
- // handler address is currently on top of the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(r1);
- __ str(r1, MemOperand(r3));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- // If this label shadowed the function return, materialize the
- // return value on the stack.
- frame_->EmitPush(r0);
- } else {
- // Fake TOS for targets that shadowed breaks and continues.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(r0);
- }
- __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
- if (--nof_unlinks > 0) {
- // If this is not the last unlink block, jump around the next.
- finally_block.Jump();
- }
- }
- }
-
- // --- Finally block ---
- finally_block.Bind();
-
- // Push the state on the stack.
- frame_->EmitPush(r2);
-
- // We keep two elements on the stack - the (possibly faked) result
- // and the state - while evaluating the finally block.
- //
- // Generate code for the statements in the finally block.
- { VirtualFrame::RegisterAllocationScope scope(this);
- VisitStatements(node->finally_block()->statements());
- }
-
- if (has_valid_frame()) {
- // Restore state and return value or faked TOS.
- frame_->EmitPop(r2);
- frame_->EmitPop(r0);
- }
-
- // Generate code to jump to the right destination for all used
- // formerly shadowing targets. Deallocate each shadow target.
- for (int i = 0; i < shadows.length(); i++) {
- if (has_valid_frame() && shadows[i]->is_bound()) {
- JumpTarget* original = shadows[i]->other_target();
- __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
- if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
- JumpTarget skip;
- skip.Branch(ne);
- frame_->PrepareForReturn();
- original->Jump();
- skip.Bind();
- } else {
- original->Branch(eq);
- }
- }
- }
-
- if (has_valid_frame()) {
- // Check if we need to rethrow the exception.
- JumpTarget exit;
- __ cmp(r2, Operand(Smi::FromInt(THROWING)));
- exit.Branch(ne);
-
- // Rethrow exception.
- frame_->EmitPush(r0);
- frame_->CallRuntime(Runtime::kReThrow, 1);
-
- // Done.
- exit.Bind();
- }
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ DebuggerStatament");
- CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- frame_->DebugBreak();
-#endif
- // Ignore the return value.
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::InstantiateFunction(
- Handle<SharedFunctionInfo> function_info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- if (scope()->is_function_scope() &&
- function_info->num_literals() == 0 &&
- !pretenure) {
- FastNewClosureStub stub;
- frame_->EmitPush(Operand(function_info));
- frame_->SpillAll();
- frame_->CallStub(&stub, 1);
- frame_->EmitPush(r0);
- } else {
- // Create a new closure.
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(function_info));
- frame_->EmitPush(Operand(pretenure
- ? Factory::true_value()
- : Factory::false_value()));
- frame_->CallRuntime(Runtime::kNewClosure, 3);
- frame_->EmitPush(r0);
- }
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ FunctionLiteral");
-
- // Build the function info and instantiate it.
- Handle<SharedFunctionInfo> function_info =
- Compiler::BuildFunctionInfo(node, script());
- if (function_info.is_null()) {
- SetStackOverflow();
- ASSERT(frame_->height() == original_height);
- return;
- }
- InstantiateFunction(function_info, node->pretenure());
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
- InstantiateFunction(node->shared_function_info(), false);
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Conditional");
- JumpTarget then;
- JumpTarget else_;
- LoadCondition(node->condition(), &then, &else_, true);
- if (has_valid_frame()) {
- Branch(false, &else_);
- }
- if (has_valid_frame() || then.is_linked()) {
- then.Bind();
- Load(node->then_expression());
- }
- if (else_.is_linked()) {
- JumpTarget exit;
- if (has_valid_frame()) exit.Jump();
- else_.Bind();
- Load(node->else_expression());
- if (exit.is_linked()) exit.Bind();
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- // JumpTargets do not yet support merging frames so the frame must be
- // spilled when jumping to these targets.
- JumpTarget slow;
- JumpTarget done;
-
- // Generate fast case for loading from slots that correspond to
- // local/global variables or arguments unless they are shadowed by
- // eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(slot,
- typeof_state,
- &slow,
- &done);
-
- slow.Bind();
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(slot->var()->name()));
-
- if (typeof_state == INSIDE_TYPEOF) {
- frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- } else {
- frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- }
-
- done.Bind();
- frame_->EmitPush(r0);
-
- } else {
- Register scratch = VirtualFrame::scratch0();
- TypeInfo info = type_info(slot);
- frame_->EmitPush(SlotOperand(slot, scratch), info);
-
- if (slot->var()->mode() == Variable::CONST) {
- // Const slots may contain 'the hole' value (the constant hasn't been
- // initialized yet) which needs to be converted into the 'undefined'
- // value.
- Comment cmnt(masm_, "[ Unhole const");
- Register tos = frame_->PopToRegister();
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(tos, ip);
- __ LoadRoot(tos, Heap::kUndefinedValueRootIndex, eq);
- frame_->EmitPush(tos);
- }
- }
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
- TypeofState state) {
- VirtualFrame::RegisterAllocationScope scope(this);
- LoadFromSlot(slot, state);
-
- // Bail out quickly if we're not using lazy arguments allocation.
- if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
- // ... or if the slot isn't a non-parameter arguments slot.
- if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
- // Peek at the loaded value: copy it into a register while leaving
- // it on the stack.
- Register tos = frame_->Peek();
-
- // If the loaded value is the sentinel that indicates that we
- // haven't loaded the arguments object yet, we need to do it now.
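- // For illustration, in
- //   function f(x) { if (rare) return arguments; return x; }
- // the arguments object is allocated lazily: the slot holds the
- // arguments marker sentinel until a first access like this one
- // materializes the real object.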
- JumpTarget exit;
- __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
- __ cmp(tos, ip);
- exit.Branch(ne);
- frame_->Drop();
- StoreArgumentsObject(false);
- exit.Bind();
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- ASSERT(slot != NULL);
- VirtualFrame::RegisterAllocationScope scope(this);
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- // For now, just do a runtime call.
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(slot->var()->name()));
-
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores the attributes
- // (e.g. READ_ONLY) of the context slot so that we can initialize
- // const properties (introduced via eval("const foo = (some
- // expr);")). Also, uses the current function context instead of
- // the top context.
- //
- // Note that we must declare 'foo' upon entry to eval(), via a
- // context slot declaration, but we cannot initialize it at the
- // same time, because the const declaration may be at the end of
- // the eval code (sigh...) and the const variable may have been
- // used before (where its value is 'undefined'). Thus, we can only
- // do the initialization when we actually encounter the expression
- // and when the expression operands are defined and valid, and
- // thus we need the split into 2 operations: declaration of the
- // context slot followed by initialization.
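- // For illustration:
- //   eval("f(); const c = 1; function f() { return c; }")
- // declares 'c' on entry to the eval code but initializes it only
- // when the const statement is reached; the call to f() before that
- // point observes 'undefined'.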
- frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
- frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling assignment expressions.
- frame_->EmitPush(r0);
-
- } else {
- ASSERT(!slot->var()->is_dynamic());
- Register scratch = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
-
- // The frame must be spilled when branching to this target.
- JumpTarget exit;
-
- if (init_state == CONST_INIT) {
- ASSERT(slot->var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is
- // executed, the code is identical to a normal store (see below).
- Comment cmnt(masm_, "[ Init const");
- __ ldr(scratch, SlotOperand(slot, scratch));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- exit.Branch(ne);
- }
-
- // We must execute the store. Storing a variable must keep the
- // (new) value on the stack. This is necessary for compiling
- // assignment expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST because of const declarations which will
- // initialize consts to 'the hole' value and by doing so, end up
- // calling this code. The scratch register may be loaded with the
- // context; it is used below in RecordWrite.
- Register tos = frame_->Peek();
- __ str(tos, SlotOperand(slot, scratch));
- if (slot->type() == Slot::CONTEXT) {
- // Skip write barrier if the written value is a smi.
- __ tst(tos, Operand(kSmiTagMask));
- // We don't use tos any more after here.
- exit.Branch(eq);
- // scratch is loaded with context when calling SlotOperand above.
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- // We need an extra register. Until we have a way to do that in the
- // virtual frame we will cheat and ask for a free TOS register.
- Register scratch3 = frame_->GetTOSRegister();
- __ RecordWrite(scratch, Operand(offset), scratch2, scratch3);
- }
- // If we definitely did not jump over the assignment, we do not need
- // to bind the exit label. Doing so can defeat peephole
- // optimization.
- if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
- exit.Bind();
- }
- }
-}
-
-
-void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow) {
- // Check that no extension objects have been created by calls to
- // eval from the current scope to the global scope.
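- // For illustration, in
- //   function f() { eval(s); return x; }  // x also exists globally
- // the eval may introduce a context extension binding 'x'; the fast
- // global load compiled here is only valid if no extension object is
- // found on the context chain, otherwise we branch to the slow path.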
- Register tmp = frame_->scratch0();
- Register tmp2 = frame_->scratch1();
- Register context = cp;
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- frame_->SpillAll();
- // Check that extension is NULL.
- __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(tmp2, tmp2);
- slow->Branch(ne);
- }
- // Load next context in chain.
- __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- frame_->SpillAll();
- Label next, fast;
- __ Move(tmp, context);
- __ bind(&next);
- // Terminate at global context.
- __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
- __ cmp(tmp2, ip);
- __ b(eq, &fast);
- // Check that extension is NULL.
- __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
- __ tst(tmp2, tmp2);
- slow->Branch(ne);
- // Load next context in chain.
- __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
- __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
- __ b(&next);
- __ bind(&fast);
- }
-
- // Load the global object.
- LoadGlobal();
- // Set up the name register and call the load IC.
- frame_->CallLoadIC(slot->var()->name(),
- typeof_state == INSIDE_TYPEOF
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT);
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow,
- JumpTarget* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is often used without
- // introducing any variables; in those cases we do not want to
- // perform a runtime call for every variable in the scope
- // containing the eval.
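- // For illustration, in
- //   function f(a) { eval("0"); return a; }
- // 'a' is only potentially shadowed; the fast case loads the
- // parameter directly and falls back to a runtime lookup only if a
- // context extension is actually found.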
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
- frame_->SpillAll();
- done->Jump();
-
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- frame_->SpillAll();
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
- Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
- if (potential_slot != NULL) {
- // Generate fast case for locals that rewrite to slots.
- __ ldr(r0,
- ContextSlotOperandCheckExtensions(potential_slot,
- r1,
- r2,
- slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, ip);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- }
- done->Jump();
- } else if (rewrite != NULL) {
- // Generate fast case for argument loads.
- Property* property = rewrite->AsProperty();
- if (property != NULL) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- Literal* key_literal = property->key()->AsLiteral();
- if (obj_proxy != NULL &&
- key_literal != NULL &&
- obj_proxy->IsArguments() &&
- key_literal->handle()->IsSmi()) {
- // Load arguments object if there are no eval-introduced
- // variables. Then load the argument from the arguments
- // object using keyed load.
- __ ldr(r0,
- ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
- r1,
- r2,
- slow));
- frame_->EmitPush(r0);
- __ mov(r1, Operand(key_literal->handle()));
- frame_->EmitPush(r1);
- EmitKeyedLoad();
- done->Jump();
- }
- }
- }
- }
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Slot");
- LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ VariableProxy");
-
- Variable* var = node->var();
- Expression* expr = var->rewrite();
- if (expr != NULL) {
- Visit(expr);
- } else {
- ASSERT(var->is_global());
- Reference ref(this, node);
- ref.GetValue();
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Literal");
- Register reg = frame_->GetTOSRegister();
- bool is_smi = node->handle()->IsSmi();
- __ mov(reg, Operand(node->handle()));
- frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown());
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ RexExp Literal");
-
- Register tmp = VirtualFrame::scratch0();
- // Free up a TOS register that can be used to push the literal.
- Register literal = frame_->GetTOSRegister();
-
- // Retrieve the literal array and check the allocated entry.
-
- // Load the function of this activation.
- __ ldr(tmp, frame_->Function());
-
- // Load the literals array of the function.
- __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kLiteralsOffset));
-
- // Load the literal at the index saved in the AST node.
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ ldr(literal, FieldMemOperand(tmp, literal_offset));
-
- JumpTarget materialized;
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(literal, ip);
- // This branch locks the virtual frame at the materialized label to
- // match the one we have here, where the literal register is not on
- // the stack and nothing is spilled.
- materialized.Branch(ne);
-
- // If the entry is undefined we call the runtime system to compute
- // the literal.
- // literal array (0)
- frame_->EmitPush(tmp);
- // literal index (1)
- frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
- // RegExp pattern (2)
- frame_->EmitPush(Operand(node->pattern()));
- // RegExp flags (3)
- frame_->EmitPush(Operand(node->flags()));
- frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ Move(literal, r0);
-
- materialized.Bind();
-
- frame_->EmitPush(literal);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- frame_->EmitPush(Operand(Smi::FromInt(size)));
- frame_->CallRuntime(Runtime::kAllocateInNewSpace, 1);
- // TODO(lrn): Use AllocateInNewSpace macro with fallback to runtime.
- // r0 is newly allocated space.
-
- // Reuse literal variable with (possibly) a new register, still holding
- // the materialized boilerplate.
- literal = frame_->PopToRegister(r0);
-
- __ CopyFields(r0, literal, tmp.bit(), size / kPointerSize);
-
- // Push the clone.
- frame_->EmitPush(r0);
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ ObjectLiteral");
-
- Register literal = frame_->GetTOSRegister();
- // Load the function of this activation.
- __ ldr(literal, frame_->Function());
- // Literal array.
- __ ldr(literal, FieldMemOperand(literal, JSFunction::kLiteralsOffset));
- frame_->EmitPush(literal);
- // Literal index.
- frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
- // Constant properties.
- frame_->EmitPush(Operand(node->constant_properties()));
- // Should the object literal have fast elements?
- frame_->EmitPush(Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
- if (node->depth() > 1) {
- frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
- frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- }
- frame_->EmitPush(r0); // save the result
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- node->CalculateEmitStore();
-
- for (int i = 0; i < node->properties()->length(); i++) {
- // At the start of each iteration, the top of stack contains
- // the newly created object literal.
- ObjectLiteral::Property* property = node->properties()->at(i);
- Literal* key = property->key();
- Expression* value = property->value();
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- break;
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
- // else fall through
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- Load(value);
- if (property->emit_store()) {
- frame_->PopToR0();
- // Fetch the object literal.
- frame_->SpillAllButCopyTOSToR1();
- __ mov(r2, Operand(key->handle()));
- frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
- } else {
- frame_->Drop();
- }
- break;
- }
- // else fall through
- case ObjectLiteral::Property::PROTOTYPE: {
- frame_->Dup();
- Load(key);
- Load(value);
- if (property->emit_store()) {
- frame_->EmitPush(Operand(Smi::FromInt(NONE))); // PropertyAttributes
- frame_->CallRuntime(Runtime::kSetProperty, 4);
- } else {
- frame_->Drop(3);
- }
- break;
- }
- case ObjectLiteral::Property::SETTER: {
- frame_->Dup();
- Load(key);
- frame_->EmitPush(Operand(Smi::FromInt(1)));
- Load(value);
- frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- break;
- }
- case ObjectLiteral::Property::GETTER: {
- frame_->Dup();
- Load(key);
- frame_->EmitPush(Operand(Smi::FromInt(0)));
- Load(value);
- frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- break;
- }
- }
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- Register tos = frame_->GetTOSRegister();
- // Load the function of this activation.
- __ ldr(tos, frame_->Function());
- // Load the literals array of the function.
- __ ldr(tos, FieldMemOperand(tos, JSFunction::kLiteralsOffset));
- frame_->EmitPush(tos);
- frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
- frame_->EmitPush(Operand(node->constant_elements()));
- int length = node->values()->length();
- if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
- frame_->CallStub(&stub, 3);
- __ IncrementCounter(&Counters::cow_arrays_created_stub, 1, r1, r2);
- } else if (node->depth() > 1) {
- frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
- frame_->CallStub(&stub, 3);
- }
- frame_->EmitPush(r0); // save the result
- // r0: created object literal
-
- // Generate code to set the elements in the array that are not
- // literals.
- for (int i = 0; i < node->values()->length(); i++) {
- Expression* value = node->values()->at(i);
-
- // If value is a literal the property value is already set in the
- // boilerplate object.
- if (value->AsLiteral() != NULL) continue;
- // If value is a materialized literal the property value is already set
- // in the boilerplate object if it is simple.
- if (CompileTimeValue::IsCompileTimeValue(value)) continue;
-
- // The property must be set by generated code.
- Load(value);
- frame_->PopToR0();
- // Fetch the object literal.
- frame_->SpillAllButCopyTOSToR1();
-
- // Get the elements array.
- __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
-
- // Write to the indexed properties array.
- int offset = i * kPointerSize + FixedArray::kHeaderSize;
- __ str(r0, FieldMemOperand(r1, offset));
-
- // Update the write barrier for the array address.
- __ RecordWrite(r1, Operand(offset), r3, r2);
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- // Call runtime routine to allocate the catch extension object and
- // assign the exception value to the catch variable.
- Comment cmnt(masm_, "[ CatchExtensionObject");
- Load(node->key());
- Load(node->value());
- frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- frame_->EmitPush(r0);
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm(), "[ Variable Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL);
- Slot* slot = var->AsSlot();
- ASSERT(slot != NULL);
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
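- // For illustration: for 'x += y' this pushes the current value of
- // 'x', and the code below combines it with the right-hand side
- // (taking an inlined path when that side is a smi literal) before
- // the store.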
-
- // Perform the binary operation.
- Literal* literal = node->value()->AsLiteral();
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- if (literal != NULL && literal->handle()->IsSmi()) {
- SmiOperation(node->binary_op(),
- literal->handle(),
- false,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- GenerateInlineSmi inline_smi =
- loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
- if (literal != NULL) {
- ASSERT(!literal->handle()->IsSmi());
- inline_smi = DONT_GENERATE_INLINE_SMI;
- }
- Load(node->value());
- GenericBinaryOperation(node->binary_op(),
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
- inline_smi);
- }
- } else {
- Load(node->value());
- }
-
- // Perform the assignment.
- if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
- CodeForSourcePosition(node->position());
- StoreToSlot(slot,
- node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm(), "[ Named Property Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
- ASSERT(var == NULL || (prop == NULL && var->is_global()));
-
- // Initialize name and evaluate the receiver sub-expression if necessary. If
- // the receiver is trivial it is not placed on the stack at this point, but
- // loaded whenever actually needed.
- Handle<String> name;
- bool is_trivial_receiver = false;
- if (var != NULL) {
- name = var->name();
- } else {
- Literal* lit = prop->key()->AsLiteral();
- ASSERT_NOT_NULL(lit);
- name = Handle<String>::cast(lit->handle());
- // Do not materialize the receiver on the frame if it is trivial.
- is_trivial_receiver = prop->obj()->IsTrivial();
- if (!is_trivial_receiver) Load(prop->obj());
- }
-
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- // An initialization block consists of assignments of the form
- // expr.x = ..., so this is never an assignment to a variable and
- // there must be a receiver object.
- ASSERT_EQ(NULL, var);
- if (is_trivial_receiver) {
- Load(prop->obj());
- } else {
- frame_->Dup();
- }
- frame_->CallRuntime(Runtime::kToSlowProperties, 1);
- }
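- // For illustration, a constructor body such as
- //   this.a = 1; this.b = 2; this.c = 3;
- // forms an initialization block: the receiver is switched to slow
- // properties here and back to fast properties once the block ends,
- // avoiding the quadratic cost of repeatedly adding fast properties.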
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block() && !is_trivial_receiver) {
- frame_->Dup();
- }
-
- // Stack layout:
- // [tos] : receiver (only materialized if non-trivial)
- // [tos+1] : receiver if at the end of an initialization block
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- if (is_trivial_receiver) {
- Load(prop->obj());
- } else if (var != NULL) {
- LoadGlobal();
- } else {
- frame_->Dup();
- }
- EmitNamedLoad(name, var != NULL);
-
- // Perform the binary operation.
- Literal* literal = node->value()->AsLiteral();
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- if (literal != NULL && literal->handle()->IsSmi()) {
- SmiOperation(node->binary_op(),
- literal->handle(),
- false,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- GenerateInlineSmi inline_smi =
- loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
- if (literal != NULL) {
- ASSERT(!literal->handle()->IsSmi());
- inline_smi = DONT_GENERATE_INLINE_SMI;
- }
- Load(node->value());
- GenericBinaryOperation(node->binary_op(),
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
- inline_smi);
- }
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : receiver (only materialized if non-trivial)
- // [tos+2] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(var == NULL || var->mode() != Variable::CONST);
- ASSERT_NE(Token::INIT_CONST, node->op());
- if (is_trivial_receiver) {
- // Load the receiver and swap with the value.
- Load(prop->obj());
- Register t0 = frame_->PopToRegister();
- Register t1 = frame_->PopToRegister(t0);
- frame_->EmitPush(t0);
- frame_->EmitPush(t1);
- }
- CodeForSourcePosition(node->position());
- bool is_contextual = (var != NULL);
- EmitNamedStore(name, is_contextual);
- frame_->EmitPush(r0);
-
- // Change to fast case at the end of an initialization block.
- if (node->ends_initialization_block()) {
- ASSERT_EQ(NULL, var);
- // The argument to the runtime call is the receiver.
- if (is_trivial_receiver) {
- Load(prop->obj());
- } else {
- // A copy of the receiver is below the value of the assignment. Swap
- // the receiver and the value of the assignment expression.
- Register t0 = frame_->PopToRegister();
- Register t1 = frame_->PopToRegister(t0);
- frame_->EmitPush(t0);
- frame_->EmitPush(t1);
- }
- frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Keyed Property Assignment");
- Property* prop = node->target()->AsProperty();
- ASSERT_NOT_NULL(prop);
-
- // Evaluate the receiver subexpression.
- Load(prop->obj());
-
- WriteBarrierCharacter wb_info;
-
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- frame_->Dup();
- frame_->CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block()) {
- frame_->Dup();
- }
-
- // Evaluate the key subexpression.
- Load(prop->key());
-
- // Stack layout:
- // [tos] : key
- // [tos+1] : receiver
- // [tos+2] : receiver if at the end of an initialization block
- //
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- // Duplicate receiver and key for loading the current property value.
- frame_->Dup2();
- EmitKeyedLoad();
- frame_->EmitPush(r0);
-
- // Perform the binary operation.
- Literal* literal = node->value()->AsLiteral();
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- if (literal != NULL && literal->handle()->IsSmi()) {
- SmiOperation(node->binary_op(),
- literal->handle(),
- false,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- GenerateInlineSmi inline_smi =
- loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
- if (literal != NULL) {
- ASSERT(!literal->handle()->IsSmi());
- inline_smi = DONT_GENERATE_INLINE_SMI;
- }
- Load(node->value());
- GenericBinaryOperation(node->binary_op(),
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
- inline_smi);
- }
- wb_info = node->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- wb_info = node->value()->AsLiteral() != NULL ?
- NEVER_NEWSPACE :
- (node->value()->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI);
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : key
- // [tos+2] : receiver
- // [tos+3] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(node->op() != Token::INIT_CONST);
- CodeForSourcePosition(node->position());
- EmitKeyedStore(prop->key()->type(), wb_info);
- frame_->EmitPush(r0);
-
- // Stack layout:
- // [tos] : result
- // [tos+1] : receiver if at the end of an initialization block
-
- // Change to fast case at the end of an initialization block.
- if (node->ends_initialization_block()) {
- // The argument to the runtime call is the extra copy of the receiver,
- // which is below the value of the assignment. Swap the receiver and
- // the value of the assignment expression.
- Register t0 = frame_->PopToRegister();
- Register t1 = frame_->PopToRegister(t0);
- frame_->EmitPush(t1);
- frame_->EmitPush(t0);
- frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
- VirtualFrame::RegisterAllocationScope scope(this);
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Assignment");
-
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
-
- if (var != NULL && !var->is_global()) {
- EmitSlotAssignment(node);
-
- } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
- (var != NULL && var->is_global())) {
- // Properties whose keys are property names and global variables are
- // treated as named property references. We do not need to consider
- // global 'this' because it is not a valid left-hand side.
- EmitNamedPropertyAssignment(node);
-
- } else if (prop != NULL) {
- // Other properties (including rewritten parameters for a function that
- // uses arguments) are keyed property assignments.
- EmitKeyedPropertyAssignment(node);
-
- } else {
- // Invalid left-hand side.
- Load(node->target());
- frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
- // The runtime call doesn't actually return, but the code generator
- // still generates code and expects a certain frame height.
- frame_->EmitPush(r0);
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Throw");
-
- Load(node->exception());
- CodeForSourcePosition(node->position());
- frame_->CallRuntime(Runtime::kThrow, 1);
- frame_->EmitPush(r0);
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Property");
-
- { Reference property(this, node);
- property.GetValue();
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ Call");
-
- Expression* function = node->expression();
- ZoneList<Expression*>* args = node->arguments();
-
- // Standard function call.
- // Check if the function is a variable or a property.
- Variable* var = function->AsVariableProxy()->AsVariable();
- Property* property = function->AsProperty();
-
- // ------------------------------------------------------------------------
- // Fast-case: Use inline caching.
- // ---
- // According to ECMA-262, section 11.2.3, page 44, the function to call
- // must be resolved after the arguments have been evaluated. The IC code
- // automatically handles this by loading the arguments before the function
- // is resolved in cache misses (this also holds for megamorphic calls).
- // ------------------------------------------------------------------------
-
- if (var != NULL && var->is_possibly_eval()) {
- // ----------------------------------
- // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
- // ----------------------------------
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
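- // For illustration: after the resolve call below returns, r0 holds
- // the function to call and r1 the receiver; both are written back
- // into the frame slots reserved here before the actual call.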
-
- // Prepare stack for call to resolved function.
- Load(function);
-
- // Allocate a frame slot for the receiver.
- frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
- // If we know that eval can only be shadowed by eval-introduced
- // variables, we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- JumpTarget done;
- if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
- ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
- JumpTarget slow;
- // Prepare the stack for the call to
- // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
- // function, the first argument to the eval call and the
- // receiver.
- LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow);
- frame_->EmitPush(r0);
- if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- frame_->EmitPush(r1);
- } else {
- frame_->EmitPush(r2);
- }
- __ ldr(r1, frame_->Receiver());
- frame_->EmitPush(r1);
-
- // Push the strict mode flag.
- frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
-
- done.Jump();
- slow.Bind();
- }
-
- // Prepare the stack for the call to ResolvePossiblyDirectEval by
- // pushing the loaded function, the first argument to the eval
- // call and the receiver.
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
- frame_->EmitPush(r1);
- if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- frame_->EmitPush(r1);
- } else {
- frame_->EmitPush(r2);
- }
- __ ldr(r1, frame_->Receiver());
- frame_->EmitPush(r1);
-
- // Push the strict mode flag.
- frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-
- // Resolve the call.
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
-
- // If we generated fast-case code, bind the jump target where the
- // fast and slow cases merge.
- if (done.is_linked()) done.Bind();
-
- // Touch up stack with the right values for the function and the receiver.
- __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ str(r1, MemOperand(sp, arg_count * kPointerSize));
-
- // Call the function.
- CodeForSourcePosition(node->position());
-
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
- frame_->CallStub(&call_function, arg_count + 1);
-
- __ ldr(cp, frame_->Context());
- // Remove the function from the stack.
- frame_->Drop();
- frame_->EmitPush(r0);
-
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is global
- // ----------------------------------
- // Pass the global object as the receiver and let the IC stub
- // patch the stack to use the global proxy as 'this' in the
- // invoked function.
- LoadGlobal();
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
- // Set up the name register and call the IC initialization code.
- __ mov(r2, Operand(var->name()));
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub = StubCache::ComputeCallInitialize(arg_count, in_loop);
- CodeForSourcePosition(node->position());
- frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
- arg_count + 1);
- __ ldr(cp, frame_->Context());
- frame_->EmitPush(r0);
-
- } else if (var != NULL && var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::LOOKUP) {
- // ----------------------------------
- // JavaScript examples:
- //
- // with (obj) foo(1, 2, 3) // foo may be in obj.
- //
- // function f() {};
- // function g() {
- // eval(...);
- // f(); // f could be in extension object.
- // }
- // ----------------------------------
-
- JumpTarget slow, done;
-
- // Generate fast case for loading functions from slots that
- // correspond to local/global variables or arguments unless they
- // are shadowed by eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow,
- &done);
-
- slow.Bind();
- // Load the function.
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(var->name()));
- frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- // r0: slot value; r1: receiver
-
- // Load the receiver.
- frame_->EmitPush(r0); // function
- frame_->EmitPush(r1); // receiver
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- JumpTarget call;
- call.Jump();
- done.Bind();
- frame_->EmitPush(r0); // function
- LoadGlobalReceiver(VirtualFrame::scratch0()); // receiver
- call.Bind();
- }
-
- // Call the function. At this point, everything is spilled but the
- // function and receiver are in r0 and r1.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
- frame_->EmitPush(r0);
-
- } else if (property != NULL) {
- // Check if the key is a literal string.
- Literal* literal = property->key()->AsLiteral();
-
- if (literal != NULL && literal->handle()->IsSymbol()) {
- // ------------------------------------------------------------------
- // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
- // ------------------------------------------------------------------
-
- Handle<String> name = Handle<String>::cast(literal->handle());
-
- if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
- name->IsEqualTo(CStrVector("apply")) &&
- args->length() == 2 &&
- args->at(1)->AsVariableProxy() != NULL &&
- args->at(1)->AsVariableProxy()->IsArguments()) {
- // Use the optimized Function.prototype.apply that avoids
- // allocating lazily allocated arguments objects.
- CallApplyLazy(property->obj(),
- args->at(0),
- args->at(1)->AsVariableProxy(),
- node->position());
-
- } else {
- Load(property->obj()); // Receiver.
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
- // Set the name register and call the IC initialization code.
- __ mov(r2, Operand(name));
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub =
- StubCache::ComputeCallInitialize(arg_count, in_loop);
- CodeForSourcePosition(node->position());
- frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
- __ ldr(cp, frame_->Context());
- frame_->EmitPush(r0);
- }
-
- } else {
- // -------------------------------------------
- // JavaScript example: 'array[index](1, 2, 3)'
- // -------------------------------------------
-
- // Load the receiver and name of the function.
- Load(property->obj());
- Load(property->key());
-
- if (property->is_synthetic()) {
- EmitKeyedLoad();
- // Put the function below the receiver.
- // Use the global receiver.
- frame_->EmitPush(r0); // Function.
- LoadGlobalReceiver(VirtualFrame::scratch0());
- // Call the function.
- CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
- frame_->EmitPush(r0);
- } else {
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- Register key = frame_->PopToRegister();
- Register receiver = frame_->PopToRegister(key);
- frame_->EmitPush(key);
- frame_->EmitPush(receiver);
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Load the key into r2 and call the IC initialization code.
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub =
- StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
- CodeForSourcePosition(node->position());
- frame_->SpillAll();
- __ ldr(r2, frame_->ElementAt(arg_count + 1));
- frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
- frame_->Drop(); // Drop the key still on the stack.
- __ ldr(cp, frame_->Context());
- frame_->EmitPush(r0);
- }
- }
-
- } else {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is not global
- // ----------------------------------
-
- // Load the function.
- Load(function);
-
- // Pass the global proxy as the receiver.
- LoadGlobalReceiver(VirtualFrame::scratch0());
-
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
- frame_->EmitPush(r0);
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
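-
-
- // Editorial note: the "possibly direct eval" handling above distinguishes,
- // at run time, a direct call 'eval(src)' (which must evaluate src in the
- // caller's scope) from an aliased call such as 'var e = eval; e(src)'
- // (which evaluates src in the global scope). A hypothetical illustration:
- //
- //   var x = 'global';
- //   function direct() { var x = 'local'; return eval('x'); }  // 'local'
- //   function aliased() { var x = 'local'; var e = eval;
- //                        return e('x'); }                     // 'global'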
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ CallNew");
-
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments. This is different from ordinary calls, where the
- // actual function to call is resolved after the arguments have been
- // evaluated.
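- //
- // For example, in 'new f(g())' the expression f is evaluated to a value
- // before g() runs, which is why the code below pushes the constructor
- // first and the arguments afterwards.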
-
- // Push the constructor on the stack. If it's not a function it's used as
- // the receiver for CALL_NON_FUNCTION; otherwise the value on the stack is
- // ignored.
- Load(node->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = node->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Spill everything from here to simplify the implementation.
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
- // Load the argument count into r0 and the function into r1 as per
- // calling convention.
- __ mov(r0, Operand(arg_count));
- __ ldr(r1, frame_->ElementAt(arg_count));
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- CodeForSourcePosition(node->position());
- Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
- frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
- frame_->EmitPush(r0);
-
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- Register scratch = VirtualFrame::scratch0();
- JumpTarget null, function, leave, non_function_constructor;
-
- // Load the object into register.
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register tos = frame_->PopToRegister();
-
- // If the object is a smi, we return null.
- __ tst(tos, Operand(kSmiTagMask));
- null.Branch(eq);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- __ CompareObjectType(tos, tos, scratch, FIRST_JS_OBJECT_TYPE);
- null.Branch(lt);
-
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ cmp(scratch, Operand(JS_FUNCTION_TYPE));
- function.Branch(eq);
-
- // Check if the constructor in the map is a function.
- __ ldr(tos, FieldMemOperand(tos, Map::kConstructorOffset));
- __ CompareObjectType(tos, scratch, scratch, JS_FUNCTION_TYPE);
- non_function_constructor.Branch(ne);
-
- // The tos register now contains the constructor function. Grab the
- // instance class name from there.
- __ ldr(tos, FieldMemOperand(tos, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(tos,
- FieldMemOperand(tos, SharedFunctionInfo::kInstanceClassNameOffset));
- frame_->EmitPush(tos);
- leave.Jump();
-
- // Functions have class 'Function'.
- function.Bind();
- __ mov(tos, Operand(Factory::function_class_symbol()));
- frame_->EmitPush(tos);
- leave.Jump();
-
- // Objects with a non-function constructor have class 'Object'.
- non_function_constructor.Bind();
- __ mov(tos, Operand(Factory::Object_symbol()));
- frame_->EmitPush(tos);
- leave.Jump();
-
- // Non-JS objects have class null.
- null.Bind();
- __ LoadRoot(tos, Heap::kNullValueRootIndex);
- frame_->EmitPush(tos);
-
- // All done.
- leave.Bind();
-}
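-
-
- // Editorial sketch: the control flow above amounts to the classification
- // below. Standalone C++ with hypothetical stand-ins for V8's object model
- // (not V8 API); illustrative only.
- struct ClassOfInputsSketch {
-   bool is_smi;
-   bool is_js_object;            // instance type >= FIRST_JS_OBJECT_TYPE
-   bool is_function;             // instance type == JS_FUNCTION_TYPE
-   bool constructor_is_function;
-   const char* instance_class_name;  // from the SharedFunctionInfo
- };
-
- static const char* ClassOfSketch(const ClassOfInputsSketch& in) {
-   if (in.is_smi || !in.is_js_object) return 0;  // class is null
-   if (in.is_function) return "Function";
-   if (!in.constructor_is_function) return "Object";
-   return in.instance_class_name;
- }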
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- Register scratch = VirtualFrame::scratch0();
- JumpTarget leave;
-
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register tos = frame_->PopToRegister(); // tos contains object.
- // if (object->IsSmi()) return the object.
- __ tst(tos, Operand(kSmiTagMask));
- leave.Branch(eq);
- // It is a heap object - get map. If (!object->IsJSValue()) return the object.
- __ CompareObjectType(tos, scratch, scratch, JS_VALUE_TYPE);
- leave.Branch(ne);
- // Load the value.
- __ ldr(tos, FieldMemOperand(tos, JSValue::kValueOffset));
- leave.Bind();
- frame_->EmitPush(tos);
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- JumpTarget leave;
-
- ASSERT(args->length() == 2);
- Load(args->at(0)); // Load the object.
- Load(args->at(1)); // Load the value.
- Register value = frame_->PopToRegister();
- Register object = frame_->PopToRegister(value);
- // if (object->IsSmi()) return object.
- __ tst(object, Operand(kSmiTagMask));
- leave.Branch(eq);
- // It is a heap object - get map. If (!object->IsJSValue()) return the object.
- __ CompareObjectType(object, scratch1, scratch1, JS_VALUE_TYPE);
- leave.Branch(ne);
- // Store the value.
- __ str(value, FieldMemOperand(object, JSValue::kValueOffset));
- // Update the write barrier.
- __ RecordWrite(object,
- Operand(JSValue::kValueOffset - kHeapObjectTag),
- scratch1,
- scratch2);
- // Leave.
- leave.Bind();
- frame_->EmitPush(value);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register reg = frame_->PopToRegister();
- __ tst(reg, Operand(kSmiTagMask));
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
- ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (ShouldGenerateLog(args->at(0))) {
- Load(args->at(1));
- Load(args->at(2));
- frame_->CallRuntime(Runtime::kLog, 2);
- }
-#endif
- frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register reg = frame_->PopToRegister();
- __ tst(reg, Operand(kSmiTagMask | 0x80000000u));
- cc_reg_ = eq;
-}
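-
-
- // Editorial sketch: with a zero one-bit smi tag, a 32-bit word holds a
- // non-negative smi exactly when both the tag bit and the sign bit are
- // clear, which is what the combined mask above tests. Standalone C++;
- // illustrative only.
- static bool IsNonNegativeSmiSketch(unsigned int word) {
-   const unsigned int kTagMaskSketch = 1u;          // cf. kSmiTagMask
-   const unsigned int kSignBitSketch = 0x80000000u;
-   return (word & (kTagMaskSketch | kSignBitSketch)) == 0;
- }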
-
-
- // Generates code for the Math.pow method.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- Load(args->at(0));
- Load(args->at(1));
-
- if (!CpuFeatures::IsSupported(VFP3)) {
- frame_->CallRuntime(Runtime::kMath_pow, 2);
- frame_->EmitPush(r0);
- } else {
- CpuFeatures::Scope scope(VFP3);
- JumpTarget runtime, done;
- Label exponent_nonsmi, base_nonsmi, powi, not_minus_half, allocate_return;
-
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
-
- // Get base and exponent to registers.
- Register exponent = frame_->PopToRegister();
- Register base = frame_->PopToRegister(exponent);
- Register heap_number_map = no_reg;
-
- // Set the frame for the runtime jump target. The code below jumps to the
- // jump target label so the frame needs to be established before that.
- ASSERT(runtime.entry_frame() == NULL);
- runtime.set_entry_frame(frame_);
-
- __ JumpIfNotSmi(exponent, &exponent_nonsmi);
- __ JumpIfNotSmi(base, &base_nonsmi);
-
- heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- // Exponent is a smi and base is a smi. Get the smi value into vfp register
- // d1.
- __ SmiToDoubleVFPRegister(base, d1, scratch1, s0);
- __ b(&powi);
-
- __ bind(&base_nonsmi);
- // Exponent is a smi and base is a non-smi. Get the double value from the
- // base into vfp register d1.
- __ ObjectToDoubleVFPRegister(base, d1,
- scratch1, scratch2, heap_number_map, s0,
- runtime.entry_label());
-
- __ bind(&powi);
-
- // Load 1.0 into d0.
- __ vmov(d0, 1.0);
-
- // Get the absolute untagged value of the exponent and use that for the
- // calculation.
- __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
- // Negate if negative.
- __ rsb(scratch1, scratch1, Operand(0, RelocInfo::NONE), LeaveCC, mi);
- __ vmov(d2, d0, mi); // 1.0 needed in d2 later if exponent is negative.
-
- // Run through all the bits in the exponent. The result is accumulated in
- // d0 and d1 holds base^(2^i) for the current bit position i.
- Label more_bits;
- __ bind(&more_bits);
- __ mov(scratch1, Operand(scratch1, LSR, 1), SetCC);
- __ vmul(d0, d0, d1, cs); // Multiply with base^(bit^2) if bit is set.
- __ vmul(d1, d1, d1, ne); // Don't bother calculating next d1 if done.
- __ b(ne, &more_bits);
-
- // If exponent is positive we are done.
- __ cmp(exponent, Operand(0, RelocInfo::NONE));
- __ b(ge, &allocate_return);
-
- // If the exponent is negative the result is 1/result (d2 already holds
- // 1.0 in that case). However, if d0 has reached infinity this will not
- // provide the correct result, so call the runtime if that is the case.
- __ mov(scratch2, Operand(0x7FF00000));
- __ mov(scratch1, Operand(0, RelocInfo::NONE));
- __ vmov(d1, scratch1, scratch2); // Load infinity into d1.
- __ VFPCompareAndSetFlags(d0, d1);
- runtime.Branch(eq); // d0 reached infinity.
- __ vdiv(d0, d2, d0);
- __ b(&allocate_return);
-
- __ bind(&exponent_nonsmi);
- // Special handling of raising to the power of -0.5 and 0.5. First check
- // that the value is a heap number and that its lower mantissa bits are
- // zero (which is the case for both values).
- heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ ldr(scratch1, FieldMemOperand(exponent, HeapObject::kMapOffset));
- __ ldr(scratch2, FieldMemOperand(exponent, HeapNumber::kMantissaOffset));
- __ cmp(scratch1, heap_number_map);
- runtime.Branch(ne);
- __ tst(scratch2, scratch2);
- runtime.Branch(ne);
-
- // Load the higher bits (which contains the floating point exponent).
- __ ldr(scratch1, FieldMemOperand(exponent, HeapNumber::kExponentOffset));
-
- // Compare exponent with -0.5.
- __ cmp(scratch1, Operand(0xbfe00000));
- __ b(ne, &not_minus_half);
-
- // Get the double value from the base into vfp register d0.
- __ ObjectToDoubleVFPRegister(base, d0,
- scratch1, scratch2, heap_number_map, s0,
- runtime.entry_label(),
- AVOID_NANS_AND_INFINITIES);
-
- // Convert -0 into +0 by adding +0.
- __ vmov(d2, 0.0);
- __ vadd(d0, d2, d0);
- // Load 1.0 into d2.
- __ vmov(d2, 1.0);
-
- // Calculate the reciprocal of the square root.
- __ vsqrt(d0, d0);
- __ vdiv(d0, d2, d0);
-
- __ b(&allocate_return);
-
- __ bind(&not_minus_half);
- // Compare exponent with 0.5.
- __ cmp(scratch1, Operand(0x3fe00000));
- runtime.Branch(ne);
-
- // Get the double value from the base into vfp register d0.
- __ ObjectToDoubleVFPRegister(base, d0,
- scratch1, scratch2, heap_number_map, s0,
- runtime.entry_label(),
- AVOID_NANS_AND_INFINITIES);
- // Convert -0 into +0 by adding +0.
- __ vmov(d2, 0.0);
- __ vadd(d0, d2, d0);
- __ vsqrt(d0, d0);
-
- __ bind(&allocate_return);
- Register scratch3 = r5;
- __ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2,
- heap_number_map, runtime.entry_label());
- __ mov(base, scratch3);
- done.Jump();
-
- runtime.Bind();
-
- // Push back the arguments again for the runtime call.
- frame_->EmitPush(base);
- frame_->EmitPush(exponent);
- frame_->CallRuntime(Runtime::kMath_pow, 2);
- __ Move(base, r0);
-
- done.Bind();
- frame_->EmitPush(base);
- }
-}
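-
-
- // Editorial sketch: the bit loop above is binary exponentiation
- // (square-and-multiply). A standalone C++ equivalent of the smi-exponent
- // path, including the reciprocal for negative exponents; illustrative
- // only, not V8 API. The generated code additionally bails out to the
- // runtime when the intermediate result reaches infinity.
- static double PowiSketch(double base, int exponent) {
-   unsigned int bits =
-       exponent < 0 ? 0u - (unsigned int)exponent : (unsigned int)exponent;
-   double result = 1.0;  // accumulated answer (d0 above)
-   double power = base;  // base^(2^i) for the current bit i (d1 above)
-   while (bits != 0) {
-     if (bits & 1u) result *= power;  // multiply in base^(2^i) if bit set
-     bits >>= 1;
-     power *= power;  // square for the next bit
-   }
-   return exponent < 0 ? 1.0 / result : result;
- }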
-
-
- // Generates code for the Math.sqrt method.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
-
- if (!CpuFeatures::IsSupported(VFP3)) {
- frame_->CallRuntime(Runtime::kMath_sqrt, 1);
- frame_->EmitPush(r0);
- } else {
- CpuFeatures::Scope scope(VFP3);
- JumpTarget runtime, done;
-
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
-
- // Get the value from the frame.
- Register tos = frame_->PopToRegister();
-
- // Set the frame for the runtime jump target. The code below jumps to the
- // jump target label so the frame needs to be established before that.
- ASSERT(runtime.entry_frame() == NULL);
- runtime.set_entry_frame(frame_);
-
- Register heap_number_map = r6;
- Register new_heap_number = r5;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- // Get the double value from the heap number into vfp register d0.
- __ ObjectToDoubleVFPRegister(tos, d0,
- scratch1, scratch2, heap_number_map, s0,
- runtime.entry_label());
-
- // Calculate the square root of d0 and place result in a heap number object.
- __ vsqrt(d0, d0);
- __ AllocateHeapNumberWithValue(new_heap_number,
- d0,
- scratch1, scratch2,
- heap_number_map,
- runtime.entry_label());
- __ mov(tos, Operand(new_heap_number));
- done.Jump();
-
- runtime.Bind();
- // Push back the argument again for the runtime call.
- frame_->EmitPush(tos);
- frame_->CallRuntime(Runtime::kMath_sqrt, 1);
- __ Move(tos, r0);
-
- done.Bind();
- frame_->EmitPush(tos);
- }
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
- DeferredStringCharCodeAt(Register object,
- Register index,
- Register scratch,
- Register result)
- : result_(result),
- char_code_at_generator_(object,
- index,
- scratch,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharCodeAtGenerator* fast_case_generator() {
- return &char_code_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ LoadRoot(result_, Heap::kNanValueRootIndex);
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
- // This generates code that performs a String.prototype.charCodeAt() call
- // or returns undefined in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharCodeAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
-
- Register index = frame_->PopToRegister();
- Register object = frame_->PopToRegister(index);
-
- // We need two extra registers.
- Register scratch = VirtualFrame::scratch0();
- Register result = VirtualFrame::scratch1();
-
- DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(object,
- index,
- scratch,
- result);
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->EmitPush(result);
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
- DeferredStringCharFromCode(Register code,
- Register result)
- : char_from_code_generator_(code, result) {}
-
- StringCharFromCodeGenerator* fast_case_generator() {
- return &char_from_code_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_from_code_generator_.GenerateSlow(masm(), call_helper);
- }
-
- private:
- StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharFromCode");
- ASSERT(args->length() == 1);
-
- Load(args->at(0));
-
- Register result = frame_->GetTOSRegister();
- Register code = frame_->PopToRegister(result);
-
- DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
- code, result);
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->EmitPush(result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
- DeferredStringCharAt(Register object,
- Register index,
- Register scratch1,
- Register scratch2,
- Register result)
- : result_(result),
- char_at_generator_(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharAtGenerator* fast_case_generator() {
- return &char_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ mov(result_, Operand(Smi::FromInt(0)));
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
-
- Register index = frame_->PopToRegister();
- Register object = frame_->PopToRegister(index);
-
- // We need three extra registers.
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- // Use r6 without notifying the virtual frame.
- Register result = r6;
-
- DeferredStringCharAt* deferred =
- new DeferredStringCharAt(object,
- index,
- scratch1,
- scratch2,
- result);
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->EmitPush(result);
-}
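-
-
- // Editorial note: the two out-of-range cases above encode the spec
- // behavior, e.g. 'abc'.charAt(5) is '' while 'abc'.charCodeAt(5) is NaN.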
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- JumpTarget answer;
- // We need the CC bits to come out as not_equal in the case where the
- // object is a smi. This can't be done with the usual test opcode so
- // we use XOR to get the right CC bits.
- Register possible_array = frame_->PopToRegister();
- Register scratch = VirtualFrame::scratch0();
- __ and_(scratch, possible_array, Operand(kSmiTagMask));
- __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
- answer.Branch(ne);
- // It is a heap object - get the map. Check if the object is a JS array.
- __ CompareObjectType(possible_array, scratch, scratch, JS_ARRAY_TYPE);
- answer.Bind();
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- JumpTarget answer;
- // We need the CC bits to come out as not_equal in the case where the
- // object is a smi. This can't be done with the usual test opcode so
- // we use XOR to get the right CC bits.
- Register possible_regexp = frame_->PopToRegister();
- Register scratch = VirtualFrame::scratch0();
- __ and_(scratch, possible_regexp, Operand(kSmiTagMask));
- __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
- answer.Branch(ne);
- // It is a heap object - get the map. Check if the object is a regexp.
- __ CompareObjectType(possible_regexp, scratch, scratch, JS_REGEXP_TYPE);
- answer.Bind();
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register possible_object = frame_->PopToRegister();
- __ tst(possible_object, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(possible_object, ip);
- true_target()->Branch(eq);
-
- Register map_reg = VirtualFrame::scratch0();
- __ ldr(map_reg, FieldMemOperand(possible_object, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kBitFieldOffset));
- __ tst(possible_object, Operand(1 << Map::kIsUndetectable));
- false_target()->Branch(ne);
-
- __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
- __ cmp(possible_object, Operand(FIRST_JS_OBJECT_TYPE));
- false_target()->Branch(lt);
- __ cmp(possible_object, Operand(LAST_JS_OBJECT_TYPE));
- cc_reg_ = le;
-}
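-
-
- // Editorial sketch: the test above corresponds to the predicate below.
- // Standalone C++ with hypothetical inputs; illustrative only.
- static bool IsObjectSketch(bool is_smi, bool is_null, bool is_undetectable,
-                            int instance_type, int first_js_object_type,
-                            int last_js_object_type) {
-   if (is_smi) return false;
-   if (is_null) return true;           // typeof null == 'object'
-   if (is_undetectable) return false;  // behaves like undefined
-   return instance_type >= first_js_object_type &&
-          instance_type <= last_js_object_type;
- }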
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
- // typeof(arg) === 'function').
- // It includes undetectable objects (as opposed to IsObject).
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register value = frame_->PopToRegister();
- __ tst(value, Operand(kSmiTagMask));
- false_target()->Branch(eq);
- // Check that this is an object.
- __ ldr(value, FieldMemOperand(value, HeapObject::kMapOffset));
- __ ldrb(value, FieldMemOperand(value, Map::kInstanceTypeOffset));
- __ cmp(value, Operand(FIRST_JS_OBJECT_TYPE));
- cc_reg_ = ge;
-}
-
-
- // Deferred code to check whether a String wrapper object is safe to use
- // with the default valueOf. This code is called after the bit caching this
- // information has already been checked in the object's map, which is held
- // in the map_result_ register. On return the register map_result_ contains
- // 1 for true and 0 for false.
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
- DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
- Register map_result,
- Register scratch1,
- Register scratch2)
- : object_(object),
- map_result_(map_result),
- scratch1_(scratch1),
- scratch2_(scratch2) { }
-
- virtual void Generate() {
- Label false_result;
-
- // Check that map is loaded as expected.
- if (FLAG_debug_code) {
- __ ldr(ip, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ cmp(map_result_, ip);
- __ Assert(eq, "Map not in expected register");
- }
-
- // Check for fast case object. Generate false result for slow case object.
- __ ldr(scratch1_, FieldMemOperand(object_, JSObject::kPropertiesOffset));
- __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(scratch1_, ip);
- __ b(eq, &false_result);
-
- // Look for the valueOf symbol in the descriptor array, and indicate false
- // if found. The type is not checked, so if it is a transition it is a
- // false negative.
- __ ldr(map_result_,
- FieldMemOperand(map_result_, Map::kInstanceDescriptorsOffset));
- __ ldr(scratch2_, FieldMemOperand(map_result_, FixedArray::kLengthOffset));
- // map_result_: descriptor array
- // scratch2_: length of descriptor array
- // Calculate the end of the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ add(scratch1_,
- map_result_,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(scratch1_,
- scratch1_,
- Operand(scratch2_, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Calculate location of the first key name.
- __ add(map_result_,
- map_result_,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag +
- DescriptorArray::kFirstIndex * kPointerSize));
- // Loop through all the keys in the descriptor array. If one of these is
- // the symbol valueOf, the result is false.
- Label entry, loop;
- // The use of ip to store the valueOf symbol assumes that it is not
- // otherwise used in the loop below.
- __ mov(ip, Operand(Factory::value_of_symbol()));
- __ jmp(&entry);
- __ bind(&loop);
- __ ldr(scratch2_, MemOperand(map_result_, 0));
- __ cmp(scratch2_, ip);
- __ b(eq, &false_result);
- __ add(map_result_, map_result_, Operand(kPointerSize));
- __ bind(&entry);
- __ cmp(map_result_, Operand(scratch1_));
- __ b(ne, &loop);
-
- // Reload map as register map_result_ was used as temporary above.
- __ ldr(map_result_, FieldMemOperand(object_, HeapObject::kMapOffset));
-
- // If a valueOf property is not found on the object, check that its
- // prototype is the unmodified String prototype. If not, the result is
- // false.
- __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kPrototypeOffset));
- __ tst(scratch1_, Operand(kSmiTagMask));
- __ b(eq, &false_result);
- __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
- __ ldr(scratch2_,
- ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(scratch2_,
- FieldMemOperand(scratch2_, GlobalObject::kGlobalContextOffset));
- __ ldr(scratch2_,
- ContextOperand(
- scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ cmp(scratch1_, scratch2_);
- __ b(ne, &false_result);
-
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ ldrb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
- __ orr(scratch1_,
- scratch1_,
- Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ strb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
- __ mov(map_result_, Operand(1));
- __ jmp(exit_label());
- __ bind(&false_result);
- // Set false result.
- __ mov(map_result_, Operand(0, RelocInfo::NONE));
- }
-
- private:
- Register object_;
- Register map_result_;
- Register scratch1_;
- Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register obj = frame_->PopToRegister(); // Pop the string wrapper.
- if (FLAG_debug_code) {
- __ AbortIfSmi(obj);
- }
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- Register map_result = VirtualFrame::scratch0();
- __ ldr(map_result, FieldMemOperand(obj, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(map_result, Map::kBitField2Offset));
- __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- true_target()->Branch(ne);
-
- // We need an additional two scratch registers for the deferred code.
- Register scratch1 = VirtualFrame::scratch1();
- // Use r6 without notifying the virtual frame.
- Register scratch2 = r6;
-
- DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
- new DeferredIsStringWrapperSafeForDefaultValueOf(
- obj, map_result, scratch1, scratch2);
- deferred->Branch(eq);
- deferred->BindExit();
- __ tst(map_result, Operand(map_result));
- cc_reg_ = ne;
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (%_ClassOf(arg) === 'Function')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register possible_function = frame_->PopToRegister();
- __ tst(possible_function, Operand(kSmiTagMask));
- false_target()->Branch(eq);
- Register map_reg = VirtualFrame::scratch0();
- Register scratch = VirtualFrame::scratch1();
- __ CompareObjectType(possible_function, map_reg, scratch, JS_FUNCTION_TYPE);
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register possible_undetectable = frame_->PopToRegister();
- __ tst(possible_undetectable, Operand(kSmiTagMask));
- false_target()->Branch(eq);
- Register scratch = VirtualFrame::scratch0();
- __ ldr(scratch,
- FieldMemOperand(possible_undetectable, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- cc_reg_ = ne;
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Register scratch0 = VirtualFrame::scratch0();
- Register scratch1 = VirtualFrame::scratch1();
- // Get the frame pointer for the calling frame.
- __ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- __ ldr(scratch1,
- MemOperand(scratch0, StandardFrameConstants::kContextOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ldr(scratch0,
- MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq);
-
- // Check the marker in the calling frame.
- __ ldr(scratch1,
- MemOperand(scratch0, StandardFrameConstants::kMarkerOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Register tos = frame_->GetTOSRegister();
- Register scratch0 = VirtualFrame::scratch0();
- Register scratch1 = VirtualFrame::scratch1();
-
- // Check if the calling frame is an arguments adaptor frame.
- __ ldr(scratch0,
- MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(scratch1,
- MemOperand(scratch0, StandardFrameConstants::kContextOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Get the number of formal parameters.
- __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ ldr(tos,
- MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
- eq);
-
- frame_->EmitPush(tos);
-}
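-
-
- // Editorial note: the predicated 'mov' (ne) and 'ldr' (eq) above use ARM
- // conditional execution to implement, without branches,
- //   length = is_adaptor_frame ? adaptor_frame_length : num_parameters;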
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- // Satisfy contract with ArgumentsAccessStub:
- // Load the key into r1 and the formal parameters count into r0.
- Load(args->at(0));
- frame_->PopToR1();
- frame_->SpillAll();
- __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
-
- // Call the shared stub to get to arguments[key].
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- frame_->CallStub(&stub, 0);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
- ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- ASSERT(args->length() == 0);
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r4, Operand(r0));
-
- __ bind(&heapnumber_allocated);
-
- // Convert 32 random bits in r0 to 0.(32 random bits) in a double
- // by computing:
- // (1.(20 zeros)(32 random bits) x 2^20) - (1.0 x 2^20).
- if (CpuFeatures::IsSupported(VFP3)) {
- __ PrepareCallCFunction(0, r1);
- __ CallCFunction(ExternalReference::random_uint32_function(), 0);
-
- CpuFeatures::Scope scope(VFP3);
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- // Create this constant using mov/orr to avoid PC relative load.
- __ mov(r1, Operand(0x41000000));
- __ orr(r1, r1, Operand(0x300000));
- // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- __ vmov(d7, r0, r1);
- // Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand(0, RelocInfo::NONE));
- __ vmov(d8, r0, r1);
- // Subtract and store the result in the heap number.
- __ vsub(d7, d7, d8);
- __ sub(r0, r4, Operand(kHeapObjectTag));
- __ vstr(d7, r0, HeapNumber::kValueOffset);
- frame_->EmitPush(r4);
- } else {
- __ mov(r0, Operand(r4));
- __ PrepareCallCFunction(1, r1);
- __ CallCFunction(
- ExternalReference::fill_heap_number_with_random_function(), 1);
- frame_->EmitPush(r0);
- }
-}
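-
-
- // Editorial sketch: the VFP sequence above turns 32 random bits into a
- // double in [0, 1) without an integer-to-double conversion. Standalone
- // C++ equivalent; illustrative only, assuming IEEE 754 doubles and
- // matching integer/double endianness (type punning via union).
- static double RandomBitsToDoubleSketch(unsigned int random32) {
-   union { unsigned long long bits; double value; } a, b;
-   // 0x4130000000000000 is 1.0 x 2^20; placing the random bits in the low
-   // word yields 1.(20 zeros)(32 random bits) x 2^20.
-   b.bits = 0x41300000ULL << 32;
-   a.bits = b.bits | random32;
-   return a.value - b.value;  // == random32 / 2^32, i.e. 0.(32 random bits)
- }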
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- frame_->SpillAll();
- frame_->CallStub(&stub, 2);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- SubStringStub stub;
- frame_->SpillAll();
- frame_->CallStub(&stub, 3);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringCompareStub stub;
- frame_->SpillAll();
- frame_->CallStub(&stub, 2);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
- ASSERT_EQ(4, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
- Load(args->at(3));
- RegExpExecStub stub;
- frame_->SpillAll();
- frame_->CallStub(&stub, 4);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0)); // Size of array, smi.
- Load(args->at(1)); // "index" property value.
- Load(args->at(2)); // "input" property value.
- RegExpConstructResultStub stub;
- frame_->SpillAll();
- frame_->CallStub(&stub, 3);
- frame_->EmitPush(r0);
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
- DeferredSearchCache(Register dst, Register cache, Register key)
- : dst_(dst), cache_(cache), key_(key) {
- set_comment("[ DeferredSearchCache");
- }
-
- virtual void Generate();
-
- private:
- Register dst_, cache_, key_;
-};
-
-
-void DeferredSearchCache::Generate() {
- __ Push(cache_, key_);
- __ CallRuntime(Runtime::kGetFromCache, 2);
- __ Move(dst_, r0);
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
- return;
- }
-
- Load(args->at(1));
-
- frame_->PopToR1();
- frame_->SpillAll();
- Register key = r1; // Just popped to r1.
- Register result = r0; // Free, as frame has just been spilled.
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
-
- __ ldr(scratch1, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(scratch1,
- FieldMemOperand(scratch1, GlobalObject::kGlobalContextOffset));
- __ ldr(scratch1,
- ContextOperand(scratch1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ ldr(scratch1,
- FieldMemOperand(scratch1, FixedArray::OffsetOfElementAt(cache_id)));
-
- DeferredSearchCache* deferred =
- new DeferredSearchCache(result, scratch1, key);
-
- const int kFingerOffset =
- FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ ldr(result, FieldMemOperand(scratch1, kFingerOffset));
- // result now holds finger offset as a smi.
- __ add(scratch2, scratch1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // scratch2 now points to the start of fixed array elements.
- __ ldr(result,
- MemOperand(
- scratch2, result, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
- // Note side effect of PreIndex: scratch2 now points to the key of the pair.
- __ cmp(key, result);
- deferred->Branch(ne);
-
- __ ldr(result, MemOperand(scratch2, kPointerSize));
-
- deferred->BindExit();
- frame_->EmitPush(result);
-}
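-
-
- // Editorial sketch: the inline fast path above only probes the cache
- // entry under the "finger" (the smi index of the most recently used key
- // in a flat array of key/value pairs); any miss goes to the runtime via
- // the deferred code. Standalone C++ shape of the probe; illustrative only.
- static bool CacheProbeSketch(const int* entries, int finger_index, int key,
-                              int* value_out) {
-   if (entries[finger_index] != key) return false;  // deferred: runtime call
-   *value_out = entries[finger_index + 1];          // value follows its key
-   return true;
- }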
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and jump to the runtime.
- Load(args->at(0));
-
- NumberToStringStub stub;
- frame_->SpillAll();
- frame_->CallStub(&stub, 1);
- frame_->EmitPush(r0);
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
- DeferredSwapElements(Register object, Register index1, Register index2)
- : object_(object), index1_(index1), index2_(index2) {
- set_comment("[ DeferredSwapElements");
- }
-
- virtual void Generate();
-
- private:
- Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
- __ push(object_);
- __ push(index1_);
- __ push(index2_);
- __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateSwapElements");
-
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
- Register index2 = r2;
- Register index1 = r1;
- Register object = r0;
- Register tmp1 = r3;
- Register tmp2 = r4;
-
- frame_->EmitPop(index2);
- frame_->EmitPop(index1);
- frame_->EmitPop(object);
-
- DeferredSwapElements* deferred =
- new DeferredSwapElements(object, index1, index2);
-
- // Fetch the map and check if array is in fast case.
- // Check that object doesn't require security checks and
- // has no indexed interceptor.
- __ CompareObjectType(object, tmp1, tmp2, FIRST_JS_OBJECT_TYPE);
- deferred->Branch(lt);
- __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
- __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
- deferred->Branch(ne);
-
- // Check the object's elements are in fast case and writable.
- __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
- __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(tmp2, ip);
- deferred->Branch(ne);
-
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- // Check that both indices are smis.
- __ mov(tmp2, index1);
- __ orr(tmp2, tmp2, index2);
- __ tst(tmp2, Operand(kSmiTagMask));
- deferred->Branch(ne);
-
- // Check that both indices are valid.
- __ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
- __ cmp(tmp2, index1);
- __ cmp(tmp2, index2, hi);
- deferred->Branch(ls);
-
- // Bring the offsets into the fixed array in tmp1 into index1 and
- // index2.
- __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Swap elements.
- Register tmp3 = object;
- object = no_reg;
- __ ldr(tmp3, MemOperand(tmp1, index1));
- __ ldr(tmp2, MemOperand(tmp1, index2));
- __ str(tmp3, MemOperand(tmp1, index2));
- __ str(tmp2, MemOperand(tmp1, index1));
-
- Label done;
- __ InNewSpace(tmp1, tmp2, eq, &done);
- // Possible optimization: check that both values are smis
- // (OR them together and test against the smi mask).
-
- __ mov(tmp2, tmp1);
- __ add(index1, index1, tmp1);
- __ add(index2, index2, tmp1);
- __ RecordWriteHelper(tmp1, index1, tmp3);
- __ RecordWriteHelper(tmp2, index2, tmp3);
- __ bind(&done);
-
- deferred->BindExit();
- __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(tmp1);
-}
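-
-
- // Editorial sketch: because a smi is the integer value shifted left by
- // the one-bit tag, a smi index scales to a byte offset into the elements
- // array with a single shift. Standalone C++, constants as on 32-bit ARM;
- // illustrative only.
- static int SmiIndexToByteOffsetSketch(int smi_index) {
-   const int kPointerSizeLog2Sketch = 2;  // 4-byte pointers
-   const int kSmiTagSizeSketch = 1;       // low bit is the tag
-   // (value << 1) << (2 - 1) == value * 4.
-   return smi_index << (kPointerSizeLog2Sketch - kSmiTagSizeSketch);
- }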
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateCallFunction");
-
- ASSERT(args->length() >= 2);
-
- int n_args = args->length() - 2; // for receiver and function.
- Load(args->at(0)); // receiver
- for (int i = 0; i < n_args; i++) {
- Load(args->at(i + 1));
- }
- Load(args->at(n_args + 1)); // function
- frame_->CallJSFunction(n_args);
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- if (CpuFeatures::IsSupported(VFP3)) {
- TranscendentalCacheStub stub(TranscendentalCache::SIN);
- frame_->SpillAllButCopyTOSToR0();
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kMath_sin, 1);
- }
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- if (CpuFeatures::IsSupported(VFP3)) {
- TranscendentalCacheStub stub(TranscendentalCache::COS);
- frame_->SpillAllButCopyTOSToR0();
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kMath_cos, 1);
- }
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- if (CpuFeatures::IsSupported(VFP3)) {
- TranscendentalCacheStub stub(TranscendentalCache::LOG);
- frame_->SpillAllButCopyTOSToR0();
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kMath_log, 1);
- }
- frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- Load(args->at(0));
- Load(args->at(1));
- Register lhs = frame_->PopToRegister();
- Register rhs = frame_->PopToRegister(lhs);
- __ cmp(lhs, rhs);
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- Load(args->at(0));
- Load(args->at(1));
- Register right = frame_->PopToRegister();
- Register left = frame_->PopToRegister(right);
- Register tmp = frame_->scratch0();
- Register tmp2 = frame_->scratch1();
-
- // Jumps to done must have the eq flag set if the test is successful
- // and clear if the test has failed.
- Label done;
-
- // Fail if either is a non-HeapObject.
- __ cmp(left, Operand(right));
- __ b(eq, &done);
- __ and_(tmp, left, Operand(right));
- __ eor(tmp, tmp, Operand(kSmiTagMask));
- __ tst(tmp, Operand(kSmiTagMask));
- __ b(ne, &done);
- __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ cmp(tmp2, Operand(JS_REGEXP_TYPE));
- __ b(ne, &done);
- __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ cmp(tmp, Operand(tmp2));
- __ b(ne, &done);
- __ ldr(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
- __ ldr(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
- __ cmp(tmp, tmp2);
- __ bind(&done);
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register value = frame_->PopToRegister();
- Register tmp = frame_->scratch0();
- __ ldr(tmp, FieldMemOperand(value, String::kHashFieldOffset));
- __ tst(tmp, Operand(String::kContainsCachedArrayIndexMask));
- cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Register value = frame_->PopToRegister();
-
- __ ldr(value, FieldMemOperand(value, String::kHashFieldOffset));
- __ IndexFromHash(value, value);
- frame_->EmitPush(value);
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
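- // This non-optimizing code generator has no fast path here: the first
- // argument is evaluated for its side effects and the result is simply
- // undefined, which signals the library code using this intrinsic to fall
- // back to the generic join implementation.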
- Load(args->at(0));
- Register value = frame_->PopToRegister();
- __ LoadRoot(value, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(value);
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- if (CheckForInlineRuntimeCall(node)) {
- ASSERT((has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
- return;
- }
-
- ZoneList<Expression*>* args = node->arguments();
- Comment cmnt(masm_, "[ CallRuntime");
- Runtime::Function* function = node->function();
-
- if (function == NULL) {
- // Prepare stack for calling JS runtime function.
- // Push the builtins object found in the current global object.
- Register scratch = VirtualFrame::scratch0();
- __ ldr(scratch, GlobalObjectOperand());
- Register builtins = frame_->GetTOSRegister();
- __ ldr(builtins, FieldMemOperand(scratch, GlobalObject::kBuiltinsOffset));
- frame_->EmitPush(builtins);
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
- if (function == NULL) {
- // Call the JS runtime function.
- __ mov(r2, Operand(node->name()));
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub = StubCache::ComputeCallInitialize(arg_count, in_loop);
- frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
- __ ldr(cp, frame_->Context());
- frame_->EmitPush(r0);
- } else {
- // Call the C runtime function.
- frame_->CallRuntime(function, arg_count);
- frame_->EmitPush(r0);
- }
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ UnaryOperation");
-
- Token::Value op = node->op();
-
- if (op == Token::NOT) {
- LoadCondition(node->expression(), false_target(), true_target(), true);
- // LoadCondition may (and usually does) leave a test and branch to
- // be emitted by the caller. In that case, negate the condition.
- if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
-
- } else if (op == Token::DELETE) {
- Property* property = node->expression()->AsProperty();
- Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
- if (property != NULL) {
- Load(property->obj());
- Load(property->key());
- frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
- frame_->EmitPush(r0);
-
- } else if (variable != NULL) {
- // Delete of an unqualified identifier is disallowed in strict mode,
- // but "delete this" is allowed.
- ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
- Slot* slot = variable->AsSlot();
- if (variable->is_global()) {
- LoadGlobal();
- frame_->EmitPush(Operand(variable->name()));
- frame_->EmitPush(Operand(Smi::FromInt(kNonStrictMode)));
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
- frame_->EmitPush(r0);
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Delete from the context holding the named variable.
- frame_->EmitPush(cp);
- frame_->EmitPush(Operand(variable->name()));
- frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
- frame_->EmitPush(r0);
-
- } else {
- // Default: the result of deleting a non-global variable that was not
- // dynamically introduced is false.
- frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
- }
-
- } else {
- // Default: the result of deleting any other expression is true.
- Load(node->expression()); // may have side-effects
- frame_->Drop();
- frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
- }
-
- } else if (op == Token::TYPEOF) {
- // Special case for loading the typeof expression; see comment on
- // LoadTypeofExpression().
- LoadTypeofExpression(node->expression());
- frame_->CallRuntime(Runtime::kTypeof, 1);
- frame_->EmitPush(r0); // r0 has result
-
- } else {
- bool can_overwrite = node->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-
- bool no_negative_zero = node->expression()->no_negative_zero();
- Load(node->expression());
- switch (op) {
- case Token::NOT:
- case Token::DELETE:
- case Token::TYPEOF:
- UNREACHABLE(); // handled above
- break;
-
- case Token::SUB: {
- frame_->PopToR0();
- GenericUnaryOpStub stub(
- Token::SUB,
- overwrite,
- NO_UNARY_FLAGS,
- no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
- frame_->CallStub(&stub, 0);
- frame_->EmitPush(r0); // r0 has result
- break;
- }
-
- case Token::BIT_NOT: {
- Register tos = frame_->PopToRegister();
- JumpTarget not_smi_label;
- JumpTarget continue_label;
- // Smi check.
- __ tst(tos, Operand(kSmiTagMask));
- not_smi_label.Branch(ne);
-
- __ mvn(tos, Operand(tos));
- __ bic(tos, tos, Operand(kSmiTagMask)); // Bit-clear inverted smi-tag.
- frame_->EmitPush(tos);
- // The fast case is the first to jump to the continue label, so it gets
- // to decide the virtual frame layout.
- continue_label.Jump();
-
- not_smi_label.Bind();
- frame_->SpillAll();
- __ Move(r0, tos);
- GenericUnaryOpStub stub(Token::BIT_NOT,
- overwrite,
- NO_UNARY_SMI_CODE_IN_STUB);
- frame_->CallStub(&stub, 0);
- frame_->EmitPush(r0);
-
- continue_label.Bind();
- break;
- }
-
- case Token::VOID:
- frame_->Drop();
- frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
- break;
-
- case Token::ADD: {
- Register tos = frame_->Peek();
- // Smi check.
- JumpTarget continue_label;
- __ tst(tos, Operand(kSmiTagMask));
- continue_label.Branch(eq);
-
- frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
- frame_->EmitPush(r0);
-
- continue_label.Bind();
- break;
- }
- default:
- UNREACHABLE();
- }
- }
- ASSERT(!has_valid_frame() ||
- (has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-class DeferredCountOperation: public DeferredCode {
- public:
- DeferredCountOperation(Register value,
- bool is_increment,
- bool is_postfix,
- int target_size)
- : value_(value),
- is_increment_(is_increment),
- is_postfix_(is_postfix),
- target_size_(target_size) {}
-
- virtual void Generate() {
- VirtualFrame copied_frame(*frame_state()->frame());
-
- Label slow;
- // Check for smi operand.
- __ tst(value_, Operand(kSmiTagMask));
- __ b(ne, &slow);
-
- // Revert optimistic increment/decrement.
- if (is_increment_) {
- __ sub(value_, value_, Operand(Smi::FromInt(1)));
- } else {
- __ add(value_, value_, Operand(Smi::FromInt(1)));
- }
-
- // Slow case: Convert to number. At this point the
- // value to be incremented is in the value register.
- __ bind(&slow);
-
- // Convert the operand to a number.
- copied_frame.EmitPush(value_);
-
- copied_frame.InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
-
- if (is_postfix_) {
- // Postfix: store to result (on the stack).
- __ str(r0, MemOperand(sp, target_size_ * kPointerSize));
- }
-
- copied_frame.EmitPush(r0);
- copied_frame.EmitPush(Operand(Smi::FromInt(1)));
-
- if (is_increment_) {
- copied_frame.CallRuntime(Runtime::kNumberAdd, 2);
- } else {
- copied_frame.CallRuntime(Runtime::kNumberSub, 2);
- }
-
- __ Move(value_, r0);
-
- copied_frame.MergeTo(frame_state()->frame());
- }
-
- private:
- Register value_;
- bool is_increment_;
- bool is_postfix_;
- int target_size_;
-};
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ CountOperation");
- VirtualFrame::RegisterAllocationScope scope(this);
-
- bool is_postfix = node->is_postfix();
- bool is_increment = node->op() == Token::INC;
-
- Variable* var = node->expression()->AsVariableProxy()->AsVariable();
- bool is_const = (var != NULL && var->mode() == Variable::CONST);
- bool is_slot = (var != NULL && var->mode() == Variable::VAR);
-
- if (!is_const && is_slot && type_info(var->AsSlot()).IsSmi()) {
- // The type info declares that this variable is always a Smi. That
- // means it is a Smi both before and after the increment/decrement.
- // Let's make use of that to generate a minimal count operation.
- Reference target(this, node->expression(), !is_const);
- ASSERT(!target.is_illegal());
- target.GetValue(); // Pushes the value.
- Register value = frame_->PopToRegister();
- if (is_postfix) frame_->EmitPush(value);
- if (is_increment) {
- __ add(value, value, Operand(Smi::FromInt(1)));
- } else {
- __ sub(value, value, Operand(Smi::FromInt(1)));
- }
- frame_->EmitPush(value);
- target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
- if (is_postfix) frame_->Pop();
- ASSERT_EQ(original_height + 1, frame_->height());
- return;
- }
-
- // If it's a postfix expression and its result is not ignored and the
- // reference is non-trivial, then push a placeholder on the stack now
- // to hold the result of the expression.
- bool placeholder_pushed = false;
- if (!is_slot && is_postfix) {
- frame_->EmitPush(Operand(Smi::FromInt(0)));
- placeholder_pushed = true;
- }
-
- // A constant reference is not saved to, so a constant reference is not a
- // compound assignment reference.
- { Reference target(this, node->expression(), !is_const);
- if (target.is_illegal()) {
- // Spoof the virtual frame to have the expected height (one higher
- // than on entry).
- if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0)));
- ASSERT_EQ(original_height + 1, frame_->height());
- return;
- }
-
- // This pushes 0, 1 or 2 words onto the stack that are used later when
- // updating the target. It also pushes the current value of the target.
- target.GetValue();
-
- bool value_is_known_smi = frame_->KnownSmiAt(0);
- Register value = frame_->PopToRegister();
-
- // Postfix: Store the old value as the result.
- if (placeholder_pushed) {
- frame_->SetElementAt(value, target.size());
- } else if (is_postfix) {
- frame_->EmitPush(value);
- __ mov(VirtualFrame::scratch0(), value);
- value = VirtualFrame::scratch0();
- }
-
- // We can't use any type information here, since the virtual frame from
- // the deferred code may have lost that information. We also can't merge a
- // virtual frame with less specific type knowledge into one with more
- // specific knowledge that has already been used to generate code.
- frame_->ForgetTypeInfo();
-
- // The constructor here captures the current virtual frame and uses it as
- // the frame to merge back to after the deferred code has run. No virtual
- // frame changes are allowed from here until the 'BindExit' below.
- DeferredCode* deferred =
- new DeferredCountOperation(value,
- is_increment,
- is_postfix,
- target.size());
- if (!value_is_known_smi) {
- // Check for smi operand.
- __ tst(value, Operand(kSmiTagMask));
-
- deferred->Branch(ne);
- }
-
- // Perform optimistic increment/decrement.
- if (is_increment) {
- __ add(value, value, Operand(Smi::FromInt(1)), SetCC);
- } else {
- __ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
- }
-
- // If increment/decrement overflows, go to deferred code.
- deferred->Branch(vs);
-
- deferred->BindExit();
-
- // Store the new value in the target if not const.
- // At this point the answer is in the value register.
- frame_->EmitPush(value);
- // Set the target with the result, leaving the result on
- // top of the stack. Removes the target from the stack if
- // it has a non-zero size.
- if (!is_const) target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
- }
-
- // Postfix: Discard the new value and use the old.
- if (is_postfix) frame_->Pop();
- ASSERT_EQ(original_height + 1, frame_->height());
-}
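-
-
-// The fast path above adds Smi::FromInt(1) with SetCC and branches to the
-// deferred code on the overflow (vs) condition. A minimal sketch of that
-// arithmetic, assuming 32-bit smi tagging (value << 1, tag bit 0) and the
-// GCC/Clang __builtin_add_overflow intrinsic standing in for the condition
-// flags; OptimisticSmiIncrement is a hypothetical helper name.
-#include <stdint.h>
-static bool OptimisticSmiIncrement(int32_t tagged_smi, int32_t* result) {
-  const int32_t kTaggedOne = 1 << 1;  // Smi::FromInt(1) on 32-bit targets.
-  int32_t sum;
-  if (__builtin_add_overflow(tagged_smi, kTaggedOne, &sum)) {
-    return false;  // Overflow: the generated code takes the deferred path.
-  }
-  *result = sum;
-  return true;
-}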
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
- // According to ECMA-262 section 11.11, page 58, the binary logical
- // operators must yield the result of one of the two expressions
- // before any ToBoolean() conversions. This means that the value
- // produced by a && or || operator is not necessarily a boolean.
-
- // NOTE: If the left hand side produces a materialized value (not in
- // the CC register), we force the right hand side to do the
- // same. This is necessary because we may have to branch to the exit
- // after evaluating the left hand side (due to the shortcut
- // semantics), but the compiler must (statically) know if the result
- // of compiling the binary operation is materialized or not.
- if (node->op() == Token::AND) {
- JumpTarget is_true;
- LoadCondition(node->left(), &is_true, false_target(), false);
- if (has_valid_frame() && !has_cc()) {
- // The left-hand side result is on top of the virtual frame.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- frame_->Dup();
- // Avoid popping the result if it converts to 'false' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- ToBoolean(&pop_and_continue, &exit);
- Branch(false, &exit);
-
- // Pop the result of evaluating the first part.
- pop_and_continue.Bind();
- frame_->Pop();
-
- // Evaluate right side expression.
- is_true.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- } else if (has_cc() || is_true.is_linked()) {
- // The left-hand side is either (a) partially compiled to
- // control flow with a final branch left to emit or (b) fully
- // compiled to control flow and possibly true.
- if (has_cc()) {
- Branch(false, false_target());
- }
- is_true.Bind();
- LoadCondition(node->right(), true_target(), false_target(), false);
- } else {
- // Nothing to do.
- ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
- }
-
- } else {
- ASSERT(node->op() == Token::OR);
- JumpTarget is_false;
- LoadCondition(node->left(), true_target(), &is_false, false);
- if (has_valid_frame() && !has_cc()) {
- // The left-hand side result is on top of the virtual frame.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- frame_->Dup();
- // Avoid popping the result if it converts to 'true' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- ToBoolean(&exit, &pop_and_continue);
- Branch(true, &exit);
-
- // Pop the result of evaluating the first part.
- pop_and_continue.Bind();
- frame_->Pop();
-
- // Evaluate right side expression.
- is_false.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- } else if (has_cc() || is_false.is_linked()) {
- // The left-hand side is either (a) partially compiled to
- // control flow with a final branch left to emit or (b) fully
- // compiled to control flow and possibly false.
- if (has_cc()) {
- Branch(true, true_target());
- }
- is_false.Bind();
- LoadCondition(node->right(), true_target(), false_target(), false);
- } else {
- // Nothing to do.
- ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
- }
- }
-}
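-
-
-// A sketch of the value semantics implemented above: '&&' and '||' yield
-// one of the operand values, not a boolean, and the right-hand side is only
-// evaluated when needed. The Value struct and helper names are hypothetical
-// stand-ins for a JS value and its ToBoolean conversion.
-struct Value { bool truthy; };
-static Value LogicalAnd(Value left, Value (*eval_right)()) {
-  return left.truthy ? eval_right() : left;  // 'a && b' may return a itself.
-}
-static Value LogicalOr(Value left, Value (*eval_right)()) {
-  return left.truthy ? left : eval_right();  // 'a || b' may return a itself.
-}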
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ BinaryOperation");
-
- if (node->op() == Token::AND || node->op() == Token::OR) {
- GenerateLogicalBooleanOperation(node);
- } else {
- // Optimize for the case where (at least) one of the expressions
- // is a literal small integer.
- Literal* lliteral = node->left()->AsLiteral();
- Literal* rliteral = node->right()->AsLiteral();
- // NOTE: The code below assumes that the slow cases (calls to runtime)
- // never return a constant/immutable object.
- bool overwrite_left = node->left()->ResultOverwriteAllowed();
- bool overwrite_right = node->right()->ResultOverwriteAllowed();
-
- if (rliteral != NULL && rliteral->handle()->IsSmi()) {
- VirtualFrame::RegisterAllocationScope scope(this);
- Load(node->left());
- if (frame_->KnownSmiAt(0)) overwrite_left = false;
- SmiOperation(node->op(),
- rliteral->handle(),
- false,
- overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
- } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
- VirtualFrame::RegisterAllocationScope scope(this);
- Load(node->right());
- if (frame_->KnownSmiAt(0)) overwrite_right = false;
- SmiOperation(node->op(),
- lliteral->handle(),
- true,
- overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- GenerateInlineSmi inline_smi =
- loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
- if (lliteral != NULL) {
- ASSERT(!lliteral->handle()->IsSmi());
- inline_smi = DONT_GENERATE_INLINE_SMI;
- }
- if (rliteral != NULL) {
- ASSERT(!rliteral->handle()->IsSmi());
- inline_smi = DONT_GENERATE_INLINE_SMI;
- }
- VirtualFrame::RegisterAllocationScope scope(this);
- OverwriteMode overwrite_mode = NO_OVERWRITE;
- if (overwrite_left) {
- overwrite_mode = OVERWRITE_LEFT;
- } else if (overwrite_right) {
- overwrite_mode = OVERWRITE_RIGHT;
- }
- Load(node->left());
- Load(node->right());
- GenericBinaryOperation(node->op(), overwrite_mode, inline_smi);
- }
- }
- ASSERT(!has_valid_frame() ||
- (has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
-}
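-
-
-// Sketch of the overwrite-mode selection above: the generic stub may reuse
-// (overwrite) a heap-allocated operand whose value is dead after the
-// operation, avoiding an allocation. The names here are illustrative; the
-// trailing underscores only avoid clashing with the real enum constants.
-enum OverwriteModeSketch { NO_OVERWRITE_, OVERWRITE_LEFT_, OVERWRITE_RIGHT_ };
-static OverwriteModeSketch SelectOverwriteMode(bool overwrite_left,
-                                               bool overwrite_right) {
-  if (overwrite_left) return OVERWRITE_LEFT_;
-  if (overwrite_right) return OVERWRITE_RIGHT_;
-  return NO_OVERWRITE_;
-}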
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- frame_->EmitPush(MemOperand(frame_->Function()));
- ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ CompareOperation");
-
- VirtualFrame::RegisterAllocationScope nonspilled_scope(this);
-
- // Get the expressions from the node.
- Expression* left = node->left();
- Expression* right = node->right();
- Token::Value op = node->op();
-
- // To make typeof testing for natives implemented in JavaScript really
- // efficient, we generate special code for expressions of the form:
- // 'typeof <expression> == <string>'.
- UnaryOperation* operation = left->AsUnaryOperation();
- if ((op == Token::EQ || op == Token::EQ_STRICT) &&
- (operation != NULL && operation->op() == Token::TYPEOF) &&
- (right->AsLiteral() != NULL &&
- right->AsLiteral()->handle()->IsString())) {
- Handle<String> check(String::cast(*right->AsLiteral()->handle()));
-
- // Load the operand, move it to a register.
- LoadTypeofExpression(operation->expression());
- Register tos = frame_->PopToRegister();
-
- Register scratch = VirtualFrame::scratch0();
-
- if (check->Equals(Heap::number_symbol())) {
- __ tst(tos, Operand(kSmiTagMask));
- true_target()->Branch(eq);
- __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(tos, ip);
- cc_reg_ = eq;
-
- } else if (check->Equals(Heap::string_symbol())) {
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
-
- // It can be an undetectable string object.
- __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
- __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
- __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
- false_target()->Branch(eq);
-
- __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
- cc_reg_ = lt;
-
- } else if (check->Equals(Heap::boolean_symbol())) {
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(tos, ip);
- true_target()->Branch(eq);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(tos, ip);
- cc_reg_ = eq;
-
- } else if (check->Equals(Heap::undefined_symbol())) {
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(tos, ip);
- true_target()->Branch(eq);
-
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- // It can be an undetectable object.
- __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
- __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
- __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
-
- cc_reg_ = eq;
-
- } else if (check->Equals(Heap::function_symbol())) {
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
- Register map_reg = scratch;
- __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE);
- true_target()->Branch(eq);
- // Regular expressions are callable so typeof == 'function'.
- __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
- cc_reg_ = eq;
-
- } else if (check->Equals(Heap::object_symbol())) {
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(tos, ip);
- true_target()->Branch(eq);
-
- Register map_reg = scratch;
- __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE);
- false_target()->Branch(eq);
-
- // It can be an undetectable object.
- __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset));
- __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
- __ cmp(tos, Operand(1 << Map::kIsUndetectable));
- false_target()->Branch(eq);
-
- __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
- __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE));
- false_target()->Branch(lt);
- __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE));
- cc_reg_ = le;
-
- } else {
- // Uncommon case: typeof testing against a string literal that is
- // never returned from the typeof operator.
- false_target()->Jump();
- }
- ASSERT(!has_valid_frame() ||
- (has_cc() && frame_->height() == original_height));
- return;
- }
-
- switch (op) {
- case Token::EQ:
- Comparison(eq, left, right, false);
- break;
-
- case Token::LT:
- Comparison(lt, left, right);
- break;
-
- case Token::GT:
- Comparison(gt, left, right);
- break;
-
- case Token::LTE:
- Comparison(le, left, right);
- break;
-
- case Token::GTE:
- Comparison(ge, left, right);
- break;
-
- case Token::EQ_STRICT:
- Comparison(eq, left, right, true);
- break;
-
- case Token::IN: {
- Load(left);
- Load(right);
- frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
- frame_->EmitPush(r0);
- break;
- }
-
- case Token::INSTANCEOF: {
- Load(left);
- Load(right);
- InstanceofStub stub(InstanceofStub::kNoFlags);
- frame_->CallStub(&stub, 2);
- // At this point if instanceof succeeded then r0 == 0.
- __ tst(r0, Operand(r0));
- cc_reg_ = eq;
- break;
- }
-
- default:
- UNREACHABLE();
- }
- ASSERT((has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
-}
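-
-
-// The typeof checks above repeatedly test the map's "undetectable" bit:
-// undetectable objects (such as document.all in some embedders) must answer
-// 'undefined' to typeof and compare equal to null/undefined. A sketch of
-// that bit test; the bit position below is hypothetical, the real one is
-// defined by Map::kIsUndetectable.
-#include <stdint.h>
-static const int kIsUndetectableBitSketch = 4;  // illustrative bit position
-static bool IsUndetectable(uint8_t map_bit_field) {
-  return (map_bit_field & (1 << kIsUndetectableBitSketch)) != 0;
-}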
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- Comment cmnt(masm_, "[ CompareToNull");
-
- Load(node->expression());
- Register tos = frame_->PopToRegister();
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(tos, ip);
-
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- if (!node->is_strict()) {
- true_target()->Branch(eq);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(tos, Operand(ip));
- true_target()->Branch(eq);
-
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- // It can be an undetectable object.
- __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
- __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
- __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
- __ cmp(tos, Operand(1 << Map::kIsUndetectable));
- }
-
- cc_reg_ = eq;
- ASSERT(has_cc() && frame_->height() == original_height);
-}
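-
-
-// Value-level summary of the code above, as a sketch: under non-strict
-// comparison, null matches null, undefined and undetectable objects; under
-// strict comparison only null itself. The struct is a hypothetical stand-in
-// for a tagged JS value.
-struct NullCompareSketch {
-  bool is_null, is_undefined, is_undetectable_object;
-};
-static bool CompareToNull(const NullCompareSketch& v, bool is_strict) {
-  if (is_strict) return v.is_null;
-  return v.is_null || v.is_undefined || v.is_undetectable_object;
-}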
-
-
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
- explicit DeferredReferenceGetNamedValue(Register receiver,
- Handle<String> name,
- bool is_contextual)
- : receiver_(receiver),
- name_(name),
- is_contextual_(is_contextual),
- is_dont_delete_(false) {
- set_comment(is_contextual
- ? "[ DeferredReferenceGetNamedValue (contextual)"
- : "[ DeferredReferenceGetNamedValue");
- }
-
- virtual void Generate();
-
- void set_is_dont_delete(bool value) {
- ASSERT(is_contextual_);
- is_dont_delete_ = value;
- }
-
- private:
- Register receiver_;
- Handle<String> name_;
- bool is_contextual_;
- bool is_dont_delete_;
-};
-
-
-// The convention here is that on entry the receiver is in a register that
-// is not used by the virtual frame. On exit the answer is left in that same
-// register and the stack has the same height.
-void DeferredReferenceGetNamedValue::Generate() {
-#ifdef DEBUG
- int expected_height = frame_state()->frame()->height();
-#endif
- VirtualFrame copied_frame(*frame_state()->frame());
- copied_frame.SpillAll();
-
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
- __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
- __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
-
- // Ensure receiver in r0 and name in r2 to match load ic calling convention.
- __ Move(r0, receiver_);
- __ mov(r2, Operand(name_));
-
- // The rest of the instructions in the deferred code must be together.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- RelocInfo::Mode mode = is_contextual_
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- __ Call(ic, mode);
- // We must mark the code just after the call with the correct marker.
- MacroAssembler::NopMarkerTypes code_marker;
- if (is_contextual_) {
- code_marker = is_dont_delete_
- ? MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE
- : MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT;
- } else {
- code_marker = MacroAssembler::PROPERTY_ACCESS_INLINED;
- }
- __ MarkCode(code_marker);
-
- // At this point the answer is in r0. We move it to the expected register
- // if necessary.
- __ Move(receiver_, r0);
-
- // Now go back to the frame that we entered with. This will not overwrite
- // the receiver register since that register was not in use when we came
- // in. The instructions emitted by this merge are skipped over by the
- // inline load patching mechanism when looking for the branch instruction
- // that tells it where the code to patch is.
- copied_frame.MergeTo(frame_state()->frame());
-
- // Block the constant pool for one more instruction after leaving this
- // constant pool block scope to include the branch instruction ending the
- // deferred code.
- __ BlockConstPoolFor(1);
- }
- ASSERT_EQ(expected_height, frame_state()->frame()->height());
-}
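-
-
-// The marker nop emitted after the IC call above tells the patching code in
-// ic-arm.cc which kind of inlined access follows the call site. A sketch of
-// the selection logic; the constants mirror MacroAssembler::NopMarkerTypes,
-// with trailing underscores to mark them as illustrative.
-enum NopMarkerSketch {
-  PROPERTY_ACCESS_INLINED_,
-  PROPERTY_ACCESS_INLINED_CONTEXT_,
-  PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE_
-};
-static NopMarkerSketch SelectCodeMarker(bool is_contextual,
-                                        bool is_dont_delete) {
-  if (!is_contextual) return PROPERTY_ACCESS_INLINED_;
-  return is_dont_delete ? PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE_
-                        : PROPERTY_ACCESS_INLINED_CONTEXT_;
-}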
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceGetKeyedValue(Register key, Register receiver)
- : key_(key), receiver_(receiver) {
- set_comment("[ DeferredReferenceGetKeyedValue");
- }
-
- virtual void Generate();
-
- private:
- Register key_;
- Register receiver_;
-};
-
-
-// Takes the key and receiver in r0 and r1, or vice versa. Returns the
-// result in r0.
-void DeferredReferenceGetKeyedValue::Generate() {
- ASSERT((key_.is(r0) && receiver_.is(r1)) ||
- (key_.is(r1) && receiver_.is(r0)));
-
- VirtualFrame copied_frame(*frame_state()->frame());
- copied_frame.SpillAll();
-
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
- __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
-
- // Ensure key in r0 and receiver in r1 to match keyed load ic calling
- // convention.
- if (key_.is(r1)) {
- __ Swap(r0, r1, ip);
- }
-
- // The rest of the instructions in the deferred code must be together.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Call keyed load IC. It has the arguments key and receiver in r0 and r1.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The call must be followed by a nop instruction to indicate that the
- // keyed load has been inlined.
- __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
-
- // Now go back to the frame that we entered with. This will not overwrite
- // the receiver or key registers since they were not in use when we came
- // in. The instructions emitted by this merge are skipped over by the
- // inline load patching mechanism when looking for the branch instruction
- // that tells it where the code to patch is.
- copied_frame.MergeTo(frame_state()->frame());
-
- // Block the constant pool for one more instruction after leaving this
- // constant pool block scope to include the branch instruction ending the
- // deferred code.
- __ BlockConstPoolFor(1);
- }
-}
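-
-
-// The __ Swap(r0, r1, ip) above exchanges two registers through the ip
-// scratch register to satisfy the IC calling convention (key in r0,
-// receiver in r1). The equivalent three-move sequence over plain values,
-// with a hypothetical helper name:
-static void SwapViaScratch(int* a, int* b, int* scratch) {
-  *scratch = *a;
-  *a = *b;
-  *b = *scratch;
-}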
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceSetKeyedValue(Register value,
- Register key,
- Register receiver,
- StrictModeFlag strict_mode)
- : value_(value),
- key_(key),
- receiver_(receiver),
- strict_mode_(strict_mode) {
- set_comment("[ DeferredReferenceSetKeyedValue");
- }
-
- virtual void Generate();
-
- private:
- Register value_;
- Register key_;
- Register receiver_;
- StrictModeFlag strict_mode_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- __ DecrementCounter(&Counters::keyed_store_inline, 1, scratch1, scratch2);
- __ IncrementCounter(
- &Counters::keyed_store_inline_miss, 1, scratch1, scratch2);
-
- // Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic
- // calling convention.
- if (value_.is(r1)) {
- __ Swap(r0, r1, ip);
- }
- ASSERT(receiver_.is(r2));
-
- // The rest of the instructions in the deferred code must be together.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Call keyed store IC. It has the arguments value, key and receiver in r0,
- // r1 and r2.
- Handle<Code> ic(Builtins::builtin(
- (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The call must be followed by a nop instruction to indicate that the
- // keyed store has been inlined.
- __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
-
- // Block the constant pool for one more instruction after leaving this
- // constant pool block scope to include the branch instruction ending the
- // deferred code.
- __ BlockConstPoolFor(1);
- }
-}
-
-
-class DeferredReferenceSetNamedValue: public DeferredCode {
- public:
- DeferredReferenceSetNamedValue(Register value,
- Register receiver,
- Handle<String> name,
- StrictModeFlag strict_mode)
- : value_(value),
- receiver_(receiver),
- name_(name),
- strict_mode_(strict_mode) {
- set_comment("[ DeferredReferenceSetNamedValue");
- }
-
- virtual void Generate();
-
- private:
- Register value_;
- Register receiver_;
- Handle<String> name_;
- StrictModeFlag strict_mode_;
-};
-
-
-// Takes value in r0, receiver in r1 and returns the result (the
-// value) in r0.
-void DeferredReferenceSetNamedValue::Generate() {
- // Record the entry frame and spill.
- VirtualFrame copied_frame(*frame_state()->frame());
- copied_frame.SpillAll();
-
- // Ensure value in r0, receiver in r1 to match store ic calling
- // convention.
- ASSERT(value_.is(r0) && receiver_.is(r1));
- __ mov(r2, Operand(name_));
-
- // The rest of the instructions in the deferred code must be together.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Call the named store IC. It takes the value, receiver and name in r0,
- // r1 and r2.
- Handle<Code> ic(Builtins::builtin(
- (strict_mode_ == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The call must be followed by a nop instruction to indicate that the
- // named store has been inlined.
- __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
-
- // Go back to the frame we entered with. The instructions
- // generated by this merge are skipped over by the inline store
- // patching mechanism when looking for the branch instruction that
- // tells it where the code to patch is.
- copied_frame.MergeTo(frame_state()->frame());
-
- // Block the constant pool for one more instruction after leaving this
- // constant pool block scope to include the branch instruction ending the
- // deferred code.
- __ BlockConstPoolFor(1);
- }
-}
-
-
-// Consumes the top of the stack (the receiver) and pushes the result
-// instead.
-void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
- bool contextual_load_in_builtin =
- is_contextual &&
- (Bootstrapper::IsActive() ||
- (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
-
- if (scope()->is_global_scope() ||
- loop_nesting() == 0 ||
- contextual_load_in_builtin) {
- Comment cmnt(masm(), "[ Load from named Property");
- // Set up the name register and call the load IC.
- frame_->CallLoadIC(name,
- is_contextual
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET);
- frame_->EmitPush(r0); // Push answer.
- } else {
- // Inline the in-object property case.
- Comment cmnt(masm(), is_contextual
- ? "[ Inlined contextual property load"
- : "[ Inlined named property load");
-
- // Counter will be decremented in the deferred code. Placed here to avoid
- // having it in the instruction stream below where patching will occur.
- if (is_contextual) {
- __ IncrementCounter(&Counters::named_load_global_inline, 1,
- frame_->scratch0(), frame_->scratch1());
- } else {
- __ IncrementCounter(&Counters::named_load_inline, 1,
- frame_->scratch0(), frame_->scratch1());
- }
-
- // The following instructions are the inlined load of an in-object
- // property. Parts of this code are patched, so the exact instructions
- // generated must be fixed. Therefore the constant pool is blocked while
- // generating this code.
-
- // Load the receiver from the stack.
- Register receiver = frame_->PopToRegister();
-
- DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(receiver, name, is_contextual);
-
- bool is_dont_delete = false;
- if (is_contextual) {
- if (!info_->closure().is_null()) {
- // When doing lazy compilation we can check if the global cell
- // already exists and use its "don't delete" status as a hint.
- AssertNoAllocation no_gc;
- v8::internal::GlobalObject* global_object =
- info_->closure()->context()->global();
- LookupResult lookup;
- global_object->LocalLookupRealNamedProperty(*name, &lookup);
- if (lookup.IsProperty() && lookup.type() == NORMAL) {
- ASSERT(lookup.holder() == global_object);
- ASSERT(global_object->property_dictionary()->ValueAt(
- lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
- is_dont_delete = lookup.IsDontDelete();
- }
- }
- if (is_dont_delete) {
- __ IncrementCounter(&Counters::dont_delete_hint_hit, 1,
- frame_->scratch0(), frame_->scratch1());
- }
- }
-
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- if (!is_contextual) {
- // Check that the receiver is a heap object.
- __ tst(receiver, Operand(kSmiTagMask));
- deferred->Branch(eq);
- }
-
- // Check for the_hole_value if necessary.
- // Below we rely on the number of instructions generated, and we can't
- // cope with the Check macro which does not generate a fixed number of
- // instructions.
- Label skip, check_the_hole, cont;
- if (FLAG_debug_code && is_contextual && is_dont_delete) {
- __ b(&skip);
- __ bind(&check_the_hole);
- __ Check(ne, "DontDelete cells can't contain the hole");
- __ b(&cont);
- __ bind(&skip);
- }
-
-#ifdef DEBUG
- int InlinedNamedLoadInstructions = 5;
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
-#endif
-
- Register scratch = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
-
- // Check the map. The null map used below is patched by the inline cache
- // code. Therefore we can't use a LoadRoot call.
- __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ mov(scratch2, Operand(Factory::null_value()));
- __ cmp(scratch, scratch2);
- deferred->Branch(ne);
-
- if (is_contextual) {
-#ifdef DEBUG
- InlinedNamedLoadInstructions += 1;
-#endif
- // Load the (initially invalid) cell and get its value.
- masm()->mov(receiver, Operand(Factory::null_value()));
- __ ldr(receiver,
- FieldMemOperand(receiver, JSGlobalPropertyCell::kValueOffset));
-
- deferred->set_is_dont_delete(is_dont_delete);
-
- if (!is_dont_delete) {
-#ifdef DEBUG
- InlinedNamedLoadInstructions += 3;
-#endif
- __ cmp(receiver, Operand(Factory::the_hole_value()));
- deferred->Branch(eq);
- } else if (FLAG_debug_code) {
-#ifdef DEBUG
- InlinedNamedLoadInstructions += 3;
-#endif
- __ cmp(receiver, Operand(Factory::the_hole_value()));
- __ b(&check_the_hole, eq);
- __ bind(&cont);
- }
- } else {
- // Initially use an invalid index. The index will be patched by the
- // inline cache code.
- __ ldr(receiver, MemOperand(receiver, 0));
- }
-
- // Make sure that the expected number of instructions are generated.
- // If the code before is updated, the offsets in ic-arm.cc
- // LoadIC::PatchInlinedContextualLoad and PatchInlinedLoad need
- // to be updated.
- ASSERT_EQ(InlinedNamedLoadInstructions,
- masm_->InstructionsGeneratedSince(&check_inlined_codesize));
- }
-
- deferred->BindExit();
- // At this point the receiver register has the result, either from the
- // deferred code or from the inlined code.
- frame_->EmitPush(receiver);
- }
-}
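-
-
-// Skeleton of the patchable inlined load emitted above, as a sketch: the
-// receiver's map is compared against a patched-in expected map, then the
-// field at a patched-in offset is loaded; a mismatch falls back to the IC
-// in deferred code. All types and names here are hypothetical.
-static bool TryInlineNamedLoad(const void* receiver_map,
-                               const void* expected_map,
-                               const char* receiver_payload,
-                               int patched_offset,
-                               void** result) {
-  if (receiver_map != expected_map) return false;  // deferred IC path
-  *result =
-      *reinterpret_cast<void* const*>(receiver_payload + patched_offset);
-  return true;
-}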
-
-
-void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
- int expected_height = frame()->height() - (is_contextual ? 1 : 2);
-#endif
-
- Result result;
- if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
- } else {
- // Inline the in-object property case.
- JumpTarget slow, done;
-
- // Get the value and receiver from the stack.
- frame()->PopToR0();
- Register value = r0;
- frame()->PopToR1();
- Register receiver = r1;
-
- DeferredReferenceSetNamedValue* deferred =
- new DeferredReferenceSetNamedValue(
- value, receiver, name, strict_mode_flag());
-
- // Check that the receiver is a heap object.
- __ tst(receiver, Operand(kSmiTagMask));
- deferred->Branch(eq);
-
- // The following instructions are the part of the inlined
- // in-object property store code which can be patched. Therefore
- // the exact number of instructions generated must be fixed, so
- // the constant pool is blocked while generating this code.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- Register scratch0 = VirtualFrame::scratch0();
- Register scratch1 = VirtualFrame::scratch1();
-
- // Check the map. Initially use an invalid map to force a
- // failure. The map check will be patched in the runtime system.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
-#ifdef DEBUG
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
-#endif
- __ mov(scratch0, Operand(Factory::null_value()));
- __ cmp(scratch0, scratch1);
- deferred->Branch(ne);
-
- int offset = 0;
- __ str(value, MemOperand(receiver, offset));
-
- // Update the write barrier and record its size. We do not use
- // the RecordWrite macro here because we want the offset
- // addition instruction first to make it easy to patch.
- Label record_write_start, record_write_done;
- __ bind(&record_write_start);
- // Add offset into the object.
- __ add(scratch0, receiver, Operand(offset));
- // Test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- __ InNewSpace(receiver, scratch1, eq, &record_write_done);
- // Record the actual write.
- __ RecordWriteHelper(receiver, scratch0, scratch1);
- __ bind(&record_write_done);
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (FLAG_debug_code) {
- __ mov(receiver, Operand(BitCast<int32_t>(kZapValue)));
- __ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
- __ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
- }
- // Check that this is the first inlined write barrier or that
- // this inlined write barrier has the same size as all the other
- // inlined write barriers.
- ASSERT((inlined_write_barrier_size_ == -1) ||
- (inlined_write_barrier_size_ ==
- masm()->InstructionsGeneratedSince(&record_write_start)));
- inlined_write_barrier_size_ =
- masm()->InstructionsGeneratedSince(&record_write_start);
-
- // Make sure that the expected number of instructions are generated.
- ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
- masm()->InstructionsGeneratedSince(&check_inlined_codesize));
- }
- deferred->BindExit();
- }
- ASSERT_EQ(expected_height, frame()->height());
-}
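-
-
-// The assertions above pin every inlined write barrier to a single size,
-// because the patching code in ic-arm.cc must skip a fixed number of
-// instructions. A sketch of that "first barrier defines the size" check,
-// with a file-local variable standing in for inlined_write_barrier_size_:
-#include <assert.h>
-static int write_barrier_size_sketch = -1;
-static void RecordWriteBarrierSize(int instructions_generated) {
-  assert(write_barrier_size_sketch == -1 ||
-         write_barrier_size_sketch == instructions_generated);
-  write_barrier_size_sketch = instructions_generated;
-}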
-
-
-void CodeGenerator::EmitKeyedLoad() {
- if (loop_nesting() == 0) {
- Comment cmnt(masm_, "[ Load from keyed property");
- frame_->CallKeyedLoadIC();
- } else {
- // Inline the keyed load.
- Comment cmnt(masm_, "[ Inlined load from keyed property");
-
- // Counter will be decremented in the deferred code. Placed here to avoid
- // having it in the instruction stream below where patching will occur.
- __ IncrementCounter(&Counters::keyed_load_inline, 1,
- frame_->scratch0(), frame_->scratch1());
-
- // Load the key and receiver from the stack.
- bool key_is_known_smi = frame_->KnownSmiAt(0);
- Register key = frame_->PopToRegister();
- Register receiver = frame_->PopToRegister(key);
-
- // The deferred code expects key and receiver in registers.
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(key, receiver);
-
- // Check that the receiver is a heap object.
- __ tst(receiver, Operand(kSmiTagMask));
- deferred->Branch(eq);
-
- // The following instructions are the part of the inlined keyed load
- // code which can be patched. Therefore the exact number of instructions
- // generated needs to be fixed, so the constant pool is blocked while
- // generating this code.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- // Check the map. The null map used below is patched by the inline cache
- // code.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- // Check that the key is a smi.
- if (!key_is_known_smi) {
- __ tst(key, Operand(kSmiTagMask));
- deferred->Branch(ne);
- }
-
-#ifdef DEBUG
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
-#endif
- __ mov(scratch2, Operand(Factory::null_value()));
- __ cmp(scratch1, scratch2);
- deferred->Branch(ne);
-
- // Get the elements array from the receiver.
- __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(scratch1);
-
- // Check that key is within bounds. Use unsigned comparison to handle
- // negative keys.
- __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
- __ cmp(scratch2, key);
- deferred->Branch(ls); // Unsigned less equal.
-
- // Load and check that the result is not the hole (key is a smi).
- __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
- __ add(scratch1,
- scratch1,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(scratch1,
- MemOperand(scratch1, key, LSL,
- kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
- __ cmp(scratch1, scratch2);
- deferred->Branch(eq);
-
- __ mov(r0, scratch1);
- // Make sure that the expected number of instructions are generated.
- ASSERT_EQ(GetInlinedKeyedLoadInstructionsAfterPatch(),
- masm_->InstructionsGeneratedSince(&check_inlined_codesize));
- }
-
- deferred->BindExit();
- }
-}
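-
-
-// The bounds check above uses an unsigned ('ls') comparison so that a
-// single test rejects both out-of-range and negative keys: a negative key
-// reinterpreted as unsigned becomes a huge value. Sketched over untagged
-// integers (the generated code compares smi-tagged values, where the trick
-// works the same way):
-#include <stdint.h>
-static bool KeyInBounds(int32_t key, int32_t length) {
-  return static_cast<uint32_t>(key) < static_cast<uint32_t>(length);
-}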
-
-
-void CodeGenerator::EmitKeyedStore(StaticType* key_type,
- WriteBarrierCharacter wb_info) {
- // Generate inlined version of the keyed store if the code is in a loop
- // and the key is likely to be a smi.
- if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
- // Inline the keyed store.
- Comment cmnt(masm_, "[ Inlined store to keyed property");
-
- Register scratch1 = VirtualFrame::scratch0();
- Register scratch2 = VirtualFrame::scratch1();
- Register scratch3 = r3;
-
- // Counter will be decremented in the deferred code. Placed here to avoid
- // having it in the instruction stream below where patching will occur.
- __ IncrementCounter(&Counters::keyed_store_inline, 1,
- scratch1, scratch2);
-
- // Load the value, key and receiver from the stack.
- bool value_is_harmless = frame_->KnownSmiAt(0);
- if (wb_info == NEVER_NEWSPACE) value_is_harmless = true;
- bool key_is_smi = frame_->KnownSmiAt(1);
- Register value = frame_->PopToRegister();
- Register key = frame_->PopToRegister(value);
- VirtualFrame::SpilledScope spilled(frame_);
- Register receiver = r2;
- frame_->EmitPop(receiver);
-
-#ifdef DEBUG
- bool we_remembered_the_write_barrier = value_is_harmless;
-#endif
-
- // The deferred code expects value, key and receiver in registers.
- DeferredReferenceSetKeyedValue* deferred =
- new DeferredReferenceSetKeyedValue(
- value, key, receiver, strict_mode_flag());
-
- // Check that the value is a smi. As this inlined code does not set the
- // write barrier it is only possible to store smi values.
- if (!value_is_harmless) {
- // If the value is not likely to be a Smi then let's test the fixed array
- // for new space instead. See below.
- if (wb_info == LIKELY_SMI) {
- __ tst(value, Operand(kSmiTagMask));
- deferred->Branch(ne);
-#ifdef DEBUG
- we_remembered_the_write_barrier = true;
-#endif
- }
- }
-
- if (!key_is_smi) {
- // Check that the key is a smi.
- __ tst(key, Operand(kSmiTagMask));
- deferred->Branch(ne);
- }
-
- // Check that the receiver is a heap object.
- __ tst(receiver, Operand(kSmiTagMask));
- deferred->Branch(eq);
-
- // Check that the receiver is a JSArray.
- __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
- deferred->Branch(ne);
-
- // Check that the key is within bounds. Both the key and the length of
- // the JSArray are smis. Use unsigned comparison to handle negative keys.
- __ ldr(scratch1, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ cmp(scratch1, key);
- deferred->Branch(ls); // Unsigned less equal.
-
- // Get the elements array from the receiver.
- __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (!value_is_harmless && wb_info != LIKELY_SMI) {
- Label ok;
- __ and_(scratch2, scratch1, Operand(ExternalReference::new_space_mask()));
- __ cmp(scratch2, Operand(ExternalReference::new_space_start()));
- __ tst(value, Operand(kSmiTagMask), ne);
- deferred->Branch(ne);
-#ifdef DEBUG
- we_remembered_the_write_barrier = true;
-#endif
- }
- // Check that the elements array is not a dictionary.
- __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
- // The following instructions are the part of the inlined keyed store
- // code which can be patched. Therefore the exact number of instructions
- // generated needs to be fixed, so the constant pool is blocked while
- // generating this code.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
-#ifdef DEBUG
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
-#endif
-
- // Read the fixed array map from the constant pool (not from the root
- // array) so that the value can be patched. When debugging, we patch this
- // comparison to always fail so that we will hit the IC call in the
- // deferred code which will allow the debugger to break for fast case
- // stores.
- __ mov(scratch3, Operand(Factory::fixed_array_map()));
- __ cmp(scratch2, scratch3);
- deferred->Branch(ne);
-
- // Store the value.
- __ add(scratch1, scratch1,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ str(value,
- MemOperand(scratch1, key, LSL,
- kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
-
- // Make sure that the expected number of instructions are generated.
- ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
- masm_->InstructionsGeneratedSince(&check_inlined_codesize));
- }
-
- ASSERT(we_remembered_the_write_barrier);
-
- // Make sure that r0 holds the value which is the result of the expression.
- __ Move(r0, value);
-
- deferred->BindExit();
- } else {
- frame()->CallKeyedStoreIC(strict_mode_flag());
- }
-}
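-
-
-// The new-space test above masks the elements pointer and compares it with
-// the new-space start address: all young-generation pages share a common
-// address prefix under that mask, so a single mask-and-compare classifies
-// the object, and stores into new-space objects never need the write
-// barrier. A sketch with the mask and start taken as parameters:
-#include <stdint.h>
-static bool InNewSpaceSketch(uintptr_t address, uintptr_t new_space_mask,
-                             uintptr_t new_space_start) {
-  return (address & new_space_mask) == new_space_start;
-}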
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() { return true; }
-#endif
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-Handle<String> Reference::GetName() {
- ASSERT(type_ == NAMED);
- Property* property = expression_->AsProperty();
- if (property == NULL) {
- // Global variable reference treated as a named property reference.
- VariableProxy* proxy = expression_->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- return proxy->name();
- } else {
- Literal* raw_name = property->key()->AsLiteral();
- ASSERT(raw_name != NULL);
- return Handle<String>(String::cast(*raw_name->handle()));
- }
-}
-
-
-void Reference::DupIfPersist() {
- if (persist_after_get_) {
- switch (type_) {
- case KEYED:
- cgen_->frame()->Dup2();
- break;
- case NAMED:
- cgen_->frame()->Dup();
- // Fall through.
- case UNLOADED:
- case ILLEGAL:
- case SLOT:
- // Do nothing.
- ;
- }
- } else {
- set_unloaded();
- }
-}
-
-
-void Reference::GetValue() {
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- ASSERT(!cgen_->has_cc());
- MacroAssembler* masm = cgen_->masm();
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Load from Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- DupIfPersist();
- cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- break;
- }
-
- case NAMED: {
- Variable* var = expression_->AsVariableProxy()->AsVariable();
- bool is_global = var != NULL;
- ASSERT(!is_global || var->is_global());
- Handle<String> name = GetName();
- DupIfPersist();
- cgen_->EmitNamedLoad(name, is_global);
- break;
- }
-
- case KEYED: {
- ASSERT(property != NULL);
- DupIfPersist();
- cgen_->EmitKeyedLoad();
- cgen_->frame()->EmitPush(r0);
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
- ASSERT(!is_illegal());
- ASSERT(!cgen_->has_cc());
- MacroAssembler* masm = cgen_->masm();
- VirtualFrame* frame = cgen_->frame();
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Store to Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- cgen_->StoreToSlot(slot, init_state);
- set_unloaded();
- break;
- }
-
- case NAMED: {
- Comment cmnt(masm, "[ Store to named Property");
- cgen_->EmitNamedStore(GetName(), false);
- frame->EmitPush(r0);
- set_unloaded();
- break;
- }
-
- case KEYED: {
- Comment cmnt(masm, "[ Store to keyed Property");
- Property* property = expression_->AsProperty();
- ASSERT(property != NULL);
- cgen_->CodeForSourcePosition(property->position());
- cgen_->EmitKeyedStore(property->key()->type(), wb_info);
- frame->EmitPush(r0);
- set_unloaded();
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int len = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(len);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, len),
- "GenericBinaryOpStub_%s_%s%s_%s",
- op_name,
- overwrite_name,
- specialized_on_rhs_ ? "_ConstantRhs" : "",
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
-
-
-#undef __
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 8f46256b8..01aa8052e 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -37,162 +37,8 @@ namespace internal {
// Forward declarations
class CompilationInfo;
-class DeferredCode;
-class JumpTarget;
-class RegisterAllocator;
-class RegisterFile;
-enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
-enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
-
-
-// -------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame. The reference may be consumed
-// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
-class Reference BASE_EMBEDDED {
- public:
- // The values of the types are important; see size().
- enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get = false);
- ~Reference();
-
- Expression* expression() const { return expression_; }
- Type type() const { return type_; }
- void set_type(Type value) {
- ASSERT_EQ(ILLEGAL, type_);
- type_ = value;
- }
-
- void set_unloaded() {
- ASSERT_NE(ILLEGAL, type_);
- ASSERT_NE(UNLOADED, type_);
- type_ = UNLOADED;
- }
- // The size the reference takes up on the stack.
- int size() const {
- return (type_ < SLOT) ? 0 : type_;
- }
-
- bool is_illegal() const { return type_ == ILLEGAL; }
- bool is_slot() const { return type_ == SLOT; }
- bool is_property() const { return type_ == NAMED || type_ == KEYED; }
- bool is_unloaded() const { return type_ == UNLOADED; }
-
- // Return the name. Only valid for named property references.
- Handle<String> GetName();
-
- // Generate code to push the value of the reference on top of the
- // expression stack. The reference is expected to be already on top of
- // the expression stack, and it is consumed by the call unless the
- // reference is for a compound assignment.
- // If the reference is not consumed, it is left in place under its value.
- void GetValue();
-
- // Generate code to store the value on top of the expression stack in the
- // reference. The reference is expected to be immediately below the value
- // on the expression stack. The value is stored in the location specified
- // by the reference, and is left on top of the stack, after the reference
- // is popped from beneath it (unloaded).
- void SetValue(InitState init_state, WriteBarrierCharacter wb);
-
- // This is called in preparation for something that uses the reference on
- // the stack. If the reference is needed after the get, dup it now;
- // otherwise mark it as unloaded.
- inline void DupIfPersist();
-
- private:
- CodeGenerator* cgen_;
- Expression* expression_;
- Type type_;
- // Keep the reference on the stack after get, so it can be used by set later.
- bool persist_after_get_;
-};
-
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the label pair). It is threaded through the
-// call stack. Constructing a state implicitly pushes it on the owning code
-// generator's stack of states, and destroying one implicitly pops it.
-
-class CodeGenState BASE_EMBEDDED {
- public:
- // Create an initial code generator state. Destroying the initial state
- // leaves the code generator with a NULL state.
- explicit CodeGenState(CodeGenerator* owner);
-
- // Destroy a code generator state and restore the owning code generator's
- // previous state.
- virtual ~CodeGenState();
-
- virtual JumpTarget* true_target() const { return NULL; }
- virtual JumpTarget* false_target() const { return NULL; }
-
- protected:
- inline CodeGenerator* owner() { return owner_; }
- inline CodeGenState* previous() const { return previous_; }
-
- private:
- CodeGenerator* owner_;
- CodeGenState* previous_;
-};
-
-
-class ConditionCodeGenState : public CodeGenState {
- public:
- // Create a code generator state based on a code generator's current
- // state. The new state has its own pair of branch labels.
- ConditionCodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target);
-
- virtual JumpTarget* true_target() const { return true_target_; }
- virtual JumpTarget* false_target() const { return false_target_; }
-
- private:
- JumpTarget* true_target_;
- JumpTarget* false_target_;
-};
-
-
-class TypeInfoCodeGenState : public CodeGenState {
- public:
- TypeInfoCodeGenState(CodeGenerator* owner,
- Slot* slot_number,
- TypeInfo info);
- ~TypeInfoCodeGenState();
-
- virtual JumpTarget* true_target() const { return previous()->true_target(); }
- virtual JumpTarget* false_target() const {
- return previous()->false_target();
- }
-
- private:
- Slot* slot_;
- TypeInfo old_type_info_;
-};
-
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode
-
-enum ArgumentsAllocationMode {
- NO_ARGUMENTS_ALLOCATION,
- EAGER_ARGUMENTS_ALLOCATION,
- LAZY_ARGUMENTS_ALLOCATION
-};
-
// -------------------------------------------------------------------------
// CodeGenerator
@@ -225,367 +71,17 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
- // Accessors
- MacroAssembler* masm() { return masm_; }
- VirtualFrame* frame() const { return frame_; }
- inline Handle<Script> script();
-
- bool has_valid_frame() const { return frame_ != NULL; }
-
- // Set the virtual frame to be new_frame, with non-frame register
- // reference counts given by non_frame_registers. The non-frame
- // register reference counts of the old frame are returned in
- // non_frame_registers.
- void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
- void DeleteFrame();
-
- RegisterAllocator* allocator() const { return allocator_; }
-
- CodeGenState* state() { return state_; }
- void set_state(CodeGenState* state) { state_ = state; }
-
- TypeInfo type_info(Slot* slot) {
- int index = NumberOfSlot(slot);
- if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
- return (*type_info_)[index];
- }
-
- TypeInfo set_type_info(Slot* slot, TypeInfo info) {
- int index = NumberOfSlot(slot);
- ASSERT(index >= kInvalidSlotNumber);
- if (index != kInvalidSlotNumber) {
- TypeInfo previous_value = (*type_info_)[index];
- (*type_info_)[index] = info;
- return previous_value;
- }
- return TypeInfo::Unknown();
- }
-
- void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
// Constants related to patching of inlined load/store.
static int GetInlinedKeyedLoadInstructionsAfterPatch() {
return FLAG_debug_code ? 32 : 13;
}
- static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
+ static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
static int GetInlinedNamedStoreInstructionsAfterPatch() {
- ASSERT(inlined_write_barrier_size_ != -1);
- return inlined_write_barrier_size_ + 4;
+ ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
+ return Isolate::Current()->inlined_write_barrier_size() + 4;
}
private:
- // Type of a member function that generates inline code for a native function.
- typedef void (CodeGenerator::*InlineFunctionGenerator)
- (ZoneList<Expression*>*);
-
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
- // Construction/Destruction
- explicit CodeGenerator(MacroAssembler* masm);
-
- // Accessors
- inline bool is_eval();
- inline Scope* scope();
- inline StrictModeFlag strict_mode_flag();
-
- // Generating deferred code.
- void ProcessDeferred();
-
- static const int kInvalidSlotNumber = -1;
-
- int NumberOfSlot(Slot* slot);
-
- // State
- bool has_cc() const { return cc_reg_ != al; }
- JumpTarget* true_target() const { return state_->true_target(); }
- JumpTarget* false_target() const { return state_->false_target(); }
-
- // Track loop nesting level.
- int loop_nesting() const { return loop_nesting_; }
- void IncrementLoopNesting() { loop_nesting_++; }
- void DecrementLoopNesting() { loop_nesting_--; }
-
- // Node visitors.
- void VisitStatements(ZoneList<Statement*>* statements);
-
- virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- // Main code generation function
- void Generate(CompilationInfo* info);
-
- // Generate the return sequence code. Should be called no more than
- // once per compiled function, immediately after binding the return
- // target (which can not be done more than once). The return value should
- // target (which cannot be done more than once). The return value should
- void GenerateReturnSequence();
-
- // Returns the arguments allocation mode.
- ArgumentsAllocationMode ArgumentsMode();
-
- // Store the arguments object and allocate it if necessary.
- void StoreArgumentsObject(bool initial);
-
- // The following are used by class Reference.
- void LoadReference(Reference* ref);
- void UnloadReference(Reference* ref);
-
- MemOperand SlotOperand(Slot* slot, Register tmp);
-
- MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
- Register tmp,
- Register tmp2,
- JumpTarget* slow);
-
- // Expressions
- void LoadCondition(Expression* x,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_cc);
- void Load(Expression* expr);
- void LoadGlobal();
- void LoadGlobalReceiver(Register scratch);
-
- // Read a value from a slot and leave it on top of the expression stack.
- void LoadFromSlot(Slot* slot, TypeofState typeof_state);
- void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
-
- // Store the value on top of the stack to a slot.
- void StoreToSlot(Slot* slot, InitState init_state);
-
- // Support for compiling assignment expressions.
- void EmitSlotAssignment(Assignment* node);
- void EmitNamedPropertyAssignment(Assignment* node);
- void EmitKeyedPropertyAssignment(Assignment* node);
-
- // Load a named property, returning it in r0. The receiver is passed on the
- // stack, and remains there.
- void EmitNamedLoad(Handle<String> name, bool is_contextual);
-
- // Store to a named property. If the store is contextual, value is passed on
- // the frame and consumed. Otherwise, receiver and value are passed on the
- // frame and consumed. The result is returned in r0.
- void EmitNamedStore(Handle<String> name, bool is_contextual);
-
- // Load a keyed property, leaving it in r0. The receiver and key are
- // passed on the stack, and remain there.
- void EmitKeyedLoad();
-
- // Store a keyed property. Key and receiver are on the stack and the value is
- // in r0. Result is returned in r0.
- void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
-
- void LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow);
-
- // Support for loading from local/global variables and arguments
- // whose location is known unless they are shadowed by
- // eval-introduced bindings. Generates no code for unsupported slot
- // types and therefore expects to fall through to the slow jump target.
- void EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow,
- JumpTarget* done);
-
- // Special code for typeof expressions: Unfortunately, we must
- // be careful when loading the expression in 'typeof'
- // expressions. We are not allowed to throw reference errors for
- // non-existing properties of the global object, so we must make it
- // look like an explicit property access, instead of an access
- // through the context chain.
- void LoadTypeofExpression(Expression* x);
-
- void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
-
- // Generate code that computes a shortcutting logical operation.
- void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
- void GenericBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode,
- GenerateInlineSmi inline_smi,
- int known_rhs =
- GenericBinaryOpStub::kUnknownIntValue);
- void Comparison(Condition cc,
- Expression* left,
- Expression* right,
- bool strict = false);
-
- void SmiOperation(Token::Value op,
- Handle<Object> value,
- bool reversed,
- OverwriteMode mode);
-
- void CallWithArguments(ZoneList<Expression*>* arguments,
- CallFunctionFlags flags,
- int position);
-
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments). We call x the applicand and y the receiver.
- // The optimization avoids allocating an arguments object if possible.
- void CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position);
-
- // Control flow
- void Branch(bool if_true, JumpTarget* target);
- void CheckStack();
-
- bool CheckForInlineRuntimeCall(CallRuntime* node);
-
- static Handle<Code> ComputeLazyCompile(int argc);
- void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
- // Declare global variables and functions in the given array of
- // name/value pairs.
- void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Instantiate the function based on the shared function info.
- void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
- bool pretenure);
-
- // Support for type checks.
- void GenerateIsSmi(ZoneList<Expression*>* args);
- void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
- void GenerateIsArray(ZoneList<Expression*>* args);
- void GenerateIsRegExp(ZoneList<Expression*>* args);
- void GenerateIsObject(ZoneList<Expression*>* args);
- void GenerateIsSpecObject(ZoneList<Expression*>* args);
- void GenerateIsFunction(ZoneList<Expression*>* args);
- void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
- void GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args);
-
- // Support for construct call checks.
- void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
- // Support for arguments.length and arguments[?].
- void GenerateArgumentsLength(ZoneList<Expression*>* args);
- void GenerateArguments(ZoneList<Expression*>* args);
-
- // Support for accessing the class and value fields of an object.
- void GenerateClassOf(ZoneList<Expression*>* args);
- void GenerateValueOf(ZoneList<Expression*>* args);
- void GenerateSetValueOf(ZoneList<Expression*>* args);
-
- // Fast support for charCodeAt(n).
- void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharAt(ZoneList<Expression*>* args);
-
- // Fast support for object equality testing.
- void GenerateObjectEquals(ZoneList<Expression*>* args);
-
- void GenerateLog(ZoneList<Expression*>* args);
-
- // Fast support for Math.random().
- void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
- // Fast support for StringAdd.
- void GenerateStringAdd(ZoneList<Expression*>* args);
-
- // Fast support for SubString.
- void GenerateSubString(ZoneList<Expression*>* args);
-
- // Fast support for StringCompare.
- void GenerateStringCompare(ZoneList<Expression*>* args);
-
- // Support for direct calls from JavaScript to native RegExp code.
- void GenerateRegExpExec(ZoneList<Expression*>* args);
-
- void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
- // Support for fast native caches.
- void GenerateGetFromCache(ZoneList<Expression*>* args);
-
- // Fast support for number to string.
- void GenerateNumberToString(ZoneList<Expression*>* args);
-
- // Fast swapping of elements.
- void GenerateSwapElements(ZoneList<Expression*>* args);
-
- // Fast call for custom callbacks.
- void GenerateCallFunction(ZoneList<Expression*>* args);
-
- // Fast call to math functions.
- void GenerateMathPow(ZoneList<Expression*>* args);
- void GenerateMathSin(ZoneList<Expression*>* args);
- void GenerateMathCos(ZoneList<Expression*>* args);
- void GenerateMathSqrt(ZoneList<Expression*>* args);
- void GenerateMathLog(ZoneList<Expression*>* args);
-
- void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
- void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
- // Simple condition analysis.
- enum ConditionAnalysis {
- ALWAYS_TRUE,
- ALWAYS_FALSE,
- DONT_KNOW
- };
- ConditionAnalysis AnalyzeCondition(Expression* cond);
-
- // Methods used to indicate which source code is generated for. Source
- // positions are collected by the assembler and emitted with the relocation
- // information.
- void CodeForFunctionPosition(FunctionLiteral* fun);
- void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Statement* node);
- void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
- void CodeForSourcePosition(int pos);
-
-#ifdef DEBUG
- // True if the registers are valid for entry to a block.
- bool HasValidEntryRegisters();
-#endif
-
- List<DeferredCode*> deferred_;
-
- // Assembler
- MacroAssembler* masm_; // to generate code
-
- CompilationInfo* info_;
-
- // Code generation state
- VirtualFrame* frame_;
- RegisterAllocator* allocator_;
- Condition cc_reg_;
- CodeGenState* state_;
- int loop_nesting_;
-
- Vector<TypeInfo>* type_info_;
-
- // Jump targets
- BreakTarget function_return_;
-
- // True if the function return is shadowed (ie, jumping to the target
- // function_return_ does not jump to the true function return, but rather
- // to some unlinking code).
- bool function_return_is_shadowed_;
-
- // Size of inlined write barriers generated by EmitNamedStore.
- static int inlined_write_barrier_size_;
-
- friend class VirtualFrame;
- friend class JumpTarget;
- friend class Reference;
- friend class FastCodeGenerator;
- friend class FullCodeGenerator;
- friend class FullCodeGenSyntaxChecker;
- friend class LCodeGen;
-
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index e6033a897..823c6ff7e 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,12 +28,9 @@
#ifndef V8_ARM_CONSTANTS_ARM_H_
#define V8_ARM_CONSTANTS_ARM_H_
-// The simulator emulates the EABI so we define the USE_ARM_EABI macro if we
-// are not running on real ARM hardware. One reason for this is that the
-// old ABI uses fp registers in the calling convention and the simulator does
-// not simulate fp registers or coroutine instructions.
-#if defined(__ARM_EABI__) || !defined(__arm__)
-# define USE_ARM_EABI 1
+// ARM EABI is required.
+#if defined(__arm__) && !defined(__ARM_EABI__)
+#error ARM EABI support is required.
#endif
// This means that interwork-compatible jump instructions are generated. We
@@ -89,6 +86,11 @@
namespace v8 {
namespace internal {
+// Constant pool marker.
+static const int kConstantPoolMarkerMask = 0xffe00000;
+static const int kConstantPoolMarker = 0x0c000000;
+static const int kConstantPoolLengthMask = 0x001ffff;
+
// Number of registers in normal ARM mode.
static const int kNumRegisters = 16;
@@ -341,7 +343,9 @@ enum BlockAddrMode {
da_x = (0|0|0) << 21, // Decrement after.
ia_x = (0|4|0) << 21, // Increment after.
db_x = (8|0|0) << 21, // Decrement before.
- ib_x = (8|4|0) << 21 // Increment before.
+ ib_x = (8|4|0) << 21, // Increment before.
+
+ kBlockAddrModeMask = (8|4|1) << 21
};
@@ -388,9 +392,11 @@ enum VFPConversionMode {
// This mask does not include the "inexact" or "input denormal" cumulative
// exceptions flags, because we usually don't want to check for it.
static const uint32_t kVFPExceptionMask = 0xf;
+static const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
+static const uint32_t kVFPOverflowExceptionBit = 1 << 2;
+static const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
static const uint32_t kVFPInexactExceptionBit = 1 << 4;
static const uint32_t kVFPFlushToZeroMask = 1 << 24;
-static const uint32_t kVFPInvalidExceptionBit = 1;
static const uint32_t kVFPNConditionFlagBit = 1 << 31;
static const uint32_t kVFPZConditionFlagBit = 1 << 30;
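
The constant pool constants added in this hunk describe a marker word that the ARM backend emits at the start of each constant pool, with the pool length packed into the low bits. A minimal sketch of how such a marker can be recognized, reusing the masks defined above (illustrative only; the real check lands in the disassembler changes further down in this diff):

// Sketch: classify an instruction word as a constant-pool marker,
// using the mask/marker values defined in constants-arm.h above.
inline bool IsConstantPoolMarker(int instruction_bits) {
  return (instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker;
}

inline int ConstantPoolLength(int instruction_bits) {
  // Only meaningful when IsConstantPoolMarker() returns true.
  return instruction_bits & kConstantPoolLengthMask;
}
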
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index 51c84b335..51cfeb6c8 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -42,10 +42,12 @@ namespace v8 {
namespace internal {
void CPU::Setup() {
- CpuFeatures::Probe(true);
- if (!CpuFeatures::IsSupported(VFP3) || Serializer::enabled()) {
- V8::DisableCrankshaft();
- }
+ CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+ return CpuFeatures::IsSupported(VFP3);
}
@@ -61,7 +63,7 @@ void CPU::FlushICache(void* start, size_t size) {
// that the Icache was flushed.
// None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots.
- Simulator::FlushICache(start, size);
+ Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
#else
// Ideally, we would call
// syscall(__ARM_NR_cacheflush, start,
@@ -73,62 +75,33 @@ void CPU::FlushICache(void* start, size_t size) {
register uint32_t end asm("a2") =
reinterpret_cast<uint32_t>(start) + size;
register uint32_t flg asm("a3") = 0;
- #ifdef __ARM_EABI__
- #if defined (__arm__) && !defined(__thumb__)
- // __arm__ may be defined in thumb mode.
- register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
- asm volatile(
- "svc 0x0"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg), "r" (scno));
- #else
- // r7 is reserved by the EABI in thumb mode.
- asm volatile(
- "@ Enter ARM Mode \n\t"
- "adr r3, 1f \n\t"
- "bx r3 \n\t"
- ".ALIGN 4 \n\t"
- ".ARM \n"
- "1: push {r7} \n\t"
- "mov r7, %4 \n\t"
- "svc 0x0 \n\t"
- "pop {r7} \n\t"
- "@ Enter THUMB Mode\n\t"
- "adr r3, 2f+1 \n\t"
- "bx r3 \n\t"
- ".THUMB \n"
- "2: \n\t"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
- : "r3");
- #endif
+ #if defined (__arm__) && !defined(__thumb__)
+ // __arm__ may be defined in thumb mode.
+ register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
+ asm volatile(
+ "svc 0x0"
+ : "=r" (beg)
+ : "0" (beg), "r" (end), "r" (flg), "r" (scno));
#else
- #if defined (__arm__) && !defined(__thumb__)
- // __arm__ may be defined in thumb mode.
- asm volatile(
- "svc %1"
- : "=r" (beg)
- : "i" (__ARM_NR_cacheflush), "0" (beg), "r" (end), "r" (flg));
- #else
- // Do not use the value of __ARM_NR_cacheflush in the inline assembly
- // below, because the thumb mode value would be used, which would be
- // wrong, since we switch to ARM mode before executing the svc instruction
- asm volatile(
- "@ Enter ARM Mode \n\t"
- "adr r3, 1f \n\t"
- "bx r3 \n\t"
- ".ALIGN 4 \n\t"
- ".ARM \n"
- "1: svc 0x9f0002 \n"
- "@ Enter THUMB Mode\n\t"
- "adr r3, 2f+1 \n\t"
- "bx r3 \n\t"
- ".THUMB \n"
- "2: \n\t"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg)
- : "r3");
- #endif
+ // r7 is reserved by the EABI in thumb mode.
+ asm volatile(
+ "@ Enter ARM Mode \n\t"
+ "adr r3, 1f \n\t"
+ "bx r3 \n\t"
+ ".ALIGN 4 \n\t"
+ ".ARM \n"
+ "1: push {r7} \n\t"
+ "mov r7, %4 \n\t"
+ "svc 0x0 \n\t"
+ "pop {r7} \n\t"
+ "@ Enter THUMB Mode\n\t"
+ "adr r3, 2f+1 \n\t"
+ "bx r3 \n\t"
+ ".THUMB \n"
+ "2: \n\t"
+ : "=r" (beg)
+ : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
+ : "r3");
#endif
#endif
}
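
The rewritten block above keeps only the EABI variants of the flush; the pre-EABI paths are gone because EABI is now required (see the constants-arm.h change earlier in this diff). All of these asm fragments invoke the ARM Linux cacheflush syscall that the comment near the top of the function calls the ideal form. A plain sketch of that ideal, assuming an ARM Linux target where <asm/unistd.h> defines __ARM_NR_cacheflush:

#include <stddef.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>  // Defines __ARM_NR_cacheflush on ARM Linux.

// Sketch: flush the instruction cache for [start, start + size).
static void FlushICacheRange(void* start, size_t size) {
  char* begin = static_cast<char*>(start);
  // The third argument (flags) must be zero.
  syscall(__ARM_NR_cacheflush, begin, begin + size, 0);
}

The hand-written asm remains necessary in V8 itself because of the register constraint noted in the comments: r7, which the syscall number travels in, is reserved by the EABI in Thumb mode.
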
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index 22640ca1c..07a22722c 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,7 +29,7 @@
#if defined(V8_TARGET_ARCH_ARM)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
namespace v8 {
@@ -65,7 +65,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
#endif
- patcher.Emit(Debug::debug_break_return()->entry());
+ patcher.Emit(Isolate::Current()->debug()->debug_break_return()->entry());
patcher.masm()->bkpt(0);
}
@@ -115,7 +115,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
#endif
- patcher.Emit(Debug::debug_break_slot()->entry());
+ patcher.Emit(Isolate::Current()->debug()->debug_break_slot()->entry());
}
@@ -159,7 +159,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
- __ mov(r1, Operand(ExternalReference::debug_break()));
+ __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
CEntryStub ceb(1);
__ CallStub(&ceb);
@@ -185,7 +185,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
- __ mov(ip, Operand(ExternalReference(Debug_Address::AfterBreakTarget())));
+ ExternalReference after_break_target =
+ ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
+ __ mov(ip, Operand(after_break_target));
__ ldr(ip, MemOperand(ip));
__ Jump(ip);
}
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 339841875..e0e84ab33 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -51,6 +51,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ HandleScope scope;
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
@@ -74,8 +75,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
int deoptimization_index = safepoint_entry.deoptimization_index();
int gap_code_size = safepoint_entry.gap_code_size();
  // Check that we did not shoot past the next safepoint.
- // TODO(srdjan): How do we guarantee that safepoint code does not
- // overlap other safepoint patching code?
CHECK(pc_offset >= last_pc_offset);
#ifdef DEBUG
// Destroy the code which is not supposed to be run again.
@@ -112,8 +111,9 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- node->set_next(deoptimizing_code_list_);
- deoptimizing_code_list_ = node;
+ DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
+ node->set_next(data->deoptimizing_code_list_);
+ data->deoptimizing_code_list_ = node;
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@@ -122,6 +122,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
+#ifdef DEBUG
+ if (FLAG_print_code) {
+ code->PrintLn();
+ }
+#endif
}
}
@@ -283,14 +288,33 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Set them up explicitly.
- for (int i = 0; ok && i < 4; i++) {
+ for (int i = StandardFrameConstants::kCallerPCOffset;
+ ok && i >= StandardFrameConstants::kMarkerOffset;
+ i -= kPointerSize) {
uint32_t input_value = input_->GetFrameSlot(input_offset);
if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part)\n",
+ const char* name = "UNKNOWN";
+ switch (i) {
+ case StandardFrameConstants::kCallerPCOffset:
+ name = "caller's pc";
+ break;
+ case StandardFrameConstants::kCallerFPOffset:
+ name = "fp";
+ break;
+ case StandardFrameConstants::kContextOffset:
+ name = "context";
+ break;
+ case StandardFrameConstants::kMarkerOffset:
+ name = "function";
+ break;
+ }
+ PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
output_offset,
input_value,
- input_offset);
+ input_offset,
+ name);
}
+
output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
input_offset -= kPointerSize;
output_offset -= kPointerSize;
@@ -316,7 +340,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
optimized_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
- Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
+ Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
@@ -490,11 +514,13 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
FullCodeGenerator::StateField::decode(pc_and_state);
output_frame->SetState(Smi::FromInt(state));
+
// Set the continuation for the topmost frame.
if (is_topmost) {
+ Builtins* builtins = isolate_->builtins();
Code* continuation = (bailout_type_ == EAGER)
- ? Builtins::builtin(Builtins::NotifyDeoptimized)
- : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
+ ? builtins->builtin(Builtins::kNotifyDeoptimized)
+ : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
output_frame->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
}
@@ -510,6 +536,9 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// easily ported.
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
+
+ Isolate* isolate = masm()->isolate();
+
CpuFeatures::Scope scope(VFP3);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -520,13 +549,21 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize =
kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
- // Save all general purpose registers before messing with them.
- __ sub(sp, sp, Operand(kDoubleRegsSize));
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
- DwVfpRegister vfp_reg = DwVfpRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ vstr(vfp_reg, sp, offset);
+ // Save all VFP registers before messing with them.
+ DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
+ DwVfpRegister last =
+ DwVfpRegister::FromAllocationIndex(
+ DwVfpRegister::kNumAllocatableRegisters - 1);
+ ASSERT(last.code() > first.code());
+ ASSERT((last.code() - first.code()) ==
+ (DwVfpRegister::kNumAllocatableRegisters - 1));
+#ifdef DEBUG
+ for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
+ ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
+ (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
}
+#endif
+ __ vstm(db_w, sp, first, last);
// Push all 16 registers (needed to populate FrameDescription::registers_).
__ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
@@ -557,14 +594,16 @@ void Deoptimizer::EntryGenerator::Generate() {
// Allocate a new deoptimizer object.
  // Pass four arguments in r0 to r3, and the fifth and sixth arguments
  // on the stack.
- __ PrepareCallCFunction(5, r5);
+ __ PrepareCallCFunction(6, r5);
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(r1, Operand(type())); // bailout type,
// r2: bailout id already loaded.
// r3: code address or 0 already loaded.
__ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
+ __ mov(r5, Operand(ExternalReference::isolate_address()));
+ __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
// Call Deoptimizer::New().
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
// Preserve "deoptimizer" object in register r0 and get the input
// frame descriptor pointer to r1 (deoptimizer->input_);
@@ -618,7 +657,8 @@ void Deoptimizer::EntryGenerator::Generate() {
// r0: deoptimizer object; r1: scratch.
__ PrepareCallCFunction(1, r1);
// Call Deoptimizer::ComputeOutputFrames().
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate), 1);
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
@@ -668,7 +708,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(ip); // remove lr
// Set up the roots register.
- ExternalReference roots_address = ExternalReference::roots_address();
+ ExternalReference roots_address = ExternalReference::roots_address(isolate);
__ mov(r10, Operand(roots_address));
__ pop(ip); // remove pc
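
Two themes run through the deoptimizer changes above: explicit isolate plumbing (ExternalReference and builtin lookups now go through an Isolate or its DeoptimizerData rather than process-wide statics) and more self-describing OSR tracing. For the latter, the fixed frame slots are now walked by their FP-relative offsets so each copied slot can be named; a compressed sketch of that mapping, using the same StandardFrameConstants symbols the new loop relies on:

// Sketch: names for the four fixed frame slots the OSR loop copies.
const char* FixedSlotName(int fp_offset) {
  switch (fp_offset) {
    case StandardFrameConstants::kCallerPCOffset: return "caller's pc";
    case StandardFrameConstants::kCallerFPOffset: return "fp";
    case StandardFrameConstants::kContextOffset:  return "context";
    case StandardFrameConstants::kMarkerOffset:   return "function";
    default:                                      return "UNKNOWN";
  }
}
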
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 08f605b16..d4bd81ce4 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -89,6 +89,9 @@ class Decoder {
// Returns the length of the disassembled machine instruction in bytes.
int InstructionDecode(byte* instruction);
+ static bool IsConstantPoolAt(byte* instr_ptr);
+ static int ConstantPoolSizeAt(byte* instr_ptr);
+
private:
// Bottleneck functions to print into the out_buffer.
void PrintChar(const char ch);
@@ -368,25 +371,34 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
ASSERT((format[0] == 'S') || (format[0] == 'D'));
+ VFPRegPrecision precision =
+ format[0] == 'D' ? kDoublePrecision : kSinglePrecision;
+
+ int retval = 2;
+ int reg = -1;
if (format[1] == 'n') {
- int reg = instr->VnValue();
- if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NValue()));
- if (format[0] == 'D') PrintDRegister(reg);
- return 2;
+ reg = instr->VFPNRegValue(precision);
} else if (format[1] == 'm') {
- int reg = instr->VmValue();
- if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MValue()));
- if (format[0] == 'D') PrintDRegister(reg);
- return 2;
+ reg = instr->VFPMRegValue(precision);
} else if (format[1] == 'd') {
- int reg = instr->VdValue();
- if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DValue()));
- if (format[0] == 'D') PrintDRegister(reg);
- return 2;
+ reg = instr->VFPDRegValue(precision);
+ if (format[2] == '+') {
+ int immed8 = instr->Immed8Value();
+ if (format[0] == 'S') reg += immed8 - 1;
+ if (format[0] == 'D') reg += (immed8 / 2 - 1);
+ }
+ if (format[2] == '+') retval = 3;
+ } else {
+ UNREACHABLE();
}
- UNREACHABLE();
- return -1;
+ if (precision == kSinglePrecision) {
+ PrintSRegister(reg);
+ } else {
+ PrintDRegister(reg);
+ }
+
+ return retval;
}
@@ -490,13 +502,16 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
ASSERT(STRING_STARTS_WITH(format, "memop"));
if (instr->HasL()) {
Print("ldr");
- } else if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0)) {
- if (instr->Bits(7, 4) == 0xf) {
- Print("strd");
- } else {
- Print("ldrd");
- }
} else {
+ if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0) &&
+ (instr->Bits(7, 6) == 3) && (instr->Bit(4) == 1)) {
+ if (instr->Bit(5) == 1) {
+ Print("strd");
+ } else {
+ Print("ldrd");
+ }
+ return 5;
+ }
Print("str");
}
return 5;
@@ -899,6 +914,7 @@ void Decoder::DecodeType2(Instruction* instr) {
case da_x: {
if (instr->HasW()) {
Unknown(instr); // not used in V8
+ return;
}
Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
break;
@@ -906,6 +922,7 @@ void Decoder::DecodeType2(Instruction* instr) {
case ia_x: {
if (instr->HasW()) {
Unknown(instr); // not used in V8
+ return;
}
Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
break;
@@ -992,11 +1009,15 @@ void Decoder::DecodeType3(Instruction* instr) {
void Decoder::DecodeType4(Instruction* instr) {
- ASSERT(instr->Bit(22) == 0); // Privileged mode currently not supported.
- if (instr->HasL()) {
- Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
+ if (instr->Bit(22) != 0) {
+ // Privileged mode currently not supported.
+ Unknown(instr);
} else {
- Format(instr, "stm'cond'pu 'rn'w, 'rlist");
+ if (instr->HasL()) {
+ Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
+ } else {
+ Format(instr, "stm'cond'pu 'rn'w, 'rlist");
+ }
}
}
@@ -1042,6 +1063,8 @@ int Decoder::DecodeType7(Instruction* instr) {
// vmov: Rt = Sn
// vcvt: Dd = Sm
// vcvt: Sd = Dm
+// Dd = vabs(Dm)
+// Dd = vneg(Dm)
// Dd = vadd(Dn, Dm)
// Dd = vsub(Dn, Dm)
// Dd = vmul(Dn, Dm)
@@ -1066,7 +1089,10 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
}
} else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
// vabs
- Format(instr, "vabs'cond 'Dd, 'Dm");
+ Format(instr, "vabs.f64'cond 'Dd, 'Dm");
+ } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
+ // vneg
+ Format(instr, "vneg.f64'cond 'Dd, 'Dm");
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
@@ -1259,9 +1285,22 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
Format(instr, "vstr'cond 'Sd, ['rn + 4*'imm08@00]");
}
break;
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xB: {
+ bool to_vfp_register = (instr->VLValue() == 0x1);
+ if (to_vfp_register) {
+ Format(instr, "vldm'cond'pu 'rn'w, {'Sd-'Sd+}");
+ } else {
+ Format(instr, "vstm'cond'pu 'rn'w, {'Sd-'Sd+}");
+ }
+ break;
+ }
default:
Unknown(instr); // Not used by V8.
- break;
}
} else if (instr->CoprocessorValue() == 0xB) {
switch (instr->OpcodeValue()) {
@@ -1289,12 +1328,38 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
Format(instr, "vstr'cond 'Dd, ['rn + 4*'imm08@00]");
}
break;
+ case 0x4:
+ case 0x5:
+ case 0x9: {
+ bool to_vfp_register = (instr->VLValue() == 0x1);
+ if (to_vfp_register) {
+ Format(instr, "vldm'cond'pu 'rn'w, {'Dd-'Dd+}");
+ } else {
+ Format(instr, "vstm'cond'pu 'rn'w, {'Dd-'Dd+}");
+ }
+ break;
+ }
default:
Unknown(instr); // Not used by V8.
- break;
}
} else {
- UNIMPLEMENTED(); // Not used by V8.
+ Unknown(instr); // Not used by V8.
+ }
+}
+
+
+bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
+ int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
+ return (instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker;
+}
+
+
+int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
+ if (IsConstantPoolAt(instr_ptr)) {
+ int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
+ return instruction_bits & kConstantPoolLengthMask;
+ } else {
+ return -1;
}
}
@@ -1307,7 +1372,15 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
"%08x ",
instr->InstructionBits());
if (instr->ConditionField() == kSpecialCondition) {
- UNIMPLEMENTED();
+ Unknown(instr);
+ return Instruction::kInstrSize;
+ }
+ int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
+ if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "constant pool begin (length %d)",
+ instruction_bits &
+ kConstantPoolLengthMask);
return Instruction::kInstrSize;
}
switch (instr->TypeValue()) {
@@ -1359,9 +1432,8 @@ namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
- static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
- v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
- return tmp_buffer.start();
+ v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
}
@@ -1411,12 +1483,7 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
int Disassembler::ConstantPoolSizeAt(byte* instruction) {
- int instruction_bits = *(reinterpret_cast<int*>(instruction));
- if ((instruction_bits & 0xfff00000) == 0x03000000) {
- return instruction_bits & 0x0000ffff;
- } else {
- return -1;
- }
+ return v8::internal::Decoder::ConstantPoolSizeAt(instruction);
}
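
With the pool check centralized in Decoder::ConstantPoolSizeAt, Disassembler::ConstantPoolSizeAt becomes a thin wrapper and both layers agree on the marker encoding. A hedged sketch of how a caller could walk code without misdecoding pool data (a hypothetical driver loop; the names converter, buffer, begin, and end are assumptions, and it further assumes the returned length counts pool words, as the marker encoding suggests):

// Sketch: disassemble a region, skipping embedded constant pools.
disasm::NameConverter converter;
disasm::Disassembler disassembler(converter);
v8::internal::EmbeddedVector<char, 128> buffer;
for (v8::internal::byte* pc = begin; pc < end; ) {
  int pool_words = disassembler.ConstantPoolSizeAt(pc);
  if (pool_words >= 0) {
    // Skip the marker word plus the pool entries it announces.
    pc += (1 + pool_words) * v8::internal::Instruction::kInstrSize;
  } else {
    pc += disassembler.InstructionDecode(buffer, pc);  // Normal instruction.
  }
}
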
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index 4aa8d6aa9..84e108b3d 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -72,6 +72,9 @@ static const RegList kCalleeSaved =
static const int kNumCalleeSaved = 7 + kR9Available;
+// Double registers d8 to d15 are callee-saved.
+static const int kNumDoubleCalleeSaved = 8;
+
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
@@ -136,7 +139,7 @@ class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kSavedRegistersOffset = +2 * kPointerSize;
+ static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// Caller SP-relative.
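
The new kNumDoubleCalleeSaved constant records the ARM EABI (AAPCS) rule that d8 through d15 must be preserved across calls, matching the comment above it. A trivial sketch of the frame arithmetic it enables (illustrative only; kDoubleSize is the usual 8-byte VFP double width):

// Sketch: stack bytes needed to spill the callee-saved doubles d8..d15.
static const int kDoubleSize = 8;            // Bytes per VFP double register.
static const int kNumDoubleCalleeSaved = 8;  // d8..d15.
static const int kDoubleCalleeSavedBytes =
    kNumDoubleCalleeSaved * kDoubleSize;     // 64 bytes reserved in the frame.
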
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 7a4764478..9b771dae2 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
@@ -46,6 +46,12 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+static unsigned GetPropertyId(Property* property) {
+ if (property->is_synthetic()) return AstNode::kNoNumber;
+ return property->id();
+}
+
+
// A patch site is a location in the code which it is possible to patch. This
// class has a number of methods to emit the code which is patchable and the
// method EmitPatchInfo to record a marker back to the patchable code. This
@@ -133,6 +139,20 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
#endif
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). r5 is zero for method calls and non-zero for
+ // function calls.
+ if (info->is_strict_mode() || info->is_native()) {
+ Label ok;
+ __ cmp(r5, Operand(0));
+ __ b(eq, &ok);
+ int receiver_offset = scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ str(r2, MemOperand(sp, receiver_offset));
+ __ bind(&ok);
+ }
+
int locals_count = scope()->num_stack_slots();
__ Push(lr, fp, cp, r1);
@@ -162,7 +182,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in both r0 and cp. It replaces the context
@@ -210,13 +230,18 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// function, receiver address, parameter count.
  // The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ ArgumentsAccessStub::Type type;
+ if (is_strict_mode()) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ }
+ ArgumentsAccessStub stub(type);
__ CallStub(&stub);
- // Duplicate the value; move-to-slot operation might clobber registers.
- __ mov(r3, r0);
+
Move(arguments->AsSlot(), r0, r1, r2);
- Slot* dot_arguments_slot = scope()->arguments_shadow()->AsSlot();
- Move(dot_arguments_slot, r3, r1, r2);
}
if (FLAG_trace) {
@@ -240,7 +265,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
{ Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
+ PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
@@ -358,7 +383,7 @@ void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
// For simplicity we always test the accumulator register.
codegen()->Move(result_register(), slot);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -392,7 +417,7 @@ void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
if (true_label_ != fall_through_) __ b(true_label_);
} else {
__ LoadRoot(result_register(), index);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -426,8 +451,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
if (true_label_ != fall_through_) __ b(true_label_);
} else if (lit->IsString()) {
if (String::cast(*lit)->length() == 0) {
- if (false_label_ != fall_through_) __ b(false_label_);
- __ b(false_label_);
+ if (false_label_ != fall_through_) __ b(false_label_);
} else {
if (true_label_ != fall_through_) __ b(true_label_);
}
@@ -440,7 +464,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
} else {
// For simplicity we always test the accumulator register.
__ mov(result_register(), Operand(lit));
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -476,7 +500,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
__ Drop(count);
__ Move(result_register(), reg);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -554,27 +578,11 @@ void FullCodeGenerator::TestContext::Plug(bool flag) const {
}
-void FullCodeGenerator::DoTest(Label* if_true,
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
Label* if_false,
Label* fall_through) {
if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Emit the inlined tests assumed by the stub.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(result_register(), ip);
- __ b(eq, if_false);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(result_register(), ip);
- __ b(eq, if_true);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(result_register(), ip);
- __ b(eq, if_false);
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(result_register(), result_register());
- __ b(eq, if_false);
- __ JumpIfSmi(result_register(), if_true);
-
- // Call the ToBoolean stub for all other cases.
ToBooleanStub stub(result_register());
__ CallStub(&stub);
__ tst(result_register(), result_register());
@@ -586,8 +594,6 @@ void FullCodeGenerator::DoTest(Label* if_true,
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(r0, ip);
}
-
- // The stub returns nonzero for true.
Split(ne, if_true, if_false, fall_through);
}
@@ -707,10 +713,12 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check that we're not inside a 'with'.
- __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
- __ cmp(r1, cp);
- __ Check(eq, "Unexpected declaration in current context.");
+ // Check that we're not inside a with or catch context.
+ __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
+ __ Check(ne, "Declaration in with context.");
+ __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
+ __ Check(ne, "Declaration in catch context.");
}
if (mode == Variable::CONST) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -755,31 +763,30 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
}
} else if (prop != NULL) {
- if (function != NULL || mode == Variable::CONST) {
- // We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value. We
- // cannot visit the rewrite because it's shared and we risk
- // recording duplicate AST IDs for bailouts from optimized code.
+ // A const declaration aliasing a parameter is an illegal redeclaration.
+ ASSERT(mode != Variable::CONST);
+ if (function != NULL) {
+ // We are declaring a function that rewrites to a property.
+ // Use (keyed) IC to set the initial value. We cannot visit the
+ // rewrite because it's shared and we risk recording duplicate AST
+ // IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
- if (function != NULL) {
- __ push(r0);
- VisitForAccumulatorValue(function);
- __ pop(r2);
- } else {
- __ mov(r2, r0);
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- }
+
+ __ push(r0);
+ VisitForAccumulatorValue(function);
+ __ pop(r2);
+
ASSERT(prop->key()->AsLiteral() != NULL &&
prop->key()->AsLiteral()->handle()->IsSmi());
__ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(is_strict()
- ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
// Value in r0 is ignored (declarations are statements).
}
}
@@ -819,7 +826,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Compile all the tests with branches to their bodies.
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
- clause->body_target()->entry_label()->Unuse();
+ clause->body_target()->Unuse();
// The default is not a test, but remember it as final fall through.
if (clause->is_default()) {
@@ -846,18 +853,19 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
__ cmp(r1, r0);
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
- __ b(clause->body_target()->entry_label());
+ __ b(clause->body_target());
__ bind(&slow_case);
}
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site);
+ EmitCallIC(ic, &patch_site, clause->CompareId());
+
__ cmp(r0, Operand(0));
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
- __ b(clause->body_target()->entry_label());
+ __ b(clause->body_target());
}
// Discard the test value and jump to the default if present, otherwise to
@@ -867,14 +875,15 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
if (default_clause == NULL) {
__ b(nested_statement.break_target());
} else {
- __ b(default_clause->body_target()->entry_label());
+ __ b(default_clause->body_target());
}
// Compile all the case bodies.
for (int i = 0; i < clauses->length(); i++) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target()->entry_label());
+ __ bind(clause->body_target());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
VisitStatements(clause->statements());
}
@@ -906,11 +915,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(r0, &convert);
- __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
- __ b(hs, &done_convert);
+ __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, &done_convert);
__ bind(&convert);
__ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
__ push(r0);
@@ -938,9 +947,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// check for an enum cache. Leave the map in r2 for the subsequent
// prototype load.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOffset));
- __ cmp(r3, empty_descriptor_array_value);
- __ b(eq, &call_runtime);
+ __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
+ __ JumpIfSmi(r3, &call_runtime);
// Check that there is an enum cache in the non-empty instance
// descriptors (r3). This is the case if the next enumeration
@@ -985,7 +993,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a map in register r0. Get the enumeration cache from it.
__ bind(&use_cache);
- __ ldr(r1, FieldMemOperand(r0, Map::kInstanceDescriptorsOffset));
+ __ LoadInstanceDescriptors(r0, r1);
__ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
__ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
@@ -1034,7 +1042,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// just skip it.
__ push(r1); // Enumerable.
__ push(r3); // Current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS);
+ __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ mov(r3, Operand(r0), SetCC);
__ b(eq, loop_statement.continue_target());
@@ -1080,10 +1088,10 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
// doesn't just get a copy of the existing unoptimized code.
if (!FLAG_always_opt &&
!FLAG_prepare_always_opt &&
+ !pretenure &&
scope()->is_function_scope() &&
- info->num_literals() == 0 &&
- !pretenure) {
- FastNewClosureStub stub;
+ info->num_literals() == 0) {
+ FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
__ mov(r0, Operand(info));
__ push(r0);
__ CallStub(&stub);
@@ -1104,6 +1112,65 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
+void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
+ Slot* slot,
+ TypeofState typeof_state,
+ Label* slow) {
+ Register current = cp;
+ Register next = r1;
+ Register temp = r2;
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_eval()) {
+ // Check that extension is NULL.
+ __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+ __ tst(temp, temp);
+ __ b(ne, slow);
+ }
+ // Load next context in chain.
+ __ ldr(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions.
+ if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s->is_eval_scope()) {
+ Label loop, fast;
+ if (!current.is(next)) {
+ __ Move(next, current);
+ }
+ __ bind(&loop);
+ // Terminate at global context.
+ __ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+ __ cmp(temp, ip);
+ __ b(eq, &fast);
+ // Check that extension is NULL.
+ __ ldr(temp, ContextOperand(next, Context::EXTENSION_INDEX));
+ __ tst(temp, temp);
+ __ b(ne, slow);
+ // Load next context in chain.
+ __ ldr(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+ __ b(&loop);
+ __ bind(&fast);
+ }
+
+ __ ldr(r0, GlobalObjectOperand());
+ __ mov(r2, Operand(slot->var()->name()));
+ RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
+ ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ EmitCallIC(ic, mode, AstNode::kNoNumber);
+}
+
+
MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
Slot* slot,
Label* slow) {
@@ -1120,8 +1187,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
__ tst(temp, temp);
__ b(ne, slow);
}
- __ ldr(next, ContextOperand(context, Context::CLOSURE_INDEX));
- __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
+ __ ldr(next, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
context = next;
}
@@ -1180,8 +1246,9 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
slow));
__ mov(r0, Operand(key_literal->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic =
+ isolate()->builtins()->KeyedLoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ jmp(done);
}
}
@@ -1190,85 +1257,23 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
}
-void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
- Slot* slot,
- TypeofState typeof_state,
- Label* slow) {
- Register current = cp;
- Register next = r1;
- Register temp = r2;
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
- }
- // Load next context in chain.
- __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX));
- __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
- // Walk the rest of the chain without clobbering cp.
- current = next;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- Label loop, fast;
- if (!current.is(next)) {
- __ Move(next, current);
- }
- __ bind(&loop);
- // Terminate at global context.
- __ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
- __ cmp(temp, ip);
- __ b(eq, &fast);
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(next, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
- // Load next context in chain.
- __ ldr(next, ContextOperand(next, Context::CLOSURE_INDEX));
- __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
- __ b(&loop);
- __ bind(&fast);
- }
-
- __ ldr(r0, GlobalObjectOperand());
- __ mov(r2, Operand(slot->var()->name()));
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- EmitCallIC(ic, mode);
-}
-
-
void FullCodeGenerator::EmitVariableLoad(Variable* var) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
+ // Three cases: non-this global variables, lookup slots, and all other
+ // types of slots.
Slot* slot = var->AsSlot();
- Property* property = var->AsProperty();
+ ASSERT((var->is_global() && !var->is_this()) == (slot == NULL));
- if (var->is_global() && !var->is_this()) {
+ if (slot == NULL) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object (receiver) in r0.
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
context()->Plug(r0);
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ } else if (slot->type() == Slot::LOOKUP) {
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -1284,7 +1289,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) {
context()->Plug(r0);
- } else if (slot != NULL) {
+ } else {
Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
? "Context slot"
: "Stack slot");
@@ -1300,32 +1305,6 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) {
} else {
context()->Plug(slot);
}
- } else {
- Comment cmnt(masm_, "Rewritten parameter");
- ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
-
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->AsSlot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- Move(r1, object_slot);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ mov(r0, Operand(key_literal->handle()));
-
- // Call keyed load IC. It has arguments key and receiver in r0 and r1.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- context()->Plug(r0);
}
}
@@ -1387,7 +1366,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(expr->constant_properties()));
- __ mov(r0, Operand(Smi::FromInt(expr->fast_elements() ? 1 : 0)));
+ int flags = expr->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ mov(r0, Operand(Smi::FromInt(flags)));
__ Push(r3, r2, r1, r0);
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
@@ -1426,8 +1411,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(r2, Operand(key->handle()));
__ ldr(r1, MemOperand(sp));
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1465,6 +1452,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
+ if (expr->has_function()) {
+ ASSERT(result_saved);
+ __ ldr(r0, MemOperand(sp));
+ __ push(r0);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1484,11 +1478,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(expr->constant_elements()));
__ Push(r3, r2, r1);
- if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ if (expr->constant_elements()->map() ==
+ isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
__ CallStub(&stub);
- __ IncrementCounter(&Counters::cow_arrays_created_stub, 1, r1, r2);
+ __ IncrementCounter(
+ isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
} else if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
@@ -1549,7 +1545,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
@@ -1575,52 +1571,37 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
- __ push(r0);
- __ mov(r0, Operand(property->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
__ ldr(r1, MemOperand(sp, 0));
__ push(r0);
} else {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- __ ldr(r1, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
- __ mov(r0, Operand(property->key()->AsLiteral()->handle()));
- __ Push(r1, r0);
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
}
break;
}
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
if (expr->is_compound()) {
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
break;
}
}
- // For property compound assignments we need another deoptimization
- // point after the property load.
- if (property != NULL) {
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
- }
-
Token::Value op = expr->binary_op();
__ push(r0); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
@@ -1631,13 +1612,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr,
+ EmitInlineSmiBinaryOp(expr->binary_operation(),
op,
mode,
expr->target(),
expr->value());
} else {
- EmitBinaryOp(op, mode);
+ EmitBinaryOp(expr->binary_operation(), op, mode);
}
// Deoptimization point in case the binary operation may have side effects.
@@ -1672,20 +1653,20 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
__ mov(r2, Operand(key->handle()));
  // Call load IC. It has arguments receiver and property name in r0 and r2.
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode,
Expression* left_expr,
@@ -1707,14 +1688,14 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site);
+ BinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), &patch_site, expr->id());
__ jmp(&done);
__ bind(&smi_case);
// Smi case. This code works the same way as the smi-smi case in the type
// recording binary operation stub, see
- // TypeRecordingBinaryOpStub::GenerateSmiSmiOperation for comments.
+ // BinaryOpStub::GenerateSmiSmiOperation for comments.
switch (op) {
case Token::SAR:
__ b(&stub_call);
@@ -1784,11 +1765,12 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
}
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+ Token::Value op,
OverwriteMode mode) {
__ pop(r1);
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), NULL);
+ BinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), NULL, expr->id());
context()->Plug(r0);
}
@@ -1802,7 +1784,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
@@ -1825,33 +1807,23 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ mov(r1, r0);
__ pop(r0); // Restore value.
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
break;
}
case KEYED_PROPERTY: {
__ push(r0); // Preserve value.
- if (prop->is_synthetic()) {
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
- { AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
- }
- __ mov(r2, r0);
- __ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ mov(r1, r0);
- __ pop(r2);
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ mov(r1, r0);
+ __ pop(r2);
__ pop(r0); // Restore value.
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
break;
}
}
@@ -1862,8 +1834,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
- // Left-hand sides that rewrite to explicit property accesses do not reach
- // here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->AsSlot() != NULL);
@@ -1874,10 +1844,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// r2, and the global object in r1.
__ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -1899,18 +1869,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ b(ne, &skip);
__ str(result_register(), MemOperand(fp, SlotOffset(slot)));
break;
- case Slot::CONTEXT: {
- __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
- __ ldr(r2, ContextOperand(r1, slot->index()));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r2, ip);
- __ b(ne, &skip);
- __ str(r0, ContextOperand(r1, slot->index()));
- int offset = Context::SlotOffset(slot->index());
- __ mov(r3, r0); // Preserve the stored value in r0.
- __ RecordWrite(r1, Operand(offset), r3, r2);
- break;
- }
+ case Slot::CONTEXT:
case Slot::LOOKUP:
__ push(r0);
__ mov(r0, Operand(slot->var()->name()));
@@ -1983,10 +1942,10 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ pop(r1);
}
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2029,10 +1988,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(r2);
}
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2082,8 +2041,9 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
SetSourcePosition(expr->position());
// Call the IC initialization code.
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
- EmitCallIC(ic, mode);
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
+ EmitCallIC(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2092,8 +2052,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key,
- RelocInfo::Mode mode) {
+ Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
@@ -2115,9 +2074,10 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
SetSourcePosition(expr->position());
// Call the IC initialization code.
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, mode);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2125,7 +2085,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2137,7 +2097,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Record source position for debugger.
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+ CallFunctionStub stub(arg_count, in_loop, flags);
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2233,7 +2193,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source position for debugger.
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+ CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_IMPLICIT);
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2276,14 +2236,17 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ bind(&done);
// Push function.
__ push(r0);
- // Push global receiver.
- __ ldr(r1, GlobalObjectOperand());
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing the hole to the call function stub.
+ __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
__ push(r1);
__ bind(&call);
}
- EmitCallWithStub(expr);
+ // The receiver is either the global receiver or an object found
+ // by LoadContextSlot. That object could be the hole if the
+ // receiver is implicitly the global object.
+ EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
} else if (fun->AsProperty() != NULL) {
// Call to an object property.
Property* prop = fun->AsProperty();
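The comments above describe a small calling convention: when the receiver is only implicitly the global object, the hole value is pushed in its place and the call stub patches it up at call time. A minimal standalone C++ sketch of that rule (types and helper invented for illustration; this is not the V8 API):

struct Value {
  bool is_hole;   // stand-in for V8's hole sentinel
  int payload;    // stand-in for the actual object
};

Value GlobalReceiver() { return Value{false, 0}; }  // assumed placeholder

// RECEIVER_MIGHT_BE_IMPLICIT in one line: a hole pushed as the receiver
// is replaced by the global receiver inside CallFunctionStub.
Value ResolveReceiver(const Value& pushed) {
  return pushed.is_hole ? GlobalReceiver() : pushed;
}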
@@ -2297,7 +2260,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
- // for a regular property use keyed CallIC.
+ // for a regular property use EmitKeyedCallWithIC.
if (prop->is_synthetic()) {
// Do not visit the object and key subexpressions (they are shared
// by all occurrences of the same rewritten parameter).
@@ -2314,30 +2277,20 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source code position for IC call.
SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ ldr(r1, GlobalObjectOperand());
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ Push(r0, r1); // Function, receiver.
- EmitCallWithStub(expr);
+ EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
} else {
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(prop->obj());
}
- EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
+ EmitKeyedCallWithIC(expr, prop->key());
}
}
} else {
- // Call to some other expression. If the expression is an anonymous
- // function literal not called in a loop, mark it as one that should
- // also use the fast code generator.
- FunctionLiteral* lit = fun->AsFunctionLiteral();
- if (lit != NULL &&
- lit->name()->Equals(Heap::empty_string()) &&
- loop_depth() == 0) {
- lit->set_try_full_codegen(true);
- }
-
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(fun);
}
@@ -2346,7 +2299,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ push(r1);
// Emit function call.
- EmitCallWithStub(expr);
+ EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
}
#ifdef DEBUG
@@ -2382,7 +2335,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ mov(r0, Operand(arg_count));
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> construct_builtin =
+ isolate()->builtins()->JSConstructCall();
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
context()->Plug(r0);
}
@@ -2450,9 +2404,9 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ tst(r1, Operand(1 << Map::kIsUndetectable));
__ b(ne, if_false);
__ ldrb(r1, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ b(lt, if_false);
- __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+ __ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(le, if_true, if_false, fall_through);
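The cmp/b(lt)/Split(le) sequence above is an inclusive range test over the reordered instance-type enum, written with one comparison per bound. In portable C++ the same logic reads (sketch; the bound constants come from V8's instance-type enum):

bool IsNonCallableSpecObject(int type, int first, int last) {
  if (type < first) return false;  // b(lt, if_false)
  return type <= last;             // Split(le, if_true, if_false, ...)
}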
@@ -2473,7 +2427,7 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
@@ -2518,11 +2472,74 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
- // used in a few functions in runtime.js which should not normally be hit by
- // this compiler.
+ if (FLAG_debug_code) __ AbortIfSmi(r0);
+
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
+ __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ b(ne, if_true);
+
+ // Check for fast case object. Generate false result for slow case object.
+ __ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(r2, ip);
+ __ b(eq, if_false);
+
+ // Look for valueOf symbol in the descriptor array, and indicate false if
+ // found. The type is not checked, so if it is a transition it is a false
+ // negative.
+ __ LoadInstanceDescriptors(r1, r4);
+ __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ // r4: descriptor array
+ // r3: length of descriptor array
+ // Calculate the end of the descriptor array.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kPointerSize == 4);
+ __ add(r2, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+ // Calculate location of the first key name.
+ __ add(r4,
+ r4,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag +
+ DescriptorArray::kFirstIndex * kPointerSize));
+ // Loop through all the keys in the descriptor array. If one of these is the
+ // symbol valueOf, the result is false.
+ Label entry, loop;
+ // The use of ip to store the valueOf symbol assumes that it is not otherwise
+ // used in the loop below.
+ __ mov(ip, Operand(FACTORY->value_of_symbol()));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ ldr(r3, MemOperand(r4, 0));
+ __ cmp(r3, ip);
+ __ b(eq, if_false);
+ __ add(r4, r4, Operand(kPointerSize));
+ __ bind(&entry);
+ __ cmp(r4, Operand(r2));
+ __ b(ne, &loop);
+
+ // If a valueOf property is not found on the object, check that its
+ // prototype is the unmodified String prototype. If not, the result is false.
+ __ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
+ __ JumpIfSmi(r2, if_false);
+ __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldr(r3, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+ __ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ cmp(r2, r3);
+ __ b(ne, if_false);
+
+ // Set the bit in the map to indicate that it has been checked as safe for
+ // the default valueOf, and set the result to true.
+ __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
+ __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
+ __ jmp(if_true);
+
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ jmp(if_false);
context()->Plug(if_true, if_false);
}
@@ -2692,16 +2709,18 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE); // Map is now in r0.
+ __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
+ // Map is now in r0.
__ b(lt, &null);
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ cmp(r1, Operand(JS_FUNCTION_TYPE));
- __ b(eq, &function);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+ __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
+ __ b(ge, &function);
// Check if the constructor in the map is a function.
__ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
@@ -2778,8 +2797,9 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
if (CpuFeatures::IsSupported(VFP3)) {
- __ PrepareCallCFunction(0, r1);
- __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+ __ PrepareCallCFunction(1, r0);
+ __ mov(r0, Operand(ExternalReference::isolate_address()));
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
CpuFeatures::Scope scope(VFP3);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
@@ -2797,10 +2817,11 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
__ vstr(d7, r0, HeapNumber::kValueOffset);
__ mov(r0, r4);
} else {
+ __ PrepareCallCFunction(2, r0);
__ mov(r0, Operand(r4));
- __ PrepareCallCFunction(1, r1);
+ __ mov(r1, Operand(ExternalReference::isolate_address()));
__ CallCFunction(
- ExternalReference::fill_heap_number_with_random_function(), 1);
+ ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
}
context()->Plug(r0);
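The "1.(20 0s)(32 random bits) x 2^20" comment above is the standard trick for turning 32 random bits into a uniform double in [0, 1) without an integer-to-float conversion. A standalone C++ sketch (the 0x41300000 constant is the same one the VFP3 path loads):

#include <cstdint>
#include <cstring>

double RandomDoubleFromBits(uint32_t random_bits) {
  // 0x41300000 is the top half of 1.0 x 2^20 as a double: sign 0, biased
  // exponent 0x413 (= 1023 + 20), mantissa 0. ORing 32 random bits into
  // the low mantissa gives 1.(20 zeros)(32 random bits) x 2^20.
  uint64_t bits = (static_cast<uint64_t>(0x41300000) << 32) | random_bits;
  double d;
  std::memcpy(&d, &bits, sizeof d);  // reinterpret the bit pattern
  return d - 1048576.0;              // subtract 1.0 x 2^20, leaving [0, 1)
}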
@@ -2855,7 +2876,8 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- __ CallRuntime(Runtime::kMath_pow, 2);
+ MathPowStub stub;
+ __ CallStub(&stub);
context()->Plug(r0);
}
@@ -3038,7 +3060,8 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN);
+ TranscendentalCacheStub stub(TranscendentalCache::SIN,
+ TranscendentalCacheStub::TAGGED);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -3048,7 +3071,8 @@ void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS);
+ TranscendentalCacheStub stub(TranscendentalCache::COS,
+ TranscendentalCacheStub::TAGGED);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -3058,7 +3082,8 @@ void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG);
+ TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub::TAGGED);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -3078,17 +3103,17 @@ void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() >= 2);
- int arg_count = args->length() - 2; // For receiver and function.
- VisitForStackValue(args->at(0)); // Receiver.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i + 1));
+ int arg_count = args->length() - 2; // 2 ~ receiver and function.
+ for (int i = 0; i < arg_count + 1; i++) {
+ VisitForStackValue(args->at(i));
}
- VisitForAccumulatorValue(args->at(arg_count + 1)); // Function.
+ VisitForAccumulatorValue(args->last()); // Function.
- // InvokeFunction requires function in r1. Move it in there.
- if (!result_register().is(r1)) __ mov(r1, result_register());
+ // InvokeFunction requires the function in r1. Move it in there.
+ __ mov(r1, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(r1, count, CALL_FUNCTION);
+ __ InvokeFunction(r1, count, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->Plug(r0);
}
@@ -3110,7 +3135,79 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
+ Label done;
+ Label slow_case;
+ Register object = r0;
+ Register index1 = r1;
+ Register index2 = r2;
+ Register elements = r3;
+ Register scratch1 = r4;
+ Register scratch2 = r5;
+
+ __ ldr(object, MemOperand(sp, 2 * kPointerSize));
+ // Fetch the map and check if array is in fast case.
+ // Check that object doesn't require security checks and
+ // has no indexed interceptor.
+ __ CompareObjectType(object, scratch1, scratch2, JS_ARRAY_TYPE);
+ __ b(ne, &slow_case);
+ // Map is now in scratch1.
+
+ __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
+ __ tst(scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+ __ b(ne, &slow_case);
+
+ // Check the object's elements are in fast case and writable.
+ __ ldr(elements, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(scratch1, ip);
+ __ b(ne, &slow_case);
+
+ // Check that both indices are smis.
+ __ ldr(index1, MemOperand(sp, 1 * kPointerSize));
+ __ ldr(index2, MemOperand(sp, 0));
+ __ JumpIfNotBothSmi(index1, index2, &slow_case);
+
+ // Check that both indices are valid.
+ __ ldr(scratch1, FieldMemOperand(object, JSArray::kLengthOffset));
+ __ cmp(scratch1, index1);
+ __ cmp(scratch1, index2, hi);
+ __ b(ls, &slow_case);
+
+ // Bring the address of the elements into index1 and index2.
+ __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(index1,
+ scratch1,
+ Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(index2,
+ scratch1,
+ Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+ // Swap elements.
+ __ ldr(scratch1, MemOperand(index1, 0));
+ __ ldr(scratch2, MemOperand(index2, 0));
+ __ str(scratch1, MemOperand(index2, 0));
+ __ str(scratch2, MemOperand(index1, 0));
+
+ Label new_space;
+ __ InNewSpace(elements, scratch1, eq, &new_space);
+ // Possible optimization: do a check that both values are Smis
+ // (OR them together and test the result against the Smi tag mask).
+
+ __ mov(scratch1, elements);
+ __ RecordWriteHelper(elements, index1, scratch2);
+ __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements.
+
+ __ bind(&new_space);
+ // We are done. Drop elements from the stack, and return undefined.
+ __ Drop(3);
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&slow_case);
__ CallRuntime(Runtime::kSwapElements, 3);
+
+ __ bind(&done);
context()->Plug(r0);
}
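The back-to-back cmp instructions above lean on ARM conditional execution: the second cmp runs only if the first left 'hi' (length > index1), and b(ls, &slow_case) then fires unless both indices are strictly below the length. A portable C++ sketch of the fast path's guard and swap (helper name invented):

#include <cstdint>
#include <utility>
#include <vector>

template <typename T>
bool TrySwapFast(std::vector<T>& elements, uint32_t index1, uint32_t index2) {
  uint32_t length = static_cast<uint32_t>(elements.size());
  // Unsigned comparison rejects out-of-range indices and, as with the smi
  // checks above, anything with the sign bit set.
  if (index1 >= length || index2 >= length) return false;  // slow case
  std::swap(elements[index1], elements[index2]);
  return true;  // caller skips the Runtime::kSwapElements call
}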
@@ -3122,7 +3219,7 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
+ isolate()->global_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@@ -3183,8 +3280,7 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
__ b(eq, &ok);
// Fail if either is a non-HeapObject.
__ and_(tmp, left, Operand(right));
- __ tst(tmp, Operand(kSmiTagMask));
- __ b(eq, &fail);
+ __ JumpIfSmi(tmp, &fail);
__ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
__ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
__ cmp(tmp2, Operand(JS_REGEXP_TYPE));
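JumpIfSmi, used throughout this patch in place of the tst/b pairs, is a one-bit test: on 32-bit V8 smis are tagged with a 0 low bit and heap objects with a 1. That is also why the and_ above answers "is either operand a non-HeapObject" in a single instruction. Sketch:

#include <cstdint>

bool IsSmi(uint32_t tagged) { return (tagged & 1u) == 0; }

// left & right has a 1 in bit 0 only if both operands are heap objects,
// so a single smi check on the AND catches either one being a smi.
bool EitherIsSmi(uint32_t left, uint32_t right) {
  return ((left & right) & 1u) == 0;
}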
@@ -3274,9 +3370,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ b(ne, &bailout);
// Check that the array has fast elements.
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
- __ tst(scratch2, Operand(1 << Map::kHasFastElements));
- __ b(eq, &bailout);
+ __ CheckFastElements(scratch1, scratch2, &bailout);
// If the array has length zero, return the empty string.
__ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
@@ -3501,8 +3595,12 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Call the JS runtime function.
__ mov(r2, Operand(expr->name()));
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, NOT_IN_LOOP);
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP,
+ mode);
+ EmitCallIC(ic, mode, expr->id());
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -3530,7 +3628,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(prop->key());
__ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
__ push(r1);
- __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r0);
}
} else if (var != NULL) {
@@ -3542,7 +3640,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ mov(r1, Operand(var->name()));
__ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
__ Push(r2, r1, r0);
- __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r0);
} else if (var->AsSlot() != NULL &&
var->AsSlot()->type() != Slot::LOOKUP) {
@@ -3610,8 +3708,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Comment cmt(masm_, "[ UnaryOperation (ADD)");
VisitForAccumulatorValue(expr->expression());
Label no_conversion;
- __ tst(result_register(), Operand(kSmiTagMask));
- __ b(eq, &no_conversion);
+ __ JumpIfSmi(result_register(), &no_conversion);
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);
@@ -3619,48 +3716,13 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
- case Token::SUB: {
- Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register r0.
- VisitForAccumulatorValue(expr->expression());
- __ CallStub(&stub);
- context()->Plug(r0);
+ case Token::SUB:
+ EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
break;
- }
- case Token::BIT_NOT: {
- Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- // The generic unary operation stub expects the argument to be
- // in the accumulator register r0.
- VisitForAccumulatorValue(expr->expression());
- Label done;
- bool inline_smi_code = ShouldInlineSmiCase(expr->op());
- if (inline_smi_code) {
- Label call_stub;
- __ JumpIfNotSmi(r0, &call_stub);
- __ mvn(r0, Operand(r0));
- // Bit-clear inverted smi-tag.
- __ bic(r0, r0, Operand(kSmiTagMask));
- __ b(&done);
- __ bind(&call_stub);
- }
- bool overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOpFlags flags = inline_smi_code
- ? NO_UNARY_SMI_CODE_IN_STUB
- : NO_UNARY_FLAGS;
- UnaryOverwriteMode mode =
- overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
- __ CallStub(&stub);
- __ bind(&done);
- context()->Plug(r0);
+ case Token::BIT_NOT:
+ EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
break;
- }
default:
UNREACHABLE();
@@ -3668,6 +3730,23 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+ const char* comment) {
+ // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+ Comment cmt(masm_, comment);
+ bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOverwriteMode overwrite =
+ can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ UnaryOpStub stub(expr->op(), overwrite);
+ // UnaryOpStub expects the argument to be in the
+ // accumulator register r0.
+ VisitForAccumulatorValue(expr->expression());
+ SetSourcePosition(expr->position());
+ EmitCallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ context()->Plug(r0);
+}
+
+
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
@@ -3680,7 +3759,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->expression()->AsProperty();
@@ -3708,15 +3787,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ push(r0);
EmitNamedPropertyLoad(prop);
} else {
- if (prop->is_arguments_access()) {
- VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
- __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
- __ push(r0);
- __ mov(r0, Operand(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
__ ldr(r1, MemOperand(sp, 0));
__ push(r0);
EmitKeyedPropertyLoad(prop);
@@ -3725,7 +3797,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// We need a second deoptimization point after loading the value
// in case evaluating the property load may have a side effect.
- PrepareForBailout(expr->increment(), TOS_REG);
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ }
// Call ToNumber only if operand is not a smi.
Label no_conversion;
@@ -3776,8 +3852,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Record position before stub call.
SetSourcePosition(expr->position());
- TypeRecordingBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- EmitCallIC(stub.GetCode(), &patch_site);
+ BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+ EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
__ bind(&done);
// Store the value returned in r0.
@@ -3805,10 +3881,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
__ pop(r1);
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3822,10 +3898,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(r1); // Key.
__ pop(r2); // Receiver.
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3848,10 +3924,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Comment cmnt(masm_, "Global variable");
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(proxy->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL &&
@@ -3874,104 +3950,83 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
context()->Plug(r0);
} else {
// This expression cannot throw a reference error at the top level.
- context()->HandleExpression(expr);
+ VisitInCurrentContext(expr);
}
}
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (op != Token::EQ && op != Token::EQ_STRICT) return false;
-
- // Check for the pattern: typeof <expression> == <string literal>.
- Literal* right_literal = right->AsLiteral();
- if (right_literal == NULL) return false;
- Handle<Object> right_literal_value = right_literal->handle();
- if (!right_literal_value->IsString()) return false;
- UnaryOperation* left_unary = left->AsUnaryOperation();
- if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
- Handle<String> check = Handle<String>::cast(right_literal_value);
-
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(left_unary->expression());
+ VisitForTypeofValue(expr);
}
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- if (check->Equals(Heap::number_symbol())) {
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, if_true);
+ if (check->Equals(isolate()->heap()->number_symbol())) {
+ __ JumpIfSmi(r0, if_true);
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r0, ip);
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::string_symbol())) {
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, if_false);
+ } else if (check->Equals(isolate()->heap()->string_symbol())) {
+ __ JumpIfSmi(r0, if_false);
// Check for undetectable objects => false.
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE);
+ __ b(ge, if_false);
__ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
- __ cmp(r1, Operand(1 << Map::kIsUndetectable));
- __ b(eq, if_false);
- __ ldrb(r1, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(FIRST_NONSTRING_TYPE));
- Split(lt, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::boolean_symbol())) {
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
+ __ tst(r1, Operand(1 << Map::kIsUndetectable));
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+ __ CompareRoot(r0, Heap::kTrueValueRootIndex);
__ b(eq, if_true);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r0, ip);
+ __ CompareRoot(r0, Heap::kFalseValueRootIndex);
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::undefined_symbol())) {
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
+ } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+ __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(eq, if_true);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, if_false);
+ __ JumpIfSmi(r0, if_false);
// Check for undetectable objects => true.
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
- __ cmp(r1, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::function_symbol())) {
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, if_false);
- __ CompareObjectType(r0, r1, r0, JS_FUNCTION_TYPE);
- __ b(eq, if_true);
- // Regular expressions => 'function' (they are callable).
- __ CompareInstanceType(r1, r0, JS_REGEXP_TYPE);
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::object_symbol())) {
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, if_false);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r0, ip);
+ __ tst(r1, Operand(1 << Map::kIsUndetectable));
+ Split(ne, if_true, if_false, fall_through);
+
+ } else if (check->Equals(isolate()->heap()->function_symbol())) {
+ __ JumpIfSmi(r0, if_false);
+ __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+ Split(ge, if_true, if_false, fall_through);
+
+ } else if (check->Equals(isolate()->heap()->object_symbol())) {
+ __ JumpIfSmi(r0, if_false);
+ __ CompareRoot(r0, Heap::kNullValueRootIndex);
__ b(eq, if_true);
- // Regular expressions => 'function', not 'object'.
- __ CompareObjectType(r0, r1, r0, JS_REGEXP_TYPE);
- __ b(eq, if_false);
- // Check for undetectable objects => false.
- __ ldrb(r0, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
- __ cmp(r0, Operand(1 << Map::kIsUndetectable));
- __ b(eq, if_false);
// Check for JS objects => true.
- __ ldrb(r0, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- __ cmp(r0, Operand(FIRST_JS_OBJECT_TYPE));
+ __ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ b(lt, if_false);
- __ cmp(r0, Operand(LAST_JS_OBJECT_TYPE));
- Split(le, if_true, if_false, fall_through);
+ __ CompareInstanceType(r0, r1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ b(gt, if_false);
+ // Check for undetectable objects => false.
+ __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
+ __ tst(r1, Operand(1 << Map::kIsUndetectable));
+ Split(eq, if_true, if_false, fall_through);
} else {
if (if_false != fall_through) __ jmp(if_false);
}
+}
- return true;
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ VisitForAccumulatorValue(expr);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+ __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
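The old TryLiteralCompare bundled pattern matching and code generation; the patch leaves only the emitters here. A sketch of the match it performs on the AST, with a deliberately tiny invented expression hierarchy (V8's Expression/UnaryOperation/Literal classes are richer):

#include <memory>
#include <string>

struct Expr { virtual ~Expr() = default; };
struct StringLiteral : Expr { std::string value; };
struct TypeofExpr : Expr { std::unique_ptr<Expr> operand; };

// Returns the checked type name if (left op right) has the shape
// `typeof <expr> ==/=== "<name>"`, otherwise nullptr; a hit dispatches to
// EmitLiteralCompareTypeof instead of the generic compare IC.
const std::string* MatchTypeofCompare(const Expr* left, const Expr* right) {
  if (dynamic_cast<const TypeofExpr*>(left) == nullptr) return nullptr;
  auto* literal = dynamic_cast<const StringLiteral*>(right);
  return literal ? &literal->value : nullptr;
}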
@@ -3991,19 +4046,17 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
- if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
context()->Plug(if_true, if_false);
return;
}
+ Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_JS);
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
@@ -4073,7 +4126,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site);
+ EmitCallIC(ic, &patch_site, expr->id());
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ cmp(r0, Operand(0));
Split(cond, if_true, if_false, fall_through);
@@ -4106,8 +4159,7 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ cmp(r0, r1);
__ b(eq, if_true);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, if_false);
+ __ JumpIfSmi(r0, if_false);
// It can be an undetectable object.
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
@@ -4135,47 +4187,62 @@ Register FullCodeGenerator::context_register() {
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+ RelocInfo::Mode mode,
+ unsigned ast_id) {
ASSERT(mode == RelocInfo::CODE_TARGET ||
mode == RelocInfo::CODE_TARGET_CONTEXT);
+ Counters* counters = isolate()->counters();
switch (ic->kind()) {
case Code::LOAD_IC:
- __ IncrementCounter(&Counters::named_load_full, 1, r1, r2);
+ __ IncrementCounter(counters->named_load_full(), 1, r1, r2);
break;
case Code::KEYED_LOAD_IC:
- __ IncrementCounter(&Counters::keyed_load_full, 1, r1, r2);
+ __ IncrementCounter(counters->keyed_load_full(), 1, r1, r2);
break;
case Code::STORE_IC:
- __ IncrementCounter(&Counters::named_store_full, 1, r1, r2);
+ __ IncrementCounter(counters->named_store_full(), 1, r1, r2);
break;
case Code::KEYED_STORE_IC:
- __ IncrementCounter(&Counters::keyed_store_full, 1, r1, r2);
+ __ IncrementCounter(counters->keyed_store_full(), 1, r1, r2);
default:
break;
}
-
- __ Call(ic, mode);
+ if (ast_id == kNoASTId || mode == RelocInfo::CODE_TARGET_CONTEXT) {
+ __ Call(ic, mode);
+ } else {
+ ASSERT(mode == RelocInfo::CODE_TARGET);
+ mode = RelocInfo::CODE_TARGET_WITH_ID;
+ __ CallWithAstId(ic, mode, ast_id);
+ }
}
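The new ast_id parameter threads a deoptimization id into every IC call site. The selection rule above, restated as a plain function (enum values and the kNoASTId sentinel are stand-ins, invented for the sketch):

enum RelocMode { CODE_TARGET, CODE_TARGET_CONTEXT, CODE_TARGET_WITH_ID };
const unsigned kNoASTIdSketch = ~0u;  // assumed sentinel, for illustration

RelocMode SelectCallMode(RelocMode mode, unsigned ast_id) {
  // Contextual calls and calls with no AST id are emitted as before; every
  // other CODE_TARGET call is upgraded so the id travels in the reloc info.
  if (ast_id == kNoASTIdSketch || mode == CODE_TARGET_CONTEXT) return mode;
  return CODE_TARGET_WITH_ID;
}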
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+ JumpPatchSite* patch_site,
+ unsigned ast_id) {
+ Counters* counters = isolate()->counters();
switch (ic->kind()) {
case Code::LOAD_IC:
- __ IncrementCounter(&Counters::named_load_full, 1, r1, r2);
+ __ IncrementCounter(counters->named_load_full(), 1, r1, r2);
break;
case Code::KEYED_LOAD_IC:
- __ IncrementCounter(&Counters::keyed_load_full, 1, r1, r2);
+ __ IncrementCounter(counters->keyed_load_full(), 1, r1, r2);
break;
case Code::STORE_IC:
- __ IncrementCounter(&Counters::named_store_full, 1, r1, r2);
+ __ IncrementCounter(counters->named_store_full(), 1, r1, r2);
break;
case Code::KEYED_STORE_IC:
- __ IncrementCounter(&Counters::keyed_store_full, 1, r1, r2);
+ __ IncrementCounter(counters->keyed_store_full(), 1, r1, r2);
default:
break;
}
- __ Call(ic, RelocInfo::CODE_TARGET);
+ if (ast_id == kNoASTId) {
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ } else {
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET_WITH_ID, ast_id);
+ }
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
} else {
@@ -4195,6 +4262,26 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
}
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ if (scope()->is_global_scope()) {
+ // Contexts nested in the global context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ __ mov(ip, Operand(Smi::FromInt(0)));
+ } else if (scope()->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
+ } else {
+ ASSERT(scope()->is_function_scope());
+ __ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ __ push(ip);
+}
+
+
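PushFunctionArgumentForContextAllocation encodes a three-way rule for which closure a newly allocated context should record. As a plain C++ sketch (enum and int stand-ins invented; V8 derives the case from Scope and passes real objects):

enum class ScopeKind { kGlobal, kEval, kFunction };

// Returns what gets pushed: 0 is the smi sentinel the runtime resolves to
// the canonical empty function for contexts nested in the global context.
int ClosureArgumentFor(ScopeKind kind,
                       int enclosing_context_closure,
                       int current_function) {
  switch (kind) {
    case ScopeKind::kGlobal:   return 0;
    case ScopeKind::kEval:     return enclosing_context_closure;
    case ScopeKind::kFunction: return current_function;
  }
  return current_function;  // unreachable
}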
// ----------------------------------------------------------------------------
// Non-local control flow support.
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 0fc681870..676baeb35 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,7 +31,7 @@
#include "assembler-arm.h"
#include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "disasm.h"
#include "ic-inl.h"
#include "runtime.h"
@@ -79,15 +79,14 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
// elements map.
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the receiver is a valid JS object.
- __ CompareObjectType(receiver, t0, t1, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(receiver, t0, t1, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, miss);
// If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
GenerateGlobalInstanceTypeCheck(masm, t1, miss);
@@ -105,65 +104,6 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
}
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-static void GenerateStringDictionaryProbes(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
-
- // Compute the capacity mask.
- const int kCapacityOffset = StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
- __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
- __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int
- __ sub(scratch1, scratch1, Operand(1));
-
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- ASSERT(StringDictionary::GetProbeOffset(i) <
- 1 << (32 - String::kHashFieldOffset));
- __ add(scratch2, scratch2, Operand(
- StringDictionary::GetProbeOffset(i) << String::kHashShift));
- }
- __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
-
- // Scale the index by multiplying by the element size.
- ASSERT(StringDictionary::kEntrySize == 3);
- // scratch2 = scratch2 * 3.
- __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
-
- // Check if the key is identical to the name.
- __ add(scratch2, elements, Operand(scratch2, LSL, 2));
- __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
- __ cmp(name, Operand(ip));
- if (i != kProbes - 1) {
- __ b(eq, done);
- } else {
- __ b(ne, miss);
- }
- }
-}
-
-
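The removed helper's probe sequence (now generated by StringDictionaryLookupStub::GeneratePositiveLookup) is worth restating: following the comments above, probe i inspects slot (hash + i + i*i) & mask, and each entry occupies three pointer-sized slots. A standalone C++ sketch:

#include <cstdint>
#include <string>
#include <vector>

// table holds capacity * kEntrySize slots; only the key slot matters here.
int FindEntry(const std::vector<std::string>& table,
              uint32_t capacity,            // a power of two
              uint32_t hash,
              const std::string& name) {
  const int kEntrySize = 3;                 // key, value, details
  const int kProbes = 4;                    // unrolled count used above
  for (int i = 0; i < kProbes; i++) {
    uint32_t index = (hash + i + i * i) & (capacity - 1);
    if (table[index * kEntrySize] == name) return static_cast<int>(index);
  }
  return -1;  // fall through to the miss label
}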
// Helper function used from LoadIC/CallIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -191,13 +131,13 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- GenerateStringDictionaryProbes(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
// If probing finds an entry check that the value is a normal
// property.
@@ -240,13 +180,13 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- GenerateStringDictionaryProbes(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
// If probing finds an entry in the dictionary check that the value
// is a normal property that is not read only.
@@ -538,7 +478,8 @@ Object* CallIC_Miss(Arguments args);
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
- Code::Kind kind) {
+ Code::Kind kind,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- r1 : receiver
// -- r2 : name
@@ -549,10 +490,11 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(kind,
NOT_IN_LOOP,
MONOMORPHIC,
- Code::kNoExtraICState,
+ extra_ic_state,
NORMAL,
argc);
- StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, r1, r2, r3, r4, r5);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@@ -560,8 +502,7 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// to probe.
//
// Check for number.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &number);
+ __ JumpIfSmi(r1, &number);
__ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
__ b(ne, &non_number);
__ bind(&number);
@@ -591,7 +532,8 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
- StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, r1, r2, r3, r4, r5);
__ bind(&miss);
}
@@ -604,8 +546,7 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
// r1: function
// Check that the value isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(r1, miss);
// Check that the value is a JSFunction.
__ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
@@ -613,7 +554,8 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
// Invoke the function.
ParameterCount actual(argc);
- __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+ __ InvokeFunction(r1, actual, JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
@@ -639,16 +581,20 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
}
-static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
+static void GenerateCallMiss(MacroAssembler* masm,
+ int argc,
+ IC::UtilityId id,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
+ Isolate* isolate = masm->isolate();
if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(&Counters::call_miss, 1, r3, r4);
+ __ IncrementCounter(isolate->counters()->call_miss(), 1, r3, r4);
} else {
- __ IncrementCounter(&Counters::keyed_call_miss, 1, r3, r4);
+ __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, r3, r4);
}
// Get the receiver of the function from the stack.
@@ -661,7 +607,7 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// Call the entry.
__ mov(r0, Operand(2));
- __ mov(r1, Operand(ExternalReference(IC_Utility(id))));
+ __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
CEntryStub stub(1);
__ CallStub(&stub);
@@ -675,8 +621,7 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
if (id == IC::kCallIC_Miss) {
Label invoke, global;
__ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &invoke);
+ __ JumpIfSmi(r2, &invoke);
__ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
__ b(eq, &global);
__ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
@@ -690,22 +635,33 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
}
// Invoke the function.
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
ParameterCount actual(argc);
- __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+ __ InvokeFunction(r1,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ call_kind);
}
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMiss(MacroAssembler* masm,
+ int argc,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
- GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
+ GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
}
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMegamorphic(MacroAssembler* masm,
+ int argc,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -713,8 +669,8 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
- GenerateMiss(masm, argc);
+ GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+ GenerateMiss(masm, argc, extra_ic_state);
}
@@ -725,7 +681,7 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -----------------------------------
GenerateCallNormal(masm, argc);
- GenerateMiss(masm, argc);
+ GenerateMiss(masm, argc, Code::kNoExtraICState);
}
@@ -735,7 +691,7 @@ void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// -- lr : return address
// -----------------------------------
- GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
+ GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
}
@@ -763,7 +719,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateFastArrayLoad(
masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
- __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1, r0, r3);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, r0, r3);
__ bind(&do_call);
// receiver in r1 is not used after this point.
@@ -782,13 +739,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ mov(r0, Operand(r2, ASR, kSmiTagSize));
// r0: untagged index
GenerateNumberDictionaryLoad(masm, &slow_load, r4, r2, r1, r0, r3, r5);
- __ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1, r0, r3);
+ __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
__ jmp(&do_call);
__ bind(&slow_load);
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
- __ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1, r0, r3);
+ __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
__ EnterInternalFrame();
__ push(r2); // save the key
__ Push(r1, r2); // pass the receiver and the key
@@ -815,12 +772,15 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ b(ne, &lookup_monomorphic_cache);
GenerateDictionaryLoad(masm, &slow_load, r0, r2, r1, r3, r4);
- __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1, r0, r3);
+ __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, r0, r3);
__ jmp(&do_call);
__ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1, r0, r3);
- GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
+ __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, r0, r3);
+ GenerateMonomorphicCacheProbe(masm,
+ argc,
+ Code::KEYED_CALL_IC,
+ Code::kNoExtraICState);
// Fall through on miss.
__ bind(&slow_call);
@@ -830,7 +790,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
- __ IncrementCounter(&Counters::keyed_call_generic_slow, 1, r0, r3);
+ __ IncrementCounter(counters->keyed_call_generic_slow(), 1, r0, r3);
GenerateMiss(masm, argc);
__ bind(&index_string);
@@ -848,8 +808,7 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// Check if the name is a string.
Label miss;
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r2, &miss);
__ IsObjectJSStringType(r2, r0, &miss);
GenerateCallNormal(masm, argc);
@@ -873,7 +832,8 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
MONOMORPHIC);
- StubCache::GenerateProbe(masm, flags, r0, r2, r3, r4, r5);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, r0, r2, r3, r4, r5);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -908,244 +868,202 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
+ Isolate* isolate = masm->isolate();
- __ IncrementCounter(&Counters::load_miss, 1, r3, r4);
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
__ mov(r3, r0);
__ Push(r3, r2);
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
__ TailCallExternalReference(ref, 2, 1);
}
-// Returns the code marker, or the 0 if the code is not marked.
-static inline int InlinedICSiteMarker(Address address,
- Address* inline_end_address) {
- if (V8::UseCrankshaft()) return false;
-
- // If the instruction after the call site is not the pseudo instruction nop1
- // then this is not related to an inlined in-object property load. The nop1
- // instruction is located just after the call to the IC in the deferred code
- // handling the miss in the inlined code. After the nop1 instruction there is
- // a branch instruction for jumping back from the deferred code.
- Address address_after_call = address + Assembler::kCallTargetAddressOffset;
- Instr instr_after_call = Assembler::instr_at(address_after_call);
- int code_marker = MacroAssembler::GetCodeMarker(instr_after_call);
-
- // A negative result means the code is not marked.
- if (code_marker <= 0) return 0;
-
- Address address_after_nop = address_after_call + Assembler::kInstrSize;
- Instr instr_after_nop = Assembler::instr_at(address_after_nop);
- // There may be some reg-reg move and frame merging code to skip over before
- // the branch back from the DeferredReferenceGetKeyedValue code to the inlined
- // code.
- while (!Assembler::IsBranch(instr_after_nop)) {
- address_after_nop += Assembler::kInstrSize;
- instr_after_nop = Assembler::instr_at(address_after_nop);
- }
-
- // Find the end of the inlined code for handling the load.
- int b_offset =
- Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta;
- ASSERT(b_offset < 0); // Jumping back from deferred code.
- *inline_end_address = address_after_nop + b_offset;
-
- return code_marker;
-}
-
-
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // Find the end of the inlined code for handling the load if this is an
- // inlined IC call site.
- Address inline_end_address;
- if (InlinedICSiteMarker(address, &inline_end_address)
- != Assembler::PROPERTY_ACCESS_INLINED) {
- return false;
- }
- // Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
- // The immediate must be representable in 12 bits.
- ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
- Address ldr_property_instr_address =
- inline_end_address - Assembler::kInstrSize;
- ASSERT(Assembler::IsLdrRegisterImmediate(
- Assembler::instr_at(ldr_property_instr_address)));
- Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
- ldr_property_instr = Assembler::SetLdrRegisterImmediateOffset(
- ldr_property_instr, offset - kHeapObjectTag);
- Assembler::instr_at_put(ldr_property_instr_address, ldr_property_instr);
-
- // Indicate that code has changed.
- CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);
-
- // Patch the map check.
- // For PROPERTY_ACCESS_INLINED, the load map instruction is generated
- // 4 instructions before the end of the inlined code.
- // See codgen-arm.cc CodeGenerator::EmitNamedLoad.
- int ldr_map_offset = -4;
- Address ldr_map_instr_address =
- inline_end_address + ldr_map_offset * Assembler::kInstrSize;
- Assembler::set_target_address_at(ldr_map_instr_address,
- reinterpret_cast<Address>(map));
- return true;
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the map check
+ // later, we do not need to check for interceptors or whether it
+ // requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
+ __ b(lt, slow_case);
+
+ // Check that the key is a positive smi.
+ __ tst(key, Operand(0x80000001));
+ __ b(ne, slow_case);
+
+ // Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+ __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
+ __ cmp(key, Operand(scratch2));
+ __ b(cs, unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ const int kOffset =
+ FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+ __ mov(scratch3, Operand(kPointerSize >> 1));
+ __ mul(scratch3, key, scratch3);
+ __ add(scratch3, scratch3, Operand(kOffset));
+
+ __ ldr(scratch2, MemOperand(scratch1, scratch3));
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch2, scratch3);
+ __ b(eq, unmapped_case);
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ mov(scratch3, Operand(kPointerSize >> 1));
+ __ mul(scratch3, scratch2, scratch3);
+ __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+ return MemOperand(scratch1, scratch3);
}
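
The mov/mul/add sequence above folds smi untagging into the address computation: a smi stores its integer shifted left by one, so multiplying the tagged key by kPointerSize >> 1 yields index * kPointerSize directly. A minimal sketch of the arithmetic, assuming 32-bit pointers and V8's one-bit smi tag (the header constant is illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  const int32_t kPointerSize = 4;   // 32-bit ARM
  const int32_t kHeapObjectTag = 1;
  const int32_t kHeaderSize = 8;    // illustrative FixedArray header size
  const int32_t kOffset = kHeaderSize + 2 * kPointerSize - kHeapObjectTag;

  int32_t index = 3;                // untagged element index
  int32_t key = index << 1;         // the same index as a tagged smi

  // The positive-smi check: sign bit and tag bit must both be clear.
  assert((key & 0x80000001) == 0);

  // scratch3 = key * (kPointerSize >> 1) + kOffset, as in the stub above.
  int32_t scratch3 = key * (kPointerSize >> 1) + kOffset;
  assert(scratch3 == index * kPointerSize + kOffset);
  return 0;
}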
-bool LoadIC::PatchInlinedContextualLoad(Address address,
- Object* map,
- Object* cell,
- bool is_dont_delete) {
- // Find the end of the inlined code for handling the contextual load if
- // this is an inlined IC call site.
- Address inline_end_address;
- int marker = InlinedICSiteMarker(address, &inline_end_address);
- if (!((marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT) ||
- (marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE))) {
- return false;
- }
- // On ARM we don't rely on the is_dont_delete argument as the hint is already
- // embedded in the code marker.
- bool marker_is_dont_delete =
- marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE;
-
- // These are the offsets from the end of the inlined code.
- // See codegen-arm.cc CodeGenerator::EmitNamedLoad.
- int ldr_map_offset = marker_is_dont_delete ? -5: -8;
- int ldr_cell_offset = marker_is_dont_delete ? -2: -5;
- if (FLAG_debug_code && marker_is_dont_delete) {
- // Three extra instructions were generated to check for the_hole_value.
- ldr_map_offset -= 3;
- ldr_cell_offset -= 3;
- }
- Address ldr_map_instr_address =
- inline_end_address + ldr_map_offset * Assembler::kInstrSize;
- Address ldr_cell_instr_address =
- inline_end_address + ldr_cell_offset * Assembler::kInstrSize;
-
- // Patch the map check.
- Assembler::set_target_address_at(ldr_map_instr_address,
- reinterpret_cast<Address>(map));
- // Patch the cell address.
- Assembler::set_target_address_at(ldr_cell_instr_address,
- reinterpret_cast<Address>(cell));
-
- return true;
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map. The parameter_map register
+ // must be loaded with the parameter map of the arguments object and is
+ // overwritten.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmp(key, Operand(scratch));
+ __ b(cs, slow_case);
+ __ mov(scratch, Operand(kPointerSize >> 1));
+ __ mul(scratch, key, scratch);
+ __ add(scratch,
+ scratch,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ return MemOperand(backing_store, scratch);
}
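
Both helpers walk the same structure: slot 0 of the parameter map holds the context, slot 1 the arguments backing store, and each slot after that either a context index (mapped parameter) or the hole (unmapped). A freestanding model of the lookup, with invented names and no claim about real V8 object layout:

#include <cstddef>
#include <vector>

const int kHole = -1;  // stand-in for the hole marker

struct ArgumentsModel {
  std::vector<int> context;        // the function's context slots
  std::vector<int> backing_store;  // unmapped argument values
  std::vector<int> parameter_map;  // context index per parameter, or kHole
};

int Lookup(const ArgumentsModel& args, std::size_t key) {
  if (key < args.parameter_map.size() && args.parameter_map[key] != kHole) {
    return args.context[args.parameter_map[key]];  // mapped case
  }
  return args.backing_store.at(key);               // unmapped case
}

int main() {
  ArgumentsModel args{{42}, {0, 7}, {0, kHole}};   // arg 0 mapped, arg 1 not
  return (Lookup(args, 0) == 42 && Lookup(args, 1) == 7) ? 0 : 1;
}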
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // Find the end of the inlined code for the store if there is an
- // inlined version of the store.
- Address inline_end_address;
- if (InlinedICSiteMarker(address, &inline_end_address)
- != Assembler::PROPERTY_ACCESS_INLINED) {
- return false;
- }
-
- // Compute the address of the map load instruction.
- Address ldr_map_instr_address =
- inline_end_address -
- (CodeGenerator::GetInlinedNamedStoreInstructionsAfterPatch() *
- Assembler::kInstrSize);
-
- // Update the offsets if initializing the inlined store. No reason
- // to update the offsets when clearing the inlined version because
- // it will bail out in the map check.
- if (map != Heap::null_value()) {
- // Patch the offset in the actual store instruction.
- Address str_property_instr_address =
- ldr_map_instr_address + 3 * Assembler::kInstrSize;
- Instr str_property_instr = Assembler::instr_at(str_property_instr_address);
- ASSERT(Assembler::IsStrRegisterImmediate(str_property_instr));
- str_property_instr = Assembler::SetStrRegisterImmediateOffset(
- str_property_instr, offset - kHeapObjectTag);
- Assembler::instr_at_put(str_property_instr_address, str_property_instr);
-
- // Patch the offset in the add instruction that is part of the
- // write barrier.
- Address add_offset_instr_address =
- str_property_instr_address + Assembler::kInstrSize;
- Instr add_offset_instr = Assembler::instr_at(add_offset_instr_address);
- ASSERT(Assembler::IsAddRegisterImmediate(add_offset_instr));
- add_offset_instr = Assembler::SetAddRegisterImmediateOffset(
- add_offset_instr, offset - kHeapObjectTag);
- Assembler::instr_at_put(add_offset_instr_address, add_offset_instr);
-
- // Indicate that code has changed.
- CPU::FlushICache(str_property_instr_address, 2 * Assembler::kInstrSize);
- }
-
- // Patch the map check.
- Assembler::set_target_address_at(ldr_map_instr_address,
- reinterpret_cast<Address>(map));
-
- return true;
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ Label slow, notin;
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, r1, r0, r2, r3, r4, &notin, &slow);
+ __ ldr(r0, mapped_location);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in r2.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, r0, r2, r3, &slow);
+ __ ldr(r2, unmapped_location);
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ cmp(r2, r3);
+ __ b(eq, &slow);
+ __ mov(r0, r2);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
}
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
- if (V8::UseCrankshaft()) return false;
-
- Address inline_end_address;
- if (InlinedICSiteMarker(address, &inline_end_address)
- != Assembler::PROPERTY_ACCESS_INLINED) {
- return false;
- }
-
- // Patch the map check.
- Address ldr_map_instr_address =
- inline_end_address -
- (CodeGenerator::GetInlinedKeyedLoadInstructionsAfterPatch() *
- Assembler::kInstrSize);
- Assembler::set_target_address_at(ldr_map_instr_address,
- reinterpret_cast<Address>(map));
- return true;
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+ Label slow, notin;
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
+ __ str(r0, mapped_location);
+ __ add(r6, r3, r5);
+ __ RecordWrite(r3, r6, r9);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in r3.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
+ __ str(r0, unmapped_location);
+ __ add(r6, r3, r4);
+ __ RecordWrite(r3, r6, r9);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
}
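
Each str into the heap above is paired with a RecordWrite, the generational collector's write barrier. A toy illustration of what the barrier records (a remembered set of updated slots); it has no relation to V8's actual barrier implementation:

#include <set>

struct Heap {
  std::set<void**> remembered_set;  // slots the next scavenge must rescan
  void RecordWrite(void** slot) { remembered_set.insert(slot); }
};

// Mirrors the str-then-RecordWrite pairing: store the value, then tell the
// collector which slot changed so old-to-new pointers are not missed.
void StoreField(Heap* heap, void** slot, void* value) {
  *slot = value;
  heap->RecordWrite(slot);
}

int main() {
  Heap heap;
  void* field = nullptr;
  int payload = 7;
  StoreField(&heap, &field, &payload);
  return heap.remembered_set.size() == 1 ? 0 : 1;
}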
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
- if (V8::UseCrankshaft()) return false;
-
- // Find the end of the inlined code for handling the store if this is an
- // inlined IC call site.
- Address inline_end_address;
- if (InlinedICSiteMarker(address, &inline_end_address)
- != Assembler::PROPERTY_ACCESS_INLINED) {
- return false;
- }
-
- // Patch the map check.
- Address ldr_map_instr_address =
- inline_end_address -
- (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch *
- Assembler::kInstrSize);
- Assembler::set_target_address_at(ldr_map_instr_address,
- reinterpret_cast<Address>(map));
- return true;
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+ int argc) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label slow, notin;
+ // Load receiver.
+ __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, r1, r2, r3, r4, r5, &notin, &slow);
+ __ ldr(r1, mapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow, r3);
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in r3.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, r2, r3, r4, &slow);
+ __ ldr(r1, unmapped_location);
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ cmp(r1, r3);
+ __ b(eq, &slow);
+ GenerateFunctionTailCall(masm, argc, &slow, r3);
+ __ bind(&slow);
+ GenerateMiss(masm, argc);
}
Object* KeyedLoadIC_Miss(Arguments args);
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
+ Isolate* isolate = masm->isolate();
- __ IncrementCounter(&Counters::keyed_load_miss, 1, r3, r4);
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
__ Push(r1, r0);
- ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
+ // Perform tail call to the entry.
+ ExternalReference ref = force_generic
+ ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
+ : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
__ TailCallExternalReference(ref, 2, 1);
}
@@ -1170,11 +1088,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- r1 : receiver
// -----------------------------------
Label slow, check_string, index_smi, index_string, property_array_property;
- Label check_pixel_array, probe_dictionary, check_number_dictionary;
+ Label probe_dictionary, check_number_dictionary;
Register key = r0;
Register receiver = r1;
+ Isolate* isolate = masm->isolate();
+
// Check that the key is a smi.
__ JumpIfNotSmi(key, &check_string);
__ bind(&index_smi);
@@ -1184,35 +1104,18 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyedLoadReceiverCheck(
masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
- // Check the "has fast elements" bit in the receiver's map which is
- // now in r2.
- __ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset));
- __ tst(r3, Operand(1 << Map::kHasFastElements));
- __ b(eq, &check_pixel_array);
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(r2, r3, &check_number_dictionary);
GenerateFastArrayLoad(
masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
- __ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r2, r3);
__ Ret();
- // Check whether the elements is a pixel array.
- // r0: key
- // r1: receiver
- __ bind(&check_pixel_array);
-
- GenerateFastPixelArrayLoad(masm,
- r1,
- r0,
- r3,
- r4,
- r2,
- r5,
- r0,
- &check_number_dictionary,
- NULL,
- &slow);
-
__ bind(&check_number_dictionary);
+ __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
+
// Check whether the elements is a number dictionary.
// r0: key
// r3: elements map
@@ -1226,7 +1129,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Slow case, key and receiver still in r0 and r1.
__ bind(&slow);
- __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r2, r3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
+ 1, r2, r3);
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
@@ -1253,7 +1157,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load the key (consisting of map and symbol) from the cache and
// check for match.
- ExternalReference cache_keys = ExternalReference::keyed_lookup_cache_keys();
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(isolate);
__ mov(r4, Operand(cache_keys));
__ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
__ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); // Move r4 to symbol.
@@ -1268,8 +1173,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// r1 : receiver
// r2 : receiver's map
// r3 : lookup cache index
- ExternalReference cache_field_offsets
- = ExternalReference::keyed_lookup_cache_field_offsets();
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(isolate);
__ mov(r4, Operand(cache_field_offsets));
__ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
__ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
@@ -1281,7 +1186,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ add(r6, r6, r5); // Index from start of object.
__ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag.
__ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2));
- __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1, r2, r3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1, r2, r3);
__ Ret();
// Load property array property.
@@ -1289,7 +1195,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ldr(r1, FieldMemOperand(r1, JSObject::kPropertiesOffset));
__ add(r1, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r0, MemOperand(r1, r5, LSL, kPointerSizeLog2));
- __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1, r2, r3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1, r2, r3);
__ Ret();
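
The probe sequence above indexes a fixed-size cache keyed by (receiver map, symbol) and, on a hit, reads a precomputed field offset from a parallel table. A minimal direct-mapped analogue; the size, hash, and names are illustrative only:

#include <cstdint>
#include <utility>

const int kCacheSize = 64;
std::pair<const void*, const void*> cache_keys[kCacheSize];  // (map, symbol)
int cache_field_offsets[kCacheSize];

int HashIndex(const void* map, const void* symbol) {
  std::uintptr_t h = reinterpret_cast<std::uintptr_t>(map) ^
                     reinterpret_cast<std::uintptr_t>(symbol);
  return static_cast<int>(h % kCacheSize);
}

// Returns the cached in-object offset, or -1 to fall through to the
// dictionary probe and runtime, like the miss paths in the stub above.
int LookupFieldOffset(const void* map, const void* symbol) {
  int index = HashIndex(map, symbol);
  if (cache_keys[index] == std::make_pair(map, symbol)) {
    return cache_field_offsets[index];
  }
  return -1;
}

int main() {
  int map, symbol;  // addresses stand in for a map and a symbol
  int index = HashIndex(&map, &symbol);
  cache_keys[index] = {&map, &symbol};
  cache_field_offsets[index] = 12;
  return LookupFieldOffset(&map, &symbol) == 12 ? 0 : 1;
}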
// Do a quick inline probe of the receiver's dictionary, if it
@@ -1303,7 +1210,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
// Load the property to r0.
GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
- __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1, r2, r3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
+ 1, r2, r3);
__ Ret();
__ bind(&index_string);
@@ -1343,7 +1251,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm);
+ GenerateMiss(masm, false);
}
@@ -1376,15 +1284,18 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ Push(r1, r0); // Receiver, key.
// Perform tail call to the entry.
- __ TailCallExternalReference(ExternalReference(
- IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
+ masm->isolate()),
+ 2,
+ 1);
__ bind(&slow);
- GenerateMiss(masm);
+ GenerateMiss(masm, false);
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1395,7 +1306,29 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
- ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
+ ExternalReference ref = force_generic
+ ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
+ masm->isolate())
+ : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(r2, r1, r0);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
@@ -1428,7 +1361,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
- Label slow, fast, array, extra, check_pixel_array;
+ Label slow, fast, array, extra;
// Register usage.
Register value = r0;
@@ -1438,11 +1371,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
- __ tst(key, Operand(kSmiTagMask));
- __ b(ne, &slow);
+ __ JumpIfNotSmi(key, &slow);
// Check that the object isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, &slow);
+ __ JumpIfSmi(receiver, &slow);
// Get the map of the object.
__ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
@@ -1454,9 +1385,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ cmp(r4, Operand(JS_ARRAY_TYPE));
__ b(eq, &array);
- // Check that the object is some kind of JS object.
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+ // Check that the object is some kind of JSObject.
+ __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
__ b(lt, &slow);
+ __ cmp(r4, Operand(JS_PROXY_TYPE));
+ __ b(eq, &slow);
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ b(eq, &slow);
// Object case: Check key against length in the elements array.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -1464,7 +1399,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r4, ip);
- __ b(ne, &check_pixel_array);
+ __ b(ne, &slow);
// Check array bounds. Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
@@ -1478,24 +1413,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// r2: receiver.
GenerateRuntimeSetProperty(masm, strict_mode);
- // Check whether the elements is a pixel array.
- // r4: elements map.
- __ bind(&check_pixel_array);
- GenerateFastPixelArrayStore(masm,
- r2,
- r1,
- r0,
- elements,
- r4,
- r5,
- r6,
- false,
- false,
- NULL,
- &slow,
- &slow,
- &slow);
-
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
// element to the array by writing to array[array.length].
@@ -1559,7 +1476,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
NOT_IN_LOOP,
MONOMORPHIC,
strict_mode);
- StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
+
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, r1, r2, r3, r4, r5);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1577,7 +1496,8 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
__ Push(r1, r2, r0);
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss));
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
@@ -1622,7 +1542,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
// Prepare tail call to StoreIC_ArrayLength.
__ Push(receiver, value);
- ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
__ bind(&miss);
@@ -1643,11 +1564,13 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
GenerateStringDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
- __ IncrementCounter(&Counters::store_normal_hit, 1, r4, r5);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(),
+ 1, r4, r5);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(&Counters::store_normal_miss, 1, r4, r5);
+ __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
GenerateMiss(masm);
}
diff --git a/deps/v8/src/arm/jump-target-arm.cc b/deps/v8/src/arm/jump-target-arm.cc
deleted file mode 100644
index df370c443..000000000
--- a/deps/v8/src/arm/jump-target-arm.cc
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
- ASSERT(cgen()->has_valid_frame());
- // Live non-frame registers are not allowed at unconditional jumps
- // because we have no way of invalidating the corresponding results
- // which are still live in the C++ code.
- ASSERT(cgen()->HasValidEntryRegisters());
-
- if (entry_frame_set_) {
- if (entry_label_.is_bound()) {
- // If we already bound and generated code at the destination then it
- // is too late to ask for less optimistic type assumptions.
- ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
- }
- // There is already a frame expectation at the target.
- cgen()->frame()->MergeTo(&entry_frame_);
- cgen()->DeleteFrame();
- } else {
- // Clone the current frame to use as the expected one at the target.
- set_entry_frame(cgen()->frame());
- // Zap the fall-through frame since the jump was unconditional.
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- }
- if (entry_label_.is_bound()) {
- // You can't jump backwards to an already bound label unless you admitted
- // up front that this was a bidirectional jump target. Bidirectional jump
- // targets will zap their type info when bound in case some later virtual
- // frame with less precise type info branches to them.
- ASSERT(direction_ != FORWARD_ONLY);
- }
- __ jmp(&entry_label_);
-}
-
-
-void JumpTarget::DoBranch(Condition cond, Hint ignored) {
- ASSERT(cgen()->has_valid_frame());
-
- if (entry_frame_set_) {
- if (entry_label_.is_bound()) {
- // If we already bound and generated code at the destination then it
- // is too late to ask for less optimistic type assumptions.
- ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
- }
- // We have an expected frame to merge to on the backward edge.
- cgen()->frame()->MergeTo(&entry_frame_, cond);
- } else {
- // Clone the current frame to use as the expected one at the target.
- set_entry_frame(cgen()->frame());
- }
- if (entry_label_.is_bound()) {
- // You can't branch backwards to an already bound label unless you admitted
- // up front that this was a bidirectional jump target. Bidirectional jump
- // targets will zap their type info when bound in case some later virtual
- // frame with less precise type info branches to them.
- ASSERT(direction_ != FORWARD_ONLY);
- }
- __ b(cond, &entry_label_);
- if (cond == al) {
- cgen()->DeleteFrame();
- }
-}
-
-
-void JumpTarget::Call() {
- // Call is used to push the address of the catch block on the stack as
- // a return address when compiling try/catch and try/finally. We
- // fully spill the frame before making the call. The expected frame
- // at the label (which should be the only one) is the spilled current
- // frame plus an in-memory return address. The "fall-through" frame
- // at the return site is the spilled current frame.
- ASSERT(cgen()->has_valid_frame());
- // There are no non-frame references across the call.
- ASSERT(cgen()->HasValidEntryRegisters());
- ASSERT(!is_linked());
-
- // Calls are always 'forward' so we use a copy of the current frame (plus
- // one for a return address) as the expected frame.
- ASSERT(!entry_frame_set_);
- VirtualFrame target_frame = *cgen()->frame();
- target_frame.Adjust(1);
- set_entry_frame(&target_frame);
-
- __ bl(&entry_label_);
-}
-
-
-void JumpTarget::DoBind() {
- ASSERT(!is_bound());
-
- // Live non-frame registers are not allowed at the start of a basic
- // block.
- ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
- if (cgen()->has_valid_frame()) {
- if (direction_ != FORWARD_ONLY) cgen()->frame()->ForgetTypeInfo();
- // If there is a current frame we can use it on the fall through.
- if (!entry_frame_set_) {
- entry_frame_ = *cgen()->frame();
- entry_frame_set_ = true;
- } else {
- cgen()->frame()->MergeTo(&entry_frame_);
- // On fall through we may have to merge both ways.
- if (direction_ != FORWARD_ONLY) {
- // This will not need to adjust the virtual frame entries that are
- // register allocated since that was done above and they now match.
- // But it does need to adjust the entry_frame_ of this jump target
- // to make it potentially less optimistic. Later code can branch back
- // to this jump target and we need to assert that that code does not
- // have weaker assumptions about types.
- entry_frame_.MergeTo(cgen()->frame());
- }
- }
- } else {
- // If there is no current frame we must have an entry frame which we can
- // copy.
- ASSERT(entry_frame_set_);
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(&entry_frame_), &empty);
- }
-
- __ bind(&entry_label_);
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index c04e5ca8e..93a1865e7 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
#include "lithium-allocator-inl.h"
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
@@ -59,22 +61,21 @@ void LOsrEntry::MarkSpilledRegister(int allocation_index,
#ifdef DEBUG
void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as
- // temporaries and outputs because all registers
- // are blocked by the calling convention.
- // Inputs must use a fixed register.
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+ // Input operands must use a fixed register, a use-at-start policy, or
+ // a non-register policy.
ASSERT(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ ASSERT(operand->HasFixedPolicy() ||
+ operand->IsUsedAtStart());
}
- for (TempIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
}
}
#endif
@@ -110,21 +111,18 @@ void LInstruction::PrintTo(StringStream* stream) {
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- inputs_.PrintOperandsTo(stream);
+ for (int i = 0; i < inputs_.length(); i++) {
+ if (i > 0) stream->Add(" ");
+ inputs_[i]->PrintTo(stream);
+ }
}
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
- results_.PrintOperandsTo(stream);
-}
-
-
-template<typename T, int N>
-void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
- for (int i = 0; i < N; i++) {
+ for (int i = 0; i < results_.length(); i++) {
if (i > 0) stream->Add(" ");
- elems_[i]->PrintTo(stream);
+ results_[i]->PrintTo(stream);
}
}
@@ -149,7 +147,7 @@ bool LGap::IsRedundant() const {
}
-void LGap::PrintDataTo(StringStream* stream) const {
+void LGap::PrintDataTo(StringStream* stream) {
for (int i = 0; i < 4; i++) {
stream->Add("(");
if (parallel_moves_[i] != NULL) {
@@ -236,6 +234,13 @@ void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
}
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
InputAt(0)->PrintTo(stream);
@@ -299,6 +304,13 @@ void LStoreContextSlot::PrintDataTo(StringStream* stream) {
}
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
void LCallKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[r2] #%d / ", arity());
}
@@ -382,8 +394,9 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
-LChunk::LChunk(HGraph* graph)
+LChunk::LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
+ info_(info),
graph_(graph),
instructions_(32),
pointer_maps_(8),
@@ -420,8 +433,7 @@ void LChunk::MarkEmptyBlocks() {
LLabel* label = LLabel::cast(first_instr);
if (last_instr->IsGoto()) {
LGoto* goto_instr = LGoto::cast(last_instr);
- if (!goto_instr->include_stack_check() &&
- label->IsRedundant() &&
+ if (label->IsRedundant() &&
!label->is_loop_header()) {
bool can_eliminate = true;
for (int i = first + 1; i < last && can_eliminate; ++i) {
@@ -446,7 +458,7 @@ void LChunk::MarkEmptyBlocks() {
void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LGap* gap = new LGap(block);
+ LInstructionGap* gap = new LInstructionGap(block);
int index = -1;
if (instr->IsControl()) {
instructions_.Add(gap);
@@ -474,7 +486,7 @@ int LChunk::GetParameterStackSlot(int index) const {
// shift all parameter indexes down by the number of parameters, and
// make sure they end up negative so they are distinguishable from
// spill slots.
- int result = index - graph()->info()->scope()->num_parameters() - 1;
+ int result = index - info()->scope()->num_parameters() - 1;
ASSERT(result < 0);
return result;
}
@@ -482,7 +494,7 @@ int LChunk::GetParameterStackSlot(int index) const {
// A parameter relative to ebp in the arguments stub.
int LChunk::ParameterAt(int index) {
ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + graph()->info()->scope()->num_parameters() - index) *
+ return (1 + info()->scope()->num_parameters() - index) *
kPointerSize;
}
@@ -521,7 +533,7 @@ Representation LChunk::LookupLiteralRepresentation(
LChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new LChunk(graph());
+ chunk_ = new LChunk(info(), graph());
HPhase phase("Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@@ -538,8 +550,8 @@ LChunk* LChunkBuilder::Build() {
void LChunkBuilder::Abort(const char* format, ...) {
if (FLAG_trace_bailout) {
- SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
- PrintF("Aborting LChunk building in @\"%s\": ", *debug_name);
+ SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+ PrintF("Aborting LChunk building in @\"%s\": ", *name);
va_list arguments;
va_start(arguments, format);
OS::VPrint(format, arguments);
@@ -792,6 +804,11 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
}
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+ return AssignEnvironment(new LDeoptimize);
+}
+
+
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
return AssignEnvironment(new LDeoptimize);
}
@@ -805,7 +822,7 @@ LInstruction* LChunkBuilder::DoBit(Token::Value op,
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new LBitI(op, left, right));
+ return DefineAsRegister(new LBitI(op, left, right));
} else {
ASSERT(instr->representation().IsTagged());
ASSERT(instr->left()->representation().IsTagged());
@@ -844,27 +861,25 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
right = chunk_->DefineConstantOperand(constant);
constant_value = constant->Integer32Value() & 0x1f;
} else {
- right = UseRegister(right_value);
+ right = UseRegisterAtStart(right_value);
}
// Shift operations can only deoptimize if we do a logical shift
// by 0 and the result cannot be truncated to int32.
- bool can_deopt = (op == Token::SHR && constant_value == 0);
- if (can_deopt) {
- bool can_truncate = true;
- for (int i = 0; i < instr->uses()->length(); i++) {
- if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
- can_truncate = false;
+ bool may_deopt = (op == Token::SHR && constant_value == 0);
+ bool does_deopt = false;
+ if (may_deopt) {
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+ does_deopt = true;
break;
}
}
- can_deopt = !can_truncate;
}
LInstruction* result =
- DefineSameAsFirst(new LShiftI(op, left, right, can_deopt));
- if (can_deopt) AssignEnvironment(result);
- return result;
+ DefineAsRegister(new LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
}
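
The does_deopt computation above encodes a JavaScript corner case: >>> reinterprets its operand as uint32, so a logical shift by zero can produce a value outside int32 range unless every use truncates the result back to int32. A standalone illustration:

#include <cstdint>
#include <cstdio>

int main() {
  int32_t x = -1;
  // Models JavaScript's (x >>> 0) on an int32 input.
  uint32_t shifted = static_cast<uint32_t>(x) >> 0;
  // Prints 4294967295, which does not fit in an int32; a use that does
  // not truncate back to int32 therefore forces a deoptimization.
  std::printf("%u\n", shifted);
  return 0;
}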
@@ -873,10 +888,11 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
+ ASSERT(op != Token::MOD);
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
LArithmeticD* result = new LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
+ return DefineAsRegister(result);
}
@@ -1007,6 +1023,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
outer);
int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
+ if (hydrogen_env->is_special_index(i)) continue;
+
HValue* value = hydrogen_env->values()->at(i);
LOperand* op = NULL;
if (value->IsArgumentsObject()) {
@@ -1024,106 +1042,88 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- LInstruction* result = new LGoto(instr->FirstSuccessor()->block_id(),
- instr->include_stack_check());
- if (instr->include_stack_check()) result = AssignPointerMap(result);
- return result;
+ return new LGoto(instr->FirstSuccessor()->block_id());
}
LInstruction* LChunkBuilder::DoTest(HTest* instr) {
HValue* v = instr->value();
- if (v->EmitAtUses()) {
- if (v->IsClassOfTest()) {
- HClassOfTest* compare = HClassOfTest::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister());
- } else if (v->IsCompare()) {
- HCompare* compare = HCompare::cast(v);
- Token::Value op = compare->token();
- HValue* left = compare->left();
- HValue* right = compare->right();
- Representation r = compare->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else if (r.IsDouble()) {
- ASSERT(left->representation().IsDouble());
- ASSERT(right->representation().IsDouble());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else {
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- bool reversed = op == Token::GT || op == Token::LTE;
- LOperand* left_operand = UseFixed(left, reversed ? r0 : r1);
- LOperand* right_operand = UseFixed(right, reversed ? r1 : r0);
- LInstruction* result = new LCmpTAndBranch(left_operand,
- right_operand);
- return MarkAsCall(result, instr);
- }
- } else if (v->IsIsSmi()) {
- HIsSmi* compare = HIsSmi::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LIsSmiAndBranch(Use(compare->value()));
- } else if (v->IsHasInstanceType()) {
- HHasInstanceType* compare = HHasInstanceType::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasInstanceTypeAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsHasCachedArrayIndex()) {
- HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsIsNull()) {
- HIsNull* compare = HIsNull::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsIsObject()) {
- HIsObject* compare = HIsObject::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- LOperand* temp = TempRegister();
- return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()), temp);
- } else if (v->IsCompareJSObjectEq()) {
- HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
- return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsInstanceOf()) {
- HInstanceOf* instance_of = HInstanceOf::cast(v);
- LInstruction* result =
- new LInstanceOfAndBranch(UseFixed(instance_of->left(), r0),
- UseFixed(instance_of->right(), r1));
- return MarkAsCall(result, instr);
- } else if (v->IsTypeofIs()) {
- HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
- } else if (v->IsIsConstructCall()) {
- return new LIsConstructCallAndBranch(TempRegister());
+ if (!v->EmitAtUses()) return new LBranch(UseRegisterAtStart(v));
+ ASSERT(!v->HasSideEffects());
+ if (v->IsClassOfTest()) {
+ HClassOfTest* compare = HClassOfTest::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
+ TempRegister());
+ } else if (v->IsCompare()) {
+ HCompare* compare = HCompare::cast(v);
+ HValue* left = compare->left();
+ HValue* right = compare->right();
+ Representation r = compare->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(left->representation().IsInteger32());
+ ASSERT(right->representation().IsInteger32());
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
+ UseRegisterAtStart(right));
} else {
- if (v->IsConstant()) {
- if (HConstant::cast(v)->handle()->IsTrue()) {
- return new LGoto(instr->FirstSuccessor()->block_id());
- } else if (HConstant::cast(v)->handle()->IsFalse()) {
- return new LGoto(instr->SecondSuccessor()->block_id());
- }
- }
- Abort("Undefined compare before branch");
- return NULL;
+ ASSERT(r.IsDouble());
+ ASSERT(left->representation().IsDouble());
+ ASSERT(right->representation().IsDouble());
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
+ UseRegisterAtStart(right));
}
+ } else if (v->IsIsSmi()) {
+ HIsSmi* compare = HIsSmi::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LIsSmiAndBranch(Use(compare->value()));
+ } else if (v->IsIsUndetectable()) {
+ HIsUndetectable* compare = HIsUndetectable::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
+ TempRegister());
+ } else if (v->IsHasInstanceType()) {
+ HHasInstanceType* compare = HHasInstanceType::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()));
+ } else if (v->IsHasCachedArrayIndex()) {
+ HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(compare->value()));
+ } else if (v->IsIsNull()) {
+ HIsNull* compare = HIsNull::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LIsNullAndBranch(UseRegisterAtStart(compare->value()));
+ } else if (v->IsIsObject()) {
+ HIsObject* compare = HIsObject::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ LOperand* temp = TempRegister();
+ return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()), temp);
+ } else if (v->IsCompareObjectEq()) {
+ HCompareObjectEq* compare = HCompareObjectEq::cast(v);
+ return new LCmpObjectEqAndBranch(UseRegisterAtStart(compare->left()),
+ UseRegisterAtStart(compare->right()));
+ } else if (v->IsCompareConstantEq()) {
+ HCompareConstantEq* compare = HCompareConstantEq::cast(v);
+ return new LCmpConstantEqAndBranch(UseRegisterAtStart(compare->value()));
+ } else if (v->IsTypeofIs()) {
+ HTypeofIs* typeof_is = HTypeofIs::cast(v);
+ return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
+ } else if (v->IsIsConstructCall()) {
+ return new LIsConstructCallAndBranch(TempRegister());
+ } else if (v->IsConstant()) {
+ HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+ ? instr->FirstSuccessor()
+ : instr->SecondSuccessor();
+ return new LGoto(successor->block_id());
+ } else {
+ Abort("Undefined compare before branch");
+ return NULL;
}
- return new LBranch(UseRegisterAtStart(v));
}
+
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1178,8 +1178,13 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+}
+
+
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return DefineAsRegister(new LContext);
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
}
@@ -1208,35 +1213,39 @@ LInstruction* LChunkBuilder::DoCallConstantFunction(
}
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), r1);
+ argument_count_ -= instr->argument_count();
+ LInvokeFunction* result = new LInvokeFunction(function);
+ return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
BuiltinFunctionId op = instr->op();
- LOperand* input = UseRegisterAtStart(instr->value());
- LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
- LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- case kMathFloor:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathSqrt:
- return DefineSameAsFirst(result);
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathPowHalf:
- Abort("MathPowHalf LUnaryMathOperation not implemented");
- return NULL;
- case kMathLog:
- Abort("MathLog LUnaryMathOperation not implemented");
- return NULL;
- case kMathCos:
- Abort("MathCos LUnaryMathOperation not implemented");
- return NULL;
- case kMathSin:
- Abort("MathSin LUnaryMathOperation not implemented");
- return NULL;
- default:
- UNREACHABLE();
- return NULL;
+ if (op == kMathLog || op == kMathSin || op == kMathCos) {
+ LOperand* input = UseFixedDouble(instr->value(), d2);
+ LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
+ return MarkAsCall(DefineFixedDouble(result, d2), instr);
+ } else {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
+ LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
+ switch (op) {
+ case kMathAbs:
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ case kMathFloor:
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ case kMathSqrt:
+ return DefineAsRegister(result);
+ case kMathRound:
+ return AssignEnvironment(DefineAsRegister(result));
+ case kMathPowHalf:
+ return DefineAsRegister(result);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
}
}
@@ -1310,7 +1319,7 @@ LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
ASSERT(instr->value()->representation().IsInteger32());
ASSERT(instr->representation().IsInteger32());
- return DefineSameAsFirst(new LBitNotI(UseRegisterAtStart(instr->value())));
+ return DefineAsRegister(new LBitNotI(UseRegisterAtStart(instr->value())));
}
@@ -1329,7 +1338,7 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
return DoArithmeticD(Token::DIV, instr);
} else if (instr->representation().IsInteger32()) {
// TODO(1042) The fixed register allocation
- // is needed because we call GenericBinaryOpStub from
+ // is needed because we call TypeRecordingBinaryOpStub from
// the generated code, which requires registers r0
// and r1 to be used. We should remove that
// when we provide a native implementation.
@@ -1345,18 +1354,30 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
- // TODO(1042) The fixed register allocation
- // is needed because we call GenericBinaryOpStub from
- // the generated code, which requires registers r0
- // and r1 to be used. We should remove that
- // when we provide a native implementation.
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* value = UseFixed(instr->left(), r0);
- LOperand* divisor = UseFixed(instr->right(), r1);
- LInstruction* result = DefineFixed(new LModI(value, divisor), r0);
- result = AssignEnvironment(AssignPointerMap(result));
- return result;
+
+ LModI* mod;
+ if (instr->HasPowerOf2Divisor()) {
+ ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
+ LOperand* value = UseRegisterAtStart(instr->left());
+ mod = new LModI(value, UseOrConstant(instr->right()));
+ } else {
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ mod = new LModI(dividend,
+ divisor,
+ TempRegister(),
+ FixedTemp(d10),
+ FixedTemp(d11));
+ }
+
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanBeDivByZero)) {
+ return AssignEnvironment(DefineAsRegister(mod));
+ } else {
+ return DefineAsRegister(mod);
+ }
} else if (instr->representation().IsTagged()) {
return DoArithmeticT(Token::MOD, instr);
} else {
@@ -1376,16 +1397,22 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* left;
LOperand* right = UseOrConstant(instr->MostConstantOperand());
LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+ (instr->CheckFlag(HValue::kCanOverflow) ||
+ !right->IsConstantOperand())) {
+ left = UseRegister(instr->LeastConstantOperand());
temp = TempRegister();
+ } else {
+ left = UseRegisterAtStart(instr->LeastConstantOperand());
}
- LMulI* mul = new LMulI(left, right, temp);
- return AssignEnvironment(DefineSameAsFirst(mul));
+ return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp)));
+
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
+
} else {
return DoArithmeticT(Token::MUL, instr);
}
@@ -1399,7 +1426,7 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new LSubI(left, right);
- LInstruction* result = DefineSameAsFirst(sub);
+ LInstruction* result = DefineAsRegister(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
@@ -1419,7 +1446,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
LAddI* add = new LAddI(left, right);
- LInstruction* result = DefineSameAsFirst(add);
+ LInstruction* result = DefineAsRegister(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
@@ -1477,15 +1504,21 @@ LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
}
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
- HCompareJSObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEq(HCompareObjectEq* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
+ LCmpObjectEq* result = new LCmpObjectEq(left, right);
return DefineAsRegister(result);
}
+LInstruction* LChunkBuilder::DoCompareConstantEq(
+ HCompareConstantEq* instr) {
+ LOperand* left = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LCmpConstantEq(left));
+}
+
+
LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1510,6 +1543,14 @@ LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
}
+LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LIsUndetectable(value));
+}
+
+
LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1521,7 +1562,7 @@ LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
HGetCachedArrayIndex* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LGetCachedArrayIndex(value));
}
@@ -1549,9 +1590,10 @@ LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
}
-LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) {
+LInstruction* LChunkBuilder::DoExternalArrayLength(
+ HExternalArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LPixelArrayLength(array));
+ return DefineAsRegister(new LExternalArrayLength(array));
}
@@ -1561,10 +1603,16 @@ LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
}
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+ LOperand* object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LElementsKind(object));
+}
+
+
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new LValueOf(object, TempRegister());
- return AssignEnvironment(DefineSameAsFirst(result));
+ return AssignEnvironment(DefineAsRegister(result));
}
@@ -1587,6 +1635,19 @@ LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
}
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
@@ -1600,12 +1661,15 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = UseRegister(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
LInstruction* res = NULL;
- if (needs_check) {
- res = DefineSameAsFirst(new LTaggedToI(value, FixedTemp(d1)));
- } else {
+ if (!needs_check) {
res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
- }
- if (needs_check) {
+ } else {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
+ : NULL;
+ LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(d11)
+ : NULL;
+ res = DefineSameAsFirst(new LTaggedToI(value, temp1, temp2, temp3));
res = AssignEnvironment(res);
}
return res;
@@ -1625,7 +1689,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- LDoubleToI* res = new LDoubleToI(value, TempRegister());
+ LDoubleToI* res =
+ new LDoubleToI(value,
+ TempRegister(),
+ instr->CanTruncateToInt32() ? TempRegister() : NULL);
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
@@ -1651,7 +1718,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value, eq));
+ return AssignEnvironment(new LCheckNonSmi(value));
}
@@ -1672,7 +1739,7 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value, ne));
+ return AssignEnvironment(new LCheckSmi(value));
}
@@ -1689,6 +1756,49 @@ LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
}
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ return DefineAsRegister(new LClampDToUint8(reg, FixedTemp(d11)));
+ } else if (input_rep.IsInteger32()) {
+ return DefineAsRegister(new LClampIToUint8(reg));
+ } else {
+ ASSERT(input_rep.IsTagged());
+    // Register allocator doesn't (yet) support allocation of double
+    // temps. Reserve d11 explicitly.
+ LClampTToUint8* result = new LClampTToUint8(reg, FixedTemp(d11));
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+}
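The three clamp lowerings above share one scalar semantics — clamp to the [0, 255] range used by external pixel arrays — and differ only in the input representation. A minimal sketch of the double case, assuming simplified rounding and treating NaN as 0 (the helper name is illustrative, not part of the tree):

#include <cstdint>

static uint8_t ClampDoubleToUint8(double value) {
  if (!(value > 0.0)) return 0;    // NaN and negative values clamp to 0.
  if (value >= 255.0) return 255;  // Large values clamp to 255.
  return static_cast<uint8_t>(value + 0.5);  // Simplified rounding.
}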
+
+
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LDoubleToI* res = new LDoubleToI(reg, temp1, temp2);
+ return AssignEnvironment(DefineAsRegister(res));
+ } else if (input_rep.IsInteger32()) {
+    // Canonicalization should already have removed the hydrogen instruction in
+    // this case, since it is a no-op.
+ UNREACHABLE();
+ return NULL;
+ } else {
+ ASSERT(input_rep.IsTagged());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = FixedTemp(d11);
+ LTaggedToI* res = new LTaggedToI(reg, temp1, temp2, temp3);
+ return AssignEnvironment(DefineSameAsFirst(res));
+ }
+}
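Both non-trivial paths of DoToInt32 are truncating in the ECMA-262 ToInt32 sense, which is why the lowering reuses LDoubleToI and LTaggedToI instead of adding a new instruction. A scalar sketch of that semantics on an already-numeric input (illustrative helper, not V8 API; the final cast assumes two's complement):

#include <cmath>
#include <cstdint>

static int32_t ToInt32Semantics(double input) {
  if (std::isnan(input) || std::isinf(input)) return 0;
  double truncated = std::trunc(input);          // Truncate toward zero.
  double wrapped = std::fmod(truncated, 4294967296.0);
  if (wrapped < 0) wrapped += 4294967296.0;      // Fold into [0, 2^32).
  return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
}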
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
return new LReturn(UseFixed(instr->value(), r0));
}
@@ -1709,26 +1819,42 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
- LLoadGlobal* result = new LLoadGlobal();
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new LLoadGlobalCell;
return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
-LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* global_object = UseFixed(instr->global_object(), r0);
+ LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
if (instr->check_hole_value()) {
LOperand* temp = TempRegister();
LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(new LStoreGlobal(value, temp));
+ return AssignEnvironment(new LStoreGlobalCell(value, temp));
} else {
LOperand* value = UseRegisterAtStart(instr->value());
- return new LStoreGlobal(value, NULL);
+ return new LStoreGlobalCell(value, NULL);
}
}
+LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+ LOperand* global_object = UseFixed(instr->global_object(), r1);
+ LOperand* value = UseFixed(instr->value(), r0);
+ LStoreGlobalGeneric* result =
+ new LStoreGlobalGeneric(global_object, value);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LLoadContextSlot(context));
@@ -1755,6 +1881,21 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
+LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
+ HLoadNamedFieldPolymorphic* instr) {
+ ASSERT(instr->representation().IsTagged());
+ if (instr->need_generic()) {
+ LOperand* obj = UseFixed(instr->object(), r0);
+ LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+ } else {
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+}
+
+
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), r0);
LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), r0);
@@ -1775,10 +1916,10 @@ LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
}
-LInstruction* LChunkBuilder::DoLoadPixelArrayExternalPointer(
- HLoadPixelArrayExternalPointer* instr) {
+LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
+ HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadPixelArrayExternalPointer(input));
+ return DefineAsRegister(new LLoadExternalArrayPointer(input));
}
@@ -1789,20 +1930,31 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
- return AssignEnvironment(DefineSameAsFirst(result));
+ return AssignEnvironment(DefineAsRegister(result));
}
-LInstruction* LChunkBuilder::DoLoadPixelArrayElement(
- HLoadPixelArrayElement* instr) {
- ASSERT(instr->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
+ HLoadKeyedSpecializedArrayElement* instr) {
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ Representation representation(instr->representation());
+ ASSERT(
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
- LOperand* external_pointer =
- UseRegisterAtStart(instr->external_pointer());
- LOperand* key = UseRegisterAtStart(instr->key());
- LLoadPixelArrayElement* result =
- new LLoadPixelArrayElement(external_pointer, key);
- return DefineAsRegister(result);
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LLoadKeyedSpecializedArrayElement* result =
+ new LLoadKeyedSpecializedArrayElement(external_pointer, key);
+ LInstruction* load_instr = DefineAsRegister(result);
+  // An unsigned int array load might overflow and cause a deopt, so make
+  // sure it has an environment.
+ return (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
+ AssignEnvironment(load_instr) : load_instr;
}
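The special case exists because an EXTERNAL_UNSIGNED_INT_ELEMENTS load produces a uint32 while the lithium result is a signed int32: any element above 0x7fffffff has no int32 representation, so the generated code deoptimizes rather than silently going negative. In scalar terms (illustrative only):

#include <cstdint>

static bool LoadUint32AsInt32(uint32_t element, int32_t* out) {
  if (element > 0x7fffffff) return false;  // This is the deopt case.
  *out = static_cast<int32_t>(element);
  return true;
}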
@@ -1835,10 +1987,32 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
}
-LInstruction* LChunkBuilder::DoStorePixelArrayElement(
- HStorePixelArrayElement* instr) {
- Abort("DoStorePixelArrayElement not implemented");
- return NULL;
+LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
+ HStoreKeyedSpecializedArrayElement* instr) {
+ Representation representation(instr->value()->representation());
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->external_pointer()->representation().IsExternal());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ bool val_is_temp_register =
+ elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS;
+ LOperand* val = val_is_temp_register
+ ? UseTempRegister(instr->value())
+ : UseRegister(instr->value());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+
+ return new LStoreKeyedSpecializedArrayElement(external_pointer,
+ key,
+ val);
}
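UseTempRegister is picked for pixel and float element stores because the value register is clobbered before the memory write: pixel stores clamp the value to uint8 in place and float stores narrow the double to float32, so the allocator must not assume the input survives the instruction. The two in-place transforms, sketched as scalars (illustrative):

#include <cstdint>

static uint8_t ClampForPixelStore(int32_t value) {
  return value < 0 ? 0 : (value > 255 ? 255 : static_cast<uint8_t>(value));
}

static float NarrowForFloatStore(double value) {
  return static_cast<float>(value);  // double -> float32 narrowing.
}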
@@ -1879,6 +2053,13 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return MarkAsCall(DefineFixed(new LStringAdd(left, right), r0), instr);
+}
+
+
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegisterOrConstant(instr->index());
@@ -1887,6 +2068,13 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
}
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+ LOperand* char_code = UseRegister(instr->value());
+ LStringCharFromCode* result = new LStringCharFromCode(char_code);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LOperand* string = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LStringLength(string));
@@ -1968,6 +2156,13 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+ LOperand* object = UseFixed(instr->value(), r0);
+ LToFastProperties* result = new LToFastProperties(object);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LTypeof* result = new LTypeof(UseFixed(instr->value(), r0));
return MarkAsCall(DefineFixed(result, r0), instr);
@@ -2000,8 +2195,6 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
}
}
- ASSERT(env->length() == instr->environment_length());
-
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
if (pending_deoptimization_ast_id_ == instr->ast_id()) {
@@ -2018,7 +2211,12 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- return MarkAsCall(new LStackCheck, instr);
+ if (instr->is_function_entry()) {
+ return MarkAsCall(new LStackCheck, instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ return AssignEnvironment(AssignPointerMap(new LStackCheck));
+ }
}
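Splitting stack checks by kind lets function entries keep the call-based lowering, while loop back edges become a compare-and-branch that only needs a pointer map and an environment to resume after an interruption. What either flavor ultimately guards, as a scalar sketch (names are illustrative, not V8 API):

#include <cstdint>

static bool StackGuardTriggered(uintptr_t sp, uintptr_t stack_limit) {
  // The generated code compares sp against the stack limit and calls
  // into the runtime when the limit has been lowered (interrupt/overflow).
  return sp < stack_limit;
}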
@@ -2027,8 +2225,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->function(),
- false,
- undefined);
+ undefined,
+ instr->call_kind());
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
@@ -2042,4 +2240,12 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
}
+LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+ LOperand* key = UseRegisterAtStart(instr->key());
+ LOperand* object = UseRegisterAtStart(instr->object());
+ LIn* result = new LIn(key, object);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 77d6b71a9..c864d20f3 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -32,6 +32,7 @@
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
+#include "utils.h"
namespace v8 {
namespace internal {
@@ -69,18 +70,23 @@ class LCodeGen;
V(CallStub) \
V(CheckFunction) \
V(CheckInstanceType) \
+ V(CheckNonSmi) \
V(CheckMap) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8) \
V(ClassOfTest) \
V(ClassOfTestAndBranch) \
+ V(CmpConstantEq) \
+ V(CmpConstantEqAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
- V(CmpJSObjectEq) \
- V(CmpJSObjectEqAndBranch) \
+ V(CmpObjectEq) \
+ V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
- V(CmpTAndBranch) \
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
@@ -89,9 +95,10 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(ElementsKind) \
+ V(ExternalArrayLength) \
V(FixedArrayLength) \
V(FunctionLiteral) \
- V(Gap) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
V(GlobalReceiver) \
@@ -100,29 +107,37 @@ class LCodeGen;
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
+ V(In) \
V(InstanceOf) \
- V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsConstructCall) \
+ V(IsConstructCallAndBranch) \
V(IsNull) \
V(IsNullAndBranch) \
V(IsObject) \
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
+ V(IsUndetectable) \
+ V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
+ V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
- V(LoadGlobal) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
+ V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
+ V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
- V(LoadPixelArrayElement) \
- V(LoadPixelArrayExternalPointer) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -132,7 +147,6 @@ class LCodeGen;
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
- V(PixelArrayLength) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
@@ -142,40 +156,40 @@ class LCodeGen;
V(SmiUntag) \
V(StackCheck) \
V(StoreContextSlot) \
- V(StoreGlobal) \
+ V(StoreGlobalCell) \
+ V(StoreGlobalGeneric) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
+ V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
+ V(StringAdd) \
V(StringCharCodeAt) \
+ V(StringCharFromCode) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
+ V(ThisFunction) \
V(Throw) \
+ V(ToFastProperties) \
V(Typeof) \
V(TypeofIs) \
V(TypeofIsAndBranch) \
- V(IsConstructCall) \
- V(IsConstructCallAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf)
-#define DECLARE_INSTRUCTION(type) \
- virtual bool Is##type() const { return true; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const { return LInstruction::k##type; } \
+ virtual void CompileToNative(LCodeGen* generator); \
+ virtual const char* Mnemonic() const { return mnemonic; } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- DECLARE_INSTRUCTION(type)
-
-
#define DECLARE_HYDROGEN_ACCESSOR(type) \
H##type* hydrogen() const { \
return H##type::cast(hydrogen_value()); \
@@ -197,10 +211,25 @@ class LInstruction: public ZoneObject {
virtual void PrintDataTo(StringStream* stream) = 0;
virtual void PrintOutputOperandTo(StringStream* stream) = 0;
- // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
- LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
+ enum Opcode {
+ // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+ kNumberOfInstructions
+#undef DECLARE_OPCODE
+ };
+
+ virtual Opcode opcode() const = 0;
+
+ // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+ // Declare virtual predicates for instructions that don't have
+ // an opcode.
+ virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
@@ -258,37 +287,6 @@ class LInstruction: public ZoneObject {
};
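The rewrite trades one virtual Is##type per instruction for a single virtual opcode() plus non-virtual comparisons, so a type test compiles to an inline integer compare instead of a virtual dispatch. Call sites are unchanged; for example (a sketch using declarations from this header):

void Inspect(LInstruction* instr) {
  if (instr->IsGoto()) {            // Inlined: opcode() == kGoto.
    LGoto* g = LGoto::cast(instr);  // Checked reinterpret_cast.
    PrintF("goto B%d\n", g->block_id());
  } else if (instr->IsGap()) {      // Still virtual: LGap has subclasses
    LGap::cast(instr);              // and therefore no single opcode.
  }
}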
-template<typename ElementType, int NumElements>
-class OperandContainer {
- public:
- OperandContainer() {
- for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
- }
- int length() { return NumElements; }
- ElementType& operator[](int i) {
- ASSERT(i < length());
- return elems_[i];
- }
- void PrintOperandsTo(StringStream* stream);
-
- private:
- ElementType elems_[NumElements];
-};
-
-
-template<typename ElementType>
-class OperandContainer<ElementType, 0> {
- public:
- int length() { return 0; }
- void PrintOperandsTo(StringStream* stream) { }
- ElementType& operator[](int i) {
- UNREACHABLE();
- static ElementType t = 0;
- return t;
- }
-};
-
-
// R = number of result operands (0 or 1).
// I = number of input operands.
// T = number of temporary operands.
@@ -311,9 +309,9 @@ class LTemplateInstruction: public LInstruction {
virtual void PrintOutputOperandTo(StringStream* stream);
protected:
- OperandContainer<LOperand*, R> results_;
- OperandContainer<LOperand*, I> inputs_;
- OperandContainer<LOperand*, T> temps_;
+ EmbeddedContainer<LOperand*, R> results_;
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
};
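The per-port OperandContainer removed above is replaced by a shared EmbeddedContainer in utils.h (hence the new include at the top of this header). Its shape is essentially the removed template; roughly (a sketch, not verbatim utils.h code):

template <typename ElementType, int NumElements>
class EmbeddedContainer {
 public:
  int length() { return NumElements; }
  ElementType& operator[](int i) {
    ASSERT(i < length());
    return elems_[i];
  }
 private:
  ElementType elems_[NumElements];
};

// Zero-element specialization, so instructions without results,
// inputs, or temps carry no storage for them.
template <typename ElementType>
class EmbeddedContainer<ElementType, 0> {
 public:
  int length() { return 0; }
  ElementType& operator[](int i) {
    UNREACHABLE();
    static ElementType t = 0;
    return t;
  }
};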
@@ -327,8 +325,13 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
parallel_moves_[AFTER] = NULL;
}
- DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
- virtual void PrintDataTo(StringStream* stream) const;
+ // Can't use the DECLARE-macro here because of sub-classes.
+ virtual bool IsGap() const { return true; }
+ virtual void PrintDataTo(StringStream* stream);
+ static LGap* cast(LInstruction* instr) {
+ ASSERT(instr->IsGap());
+ return reinterpret_cast<LGap*>(instr);
+ }
bool IsRedundant() const;
@@ -358,21 +361,26 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
+class LInstructionGap: public LGap {
+ public:
+ explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
class LGoto: public LTemplateInstruction<0, 0, 0> {
public:
- LGoto(int block_id, bool include_stack_check = false)
- : block_id_(block_id), include_stack_check_(include_stack_check) { }
+ explicit LGoto(int block_id) : block_id_(block_id) { }
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int block_id() const { return block_id_; }
- bool include_stack_check() const { return include_stack_check_; }
private:
int block_id_;
- bool include_stack_check_;
};
@@ -446,7 +454,6 @@ class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
- DECLARE_INSTRUCTION(ControlInstruction)
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
@@ -519,11 +526,29 @@ class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
};
-class LModI: public LTemplateInstruction<1, 2, 0> {
+class LModI: public LTemplateInstruction<1, 2, 3> {
public:
- LModI(LOperand* left, LOperand* right) {
+  // Used when the right-hand side is a constant power of 2.
+  LModI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
+ temps_[0] = NULL;
+ temps_[1] = NULL;
+ temps_[2] = NULL;
+ }
+
+ // Used for the standard case.
+ LModI(LOperand* left,
+ LOperand* right,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
}
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
@@ -607,26 +632,49 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
+class LCmpObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
- LCmpJSObjectEq(LOperand* left, LOperand* right) {
+ LCmpObjectEq(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEq, "cmp-object-eq")
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
- "cmp-jsobject-eq-and-branch")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+ "cmp-object-eq-and-branch")
+};
+
+
+class LCmpConstantEq: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCmpConstantEq(LOperand* left) {
+ inputs_[0] = left;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpConstantEq, "cmp-constant-eq")
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
+};
+
+
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpConstantEqAndBranch(LOperand* left) {
+ inputs_[0] = left;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+ "cmp-constant-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
};
@@ -657,7 +705,7 @@ class LIsNullAndBranch: public LControlInstruction<1, 0> {
};
-class LIsObject: public LTemplateInstruction<1, 1, 1> {
+class LIsObject: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsObject(LOperand* value) {
inputs_[0] = value;
@@ -667,7 +715,7 @@ class LIsObject: public LTemplateInstruction<1, 1, 1> {
};
-class LIsObjectAndBranch: public LControlInstruction<1, 2> {
+class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -703,6 +751,31 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
};
+class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LIsUndetectable(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
+};
+
+
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasInstanceType(LOperand* value) {
@@ -728,25 +801,25 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
};
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LHasCachedArrayIndex(LOperand* value) {
+ explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
};
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LGetCachedArrayIndex(LOperand* value) {
+ explicit LHasCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
};
@@ -804,20 +877,6 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
};
-class LCmpTAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpTAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
@@ -829,17 +888,6 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
- public:
- LInstanceOfAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-};
-
-
class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
@@ -991,14 +1039,14 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
};
-class LPixelArrayLength: public LTemplateInstruction<1, 1, 0> {
+class LExternalArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LPixelArrayLength(LOperand* value) {
+ explicit LExternalArrayLength(LOperand* value) {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel-array-length")
- DECLARE_HYDROGEN_ACCESSOR(PixelArrayLength)
+ DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength, "external-array-length")
+ DECLARE_HYDROGEN_ACCESSOR(ExternalArrayLength)
};
@@ -1013,6 +1061,17 @@ class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
};
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LElementsKind(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
+};
+
+
class LValueOf: public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
@@ -1079,6 +1138,7 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
Token::Value op() const { return op_; }
+ virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
@@ -1095,6 +1155,7 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
@@ -1126,6 +1187,19 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
+class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedFieldPolymorphic(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
+                               "load-named-field-polymorphic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
+
+ LOperand* object() { return inputs_[0]; }
+};
+
+
class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
@@ -1163,14 +1237,14 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadPixelArrayExternalPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LLoadPixelArrayExternalPointer(LOperand* object) {
+ explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
- "load-pixel-array-external-pointer")
+ DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
+ "load-external-array-pointer")
};
@@ -1189,19 +1263,23 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadPixelArrayElement: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadPixelArrayElement(LOperand* external_pointer, LOperand* key) {
+ LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
- "load-pixel-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadPixelArrayElement)
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
+ "load-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
};
@@ -1219,22 +1297,55 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
public:
- LStoreGlobal(LOperand* value, LOperand* temp) {
+ explicit LLoadGlobalGeneric(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ LOperand* global_object() { return inputs_[0]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreGlobalGeneric(LOperand* global_object, LOperand* value) {
+ inputs_[0] = global_object;
+ inputs_[1] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
+
+ LOperand* global_object() { return InputAt(0); }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ LOperand* value() { return InputAt(1); }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -1283,6 +1394,11 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+};
+
+
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
@@ -1337,6 +1453,23 @@ class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
};
+class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInvokeFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ LOperand* function() { return inputs_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallKeyed(LOperand* key) {
@@ -1418,7 +1551,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- Runtime::Function* function() const { return hydrogen()->function(); }
+ const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
};
@@ -1456,30 +1589,36 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
+class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
public:
- explicit LDoubleToI(LOperand* value, LOperand* temp1) {
+ LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp1;
+ temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
+class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
public:
- LTaggedToI(LOperand* value, LOperand* temp) {
+ LTaggedToI(LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
inputs_[0] = value;
- temps_[0] = temp;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
}
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1502,6 +1641,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -1559,6 +1699,7 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -1591,15 +1732,55 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key,
+ LOperand* val) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
+ "store-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
};
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringAdd(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+};
+
+
class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
public:
LStringCharCodeAt(LOperand* string, LOperand* index) {
@@ -1615,6 +1796,19 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
};
+class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LStringCharFromCode(LOperand* char_code) {
+ inputs_[0] = char_code;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+
+ LOperand* char_code() { return inputs_[0]; }
+};
+
+
class LStringLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LStringLength(LOperand* string) {
@@ -1678,20 +1872,59 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
public:
- LCheckSmi(LOperand* value, Condition condition)
- : condition_(condition) {
+ explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
}
- Condition condition() const { return condition_; }
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const {
- return (condition_ == eq) ? "check-non-smi" : "check-smi";
+
+class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) {
+ inputs_[0] = value;
}
- private:
- Condition condition_;
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+};
+
+
+class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LClampDToUint8(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampIToUint8(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LClampTToUint8(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
@@ -1725,6 +1958,17 @@ class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
};
+class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LToFastProperties(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+ DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
class LTypeof: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
@@ -1823,13 +2067,33 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
class LStackCheck: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
+};
+
+
+class LIn: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LIn(LOperand* key, LOperand* object) {
+ inputs_[0] = key;
+ inputs_[1] = object;
+ }
+
+ LOperand* key() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(In, "in")
};
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
- explicit LChunk(HGraph* graph);
+ explicit LChunk(CompilationInfo* info, HGraph* graph);
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
@@ -1842,6 +2106,7 @@ class LChunk: public ZoneObject {
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
+ CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
void AddGapMove(int index, LOperand* from, LOperand* to);
@@ -1878,6 +2143,7 @@ class LChunk: public ZoneObject {
private:
int spill_slot_count_;
+ CompilationInfo* info_;
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
@@ -1887,8 +2153,9 @@ class LChunk: public ZoneObject {
class LChunkBuilder BASE_EMBEDDED {
public:
- LChunkBuilder(HGraph* graph, LAllocator* allocator)
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
+ info_(info),
graph_(graph),
status_(UNUSED),
current_instruction_(NULL),
@@ -1917,6 +2184,7 @@ class LChunkBuilder BASE_EMBEDDED {
};
LChunk* chunk() const { return chunk_; }
+ CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
bool is_unused() const { return status_ == UNUSED; }
@@ -2023,6 +2291,7 @@ class LChunkBuilder BASE_EMBEDDED {
HArithmeticBinaryOperation* instr);
LChunk* chunk_;
+ CompilationInfo* info_;
HGraph* const graph_;
Status status_;
HInstruction* current_instruction_;
@@ -2038,7 +2307,6 @@ class LChunkBuilder BASE_EMBEDDED {
};
#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_INSTRUCTION
#undef DECLARE_CONCRETE_INSTRUCTION
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 1ec2b9842..e23bad268 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
#include "arm/lithium-codegen-arm.h"
#include "arm/lithium-gap-resolver-arm.h"
#include "code-stubs.h"
@@ -34,7 +36,7 @@ namespace v8 {
namespace internal {
-class SafepointGenerator : public PostCallGenerator {
+class SafepointGenerator : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -44,7 +46,24 @@ class SafepointGenerator : public PostCallGenerator {
deoptimization_index_(deoptimization_index) { }
virtual ~SafepointGenerator() { }
- virtual void Generate() {
+ virtual void BeforeCall(int call_size) const {
+ ASSERT(call_size >= 0);
+ // Ensure that we have enough space after the previous safepoint position
+ // for the generated code there.
+ int call_end = codegen_->masm()->pc_offset() + call_size;
+ int prev_jump_end =
+ codegen_->LastSafepointEnd() + Deoptimizer::patch_size();
+ if (call_end < prev_jump_end) {
+ int padding_size = prev_jump_end - call_end;
+ ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+ while (padding_size > 0) {
+ codegen_->masm()->nop();
+ padding_size -= Assembler::kInstrSize;
+ }
+ }
+ }
+
+ virtual void AfterCall() const {
codegen_->RecordSafepoint(pointers_, deoptimization_index_);
}
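BeforeCall pads with nops so that no call site ends inside the patch region of the previous safepoint: lazy deoptimization later overwrites each safepoint with a Deoptimizer::patch_size() jump, and two back-to-back calls could otherwise leave too little room for the patch. The arithmetic, spelled out (illustrative helper):

static int PaddingBytes(int pc_offset, int call_size,
                        int last_safepoint_end, int patch_size) {
  int call_end = pc_offset + call_size;
  int prev_jump_end = last_safepoint_end + patch_size;
  // Pad only when the upcoming call would end inside the region
  // reserved for patching the previous safepoint.
  return (call_end < prev_jump_end) ? prev_jump_end - call_end : 0;
}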
@@ -66,13 +85,14 @@ bool LCodeGen::GenerateCode() {
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
+ GenerateDeoptJumpTable() &&
GenerateSafepointTable();
}
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
- code->set_stack_slots(StackSlotCount());
+ code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -81,8 +101,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
void LCodeGen::Abort(const char* format, ...) {
if (FLAG_trace_bailout) {
- SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
- PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
+ SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+ PrintF("Aborting LCodeGen in @\"%s\": ", *name);
va_list arguments;
va_start(arguments, format);
OS::VPrint(format, arguments);
@@ -126,11 +146,25 @@ bool LCodeGen::GeneratePrologue() {
// fp: Caller's frame pointer.
// lr: Caller's pc.
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). r5 is zero for method calls and non-zero for
+ // function calls.
+ if (info_->is_strict_mode() || info_->is_native()) {
+ Label ok;
+ __ cmp(r5, Operand(0));
+ __ b(eq, &ok);
+ int receiver_offset = scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ str(r2, MemOperand(sp, receiver_offset));
+ __ bind(&ok);
+ }
+
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
__ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
// Reserve space for the stack slots needed by the code.
- int slots = StackSlotCount();
+ int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
__ mov(r0, Operand(slots));
@@ -155,7 +189,7 @@ bool LCodeGen::GeneratePrologue() {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
// Context is returned in both r0 and cp. It replaces the context
@@ -230,13 +264,43 @@ bool LCodeGen::GenerateDeferredCode() {
__ jmp(code->exit());
}
- // Force constant pool emission at the end of deferred code to make
- // sure that no constant pools are emitted after the official end of
- // the instruction sequence.
+  // Force constant pool emission at the end of the deferred code to make
+  // sure that no constant pools are emitted afterwards.
masm()->CheckConstPool(true, false);
- // Deferred code is the last part of the instruction sequence. Mark
- // the generated code as done unless we bailed out.
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeoptJumpTable() {
+  // Check that the jump table is accessible from everywhere in the function
+  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
+  // immediate of a branch instruction.
+  // To simplify we consider the code size from the first instruction to the
+  // end of the jump table. We also don't consider the pc load delta.
+  // Each entry in the jump table generates one instruction and inlines one
+  // 32-bit data word after it.
+ if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
+ deopt_jump_table_.length() * 2)) {
+ Abort("Generated code is too large");
+ }
+
+ // Block the constant pool emission during the jump table emission.
+ __ BlockConstPoolFor(deopt_jump_table_.length());
+  __ RecordComment("[ Deoptimization jump table");
+ Label table_start;
+ __ bind(&table_start);
+ for (int i = 0; i < deopt_jump_table_.length(); i++) {
+ __ bind(&deopt_jump_table_[i].label);
+ __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
+ __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
+ }
+ ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
+ deopt_jump_table_.length() * 2);
+ __ RecordComment("]");
+
+ // The deoptimization jump table is the last part of the instruction
+ // sequence. Mark the generated code as done unless we bailed out.
if (!is_aborted()) status_ = DONE;
return !is_aborted();
}
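Each table entry is a pc-relative load followed by the raw deopt entry address: on ARM, reading pc during an instruction at address A yields A + 8 (Assembler::kPcLoadDelta), so the offset kInstrSize - kPcLoadDelta == -4 makes the ldr target exactly the word emitted right after it. Checking the arithmetic (illustrative, with the ARM constants assumed):

#include <cstdint>

static uintptr_t InlinedWordAddress(uintptr_t ldr_address) {
  const int kInstrSize = 4;    // One ARM instruction.
  const int kPcLoadDelta = 8;  // pc reads 8 bytes ahead of the instruction.
  return (ldr_address + kPcLoadDelta) + (kInstrSize - kPcLoadDelta);
  // == ldr_address + 4: the dd() word following the ldr.
}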
@@ -244,7 +308,7 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
- safepoints_.Emit(masm(), StackSlotCount());
+ safepoints_.Emit(masm(), GetStackSlotCount());
return !is_aborted();
}
@@ -440,7 +504,7 @@ void LCodeGen::AddToTranslation(Translation* translation,
translation->StoreDoubleStackSlot(op->index());
} else if (op->IsArgument()) {
ASSERT(is_tagged);
- int src_index = StackSlotCount() + op->index();
+ int src_index = GetStackSlotCount() + op->index();
translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
@@ -481,7 +545,7 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
}
-void LCodeGen::CallRuntime(Runtime::Function* function,
+void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr) {
ASSERT(instr != NULL);
@@ -576,19 +640,18 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
return;
}
+ if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
+
if (cc == al) {
- if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- if (FLAG_trap_on_deopt) {
- Label done;
- __ b(&done, NegateCondition(cc));
- __ stop("trap_on_deopt");
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- __ bind(&done);
- } else {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc);
+    // We often have several deopts to the same entry; reuse the last
+    // jump entry if this is the case.
+ if (deopt_jump_table_.is_empty() ||
+ (deopt_jump_table_.last().address != entry)) {
+ deopt_jump_table_.Add(JumpTableEntry(entry));
}
+ __ b(cc, &deopt_jump_table_.last().label);
}
}
@@ -598,14 +661,14 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
if (length == 0) return;
ASSERT(FLAG_deopt);
Handle<DeoptimizationInputData> data =
- Factory::NewDeoptimizationInputData(length, TENURED);
+ factory()->NewDeoptimizationInputData(length, TENURED);
Handle<ByteArray> translations = translations_.CreateByteArray();
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals =
- Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
@@ -707,7 +770,7 @@ void LCodeGen::RecordSafepointWithRegistersAndDoubles(
void LCodeGen::RecordPosition(int position) {
- if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+ if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
}
@@ -720,7 +783,7 @@ void LCodeGen::DoLabel(LLabel* label) {
}
__ bind(label->label());
current_block_ = label->block_id();
- LCodeGen::DoGap(label);
+ DoGap(label);
}
@@ -746,6 +809,11 @@ void LCodeGen::DoGap(LGap* gap) {
}
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+ DoGap(instr);
+}
+
+
void LCodeGen::DoParameter(LParameter* instr) {
// Nothing to do.
}
@@ -769,15 +837,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringCharAt: {
- StringCharAtStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::MathPow: {
- Abort("MathPowStub unimplemented.");
- break;
- }
case CodeStub::NumberToString: {
NumberToStringStub stub;
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -795,7 +854,8 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
}
case CodeStub::TranscendentalCache: {
__ ldr(r0, MemOperand(sp, 0));
- TranscendentalCacheStub stub(instr->transcendental_type());
+ TranscendentalCacheStub stub(instr->transcendental_type(),
+ TranscendentalCacheStub::TAGGED);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
@@ -811,55 +871,92 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
- class DeferredModI: public LDeferredCode {
- public:
- DeferredModI(LCodeGen* codegen, LModI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredBinaryOpStub(instr_, Token::MOD);
+ if (instr->hydrogen()->HasPowerOf2Divisor()) {
+ Register dividend = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ int32_t divisor =
+ HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+
+ if (divisor < 0) divisor = -divisor;
+
+ Label positive_dividend, done;
+ __ cmp(dividend, Operand(0));
+ __ b(pl, &positive_dividend);
+ __ rsb(result, dividend, Operand(0));
+ __ and_(result, result, Operand(divisor - 1), SetCC);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
}
- private:
- LModI* instr_;
- };
+ __ rsb(result, result, Operand(0));
+ __ b(&done);
+ __ bind(&positive_dividend);
+ __ and_(result, dividend, Operand(divisor - 1));
+ __ bind(&done);
+ return;
+ }
+
// These registers hold untagged 32 bit values.
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
+
Register scratch = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
+ DwVfpRegister dividend = ToDoubleRegister(instr->TempAt(1));
+ DwVfpRegister divisor = ToDoubleRegister(instr->TempAt(2));
+ DwVfpRegister quotient = double_scratch0();
+
+ ASSERT(!dividend.is(divisor));
+ ASSERT(!dividend.is(quotient));
+ ASSERT(!divisor.is(quotient));
+ ASSERT(!scratch.is(left));
+ ASSERT(!scratch.is(right));
+ ASSERT(!scratch.is(result));
+
+ Label done, vfp_modulo, both_positive, right_negative;
- Label deoptimize, done;
// Check for x % 0.
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ tst(right, Operand(right));
- __ b(eq, &deoptimize);
+ __ cmp(right, Operand(0));
+ DeoptimizeIf(eq, instr->environment());
}
- // Check for (0 % -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label ok;
- __ tst(left, Operand(left));
- __ b(ne, &ok);
- __ tst(right, Operand(right));
- __ b(pl, &ok);
- __ b(al, &deoptimize);
- __ bind(&ok);
- }
+ __ Move(result, left);
- // Try a few common cases before using the stub.
- Label call_stub;
- const int kUnfolds = 3;
- // Skip if either side is negative.
+ // (0 % x) must yield 0 (if x is finite, which is the case here).
__ cmp(left, Operand(0));
- __ cmp(right, Operand(0), NegateCondition(mi));
- __ b(mi, &call_stub);
+ __ b(eq, &done);
+ // Preload right in a vfp register.
+ __ vmov(divisor.low(), right);
+ __ b(lt, &vfp_modulo);
+
+ __ cmp(left, Operand(right));
+ __ b(lt, &done);
+
+ // Check for (positive) power of two on the right hand side.
+ __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
+ scratch,
+ &right_negative,
+ &both_positive);
+ // Perform modulo operation (scratch contains right - 1).
+ __ and_(result, scratch, Operand(left));
+ __ b(&done);
+
+ __ bind(&right_negative);
+ // Negate right. The sign of the divisor does not matter.
+ __ rsb(right, right, Operand(0));
+
+ __ bind(&both_positive);
+ const int kUnfolds = 3;
// If the right hand side is smaller than the (nonnegative)
- // left hand side, it is the result. Else try a few subtractions
- // of the left hand side.
+ // left hand side, the left hand side is the result.
+ // Else try a few subtractions of the left hand side.
__ mov(scratch, left);
for (int i = 0; i < kUnfolds; i++) {
    // Check if the left hand side is less than or equal to the
    // right hand side.
- __ cmp(scratch, right);
+ __ cmp(scratch, Operand(right));
__ mov(result, scratch, LeaveCC, lt);
__ b(lt, &done);
// If not, reduce the left hand side by the right hand
@@ -867,28 +964,45 @@ void LCodeGen::DoModI(LModI* instr) {
if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
}
- // Check for power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZero(right, scratch, &call_stub);
- // Perform modulo operation (scratch contains right - 1).
- __ and_(result, scratch, Operand(left));
-
- __ bind(&call_stub);
- // Call the stub. The numbers in r0 and r1 have
- // to be tagged to Smis. If that is not possible, deoptimize.
- DeferredModI* deferred = new DeferredModI(this, instr);
- __ TrySmiTag(left, &deoptimize, scratch);
- __ TrySmiTag(right, &deoptimize, scratch);
-
- __ b(al, deferred->entry());
- __ bind(deferred->exit());
-
- // If the result in r0 is a Smi, untag it, else deoptimize.
- __ JumpIfNotSmi(result, &deoptimize);
- __ SmiUntag(result);
+ __ bind(&vfp_modulo);
+ // Load the arguments in VFP registers.
+  // The divisor value was preloaded above. Be careful that 'right' is only
+  // live on entry.
+ __ vmov(dividend.low(), left);
+ // From here on don't use right as it may have been reallocated (for example
+ // to scratch2).
+ right = no_reg;
+
+ __ vcvt_f64_s32(dividend, dividend.low());
+ __ vcvt_f64_s32(divisor, divisor.low());
+
+ // We do not care about the sign of the divisor.
+ __ vabs(divisor, divisor);
+ // Compute the quotient and round it to a 32bit integer.
+ __ vdiv(quotient, dividend, divisor);
+ __ vcvt_s32_f64(quotient.low(), quotient);
+ __ vcvt_f64_s32(quotient, quotient.low());
+
+ // Compute the remainder in result.
+ DwVfpRegister double_scratch = dividend;
+ __ vmul(double_scratch, divisor, quotient);
+ __ vcvt_s32_f64(double_scratch.low(), double_scratch);
+ __ vmov(scratch, double_scratch.low());
+
+ if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ sub(result, left, scratch);
+ } else {
+ Label ok;
+ // Check for -0.
+ __ sub(scratch2, left, scratch, SetCC);
+ __ b(ne, &ok);
+ __ cmp(left, Operand(0));
+ DeoptimizeIf(mi, instr->environment());
+ __ bind(&ok);
+ // Load the result and we are done.
+ __ mov(result, scratch2);
+ }
- __ b(al, &done);
- __ bind(&deoptimize);
- DeoptimizeIf(al, instr->environment());
__ bind(&done);
}
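DoModI now has two register-only strategies: the power-of-two path uses x mod 2^k == x & (2^k - 1) for non-negative x, with a negate/mask/negate sequence so the result keeps the dividend's sign as JS % requires, and the VFP path computes left - trunc(left / |right|) * |right| (vcvt rounds toward zero). A scalar model of both, ignoring INT32_MIN edge cases (illustrative only):

#include <cmath>
#include <cstdint>

static int32_t ModPowerOfTwo(int32_t x, int32_t divisor_abs) {
  if (x >= 0) return x & (divisor_abs - 1);
  return -((-x) & (divisor_abs - 1));  // e.g. -7 mod 4 == -3.
}

static int32_t ModViaVfp(int32_t left, int32_t right) {
  double dividend = static_cast<double>(left);
  double divisor = std::fabs(static_cast<double>(right));  // Sign ignored.
  double quotient = std::trunc(dividend / divisor);        // Round to zero.
  return left - static_cast<int32_t>(quotient * divisor);
}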
@@ -912,16 +1026,16 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ tst(right, right);
+ __ cmp(right, Operand(0));
DeoptimizeIf(eq, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
- __ tst(left, Operand(left));
+ __ cmp(left, Operand(0));
__ b(ne, &left_not_zero);
- __ tst(right, Operand(right));
+ __ cmp(right, Operand(0));
DeoptimizeIf(mi, instr->environment());
__ bind(&left_not_zero);
}
@@ -994,7 +1108,7 @@ void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
__ mov(r0, right);
__ mov(r1, left);
}
- TypeRecordingBinaryOpStub stub(op, OVERWRITE_LEFT);
+ BinaryOpStub stub(op, OVERWRITE_LEFT);
__ CallStub(&stub);
RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
0,
@@ -1006,59 +1120,125 @@ void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
void LCodeGen::DoMulI(LMulI* instr) {
Register scratch = scratch0();
+ Register result = ToRegister(instr->result());
+ // Note that result may alias left.
Register left = ToRegister(instr->InputAt(0));
- Register right = EmitLoadRegister(instr->InputAt(1), scratch);
+ LOperand* right_op = instr->InputAt(1);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) &&
- !instr->InputAt(1)->IsConstantOperand()) {
- __ orr(ToRegister(instr->TempAt(0)), left, right);
- }
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (right_op->IsConstantOperand() && !can_overflow) {
+ // Use optimized code for specific constants.
+ int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+ if (bailout_on_minus_zero && (constant < 0)) {
+ // The case of a zero constant is handled separately below.
+ // If the constant is negative and left is zero, the result should be -0.
+ __ cmp(left, Operand(0));
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ switch (constant) {
+ case -1:
+ __ rsb(result, left, Operand(0));
+ break;
+ case 0:
+ if (bailout_on_minus_zero) {
+ // If left is strictly negative and the constant is zero, the
+ // result is -0. Deoptimize if required, otherwise return 0.
+ __ cmp(left, Operand(0));
+ DeoptimizeIf(mi, instr->environment());
+ }
+ __ mov(result, Operand(0));
+ break;
+ case 1:
+ __ Move(result, left);
+ break;
+ default:
+ // Multiplying by powers of two and powers of two plus or minus
+ // one can be done faster with shifted operands.
+ // For other constants we emit standard code.
+ int32_t mask = constant >> 31;
+ uint32_t constant_abs = (constant + mask) ^ mask;
+
+ if (IsPowerOf2(constant_abs) ||
+ IsPowerOf2(constant_abs - 1) ||
+ IsPowerOf2(constant_abs + 1)) {
+ if (IsPowerOf2(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ mov(result, Operand(left, LSL, shift));
+ } else if (IsPowerOf2(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ add(result, left, Operand(left, LSL, shift));
+ } else if (IsPowerOf2(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ rsb(result, left, Operand(left, LSL, shift));
+ }
+
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ rsb(result, result, Operand(0));
+
+ } else {
+ // Generate standard code.
+ __ mov(ip, Operand(constant));
+ __ mul(result, left, ip);
+ }
+ }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- // scratch:left = left * right.
- __ smull(left, scratch, left, right);
- __ mov(ip, Operand(left, ASR, 31));
- __ cmp(ip, Operand(scratch));
- DeoptimizeIf(ne, instr->environment());
} else {
- __ mul(left, left, right);
- }
+ Register right = EmitLoadRegister(right_op, scratch);
+ if (bailout_on_minus_zero) {
+ __ orr(ToRegister(instr->TempAt(0)), left, right);
+ }
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Bail out if the result is supposed to be negative zero.
- Label done;
- __ tst(left, Operand(left));
- __ b(ne, &done);
- if (instr->InputAt(1)->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) <= 0) {
- DeoptimizeIf(al, instr->environment());
- }
+ if (can_overflow) {
+ // scratch:result = left * right.
+ __ smull(result, scratch, left, right);
+ __ cmp(scratch, Operand(result, ASR, 31));
+ DeoptimizeIf(ne, instr->environment());
} else {
- // Test the non-zero operand for negative sign.
+ __ mul(result, left, right);
+ }
+
+ if (bailout_on_minus_zero) {
+ // Bail out if the result is supposed to be negative zero.
+ Label done;
+ __ cmp(result, Operand(0));
+ __ b(ne, &done);
__ cmp(ToRegister(instr->TempAt(0)), Operand(0));
DeoptimizeIf(mi, instr->environment());
+ __ bind(&done);
}
- __ bind(&done);
}
}
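
Three tricks are packed into DoMulI: strength reduction for constants of the form 2^k and 2^k +/- 1, the smull overflow test (the high word of the 64-bit product must equal the sign extension of the low word), and the orr of the two operands, which remembers whether a zero product involved a negative factor. A hedged C++ sketch of the first two, using GCC/Clang builtins and illustrative names:

#include <cstdint>

// Multiply by a constant known to be 2^k, 2^k + 1 or 2^k - 1 (k >= 1),
// mirroring the shifted-operand path above. The sign fix-up for negative
// constants (the final rsb) is omitted for brevity.
int32_t MulByShiftableConstant(int32_t left, uint32_t constant_abs) {
  uint32_t x = static_cast<uint32_t>(left);  // unsigned shifts avoid UB
  if ((constant_abs & (constant_abs - 1)) == 0) {        // 2^k
    return static_cast<int32_t>(x << __builtin_ctz(constant_abs));
  }
  if (((constant_abs - 1) & (constant_abs - 2)) == 0) {  // 2^k + 1
    return static_cast<int32_t>(x + (x << __builtin_ctz(constant_abs - 1)));
  }
  // Remaining shiftable form: 2^k - 1 (the rsb case).
  return static_cast<int32_t>((x << __builtin_ctz(constant_abs + 1)) - x);
}

// Overflow test equivalent to: smull result, scratch, left, right;
// cmp scratch, result ASR 31 (arithmetic shift, as on ARM).
bool MulOverflows(int32_t left, int32_t right) {
  int64_t product = static_cast<int64_t>(left) * right;  // what smull computes
  int32_t low = static_cast<int32_t>(product);
  int32_t high = static_cast<int32_t>(product >> 32);
  return high != (low >> 31);  // high word must be the low word's sign
}
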
void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
- Register result = ToRegister(left);
- Register right_reg = EmitLoadRegister(right, ip);
+ LOperand* left_op = instr->InputAt(0);
+ LOperand* right_op = instr->InputAt(1);
+ ASSERT(left_op->IsRegister());
+ Register left = ToRegister(left_op);
+ Register result = ToRegister(instr->result());
+ Operand right(no_reg);
+
+ if (right_op->IsStackSlot() || right_op->IsArgument()) {
+ right = Operand(EmitLoadRegister(right_op, ip));
+ } else {
+ ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
+ right = ToOperand(right_op);
+ }
+
switch (instr->op()) {
case Token::BIT_AND:
- __ and_(result, ToRegister(left), Operand(right_reg));
+ __ and_(result, left, right);
break;
case Token::BIT_OR:
- __ orr(result, ToRegister(left), Operand(right_reg));
+ __ orr(result, left, right);
break;
case Token::BIT_XOR:
- __ eor(result, ToRegister(left), Operand(right_reg));
+ __ eor(result, left, right);
break;
default:
UNREACHABLE();
@@ -1068,54 +1248,62 @@ void LCodeGen::DoBitI(LBitI* instr) {
void LCodeGen::DoShiftI(LShiftI* instr) {
+ // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+ // result may alias either of them.
+ LOperand* right_op = instr->InputAt(1);
+ Register left = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
Register scratch = scratch0();
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
- Register result = ToRegister(left);
- if (right->IsRegister()) {
- // Mask the right operand.
- __ and_(scratch, ToRegister(right), Operand(0x1F));
+ if (right_op->IsRegister()) {
+ // Mask the shift count in right_op to 5 bits (shift counts are mod 32).
+ __ and_(scratch, ToRegister(right_op), Operand(0x1F));
switch (instr->op()) {
case Token::SAR:
- __ mov(result, Operand(result, ASR, scratch));
+ __ mov(result, Operand(left, ASR, scratch));
break;
case Token::SHR:
if (instr->can_deopt()) {
- __ mov(result, Operand(result, LSR, scratch), SetCC);
+ __ mov(result, Operand(left, LSR, scratch), SetCC);
DeoptimizeIf(mi, instr->environment());
} else {
- __ mov(result, Operand(result, LSR, scratch));
+ __ mov(result, Operand(left, LSR, scratch));
}
break;
case Token::SHL:
- __ mov(result, Operand(result, LSL, scratch));
+ __ mov(result, Operand(left, LSL, scratch));
break;
default:
UNREACHABLE();
break;
}
} else {
- int value = ToInteger32(LConstantOperand::cast(right));
+ // Mask the constant shift count to 5 bits.
+ int value = ToInteger32(LConstantOperand::cast(right_op));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
case Token::SAR:
if (shift_count != 0) {
- __ mov(result, Operand(result, ASR, shift_count));
+ __ mov(result, Operand(left, ASR, shift_count));
+ } else {
+ __ Move(result, left);
}
break;
case Token::SHR:
- if (shift_count == 0 && instr->can_deopt()) {
- __ tst(result, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment());
+ if (shift_count != 0) {
+ __ mov(result, Operand(left, LSR, shift_count));
} else {
- __ mov(result, Operand(result, LSR, shift_count));
+ if (instr->can_deopt()) {
+ __ tst(left, Operand(0x80000000));
+ DeoptimizeIf(ne, instr->environment());
+ }
+ __ Move(result, left);
}
break;
case Token::SHL:
if (shift_count != 0) {
- __ mov(result, Operand(result, LSL, shift_count));
+ __ mov(result, Operand(left, LSL, shift_count));
+ } else {
+ __ Move(result, left);
}
break;
default:
@@ -1127,11 +1315,21 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
void LCodeGen::DoSubI(LSubI* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = EmitLoadRegister(instr->InputAt(1), ip);
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
- __ sub(left, left, right, SetCC);
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ LOperand* result = instr->result();
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ SBit set_cond = can_overflow ? SetCC : LeaveCC;
+
+ if (right->IsStackSlot() || right->IsArgument()) {
+ Register right_reg = EmitLoadRegister(right, ip);
+ __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
+ } else {
+ ASSERT(right->IsRegister() || right->IsConstantOperand());
+ __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
+ }
+
+ if (can_overflow) {
DeoptimizeIf(vs, instr->environment());
}
}
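
DoSubI only requests the flags (SetCC) when the operation can overflow, then deoptimizes on the V flag (vs); DoAddI below uses the identical pattern. A portable sketch of that contract, assuming the GCC/Clang overflow builtins:

#include <cstdint>

// Checked subtraction/addition mirroring SetCC + DeoptimizeIf(vs): these
// return false exactly where the generated code would deoptimize.
bool CheckedSub(int32_t left, int32_t right, int32_t* result) {
  return !__builtin_sub_overflow(left, right, result);
}

bool CheckedAdd(int32_t left, int32_t right, int32_t* result) {
  return !__builtin_add_overflow(left, right, result);
}
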
@@ -1147,7 +1345,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
ASSERT(instr->result()->IsDoubleRegister());
DwVfpRegister result = ToDoubleRegister(instr->result());
double v = instr->value();
- __ vmov(result, v);
+ __ Vmov(result, v);
}
@@ -1164,10 +1362,10 @@ void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
}
-void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
+void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->InputAt(0));
- __ ldr(result, FieldMemOperand(array, PixelArray::kLengthOffset));
+ __ ldr(result, FieldMemOperand(array, ExternalArray::kLengthOffset));
}
@@ -1178,19 +1376,34 @@ void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
}
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+
+ // Load map into |result|.
+ __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
+ // Load the map's "bit field 2" into |result|. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
+}
+
+
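
The ubfx above extracts Map::kElementsKindBitCount bits starting at bit Map::kElementsKindShift from the word holding bit field 2. A portable equivalent of that extraction, with the constants standing in for the Map layout values:

#include <cstdint>

// Equivalent of: ubfx result, bits, shift, count.
uint32_t ExtractBitField(uint32_t bits, int shift, int count) {
  return (bits >> shift) & ((1u << count) - 1u);
}

// Usage, with illustrative names for Map::kElementsKindShift and
// Map::kElementsKindBitCount:
//   uint32_t kind = ExtractBitField(bit_field2, kElementsKindShift,
//                                   kElementsKindBitCount);
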
void LCodeGen::DoValueOf(LValueOf* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register map = ToRegister(instr->TempAt(0));
- ASSERT(input.is(result));
Label done;
// If the object is a Smi, return the object.
__ tst(input, Operand(kSmiTagMask));
+ __ Move(result, input, eq);
__ b(eq, &done);
// If the object is not a value type, return the object.
__ CompareObjectType(input, map, map, JS_VALUE_TYPE);
+ __ Move(result, input, ne);
__ b(ne, &done);
__ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
@@ -1199,9 +1412,9 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->Equals(instr->result()));
- __ mvn(ToRegister(input), Operand(ToRegister(input)));
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ __ mvn(result, Operand(input));
}
@@ -1219,12 +1432,19 @@ void LCodeGen::DoThrow(LThrow* instr) {
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
+ LOperand* result = instr->result();
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ SBit set_cond = can_overflow ? SetCC : LeaveCC;
- Register right_reg = EmitLoadRegister(right, ip);
- __ add(ToRegister(left), ToRegister(left), Operand(right_reg), SetCC);
+ if (right->IsStackSlot() || right->IsArgument()) {
+ Register right_reg = EmitLoadRegister(right, ip);
+ __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
+ } else {
+ ASSERT(right->IsRegister() || right->IsConstantOperand());
+ __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
+ }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ if (can_overflow) {
DeoptimizeIf(vs, instr->environment());
}
}
@@ -1233,29 +1453,31 @@ void LCodeGen::DoAddI(LAddI* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
+ DoubleRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
case Token::ADD:
- __ vadd(left, left, right);
+ __ vadd(result, left, right);
break;
case Token::SUB:
- __ vsub(left, left, right);
+ __ vsub(result, left, right);
break;
case Token::MUL:
- __ vmul(left, left, right);
+ __ vmul(result, left, right);
break;
case Token::DIV:
- __ vdiv(left, left, right);
+ __ vdiv(result, left, right);
break;
case Token::MOD: {
// Save r0-r3 on the stack.
__ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
- __ PrepareCallCFunction(4, scratch0());
- __ vmov(r0, r1, left);
- __ vmov(r2, r3, right);
- __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
+ __ PrepareCallCFunction(0, 2, scratch0());
+ __ SetCallCDoubleArguments(left, right);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ 0, 2);
// Move the result in the double result register.
- __ GetCFunctionDoubleResult(ToDoubleRegister(instr->result()));
+ __ GetCFunctionDoubleResult(result);
// Restore r0-r3.
__ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
@@ -1273,7 +1495,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->InputAt(1)).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
- TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ BinaryOpStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -1344,12 +1566,11 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ b(eq, false_label);
__ cmp(reg, Operand(0));
__ b(eq, false_label);
- __ tst(reg, Operand(kSmiTagMask));
- __ b(eq, true_label);
+ __ JumpIfSmi(reg, true_label);
// Test double values. Zero and NaN are false.
Label call_stub;
- DoubleRegister dbl_scratch = d0;
+ DoubleRegister dbl_scratch = double_scratch0();
Register scratch = scratch0();
__ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
@@ -1377,45 +1598,17 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
-void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+void LCodeGen::EmitGoto(int block) {
block = chunk_->LookupDestination(block);
int next_block = GetNextEmittedBlock(current_block_);
if (block != next_block) {
- // Perform stack overflow check if this goto needs it before jumping.
- if (deferred_stack_check != NULL) {
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, chunk_->GetAssemblyLabel(block));
- __ jmp(deferred_stack_check->entry());
- deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
- } else {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
+ __ jmp(chunk_->GetAssemblyLabel(block));
}
}
-void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
-}
-
-
void LCodeGen::DoGoto(LGoto* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- private:
- LGoto* instr_;
- };
-
- DeferredStackCheck* deferred = NULL;
- if (instr->include_stack_check()) {
- deferred = new DeferredStackCheck(this, instr);
- }
- EmitGoto(instr->block_id(), deferred);
+ EmitGoto(instr->block_id());
}
@@ -1502,7 +1695,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
}
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+void LCodeGen::DoCmpObjectEq(LCmpObjectEq* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
@@ -1513,7 +1706,7 @@ void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
}
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1524,6 +1717,27 @@ void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
}
+void LCodeGen::DoCmpConstantEq(LCmpConstantEq* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ __ cmp(left, Operand(instr->hydrogen()->right()));
+ __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
+}
+
+
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ cmp(left, Operand(instr->hydrogen()->right()));
+ EmitBranch(true_block, false_block, eq);
+}
+
+
void LCodeGen::DoIsNull(LIsNull* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -1539,8 +1753,7 @@ void LCodeGen::DoIsNull(LIsNull* instr) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(ip, reg);
__ b(eq, &true_value);
- __ tst(reg, Operand(kSmiTagMask));
- __ b(eq, &false_value);
+ __ JumpIfSmi(reg, &false_value);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
Register scratch = result;
@@ -1579,8 +1792,7 @@ void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(reg, ip);
__ b(eq, true_label);
- __ tst(reg, Operand(kSmiTagMask));
- __ b(eq, false_label);
+ __ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
__ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
@@ -1593,13 +1805,13 @@ void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object) {
+ Register temp2 = scratch0();
__ JumpIfSmi(input, is_not_object);
- __ LoadRoot(temp1, Heap::kNullValueRootIndex);
- __ cmp(input, temp1);
+ __ LoadRoot(temp2, Heap::kNullValueRootIndex);
+ __ cmp(input, temp2);
__ b(eq, is_object);
// Load map.
@@ -1611,9 +1823,9 @@ Condition LCodeGen::EmitIsObject(Register input,
// Load instance type and check that it is in object type range.
__ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
- __ cmp(temp2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ b(lt, is_not_object);
- __ cmp(temp2, Operand(LAST_JS_OBJECT_TYPE));
+ __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
return le;
}
@@ -1621,10 +1833,9 @@ Condition LCodeGen::EmitIsObject(Register input,
void LCodeGen::DoIsObject(LIsObject* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- Register temp = scratch0();
Label is_false, is_true, done;
- Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
+ Condition true_cond = EmitIsObject(reg, result, &is_false, &is_true);
__ b(true_cond, &is_true);
__ bind(&is_false);
@@ -1649,7 +1860,7 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Label* false_label = chunk_->GetAssemblyLabel(false_block);
Condition true_cond =
- EmitIsObject(reg, temp1, temp2, false_label, true_label);
+ EmitIsObject(reg, temp1, false_label, true_label);
EmitBranch(true_block, false_block, true_cond);
}
@@ -1659,10 +1870,9 @@ void LCodeGen::DoIsSmi(LIsSmi* instr) {
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
Register result = ToRegister(instr->result());
Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
- __ tst(input_reg, Operand(kSmiTagMask));
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
Label done;
- __ b(eq, &done);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ JumpIfSmi(input_reg, &done);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
@@ -1678,6 +1888,40 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
}
+void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+ Label false_label, done;
+ __ JumpIfSmi(input, &false_label);
+ __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ ldrb(result, FieldMemOperand(result, Map::kBitFieldOffset));
+ __ tst(result, Operand(1 << Map::kIsUndetectable));
+ __ b(eq, &false_label);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&false_label);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+ __ tst(temp, Operand(1 << Map::kIsUndetectable));
+ EmitBranch(true_block, false_block, ne);
+}
+
+
static InstanceType TestType(HHasInstanceType* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
@@ -1724,8 +1968,7 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ tst(input, Operand(kSmiTagMask));
- __ b(eq, false_label);
+ __ JumpIfSmi(input, false_label);
__ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
@@ -1735,10 +1978,13 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- __ ldr(scratch, FieldMemOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(scratch, result);
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(input);
+ }
+
+ __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
+ __ IndexFromHash(result, result);
}
@@ -1781,28 +2027,28 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
Register temp2) {
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
- __ tst(input, Operand(kSmiTagMask));
- __ b(eq, is_false);
- __ CompareObjectType(input, temp, temp2, FIRST_JS_OBJECT_TYPE);
+ __ JumpIfSmi(input, is_false);
+ __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, is_false);
// Map is now in temp.
// Functions have class 'Function'.
- __ CompareInstanceType(temp, temp2, JS_FUNCTION_TYPE);
+ __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ b(eq, is_true);
+ __ b(ge, is_true);
} else {
- __ b(eq, is_false);
+ __ b(ge, is_false);
}
// Check if the constructor in the map is a function.
__ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
@@ -1886,24 +2132,9 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- Label true_value, done;
- __ tst(r0, r0);
- __ mov(r0, Operand(Factory::false_value()), LeaveCC, ne);
- __ mov(r0, Operand(Factory::true_value()), LeaveCC, eq);
-}
-
-
-void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
- ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ tst(r0, Operand(r0));
- EmitBranch(true_block, false_block, eq);
+ __ cmp(r0, Operand(0));
+ __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
+ __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
}
@@ -1948,13 +2179,13 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation, so that the code can later be patched
// with the cached map.
- __ mov(ip, Operand(Factory::the_hole_value()));
+ __ mov(ip, Operand(factory()->the_hole_value()));
__ cmp(map, Operand(ip));
__ b(ne, &cache_miss);
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation, so that the code can later be patched
// with true or false.
- __ mov(result, Operand(Factory::the_hole_value()));
+ __ mov(result, Operand(factory()->the_hole_value()));
__ b(&done);
// The inlined call site cache did not match. Check null and string before
@@ -2061,25 +2292,6 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
}
-void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- // The compare stub expects compare condition and the input operands
- // reversed for GT and LTE.
- Condition condition = ComputeCompareCondition(op);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
- __ cmp(r0, Operand(0));
- EmitBranch(true_block, false_block, condition);
-}
-
-
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace) {
// Push the return value on the stack as the parameter.
@@ -2087,7 +2299,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- int32_t sp_delta = (ParameterCount() + 1) * kPointerSize;
+ int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
__ add(sp, sp, Operand(sp_delta));
@@ -2095,7 +2307,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
__ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
@@ -2107,7 +2319,19 @@ void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
}
-void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->global_object()).is(r0));
+ ASSERT(ToRegister(instr->result()).is(r0));
+
+ __ mov(r2, Operand(instr->name()));
+ RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, mode, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
@@ -2132,6 +2356,18 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
}
+void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->global_object()).is(r1));
+ ASSERT(ToRegister(instr->value()).is(r0));
+
+ __ mov(r2, Operand(instr->name()));
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2162,13 +2398,83 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
+void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name) {
+ LookupResult lookup;
+ type->LookupInDescriptors(NULL, *name, &lookup);
+ ASSERT(lookup.IsProperty() &&
+ (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
+ if (lookup.type() == FIELD) {
+ int index = lookup.GetLocalFieldIndexFromMap(*type);
+ int offset = index * kPointerSize;
+ if (index < 0) {
+ // Negative property indices are in-object properties, indexed
+ // from the end of the fixed part of the object.
+ __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
+ } else {
+ // Non-negative property indices are in the properties array.
+ __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
+ }
+ } else {
+ Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
+ LoadHeapObject(result, Handle<HeapObject>::cast(function));
+ }
+}
+
+
+void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
+ Register object = ToRegister(instr->object());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ int map_count = instr->hydrogen()->types()->length();
+ Handle<String> name = instr->hydrogen()->name();
+ if (map_count == 0) {
+ ASSERT(instr->hydrogen()->need_generic());
+ __ mov(r2, Operand(name));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ Label done;
+ __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ for (int i = 0; i < map_count - 1; ++i) {
+ Handle<Map> map = instr->hydrogen()->types()->at(i);
+ Label next;
+ __ cmp(scratch, Operand(map));
+ __ b(ne, &next);
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ b(&done);
+ __ bind(&next);
+ }
+ Handle<Map> map = instr->hydrogen()->types()->last();
+ __ cmp(scratch, Operand(map));
+ if (instr->hydrogen()->need_generic()) {
+ Label generic;
+ __ b(ne, &generic);
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ b(&done);
+ __ bind(&generic);
+ __ mov(r2, Operand(name));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ DeoptimizeIf(ne, instr->environment());
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ }
+ __ bind(&done);
+ }
+}
+
+
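
The polymorphic load above is a chain of map comparisons: every known map gets its own monomorphic fast load, and the final comparison either falls through to the generic LoadIC (when need_generic is set) or deoptimizes. The control flow as a C++ sketch, with stand-in types and loads:

#include <cstdint>

struct Map {};                 // stand-in for v8::internal::Map
static Map map_a, map_b;       // the "expected" maps from type feedback

int32_t LoadForMapA() { return 1; }   // monomorphic fast load per known map
int32_t LoadForMapB() { return 2; }
int32_t GenericLoad() { return -1; }  // stand-in for the LoadIC call

// Shape of DoLoadNamedFieldPolymorphic for two maps with need_generic set.
int32_t PolymorphicLoad(const Map* actual) {
  if (actual == &map_a) return LoadForMapA();
  if (actual == &map_b) return LoadForMapB();
  return GenericLoad();  // with need_generic unset this would deoptimize
}
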
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2224,27 +2530,37 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
- Label done;
+ Label done, fail;
__ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(scratch, ip);
__ b(eq, &done);
- __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ b(eq, &done);
__ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
__ cmp(scratch, ip);
- __ Check(eq, "Check for fast elements failed.");
+ __ b(eq, &done);
+ // |scratch| still contains the map of |result|, i.e. the elements' map.
+ __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
+ __ ubfx(scratch, scratch, Map::kElementsKindShift,
+ Map::kElementsKindBitCount);
+ __ cmp(scratch, Operand(JSObject::FAST_ELEMENTS));
+ __ b(eq, &done);
+ __ cmp(scratch, Operand(JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+ __ b(lt, &fail);
+ __ cmp(scratch, Operand(JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+ __ b(le, &done);
+ __ bind(&fail);
+ __ Abort("Check for fast or external elements failed.");
__ bind(&done);
}
}
-void LCodeGen::DoLoadPixelArrayExternalPointer(
- LLoadPixelArrayExternalPointer* instr) {
+void LCodeGen::DoLoadExternalArrayPointer(
+ LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
Register from_reg = ToRegister(instr->InputAt(0));
- __ ldr(to_reg, FieldMemOperand(from_reg, PixelArray::kExternalPointerOffset));
+ __ ldr(to_reg, FieldMemOperand(from_reg,
+ ExternalArray::kExternalPointerOffset));
}
@@ -2271,26 +2587,90 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
Register key = EmitLoadRegister(instr->key(), scratch0());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
- ASSERT(result.is(elements));
// Load the result.
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
__ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
// Check for the hole value.
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ cmp(result, scratch);
- DeoptimizeIf(eq, instr->environment());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ __ cmp(result, scratch);
+ DeoptimizeIf(eq, instr->environment());
+ }
}
-void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
- Register external_elements = ToRegister(instr->external_pointer());
- Register key = ToRegister(instr->key());
- Register result = ToRegister(instr->result());
-
- // Load the result.
- __ ldrb(result, MemOperand(external_elements, key));
+void LCodeGen::DoLoadKeyedSpecializedArrayElement(
+ LLoadKeyedSpecializedArrayElement* instr) {
+ Register external_pointer = ToRegister(instr->external_pointer());
+ Register key = no_reg;
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int shift_size = ElementsKindToShiftSize(elements_kind);
+
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ CpuFeatures::Scope scope(VFP3);
+ DwVfpRegister result(ToDoubleRegister(instr->result()));
+ Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
+ : Operand(key, LSL, shift_size));
+ __ add(scratch0(), external_pointer, operand);
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ __ vldr(result.low(), scratch0(), 0);
+ __ vcvt_f64_f32(result, result.low());
+ } else { // i.e. elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS
+ __ vldr(result, scratch0(), 0);
+ }
+ } else {
+ Register result(ToRegister(instr->result()));
+ MemOperand mem_operand(key_is_constant
+ ? MemOperand(external_pointer, constant_key * (1 << shift_size))
+ : MemOperand(external_pointer, key, LSL, shift_size));
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ __ ldrsb(result, mem_operand);
+ break;
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ ldrb(result, mem_operand);
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ __ ldrsh(result, mem_operand);
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ ldrh(result, mem_operand);
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ __ ldr(result, mem_operand);
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ ldr(result, mem_operand);
+ __ cmp(result, Operand(0x80000000));
+ // TODO(danno): we could be more clever here, perhaps having a special
+ // version of the stub that detects if the overflow case actually
+ // happens, and generates code that returns a double rather than an int.
+ DeoptimizeIf(cs, instr->environment());
+ break;
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
}
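
The EXTERNAL_UNSIGNED_INT case deoptimizes on values with the top bit set because such a uint32 does not fit the untagged int32 representation (only a heap number can hold it, per the TODO above). The boundary, as a one-line check:

#include <cstdint>

// Mirrors the cmp(result, 0x80000000) / DeoptimizeIf(cs) pair above.
bool FitsUntaggedInt32(uint32_t element) {
  return element < 0x80000000u;  // otherwise the optimized code bails out
}
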
@@ -2298,7 +2678,7 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(r1));
ASSERT(ToRegister(instr->key()).is(r0));
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2352,9 +2732,26 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
ASSERT(function.is(r1)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(r0));
- // If the receiver is null or undefined, we have to pass the global object
- // as a receiver.
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
Label global_object, receiver_ok;
+
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ ldr(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(scratch,
+ Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
+ __ b(ne, &receiver_ok);
+
+ // Do not transform the receiver to object for builtins.
+ __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, &receiver_ok);
+
+ // Normal function. Replace undefined or null with global receiver.
__ LoadRoot(scratch, Heap::kNullValueRootIndex);
__ cmp(receiver, scratch);
__ b(eq, &global_object);
@@ -2365,12 +2762,14 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Deoptimize if the receiver is not a JS object.
__ tst(receiver, Operand(kSmiTagMask));
DeoptimizeIf(eq, instr->environment());
- __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_OBJECT_TYPE);
- DeoptimizeIf(lo, instr->environment());
+ __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
+ DeoptimizeIf(lt, instr->environment());
__ jmp(&receiver_ok);
__ bind(&global_object);
__ ldr(receiver, GlobalObjectOperand());
+ __ ldr(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
// Copy the arguments to this function possibly from the
@@ -2390,7 +2789,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// stack.
Label invoke, loop;
// length is a small non-negative integer, due to the test above.
- __ tst(length, Operand(length));
+ __ cmp(length, Operand(0));
__ b(eq, &invoke);
__ bind(&loop);
__ ldr(scratch, MemOperand(elements, length, LSL, 2));
@@ -2410,7 +2809,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// The number of arguments is stored in receiver which is r0, as expected
// by InvokeFunction.
v8::internal::ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
+ __ InvokeFunction(function, actual, CALL_FUNCTION,
+ safepoint_generator, CALL_AS_METHOD);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -2426,6 +2826,12 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
}
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
__ mov(result, cp);
@@ -2436,8 +2842,7 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ ldr(result,
- MemOperand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ ldr(result, FieldMemOperand(result, JSFunction::kContextOffset));
+ MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
@@ -2457,10 +2862,11 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int arity,
- LInstruction* instr) {
+ LInstruction* instr,
+ CallKind call_kind) {
// Change context if needed.
bool change_context =
- (graph()->info()->closure()->context() != function->context()) ||
+ (info()->closure()->context() != function->context()) ||
scope()->contains_with() ||
(scope()->num_heap_slots() > 0);
if (change_context) {
@@ -2477,6 +2883,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
RecordPosition(pointers->position());
// Invoke function.
+ __ SetCallKind(r5, call_kind);
__ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ Call(ip);
@@ -2491,13 +2898,16 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
__ mov(r1, Operand(instr->function()));
- CallKnownFunction(instr->function(), instr->arity(), instr);
+ CallKnownFunction(instr->function(),
+ instr->arity(),
+ instr,
+ CALL_AS_METHOD);
}
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
Register scratch = scratch0();
// Deoptimize if not a heap number.
@@ -2511,10 +2921,10 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
scratch = no_reg;
__ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive, just
- // return it. We do not need to patch the stack since |input| and
- // |result| are the same register and |input| would be restored
- // unchanged by popping safepoint registers.
+ // return it.
__ tst(exponent, Operand(HeapNumber::kSignMask));
+ // Move the input to the result if necessary.
+ __ Move(result, input);
__ b(eq, &done);
// Input is negative. Reverse its sign.
@@ -2554,7 +2964,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
__ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
__ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
- __ StoreToSafepointRegisterSlot(tmp1, input);
+ __ StoreToSafepointRegisterSlot(tmp1, result);
}
__ bind(&done);
@@ -2563,11 +2973,13 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
__ cmp(input, Operand(0));
+ __ Move(result, input, pl);
// We can make rsb conditional because the previous cmp instruction
// will clear the V (overflow) flag and rsb won't set this flag
// if input is positive.
- __ rsb(input, input, Operand(0), SetCC, mi);
+ __ rsb(result, input, Operand(0), SetCC, mi);
// Deoptimize on overflow.
DeoptimizeIf(vs, instr->environment());
}
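
The conditional rsb can overflow for exactly one input, INT32_MIN, whose absolute value is not representable as an int32; the vs deopt catches that case. The same rule as a sketch:

#include <cstdint>
#include <limits>

// Integer Math.abs with the bailout of EmitIntegerMathAbs: returns false
// where the generated code deoptimizes.
bool CheckedAbs(int32_t input, int32_t* result) {
  if (input == std::numeric_limits<int32_t>::min()) return false;
  *result = input < 0 ? -input : input;
  return true;
}
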
@@ -2587,11 +2999,11 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
LUnaryMathOperation* instr_;
};
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
- __ vabs(input, input);
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ __ vabs(result, input);
} else if (r.IsInteger32()) {
EmitIntegerMathAbs(instr);
} else {
@@ -2625,23 +3037,65 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
// Move the result back to general purpose register r0.
__ vmov(result, single_scratch);
- // Test for -0.
- Label done;
- __ cmp(result, Operand(0));
- __ b(ne, &done);
- __ vmov(scratch1, input.high());
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
- __ bind(&done);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Test for -0.
+ Label done;
+ __ cmp(result, Operand(0));
+ __ b(ne, &done);
+ __ vmov(scratch1, input.high());
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment());
+ __ bind(&done);
+ }
}
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- Register scratch1 = scratch0();
- Register scratch2 = result;
- __ EmitVFPTruncate(kRoundToNearest,
+ Register scratch1 = result;
+ Register scratch2 = scratch0();
+ Label done, check_sign_on_zero;
+
+ // Extract exponent bits.
+ __ vmov(scratch1, input.high());
+ __ ubfx(scratch2,
+ scratch1,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // If the number is in (-0.5, +0.5), the result is +/-0.
+ __ cmp(scratch2, Operand(HeapNumber::kExponentBias - 2));
+ __ mov(result, Operand(0), LeaveCC, le);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ b(le, &check_sign_on_zero);
+ } else {
+ __ b(le, &done);
+ }
+
+ // The following conversion will not work with numbers
+ // outside of (-2^32, 2^32).
+ __ cmp(scratch2, Operand(HeapNumber::kExponentBias + 32));
+ DeoptimizeIf(ge, instr->environment());
+
+ // Save the original sign for later comparison.
+ __ and_(scratch2, scratch1, Operand(HeapNumber::kSignMask));
+
+ __ Vmov(double_scratch0(), 0.5);
+ __ vadd(input, input, double_scratch0());
+
+ // Check the sign of the result: if the sign changed, the input
+ // value was in (-0.5, 0) and the result should be -0.
+ __ vmov(scratch1, input.high());
+ __ eor(scratch1, scratch1, Operand(scratch2), SetCC);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(mi, instr->environment());
+ } else {
+ __ mov(result, Operand(0), LeaveCC, mi);
+ __ b(mi, &done);
+ }
+
+ __ EmitVFPTruncate(kRoundToMinusInf,
double_scratch0().low(),
input,
scratch1,
@@ -2649,21 +3103,32 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
DeoptimizeIf(ne, instr->environment());
__ vmov(result, double_scratch0().low());
- // Test for -0.
- Label done;
- __ cmp(result, Operand(0));
- __ b(ne, &done);
- __ vmov(scratch1, input.high());
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Test for -0.
+ __ cmp(result, Operand(0));
+ __ b(ne, &done);
+ __ bind(&check_sign_on_zero);
+ __ vmov(scratch1, input.high());
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment());
+ }
__ bind(&done);
}
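
Piecing DoMathRound together: a biased exponent at or below kExponentBias - 2 means |input| < 0.5 and the result is +/-0; exponents of 32 or more cannot be converted and deoptimize; otherwise the code adds 0.5 and truncates toward minus infinity, using the sign flip to catch inputs in (-0.5, 0). The resulting double-level semantics, as an illustrative sketch that omits the exponent fast paths:

#include <cmath>

// Math.round as implemented above: round half up, i.e. floor(x + 0.5),
// with -0 produced for inputs in [-0.5, -0].
double RoundHalfUp(double x) {
  if (std::signbit(x) && x >= -0.5) return -0.0;  // includes -0 itself
  return std::floor(x + 0.5);
}
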
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
- ASSERT(ToDoubleRegister(instr->result()).is(input));
- __ vsqrt(input, input);
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ vsqrt(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ // Add +0 to convert -0 to +0.
+ __ vadd(result, input, kDoubleRegZero);
+ __ vsqrt(result, result);
}
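
The vadd with kDoubleRegZero is there because IEEE sqrt(-0) is -0, while Math.pow(-0, 0.5) must return +0; adding +0 first normalizes -0 to +0. A two-line demonstration:

#include <cmath>
#include <cstdio>

int main() {
  std::printf("%d\n", std::signbit(std::sqrt(-0.0)));        // 1: sqrt keeps -0
  std::printf("%d\n", std::signbit(std::sqrt(-0.0 + 0.0)));  // 0: -0 + 0 == +0
  return 0;
}
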
@@ -2675,17 +3140,18 @@ void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
if (exponent_type.IsDouble()) {
// Prepare arguments and call C function.
- __ PrepareCallCFunction(4, scratch);
- __ vmov(r0, r1, ToDoubleRegister(left));
- __ vmov(r2, r3, ToDoubleRegister(right));
- __ CallCFunction(ExternalReference::power_double_double_function(), 4);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(ToDoubleRegister(left),
+ ToDoubleRegister(right));
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()), 0, 2);
} else if (exponent_type.IsInteger32()) {
ASSERT(ToRegister(right).is(r0));
// Prepare arguments and call C function.
- __ PrepareCallCFunction(4, scratch);
- __ mov(r2, ToRegister(right));
- __ vmov(r0, r1, ToDoubleRegister(left));
- __ CallCFunction(ExternalReference::power_double_int_function(), 4);
+ __ PrepareCallCFunction(1, 1, scratch);
+ __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(isolate()), 1, 1);
} else {
ASSERT(exponent_type.IsTagged());
ASSERT(instr->hydrogen()->left()->representation().IsDouble());
@@ -2715,16 +3181,40 @@ void LCodeGen::DoPower(LPower* instr) {
// Prepare arguments and call C function.
__ bind(&call);
- __ PrepareCallCFunction(4, scratch);
- __ vmov(r0, r1, ToDoubleRegister(left));
- __ vmov(r2, r3, result_reg);
- __ CallCFunction(ExternalReference::power_double_double_function(), 4);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()), 0, 2);
}
// Store the result in the result register.
__ GetCFunctionDoubleResult(result_reg);
}
+void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+ ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+ ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ TranscendentalCacheStub stub(TranscendentalCache::COS,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+ ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ TranscendentalCacheStub stub(TranscendentalCache::SIN,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
switch (instr->op()) {
case kMathAbs:
@@ -2739,6 +3229,18 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
case kMathSqrt:
DoMathSqrt(instr);
break;
+ case kMathPowHalf:
+ DoMathPowHalf(instr);
+ break;
+ case kMathCos:
+ DoMathCos(instr);
+ break;
+ case kMathSin:
+ DoMathSin(instr);
+ break;
+ case kMathLog:
+ DoMathLog(instr);
+ break;
default:
Abort("Unimplemented type of LUnaryMathOperation.");
UNREACHABLE();
@@ -2746,11 +3248,27 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
}
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(r1));
+ ASSERT(instr->HasPointerMap());
+ ASSERT(instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ SafepointGenerator generator(this, pointers, env->deoptimization_index());
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -2760,9 +3278,11 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
__ mov(r2, Operand(instr->name()));
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, mode, instr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -2772,7 +3292,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+ CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_IMPLICIT);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Drop(1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2783,9 +3303,11 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
__ mov(r2, Operand(instr->name()));
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+ CallCode(ic, mode, instr);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -2793,7 +3315,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
__ mov(r1, Operand(instr->target()));
- CallKnownFunction(instr->target(), instr->arity(), instr);
+ CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@@ -2801,7 +3323,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->InputAt(0)).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
- Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
__ mov(r0, Operand(instr->arity()));
CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -2850,9 +3372,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic(Builtins::builtin(
- info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2889,18 +3411,89 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
}
+void LCodeGen::DoStoreKeyedSpecializedArrayElement(
+ LStoreKeyedSpecializedArrayElement* instr) {
+
+ Register external_pointer = ToRegister(instr->external_pointer());
+ Register key = no_reg;
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int shift_size = ElementsKindToShiftSize(elements_kind);
+
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ CpuFeatures::Scope scope(VFP3);
+ DwVfpRegister value(ToDoubleRegister(instr->value()));
+ Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
+ : Operand(key, LSL, shift_size));
+ __ add(scratch0(), external_pointer, operand);
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ __ vcvt_f32_f64(double_scratch0().low(), value);
+ __ vstr(double_scratch0().low(), scratch0(), 0);
+ } else { // i.e. elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS
+ __ vstr(value, scratch0(), 0);
+ }
+ } else {
+ Register value(ToRegister(instr->value()));
+ MemOperand mem_operand(key_is_constant
+ ? MemOperand(external_pointer, constant_key * (1 << shift_size))
+ : MemOperand(external_pointer, key, LSL, shift_size));
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ strb(value, mem_operand);
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ strh(value, mem_operand);
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ str(value, mem_operand);
+ break;
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
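// [Editor's note, not part of the patch] The store address computed above is
// external_pointer + (key << shift_size), where shift_size is log2 of the
// element width. For example (assumed kinds): EXTERNAL_INT_ELEMENTS has
// 4-byte lanes, so shift_size == 2 and key 5 stores at byte offset 20. The
// 0xF0000000 guard rejects constant keys of 2^28 and above, since even the
// widest kind (shift_size == 3) must keep constant_key << 3 inside a 32-bit
// offset.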
+
+
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(r2));
ASSERT(ToRegister(instr->key()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
- Handle<Code> ic(Builtins::builtin(
- info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ __ push(ToRegister(instr->left()));
+ __ push(ToRegister(instr->right()));
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
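// [Editor's note, not part of the patch] The operands travel on the stack:
// left is pushed first, then right, and the stub pops both. Presumably
// NO_STRING_CHECK_IN_STUB is safe here because hydrogen only emits this
// instruction for inputs already known to be strings.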
+
+
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
class DeferredStringCharCodeAt: public LDeferredCode {
public:
@@ -3039,6 +3632,53 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
}
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode: public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new DeferredStringCharFromCode(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+ ASSERT(!char_code.is(result));
+
+ __ cmp(char_code, Operand(String::kMaxAsciiCharCode));
+ __ b(hi, deferred->entry());
+ __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+ __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
+ __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(result, ip);
+ __ b(eq, deferred->entry());
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, Operand(0));
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ SmiTag(char_code);
+ __ push(char_code);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ __ StoreToSafepointRegisterSlot(r0, result);
+}
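// [Editor's note, not part of the patch] The inline fast path only covers
// char codes up to String::kMaxAsciiCharCode (127), which index the
// single-character string cache; a larger code, or a cache slot still
// holding undefined, lands in this deferred call. For example,
// String.fromCharCode(0x20AC) always takes the Runtime::kCharFromCode path.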
+
+
void LCodeGen::DoStringLength(LStringLength* instr) {
Register string = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -3087,8 +3727,8 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
Label slow;
Register reg = ToRegister(instr->InputAt(0));
- DoubleRegister dbl_scratch = d0;
- SwVfpRegister flt_scratch = s0;
+ DoubleRegister dbl_scratch = double_scratch0();
+ SwVfpRegister flt_scratch = dbl_scratch.low();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
@@ -3182,44 +3822,52 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
- __ tst(ToRegister(input), Operand(kSmiTagMask));
- DeoptimizeIf(ne, instr->environment());
+ ASSERT(kHeapObjectTag == 1);
+ // If the input is a HeapObject, SmiUntag will set the carry flag.
+ __ SmiUntag(ToRegister(input), SetCC);
+ DeoptimizeIf(cs, instr->environment());
+ } else {
+ __ SmiUntag(ToRegister(input));
}
- __ SmiUntag(ToRegister(input));
}
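// [Editor's note, not part of the patch] Why the carry trick works: smis
// keep their payload in the upper 31 bits with tag bit 0 == 0, while heap
// pointers have bit 0 == 1 (kHeapObjectTag). SmiUntag with SetCC is an
// arithmetic shift right by one, so the tag bit lands in the carry flag.
// Worked example (assumed values):
//   smi 10        -> bits 0x00000014, ASR #1 -> 10,     carry 0: proceed
//   heap ptr p|1  -> ASR #1          -> p >> 1,         carry 1: deoptimize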
void LCodeGen::EmitNumberUntagD(Register input_reg,
DoubleRegister result_reg,
+ bool deoptimize_on_undefined,
LEnvironment* env) {
Register scratch = scratch0();
- SwVfpRegister flt_scratch = s0;
- ASSERT(!result_reg.is(d0));
+ SwVfpRegister flt_scratch = double_scratch0().low();
+ ASSERT(!result_reg.is(double_scratch0()));
Label load_smi, heap_number, done;
// Smi check.
- __ tst(input_reg, Operand(kSmiTagMask));
- __ b(eq, &load_smi);
+ __ JumpIfSmi(input_reg, &load_smi);
// Heap number map check.
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
- __ b(eq, &heap_number);
+ if (deoptimize_on_undefined) {
+ DeoptimizeIf(ne, env);
+ } else {
+ Label heap_number;
+ __ b(eq, &heap_number);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, env);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(input_reg, Operand(ip));
+ DeoptimizeIf(ne, env);
- // Convert undefined to NaN.
- __ LoadRoot(ip, Heap::kNanValueRootIndex);
- __ sub(ip, ip, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
- __ jmp(&done);
+ // Convert undefined to NaN.
+ __ LoadRoot(ip, Heap::kNanValueRootIndex);
+ __ sub(ip, ip, Operand(kHeapObjectTag));
+ __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+ __ jmp(&done);
+ __ bind(&heap_number);
+ }
// Heap number to double register conversion.
- __ bind(&heap_number);
__ sub(ip, input_reg, Operand(kHeapObjectTag));
__ vldr(result_reg, ip, HeapNumber::kValueOffset);
__ jmp(&done);
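// [Editor's note, not part of the patch] With deoptimize_on_undefined set,
// any non-heap-number input, undefined included, bails out immediately;
// otherwise undefined is accepted and converted to NaN via the canonical
// NaN root, so an expression like undefined + 0.5 can stay on the double
// code path and produce NaN without deoptimizing.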
@@ -3245,19 +3893,36 @@ class DeferredTaggedToI: public LDeferredCode {
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Label done;
Register input_reg = ToRegister(instr->InputAt(0));
- Register scratch = scratch0();
- DoubleRegister dbl_scratch = d0;
- SwVfpRegister flt_scratch = s0;
- DoubleRegister dbl_tmp = ToDoubleRegister(instr->TempAt(0));
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
+ DwVfpRegister double_scratch = double_scratch0();
+ SwVfpRegister single_scratch = double_scratch.low();
+
+ ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+ ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+ Label done;
+
+ // The input was optimistically untagged; revert it.
+ // The carry flag is set when we reach this deferred code, as we have just
+ // executed SmiUntag(heap_object, SetCC).
+ ASSERT(kHeapObjectTag == 1);
+ __ adc(input_reg, input_reg, Operand(input_reg));
// Heap number map check.
- __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, Operand(ip));
+ __ cmp(scratch1, Operand(ip));
if (instr->truncating()) {
+ Register scratch3 = ToRegister(instr->TempAt(1));
+ DwVfpRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
+ ASSERT(!scratch3.is(input_reg) &&
+ !scratch3.is(scratch1) &&
+ !scratch3.is(scratch2));
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations.
Label heap_number;
__ b(eq, &heap_number);
// Check for undefined. Undefined is converted to zero for truncating
@@ -3269,36 +3934,38 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ b(&done);
__ bind(&heap_number);
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(dbl_tmp, ip, HeapNumber::kValueOffset);
- __ vcmp(dbl_tmp, 0.0); // Sets overflow bit in FPSCR flags if NaN.
- __ vcvt_s32_f64(flt_scratch, dbl_tmp);
- __ vmov(input_reg, flt_scratch); // 32-bit result of conversion.
- __ vmrs(pc); // Move vector status bits to normal status bits.
- // Overflow bit is set if dbl_tmp is Nan.
- __ cmn(input_reg, Operand(1), vc); // 0x7fffffff + 1 -> overflow.
- __ cmp(input_reg, Operand(1), vc); // 0x80000000 - 1 -> overflow.
- DeoptimizeIf(vs, instr->environment()); // Saturation may have occurred.
+ __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
+ __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
+
+ __ EmitECMATruncate(input_reg,
+ double_scratch2,
+ single_scratch,
+ scratch1,
+ scratch2,
+ scratch3);
} else {
+ CpuFeatures::Scope scope(VFP3);
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr->environment());
__ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(dbl_tmp, ip, HeapNumber::kValueOffset);
- __ vcvt_s32_f64(flt_scratch, dbl_tmp);
- __ vmov(input_reg, flt_scratch); // 32-bit result of conversion.
- // Non-truncating conversion means that we cannot lose bits, so we convert
- // back to check; note that using non-overlapping s and d regs would be
- // slightly faster.
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
- __ VFPCompareAndSetFlags(dbl_scratch, dbl_tmp);
- DeoptimizeIf(ne, instr->environment()); // Not equal or unordered.
+ __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
+ __ EmitVFPTruncate(kRoundToZero,
+ single_scratch,
+ double_scratch,
+ scratch1,
+ scratch2,
+ kCheckForInexactConversion);
+ DeoptimizeIf(ne, instr->environment());
+ // Load the result.
+ __ vmov(input_reg, single_scratch);
+
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ tst(input_reg, Operand(input_reg));
+ __ cmp(input_reg, Operand(0));
__ b(ne, &done);
- __ vmov(lr, ip, dbl_tmp);
- __ tst(ip, Operand(1 << 31)); // Test sign bit.
+ __ vmov(scratch1, double_scratch.high());
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
}
}
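// [Editor's note, not part of the patch] Two details worth spelling out.
// The adc near the top reverts the optimistic untag: the carry flag still
// holds the tag bit shifted out by SmiUntag, so for a pointer p (bit 0 set)
//   (p >> 1) * 2 + carry == p
// restores the tagged value bit-exactly. And EmitECMATruncate implements
// the ES5 ToInt32 conversion (modulo 2^32, NaN and infinities become 0),
// e.g. ToInt32(2147483648.0) == -2147483648, while the non-truncating
// branch deoptimizes on any inexact conversion instead.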
@@ -3315,13 +3982,12 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
- // Smi check.
- __ tst(input_reg, Operand(kSmiTagMask));
- __ b(ne, deferred->entry());
-
- // Smi to int32 conversion
- __ SmiUntag(input_reg); // Untag smi.
-
+ // Optimistically untag the input.
+ // If the input is a HeapObject, SmiUntag will set the carry flag.
+ __ SmiUntag(input_reg, SetCC);
+ // Branch to deferred code if the input was tagged.
+ // The deferred code will take care of restoring the tag.
+ __ b(cs, deferred->entry());
__ bind(deferred->exit());
}
@@ -3335,83 +4001,100 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input_reg = ToRegister(input);
DoubleRegister result_reg = ToDoubleRegister(result);
- EmitNumberUntagD(input_reg, result_reg, instr->environment());
+ EmitNumberUntagD(input_reg, result_reg,
+ instr->hydrogen()->deoptimize_on_undefined(),
+ instr->environment());
}
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsDoubleRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsRegister());
-
- DoubleRegister double_input = ToDoubleRegister(input);
- Register result_reg = ToRegister(result);
- SwVfpRegister single_scratch = double_scratch0().low();
+ Register result_reg = ToRegister(instr->result());
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->TempAt(0));
+ DwVfpRegister double_input = ToDoubleRegister(instr->InputAt(0));
+ DwVfpRegister double_scratch = double_scratch0();
+ SwVfpRegister single_scratch = double_scratch0().low();
- __ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- double_input,
- scratch1,
- scratch2);
-
- // Deoptimize if we had a vfp invalid exception.
- DeoptimizeIf(ne, instr->environment());
-
- // Retrieve the result.
- __ vmov(result_reg, single_scratch);
+ Label done;
- if (!instr->truncating()) {
- // Convert result back to double and compare with input
- // to check if the conversion was exact.
- __ vmov(single_scratch, result_reg);
- __ vcvt_f64_s32(double_scratch0(), single_scratch);
- __ VFPCompareAndSetFlags(double_scratch0(), double_input);
+ if (instr->truncating()) {
+ Register scratch3 = ToRegister(instr->TempAt(1));
+ __ EmitECMATruncate(result_reg,
+ double_input,
+ single_scratch,
+ scratch1,
+ scratch2,
+ scratch3);
+ } else {
+ VFPRoundingMode rounding_mode = kRoundToMinusInf;
+ __ EmitVFPTruncate(rounding_mode,
+ single_scratch,
+ double_input,
+ scratch1,
+ scratch2,
+ kCheckForInexactConversion);
+ // Deoptimize if we had a vfp invalid exception,
+ // including inexact operation.
DeoptimizeIf(ne, instr->environment());
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- __ cmp(result_reg, Operand(0));
- __ b(ne, &done);
- // Check for -0.
- __ vmov(scratch1, double_input.high());
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
-
- __ bind(&done);
- }
+ // Retrieve the result.
+ __ vmov(result_reg, single_scratch);
}
+ __ bind(&done);
}
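// [Editor's note, not part of the patch] In the non-truncating branch the
// kCheckForInexactConversion flag replaces the old convert-back-and-compare
// sequence: EmitVFPTruncate sets the condition flags so that ne holds on an
// invalid or inexact result, so 2.0 converts to 2 while 1.5 triggers a
// deopt without ever materializing the rounded value.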
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
__ tst(ToRegister(input), Operand(kSmiTagMask));
- DeoptimizeIf(instr->condition(), instr->environment());
+ DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ LOperand* input = instr->InputAt(0);
+ __ tst(ToRegister(input), Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment());
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
Register input = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
- InstanceType first = instr->hydrogen()->first();
- InstanceType last = instr->hydrogen()->last();
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(first));
- // If there is only one type in the interval check for equality.
- if (first == last) {
- DeoptimizeIf(ne, instr->environment());
+ if (instr->hydrogen()->is_interval_check()) {
+ InstanceType first;
+ InstanceType last;
+ instr->hydrogen()->GetCheckInterval(&first, &last);
+
+ __ cmp(scratch, Operand(first));
+
+ // If there is only one type in the interval check for equality.
+ if (first == last) {
+ DeoptimizeIf(ne, instr->environment());
+ } else {
+ DeoptimizeIf(lo, instr->environment());
+ // Omit check for the last type.
+ if (last != LAST_TYPE) {
+ __ cmp(scratch, Operand(last));
+ DeoptimizeIf(hi, instr->environment());
+ }
+ }
} else {
- DeoptimizeIf(lo, instr->environment());
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmp(scratch, Operand(last));
- DeoptimizeIf(hi, instr->environment());
+ uint8_t mask;
+ uint8_t tag;
+ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+ if (IsPowerOf2(mask)) {
+ ASSERT(tag == 0 || IsPowerOf2(tag));
+ __ tst(scratch, Operand(mask));
+ DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
+ } else {
+ __ and_(scratch, scratch, Operand(mask));
+ __ cmp(scratch, Operand(tag));
+ DeoptimizeIf(ne, instr->environment());
}
}
}
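// [Editor's note, not part of the patch] The mask/tag form covers checks
// like "is a string". With the (assumed) constants kIsNotStringMask == 0x80
// and kStringTag == 0x0, the mask is a power of two and the tag is zero, so
// the single instruction
//   tst scratch, #0x80   ; deoptimize on ne
// replaces the general and/cmp pair emitted in the fallback branch.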
@@ -3436,11 +4119,64 @@ void LCodeGen::DoCheckMap(LCheckMap* instr) {
}
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+ __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ Register unclamped_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ __ ClampUint8(result_reg, unclamped_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ Register scratch = scratch0();
+ Register input_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+ Label is_smi, done, heap_number;
+
+ // Both smi and heap number cases are handled.
+ __ JumpIfSmi(input_reg, &is_smi);
+
+ // Check for heap number
+ __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ cmp(scratch, Operand(factory()->heap_number_map()));
+ __ b(eq, &heap_number);
+
+ // Check for undefined. Undefined is converted to zero for clamping
+ // conversions.
+ __ cmp(input_reg, Operand(factory()->undefined_value()));
+ DeoptimizeIf(ne, instr->environment());
+ __ mov(result_reg, Operand(0));
+ __ jmp(&done);
+
+ // Heap number
+ __ bind(&heap_number);
+ __ vldr(double_scratch0(), FieldMemOperand(input_reg,
+ HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+ __ jmp(&done);
+
+ // smi
+ __ bind(&is_smi);
+ __ SmiUntag(result_reg, input_reg);
+ __ ClampUint8(result_reg, result_reg);
+
+ __ bind(&done);
+}
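// [Editor's note, not part of the patch] The clamp maps every input into
// [0, 255] as clamped-array and pixel stores require: negative values
// become 0, values above 255 become 255, in-range doubles are rounded to an
// integer, and (per the code above) undefined clamps to 0 rather than
// deoptimizing.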
+
+
void LCodeGen::LoadHeapObject(Register result,
Handle<HeapObject> object) {
- if (Heap::InNewSpace(*object)) {
+ if (heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
- Factory::NewJSGlobalPropertyCell(object);
+ factory()->NewJSGlobalPropertyCell(object);
__ mov(result, Operand(cell));
__ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
} else {
@@ -3522,6 +4258,13 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
}
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+ ASSERT(ToRegister(instr->InputAt(0)).is(r0));
+ __ push(r0);
+ CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
Label materialized;
// Registers will be used as follows:
@@ -3582,16 +4325,17 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
- if (shared_info->num_literals() == 0 && !pretenure) {
- FastNewClosureStub stub;
+ if (!pretenure && shared_info->num_literals() == 0) {
+ FastNewClosureStub stub(
+ shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
__ mov(r1, Operand(shared_info));
__ push(r1);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
__ mov(r2, Operand(shared_info));
__ mov(r1, Operand(pretenure
- ? Factory::true_value()
- : Factory::false_value()));
+ ? factory()->true_value()
+ : factory()->false_value()));
__ Push(cp, r2, r1);
CallRuntime(Runtime::kNewClosure, 3, instr);
}
@@ -3650,71 +4394,56 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Handle<String> type_name) {
Condition final_branch_condition = kNoCondition;
Register scratch = scratch0();
- if (type_name->Equals(Heap::number_symbol())) {
- __ tst(input, Operand(kSmiTagMask));
- __ b(eq, true_label);
+ if (type_name->Equals(heap()->number_symbol())) {
+ __ JumpIfSmi(input, true_label);
__ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(input, Operand(ip));
final_branch_condition = eq;
- } else if (type_name->Equals(Heap::string_symbol())) {
- __ tst(input, Operand(kSmiTagMask));
- __ b(eq, false_label);
- __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
+ } else if (type_name->Equals(heap()->string_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
+ __ b(ge, false_label);
__ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsUndetectable));
- __ b(ne, false_label);
- __ CompareInstanceType(input, scratch, FIRST_NONSTRING_TYPE);
- final_branch_condition = lo;
+ final_branch_condition = eq;
- } else if (type_name->Equals(Heap::boolean_symbol())) {
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(input, ip);
+ } else if (type_name->Equals(heap()->boolean_symbol())) {
+ __ CompareRoot(input, Heap::kTrueValueRootIndex);
__ b(eq, true_label);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(input, ip);
+ __ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = eq;
- } else if (type_name->Equals(Heap::undefined_symbol())) {
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input, ip);
+ } else if (type_name->Equals(heap()->undefined_symbol())) {
+ __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ b(eq, true_label);
- __ tst(input, Operand(kSmiTagMask));
- __ b(eq, false_label);
+ __ JumpIfSmi(input, false_label);
// Check for undetectable objects => true.
__ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
__ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsUndetectable));
final_branch_condition = ne;
- } else if (type_name->Equals(Heap::function_symbol())) {
- __ tst(input, Operand(kSmiTagMask));
- __ b(eq, false_label);
- __ CompareObjectType(input, input, scratch, JS_FUNCTION_TYPE);
- __ b(eq, true_label);
- // Regular expressions => 'function' (they are callable).
- __ CompareInstanceType(input, scratch, JS_REGEXP_TYPE);
- final_branch_condition = eq;
+ } else if (type_name->Equals(heap()->function_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ CompareObjectType(input, input, scratch,
+ FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+ final_branch_condition = ge;
- } else if (type_name->Equals(Heap::object_symbol())) {
- __ tst(input, Operand(kSmiTagMask));
- __ b(eq, false_label);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(input, ip);
+ } else if (type_name->Equals(heap()->object_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
__ b(eq, true_label);
- // Regular expressions => 'function', not 'object'.
- __ CompareObjectType(input, input, scratch, JS_REGEXP_TYPE);
- __ b(eq, false_label);
+ __ CompareObjectType(input, input, scratch,
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ b(lt, false_label);
+ __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ b(gt, false_label);
// Check for undetectable objects => false.
__ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsUndetectable));
- __ b(ne, false_label);
- // Check for JS objects => true.
- __ CompareInstanceType(input, scratch, FIRST_JS_OBJECT_TYPE);
- __ b(lo, false_label);
- __ CompareInstanceType(input, scratch, LAST_JS_OBJECT_TYPE);
- final_branch_condition = ls;
+ final_branch_condition = eq;
} else {
final_branch_condition = ne;
@@ -3800,19 +4529,62 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
SafepointGenerator safepoint_generator(this,
pointers,
env->deoptimization_index());
- __ InvokeBuiltin(Builtins::DELETE, CALL_JS, &safepoint_generator);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoIn(LIn* instr) {
+ Register obj = ToRegister(instr->object());
+ Register key = ToRegister(instr->key());
+ __ Push(key, obj);
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ SafepointGenerator safepoint_generator(this,
+ pointers,
+ env->deoptimization_index());
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
}
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- // Perform stack overflow check.
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ bind(&ok);
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ private:
+ LStackCheck* instr_;
+ };
+
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &done);
+ StackCheckStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new DeferredStackCheck(this, instr);
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(lo, deferred_stack_check->entry());
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ }
}
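// [Editor's note, not part of the patch] The split keeps the common case
// cheap: a function-entry check runs once per invocation, so the stub call
// stays inline, while a back-edge check runs on every loop iteration, so
// its stub call moves into deferred code and the hot path costs only the
// LoadRoot/cmp plus an untaken branch.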
@@ -3833,6 +4605,8 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 393b6423e..ead848903 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -51,9 +51,10 @@ class LCodeGen BASE_EMBEDDED {
current_instruction_(-1),
instructions_(chunk->instructions()),
deoptimizations_(4),
+ deopt_jump_table_(4),
deoptimization_literals_(8),
inlined_function_count_(0),
- scope_(chunk->graph()->info()->scope()),
+ scope_(info->scope()),
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
@@ -65,6 +66,10 @@ class LCodeGen BASE_EMBEDDED {
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
+ CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info_->isolate(); }
+ Factory* factory() const { return isolate()->factory(); }
+ Heap* heap() const { return isolate()->heap(); }
// Support for converting LOperands to assembler types.
// LOperand must be a register.
@@ -103,13 +108,15 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LGoto* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
+ void DoGap(LGap* instr);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -133,7 +140,7 @@ class LCodeGen BASE_EMBEDDED {
bool is_aborted() const { return status_ == ABORTED; }
int strict_mode_flag() const {
- return info_->is_strict() ? kStrictMode : kNonStrictMode;
+ return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
}
LChunk* chunk() const { return chunk_; }
@@ -141,7 +148,7 @@ class LCodeGen BASE_EMBEDDED {
HGraph* graph() const { return chunk_->graph(); }
Register scratch0() { return r9; }
- DwVfpRegister double_scratch0() { return d0; }
+ DwVfpRegister double_scratch0() { return d15; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@@ -153,8 +160,8 @@ class LCodeGen BASE_EMBEDDED {
Register temporary,
Register temporary2);
- int StackSlotCount() const { return chunk()->spill_slot_count(); }
- int ParameterCount() const { return scope()->num_parameters(); }
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+ int GetParameterCount() const { return scope()->num_parameters(); }
void Abort(const char* format, ...);
void Comment(const char* format, ...);
@@ -166,6 +173,7 @@ class LCodeGen BASE_EMBEDDED {
bool GeneratePrologue();
bool GenerateBody();
bool GenerateDeferredCode();
+ bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
enum SafepointMode {
@@ -182,14 +190,14 @@ class LCodeGen BASE_EMBEDDED {
LInstruction* instr,
SafepointMode safepoint_mode);
- void CallRuntime(Runtime::Function* function,
+ void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
- Runtime::Function* function = Runtime::FunctionForId(id);
+ const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
@@ -201,7 +209,8 @@ class LCodeGen BASE_EMBEDDED {
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
int arity,
- LInstruction* instr);
+ LInstruction* instr,
+ CallKind call_kind);
void LoadHeapObject(Register result, Handle<HeapObject> object);
@@ -228,6 +237,10 @@ class LCodeGen BASE_EMBEDDED {
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
+ void DoMathPowHalf(LUnaryMathOperation* instr);
+ void DoMathLog(LUnaryMathOperation* instr);
+ void DoMathCos(LUnaryMathOperation* instr);
+ void DoMathSin(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
@@ -243,13 +256,17 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
int deoptimization_index);
void RecordPosition(int position);
+ int LastSafepointEnd() {
+ return static_cast<int>(safepoints_.GetPcAfterGap());
+ }
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+ void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
DoubleRegister result,
+ bool deoptimize_on_undefined,
LEnvironment* env);
// Emits optimized code for typeof x == "y". Modifies input register.
@@ -263,7 +280,6 @@ class LCodeGen BASE_EMBEDDED {
// true and false label should be made, to optimize fallthrough.
Condition EmitIsObject(Register input,
Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object);
@@ -271,6 +287,19 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
+ void EmitLoadFieldOrConstantFunction(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name);
+
+ struct JumpTableEntry {
+ explicit inline JumpTableEntry(Address entry)
+ : label(),
+ address(entry) { }
+ Label label;
+ Address address;
+ };
+
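// [Editor's note, not part of the patch] Each entry pairs a label with a
// deoptimization entry address: bailout sites branch to the label, and the
// shared table emitted by GenerateDeoptJumpTable performs the actual jump,
// instead of materializing a target address at every deopt point.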
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@@ -279,6 +308,7 @@ class LCodeGen BASE_EMBEDDED {
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
index 1a2326b74..02608a695 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
#include "arm/lithium-gap-resolver-arm.h"
#include "arm/lithium-codegen-arm.h"
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 9340b61dd..49282b871 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -32,18 +32,21 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
#include "runtime.h"
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(void* buffer, int size)
- : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+ : Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
- code_object_(Heap::undefined_value()) {
+ allow_stub_calls_(true) {
+ if (isolate() != NULL) {
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
}
@@ -80,7 +83,7 @@ void MacroAssembler::Jump(Register target, Condition cond) {
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
#if USE_BX
- mov(ip, Operand(target, rmode), LeaveCC, cond);
+ mov(ip, Operand(target, rmode));
bx(ip, cond);
#else
mov(pc, Operand(target, rmode), LeaveCC, cond);
@@ -103,7 +106,22 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
}
+int MacroAssembler::CallSize(Register target, Condition cond) {
+#if USE_BLX
+ return kInstrSize;
+#else
+ return 2 * kInstrSize;
+#endif
+}
+
+
void MacroAssembler::Call(Register target, Condition cond) {
+ // Block constant pool for the call instruction sequence.
+ BlockConstPoolScope block_const_pool(this);
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
#if USE_BLX
blx(target, cond);
#else
@@ -111,29 +129,47 @@ void MacroAssembler::Call(Register target, Condition cond) {
mov(lr, Operand(pc), LeaveCC, cond);
mov(pc, Operand(target), LeaveCC, cond);
#endif
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(target, cond), post_position);
+#endif
}
-void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
+int MacroAssembler::CallSize(
+ intptr_t target, RelocInfo::Mode rmode, Condition cond) {
+ int size = 2 * kInstrSize;
+ Instr mov_instr = cond | MOV | LeaveCC;
+ if (!Operand(target, rmode).is_single_instruction(mov_instr)) {
+ size += kInstrSize;
+ }
+ return size;
+}
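// [Editor's note, not part of the patch] The accounting above: loading the
// target normally takes one instruction (a constant-pool ldr or single
// mov), the blx takes another, and when the operand cannot be encoded in a
// single mov (presumably a movw/movt pair) one more instruction is added.
// Callers need the exact size, e.g. to know the return address offset
// before the call is emitted.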
+
+
+void MacroAssembler::Call(intptr_t target,
+ RelocInfo::Mode rmode,
Condition cond) {
+ // Block constant pool for the call instruction sequence.
+ BlockConstPoolScope block_const_pool(this);
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
#if USE_BLX
// On ARMv5 and later, the recommended call sequence is:
// ldr ip, [pc, #...]
// blx ip
- // The two instructions (ldr and blx) could be separated by a constant
- // pool and the code would still work. The issue comes from the
- // patching code which expect the ldr to be just above the blx.
- { BlockConstPoolScope block_const_pool(this);
- // Statement positions are expected to be recorded when the target
- // address is loaded. The mov method will automatically record
- // positions when pc is the target, since this is not the case here
- // we have to do it explicitly.
- positions_recorder()->WriteRecordedPositions();
+ // Statement positions are expected to be recorded when the target
+ // address is loaded. The mov method automatically records positions
+ // when pc is the target; since that is not the case here, we record
+ // them explicitly.
+ positions_recorder()->WriteRecordedPositions();
- mov(ip, Operand(target, rmode), LeaveCC, cond);
- blx(ip, cond);
- }
+ mov(ip, Operand(target, rmode));
+ blx(ip, cond);
ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
#else
@@ -141,24 +177,81 @@ void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool].
mov(pc, Operand(target, rmode), LeaveCC, cond);
-
ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
+#endif
}
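// [Editor's note, not part of the patch] Hoisting BlockConstPoolScope over
// the whole sequence keeps the ldr and blx adjacent, which the call-site
// patching code requires, and it also keeps the DEBUG pre/post pc_offset
// CHECK honest: a constant pool emitted mid-sequence would silently break
// the CallSize bookkeeping.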
-void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
- Condition cond) {
+int MacroAssembler::CallSize(
+ byte* target, RelocInfo::Mode rmode, Condition cond) {
+ return CallSize(reinterpret_cast<intptr_t>(target), rmode);
+}
+
+
+void MacroAssembler::Call(
+ byte* target, RelocInfo::Mode rmode, Condition cond) {
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
ASSERT(!RelocInfo::IsCodeTarget(rmode));
Call(reinterpret_cast<intptr_t>(target), rmode, cond);
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
+#endif
+}
+
+
+int MacroAssembler::CallSize(
+ Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
+ return CallSize(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+}
+
+
+void MacroAssembler::CallWithAstId(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id,
+ Condition cond) {
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
+ ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
+ ASSERT(ast_id != kNoASTId);
+ ASSERT(ast_id_for_reloc_info_ == kNoASTId);
+ ast_id_for_reloc_info_ = ast_id;
+ // 'code' is always generated ARM code, never THUMB code
+ Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
+#endif
}
-void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+void MacroAssembler::Call(Handle<Code> code,
+ RelocInfo::Mode rmode,
Condition cond) {
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
ASSERT(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ARM code, never THUMB code
Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
+#endif
}
@@ -205,14 +298,29 @@ void MacroAssembler::Call(Label* target) {
}
+void MacroAssembler::Push(Handle<Object> handle) {
+ mov(ip, Operand(handle));
+ push(ip);
+}
+
+
void MacroAssembler::Move(Register dst, Handle<Object> value) {
mov(dst, Operand(value));
}
-void MacroAssembler::Move(Register dst, Register src) {
+void MacroAssembler::Move(Register dst, Register src, Condition cond) {
+ if (!dst.is(src)) {
+ mov(dst, src, LeaveCC, cond);
+ }
+}
+
+
+void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+ ASSERT(CpuFeatures::IsSupported(VFP3));
+ CpuFeatures::Scope scope(VFP3);
if (!dst.is(src)) {
- mov(dst, src);
+ vmov(dst, src);
}
}
@@ -228,7 +336,8 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
!src2.must_use_constant_pool() &&
CpuFeatures::IsSupported(ARMv7) &&
IsPowerOf2(src2.immediate() + 1)) {
- ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond);
+ ubfx(dst, src1, 0,
+ WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
} else {
and_(dst, src1, src2, LeaveCC, cond);
@@ -336,20 +445,6 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
}
-void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
- // Empty the const pool.
- CheckConstPool(true, true);
- add(pc, pc, Operand(index,
- LSL,
- Instruction::kInstrSizeLog2 - kSmiTagSize));
- BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
- nop(); // Jump table alignment.
- for (int i = 0; i < targets.length(); i++) {
- b(targets[i]);
- }
-}
-
-
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
@@ -367,7 +462,7 @@ void MacroAssembler::StoreRoot(Register source,
void MacroAssembler::RecordWriteHelper(Register object,
Register address,
Register scratch) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check that the object is not in new space.
Label not_in_new_space;
InNewSpace(object, scratch, ne, &not_in_new_space);
@@ -395,8 +490,8 @@ void MacroAssembler::InNewSpace(Register object,
Condition cond,
Label* branch) {
ASSERT(cond == eq || cond == ne);
- and_(scratch, object, Operand(ExternalReference::new_space_mask()));
- cmp(scratch, Operand(ExternalReference::new_space_start()));
+ and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
+ cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
b(cond, branch);
}
@@ -429,7 +524,7 @@ void MacroAssembler::RecordWrite(Register object,
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(object, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
@@ -461,7 +556,7 @@ void MacroAssembler::RecordWrite(Register object,
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(object, Operand(BitCast<int32_t>(kZapValue)));
mov(address, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
@@ -552,19 +647,36 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2,
ASSERT_EQ(0, dst1.code() % 2);
ASSERT_EQ(dst1.code() + 1, dst2.code());
+ // V8 does not use this addressing mode, so the fallback code
+ // below doesn't support it yet.
+ ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
+
// Generate two ldr instructions if ldrd is not available.
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
ldrd(dst1, dst2, src, cond);
} else {
- MemOperand src2(src);
- src2.set_offset(src2.offset() + 4);
- if (dst1.is(src.rn())) {
- ldr(dst2, src2, cond);
- ldr(dst1, src, cond);
- } else {
- ldr(dst1, src, cond);
- ldr(dst2, src2, cond);
+ if ((src.am() == Offset) || (src.am() == NegOffset)) {
+ MemOperand src2(src);
+ src2.set_offset(src2.offset() + 4);
+ if (dst1.is(src.rn())) {
+ ldr(dst2, src2, cond);
+ ldr(dst1, src, cond);
+ } else {
+ ldr(dst1, src, cond);
+ ldr(dst2, src2, cond);
+ }
+ } else { // PostIndex or NegPostIndex.
+ ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
+ if (dst1.is(src.rn())) {
+ ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
+ ldr(dst1, src, cond);
+ } else {
+ MemOperand src2(src);
+ src2.set_offset(src2.offset() - 4);
+ ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
+ ldr(dst2, src2, cond);
+ }
}
}
}
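// [Editor's note, not part of the patch] Worked example of the post-index
// fallback on pre-ARMv7 cores, assuming dst1 is not the base register:
//   Ldrd(r0, r1, MemOperand(r2, 8, PostIndex))
// expands to
//   ldr r0, [r2], #4
//   ldr r1, [r2], #4
// so the two post-index writebacks add up to the requested 8 bytes.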
@@ -577,15 +689,26 @@ void MacroAssembler::Strd(Register src1, Register src2,
ASSERT_EQ(0, src1.code() % 2);
ASSERT_EQ(src1.code() + 1, src2.code());
+ // V8 does not use this addressing mode, so the fallback code
+ // below doesn't support it yet.
+ ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
+
// Generate two str instructions if strd is not available.
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
strd(src1, src2, dst, cond);
} else {
MemOperand dst2(dst);
- dst2.set_offset(dst2.offset() + 4);
- str(src1, dst, cond);
- str(src2, dst2, cond);
+ if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
+ dst2.set_offset(dst2.offset() + 4);
+ str(src1, dst, cond);
+ str(src2, dst2, cond);
+ } else { // PostIndex or NegPostIndex.
+ ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
+ dst2.set_offset(dst2.offset() - 4);
+ str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
+ str(src2, dst2, cond);
+ }
}
}
@@ -632,6 +755,23 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
vmrs(fpscr_flags, cond);
}
+void MacroAssembler::Vmov(const DwVfpRegister dst,
+ const double imm,
+ const Condition cond) {
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ static const DoubleRepresentation minus_zero(-0.0);
+ static const DoubleRepresentation zero(0.0);
+ DoubleRepresentation value(imm);
+ // Handle special values first.
+ if (value.bits == zero.bits) {
+ vmov(dst, kDoubleRegZero, cond);
+ } else if (value.bits == minus_zero.bits) {
+ vneg(dst, kDoubleRegZero, cond);
+ } else {
+ vmov(dst, imm, cond);
+ }
+}
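// [Editor's note, not part of the patch] Comparing bit patterns is what
// makes the -0.0 case work: 0.0 == -0.0 as doubles, but their
// representations differ in the sign bit, so +0.0 copies kDoubleRegZero
// directly while -0.0 is produced as vneg(kDoubleRegZero); everything else
// still goes through the general vmov-immediate path.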
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
// r0-r3: preserved
@@ -665,7 +805,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
mov(fp, Operand(sp)); // Setup new frame pointer.
// Reserve room for saved entry sp and code object.
sub(sp, sp, Operand(2 * kPointerSize));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(ip, Operand(0));
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -673,19 +813,17 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
- mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
str(fp, MemOperand(ip));
- mov(ip, Operand(ExternalReference(Top::k_context_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate())));
str(cp, MemOperand(ip));
// Optionally save all double registers.
if (save_doubles) {
- sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
- const int offset = -2 * kPointerSize;
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- vstr(reg, fp, offset - ((i + 1) * kDoubleSize));
- }
+ DwVfpRegister first = d0;
+ DwVfpRegister last =
+ DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
+ vstm(db_w, sp, first, last);
// Note that d0 will be accessible at
// fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
// since the sp slot and code slot were pushed after the fp.
@@ -742,20 +880,22 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count) {
// Optionally restore all double registers.
if (save_doubles) {
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- const int offset = -2 * kPointerSize;
- vldr(reg, fp, offset - ((i + 1) * kDoubleSize));
- }
+ // Calculate the stack location of the saved doubles and restore them.
+ const int offset = 2 * kPointerSize;
+ sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
+ DwVfpRegister first = d0;
+ DwVfpRegister last =
+ DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
+ vldm(ia, r3, first, last);
}
// Clear top frame.
mov(r3, Operand(0, RelocInfo::NONE));
- mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
str(r3, MemOperand(ip));
// Restore current context from top and clear it in debug mode.
- mov(ip, Operand(ExternalReference(Top::k_context_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate())));
ldr(cp, MemOperand(ip));
#ifdef DEBUG
str(r3, MemOperand(ip));
@@ -770,11 +910,25 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
-#if !defined(USE_ARM_EABI)
- UNREACHABLE();
-#else
- vmov(dst, r0, r1);
-#endif
+ if (use_eabi_hardfloat()) {
+ Move(dst, d0);
+ } else {
+ vmov(dst, r0, r1);
+ }
+}
+
+
+void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
+ // This macro takes the dst register to make the code more readable
+ // at the call sites. However, the dst register has to be r5 to
+ // follow the calling convention which requires the call type to be
+ // in r5.
+ ASSERT(dst.is(r5));
+ if (call_kind == CALL_AS_FUNCTION) {
+ mov(dst, Operand(Smi::FromInt(1)));
+ } else {
+ mov(dst, Operand(Smi::FromInt(0)));
+ }
}
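// [Editor's note, not part of the patch] The callee inspects r5 to tell the
// two kinds apart: Smi 1 means CALL_AS_FUNCTION and Smi 0 the other enum
// value (CALL_AS_METHOD), presumably so the callee can apply the right
// implicit-receiver rules for the call.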
@@ -784,7 +938,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Register code_reg,
Label* done,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
bool definitely_matches = false;
Label regular_invoke;
@@ -837,12 +992,15 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
Handle<Code> adaptor =
- Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+ SetCallKind(r5, call_kind);
Call(adaptor, RelocInfo::CODE_TARGET);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ call_wrapper.AfterCall();
b(done);
} else {
+ SetCallKind(r5, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
}
bind(&regular_invoke);
@@ -854,16 +1012,20 @@ void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
- post_call_generator);
+ call_wrapper, call_kind);
if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ SetCallKind(r5, call_kind);
Call(code);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(r5, call_kind);
Jump(code);
}
@@ -877,13 +1039,17 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ CallKind call_kind) {
Label done;
- InvokePrologue(expected, actual, code, no_reg, &done, flag);
+ InvokePrologue(expected, actual, code, no_reg, &done, flag,
+ NullCallWrapper(), call_kind);
if (flag == CALL_FUNCTION) {
+ SetCallKind(r5, call_kind);
Call(code, rmode);
} else {
+ SetCallKind(r5, call_kind);
Jump(code, rmode);
}
@@ -896,7 +1062,8 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
// Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1));
@@ -913,13 +1080,14 @@ void MacroAssembler::InvokeFunction(Register fun,
FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, post_call_generator);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
}
void MacroAssembler::InvokeFunction(JSFunction* function,
const ParameterCount& actual,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ CallKind call_kind) {
ASSERT(function->is_compiled());
// Get the function and setup the context.
@@ -934,9 +1102,9 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
// code field in the function to allow recompilation to take effect
// without changing any of the call sites.
ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- InvokeCode(r3, expected, actual, flag);
+ InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
} else {
- InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+ InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
}
}
@@ -954,9 +1122,9 @@ void MacroAssembler::IsInstanceJSObjectType(Register map,
Register scratch,
Label* fail) {
ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
+ cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
b(lt, fail);
- cmp(scratch, Operand(LAST_JS_OBJECT_TYPE));
+ cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
b(gt, fail);
}
@@ -977,7 +1145,7 @@ void MacroAssembler::IsObjectJSStringType(Register object,
void MacroAssembler::DebugBreak() {
ASSERT(allow_stub_calls());
mov(r0, Operand(0, RelocInfo::NONE));
- mov(r1, Operand(ExternalReference(Runtime::kDebugBreak)));
+ mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
@@ -1000,7 +1168,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize);
stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
// Save the current handler as the next handler.
- mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
ldr(r1, MemOperand(r3));
ASSERT(StackHandlerConstants::kNextOffset == 0);
push(r1);
@@ -1019,7 +1187,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize);
stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
// Save the current handler as the next handler.
- mov(r7, Operand(ExternalReference(Top::k_handler_address)));
+ mov(r7, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
ldr(r6, MemOperand(r7));
ASSERT(StackHandlerConstants::kNextOffset == 0);
push(r6);
@@ -1032,7 +1200,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
void MacroAssembler::PopTryHandler() {
ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
pop(r1);
- mov(ip, Operand(ExternalReference(Top::k_handler_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
str(r1, MemOperand(ip));
}
@@ -1048,7 +1216,7 @@ void MacroAssembler::Throw(Register value) {
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop the sp to the top of the handler.
- mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
ldr(sp, MemOperand(r3));
// Restore the next handler and frame pointer, discard handler state.
@@ -1067,7 +1235,7 @@ void MacroAssembler::Throw(Register value) {
// Restore cp otherwise.
ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(lr, Operand(pc));
}
#endif
@@ -1087,7 +1255,7 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
}
// Drop sp to the top stack handler.
- mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
ldr(sp, MemOperand(r3));
// Unwind the handlers until the ENTRY handler is found.
@@ -1111,7 +1279,8 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
+ ExternalReference external_caught(
+ Isolate::k_external_caught_exception_address, isolate());
mov(r0, Operand(false, RelocInfo::NONE));
mov(r2, Operand(external_caught));
str(r0, MemOperand(r2));
@@ -1119,7 +1288,8 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
// Set pending exception and r0 to out of memory exception.
Failure* out_of_memory = Failure::OutOfMemoryException();
mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
+ mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ isolate())));
str(r0, MemOperand(r2));
}
@@ -1140,7 +1310,7 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
// Restore cp otherwise.
ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(lr, Operand(pc));
}
#endif
@@ -1172,7 +1342,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check the context is a global context.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code. Due to the fact
// that ip is clobbered as part of cmp with an object Operand.
@@ -1191,7 +1361,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
b(eq, &same_contexts);
// Check the context is a global context.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code. Due to the fact
// that ip is clobbered as part of cmp with an object Operand.
@@ -1233,7 +1403,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
mov(result, Operand(0x7091));
mov(scratch1, Operand(0x7191));
@@ -1246,6 +1416,8 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
ASSERT(!result.is(scratch1));
ASSERT(!result.is(scratch2));
ASSERT(!scratch1.is(scratch2));
+ ASSERT(!scratch1.is(ip));
+ ASSERT(!scratch2.is(ip));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -1258,9 +1430,9 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Also, assert that the registers are numbered such that the values
// are loaded in the correct order.
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
intptr_t top =
reinterpret_cast<intptr_t>(new_space_allocation_top.address());
intptr_t limit =
@@ -1280,7 +1452,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Load allocation top into result and allocation limit into ip.
ldm(ia, topaddr, result.bit() | ip.bit());
} else {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Assert that result actually contains top on entry. ip is used
// immediately below, so this use of ip does not cause a difference in
// register content between debug and release mode.
@@ -1314,7 +1486,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
mov(result, Operand(0x7091));
mov(scratch1, Operand(0x7191));
@@ -1338,9 +1510,9 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// Also, assert that the registers are numbered such that the values
// are loaded in the correct order.
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
intptr_t top =
reinterpret_cast<intptr_t>(new_space_allocation_top.address());
intptr_t limit =
@@ -1358,7 +1530,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// Load allocation top into result and allocation limit into ip.
ldm(ia, topaddr, result.bit() | ip.bit());
} else {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Assert that result actually contains top on entry. ip is used
// immediately below, so this use of ip does not cause a difference in
// register content between debug and release mode.
@@ -1383,7 +1555,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
b(hi, gc_required);
// Update allocation top. result temporarily holds the new top.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
tst(scratch2, Operand(kObjectAlignmentMask));
Check(eq, "Unaligned allocation in new space");
}
@@ -1399,7 +1571,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
void MacroAssembler::UndoAllocationInNewSpace(Register object,
Register scratch) {
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
and_(object, object, Operand(~kHeapObjectTagMask));
@@ -1535,12 +1707,30 @@ void MacroAssembler::CompareInstanceType(Register map,
}
+void MacroAssembler::CompareRoot(Register obj,
+ Heap::RootListIndex index) {
+ ASSERT(!obj.is(ip));
+ LoadRoot(ip, index);
+ cmp(obj, ip);
+}
+
+
+void MacroAssembler::CheckFastElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
+ ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
+ b(hi, fail);
+}
+
+
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
Label* fail,
- bool is_heap_object) {
- if (!is_heap_object) {
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
@@ -1554,8 +1744,8 @@ void MacroAssembler::CheckMap(Register obj,
Register scratch,
Heap::RootListIndex index,
Label* fail,
- bool is_heap_object) {
- if (!is_heap_object) {
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
@@ -1565,6 +1755,23 @@ void MacroAssembler::CheckMap(Register obj,
}
+void MacroAssembler::DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
+ Label fail;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, &fail);
+ }
+ ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ mov(ip, Operand(map));
+ cmp(scratch, ip);
+ Jump(success, RelocInfo::CODE_TARGET, eq);
+ bind(&fail);
+}
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
@@ -1618,6 +1825,17 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
}
+MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ Object* result;
+ { MaybeObject* maybe_result = stub->TryGetCode();
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
+ return result;
+}
+
+
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
@@ -1630,7 +1848,7 @@ MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+ Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
return result;
}
@@ -1679,7 +1897,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
str(r4, MemOperand(r7, kNextOffset));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
ldr(r1, MemOperand(r7, kLevelOffset));
cmp(r1, r6);
Check(eq, "Unexpected level after return from api call");
@@ -1693,7 +1911,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
// Check if the function scheduled an exception.
bind(&leave_exit_frame);
LoadRoot(r4, Heap::kTheHoleValueRootIndex);
- mov(ip, Operand(ExternalReference::scheduled_exception_address()));
+ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
ldr(r5, MemOperand(ip));
cmp(r4, r5);
b(ne, &promote_scheduled_exception);
@@ -1704,8 +1922,11 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
mov(pc, lr);
bind(&promote_scheduled_exception);
- MaybeObject* result = TryTailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException), 0, 1);
+ MaybeObject* result
+ = TryTailCallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0,
+ 1);
if (result->IsFailure()) {
return result;
}
@@ -1714,8 +1935,10 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
bind(&delete_allocated_handles);
str(r5, MemOperand(r7, kLimitOffset));
mov(r4, r0);
- PrepareCallCFunction(0, r5);
- CallCFunction(ExternalReference::delete_handle_scope_extensions(), 0);
+ PrepareCallCFunction(1, r5);
+ mov(r0, Operand(ExternalReference::isolate_address()));
+ CallCFunction(
+ ExternalReference::delete_handle_scope_extensions(isolate()), 1);
mov(r0, r4);
jmp(&leave_exit_frame);
@@ -1952,6 +2175,121 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
}
+void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
+ Register input_high,
+ Register input_low,
+ Register scratch) {
+ Label done, normal_exponent, restore_sign;
+
+ // Extract the biased exponent in result.
+ Ubfx(result,
+ input_high,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // Check for Infinity and NaNs, which should return 0.
+ cmp(result, Operand(HeapNumber::kExponentMask));
+ mov(result, Operand(0), LeaveCC, eq);
+ b(eq, &done);
+
+ // Express exponent as delta to (number of mantissa bits + 31).
+ sub(result,
+ result,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
+ SetCC);
+
+ // If the delta is strictly positive, all bits would be shifted away,
+ // which means that we can return 0.
+ b(le, &normal_exponent);
+ mov(result, Operand(0));
+ b(&done);
+
+ bind(&normal_exponent);
+ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ // Calculate shift.
+ add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);
+
+ // Save the sign.
+ Register sign = result;
+ result = no_reg;
+ and_(sign, input_high, Operand(HeapNumber::kSignMask));
+
+ // Set the implicit 1 before the mantissa part in input_high.
+ orr(input_high,
+ input_high,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ // Shift the mantissa bits to the correct position.
+ // We don't need to clear non-mantissa bits as they will be shifted away.
+ // If they weren't, it would mean that the answer is in the 32bit range.
+ mov(input_high, Operand(input_high, LSL, scratch));
+
+ // Replace the shifted bits with bits from the lower mantissa word.
+ Label pos_shift, shift_done;
+ rsb(scratch, scratch, Operand(32), SetCC);
+ b(&pos_shift, ge);
+
+ // Negate scratch.
+ rsb(scratch, scratch, Operand(0));
+ mov(input_low, Operand(input_low, LSL, scratch));
+ b(&shift_done);
+
+ bind(&pos_shift);
+ mov(input_low, Operand(input_low, LSR, scratch));
+
+ bind(&shift_done);
+ orr(input_high, input_high, Operand(input_low));
+ // Restore sign if necessary.
+ cmp(sign, Operand(0));
+ result = sign;
+ sign = no_reg;
+ rsb(result, input_high, Operand(0), LeaveCC, ne);
+ mov(result, input_high, LeaveCC, eq);
+ bind(&done);
+}
+
+
+void MacroAssembler::EmitECMATruncate(Register result,
+ DwVfpRegister double_input,
+ SwVfpRegister single_scratch,
+ Register scratch,
+ Register input_high,
+ Register input_low) {
+ CpuFeatures::Scope scope(VFP3);
+ ASSERT(!input_high.is(result));
+ ASSERT(!input_low.is(result));
+ ASSERT(!input_low.is(input_high));
+ ASSERT(!scratch.is(result) &&
+ !scratch.is(input_high) &&
+ !scratch.is(input_low));
+ ASSERT(!single_scratch.is(double_input.low()) &&
+ !single_scratch.is(double_input.high()));
+
+ Label done;
+
+ // Clear cumulative exception flags.
+ ClearFPSCRBits(kVFPExceptionMask, scratch);
+ // Try a conversion to a signed integer.
+ vcvt_s32_f64(single_scratch, double_input);
+ vmov(result, single_scratch);
+ // Retrieve the FPSCR.
+ vmrs(scratch);
+ // Check for overflow and NaNs.
+ tst(scratch, Operand(kVFPOverflowExceptionBit |
+ kVFPUnderflowExceptionBit |
+ kVFPInvalidOpExceptionBit));
+ // If we had no exceptions we are done.
+ b(eq, &done);
+
+ // Load the double value and perform a manual truncation.
+ vmov(input_low, input_high, double_input);
+ EmitOutOfInt32RangeTruncate(result,
+ input_high,
+ input_low,
+ scratch);
+ bind(&done);
+}
+
+
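Taken together, these two helpers implement ECMA-262 ToInt32: EmitECMATruncate tries the truncating vcvt fast path and, when the FPSCR flags report that the value was out of range (or NaN/Infinity), falls back to the manual bit extraction in EmitOutOfInt32RangeTruncate. A minimal standalone C++ model of the same semantics (a sketch assuming IEEE-754 doubles; the function names are illustrative, not V8 API):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Fallback path: low 32 bits of the integer value of a double whose
    // magnitude does not fit in a signed 32-bit integer.
    static int32_t OutOfInt32RangeTruncate(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      int biased_exponent = static_cast<int>((bits >> 52) & 0x7FF);
      if (biased_exponent == 0x7FF) return 0;      // Infinity or NaN.
      int exponent = biased_exponent - 1023;       // Unbiased exponent.
      if (exponent > 83) return 0;                 // Low 32 bits all zero.
      // Implicit leading 1 plus the 52 stored mantissa bits.
      uint64_t significand = (bits & ((1ULL << 52) - 1)) | (1ULL << 52);
      // Align the significand so its low 32 bits are the integer bits kept.
      uint32_t magnitude = (exponent >= 52)
          ? static_cast<uint32_t>(significand << (exponent - 52))
          : static_cast<uint32_t>(significand >> (52 - exponent));
      bool negative = (bits >> 63) != 0;
      return static_cast<int32_t>(negative ? 0u - magnitude : magnitude);
    }

    // Full ToInt32: fast conversion when in range, fallback otherwise,
    // mirroring the vcvt + FPSCR exception check above.
    static int32_t ECMATruncate(double value) {
      if (value >= -2147483648.0 && value < 2147483648.0) {
        return static_cast<int32_t>(value);        // vcvt_s32_f64 analogue.
      }
      return OutOfInt32RangeTruncate(value);       // Out of range, Inf, NaN.
    }

    int main() {
      std::printf("%d\n", ECMATruncate(1e10));     // 1410065408 (1e10 mod 2^32)
      std::printf("%d\n", ECMATruncate(-1.5));     // -1 (truncate toward zero)
    }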
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
@@ -1971,7 +2309,8 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
}
-void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
// All parameters are on the stack. r0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -1987,7 +2326,7 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// should remove this need and make the runtime routine entry code
// smarter.
mov(r0, Operand(num_arguments));
- mov(r1, Operand(ExternalReference(f)));
+ mov(r1, Operand(ExternalReference(f, isolate())));
CEntryStub stub(1);
CallStub(&stub);
}
@@ -1999,9 +2338,9 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- Runtime::Function* function = Runtime::FunctionForId(id);
+ const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
- mov(r1, Operand(ExternalReference(function)));
+ mov(r1, Operand(ExternalReference(function, isolate())));
CEntryStub stub(1);
stub.SaveDoubles();
CallStub(&stub);
@@ -2044,7 +2383,9 @@ MaybeObject* MacroAssembler::TryTailCallExternalReference(
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
- TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
+ TailCallExternalReference(ExternalReference(fid, isolate()),
+ num_arguments,
+ result_size);
}
@@ -2072,14 +2413,17 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference(
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags,
- PostCallGenerator* post_call_generator) {
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
GetBuiltinEntry(r2, id);
- if (flags == CALL_JS) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(r2));
+ SetCallKind(r5, CALL_AS_METHOD);
Call(r2);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ call_wrapper.AfterCall();
} else {
- ASSERT(flags == JUMP_JS);
+ ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(r5, CALL_AS_METHOD);
Jump(r2);
}
}
@@ -2139,14 +2483,14 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
void MacroAssembler::Assert(Condition cond, const char* msg) {
- if (FLAG_debug_code)
+ if (emit_debug_code())
Check(cond, msg);
}
void MacroAssembler::AssertRegisterIsRoot(Register reg,
Heap::RootListIndex index) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
LoadRoot(ip, index);
cmp(reg, ip);
Check(eq, "Register did not match expected root");
@@ -2155,7 +2499,7 @@ void MacroAssembler::AssertRegisterIsRoot(Register reg,
void MacroAssembler::AssertFastElements(Register elements) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
ASSERT(!elements.is(ip));
Label ok;
push(elements);
@@ -2225,12 +2569,9 @@ void MacroAssembler::Abort(const char* msg) {
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+ ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
- ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+ ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
} else {
// Slot is in the current function context. Move it into the
@@ -2238,17 +2579,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
// cannot be allowed to destroy the context in esi).
mov(dst, cp);
}
-
- // We should not have found a 'with' context by walking the context chain
- // (i.e., the static scope chain and runtime context chain do not agree).
- // A variable occurring in such a scope should have slot type LOOKUP and
- // not CONTEXT.
- if (FLAG_debug_code) {
- ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- cmp(dst, ip);
- Check(eq, "Yo dawg, I heard you liked function contexts "
- "so I put function contexts in all your contexts");
- }
}
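This hunk changes the context walk: instead of loading each closure and then its function context (two loads per hop), contexts now link directly through a PREVIOUS slot. A sketch of the new shape with hypothetical minimal types (not the real heap layout):

    // Each context now records its enclosing context directly.
    struct Context {
      Context* previous;  // The Context::PREVIOUS_INDEX slot.
    };

    // Walk up 'chain_length' lexical scopes, as LoadContext now does with
    // one ldr per hop instead of two.
    Context* LoadContext(Context* cp, int chain_length) {
      Context* dst = cp;
      for (int i = 0; i < chain_length; i++) {
        dst = dst->previous;
      }
      return dst;
    }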
@@ -2268,9 +2598,9 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register scratch) {
// Load the initial map. The global functions all have initial maps.
ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok, fail;
- CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
+ CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
b(&ok);
bind(&fail);
Abort("Global functions must have initial map");
@@ -2290,6 +2620,18 @@ void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
}
+void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
+ Register reg,
+ Register scratch,
+ Label* zero_and_neg,
+ Label* not_power_of_two) {
+ sub(scratch, reg, Operand(1), SetCC);
+ b(mi, zero_and_neg);
+ tst(scratch, reg);
+ b(ne, not_power_of_two);
+}
+
+
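The three instructions rely on the classic reg & (reg - 1) identity: subtracting one from a power of two yields exactly the mask of the bits below it, and the flags from the same subtraction expose zero and negative inputs. A scalar model (a sketch; the names are illustrative):

    #include <cstdint>

    // Mirrors the code above: sub with flags set, branch on negative,
    // then tst. On success the caller is left with reg - 1 as a mask.
    bool IsPowerOfTwoAndPositive(int32_t reg, uint32_t* mask_out) {
      uint32_t scratch = static_cast<uint32_t>(reg) - 1u;   // sub ..., SetCC
      if (static_cast<int32_t>(scratch) < 0) return false;  // b(mi, zero_and_neg)
      if ((scratch & static_cast<uint32_t>(reg)) != 0) {
        return false;                                       // b(ne, not_power_of_two)
      }
      *mask_out = scratch;  // Control falls through with the mask in scratch.
      return true;
    }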
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
Register reg2,
Label* on_not_both_smi) {
@@ -2340,9 +2682,7 @@ void MacroAssembler::AbortIfNotString(Register object) {
void MacroAssembler::AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message) {
- ASSERT(!src.is(ip));
- LoadRoot(ip, root_value_index);
- cmp(src, ip);
+ CompareRoot(src, root_value_index);
Assert(eq, message);
}
@@ -2386,8 +2726,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
// Check that neither is a smi.
STATIC_ASSERT(kSmiTag == 0);
and_(scratch1, first, Operand(second));
- tst(scratch1, Operand(kSmiTagMask));
- b(eq, failure);
+ JumpIfSmi(scratch1, failure);
JumpIfNonSmisNotBothSequentialAsciiStrings(first,
second,
scratch1,
@@ -2478,7 +2817,7 @@ void MacroAssembler::CopyBytes(Register src,
// Copy bytes in word size chunks.
bind(&word_loop);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
tst(src, Operand(kPointerSize - 1));
Assert(eq, "Expecting alignment for CopyBytes");
}
@@ -2577,11 +2916,38 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
b(ne, failure);
}
+static const int kRegisterPassedArguments = 4;
-void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
- int frame_alignment = ActivationFrameAlignment();
+
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments) {
+ int stack_passed_words = 0;
+ if (use_eabi_hardfloat()) {
+ // In the hard floating point calling convention, we can use
+ // all double registers to pass doubles.
+ if (num_double_arguments > DoubleRegister::kNumRegisters) {
+ stack_passed_words +=
+ 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
+ }
+ } else {
+ // In the soft floating point calling convention, every double
+ // argument is passed using two registers.
+ num_reg_arguments += 2 * num_double_arguments;
+ }
// Up to four simple arguments are passed in registers r0..r3.
- int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
+ if (num_reg_arguments > kRegisterPassedArguments) {
+ stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+ }
+ return stack_passed_words;
+}
+
+
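The count reflects the two ARM EABI variants: hard float passes doubles in the d-register bank and spills any overflow at two words apiece, while soft float folds each double into two core-register slots before the four-register limit applies. A standalone sketch (assuming the ARM values of 4 core argument registers and 16 double registers):

    #include <cstdio>

    int CalculateStackPassedWords(int num_reg_arguments,
                                  int num_double_arguments,
                                  bool use_eabi_hardfloat) {
      const int kRegisterPassedArguments = 4;  // r0-r3.
      const int kNumDoubleRegisters = 16;      // d0-d15.
      int stack_passed_words = 0;
      if (use_eabi_hardfloat) {
        // Doubles beyond the register bank spill, two words each.
        if (num_double_arguments > kNumDoubleRegisters) {
          stack_passed_words +=
              2 * (num_double_arguments - kNumDoubleRegisters);
        }
      } else {
        // Soft float: each double consumes a core register pair.
        num_reg_arguments += 2 * num_double_arguments;
      }
      if (num_reg_arguments > kRegisterPassedArguments) {
        stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
      }
      return stack_passed_words;
    }

    int main() {
      // One pointer plus two doubles: everything fits in registers under
      // hard float, one word goes on the stack under soft float.
      std::printf("%d\n", CalculateStackPassedWords(1, 2, true));   // 0
      std::printf("%d\n", CalculateStackPassedWords(1, 2, false));  // 1
    }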
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+ int num_double_arguments,
+ Register scratch) {
+ int frame_alignment = ActivationFrameAlignment();
+ int stack_passed_arguments = CalculateStackPassedWords(
+ num_reg_arguments, num_double_arguments);
if (frame_alignment > kPointerSize) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
@@ -2596,19 +2962,97 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
}
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+ Register scratch) {
+ PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
+ if (use_eabi_hardfloat()) {
+ Move(d0, dreg);
+ } else {
+ vmov(r0, r1, dreg);
+ }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
+ DoubleRegister dreg2) {
+ if (use_eabi_hardfloat()) {
+ if (dreg2.is(d0)) {
+ ASSERT(!dreg1.is(d1));
+ Move(d1, dreg2);
+ Move(d0, dreg1);
+ } else {
+ Move(d0, dreg1);
+ Move(d1, dreg2);
+ }
+ } else {
+ vmov(r0, r1, dreg1);
+ vmov(r2, r3, dreg2);
+ }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
+ Register reg) {
+ if (use_eabi_hardfloat()) {
+ Move(d0, dreg);
+ Move(r0, reg);
+ } else {
+ Move(r2, reg);
+ vmov(r0, r1, dreg);
+ }
+}
+
+
void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ CallCFunctionHelper(no_reg,
+ function,
+ ip,
+ num_reg_arguments,
+ num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+ Register scratch,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ CallCFunctionHelper(function,
+ ExternalReference::the_hole_value_location(isolate()),
+ scratch,
+ num_reg_arguments,
+ num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ CallCFunction(function, num_arguments, 0);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+ Register scratch,
int num_arguments) {
- mov(ip, Operand(function));
- CallCFunction(ip, num_arguments);
+ CallCFunction(function, scratch, num_arguments, 0);
}
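With these overloads in place, a call into a C helper taking two doubles follows a fixed sequence. A minimal usage sketch (assuming a MacroAssembler* masm and some ExternalReference helper; this is not a real call site from this patch):

    // Prepare the frame for 0 core and 2 double arguments, place the
    // doubles according to the active ABI, call, and fetch the result.
    void CallBinaryDoubleHelper(MacroAssembler* masm,
                                ExternalReference helper) {
      masm->PrepareCallCFunction(0, 2, r5);    // r5 as the scratch register.
      masm->SetCallCDoubleArguments(d0, d1);   // d0/d1 or r0-r3, ABI-dependent.
      masm->CallCFunction(helper, 0, 2);       // 0 core args, 2 double args.
      masm->GetCFunctionDoubleResult(d0);      // Result back in d0 (or r0/r1).
    }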
-void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+void MacroAssembler::CallCFunctionHelper(Register function,
+ ExternalReference function_reference,
+ Register scratch,
+ int num_reg_arguments,
+ int num_double_arguments) {
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
#if defined(V8_HOST_ARCH_ARM)
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
int frame_alignment = OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
@@ -2627,9 +3071,14 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) {
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
+ if (function.is(no_reg)) {
+ mov(scratch, Operand(function_reference));
+ function = scratch;
+ }
Call(function);
- int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
- if (OS::ActivationFrameAlignment() > kPointerSize) {
+ int stack_passed_arguments = CalculateStackPassedWords(
+ num_reg_arguments, num_double_arguments);
+ if (ActivationFrameAlignment() > kPointerSize) {
ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
@@ -2642,7 +3091,7 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
const uint32_t kLdrOffsetMask = (1 << 12) - 1;
const int32_t kPCRegOffset = 2 * kPointerSize;
ldr(result, MemOperand(ldr_location));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check that the instruction is a ldr reg, [pc + offset] .
and_(result, result, Operand(kLdrPCPattern));
cmp(result, Operand(kLdrPCPattern));
@@ -2657,11 +3106,60 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
}
+void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
+ Usat(output_reg, 8, Operand(input_reg));
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(Register result_reg,
+ DoubleRegister input_reg,
+ DoubleRegister temp_double_reg) {
+ Label above_zero;
+ Label done;
+ Label in_bounds;
+
+ Vmov(temp_double_reg, 0.0);
+ VFPCompareAndSetFlags(input_reg, temp_double_reg);
+ b(gt, &above_zero);
+
+ // Double value is less than zero, NaN or Inf, return 0.
+ mov(result_reg, Operand(0));
+ b(al, &done);
+
+ // Double value is >= 255, return 255.
+ bind(&above_zero);
+ Vmov(temp_double_reg, 255.0);
+ VFPCompareAndSetFlags(input_reg, temp_double_reg);
+ b(le, &in_bounds);
+ mov(result_reg, Operand(255));
+ b(al, &done);
+
+ // In 0-255 range, round and truncate.
+ bind(&in_bounds);
+ Vmov(temp_double_reg, 0.5);
+ vadd(temp_double_reg, input_reg, temp_double_reg);
+ vcvt_u32_f64(s0, temp_double_reg);
+ vmov(result_reg, s0);
+ bind(&done);
+}
+
+
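The clamp sends NaN and everything at or below zero to 0, saturates above 255, and otherwise rounds by adding 0.5 before the truncating vcvt. A scalar model of the same mapping (a sketch):

    #include <cstdint>

    // NaN fails the 'value > 0.0' comparison, matching the unordered
    // result of the VFP compare above.
    uint8_t ClampDoubleToUint8(double value) {
      if (!(value > 0.0)) return 0;              // <= 0, -0, or NaN.
      if (value > 255.0) return 255;             // Saturate the top end.
      return static_cast<uint8_t>(value + 0.5);  // Truncate after rounding.
    }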
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+ Register descriptors) {
+ ldr(descriptors,
+ FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
+ Label not_smi;
+ JumpIfNotSmi(descriptors, &not_smi);
+ mov(descriptors, Operand(FACTORY->empty_descriptor_array()));
+ bind(&not_smi);
+}
+
+
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
size_(instructions * Assembler::kInstrSize),
- masm_(address, size_ + Assembler::kGap) {
+ masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index acd1d79b7..c601f26b7 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,13 +29,11 @@
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "assembler.h"
+#include "v8globals.h"
namespace v8 {
namespace internal {
-// Forward declaration.
-class PostCallGenerator;
-
// ----------------------------------------------------------------------------
// Static helper functions
@@ -55,12 +53,6 @@ static inline Operand SmiUntagOperand(Register object) {
const Register cp = { 8 }; // JavaScript context pointer
const Register roots = { 10 }; // Roots array pointer.
-enum InvokeJSFlags {
- CALL_JS,
- JUMP_JS
-};
-
-
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
// No special flags.
@@ -90,15 +82,28 @@ enum ObjectToDoubleFlags {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- MacroAssembler(void* buffer, int size);
+ // The isolate parameter can be NULL if the macro assembler should
+ // not use isolate-dependent functionality. In this case, it's the
+ // responsibility of the caller to never invoke such function on the
+ // macro assembler.
+ MacroAssembler(Isolate* isolate, void* buffer, int size);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+ int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al);
+ int CallSize(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
- void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+ int CallSize(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+ void Call(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ Condition cond = al);
+ void CallWithAstId(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id,
+ Condition cond = al);
void Ret(Condition cond = al);
// Emit code to discard a non-negative number of pointer-sized elements
@@ -135,11 +140,12 @@ class MacroAssembler: public Assembler {
Condition cond = al);
void Call(Label* target);
+
+ // Register move. May do nothing if the registers are identical.
void Move(Register dst, Handle<Object> value);
- // May do nothing if the registers are identical.
- void Move(Register dst, Register src);
- // Jumps to the label at the index given by the Smi in "index".
- void SmiJumpTable(Register index, Vector<Label*> targets);
+ void Move(Register dst, Register src, Condition cond = al);
+ void Move(DoubleRegister dst, DoubleRegister src);
+
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index,
@@ -184,6 +190,9 @@ class MacroAssembler: public Assembler {
Register address,
Register scratch);
+ // Push a handle.
+ void Push(Handle<Object> handle);
+
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
ASSERT(!src1.is(src2));
@@ -303,6 +312,10 @@ class MacroAssembler: public Assembler {
const Register fpscr_flags,
const Condition cond = al);
+ void Vmov(const DwVfpRegister dst,
+ const double imm,
+ const Condition cond = al);
+
// ---------------------------------------------------------------------------
// Activation frames
@@ -338,29 +351,38 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// JavaScript invokes
+ // Set up call kind marking in the dst register. The method takes dst
+ // as an explicit first parameter to make the code more readable at the
+ // call sites.
+ void SetCallKind(Register dst, CallKind kind);
+
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
- InvokeFlag flag);
+ InvokeFlag flag,
+ CallKind call_kind);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
void InvokeFunction(JSFunction* function,
const ParameterCount& actual,
- InvokeFlag flag);
+ InvokeFlag flag,
+ CallKind call_kind);
void IsObjectJSObjectType(Register heap_object,
Register map,
@@ -560,6 +582,12 @@ class MacroAssembler: public Assembler {
InstanceType type);
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Register scratch,
+ Label* fail);
+
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
@@ -568,13 +596,29 @@ class MacroAssembler: public Assembler {
Register scratch,
Handle<Map> map,
Label* fail,
- bool is_heap_object);
+ SmiCheckType smi_check_type);
+
void CheckMap(Register obj,
Register scratch,
Heap::RootListIndex index,
Label* fail,
- bool is_heap_object);
+ SmiCheckType smi_check_type);
+
+
+ // Check if the map of an object is equal to a specified map and branch to a
+ // specified target if equal. Skip the smi check if not required (object is
+ // known to be a heap object)
+ void DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type);
+
+
+ // Compare the object in a register to a value from the root list.
+ // Uses the ip register as scratch.
+ void CompareRoot(Register obj, Heap::RootListIndex index);
// Load and check the instance type of an object for being a string.
@@ -641,11 +685,11 @@ class MacroAssembler: public Assembler {
DwVfpRegister double_scratch,
Label *not_int32);
-// Truncates a double using a specific rounding mode.
-// Clears the z flag (ne condition) if an overflow occurs.
-// If exact_conversion is true, the z flag is also cleared if the conversion
-// was inexact, ie. if the double value could not be converted exactly
-// to a 32bit integer.
+ // Truncates a double using a specific rounding mode.
+ // Clears the z flag (ne condition) if an overflow occurs.
+ // If exact_conversion is true, the z flag is also cleared if the conversion
+ // was inexact, ie. if the double value could not be converted exactly
+ // to a 32bit integer.
void EmitVFPTruncate(VFPRoundingMode rounding_mode,
SwVfpRegister result,
DwVfpRegister double_input,
@@ -654,6 +698,27 @@ class MacroAssembler: public Assembler {
CheckForInexactConversion check
= kDontCheckForInexactConversion);
+ // Helper for EmitECMATruncate.
+ // This will truncate a floating-point value outside of the signed 32bit
+ // integer range to a 32bit signed integer.
+ // Expects the double value loaded in input_high and input_low.
+ // Exits with the answer in 'result'.
+ // Note that this code does not work for values in the 32bit range!
+ void EmitOutOfInt32RangeTruncate(Register result,
+ Register input_high,
+ Register input_low,
+ Register scratch);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer and all other registers clobbered.
+ void EmitECMATruncate(Register result,
+ DwVfpRegister double_input,
+ SwVfpRegister single_scratch,
+ Register scratch,
+ Register scratch2,
+ Register scratch3);
+
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
// instruction. On pre-ARM5 hardware this routine gives the wrong answer
// for 0 (31 instead of 32). Source and scratch can be the same in which case
@@ -669,6 +734,11 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub, Condition cond = al);
+ // Call a code stub and return the code object called. Try to generate
+ // the code if necessary. Do not perform a GC but instead return a retry
+ // after GC failure.
+ MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub, Condition cond = al);
+
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
@@ -679,7 +749,7 @@ class MacroAssembler: public Assembler {
Condition cond = al);
// Call a runtime routine.
- void CallRuntime(Runtime::Function* f, int num_arguments);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
// Convenience function: Same as above, but takes the fid instead.
@@ -707,15 +777,32 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
+
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, non-register arguments must be stored in
// sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
- // are word sized.
+ // are word sized. If double arguments are used, this function assumes that
+ // all double arguments are stored before core registers; otherwise the
+ // correct alignment of the double values is not guaranteed.
// Some compilers/platforms require the stack to be aligned when calling
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
- void PrepareCallCFunction(int num_arguments, Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments,
+ int num_double_registers,
+ Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments,
+ Register scratch);
+
+ // There are two ways of passing double arguments on ARM, depending on
+ // whether soft or hard floating point ABI is used. These functions
+ // abstract parameter passing for the three different ways we call
+ // C functions from generated code.
+ void SetCallCDoubleArguments(DoubleRegister dreg);
+ void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
+ void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
@@ -723,7 +810,13 @@ class MacroAssembler: public Assembler {
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(Register function, Register scratch, int num_arguments);
+ void CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function, Register scratch,
+ int num_reg_arguments,
+ int num_double_arguments);
void GetCFunctionDoubleResult(const DoubleRegister dst);
@@ -742,8 +835,8 @@ class MacroAssembler: public Assembler {
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags,
- PostCallGenerator* post_call_generator = NULL);
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper = NullCallWrapper());
// Store the code object for the given builtin in the target register and
// setup the function in r1.
@@ -752,7 +845,10 @@ class MacroAssembler: public Assembler {
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
- Handle<Object> CodeObject() { return code_object_; }
+ Handle<Object> CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+ }
// ---------------------------------------------------------------------------
@@ -787,6 +883,15 @@ class MacroAssembler: public Assembler {
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
+ // EABI variant for double arguments in use.
+ bool use_eabi_hardfloat() {
+#if USE_EABI_HARDFLOAT
+ return true;
+#else
+ return false;
+#endif
+ }
+
// ---------------------------------------------------------------------------
// Number utilities
@@ -797,6 +902,16 @@ class MacroAssembler: public Assembler {
void JumpIfNotPowerOfTwoOrZero(Register reg,
Register scratch,
Label* not_power_of_two_or_zero);
+ // Check whether the value of reg is a power of two and not zero.
+ // Control falls through if it is, with scratch containing the mask
+ // value (reg - 1).
+ // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
+ // zero or negative, or jumps to the 'not_power_of_two' label if the value is
+ // strictly positive but not a power of two.
+ void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
+ Register scratch,
+ Label* zero_and_neg,
+ Label* not_power_of_two);
// ---------------------------------------------------------------------------
// Smi utilities
@@ -904,9 +1019,27 @@ class MacroAssembler: public Assembler {
Register result);
+ void ClampUint8(Register output_reg, Register input_reg);
+
+ void ClampDoubleToUint8(Register result_reg,
+ DoubleRegister input_reg,
+ DoubleRegister temp_double_reg);
+
+
+ void LoadInstanceDescriptors(Register map, Register descriptors);
+
private:
+ void CallCFunctionHelper(Register function,
+ ExternalReference function_reference,
+ Register scratch,
+ int num_reg_arguments,
+ int num_double_arguments);
+
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
- void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+ int CallSize(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+ void Call(intptr_t target,
+ RelocInfo::Mode rmode,
+ Condition cond = al);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
@@ -915,7 +1048,8 @@ class MacroAssembler: public Assembler {
Register code_reg,
Label* done,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -976,17 +1110,6 @@ class CodePatcher {
#endif // ENABLE_DEBUGGER_SUPPORT
-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class PostCallGenerator {
- public:
- PostCallGenerator() { }
- virtual ~PostCallGenerator() { }
- virtual void Generate() = 0;
-};
-
-
// -----------------------------------------------------------------------------
// Static helper functions.
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index 1f6ed6712..983a5286e 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -60,6 +60,7 @@ namespace internal {
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
+ * - fp[52] Isolate* isolate (Address of the current isolate)
* - fp[48] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
* - fp[44] stack_area_base (High end of the memory area to use as
@@ -115,7 +116,7 @@ namespace internal {
RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
Mode mode,
int registers_to_save)
- : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+ : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -346,7 +347,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ sub(current_input_offset(), r2, end_of_input_address());
} else {
ASSERT(mode_ == UC16);
- int argument_count = 3;
+ int argument_count = 4;
__ PrepareCallCFunction(argument_count, r2);
// r0 - offset of start of capture
@@ -357,6 +358,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// r0: Address byte_offset1 - Address captured substring's start.
// r1: Address byte_offset2 - Address of current character position.
// r2: size_t byte_length - length of capture in bytes(!)
+ // r3: Isolate* isolate
// Address of start of capture.
__ add(r0, r0, Operand(end_of_input_address()));
@@ -366,9 +368,11 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ mov(r4, Operand(r1));
// Address of current input position.
__ add(r1, current_input_offset(), Operand(end_of_input_address()));
+ // Isolate.
+ __ mov(r3, Operand(ExternalReference::isolate_address()));
ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16();
+ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
__ CallCFunction(function, argument_count);
// Check if function returned non-zero for success or zero for failure.
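The r0-r3 comments spell out the shape of the C helper being called: capture start, current position, byte length, and now the isolate. As a standalone sketch of the comparison itself (ASCII-only case folding here; the real helper canonicalizes through the isolate's Unicode tables):

    #include <cstddef>
    #include <cstdint>

    // Compare two UC16 ranges for case-insensitive equality; byte_length
    // is in bytes, not characters, as the r2 comment notes.
    bool CaseInsensitiveCompareUC16(const uint16_t* a,
                                    const uint16_t* b,
                                    size_t byte_length) {
      size_t length = byte_length / sizeof(uint16_t);
      for (size_t i = 0; i < length; i++) {
        uint16_t c1 = a[i];
        uint16_t c2 = b[i];
        if (c1 == c2) continue;
        uint16_t folded = c1 | 0x20;             // Fold ASCII letters only.
        if (folded != (c2 | 0x20)) return false;
        if (folded < 'a' || folded > 'z') return false;
      }
      return true;
    }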
@@ -601,7 +605,7 @@ void RegExpMacroAssemblerARM::Fail() {
}
-Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
+Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Finalize code - write the entry point code now we know how many
// registers we need.
@@ -626,7 +630,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label stack_ok;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit();
+ ExternalReference::address_of_stack_limit(masm_->isolate());
__ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ sub(r0, sp, r0, SetCC);
@@ -777,12 +781,13 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label grow_failed;
// Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 2;
+ static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments, r0);
__ mov(r0, backtrack_stackpointer());
__ add(r1, frame_pointer(), Operand(kStackHighEnd));
+ __ mov(r2, Operand(ExternalReference::isolate_address()));
ExternalReference grow_stack =
- ExternalReference::re_grow_stack();
+ ExternalReference::re_grow_stack(masm_->isolate());
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
@@ -804,11 +809,11 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(&code_desc);
- Handle<Code> code = Factory::NewCode(code_desc,
+ Handle<Code> code = FACTORY->NewCode(code_desc,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
- PROFILE(RegExpCodeCreateEvent(*code, *source));
- return Handle<Object>::cast(code);
+ PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
+ return Handle<HeapObject>::cast(code);
}
@@ -894,13 +899,12 @@ void RegExpMacroAssemblerARM::PushBacktrack(Label* label) {
constant_offset - offset_of_pc_register_read;
ASSERT(pc_offset_of_constant < 0);
if (is_valid_memory_offset(pc_offset_of_constant)) {
- masm_->BlockConstPoolBefore(masm_->pc_offset() + Assembler::kInstrSize);
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
__ ldr(r0, MemOperand(pc, pc_offset_of_constant));
} else {
// Not a 12-bit offset, so it needs to be loaded from the constant
// pool.
- masm_->BlockConstPoolBefore(
- masm_->pc_offset() + 2 * Assembler::kInstrSize);
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
__ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
__ ldr(r0, MemOperand(pc, r0));
}
@@ -998,7 +1002,7 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
__ mov(r1, Operand(masm_->CodeObject()));
// r0 becomes return address pointer.
ExternalReference stack_guard_check =
- ExternalReference::re_check_stack_guard_state();
+ ExternalReference::re_check_stack_guard_state(masm_->isolate());
CallCFunctionUsingStub(stack_guard_check, num_arguments);
}
@@ -1013,8 +1017,10 @@ static T& frame_entry(Address re_frame, int frame_offset) {
int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
- if (StackGuard::IsStackOverflow()) {
- Top::StackOverflow();
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ ASSERT(isolate == Isolate::Current());
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ isolate->StackOverflow();
return EXCEPTION;
}
@@ -1158,7 +1164,7 @@ void RegExpMacroAssemblerARM::Pop(Register target) {
void RegExpMacroAssemblerARM::CheckPreemption() {
// Check for preemption.
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit();
+ ExternalReference::address_of_stack_limit(masm_->isolate());
__ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ cmp(sp, r0);
@@ -1168,7 +1174,7 @@ void RegExpMacroAssemblerARM::CheckPreemption() {
void RegExpMacroAssemblerARM::CheckStackLimit() {
ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit();
+ ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
__ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ cmp(backtrack_stackpointer(), Operand(r0));
@@ -1178,8 +1184,7 @@ void RegExpMacroAssemblerARM::CheckStackLimit() {
void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
__ CheckConstPool(false, false);
- __ BlockConstPoolBefore(
- masm_->pc_offset() + kBacktrackConstantPoolSize * Assembler::kInstrSize);
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
backtrack_constant_pool_offset_ = masm_->pc_offset();
for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
__ emit(0);
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index d9d0b3562..d771e4033 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -82,7 +82,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual bool CheckSpecialCharacterClass(uc16 type,
Label* on_no_match);
virtual void Fail();
- virtual Handle<Object> GetCode(Handle<String> source);
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
@@ -127,6 +127,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
diff --git a/deps/v8/src/arm/register-allocator-arm-inl.h b/deps/v8/src/arm/register-allocator-arm-inl.h
deleted file mode 100644
index 945cdeb3c..000000000
--- a/deps/v8/src/arm/register-allocator-arm-inl.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
-#define V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
- return reg.is(cp) || reg.is(fp) || reg.is(sp) || reg.is(pc);
-}
-
-
-
-// The register allocator uses small integers to represent the
-// non-reserved assembler registers. The mapping is:
-//
-// r0 <-> 0
-// r1 <-> 1
-// r2 <-> 2
-// r3 <-> 3
-// r4 <-> 4
-// r5 <-> 5
-// r6 <-> 6
-// r7 <-> 7
-// r9 <-> 8
-// r10 <-> 9
-// ip <-> 10
-// lr <-> 11
-
-int RegisterAllocator::ToNumber(Register reg) {
- ASSERT(reg.is_valid() && !IsReserved(reg));
- const int kNumbers[] = {
- 0, // r0
- 1, // r1
- 2, // r2
- 3, // r3
- 4, // r4
- 5, // r5
- 6, // r6
- 7, // r7
- -1, // cp
- 8, // r9
- 9, // r10
- -1, // fp
- 10, // ip
- -1, // sp
- 11, // lr
- -1 // pc
- };
- return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
- ASSERT(num >= 0 && num < kNumRegisters);
- const Register kRegisters[] =
- { r0, r1, r2, r3, r4, r5, r6, r7, r9, r10, ip, lr };
- return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index f475a18b0..6af535553 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -49,12 +49,12 @@ namespace internal {
// Windows C Run-Time Library does not provide vsscanf.
#define SScanF sscanf // NOLINT
-// The Debugger class is used by the simulator while debugging simulated ARM
+// The ArmDebugger class is used by the simulator while debugging simulated ARM
// code.
-class Debugger {
+class ArmDebugger {
public:
- explicit Debugger(Simulator* sim);
- ~Debugger();
+ explicit ArmDebugger(Simulator* sim);
+ ~ArmDebugger();
void Stop(Instruction* instr);
void Debug();
@@ -67,6 +67,7 @@ class Debugger {
Simulator* sim_;
int32_t GetRegisterValue(int regnum);
+ double GetRegisterPairDoubleValue(int regnum);
double GetVFPDoubleRegisterValue(int regnum);
bool GetValue(const char* desc, int32_t* value);
bool GetVFPSingleValue(const char* desc, float* value);
@@ -83,12 +84,12 @@ class Debugger {
};
-Debugger::Debugger(Simulator* sim) {
+ArmDebugger::ArmDebugger(Simulator* sim) {
sim_ = sim;
}
-Debugger::~Debugger() {
+ArmDebugger::~ArmDebugger() {
}
@@ -105,7 +106,7 @@ static void InitializeCoverage() {
}
-void Debugger::Stop(Instruction* instr) {
+void ArmDebugger::Stop(Instruction* instr) {
// Get the stop code.
uint32_t code = instr->SvcValue() & kStopCodeMask;
// Retrieve the encoded address, which comes just after this stop.
@@ -137,7 +138,7 @@ static void InitializeCoverage() {
}
-void Debugger::Stop(Instruction* instr) {
+void ArmDebugger::Stop(Instruction* instr) {
// Get the stop code.
uint32_t code = instr->SvcValue() & kStopCodeMask;
// Retrieve the encoded address, which comes just after this stop.
@@ -159,7 +160,7 @@ void Debugger::Stop(Instruction* instr) {
#endif
-int32_t Debugger::GetRegisterValue(int regnum) {
+int32_t ArmDebugger::GetRegisterValue(int regnum) {
if (regnum == kPCRegister) {
return sim_->get_pc();
} else {
@@ -168,12 +169,17 @@ int32_t Debugger::GetRegisterValue(int regnum) {
}
-double Debugger::GetVFPDoubleRegisterValue(int regnum) {
+double ArmDebugger::GetRegisterPairDoubleValue(int regnum) {
+ return sim_->get_double_from_register_pair(regnum);
+}
+
+
+double ArmDebugger::GetVFPDoubleRegisterValue(int regnum) {
return sim_->get_double_from_d_register(regnum);
}
-bool Debugger::GetValue(const char* desc, int32_t* value) {
+bool ArmDebugger::GetValue(const char* desc, int32_t* value) {
int regnum = Registers::Number(desc);
if (regnum != kNoRegister) {
*value = GetRegisterValue(regnum);
@@ -189,7 +195,7 @@ bool Debugger::GetValue(const char* desc, int32_t* value) {
}
-bool Debugger::GetVFPSingleValue(const char* desc, float* value) {
+bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) {
bool is_double;
int regnum = VFPRegisters::Number(desc, &is_double);
if (regnum != kNoRegister && !is_double) {
@@ -200,7 +206,7 @@ bool Debugger::GetVFPSingleValue(const char* desc, float* value) {
}
-bool Debugger::GetVFPDoubleValue(const char* desc, double* value) {
+bool ArmDebugger::GetVFPDoubleValue(const char* desc, double* value) {
bool is_double;
int regnum = VFPRegisters::Number(desc, &is_double);
if (regnum != kNoRegister && is_double) {
@@ -211,7 +217,7 @@ bool Debugger::GetVFPDoubleValue(const char* desc, double* value) {
}
-bool Debugger::SetBreakpoint(Instruction* breakpc) {
+bool ArmDebugger::SetBreakpoint(Instruction* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
if (sim_->break_pc_ != NULL) {
return false;
@@ -226,7 +232,7 @@ bool Debugger::SetBreakpoint(Instruction* breakpc) {
}
-bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
+bool ArmDebugger::DeleteBreakpoint(Instruction* breakpc) {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
@@ -237,21 +243,21 @@ bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
}
-void Debugger::UndoBreakpoints() {
+void ArmDebugger::UndoBreakpoints() {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
-void Debugger::RedoBreakpoints() {
+void ArmDebugger::RedoBreakpoints() {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
-void Debugger::Debug() {
+void ArmDebugger::Debug() {
intptr_t last_pc = -1;
bool done = false;
@@ -305,27 +311,45 @@ void Debugger::Debug() {
// Leave the debugger shell.
done = true;
} else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
- if (argc == 2) {
+ if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) {
int32_t value;
float svalue;
double dvalue;
if (strcmp(arg1, "all") == 0) {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
- PrintF("%3s: 0x%08x %10d\n", Registers::Name(i), value, value);
+ PrintF("%3s: 0x%08x %10d", Registers::Name(i), value, value);
+ if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
+ i < 8 &&
+ (i % 2) == 0) {
+ dvalue = GetRegisterPairDoubleValue(i);
+ PrintF(" (%f)\n", dvalue);
+ } else {
+ PrintF("\n");
+ }
}
for (int i = 0; i < kNumVFPDoubleRegisters; i++) {
dvalue = GetVFPDoubleRegisterValue(i);
- PrintF("%3s: %f\n",
- VFPRegisters::Name(i, true), dvalue);
+ uint64_t as_words = BitCast<uint64_t>(dvalue);
+ PrintF("%3s: %f 0x%08x %08x\n",
+ VFPRegisters::Name(i, true),
+ dvalue,
+ static_cast<uint32_t>(as_words >> 32),
+ static_cast<uint32_t>(as_words & 0xffffffff));
}
} else {
if (GetValue(arg1, &value)) {
PrintF("%s: 0x%08x %d \n", arg1, value, value);
} else if (GetVFPSingleValue(arg1, &svalue)) {
- PrintF("%s: %f \n", arg1, svalue);
+ uint32_t as_word = BitCast<uint32_t>(svalue);
+ PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word);
} else if (GetVFPDoubleValue(arg1, &dvalue)) {
- PrintF("%s: %f \n", arg1, dvalue);
+ uint64_t as_words = BitCast<uint64_t>(dvalue);
+ PrintF("%s: %f 0x%08x %08x\n",
+ arg1,
+ dvalue,
+ static_cast<uint32_t>(as_words >> 32),
+ static_cast<uint32_t>(as_words & 0xffffffff));
} else {
PrintF("%s unrecognized\n", arg1);
}
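The print paths above pair every floating point value with its raw bit pattern. A minimal standalone sketch of that word split, using memcpy in place of V8's BitCast (assumed here to be a bit-preserving cast):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Split a double into its raw 64-bit pattern and print the two words,
    // matching the "%f 0x%08x %08x" lines the debugger emits above.
    static void PrintDoubleWords(const char* name, double d) {
      uint64_t as_words;
      memcpy(&as_words, &d, sizeof(as_words));  // bit-cast without aliasing UB
      printf("%3s: %f 0x%08x %08x\n", name, d,
             static_cast<uint32_t>(as_words >> 32),
             static_cast<uint32_t>(as_words & 0xffffffff));
    }

    int main() {
      PrintDoubleWords("d0", 1.0);  // d0: 1.000000 0x3ff00000 00000000
      return 0;
    }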
@@ -380,11 +404,24 @@ void Debugger::Debug() {
end = cur + words;
while (cur < end) {
- PrintF(" 0x%08x: 0x%08x %10d\n",
+ PrintF(" 0x%08x: 0x%08x %10d",
reinterpret_cast<intptr_t>(cur), *cur, *cur);
+ HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+ int value = *cur;
+ Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ if (current_heap->Contains(obj) || ((value & 1) == 0)) {
+ PrintF(" (");
+ if ((value & 1) == 0) {
+ PrintF("smi %d", value / 2);
+ } else {
+ obj->ShortPrint();
+ }
+ PrintF(")");
+ }
+ PrintF("\n");
cur++;
}
- } else if (strcmp(cmd, "disasm") == 0) {
+ } else if (strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
// Use a reasonably large buffer.
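The memory dump above tags any word with a clear low bit as a smi and prints value / 2, reflecting V8's 32-bit smi encoding: the payload sits in the upper 31 bits over a zero tag bit, while heap pointers carry tag 1. A small sketch of the check and decode:

    #include <cstdint>
    #include <cstdio>

    // 32-bit smi encoding: payload << 1 with a 0 tag bit, so smis and
    // heap pointers (tag 1) can share a machine word.
    static bool IsSmi(int32_t word) { return (word & 1) == 0; }
    static int32_t SmiValue(int32_t word) { return word / 2; }  // as the dump above does

    int main() {
      int32_t tagged = 42 << 1;  // encode 42; the word reads 0x00000054
      if (IsSmi(tagged)) printf("(smi %d)\n", SmiValue(tagged));  // (smi 42)
      return 0;
    }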
@@ -398,11 +435,23 @@ void Debugger::Debug() {
cur = reinterpret_cast<byte*>(sim_->get_pc());
end = cur + (10 * Instruction::kInstrSize);
} else if (argc == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte*>(sim_->get_pc());
- // Disassemble <arg1> instructions.
- end = cur + (value * Instruction::kInstrSize);
+ int regnum = Registers::Number(arg1);
+ if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * Instruction::kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * Instruction::kInstrSize);
+ }
}
} else {
int32_t value1;
@@ -515,6 +564,7 @@ void Debugger::Debug() {
PrintF("print <register>\n");
PrintF(" print register content (alias 'p')\n");
PrintF(" use register name 'all' to print all registers\n");
+ PrintF(" add argument 'fp' to print register pair double values\n");
PrintF("printobject <register>\n");
PrintF(" print an object from a register (alias 'po')\n");
PrintF("flags\n");
@@ -524,8 +574,10 @@ void Debugger::Debug() {
PrintF("mem <address> [<words>]\n");
PrintF(" dump memory content, default dump 10 words\n");
PrintF("disasm [<instructions>]\n");
- PrintF("disasm [[<address>] <instructions>]\n");
- PrintF(" disassemble code, default is 10 instructions from pc\n");
+ PrintF("disasm [<address/register>]\n");
+ PrintF("disasm [[<address/register>] <instructions>]\n");
+ PrintF(" disassemble code, default is 10 instructions\n");
+ PrintF(" from pc (alias 'di')\n");
PrintF("gdb\n");
PrintF(" enter gdb\n");
PrintF("break <address>\n");
@@ -539,11 +591,11 @@ void Debugger::Debug() {
PrintF(" Stops are debug instructions inserted by\n");
PrintF(" the Assembler::stop() function.\n");
PrintF(" When hitting a stop, the Simulator will\n");
- PrintF(" stop and and give control to the Debugger.\n");
+ PrintF(" stop and give control to the ArmDebugger.\n");
PrintF(" The first %d stop codes are watched:\n",
Simulator::kNumOfWatchedStops);
PrintF(" - They can be enabled / disabled: the Simulator\n");
- PrintF(" will / won't stop when hitting them.\n");
+ PrintF(" will / won't stop when hitting them.\n");
PrintF(" - The Simulator keeps track of how many times they \n");
PrintF(" are met. (See the info command.) Going over a\n");
PrintF(" disabled stop still increases its counter. \n");
@@ -593,7 +645,9 @@ static bool AllOnOnePage(uintptr_t start, int size) {
}
-void Simulator::FlushICache(void* start_addr, size_t size) {
+void Simulator::FlushICache(v8::internal::HashMap* i_cache,
+ void* start_addr,
+ size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
int intra_line = (start & CachePage::kLineMask);
start -= intra_line;
@@ -602,22 +656,22 @@ void Simulator::FlushICache(void* start_addr, size_t size) {
int offset = (start & CachePage::kPageMask);
while (!AllOnOnePage(start, size - 1)) {
int bytes_to_flush = CachePage::kPageSize - offset;
- FlushOnePage(start, bytes_to_flush);
+ FlushOnePage(i_cache, start, bytes_to_flush);
start += bytes_to_flush;
size -= bytes_to_flush;
ASSERT_EQ(0, start & CachePage::kPageMask);
offset = 0;
}
if (size != 0) {
- FlushOnePage(start, size);
+ FlushOnePage(i_cache, start, size);
}
}
-CachePage* Simulator::GetCachePage(void* page) {
- v8::internal::HashMap::Entry* entry = i_cache_->Lookup(page,
- ICacheHash(page),
- true);
+CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
+ v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
+ ICacheHash(page),
+ true);
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
entry->value = new_page;
@@ -627,25 +681,28 @@ CachePage* Simulator::GetCachePage(void* page) {
// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(intptr_t start, int size) {
+void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
+ intptr_t start,
+ int size) {
ASSERT(size <= CachePage::kPageSize);
ASSERT(AllOnOnePage(start, size - 1));
ASSERT((start & CachePage::kLineMask) == 0);
ASSERT((size & CachePage::kLineMask) == 0);
void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
int offset = (start & CachePage::kPageMask);
- CachePage* cache_page = GetCachePage(page);
+ CachePage* cache_page = GetCachePage(i_cache, page);
char* valid_bytemap = cache_page->ValidityByte(offset);
memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
-void Simulator::CheckICache(Instruction* instr) {
+void Simulator::CheckICache(v8::internal::HashMap* i_cache,
+ Instruction* instr) {
intptr_t address = reinterpret_cast<intptr_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
int offset = (address & CachePage::kPageMask);
- CachePage* cache_page = GetCachePage(page);
+ CachePage* cache_page = GetCachePage(i_cache, page);
char* cache_valid_byte = cache_page->ValidityByte(offset);
bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
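GetCachePage, FlushOnePage and CheckICache all carve an address into a page key, a line, and an intra-page offset with the CachePage masks. A sketch of that decomposition with stand-in constants (the real shifts live in simulator-arm.h and may differ):

    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-ins for CachePage's mask constants.
    static const uintptr_t kPageSize = 1 << 12;
    static const uintptr_t kLineSize = 1 << 2;
    static const uintptr_t kPageMask = kPageSize - 1;
    static const uintptr_t kLineMask = kLineSize - 1;

    int main() {
      uintptr_t address = 0x12345678;
      void* page = reinterpret_cast<void*>(address & ~kPageMask);  // hash map key
      void* line = reinterpret_cast<void*>(address & ~kLineMask);  // validity unit
      uintptr_t offset = address & kPageMask;  // index into the page's data/bytemap
      printf("page %p line %p offset 0x%x\n", page, line,
             static_cast<unsigned>(offset));
      return 0;
    }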
@@ -662,29 +719,21 @@ void Simulator::CheckICache(Instruction* instr) {
}
-// Create one simulator per thread and keep it in thread local storage.
-static v8::internal::Thread::LocalStorageKey simulator_key;
-
-
-bool Simulator::initialized_ = false;
-
-
-void Simulator::Initialize() {
- if (initialized_) return;
- simulator_key = v8::internal::Thread::CreateThreadLocalKey();
- initialized_ = true;
- ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
+void Simulator::Initialize(Isolate* isolate) {
+ if (isolate->simulator_initialized()) return;
+ isolate->set_simulator_initialized(true);
+ ::v8::internal::ExternalReference::set_redirector(isolate,
+ &RedirectExternalReference);
}
-v8::internal::HashMap* Simulator::i_cache_ = NULL;
-
-
-Simulator::Simulator() {
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
+ i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == NULL) {
i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+ isolate_->set_simulator_i_cache(i_cache_);
}
- Initialize();
+ Initialize(isolate);
// Set up simulator support first. Some of this information is needed to
// set up the architecture state.
size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
@@ -748,11 +797,14 @@ class Redirection {
: external_function_(external_function),
swi_instruction_(al | (0xf*B24) | kCallRtRedirected),
type_(type),
- next_(list_) {
- Simulator::current()->
- FlushICache(reinterpret_cast<void*>(&swi_instruction_),
- Instruction::kInstrSize);
- list_ = this;
+ next_(NULL) {
+ Isolate* isolate = Isolate::Current();
+ next_ = isolate->simulator_redirection();
+ Simulator::current(isolate)->
+ FlushICache(isolate->simulator_i_cache(),
+ reinterpret_cast<void*>(&swi_instruction_),
+ Instruction::kInstrSize);
+ isolate->set_simulator_redirection(this);
}
void* address_of_swi_instruction() {
@@ -764,8 +816,9 @@ class Redirection {
static Redirection* Get(void* external_function,
ExternalReference::Type type) {
- Redirection* current;
- for (current = list_; current != NULL; current = current->next_) {
+ Isolate* isolate = Isolate::Current();
+ Redirection* current = isolate->simulator_redirection();
+ for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) return current;
}
return new Redirection(external_function, type);
@@ -783,13 +836,9 @@ class Redirection {
uint32_t swi_instruction_;
ExternalReference::Type type_;
Redirection* next_;
- static Redirection* list_;
};
-Redirection* Redirection::list_ = NULL;
-
-
void* Simulator::RedirectExternalReference(void* external_function,
ExternalReference::Type type) {
Redirection* redirection = Redirection::Get(external_function, type);
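The swi_instruction_ built in the constructor above is a single ARM svc whose 24-bit immediate marks the call as redirected; when the simulator decodes it, SoftwareInterrupt looks the target up in the per-isolate redirection list. A sketch of the encoding, where the concrete kCallRtRedirected value is an assumption:

    #include <cstdint>
    #include <cstdio>

    // ARM svc encoding: cond[31:28] | 0b1111[27:24] | imm24[23:0]. The al
    // condition and the 0xf*B24 opcode come from the constructor above;
    // the immediate 0x10 for kCallRtRedirected is assumed here.
    static const uint32_t kAlCondition = 0xEu << 28;
    static const uint32_t kSvcOpcode = 0xFu << 24;  // B24 is 1 << 24
    static const uint32_t kCallRtRedirected = 0x10;

    int main() {
      uint32_t swi_instruction = kAlCondition | kSvcOpcode | kCallRtRedirected;
      printf("trampoline: 0x%08x, imm24: 0x%06x\n",
             swi_instruction, swi_instruction & 0x00ffffff);  // 0xef000010, 0x000010
      return 0;
    }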
@@ -798,14 +847,16 @@ void* Simulator::RedirectExternalReference(void* external_function,
// Get the active Simulator for the current thread.
-Simulator* Simulator::current() {
- Initialize();
- Simulator* sim = reinterpret_cast<Simulator*>(
- v8::internal::Thread::GetThreadLocal(simulator_key));
+Simulator* Simulator::current(Isolate* isolate) {
+ v8::internal::Isolate::PerIsolateThreadData* isolate_data =
+ isolate->FindOrAllocatePerThreadDataForThisThread();
+ ASSERT(isolate_data != NULL);
+
+ Simulator* sim = isolate_data->simulator();
if (sim == NULL) {
- // TODO(146): delete the simulator object when a thread goes away.
- sim = new Simulator();
- v8::internal::Thread::SetThreadLocal(simulator_key, sim);
+ // TODO(146): delete the simulator object when a thread/isolate goes away.
+ sim = new Simulator(isolate);
+ isolate_data->set_simulator(sim);
}
return sim;
}
@@ -834,6 +885,19 @@ int32_t Simulator::get_register(int reg) const {
}
+double Simulator::get_double_from_register_pair(int reg) {
+ ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
+
+ double dm_val = 0.0;
+ // Read the bits from the unsigned integer registers_[] array
+ // into the double precision floating point value and return it.
+ char buffer[2 * sizeof(registers_[0])];
+ memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+ memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+ return(dm_val);
+}
+
+
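get_double_from_register_pair reassembles a double from two consecutive core registers with the same memcpy-through-a-buffer pattern GetFpArgs uses below. Reduced to its essentials, as a sketch rather than the simulator's code:

    #include <cstdint>
    #include <cstring>

    // Reinterpret two adjacent 32-bit register slots as one IEEE-754 double.
    // The char buffer round-trip keeps the copy visible to the compiler
    // under strict-aliasing rules, as the simulator's comments note.
    static double DoubleFromRegisterPair(const int32_t regs[2]) {
      double result;
      char buffer[sizeof(result)];
      memcpy(buffer, regs, sizeof(result));    // r<2n>, r<2n+1> -> raw bytes
      memcpy(&result, buffer, sizeof(result));
      return result;
    }
    // On little-endian ARM, regs[0] supplies the low word of the double.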
void Simulator::set_dw_register(int dreg, const int* dbl) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
registers_[dreg] = dbl[0];
@@ -899,12 +963,7 @@ void Simulator::set_d_register_from_double(int dreg, const double& dbl) {
// 2*sreg and 2*sreg+1.
char buffer[2 * sizeof(vfp_register[0])];
memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
-#ifndef BIG_ENDIAN_FLOATING_POINT
memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0]));
-#else
- memcpy(&vfp_register[dreg * 2], &buffer[4], sizeof(vfp_register[0]));
- memcpy(&vfp_register[dreg * 2 + 1], &buffer[0], sizeof(vfp_register[0]));
-#endif
}
@@ -941,37 +1000,80 @@ double Simulator::get_double_from_d_register(int dreg) {
// Read the bits from the unsigned integer vfp_register[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(vfp_register[0])];
-#ifdef BIG_ENDIAN_FLOATING_POINT
- memcpy(&buffer[0], &vfp_register[2 * dreg + 1], sizeof(vfp_register[0]));
- memcpy(&buffer[4], &vfp_register[2 * dreg], sizeof(vfp_register[0]));
-#else
memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
-#endif
memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
return(dm_val);
}
-// For use in calls that take two double values, constructed from r0, r1, r2
-// and r3.
+// For use in calls that take two double values, constructed either
+// from r0-r3 or d0 and d1.
void Simulator::GetFpArgs(double* x, double* y) {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[2 * sizeof(registers_[0])];
- // Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(buffer));
- memcpy(x, buffer, sizeof(buffer));
- // Registers 2 and 3 -> y.
- memcpy(buffer, registers_ + 2, sizeof(buffer));
- memcpy(y, buffer, sizeof(buffer));
+ if (use_eabi_hardfloat()) {
+ *x = get_double_from_d_register(0);
+ *y = get_double_from_d_register(1);
+ } else {
+ // We use a char buffer to get around the strict-aliasing rules which
+ // otherwise allow the compiler to optimize away the copy.
+ char buffer[sizeof(*x)];
+ // Registers 0 and 1 -> x.
+ memcpy(buffer, registers_, sizeof(*x));
+ memcpy(x, buffer, sizeof(*x));
+ // Registers 2 and 3 -> y.
+ memcpy(buffer, registers_ + 2, sizeof(*y));
+ memcpy(y, buffer, sizeof(*y));
+ }
+}
+
+// For use in calls that take one double value, constructed either
+// from r0 and r1 or d0.
+void Simulator::GetFpArgs(double* x) {
+ if (use_eabi_hardfloat()) {
+ *x = get_double_from_d_register(0);
+ } else {
+ // We use a char buffer to get around the strict-aliasing rules which
+ // otherwise allow the compiler to optimize away the copy.
+ char buffer[sizeof(*x)];
+ // Registers 0 and 1 -> x.
+ memcpy(buffer, registers_, sizeof(*x));
+ memcpy(x, buffer, sizeof(*x));
+ }
+}
+
+
+// For use in calls that take one double value, constructed either
+// from r0 and r1 or d0, and one integer value.
+void Simulator::GetFpArgs(double* x, int32_t* y) {
+ if (use_eabi_hardfloat()) {
+ *x = get_double_from_d_register(0);
+ *y = registers_[0];
+ } else {
+ // We use a char buffer to get around the strict-aliasing rules which
+ // otherwise allow the compiler to optimize away the copy.
+ char buffer[sizeof(*x)];
+ // Registers 0 and 1 -> x.
+ memcpy(buffer, registers_, sizeof(*x));
+ memcpy(x, buffer, sizeof(*x));
+ // Register 2 -> y.
+ memcpy(buffer, registers_ + 2, sizeof(*y));
+ memcpy(y, buffer, sizeof(*y));
+ }
}
+// The return value is either in r0/r1 or d0.
void Simulator::SetFpResult(const double& result) {
- char buffer[2 * sizeof(registers_[0])];
- memcpy(buffer, &result, sizeof(buffer));
- // result -> registers 0 and 1.
- memcpy(registers_, buffer, sizeof(buffer));
+ if (use_eabi_hardfloat()) {
+ char buffer[2 * sizeof(vfp_register[0])];
+ memcpy(buffer, &result, sizeof(buffer));
+ // Copy result to d0.
+ memcpy(vfp_register, buffer, sizeof(buffer));
+ } else {
+ char buffer[2 * sizeof(registers_[0])];
+ memcpy(buffer, &result, sizeof(buffer));
+ // Copy result to r0 and r1.
+ memcpy(registers_, buffer, sizeof(buffer));
+ }
}
@@ -1225,12 +1327,13 @@ void Simulator::SetVFlag(bool val) {
// Calculate C flag value for additions.
-bool Simulator::CarryFrom(int32_t left, int32_t right) {
+bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
uint32_t uleft = static_cast<uint32_t>(left);
uint32_t uright = static_cast<uint32_t>(right);
uint32_t urest = 0xffffffffU - uleft;
- return (uright > urest);
+ return (uright > urest) ||
+ (carry && (((uright + 1) > urest) || (uright > (urest - 1))));
}
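With urest = 0xffffffff - uleft, the extended CarryFrom above reports a carry-out exactly when uright + carry exceeds urest; the doubled comparison in the carry branch guards against uright + 1 wrapping to zero. A test sketch cross-checking that logic against plain 64-bit arithmetic:

    #include <cassert>
    #include <cstdint>

    // Reference version: the ADC carry-out is simply bit 32 of the wide sum.
    static bool CarryFromWide(int32_t left, int32_t right, int32_t carry) {
      uint64_t sum = static_cast<uint64_t>(static_cast<uint32_t>(left)) +
                     static_cast<uint32_t>(right) +
                     static_cast<uint32_t>(carry);
      return (sum >> 32) != 0;
    }

    int main() {
      // 0xffffffff + 0 + 1 carries; 0xfffffffe + 1 + 0 does not.
      assert(CarryFromWide(-1, 0, 1));
      assert(!CarryFromWide(-2, 1, 0));
      assert(CarryFromWide(-2, 1, 1));
      return 0;
    }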
@@ -1465,36 +1568,34 @@ static int count_bits(int bit_vector) {
}
-// Addressing Mode 4 - Load and Store Multiple
-void Simulator::HandleRList(Instruction* instr, bool load) {
+void Simulator::ProcessPUW(Instruction* instr,
+ int num_regs,
+ int reg_size,
+ intptr_t* start_address,
+ intptr_t* end_address) {
int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
- int rlist = instr->RlistValue();
- int num_regs = count_bits(rlist);
-
- intptr_t start_address = 0;
- intptr_t end_address = 0;
switch (instr->PUField()) {
case da_x: {
UNIMPLEMENTED();
break;
}
case ia_x: {
- start_address = rn_val;
- end_address = rn_val + (num_regs * 4) - 4;
- rn_val = rn_val + (num_regs * 4);
+ *start_address = rn_val;
+ *end_address = rn_val + (num_regs * reg_size) - reg_size;
+ rn_val = rn_val + (num_regs * reg_size);
break;
}
case db_x: {
- start_address = rn_val - (num_regs * 4);
- end_address = rn_val - 4;
- rn_val = start_address;
+ *start_address = rn_val - (num_regs * reg_size);
+ *end_address = rn_val - reg_size;
+ rn_val = *start_address;
break;
}
case ib_x: {
- start_address = rn_val + 4;
- end_address = rn_val + (num_regs * 4);
- rn_val = end_address;
+ *start_address = rn_val + reg_size;
+ *end_address = rn_val + (num_regs * reg_size);
+ rn_val = *end_address;
break;
}
default: {
@@ -1505,6 +1606,17 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
if (instr->HasW()) {
set_register(rn, rn_val);
}
+}
+
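ProcessPUW reduces the PUW field to a [start_address, end_address] range plus a base writeback (applied only when the W bit is set). Worked numbers for three registers of four bytes from base 0x1000, illustrative values only:

    #include <cstdint>
    #include <cstdio>

    // Mirror of the three implemented PUW cases for n regs of reg_size bytes:
    // ia (increment after), db (decrement before), ib (increment before).
    int main() {
      const long rn = 0x1000;
      const long n = 3, size = 4;
      printf("ia: start=0x%lx end=0x%lx writeback=0x%lx\n",
             rn, rn + n * size - size, rn + n * size);        // 0x1000 0x1008 0x100c
      printf("db: start=0x%lx end=0x%lx writeback=0x%lx\n",
             rn - n * size, rn - size, rn - n * size);        // 0xff4  0xffc  0xff4
      printf("ib: start=0x%lx end=0x%lx writeback=0x%lx\n",
             rn + size, rn + n * size, rn + n * size);        // 0x1004 0x100c 0x100c
      return 0;
    }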
+// Addressing Mode 4 - Load and Store Multiple
+void Simulator::HandleRList(Instruction* instr, bool load) {
+ int rlist = instr->RlistValue();
+ int num_regs = count_bits(rlist);
+
+ intptr_t start_address = 0;
+ intptr_t end_address = 0;
+ ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
+
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
int reg = 0;
while (rlist != 0) {
@@ -1523,6 +1635,57 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
}
+// Addressing Mode 6 - Load and Store Multiple Coprocessor registers.
+void Simulator::HandleVList(Instruction* instr) {
+ VFPRegPrecision precision =
+ (instr->SzValue() == 0) ? kSinglePrecision : kDoublePrecision;
+ int operand_size = (precision == kSinglePrecision) ? 4 : 8;
+
+ bool load = (instr->VLValue() == 0x1);
+
+ int vd;
+ int num_regs;
+ vd = instr->VFPDRegValue(precision);
+ if (precision == kSinglePrecision) {
+ num_regs = instr->Immed8Value();
+ } else {
+ num_regs = instr->Immed8Value() / 2;
+ }
+
+ intptr_t start_address = 0;
+ intptr_t end_address = 0;
+ ProcessPUW(instr, num_regs, operand_size, &start_address, &end_address);
+
+ intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
+ for (int reg = vd; reg < vd + num_regs; reg++) {
+ if (precision == kSinglePrecision) {
+ if (load) {
+ set_s_register_from_sinteger(
+ reg, ReadW(reinterpret_cast<int32_t>(address), instr));
+ } else {
+ WriteW(reinterpret_cast<int32_t>(address),
+ get_sinteger_from_s_register(reg), instr);
+ }
+ address += 1;
+ } else {
+ if (load) {
+ set_s_register_from_sinteger(
+ 2 * reg, ReadW(reinterpret_cast<int32_t>(address), instr));
+ set_s_register_from_sinteger(
+ 2 * reg + 1, ReadW(reinterpret_cast<int32_t>(address + 1), instr));
+ } else {
+ WriteW(reinterpret_cast<int32_t>(address),
+ get_sinteger_from_s_register(2 * reg), instr);
+ WriteW(reinterpret_cast<int32_t>(address + 1),
+ get_sinteger_from_s_register(2 * reg + 1), instr);
+ }
+ address += 2;
+ }
+ }
+ ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
+}
+
+
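HandleVList above sizes the transfer from the instruction's Immed8 field, which counts 32-bit words: single precision moves Immed8 registers of four bytes, double precision Immed8 / 2 registers of eight. A sketch of that shape computation, with the instruction field accessors assumed away:

    // The values ProcessPUW receives from HandleVList. Illustrative only.
    struct VListShape { int num_regs; int operand_size; };

    static VListShape DecodeVListShape(int immed8, bool double_precision) {
      VListShape shape;
      shape.operand_size = double_precision ? 8 : 4;
      shape.num_regs = double_precision ? immed8 / 2 : immed8;
      return shape;
    }
    // DecodeVListShape(4, false) -> 4 S registers of 4 bytes;
    // DecodeVListShape(4, true)  -> 2 D registers of 8 bytes.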
// Calls into the V8 runtime are based on this very simple interface.
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair which is essentially two 32-bit values stuffed into a
@@ -1533,7 +1696,8 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg1,
int32_t arg2,
int32_t arg3,
- int32_t arg4);
+ int32_t arg4,
+ int32_t arg5);
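The int64_t return type above is how an ObjectPair travels back from the runtime: two 32-bit values packed into one 64-bit result, which SoftwareInterrupt later splits into r0 and r1 (the lo_res/hi_res code further down). A pack/unpack sketch:

    #include <cassert>
    #include <cstdint>

    // Pack two 32-bit values into one 64-bit return (lo in the low half),
    // then split it the way SoftwareInterrupt does into r0/r1.
    static int64_t MakePair(int32_t lo, int32_t hi) {
      return (static_cast<int64_t>(hi) << 32) |
             static_cast<uint32_t>(lo);  // zero-extend lo so hi's bits survive
    }

    int main() {
      int64_t result = MakePair(-7, 42);
      int32_t lo_res = static_cast<int32_t>(result);        // -> r0
      int32_t hi_res = static_cast<int32_t>(result >> 32);  // -> r1
      assert(lo_res == -7 && hi_res == 42);
      return 0;
    }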
typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
int32_t arg1,
int32_t arg2,
@@ -1564,28 +1728,94 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
int32_t arg2 = get_register(r2);
int32_t arg3 = get_register(r3);
int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
- int32_t arg4 = *stack_pointer;
+ int32_t arg4 = stack_pointer[0];
+ int32_t arg5 = stack_pointer[1];
+ bool fp_call =
+ (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+ if (use_eabi_hardfloat()) {
+ // With the hard floating point calling convention, double
+ // arguments are passed in VFP registers. Fetch the arguments
+ // from there and call the builtin using soft floating point
+ // convention.
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ arg0 = vfp_register[0];
+ arg1 = vfp_register[1];
+ arg2 = vfp_register[2];
+ arg3 = vfp_register[3];
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ arg0 = vfp_register[0];
+ arg1 = vfp_register[1];
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ arg0 = vfp_register[0];
+ arg1 = vfp_register[1];
+ arg2 = get_register(0);
+ break;
+ default:
+ break;
+ }
+ }
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_lr = get_register(lr);
intptr_t external =
reinterpret_cast<intptr_t>(redirection->external_function());
- if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ if (fp_call) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- double x, y;
- GetFpArgs(&x, &y);
- PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(target), x, y);
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ double dval0, dval1;
+ int32_t ival;
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ GetFpArgs(&dval0, &dval1);
+ PrintF("Call to host function at %p with args %f, %f",
+ FUNCTION_ADDR(target), dval0, dval1);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ GetFpArgs(&dval0);
+ PrintF("Call to host function at %p with arg %f",
+ FUNCTION_ADDR(target), dval0);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ GetFpArgs(&dval0, &ival);
+ PrintF("Call to host function at %p with args %f, %d",
+ FUNCTION_ADDR(target), dval0, ival);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
- double result = target(arg0, arg1, arg2, arg3);
- SetFpResult(result);
+ if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ double result = target(arg0, arg1, arg2, arg3);
+ SetFpResult(result);
+ } else {
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ int32_t lo_res = static_cast<int32_t>(result);
+ int32_t hi_res = static_cast<int32_t>(result >> 32);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08x\n", lo_res);
+ }
+ set_register(r0, lo_res);
+ set_register(r1, hi_res);
+ }
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
SimulatorRuntimeDirectApiCall target =
reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
@@ -1627,20 +1857,22 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
- "Call to host function at %p args %08x, %08x, %08x, %08x, %0xc",
+ "Call to host function at %p "
+ "args %08x, %08x, %08x, %08x, %08x, %08x",
FUNCTION_ADDR(target),
arg0,
arg1,
arg2,
arg3,
- arg4);
+ arg4,
+ arg5);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
- int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
int32_t lo_res = static_cast<int32_t>(result);
int32_t hi_res = static_cast<int32_t>(result >> 32);
if (::v8::internal::FLAG_trace_sim) {
@@ -1654,7 +1886,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
}
case kBreakpoint: {
- Debugger dbg(this);
+ ArmDebugger dbg(this);
dbg.Debug();
break;
}
@@ -1668,7 +1900,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// Stop if it is enabled, otherwise go on jumping over the stop
// and the message address.
if (isEnabledStop(code)) {
- Debugger dbg(this);
+ ArmDebugger dbg(this);
dbg.Stop(instr);
} else {
set_pc(get_pc() + 2 * Instruction::kInstrSize);
@@ -1976,7 +2208,7 @@ void Simulator::DecodeType01(Instruction* instr) {
break;
}
case BKPT: {
- Debugger dbg(this);
+ ArmDebugger dbg(this);
PrintF("Simulator hit BKPT.\n");
dbg.Debug();
break;
@@ -2088,8 +2320,15 @@ void Simulator::DecodeType01(Instruction* instr) {
}
case ADC: {
- Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
- Format(instr, "adc'cond's 'rd, 'rn, 'imm");
+ // Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
+ // Format(instr, "adc'cond's 'rd, 'rn, 'imm");
+ alu_out = rn_val + shifter_operand + GetCarry();
+ set_register(rd, alu_out);
+ if (instr->HasS()) {
+ SetNZFlags(alu_out);
+ SetCFlag(CarryFrom(rn_val, shifter_operand, GetCarry()));
+ SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
+ }
break;
}
@@ -2467,6 +2706,8 @@ void Simulator::DecodeType7(Instruction* instr) {
// vmov :Rt = Sn
// vcvt: Dd = Sm
// vcvt: Sd = Dm
+// Dd = vabs(Dm)
+// Dd = vneg(Dm)
// Dd = vadd(Dn, Dm)
// Dd = vsub(Dn, Dm)
// Dd = vmul(Dn, Dm)
@@ -2502,6 +2743,11 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
double dm_value = get_double_from_d_register(vm);
double dd_value = fabs(dm_value);
set_d_register_from_double(vd, dd_value);
+ } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
+ // vneg
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = -dm_value;
+ set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
@@ -2895,9 +3141,17 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
}
break;
}
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xB:
+ // Load/store multiple single-precision registers from memory: vldm/vstm.
+ HandleVList(instr);
+ break;
default:
UNIMPLEMENTED(); // Not used by V8.
- break;
}
} else if (instr->CoprocessorValue() == 0xB) {
switch (instr->OpcodeValue()) {
@@ -2944,9 +3198,14 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
}
break;
}
+ case 0x4:
+ case 0x5:
+ case 0x9:
+ // Load/store multiple double-precision registers from memory: vldm/vstm.
+ HandleVList(instr);
+ break;
default:
UNIMPLEMENTED(); // Not used by V8.
- break;
}
} else {
UNIMPLEMENTED(); // Not used by V8.
@@ -2957,7 +3216,7 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
- CheckICache(instr);
+ CheckICache(isolate_->simulator_i_cache(), instr);
}
pc_modified_ = false;
if (::v8::internal::FLAG_trace_sim) {
@@ -3040,7 +3299,7 @@ void Simulator::Execute() {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;
if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
- Debugger dbg(this);
+ ArmDebugger dbg(this);
dbg.Debug();
} else {
InstructionDecode(instr);
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index bdf1f8a10..391ef69f5 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -49,25 +49,28 @@ namespace internal {
(entry(p0, p1, p2, p3, p4))
typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
- void*, int*, Address, int);
+ void*, int*, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- (FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6))
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ (FUNCTION_CAST<arm_regexp_matcher>(entry)( \
+ p0, p1, p2, p3, NULL, p4, p5, p6, p7))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- (reinterpret_cast<TryCatch*>(try_catch_address))
+ reinterpret_cast<TryCatch*>(try_catch_address)
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on arm uses the C stack, we
// just use the C stack limit.
class SimulatorStack : public v8::internal::AllStatic {
public:
- static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ USE(isolate);
return c_limit;
}
@@ -123,7 +126,7 @@ class CachePage {
class Simulator {
public:
- friend class Debugger;
+ friend class ArmDebugger;
enum Register {
no_reg = -1,
r0 = 0, r1, r2, r3, r4, r5, r6, r7,
@@ -142,18 +145,19 @@ class Simulator {
num_d_registers = 16
};
- Simulator();
+ explicit Simulator(Isolate* isolate);
~Simulator();
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
- static Simulator* current();
+ static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state. Reading the pc value adheres to the ARM
// architecture specification and is off by 8 from the currently executing
// instruction.
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
+ double get_double_from_register_pair(int reg);
void set_dw_register(int dreg, const int* dbl);
// Support for VFP.
@@ -177,7 +181,7 @@ class Simulator {
void Execute();
// Call on program start.
- static void Initialize();
+ static void Initialize(Isolate* isolate);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
@@ -191,12 +195,22 @@ class Simulator {
uintptr_t PopAddress();
// ICache checking.
- static void FlushICache(void* start, size_t size);
+ static void FlushICache(v8::internal::HashMap* i_cache, void* start,
+ size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_lr, end_sim_pc).
bool has_bad_pc() const;
+ // Returns true if the EABI hard-float variant is used for double arguments.
+ bool use_eabi_hardfloat() {
+#if USE_EABI_HARDFLOAT
+ return true;
+#else
+ return false;
+#endif
+ }
+
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
@@ -220,13 +234,17 @@ class Simulator {
void SetNZFlags(int32_t val);
void SetCFlag(bool val);
void SetVFlag(bool val);
- bool CarryFrom(int32_t left, int32_t right);
+ bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
bool BorrowFrom(int32_t left, int32_t right);
bool OverflowFrom(int32_t alu_out,
int32_t left,
int32_t right,
bool addition);
+ inline int GetCarry() {
+ return c_flag_ ? 1 : 0;
+ }
+
// Support for VFP.
void Compute_FPSCR_Flags(double val1, double val2);
void Copy_FPSCR_to_APSR();
@@ -234,7 +252,13 @@ class Simulator {
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instruction* instr, bool* carry_out);
int32_t GetImm(Instruction* instr, bool* carry_out);
+ void ProcessPUW(Instruction* instr,
+ int num_regs,
+ int reg_size,
+ intptr_t* start_address,
+ intptr_t* end_address);
void HandleRList(Instruction* instr, bool load);
+ void HandleVList(Instruction* instr);
void SoftwareInterrupt(Instruction* instr);
// Stop helper functions.
@@ -287,18 +311,20 @@ class Simulator {
void InstructionDecode(Instruction* instr);
// ICache.
- static void CheckICache(Instruction* instr);
- static void FlushOnePage(intptr_t start, int size);
- static CachePage* GetCachePage(void* page);
+ static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
+ static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+ int size);
+ static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
// Runtime call support.
static void* RedirectExternalReference(
void* external_function,
v8::internal::ExternalReference::Type type);
- // For use in calls that take two double values, constructed from r0, r1, r2
- // and r3.
+ // For use in calls that take double value arguments.
void GetFpArgs(double* x, double* y);
+ void GetFpArgs(double* x);
+ void GetFpArgs(double* x, int32_t* y);
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
@@ -333,15 +359,16 @@ class Simulator {
char* stack_;
bool pc_modified_;
int icount_;
- static bool initialized_;
// Icache simulation
- static v8::internal::HashMap* i_cache_;
+ v8::internal::HashMap* i_cache_;
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
+ v8::internal::Isolate* isolate_;
+
// A stop is watched if its code is less than kNumOfWatchedStops.
// Only watched stops support enabling/disabling and the counter feature.
static const uint32_t kNumOfWatchedStops = 256;
@@ -364,15 +391,16 @@ class Simulator {
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current()->Call( \
+ reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- Simulator::current()->Call(entry, 8, p0, p1, p2, p3, NULL, p4, p5, p6)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ Simulator::current(Isolate::Current())->Call( \
+ entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == \
- NULL ? NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ try_catch_address == NULL ? \
+ NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
// The simulator has its own stack. Thus it has a different stack limit from
@@ -382,17 +410,18 @@ class Simulator {
// trouble down the line.
class SimulatorStack : public v8::internal::AllStatic {
public:
- static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
- return Simulator::current()->StackLimit();
+ static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ return Simulator::current(isolate)->StackLimit();
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current();
+ Simulator* sim = Simulator::current(Isolate::Current());
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch() {
- Simulator::current()->PopAddress();
+ Simulator::current(Isolate::Current())->PopAddress();
}
};
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 60a11f3ce..7ea000edb 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "stub-cache.h"
namespace v8 {
@@ -39,15 +39,16 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void ProbeTable(MacroAssembler* masm,
+static void ProbeTable(Isolate* isolate,
+ MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
Register name,
Register offset,
Register scratch,
Register scratch2) {
- ExternalReference key_offset(SCTableReference::keyReference(table));
- ExternalReference value_offset(SCTableReference::valueReference(table));
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
@@ -94,15 +95,17 @@ static void ProbeTable(MacroAssembler* masm,
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register scratch0,
- Register scratch1) {
+MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ String* name,
+ Register scratch0,
+ Register scratch1) {
ASSERT(name->IsSymbol());
- __ IncrementCounter(&Counters::negative_lookups, 1, scratch0, scratch1);
- __ IncrementCounter(&Counters::negative_lookups_miss, 1, scratch0, scratch1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
Label done;
@@ -118,7 +121,7 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
// Check that receiver is a JSObject.
__ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(lt, miss_label);
// Load properties array.
@@ -134,71 +137,21 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
// Restore the temporarily used register.
__ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- // Compute the capacity mask.
- const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
-
- // Generate an unrolled loop that performs a few probes before
- // giving up.
- static const int kProbes = 4;
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
-
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the null value).
- for (int i = 0; i < kProbes; i++) {
- // scratch0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = scratch1;
- // Capacity is smi 2^n.
- __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
- __ sub(index, index, Operand(1));
- __ and_(index, index, Operand(
- Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
-
- Register entity_name = scratch1;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- Register tmp = properties;
- __ add(tmp, properties, Operand(index, LSL, 1));
- __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
- ASSERT(!tmp.is(entity_name));
- __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
- __ cmp(entity_name, tmp);
- if (i != kProbes - 1) {
- __ b(eq, &done);
-
- // Stop if found the property.
- __ cmp(entity_name, Operand(Handle<String>(name)));
- __ b(eq, miss_label);
-
- // Check if the entry name is not a symbol.
- __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ ldrb(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ tst(entity_name, Operand(kIsSymbolMask));
- __ b(eq, miss_label);
-
- // Restore the properties.
- __ ldr(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- } else {
- // Give up probing if still not found the undefined value.
- __ b(ne, miss_label);
- }
- }
+
+ MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
+ masm,
+ miss_label,
+ &done,
+ receiver,
+ properties,
+ name,
+ scratch1);
+ if (result->IsFailure()) return result;
+
__ bind(&done);
- __ DecrementCounter(&Counters::negative_lookups_miss, 1, scratch0, scratch1);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+ return result;
}
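The unrolled loop removed here probed the property dictionary at masked index (hash + i + i*i) & mask, scaled by the three-word entry size, before the work moved into StringDictionaryLookupStub::GenerateNegativeLookup. The probe arithmetic as a standalone sketch (the hash and capacity are illustrative):

    #include <cstdint>
    #include <cstdio>

    // Quadratic probing over a power-of-two table, per the removed comment:
    // index_i = (hash + i + i*i) & mask, then scaled by the 3-pointer entry.
    static const int kEntrySize = 3;  // StringDictionary::kEntrySize

    static uint32_t ProbeSlot(uint32_t hash, uint32_t capacity, int i) {
      uint32_t index = (hash + i + i * i) & (capacity - 1);
      return index * kEntrySize;  // offset into the elements array, in words
    }

    int main() {
      // The four probes for an illustrative hash in a 16-entry dictionary.
      for (int i = 0; i < 4; i++) {
        printf("probe %d -> element offset %u\n", i, ProbeSlot(0x2b, 16, i));
      }
      return 0;
    }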
@@ -209,6 +162,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register scratch,
Register extra,
Register extra2) {
+ Isolate* isolate = masm->isolate();
Label miss;
// Make sure that code is valid. The shifting code relies on the
@@ -235,8 +189,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ASSERT(!extra2.is(no_reg));
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
__ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
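The recurring rewrite in these stub-cache hunks folds the two-instruction smi test (tst against kSmiTagMask, then b eq) into the JumpIfSmi macro. The branch condition it encodes, as a sketch assuming the 32-bit tag mask of 1 (see the smi sketch earlier in this diff):

    #include <cstdint>

    // The condition JumpIfSmi branches on, per the removed instruction pair.
    static bool JumpIfSmiTaken(int32_t reg_value) {
      const int32_t kSmiTagMask = 1;          // assumed 32-bit smi tag mask
      return (reg_value & kSmiTagMask) == 0;  // Z set by tst -> branch taken
    }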
@@ -248,7 +201,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
// Probe the primary table.
- ProbeTable(masm, flags, kPrimary, name, scratch, extra, extra2);
+ ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
// Primary miss: Compute hash for secondary probe.
__ sub(scratch, scratch, Operand(name));
@@ -258,7 +211,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
// Probe the secondary table.
- ProbeTable(masm, flags, kSecondary, name, scratch, extra, extra2);
+ ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
@@ -286,13 +239,15 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ Isolate* isolate = masm->isolate();
// Check we're still in the same context.
__ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ Move(ip, Top::global());
+ __ Move(ip, isolate->global());
__ cmp(prototype, ip);
__ b(ne, miss);
// Get the global function with the given index.
- JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
+ JSFunction* function =
+ JSFunction::cast(isolate->global_context()->get(index));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -326,8 +281,7 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
Register scratch,
Label* miss_label) {
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss_label);
+ __ JumpIfSmi(receiver, miss_label);
// Check that the object is a JS array.
__ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
@@ -349,8 +303,7 @@ static void GenerateStringCheck(MacroAssembler* masm,
Label* smi,
Label* non_string_object) {
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, smi);
+ __ JumpIfSmi(receiver, smi);
// Check that the object is a string.
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
@@ -425,8 +378,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Label exit;
// Check that the receiver isn't a smi.
- __ tst(receiver_reg, Operand(kSmiTagMask));
- __ b(eq, miss_label);
+ __ JumpIfSmi(receiver_reg, miss_label);
// Check that the map of the receiver hasn't changed.
__ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
@@ -450,8 +402,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ mov(r2, Operand(Handle<Map>(transition)));
__ Push(r2, r0);
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)),
- 3, 1);
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ masm->isolate()),
+ 3,
+ 1);
return;
}
@@ -473,8 +427,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ str(r0, FieldMemOperand(receiver_reg, offset));
// Skip updating write barrier if storing a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &exit);
+ __ JumpIfSmi(r0, &exit);
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
@@ -487,8 +440,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ str(r0, FieldMemOperand(scratch, offset));
// Skip updating write barrier if storing a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &exit);
+ __ JumpIfSmi(r0, &exit);
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
@@ -505,9 +457,9 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
Code* code = NULL;
if (kind == Code::LOAD_IC) {
- code = Builtins::builtin(Builtins::LoadIC_Miss);
+ code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
} else {
- code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+ code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
}
Handle<Code> ic(code);
@@ -518,7 +470,8 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
static void GenerateCallFunction(MacroAssembler* masm,
Object* object,
const ParameterCount& arguments,
- Label* miss) {
+ Label* miss,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- r0: receiver
// -- r1: function to call
@@ -537,7 +490,10 @@ static void GenerateCallFunction(MacroAssembler* masm,
}
// Invoke the function.
- __ InvokeFunction(r1, arguments, JUMP_FUNCTION);
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(r1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
}
@@ -548,7 +504,7 @@ static void PushInterceptorArguments(MacroAssembler* masm,
JSObject* holder_obj) {
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!Heap::InNewSpace(interceptor));
+ ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
Register scratch = name;
__ mov(scratch, Operand(Handle<Object>(interceptor)));
__ push(scratch);
@@ -567,7 +523,8 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
+ masm->isolate());
__ mov(r0, Operand(5));
__ mov(r1, Operand(ref));
@@ -616,7 +573,7 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
// Pass the additional arguments FastHandleApiCall expects.
Object* call_data = optimization.api_call_info()->data();
Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (Heap::InNewSpace(call_data)) {
+ if (masm->isolate()->heap()->InNewSpace(call_data)) {
__ Move(r0, api_call_info_handle);
__ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
} else {
@@ -656,8 +613,9 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
// garbage collection but instead return the allocation failure
// object.
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
- ExternalReference ref =
- ExternalReference(&fun, ExternalReference::DIRECT_API_CALL);
+ ExternalReference ref = ExternalReference(&fun,
+ ExternalReference::DIRECT_API_CALL,
+ masm->isolate());
return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
@@ -665,10 +623,12 @@ class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
const ParameterCount& arguments,
- Register name)
+ Register name,
+ Code::ExtraICState extra_ic_state)
: stub_compiler_(stub_compiler),
arguments_(arguments),
- name_(name) {}
+ name_(name),
+ extra_ic_state_(extra_ic_state) {}
MaybeObject* Compile(MacroAssembler* masm,
JSObject* object,
@@ -710,7 +670,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name,
holder,
miss);
- return Heap::undefined_value();
+ return masm->isolate()->heap()->undefined_value();
}
}
@@ -729,6 +689,8 @@ class CallInterceptorCompiler BASE_EMBEDDED {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
+ Counters* counters = masm->isolate()->counters();
+
int depth1 = kInvalidProtoDepth;
int depth2 = kInvalidProtoDepth;
bool can_do_fast_api_call = false;
@@ -746,11 +708,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
(depth2 != kInvalidProtoDepth);
}
- __ IncrementCounter(&Counters::call_const_interceptor, 1,
+ __ IncrementCounter(counters->call_const_interceptor(), 1,
scratch1, scratch2);
if (can_do_fast_api_call) {
- __ IncrementCounter(&Counters::call_const_interceptor_fast_api, 1,
+ __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
scratch1, scratch2);
ReserveSpaceForFastApiCall(masm, scratch1);
}
@@ -794,8 +756,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
arguments_.immediate());
if (result->IsFailure()) return result;
} else {
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
__ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION);
+ JUMP_FUNCTION, call_kind);
}
// Deferred code for fast API call case---clean preallocated space.
@@ -811,7 +776,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
FreeSpaceForFastApiCall(masm);
}
- return Heap::undefined_value();
+ return masm->isolate()->heap()->undefined_value();
}
void CompileRegular(MacroAssembler* masm,
@@ -840,9 +805,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
interceptor_holder);
__ CallExternalReference(
- ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
- 5);
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
+ masm->isolate()),
+ 5);
// Restore the name_ register.
__ pop(name_);
@@ -877,6 +842,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
StubCompiler* stub_compiler_;
const ParameterCount& arguments_;
Register name_;
+ Code::ExtraICState extra_ic_state_;
};
@@ -1080,7 +1046,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
- MaybeObject* maybe_lookup_result = Heap::LookupSymbol(name);
+ MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
Object* lookup_result = NULL; // Initialization to please compiler.
if (!maybe_lookup_result->ToObject(&lookup_result)) {
set_failure(Failure::cast(maybe_lookup_result));
@@ -1091,16 +1057,21 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
ASSERT(current->property_dictionary()->FindEntry(name) ==
StringDictionary::kNotFound);
- GenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
+ MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+ miss,
+ reg,
+ name,
+ scratch1,
+ scratch2);
+ if (negative_lookup->IsFailure()) {
+ set_failure(Failure::cast(negative_lookup));
+ return reg;
+ }
+
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // from now the object is in holder_reg
__ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else if (Heap::InNewSpace(prototype)) {
+ } else if (heap()->InNewSpace(prototype)) {
// Get the map of the current object.
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
__ cmp(scratch1, Operand(Handle<Map>(current->map())));
@@ -1154,7 +1125,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
__ b(ne, miss);
// Log the check depth.
- LOG(IntEvent("check-maps-depth", depth + 1));
+ LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -1188,8 +1159,7 @@ void StubCompiler::GenerateLoadField(JSObject* object,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
@@ -1210,8 +1180,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
@@ -1235,8 +1204,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
@@ -1248,7 +1216,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
__ push(receiver);
__ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
Handle<AccessorInfo> callback_handle(callback);
- if (Heap::InNewSpace(callback_handle->data())) {
+ if (heap()->InNewSpace(callback_handle->data())) {
__ Move(scratch3, callback_handle);
__ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
} else {
@@ -1273,7 +1241,9 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
// object.
const int kStackUnwindSpace = 4;
ExternalReference ref =
- ExternalReference(&fun, ExternalReference::DIRECT_GETTER_CALL);
+ ExternalReference(&fun,
+ ExternalReference::DIRECT_GETTER_CALL,
+ masm()->isolate());
return masm()->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
@@ -1402,7 +1372,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
}
ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
+ masm()->isolate());
__ TailCallExternalReference(ref, 5, 1);
}
} else { // !compile_followup_inline
@@ -1414,8 +1385,9 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
PushInterceptorArguments(masm(), receiver, holder_reg,
name_reg, interceptor_holder);
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
+ masm()->isolate());
__ TailCallExternalReference(ref, 5, 1);
}
}
@@ -1445,8 +1417,7 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (object != holder) {
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(r0, miss);
}
// Check that the maps haven't changed.
@@ -1462,14 +1433,13 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
__ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (Heap::InNewSpace(function)) {
+ if (heap()->InNewSpace(function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(r1, miss);
__ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
__ b(ne, miss);
@@ -1486,8 +1456,10 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj = StubCache::ComputeCallMiss(arguments().immediate(),
- kind_);
+ MaybeObject* maybe_obj =
+ isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
+ kind_,
+ extra_ic_state_);
Object* obj;
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
__ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
@@ -1512,21 +1484,18 @@ MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
// Get the receiver of the function from the stack into r0.
__ ldr(r0, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r0, &miss);
// Perform the appropriate check and compute the holder register.
Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
- GenerateCallFunction(masm(), object, arguments(), &miss);
+ GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
// Handle call cache miss.
__ bind(&miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(FIELD, name);
@@ -1547,7 +1516,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
Label miss;
@@ -1581,8 +1550,11 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
// Check that the elements are in fast mode and writable.
- __ CheckMap(elements, r0,
- Heap::kFixedArrayMapRootIndex, &call_builtin, true);
+ __ CheckMap(elements,
+ r0,
+ Heap::kFixedArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
if (argc == 1) { // Otherwise fall through to call the builtin.
Label exit, with_write_barrier, attempt_to_grow_elements;
@@ -1633,10 +1605,11 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ b(&call_builtin);
}
+ Isolate* isolate = masm()->isolate();
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate);
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate);
const int kAllocationDelta = 4;
// Load top and check if it is the end of elements.
@@ -1676,17 +1649,16 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ Ret();
}
__ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
+ __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
+ masm()->isolate()),
argc + 1,
1);
}
// Handle call cache miss.
__ bind(&miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
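
The attempt_to_grow_elements path above works because new space is a bump-pointer allocator: the backing store can be extended by kAllocationDelta words in place only when it is the most recently allocated object (its end equals the allocation top) and the bumped top stays within the limit. A standalone sketch of that check, with illustrative names:

    #include <cstddef>
    #include <cstdint>

    struct NewSpaceSketch {
      uintptr_t top;    // next free address (new_space_allocation_top)
      uintptr_t limit;  // end of the space  (new_space_allocation_limit)
    };

    // Returns true and bumps the top when the elements array can grow in
    // place; otherwise the stub tail-calls the ArrayPush builtin.
    bool TryGrowInPlaceSketch(NewSpaceSketch* space, uintptr_t elements_end,
                              std::size_t delta_bytes) {
      if (space->top != elements_end) return false;  // not the last object
      if (space->top + delta_bytes > space->limit) return false;  // no room
      space->top += delta_bytes;
      return true;
    }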
@@ -1707,7 +1679,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
Label miss, return_undefined, call_builtin;
@@ -1731,7 +1703,11 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
__ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
// Check that the elements are in fast mode and writable.
- __ CheckMap(elements, r0, Heap::kFixedArrayMapRootIndex, &call_builtin, true);
+ __ CheckMap(elements,
+ r0,
+ Heap::kFixedArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
// Get the array's length into r4 and calculate new length.
__ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1763,16 +1739,15 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
__ Ret();
__ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
+ __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
+ masm()->isolate()),
argc + 1,
1);
// Handle call cache miss.
__ bind(&miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -1794,7 +1769,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsString() || cell != NULL) return heap()->undefined_value();
const int argc = arguments().immediate();
@@ -1803,7 +1778,9 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+ if (kind_ == Code::CALL_IC &&
+ (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
@@ -1855,10 +1832,8 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// Restore function name in r2.
__ Move(r2, Handle<String>(name));
__ bind(&name_miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
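
The switch from a direct equality test on extra_ic_state_ to CallICBase::StringStubState::decode(extra_ic_state_) reflects that the extra IC state is now a word holding several packed fields. A sketch of the shift-and-mask encoding such a BitField helper performs; the field layout here is illustrative, not V8's actual one:

    #include <cstdint>

    template <typename T, int kShift, int kBits>
    struct BitFieldSketch {
      static constexpr uint32_t kMask = ((1u << kBits) - 1) << kShift;
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << kShift) & kMask;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> kShift);
      }
    };

    enum StringStubStateSketch { kDefaultStringStub, kStringIndexOutOfBounds };
    using StringStubField = BitFieldSketch<StringStubStateSketch, 0, 1>;
    // StringStubField::decode(state) recovers the field no matter which
    // other flags share the word.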
@@ -1880,7 +1855,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsString() || cell != NULL) return heap()->undefined_value();
const int argc = arguments().immediate();
@@ -1889,7 +1864,9 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+ if (kind_ == Code::CALL_IC &&
+ (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
@@ -1943,10 +1920,8 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// Restore function name in r2.
__ Move(r2, Handle<String>(name));
__ bind(&name_miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -1971,7 +1946,7 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
@@ -1980,8 +1955,7 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r1, &miss);
CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
&miss);
@@ -1998,8 +1972,7 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// Check the code is a smi.
Label slow;
STATIC_ASSERT(kSmiTag == 0);
- __ tst(code, Operand(kSmiTagMask));
- __ b(ne, &slow);
+ __ JumpIfNotSmi(code, &slow);
// Convert the smi code to uint16.
__ and_(code, code, Operand(Smi::FromInt(0xffff)));
@@ -2015,14 +1988,12 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
__ bind(&miss);
// r2: function name.
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
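
The and_(code, code, Operand(Smi::FromInt(0xffff))) above clamps the char code to uint16 without untagging it: masking a tagged smi with a tagged mask equals tagging the masked integer, since both operands carry the same left shift. A runnable sketch of the identity:

    #include <cstdint>

    constexpr int kSmiTagSizeSketch = 1;  // 32-bit smi layout, tag bit 0

    constexpr int32_t SmiFromIntSketch(int32_t value) {
      return value << kSmiTagSizeSketch;
    }

    int main() {
      int32_t code = SmiFromIntSketch(0x12345);
      int32_t masked = code & SmiFromIntSketch(0xffff);
      // (v << 1) & (0xffff << 1) == (v & 0xffff) << 1 for any v.
      return masked == SmiFromIntSketch(0x2345) ? 0 : 1;
    }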
@@ -2042,14 +2013,17 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// -- sp[argc * 4] : receiver
// -----------------------------------
- if (!CpuFeatures::IsSupported(VFP3)) return Heap::undefined_value();
+ if (!CpuFeatures::IsSupported(VFP3)) {
+ return heap()->undefined_value();
+ }
+
CpuFeatures::Scope scope_vfp3(VFP3);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
Label miss, slow;
GenerateNameCheck(name, &miss);
@@ -2077,7 +2051,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ Drop(argc + 1, eq);
__ Ret(eq);
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, true);
+ __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return;
@@ -2162,12 +2136,12 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ bind(&slow);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
__ bind(&miss);
// r2: function name.
- MaybeObject* obj = GenerateMissBranch();
- if (obj->IsFailure()) return obj;
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
@@ -2191,7 +2165,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
@@ -2200,8 +2174,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r1, &miss);
CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
&miss);
@@ -2238,7 +2211,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// Check if the argument is a heap number and load its exponent and
// sign.
__ bind(&not_smi);
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, true);
+ __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
__ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive,
@@ -2264,20 +2237,72 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
__ bind(&miss);
// r2: function name.
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
}
+MaybeObject* CallStubCompiler::CompileFastApiCall(
+ const CallOptimization& optimization,
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ Counters* counters = isolate()->counters();
+
+ ASSERT(optimization.is_simple_api_call());
+ // Bail out if the object is a global object, as we don't want to
+ // repatch it to the global receiver.
+ if (object->IsGlobalObject()) return heap()->undefined_value();
+ if (cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSObject()) return heap()->undefined_value();
+ int depth = optimization.GetPrototypeDepthOfExpectedType(
+ JSObject::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+
+ Label miss, miss_before_stack_reserved;
+
+ GenerateNameCheck(name, &miss_before_stack_reserved);
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(r1, &miss_before_stack_reserved);
+
+ __ IncrementCounter(counters->call_const(), 1, r0, r3);
+ __ IncrementCounter(counters->call_const_fast_api(), 1, r0, r3);
+
+ ReserveSpaceForFastApiCall(masm(), r0);
+
+ // Check that the maps haven't changed and find the holder as a side effect.
+ CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
+ depth, &miss);
+
+ MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
+ if (result->IsFailure()) return result;
+
+ __ bind(&miss);
+ FreeSpaceForFastApiCall(masm());
+
+ __ bind(&miss_before_stack_reserved);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
+}
+
+
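
The new CompileFastApiCall follows the same three-way result protocol as the other custom-call compilers in this file: a failure propagates to the caller, undefined means "not applicable, fall back to the regular compiler", and any other value is the generated code object. A schematic sketch of that contract (types are placeholders):

    enum class CustomCallOutcomeSketch { kPropagateFailure, kBailOut, kUseCode };

    // Mirrors the checks around CompileCustomCall: failures bubble up,
    // undefined falls through to the generic path, code gets installed.
    CustomCallOutcomeSketch ClassifySketch(bool is_failure, bool is_undefined) {
      if (is_failure) return CustomCallOutcomeSketch::kPropagateFailure;
      if (is_undefined) return CustomCallOutcomeSketch::kBailOut;
      return CustomCallOutcomeSketch::kUseCode;
    }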
MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
@@ -2287,22 +2312,18 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// -- r2 : name
// -- lr : return address
// -----------------------------------
- SharedFunctionInfo* function_info = function->shared();
- if (function_info->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = function_info->builtin_function_id();
+ if (HasCustomCallGenerator(function)) {
MaybeObject* maybe_result = CompileCustomCall(
- id, object, holder, NULL, function, name);
+ object, holder, NULL, function, name);
Object* result;
if (!maybe_result->ToObject(&result)) return maybe_result;
// undefined means bail out to regular compiler.
- if (!result->IsUndefined()) {
- return result;
- }
+ if (!result->IsUndefined()) return result;
}
- Label miss_in_smi_check;
+ Label miss;
- GenerateNameCheck(name, &miss_in_smi_check);
+ GenerateNameCheck(name, &miss);
// Get the receiver from the stack
const int argc = arguments().immediate();
@@ -2310,40 +2331,26 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss_in_smi_check);
+ __ JumpIfSmi(r1, &miss);
}
// Make sure that it's okay not to patch the on-stack receiver
// unless we're doing a receiver map check.
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- CallOptimization optimization(function);
- int depth = kInvalidProtoDepth;
- Label miss;
-
+ SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
- __ IncrementCounter(&Counters::call_const, 1, r0, r3);
-
- if (optimization.is_simple_api_call() && !object->IsGlobalObject()) {
- depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- }
-
- if (depth != kInvalidProtoDepth) {
- __ IncrementCounter(&Counters::call_const_fast_api, 1, r0, r3);
- ReserveSpaceForFastApiCall(masm(), r0);
- }
+ __ IncrementCounter(masm()->isolate()->counters()->call_const(),
+ 1, r0, r3);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
- depth, &miss);
+ &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
if (object->IsGlobalObject()) {
- ASSERT(depth == kInvalidProtoDepth);
__ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ str(r3, MemOperand(sp, argc * kPointerSize));
}
@@ -2357,7 +2364,7 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
} else {
// Check that the object is a two-byte string or a symbol.
__ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(hs, &miss);
+ __ b(ge, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
@@ -2374,8 +2381,7 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
} else {
Label fast;
// Check that the object is a smi or a heap number.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &fast);
+ __ JumpIfSmi(r1, &fast);
__ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
__ b(ne, &miss);
__ bind(&fast);
@@ -2416,24 +2422,15 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
UNREACHABLE();
}
- if (depth != kInvalidProtoDepth) {
- MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
- } else {
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
- }
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
// Handle call cache miss.
__ bind(&miss);
- if (depth != kInvalidProtoDepth) {
- FreeSpaceForFastApiCall(masm());
- }
-
- __ bind(&miss_in_smi_check);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -2461,7 +2458,7 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Get the receiver from the stack.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
- CallInterceptorCompiler compiler(this, arguments(), r2);
+ CallInterceptorCompiler compiler(this, arguments(), r2, extra_ic_state_);
MaybeObject* result = compiler.Compile(masm(),
object,
holder,
@@ -2481,14 +2478,12 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Restore receiver.
__ ldr(r0, MemOperand(sp, argc * kPointerSize));
- GenerateCallFunction(masm(), object, arguments(), &miss);
+ GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
// Handle call cache miss.
__ bind(&miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(INTERCEPTOR, name);
@@ -2505,11 +2500,9 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// -- lr : return address
// -----------------------------------
- SharedFunctionInfo* function_info = function->shared();
- if (function_info->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = function_info->builtin_function_id();
+ if (HasCustomCallGenerator(function)) {
MaybeObject* maybe_result = CompileCustomCall(
- id, object, holder, cell, function, name);
+ object, holder, cell, function, name);
Object* result;
if (!maybe_result->ToObject(&result)) return maybe_result;
// undefined means bail out to regular compiler.
@@ -2538,28 +2531,31 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
- __ IncrementCounter(&Counters::call_global_inline, 1, r3, r4);
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
if (V8::UseCrankshaft()) {
// TODO(kasperl): For now, we always call indirectly through the
// code field in the function to allow recompilation to take effect
// without changing any of the call sites.
__ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION);
+ __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
} else {
- __ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+ __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
+ JUMP_FUNCTION, call_kind);
}
// Handle call cache miss.
__ bind(&miss);
- __ IncrementCounter(&Counters::call_global_inline_miss, 1, r1, r3);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(NORMAL, name);
@@ -2585,7 +2581,7 @@ MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
r1, r2, r3,
&miss);
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2605,8 +2601,7 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
Label miss;
// Check that the object isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r1, &miss);
// Check that the map of the object hasn't changed.
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -2628,12 +2623,13 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
+ masm()->isolate());
__ TailCallExternalReference(store_callback_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2652,8 +2648,7 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
Label miss;
// Check that the object isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r1, &miss);
// Check that the map of the object hasn't changed.
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -2676,12 +2671,13 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
+ masm()->isolate());
__ TailCallExternalReference(store_ic_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2718,13 +2714,14 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// Store the value in the cell.
__ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
- __ IncrementCounter(&Counters::named_store_global_inline, 1, r4, r3);
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
__ Ret();
// Handle store cache miss.
__ bind(&miss);
- __ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r4, r3);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ IncrementCounter(counters->named_store_global_inline_miss(), 1, r4, r3);
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2742,8 +2739,7 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
Label miss;
// Check that receiver is not a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r0, &miss);
// Check the maps of the full prototype chain.
CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss);
@@ -2771,7 +2767,7 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, Heap::empty_string());
+ return GetCode(NONEXISTENT, heap()->empty_string());
}
@@ -2887,8 +2883,7 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (object != holder) {
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r0, &miss);
}
// Check that the map of the global has not changed.
@@ -2906,11 +2901,12 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
}
__ mov(r0, r4);
- __ IncrementCounter(&Counters::named_load_global_stub, 1, r1, r3);
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(&Counters::named_load_global_stub_miss, 1, r1, r3);
+ __ IncrementCounter(counters->named_load_global_stub_miss(), 1, r1, r3);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
@@ -3055,7 +3051,9 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
// -- r1 : receiver
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_string_length, 1, r2, r3);
+
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
// Check the key is the cached one.
__ cmp(r0, Operand(Handle<String>(name)));
@@ -3063,7 +3061,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_string_length, 1, r2, r3);
+ __ DecrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3079,7 +3077,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_function_prototype, 1, r2, r3);
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
// Check the name hasn't changed.
__ cmp(r0, Operand(Handle<String>(name)));
@@ -3087,91 +3086,63 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_function_prototype, 1, r2, r3);
+ __ DecrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
return GetCode(CALLBACKS, name);
}
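
Note the pairing of IncrementCounter at entry with DecrementCounter on the miss path: the net counter value counts only successful fast-path loads. A sketch of the discipline:

    struct CounterSketch { int value = 0; };

    // Hits add one; misses cancel back out to zero.
    void KeyedLoadSketch(CounterSketch* counter, bool fast_path_hit) {
      ++counter->value;                      // __ IncrementCounter(...)
      if (!fast_path_hit) --counter->value;  // __ DecrementCounter(...) at &miss
    }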
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
- Label miss;
-
- // Check that the receiver isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
-
- // Check that the map matches.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r2, Operand(Handle<Map>(receiver->map())));
- __ b(ne, &miss);
-
- // Check that the key is a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &miss);
-
- // Get the elements array.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ AssertFastElements(r2);
-
- // Check that the key is within bounds.
- __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ cmp(r0, Operand(r3));
- __ b(hs, &miss);
-
- // Load the result and make sure it's not the hole.
- __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ ldr(r4,
- MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &miss);
- __ mov(r0, r4);
- __ Ret();
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+ Code* stub;
+ MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map);
+ if (!maybe_stub->To(&stub)) return maybe_stub;
+ __ DispatchMap(r1,
+ r2,
+ Handle<Map>(receiver_map),
+ Handle<Code>(stub),
+ DO_SMI_CHECK);
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(NORMAL, NULL);
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+ MapList* receiver_maps,
+ CodeList* handler_ics) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label miss;
+ __ JumpIfSmi(r1, &miss);
- // Check that the map matches.
- __ CheckMap(r1, r2, Handle<Map>(receiver->map()), &miss, false);
-
- GenerateFastPixelArrayLoad(masm(),
- r1,
- r0,
- r2,
- r3,
- r4,
- r5,
- r0,
- &miss,
- &miss,
- &miss);
+ int receiver_count = receiver_maps->length();
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<Map> map(receiver_maps->at(current));
+ Handle<Code> code(handler_ics->at(current));
+ __ mov(ip, Operand(map));
+ __ cmp(r2, ip);
+ __ Jump(code, RelocInfo::CODE_TARGET, eq);
+ }
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Miss));
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, NULL, MEGAMORPHIC);
}
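
CompileLoadMegamorphic emits a linear map dispatch: load the receiver's map once, compare it against each known map, tail-jump to the matching handler, and fall through to the generic miss stub. A hedged sketch in plain C++, with handler lists standing in for the MapList/CodeList pairs:

    #include <cstddef>
    #include <vector>

    using MapIdSketch = const void*;
    using HandlerSketch = void (*)();

    // One comparison per known receiver map, then the miss stub.
    void MegamorphicDispatchSketch(MapIdSketch receiver_map,
                                   const std::vector<MapIdSketch>& maps,
                                   const std::vector<HandlerSketch>& handlers,
                                   HandlerSketch miss) {
      for (std::size_t i = 0; i < maps.size(); ++i) {
        if (receiver_map == maps[i]) {
          handlers[i]();  // __ Jump(code, RelocInfo::CODE_TARGET, eq)
          return;
        }
      }
      miss();  // __ Jump(miss_ic, RelocInfo::CODE_TARGET, al)
    }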
@@ -3187,7 +3158,8 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_store_field, 1, r3, r4);
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->keyed_store_field(), 1, r3, r4);
// Check that the name has not changed.
__ cmp(r1, Operand(Handle<String>(name)));
@@ -3203,9 +3175,8 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
&miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_store_field, 1, r3, r4);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
-
+ __ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
+ Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -3213,69 +3184,24 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
- JSObject* receiver) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : scratch
- // -- r4 : scratch (elements)
// -----------------------------------
- Label miss;
-
- Register value_reg = r0;
- Register key_reg = r1;
- Register receiver_reg = r2;
- Register scratch = r3;
- Register elements_reg = r4;
-
- // Check that the receiver isn't a smi.
- __ tst(receiver_reg, Operand(kSmiTagMask));
- __ b(eq, &miss);
-
- // Check that the map matches.
- __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(Handle<Map>(receiver->map())));
- __ b(ne, &miss);
-
- // Check that the key is a smi.
- __ tst(key_reg, Operand(kSmiTagMask));
- __ b(ne, &miss);
-
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ ldr(scratch, FieldMemOperand(elements_reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(Handle<Map>(Factory::fixed_array_map())));
- __ b(ne, &miss);
-
- // Check that the key is within bounds.
- if (receiver->IsJSArray()) {
- __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis.
- __ cmp(key_reg, scratch);
- __ b(hs, &miss);
-
- __ add(scratch,
- elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ str(value_reg,
- MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ RecordWrite(scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
- receiver_reg , elements_reg);
-
- // value_reg (r0) is preserved.
- // Done.
- __ Ret();
-
- __ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ Code* stub;
+ MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map);
+ if (!maybe_stub->To(&stub)) return maybe_stub;
+ __ DispatchMap(r2,
+ r3,
+ Handle<Map>(receiver_map),
+ Handle<Code>(stub),
+ DO_SMI_CHECK);
+
+ Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -3283,44 +3209,35 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
}
-MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
- JSObject* receiver) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+ MapList* receiver_maps,
+ CodeList* handler_ics) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
- // -- r3 : scratch
- // -- r4 : scratch
- // -- r5 : scratch
- // -- r6 : scratch
// -- lr : return address
+ // -- r3 : scratch
// -----------------------------------
Label miss;
-
- // Check that the map matches.
- __ CheckMap(r2, r6, Handle<Map>(receiver->map()), &miss, false);
-
- GenerateFastPixelArrayStore(masm(),
- r2,
- r1,
- r0,
- r3,
- r4,
- r5,
- r6,
- true,
- true,
- &miss,
- &miss,
- NULL,
- &miss);
+ __ JumpIfSmi(r2, &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<Map> map(receiver_maps->at(current));
+ Handle<Code> code(handler_ics->at(current));
+ __ mov(ip, Operand(map));
+ __ cmp(r3, ip);
+ __ Jump(code, RelocInfo::CODE_TARGET, eq);
+ }
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
- __ Jump(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, NULL, MEGAMORPHIC);
}
@@ -3350,8 +3267,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// r1: constructor function
// r7: undefined
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &generic_stub_call);
+ __ JumpIfSmi(r2, &generic_stub_call);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
__ b(ne, &generic_stub_call);
@@ -3452,85 +3368,80 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// Remove caller arguments and receiver from the stack and return.
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2));
__ add(sp, sp, Operand(kPointerSize));
- __ IncrementCounter(&Counters::constructed_objects, 1, r1, r2);
- __ IncrementCounter(&Counters::constructed_objects_stub, 1, r1, r2);
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->constructed_objects(), 1, r1, r2);
+ __ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2);
__ Jump(lr);
// Jump to the generic stub in case the specialized code cannot handle the
// construction.
__ bind(&generic_stub_call);
- Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
- Handle<Code> generic_construct_stub(code);
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ Handle<Code> code = masm()->isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(code, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode();
}
-static bool IsElementTypeSigned(ExternalArrayType array_type) {
- switch (array_type) {
- case kExternalByteArray:
- case kExternalShortArray:
- case kExternalIntArray:
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+static bool IsElementTypeSigned(JSObject::ElementsKind elements_kind) {
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
return true;
- case kExternalUnsignedByteArray:
- case kExternalUnsignedShortArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
return false;
- default:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
return false;
}
+ return false;
}
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
- ExternalArrayType array_type, Code::Flags flags) {
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+ MacroAssembler* masm,
+ JSObject::ElementsKind elements_kind) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
- Label slow, failed_allocation;
+ Label miss_force_generic, slow, failed_allocation;
Register key = r0;
Register receiver = r1;
- // Check that the object isn't a smi
- __ JumpIfSmi(receiver, &slow);
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller not to be a smi.
// Check that the key is a smi.
- __ JumpIfNotSmi(key, &slow);
-
- // Check that the object is a JS object. Load map into r2.
- __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &slow);
-
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks.
- __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &slow);
+ __ JumpIfNotSmi(key, &miss_force_generic);
- // Check that the elements array is the appropriate type of
- // ExternalArray.
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
- __ cmp(r2, ip);
- __ b(ne, &slow);
+ // r3: elements array
// Check that the index is in range.
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(ip, Operand(key, ASR, kSmiTagSize));
// Unsigned comparison catches both negative and too-large values.
- __ b(lo, &slow);
+ __ b(lo, &miss_force_generic);
- // r3: elements array
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage
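
The single b(lo, &miss_force_generic) after the cmp implements the combined bounds check the comment describes: comparing unsigned, a negative index reinterprets as a value larger than any valid length, so one branch rejects both underflow and overflow. The scalar equivalent:

    #include <cstdint>

    // Equivalent of: cmp length, key; b(lo, &miss_force_generic).
    bool IndexInBoundsSketch(int32_t key, uint32_t length) {
      return static_cast<uint32_t>(key) < length;
    }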
@@ -3539,24 +3450,25 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
Register value = r2;
- switch (array_type) {
- case kExternalByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(value, MemOperand(r3, key, LSR, 1));
break;
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ ldrb(value, MemOperand(r3, key, LSR, 1));
break;
- case kExternalShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
__ ldrsh(value, MemOperand(r3, key, LSL, 0));
break;
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ ldrh(value, MemOperand(r3, key, LSL, 0));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ ldr(value, MemOperand(r3, key, LSL, 1));
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ add(r2, r3, Operand(key, LSL, 1));
@@ -3565,18 +3477,36 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
__ ldr(value, MemOperand(r3, key, LSL, 1));
}
break;
- default:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ add(r2, r3, Operand(key, LSL, 2));
+ __ vldr(d0, r2, 0);
+ } else {
+ __ add(r4, r3, Operand(key, LSL, 2));
+ // r4: pointer to the beginning of the double we want to load.
+ __ ldr(r2, MemOperand(r4, 0));
+ __ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
+ }
+ break;
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
// For integer array types:
// r2: value
- // For floating-point array type
+ // For float array type:
// s0: value (if VFP3 is supported)
// r2: value (if VFP3 is not supported)
+ // For double array type:
+ // d0: value (if VFP3 is supported)
+ // r2/r3: value (if VFP3 is not supported)
- if (array_type == kExternalIntArray) {
+ if (elements_kind == JSObject::EXTERNAL_INT_ELEMENTS) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
@@ -3604,10 +3534,23 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
__ vstr(d0, r3, HeapNumber::kValueOffset);
__ Ret();
} else {
- WriteInt32ToHeapNumberStub stub(value, r0, r3);
- __ TailCallStub(&stub);
+ Register dst1 = r1;
+ Register dst2 = r3;
+ FloatingPointHelper::Destination dest =
+ FloatingPointHelper::kCoreRegisters;
+ FloatingPointHelper::ConvertIntToDouble(masm,
+ value,
+ dest,
+ d0,
+ dst1,
+ dst2,
+ r9,
+ s0);
+ __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ Ret();
}
- } else if (array_type == kExternalUnsignedIntArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) {
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
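
Concretely, a uint32 fits a non-negative smi only when neither of its top two bits is set: bit 31 would flip the sign of the tagged word, and bit 30 would be lost to the one-bit tag shift. A sketch of the test:

    #include <cstdint>

    // True iff the value can be returned as a positive 31-bit smi;
    // otherwise the stub must box it in a HeapNumber.
    bool FitsPositiveSmiSketch(uint32_t value) {
      return (value & 0xC0000000u) == 0;  // top two bits clear
    }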
@@ -3650,12 +3593,12 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
__ bind(&box_int_0);
// Integer does not have leading zeros.
- GenerateUInt2Double(masm(), hiword, loword, r4, 0);
+ GenerateUInt2Double(masm, hiword, loword, r4, 0);
__ b(&done);
__ bind(&box_int_1);
// Integer has one leading zero.
- GenerateUInt2Double(masm(), hiword, loword, r4, 1);
+ GenerateUInt2Double(masm, hiword, loword, r4, 1);
__ bind(&done);
@@ -3672,7 +3615,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
__ mov(r0, r4);
__ Ret();
}
- } else if (array_type == kExternalFloatArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
if (CpuFeatures::IsSupported(VFP3)) {
@@ -3742,6 +3685,31 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
__ mov(r0, r3);
__ Ret();
}
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Allocate a HeapNumber for the result. Don't use r0 and r1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+ __ sub(r1, r2, Operand(kHeapObjectTag));
+ __ vstr(d0, r1, HeapNumber::kValueOffset);
+
+ __ mov(r0, r2);
+ __ Ret();
+ } else {
+ // Allocate a HeapNumber for the result. Don't use r0 and r1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r4, r5, r6, r7, &slow);
+
+ __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
+ __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
+ __ mov(r0, r4);
+ __ Ret();
+ }
} else {
// Tag integer as smi and return it.
@@ -3751,7 +3719,9 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
// Slow case, key and receiver still in r0 and r1.
__ bind(&slow);
- __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(),
+ 1, r2, r3);
// ---------- S t a t e --------------
// -- lr : return address
@@ -3763,19 +3733,23 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
- return GetCode(flags);
+ __ bind(&miss_force_generic);
+ Code* stub = masm->isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_MissForceGeneric);
+ __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
}
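
When VFP3 is unavailable, the load path above materializes the HeapNumber via FloatingPointHelper::ConvertIntToDouble with a core-register destination and stores the two word halves into the mantissa and exponent fields. A sketch of the equivalent computation, using a plain cast in place of the helper's bit manipulation and assuming a little-endian word layout:

    #include <cstdint>
    #include <cstring>

    // Produce the two 32-bit words of the IEEE-754 double for an int32,
    // in the order the stub stores them (mantissa word, exponent word).
    void IntToDoubleWordsSketch(int32_t v, uint32_t* mantissa,
                                uint32_t* exponent) {
      double d = static_cast<double>(v);
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      *mantissa = static_cast<uint32_t>(bits);
      *exponent = static_cast<uint32_t>(bits >> 32);
    }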
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
- ExternalArrayType array_type, Code::Flags flags) {
+void KeyedStoreStubCompiler::GenerateStoreExternalArray(
+ MacroAssembler* masm,
+ JSObject::ElementsKind elements_kind) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
- Label slow, check_heap_number;
+ Label slow, check_heap_number, miss_force_generic;
// Register usage.
Register value = r0;
@@ -3783,65 +3757,84 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
Register receiver = r2;
// r3 mostly holds the elements array or the destination external array.
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow);
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller not to be a smi.
- // Check that the object is a JS object. Load map into r3.
- __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
- __ b(le, &slow);
-
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &slow);
+ __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the key is a smi.
- __ JumpIfNotSmi(key, &slow);
-
- // Check that the elements array is the appropriate type of ExternalArray.
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
- __ cmp(r4, ip);
- __ b(ne, &slow);
+ __ JumpIfNotSmi(key, &miss_force_generic);
- // Check that the index is in range.
- __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index.
+ // Check that the index is in range.
+ __ SmiUntag(r4, key);
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(r4, ip);
// Unsigned comparison catches both negative and too-large values.
- __ b(hs, &slow);
+ __ b(hs, &miss_force_generic);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// r3: external array.
// r4: key (integer).
- __ JumpIfNotSmi(value, &check_heap_number);
- __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
+ if (elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS) {
+ // Double to pixel conversion is only implemented in the runtime for now.
+ __ JumpIfNotSmi(value, &slow);
+ } else {
+ __ JumpIfNotSmi(value, &check_heap_number);
+ }
+ __ SmiUntag(r5, value);
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
// r4: key (integer).
// r5: value (integer).
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ // Clamp the value to [0..255].
+ __ Usat(r5, 8, Operand(r5));
+ __ strb(r5, MemOperand(r3, r4, LSL, 0));
+ break;
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
// Perform int-to-float conversion and store to memory.
- StoreIntAsFloat(masm(), r3, r4, r5, r6, r7, r9);
+ StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
break;
- default:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ __ add(r3, r3, Operand(r4, LSL, 3));
+ // r3: effective address of the double element
+ FloatingPointHelper::Destination destination;
+ if (CpuFeatures::IsSupported(VFP3)) {
+ destination = FloatingPointHelper::kVFPRegisters;
+ } else {
+ destination = FloatingPointHelper::kCoreRegisters;
+ }
+ FloatingPointHelper::ConvertIntToDouble(
+ masm, r5, destination,
+ d0, r6, r7, // These are: double_dst, dst1, dst2.
+ r4, s2); // These are: scratch2, single_scratch.
+ if (destination == FloatingPointHelper::kVFPRegisters) {
+ CpuFeatures::Scope scope(VFP3);
+ __ vstr(d0, r3, 0);
+ } else {
+ __ str(r6, MemOperand(r3, 0));
+ __ str(r7, MemOperand(r3, Register::kSizeInBytes));
+ }
+ break;
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
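
The pixel-element case uses Usat for a saturating rather than wrapping store: out-of-range values clamp to the [0, 255] range that external pixel arrays (canvas ImageData) require. The scalar equivalent:

    #include <algorithm>
    #include <cstdint>

    // Scalar equivalent of: __ Usat(r5, 8, Operand(r5)).
    uint8_t ClampToPixelSketch(int32_t value) {
      return static_cast<uint8_t>(std::min(255, std::max(0, value)));
    }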
@@ -3849,224 +3842,341 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// Entry registers are intact; r0 holds the value, which is the return value.
__ Ret();
+ if (elements_kind != JSObject::EXTERNAL_PIXEL_ELEMENTS) {
+ // r3: external array.
+ // r4: index (integer).
+ __ bind(&check_heap_number);
+ __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
+ __ b(ne, &slow);
- // r3: external array.
- // r4: index (integer).
- __ bind(&check_heap_number);
- __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
- __ b(ne, &slow);
-
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
+ __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
- // r3: base pointer of external storage.
- // r4: key (integer).
+ // r3: base pointer of external storage.
+ // r4: key (integer).
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays basically undefined. For more
+ // reproducible behavior, convert these to zero.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ // vldr requires the offset to be a multiple of 4, so we cannot
+ // include -kHeapObjectTag in it.
+ __ sub(r5, r0, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
+ __ add(r5, r3, Operand(r4, LSL, 2));
+ __ vcvt_f32_f64(s0, d0);
+ __ vstr(s0, r5, 0);
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ __ sub(r5, r0, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
+ __ add(r5, r3, Operand(r4, LSL, 3));
+ __ vstr(d0, r5, 0);
+ } else {
+ // Hoisted load. vldr requires the offset to be a multiple of 4, so we
+ // cannot include -kHeapObjectTag in it.
+ __ sub(r5, value, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
+ __ EmitECMATruncate(r5, d0, s2, r6, r7, r9);
+
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ strb(r5, MemOperand(r3, r4, LSL, 0));
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ strh(r5, MemOperand(r3, r4, LSL, 1));
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
+ break;
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
- if (array_type == kExternalFloatArray) {
- // vldr requires offset to be a multiple of 4 so we can not
- // include -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(r4, LSL, 2));
- __ vcvt_f32_f64(s0, d0);
- __ vstr(s0, r5, 0);
+ // Entry registers are intact; r0 holds the value, which is the return
+ // value.
+ __ Ret();
} else {
- // Need to perform float-to-int conversion.
- // Test for NaN or infinity (both give zero).
- __ ldr(r6, FieldMemOperand(value, HeapNumber::kExponentOffset));
-
- // Hoisted load. vldr requires offset to be a multiple of 4 so we can not
- // include -kHeapObjectTag into it.
- __ sub(r5, value, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
-
- __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs and Infinities have all-one exponents so they sign extend to -1.
- __ cmp(r6, Operand(-1));
- __ mov(r5, Operand(0), LeaveCC, eq);
-
- // Not infinity or NaN simply convert to int.
- if (IsElementTypeSigned(array_type)) {
- __ vcvt_s32_f64(s0, d0, kDefaultRoundToZero, ne);
+ // VFP3 is not available, so do manual conversions.
+ __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
+ __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ Label done, nan_or_infinity_or_zero;
+ static const int kMantissaInHiWordShift =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaInLoWordShift =
+ kBitsPerInt - kMantissaInHiWordShift;
+
+ // Test for all special exponent values: zeros, subnormal numbers, NaNs
+ // and infinities. All these should be converted to 0.
+ __ mov(r7, Operand(HeapNumber::kExponentMask));
+ __ and_(r9, r5, Operand(r7), SetCC);
+ __ b(eq, &nan_or_infinity_or_zero);
+
+ __ teq(r9, Operand(r7));
+ __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
+ __ b(eq, &nan_or_infinity_or_zero);
+
+ // Rebias exponent.
+ __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
+ __ add(r9,
+ r9,
+ Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
+
+ __ cmp(r9, Operand(kBinary32MaxExponent));
+ __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
+ __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
+ __ b(gt, &done);
+
+ __ cmp(r9, Operand(kBinary32MinExponent));
+ __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
+ __ b(lt, &done);
+
+ __ and_(r7, r5, Operand(HeapNumber::kSignMask));
+ __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+ __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
+ __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
+ __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
+
+ __ bind(&done);
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
+ // Entry registers are intact; r0 holds the value, which is the return
+ // value.
+ __ Ret();
+
+ __ bind(&nan_or_infinity_or_zero);
+ __ and_(r7, r5, Operand(HeapNumber::kSignMask));
+ __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+ __ orr(r9, r9, r7);
+ __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
+ __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
+ __ b(&done);
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ __ add(r7, r3, Operand(r4, LSL, 3));
+ // r7: effective address of destination element.
+ __ str(r6, MemOperand(r7, 0));
+ __ str(r5, MemOperand(r7, Register::kSizeInBytes));
+ __ Ret();
} else {
- __ vcvt_u32_f64(s0, d0, kDefaultRoundToZero, ne);
- }
- __ vmov(r5, s0, ne);
-
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ strb(r5, MemOperand(r3, r4, LSL, 0));
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ strh(r5, MemOperand(r3, r4, LSL, 1));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ str(r5, MemOperand(r3, r4, LSL, 2));
- break;
- default:
- UNREACHABLE();
- break;
+ bool is_signed_type = IsElementTypeSigned(elements_kind);
+ int meaningful_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
+ int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
+
+ Label done, sign;
+
+ // Test for all special exponent values: zeros, subnormal numbers, NaNs
+ // and infinities. All these should be converted to 0.
+ __ mov(r7, Operand(HeapNumber::kExponentMask));
+ __ and_(r9, r5, Operand(r7), SetCC);
+ __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+ __ b(eq, &done);
+
+ __ teq(r9, Operand(r7));
+ __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+ __ b(eq, &done);
+
+ // Unbias exponent.
+ __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
+ __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
+ // If exponent is negative then result is 0.
+ __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
+ __ b(mi, &done);
+
+ // If exponent is too big then result is minimal value.
+ __ cmp(r9, Operand(meaningful_bits - 1));
+ __ mov(r5, Operand(min_value), LeaveCC, ge);
+ __ b(ge, &done);
+
+ __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
+ __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+ __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
+
+ __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+ __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
+ __ b(pl, &sign);
+
+ __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
+ __ mov(r5, Operand(r5, LSL, r9));
+ __ rsb(r9, r9, Operand(meaningful_bits));
+ __ orr(r5, r5, Operand(r6, LSR, r9));
+
+ __ bind(&sign);
+ __ teq(r7, Operand(0, RelocInfo::NONE));
+ __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+
+ __ bind(&done);
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ strb(r5, MemOperand(r3, r4, LSL, 0));
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ strh(r5, MemOperand(r3, r4, LSL, 1));
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
+ break;
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
}
}
+ }
- // Entry registers are intact, r0 holds the value which is the return value.
- __ Ret();
- } else {
- // VFP3 is not available do manual conversions.
- __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
- __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+ // Slow case, key and receiver still in r0 and r1.
+ __ bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(),
+ 1, r2, r3);
- if (array_type == kExternalFloatArray) {
- Label done, nan_or_infinity_or_zero;
- static const int kMantissaInHiWordShift =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ Jump(slow_ic, RelocInfo::CODE_TARGET);
- static const int kMantissaInLoWordShift =
- kBitsPerInt - kMantissaInHiWordShift;
+ // Miss case, call the runtime.
+ __ bind(&miss_force_generic);
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ b(eq, &nan_or_infinity_or_zero);
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
- __ teq(r9, Operand(r7));
- __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
- __ b(eq, &nan_or_infinity_or_zero);
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
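
The non-VFP3 EXTERNAL_FLOAT_ELEMENTS path above narrows a heap number's double to a binary32 bit pattern using integer instructions only. The stand-alone C++ sketch below restates that bit manipulation for reference; it is not part of the patch, the function name is made up, and the HeapNumber/kBinary32 constants are written out numerically. Note that the mantissa is truncated rather than rounded, and that exponent overflow clamps to a signed infinity while underflow flushes to a signed zero, matching the gt/lt conditional paths above.

#include <cstdint>

// Returns the binary32 bit pattern the stub stores for a heap number whose
// IEEE-754 double bit pattern is `bits` (hi word = sign, exponent, and top
// 20 mantissa bits; lo word = remaining mantissa bits).
uint32_t DoubleBitsToFloatBits(uint64_t bits) {
  const uint32_t hi   = static_cast<uint32_t>(bits >> 32);
  const uint32_t lo   = static_cast<uint32_t>(bits);
  const uint32_t sign = hi & 0x80000000u;             // HeapNumber sign mask
  const uint32_t exponent = (hi >> 20) & 0x7FFu;      // 11 exponent bits
  // 23-bit mantissa: 20 bits from the hi word plus the top 3 bits of lo;
  // the extra double precision is truncated, not rounded.
  const uint32_t mantissa = ((hi & 0xFFFFFu) << 3) | (lo >> 29);

  // Special exponents (zero/subnormal, NaN/infinity) keep the truncated
  // mantissa and map the exponent field to all-zeros or all-ones, exactly
  // as the nan_or_infinity_or_zero label does.
  if (exponent == 0) return sign | mantissa;
  if (exponent == 0x7FFu) return sign | 0x7F800000u | mantissa;

  // Rebias from 1023 (double) to 127 (float).
  const int32_t rebased = static_cast<int32_t>(exponent) - 1023 + 127;
  if (rebased > 0xFE) return sign | 0x7F800000u;  // overflow: +/-infinity
  if (rebased < 0x01) return sign;                // underflow: signed zero
  return sign | (static_cast<uint32_t>(rebased) << 23) | mantissa;
}
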
- // Rebias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ add(r9,
- r9,
- Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
-
- __ cmp(r9, Operand(kBinary32MaxExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
- __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
- __ b(gt, &done);
-
- __ cmp(r9, Operand(kBinary32MinExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
- __ b(lt, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
- __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
- __ bind(&done);
- __ str(r5, MemOperand(r3, r4, LSL, 2));
- // Entry registers are intact, r0 holds the value which is the return
- // value.
- __ Ret();
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ Label miss_force_generic;
- __ bind(&nan_or_infinity_or_zero);
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r9, r9, r7);
- __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
- __ b(&done);
- } else {
- bool is_signed_type = IsElementTypeSigned(array_type);
- int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
- int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
-
- Label done, sign;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ b(eq, &done);
-
- __ teq(r9, Operand(r7));
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ b(eq, &done);
-
- // Unbias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
- // If exponent is negative then result is 0.
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
- __ b(mi, &done);
-
- // If exponent is too big then result is minimal value.
- __ cmp(r9, Operand(meaningfull_bits - 1));
- __ mov(r5, Operand(min_value), LeaveCC, ge);
- __ b(ge, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
-
- __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
- __ b(pl, &sign);
-
- __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
- __ mov(r5, Operand(r5, LSL, r9));
- __ rsb(r9, r9, Operand(meaningfull_bits));
- __ orr(r5, r5, Operand(r6, LSR, r9));
-
- __ bind(&sign);
- __ teq(r7, Operand(0, RelocInfo::NONE));
- __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller not to be a smi.
- __ bind(&done);
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ strb(r5, MemOperand(r3, r4, LSL, 0));
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ strh(r5, MemOperand(r3, r4, LSL, 1));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ str(r5, MemOperand(r3, r4, LSL, 2));
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- }
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(r0, &miss_force_generic);
- // Slow case: call runtime.
- __ bind(&slow);
+ // Get the elements array.
+ __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ AssertFastElements(r2);
- // Entry registers are intact.
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
+ // Check that the key is within bounds.
+ __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
+ __ cmp(r0, Operand(r3));
+ __ b(hs, &miss_force_generic);
+
+ // Load the result and make sure it's not the hole.
+ __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ ldr(r4,
+ MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r4, ip);
+ __ b(eq, &miss_force_generic);
+ __ mov(r0, r4);
+ __ Ret();
+
+ __ bind(&miss_force_generic);
+ Code* stub = masm->isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_MissForceGeneric);
+ __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+}
+
+
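
The load above indexes the elements array with MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize), i.e. it never untags the smi key. A minimal C++ sketch of that arithmetic, under the 32-bit assumptions the ASSERT checks (kSmiTag == 0, kSmiTagSize == 1, 4-byte pointers); the helper name is illustrative:

#include <cstdint>

constexpr int kPointerSizeLog2 = 2;  // 4-byte pointers on 32-bit ARM
constexpr int kSmiTagSize = 1;       // a smi stores value << 1, tag bit 0

// A smi already carries one left shift, so shifting by the *difference*
// turns the tagged key directly into a byte offset: (v << 1) << 1 == v * 4.
inline uint32_t SmiKeyToByteOffset(uint32_t smi_key) {
  return smi_key << (kPointerSizeLog2 - kSmiTagSize);
}
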
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+ bool is_js_array) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -- r3 : scratch
+ // -- r4 : scratch (elements)
// -----------------------------------
+ Label miss_force_generic;
- // Push receiver, key and value for runtime call.
- __ Push(r2, r1, r0);
+ Register value_reg = r0;
+ Register key_reg = r1;
+ Register receiver_reg = r2;
+ Register scratch = r3;
+ Register elements_reg = r4;
- __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- __ mov(r0, Operand(Smi::FromInt(
- Code::ExtractExtraICStateFromFlags(flags) & kStrictMode)));
- __ Push(r1, r0);
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller not to be a smi.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(r0, &miss_force_generic);
+
+ // Get the elements array and make sure it is a fast element array, not a
+ // copy-on-write ('cow') array.
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ CheckMap(elements_reg,
+ scratch,
+ Heap::kFixedArrayMapRootIndex,
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
- return GetCode(flags);
+ // Check that the key is within bounds.
+ if (is_js_array) {
+ __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ } else {
+ __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ }
+ // Compare smis.
+ __ cmp(key_reg, scratch);
+ __ b(hs, &miss_force_generic);
+
+ __ add(scratch,
+ elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ str(value_reg,
+ MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ RecordWrite(scratch,
+ Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
+ receiver_reg, elements_reg);
+
+ // value_reg (r0) is preserved.
+ // Done.
+ __ Ret();
+
+ __ bind(&miss_force_generic);
+ Handle<Code> ic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
}
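
For reference alongside the float case, here is the same kind of host-side sketch for the non-VFP3 integer store path earlier in this hunk: a truncating double-to-integer conversion in which NaNs, infinities, zeros, subnormals, and negative exponents all become 0, and out-of-range magnitudes clamp to the stub's min_value. Again this is not patch code; the name is made up and the HeapNumber constants appear numerically.

#include <cstdint>

// Returns the bit pattern stored for an external int array element, given
// the double's bit pattern; is_signed mirrors IsElementTypeSigned above.
uint32_t DoubleBitsToIntBits(uint64_t bits, bool is_signed) {
  const uint32_t hi = static_cast<uint32_t>(bits >> 32);
  const uint32_t lo = static_cast<uint32_t>(bits);
  const uint32_t biased = (hi >> 20) & 0x7FFu;
  if (biased == 0 || biased == 0x7FFu) return 0;  // 0/subnormal/NaN/Inf
  const int32_t exponent = static_cast<int32_t>(biased) - 1023;
  if (exponent < 0) return 0;                     // |x| < 1 truncates to 0
  const int meaningful_bits = is_signed ? 31 : 32;
  if (exponent >= meaningful_bits - 1) {          // too big: minimal value
    return is_signed ? 0x80000000u : 0x00000000u;
  }
  // 21 significant hi-word bits: 20 stored plus the implicit leading 1.
  uint32_t result = (hi & 0xFFFFFu) | (1u << 20);
  const int shift = 20 - exponent;                // kMantissaBitsInTopWord
  if (shift >= 0) {
    result >>= shift;                             // the 'pl' branch above
  } else {
    result = (result << -shift) | (lo >> (meaningful_bits + shift));
  }
  if ((hi & 0x80000000u) != 0) result = 0u - result;  // apply the sign
  return result;
}
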
diff --git a/deps/v8/src/arm/virtual-frame-arm-inl.h b/deps/v8/src/arm/virtual-frame-arm-inl.h
deleted file mode 100644
index 6a7902aff..000000000
--- a/deps/v8/src/arm/virtual-frame-arm-inl.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_ARM_INL_H_
-#define V8_VIRTUAL_FRAME_ARM_INL_H_
-
-#include "assembler-arm.h"
-#include "virtual-frame-arm.h"
-
-namespace v8 {
-namespace internal {
-
-// These VirtualFrame methods should actually be in a virtual-frame-arm-inl.h
-// file if such a thing existed.
-MemOperand VirtualFrame::ParameterAt(int index) {
- // Index -1 corresponds to the receiver.
- ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index <= parameter_count());
- return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
-}
-
- // The receiver frame slot.
-MemOperand VirtualFrame::Receiver() {
- return ParameterAt(-1);
-}
-
-
-void VirtualFrame::Forget(int count) {
- SpillAll();
- LowerHeight(count);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_VIRTUAL_FRAME_ARM_INL_H_
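
The deleted header's ParameterAt is the one piece of non-obvious arithmetic in this file: the caller pushed the receiver first and the arguments after it, and the prologue then saved lr and fp, so parameter i sits 1 + parameter_count - i slots above fp. A plain C++ restatement, with kPointerSize hard-coded for 32-bit ARM and the function name invented for illustration:

constexpr int kPointerSize = 4;

// Byte offset from fp of parameter `index`; index -1 is the receiver.
inline int ParameterOffset(int parameter_count, int index) {
  return (1 + parameter_count - index) * kPointerSize;
}

// With two parameters: the saved lr sits at fp + 4, the last parameter at
// fp + 8 (ParameterOffset(2, 1)), the first at fp + 12, and the receiver,
// pushed before the arguments, at fp + 16 (ParameterOffset(2, -1)).
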
diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc
deleted file mode 100644
index 544e405db..000000000
--- a/deps/v8/src/arm/virtual-frame-arm.cc
+++ /dev/null
@@ -1,843 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::PopToR1R0() {
- // Shuffle things around so the top of stack is in r0 and r1.
- MergeTOSTo(R0_R1_TOS);
- // Pop the two registers off the stack so they are detached from the frame.
- LowerHeight(2);
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::PopToR1() {
- // Shuffle things around so the top of stack is only in r1.
- MergeTOSTo(R1_TOS);
- // Pop the register off the stack so it is detached from the frame.
- LowerHeight(1);
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::PopToR0() {
- // Shuffle things around so the top of stack only in r0.
- MergeTOSTo(R0_TOS);
- // Pop the register off the stack so it is detached from the frame.
- LowerHeight(1);
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
- if (Equals(expected)) return;
- ASSERT((expected->tos_known_smi_map_ & tos_known_smi_map_) ==
- expected->tos_known_smi_map_);
- ASSERT(expected->IsCompatibleWith(this));
- MergeTOSTo(expected->top_of_stack_state_, cond);
- ASSERT(register_allocation_map_ == expected->register_allocation_map_);
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected, Condition cond) {
- if (Equals(expected)) return;
- tos_known_smi_map_ &= expected->tos_known_smi_map_;
- MergeTOSTo(expected->top_of_stack_state_, cond);
- ASSERT(register_allocation_map_ == expected->register_allocation_map_);
-}
-
-
-void VirtualFrame::MergeTOSTo(
- VirtualFrame::TopOfStack expected_top_of_stack_state, Condition cond) {
-#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
- switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) {
- case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
- break;
- case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
- __ pop(r0, cond);
- break;
- case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS):
- __ pop(r1, cond);
- break;
- case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS):
- __ pop(r0, cond);
- __ pop(r1, cond);
- break;
- case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
- __ pop(r1, cond);
- __ pop(r0, cond);
- break;
- case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
- __ push(r0, cond);
- break;
- case CASE_NUMBER(R0_TOS, R0_TOS):
- break;
- case CASE_NUMBER(R0_TOS, R1_TOS):
- __ mov(r1, r0, LeaveCC, cond);
- break;
- case CASE_NUMBER(R0_TOS, R0_R1_TOS):
- __ pop(r1, cond);
- break;
- case CASE_NUMBER(R0_TOS, R1_R0_TOS):
- __ mov(r1, r0, LeaveCC, cond);
- __ pop(r0, cond);
- break;
- case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS):
- __ push(r1, cond);
- break;
- case CASE_NUMBER(R1_TOS, R0_TOS):
- __ mov(r0, r1, LeaveCC, cond);
- break;
- case CASE_NUMBER(R1_TOS, R1_TOS):
- break;
- case CASE_NUMBER(R1_TOS, R0_R1_TOS):
- __ mov(r0, r1, LeaveCC, cond);
- __ pop(r1, cond);
- break;
- case CASE_NUMBER(R1_TOS, R1_R0_TOS):
- __ pop(r0, cond);
- break;
- case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
- __ Push(r1, r0, cond);
- break;
- case CASE_NUMBER(R0_R1_TOS, R0_TOS):
- __ push(r1, cond);
- break;
- case CASE_NUMBER(R0_R1_TOS, R1_TOS):
- __ push(r1, cond);
- __ mov(r1, r0, LeaveCC, cond);
- break;
- case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS):
- break;
- case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
- __ Swap(r0, r1, ip, cond);
- break;
- case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
- __ Push(r0, r1, cond);
- break;
- case CASE_NUMBER(R1_R0_TOS, R0_TOS):
- __ push(r0, cond);
- __ mov(r0, r1, LeaveCC, cond);
- break;
- case CASE_NUMBER(R1_R0_TOS, R1_TOS):
- __ push(r0, cond);
- break;
- case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS):
- __ Swap(r0, r1, ip, cond);
- break;
- case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS):
- break;
- default:
- UNREACHABLE();
-#undef CASE_NUMBER
- }
- // A conditional merge will be followed by a conditional branch and the
- // fall-through code will have an unchanged virtual frame state. If the
- // merge is unconditional ('al'ways) then it might be followed by a fall
- // through. We need to update the virtual frame state to match the code we
- // are falling into. The final case is an unconditional merge followed by an
- // unconditional branch, in which case it doesn't matter what we do to the
- // virtual frame state, because the virtual frame will be invalidated.
- if (cond == al) {
- top_of_stack_state_ = expected_top_of_stack_state;
- }
-}
-
-
-void VirtualFrame::Enter() {
- Comment cmnt(masm(), "[ Enter JS frame");
-
-#ifdef DEBUG
- // Verify that r1 contains a JS function. The following code relies
- // on r2 being available for use.
- if (FLAG_debug_code) {
- Label map_check, done;
- __ tst(r1, Operand(kSmiTagMask));
- __ b(ne, &map_check);
- __ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
- __ bind(&map_check);
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(eq, &done);
- __ stop("VirtualFrame::Enter - r1 is not a function (map check).");
- __ bind(&done);
- }
-#endif // DEBUG
-
- // We are about to push four values to the frame.
- Adjust(4);
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
-}
-
-
-void VirtualFrame::Exit() {
- Comment cmnt(masm(), "[ Exit JS frame");
- // Record the location of the JS exit code for patching when setting
- // break point.
- __ RecordJSReturn();
-
- // Drop the execution stack down to the frame pointer and restore the caller
- // frame pointer and return address.
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
- int count = local_count();
- if (count > 0) {
- Comment cmnt(masm(), "[ Allocate space for locals");
- Adjust(count);
- // Initialize stack slots with 'undefined' value.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- if (count < kLocalVarBound) {
- // For less locals the unrolled loop is more compact.
- for (int i = 0; i < count; i++) {
- __ push(ip);
- }
- } else {
- // For more locals a loop in generated code is more compact.
- Label alloc_locals_loop;
- __ mov(r1, Operand(count));
- __ bind(&alloc_locals_loop);
- __ push(ip);
- __ sub(r1, r1, Operand(1), SetCC);
- __ b(ne, &alloc_locals_loop);
- }
- } else {
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- }
- // Check the stack for overflow or a break request.
- masm()->cmp(sp, Operand(r2));
- StackCheckStub stub;
- // Call the stub if lower.
- masm()->mov(ip,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
- masm()->Call(ip, lo);
-}
-
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
- UNIMPLEMENTED();
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
- // Grow the expression stack by handler size less one (the return
- // address in lr is already counted by a call instruction).
- Adjust(kHandlerSize - 1);
- __ PushTryHandler(IN_JAVASCRIPT, type);
-}
-
-
-void VirtualFrame::CallJSFunction(int arg_count) {
- // InvokeFunction requires function in r1.
- PopToR1();
- SpillAll();
-
- // +1 for receiver.
- Forget(arg_count + 1);
- ASSERT(cgen()->HasValidEntryRegisters());
- ParameterCount count(arg_count);
- __ InvokeFunction(r1, count, CALL_FUNCTION);
- // Restore the context.
- __ ldr(cp, Context());
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
- SpillAll();
- Forget(arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(f, arg_count);
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- SpillAll();
- Forget(arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(id, arg_count);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ DebugBreak();
-}
-#endif
-
-
-void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags,
- int arg_count) {
- Forget(arg_count);
- __ InvokeBuiltin(id, flags);
-}
-
-
-void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- PopToR0();
- SpillAll();
- __ mov(r2, Operand(name));
- CallCodeObject(ic, mode, 0);
-}
-
-
-void VirtualFrame::CallStoreIC(Handle<String> name,
- bool is_contextual,
- StrictModeFlag strict_mode) {
- Handle<Code> ic(Builtins::builtin(
- (strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- PopToR0();
- RelocInfo::Mode mode;
- if (is_contextual) {
- SpillAll();
- __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- mode = RelocInfo::CODE_TARGET_CONTEXT;
- } else {
- EmitPop(r1);
- SpillAll();
- mode = RelocInfo::CODE_TARGET;
- }
- __ mov(r2, Operand(name));
- CallCodeObject(ic, mode, 0);
-}
-
-
-void VirtualFrame::CallKeyedLoadIC() {
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- PopToR1R0();
- SpillAll();
- CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
-}
-
-
-void VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
- Handle<Code> ic(Builtins::builtin(
- (strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- PopToR1R0();
- SpillAll();
- EmitPop(r2);
- CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- int dropped_args) {
- switch (code->kind()) {
- case Code::CALL_IC:
- case Code::KEYED_CALL_IC:
- case Code::FUNCTION:
- break;
- case Code::KEYED_LOAD_IC:
- case Code::LOAD_IC:
- case Code::KEYED_STORE_IC:
- case Code::STORE_IC:
- ASSERT(dropped_args == 0);
- break;
- case Code::BUILTIN:
- ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
- break;
- default:
- UNREACHABLE();
- break;
- }
- Forget(dropped_args);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ Call(code, rmode);
-}
-
-
-// NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS.
-const bool VirtualFrame::kR0InUse[TOS_STATES] =
- { false, true, false, true, true };
-const bool VirtualFrame::kR1InUse[TOS_STATES] =
- { false, false, true, true, true };
-const int VirtualFrame::kVirtualElements[TOS_STATES] =
- { 0, 1, 1, 2, 2 };
-const Register VirtualFrame::kTopRegister[TOS_STATES] =
- { r0, r0, r1, r1, r0 };
-const Register VirtualFrame::kBottomRegister[TOS_STATES] =
- { r0, r0, r1, r0, r1 };
-const Register VirtualFrame::kAllocatedRegisters[
- VirtualFrame::kNumberOfAllocatedRegisters] = { r2, r3, r4, r5, r6 };
-// Popping is done by the transition implied by kStateAfterPop. Of course if
-// there were no stack slots allocated to registers then the physical SP must
-// be adjusted.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
- { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS };
-// Pushing is done by the transition implied by kStateAfterPush. Of course if
-// the maximum number of registers was already allocated to the top of stack
-// slots then one register must be physically pushed onto the stack.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
- { R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS };
-
-
-bool VirtualFrame::SpilledScope::is_spilled_ = false;
-
-
-void VirtualFrame::Drop(int count) {
- ASSERT(count >= 0);
- ASSERT(height() >= count);
- // Discard elements from the virtual frame and free any registers.
- int num_virtual_elements = kVirtualElements[top_of_stack_state_];
- while (num_virtual_elements > 0) {
- Pop();
- num_virtual_elements--;
- count--;
- if (count == 0) return;
- }
- if (count == 0) return;
- __ add(sp, sp, Operand(count * kPointerSize));
- LowerHeight(count);
-}
-
-
-void VirtualFrame::Pop() {
- if (top_of_stack_state_ == NO_TOS_REGISTERS) {
- __ add(sp, sp, Operand(kPointerSize));
- } else {
- top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
- }
- LowerHeight(1);
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
- ASSERT(!is_used(RegisterAllocator::ToNumber(reg)));
- if (top_of_stack_state_ == NO_TOS_REGISTERS) {
- __ pop(reg);
- } else {
- __ mov(reg, kTopRegister[top_of_stack_state_]);
- top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
- }
- LowerHeight(1);
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToR0() {
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- __ ldr(r0, MemOperand(sp, 0));
- break;
- case R0_TOS:
- __ push(r0);
- break;
- case R1_TOS:
- __ push(r1);
- __ mov(r0, r1);
- break;
- case R0_R1_TOS:
- __ Push(r1, r0);
- break;
- case R1_R0_TOS:
- __ Push(r0, r1);
- __ mov(r0, r1);
- break;
- default:
- UNREACHABLE();
- }
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToR1() {
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- __ ldr(r1, MemOperand(sp, 0));
- break;
- case R0_TOS:
- __ push(r0);
- __ mov(r1, r0);
- break;
- case R1_TOS:
- __ push(r1);
- break;
- case R0_R1_TOS:
- __ Push(r1, r0);
- __ mov(r1, r0);
- break;
- case R1_R0_TOS:
- __ Push(r0, r1);
- break;
- default:
- UNREACHABLE();
- }
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToR1R0() {
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- __ ldr(r1, MemOperand(sp, 0));
- __ ldr(r0, MemOperand(sp, kPointerSize));
- break;
- case R0_TOS:
- __ push(r0);
- __ mov(r1, r0);
- __ ldr(r0, MemOperand(sp, kPointerSize));
- break;
- case R1_TOS:
- __ push(r1);
- __ ldr(r0, MemOperand(sp, kPointerSize));
- break;
- case R0_R1_TOS:
- __ Push(r1, r0);
- __ Swap(r0, r1, ip);
- break;
- case R1_R0_TOS:
- __ Push(r0, r1);
- break;
- default:
- UNREACHABLE();
- }
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-Register VirtualFrame::Peek() {
- AssertIsNotSpilled();
- if (top_of_stack_state_ == NO_TOS_REGISTERS) {
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- Register answer = kTopRegister[top_of_stack_state_];
- __ pop(answer);
- return answer;
- } else {
- return kTopRegister[top_of_stack_state_];
- }
-}
-
-
-Register VirtualFrame::Peek2() {
- AssertIsNotSpilled();
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- case R0_TOS:
- case R0_R1_TOS:
- MergeTOSTo(R0_R1_TOS);
- return r1;
- case R1_TOS:
- case R1_R0_TOS:
- MergeTOSTo(R1_R0_TOS);
- return r0;
- default:
- UNREACHABLE();
- return no_reg;
- }
-}
-
-
-void VirtualFrame::Dup() {
- if (SpilledScope::is_spilled()) {
- __ ldr(ip, MemOperand(sp, 0));
- __ push(ip);
- } else {
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- __ ldr(r0, MemOperand(sp, 0));
- top_of_stack_state_ = R0_TOS;
- break;
- case R0_TOS:
- __ mov(r1, r0);
- // r0 and r1 contains the same value. Prefer state with r0 holding TOS.
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R1_TOS:
- __ mov(r0, r1);
- // r0 and r1 contains the same value. Prefer state with r0 holding TOS.
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R0_R1_TOS:
- __ push(r1);
- __ mov(r1, r0);
- // r0 and r1 contains the same value. Prefer state with r0 holding TOS.
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R1_R0_TOS:
- __ push(r0);
- __ mov(r0, r1);
- // r0 and r1 contains the same value. Prefer state with r0 holding TOS.
- top_of_stack_state_ = R0_R1_TOS;
- break;
- default:
- UNREACHABLE();
- }
- }
- RaiseHeight(1, tos_known_smi_map_ & 1);
-}
-
-
-void VirtualFrame::Dup2() {
- if (SpilledScope::is_spilled()) {
- __ ldr(ip, MemOperand(sp, kPointerSize));
- __ push(ip);
- __ ldr(ip, MemOperand(sp, kPointerSize));
- __ push(ip);
- } else {
- switch (top_of_stack_state_) {
- case NO_TOS_REGISTERS:
- __ ldr(r0, MemOperand(sp, 0));
- __ ldr(r1, MemOperand(sp, kPointerSize));
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R0_TOS:
- __ push(r0);
- __ ldr(r1, MemOperand(sp, kPointerSize));
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R1_TOS:
- __ push(r1);
- __ ldr(r0, MemOperand(sp, kPointerSize));
- top_of_stack_state_ = R1_R0_TOS;
- break;
- case R0_R1_TOS:
- __ Push(r1, r0);
- top_of_stack_state_ = R0_R1_TOS;
- break;
- case R1_R0_TOS:
- __ Push(r0, r1);
- top_of_stack_state_ = R1_R0_TOS;
- break;
- default:
- UNREACHABLE();
- }
- }
- RaiseHeight(2, tos_known_smi_map_ & 3);
-}
-
-
-Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
- ASSERT(but_not_to_this_one.is(r0) ||
- but_not_to_this_one.is(r1) ||
- but_not_to_this_one.is(no_reg));
- LowerHeight(1);
- if (top_of_stack_state_ == NO_TOS_REGISTERS) {
- if (but_not_to_this_one.is(r0)) {
- __ pop(r1);
- return r1;
- } else {
- __ pop(r0);
- return r0;
- }
- } else {
- Register answer = kTopRegister[top_of_stack_state_];
- ASSERT(!answer.is(but_not_to_this_one));
- top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
- return answer;
- }
-}
-
-
-void VirtualFrame::EnsureOneFreeTOSRegister() {
- if (kVirtualElements[top_of_stack_state_] == kMaxTOSRegisters) {
- __ push(kBottomRegister[top_of_stack_state_]);
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
- }
- ASSERT(kVirtualElements[top_of_stack_state_] != kMaxTOSRegisters);
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
- RaiseHeight(1, info.IsSmi() ? 1 : 0);
- if (reg.is(cp)) {
- // If we are pushing cp then we are about to make a call and things have to
- // be pushed to the physical stack. There's nothing to be gained by moving
- // to a TOS register and then pushing that, we might as well push to the
- // physical stack immediately.
- MergeTOSTo(NO_TOS_REGISTERS);
- __ push(reg);
- return;
- }
- if (SpilledScope::is_spilled()) {
- ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
- __ push(reg);
- return;
- }
- if (top_of_stack_state_ == NO_TOS_REGISTERS) {
- if (reg.is(r0)) {
- top_of_stack_state_ = R0_TOS;
- return;
- }
- if (reg.is(r1)) {
- top_of_stack_state_ = R1_TOS;
- return;
- }
- }
- EnsureOneFreeTOSRegister();
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- Register dest = kTopRegister[top_of_stack_state_];
- __ Move(dest, reg);
-}
-
-
-void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
- if (this_far_down < kTOSKnownSmiMapSize) {
- tos_known_smi_map_ &= ~(1 << this_far_down);
- }
- if (this_far_down == 0) {
- Pop();
- Register dest = GetTOSRegister();
- if (dest.is(reg)) {
- // We already popped one item off the top of the stack. If the only
- // free register is the one we were asked to push then we have been
- // asked to push a register that was already in use, which cannot
- // happen. It therefore follows that there are two free TOS registers:
- ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
- dest = dest.is(r0) ? r1 : r0;
- }
- __ mov(dest, reg);
- EmitPush(dest);
- } else if (this_far_down == 1) {
- int virtual_elements = kVirtualElements[top_of_stack_state_];
- if (virtual_elements < 2) {
- __ str(reg, ElementAt(this_far_down));
- } else {
- ASSERT(virtual_elements == 2);
- ASSERT(!reg.is(r0));
- ASSERT(!reg.is(r1));
- Register dest = kBottomRegister[top_of_stack_state_];
- __ mov(dest, reg);
- }
- } else {
- ASSERT(this_far_down >= 2);
- ASSERT(kVirtualElements[top_of_stack_state_] <= 2);
- __ str(reg, ElementAt(this_far_down));
- }
-}
-
-
-Register VirtualFrame::GetTOSRegister() {
- if (SpilledScope::is_spilled()) return r0;
-
- EnsureOneFreeTOSRegister();
- return kTopRegister[kStateAfterPush[top_of_stack_state_]];
-}
-
-
-void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
- RaiseHeight(1, info.IsSmi() ? 1 : 0);
- if (SpilledScope::is_spilled()) {
- __ mov(r0, operand);
- __ push(r0);
- return;
- }
- EnsureOneFreeTOSRegister();
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- __ mov(kTopRegister[top_of_stack_state_], operand);
-}
-
-
-void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
- RaiseHeight(1, info.IsSmi() ? 1 : 0);
- if (SpilledScope::is_spilled()) {
- __ ldr(r0, operand);
- __ push(r0);
- return;
- }
- EnsureOneFreeTOSRegister();
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- __ ldr(kTopRegister[top_of_stack_state_], operand);
-}
-
-
-void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
- RaiseHeight(1, 0);
- if (SpilledScope::is_spilled()) {
- __ LoadRoot(r0, index);
- __ push(r0);
- return;
- }
- EnsureOneFreeTOSRegister();
- top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
- __ LoadRoot(kTopRegister[top_of_stack_state_], index);
-}
-
-
-void VirtualFrame::EmitPushMultiple(int count, int src_regs) {
- ASSERT(SpilledScope::is_spilled());
- Adjust(count);
- __ stm(db_w, sp, src_regs);
-}
-
-
-void VirtualFrame::SpillAll() {
- switch (top_of_stack_state_) {
- case R1_R0_TOS:
- masm()->push(r0);
- // Fall through.
- case R1_TOS:
- masm()->push(r1);
- top_of_stack_state_ = NO_TOS_REGISTERS;
- break;
- case R0_R1_TOS:
- masm()->push(r1);
- // Fall through.
- case R0_TOS:
- masm()->push(r0);
- top_of_stack_state_ = NO_TOS_REGISTERS;
- // Fall through.
- case NO_TOS_REGISTERS:
- break;
- default:
- UNREACHABLE();
- break;
- }
- ASSERT(register_allocation_map_ == 0); // Not yet implemented.
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
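
The file just removed implemented a five-state cache that kept up to two expression-stack slots in r0/r1. The transition tables driving it are easier to see in isolation; the sketch below is a model for illustration (not the removed code itself) that restates kStateAfterPop and kStateAfterPush and walks a push/push/pop sequence through them:

#include <cassert>

enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS,
                  TOS_STATES };

// Popping discards the cached top; the bottom register (if any) remains.
constexpr TopOfStack kStateAfterPop[TOS_STATES] =
    { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS };
// Pushing claims a free register of the r0/r1 pair as the new top (a full
// pair first spills its bottom register via EnsureOneFreeTOSRegister).
constexpr TopOfStack kStateAfterPush[TOS_STATES] =
    { R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS };

int main() {
  TopOfStack s = kStateAfterPush[NO_TOS_REGISTERS];  // push: R0_TOS
  s = kStateAfterPush[s];                            // push: R1_R0_TOS
  s = kStateAfterPop[s];                             // pop discards r1
  assert(s == R0_TOS);  // no sp traffic was needed for any of the three
  return 0;
}
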
diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h
deleted file mode 100644
index 76470bdc5..000000000
--- a/deps/v8/src/arm/virtual-frame-arm.h
+++ /dev/null
@@ -1,520 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_VIRTUAL_FRAME_ARM_H_
-#define V8_ARM_VIRTUAL_FRAME_ARM_H_
-
-#include "register-allocator.h"
-
-namespace v8 {
-namespace internal {
-
-// This dummy class is only used to create invalid virtual frames.
-extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
-
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame. It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
- class RegisterAllocationScope;
- // A utility class to introduce a scope where the virtual frame is
- // expected to remain spilled. The constructor spills the code
- // generator's current frame, and keeps it spilled.
- class SpilledScope BASE_EMBEDDED {
- public:
- explicit SpilledScope(VirtualFrame* frame)
- : old_is_spilled_(is_spilled_) {
- if (frame != NULL) {
- if (!is_spilled_) {
- frame->SpillAll();
- } else {
- frame->AssertIsSpilled();
- }
- }
- is_spilled_ = true;
- }
- ~SpilledScope() {
- is_spilled_ = old_is_spilled_;
- }
- static bool is_spilled() { return is_spilled_; }
-
- private:
- static bool is_spilled_;
- int old_is_spilled_;
-
- SpilledScope() { }
-
- friend class RegisterAllocationScope;
- };
-
- class RegisterAllocationScope BASE_EMBEDDED {
- public:
- // A utility class to introduce a scope where the virtual frame
- // is not spilled, ie. where register allocation occurs. Eventually
- // when RegisterAllocationScope is ubiquitous it can be removed
- // along with the (by then unused) SpilledScope class.
- inline explicit RegisterAllocationScope(CodeGenerator* cgen);
- inline ~RegisterAllocationScope();
-
- private:
- CodeGenerator* cgen_;
- bool old_is_spilled_;
-
- RegisterAllocationScope() { }
- };
-
- // An illegal index into the virtual frame.
- static const int kIllegalIndex = -1;
-
- // Construct an initial virtual frame on entry to a JS function.
- inline VirtualFrame();
-
- // Construct an invalid virtual frame, used by JumpTargets.
- inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
-
- // Construct a virtual frame as a clone of an existing one.
- explicit inline VirtualFrame(VirtualFrame* original);
-
- inline CodeGenerator* cgen() const;
- inline MacroAssembler* masm();
-
- // The number of elements on the virtual frame.
- int element_count() const { return element_count_; }
-
- // The height of the virtual expression stack.
- inline int height() const;
-
- bool is_used(int num) {
- switch (num) {
- case 0: { // r0.
- return kR0InUse[top_of_stack_state_];
- }
- case 1: { // r1.
- return kR1InUse[top_of_stack_state_];
- }
- case 2:
- case 3:
- case 4:
- case 5:
- case 6: { // r2 to r6.
- ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
- ASSERT(num >= kFirstAllocatedRegister);
- if ((register_allocation_map_ &
- (1 << (num - kFirstAllocatedRegister))) == 0) {
- return false;
- } else {
- return true;
- }
- }
- default: {
- ASSERT(num < kFirstAllocatedRegister ||
- num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
- return false;
- }
- }
- }
-
- // Add extra in-memory elements to the top of the frame to match an actual
- // frame (eg, the frame after an exception handler is pushed). No code is
- // emitted.
- void Adjust(int count);
-
- // Forget elements from the top of the frame to match an actual frame (eg,
- // the frame after a runtime call). No code is emitted except to bring the
- // frame to a spilled state.
- void Forget(int count);
-
- // Spill all values from the frame to memory.
- void SpillAll();
-
- void AssertIsSpilled() const {
- ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
- ASSERT(register_allocation_map_ == 0);
- }
-
- void AssertIsNotSpilled() {
- ASSERT(!SpilledScope::is_spilled());
- }
-
- // Spill all occurrences of a specific register from the frame.
- void Spill(Register reg) {
- UNIMPLEMENTED();
- }
-
- // Spill all occurrences of an arbitrary register if possible. Return the
- // register spilled or no_reg if it was not possible to free any register
- // (ie, they all have frame-external references). Unimplemented.
- Register SpillAnyRegister();
-
- // Make this virtual frame have a state identical to an expected virtual
- // frame. As a side effect, code may be emitted to make this frame match
- // the expected one.
- void MergeTo(VirtualFrame* expected, Condition cond = al);
- void MergeTo(const VirtualFrame* expected, Condition cond = al);
-
- // Checks whether this frame can be branched to by the other frame.
- bool IsCompatibleWith(const VirtualFrame* other) const {
- return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
- }
-
- inline void ForgetTypeInfo() {
- tos_known_smi_map_ = 0;
- }
-
- // Detach a frame from its code generator, perhaps temporarily. This
- // tells the register allocator that it is free to use frame-internal
- // registers. Used when the code generator's frame is switched from this
- // one to NULL by an unconditional jump.
- void DetachFromCodeGenerator() {
- }
-
- // (Re)attach a frame to its code generator. This informs the register
- // allocator that the frame-internal register references are active again.
- // Used when a code generator's frame is switched from NULL to this one by
- // binding a label.
- void AttachToCodeGenerator() {
- }
-
- // Emit code for the physical JS entry and exit frame sequences. After
- // calling Enter, the virtual frame is ready for use; and after calling
- // Exit it should not be used. Note that Enter does not allocate space in
- // the physical frame for storing frame-allocated locals.
- void Enter();
- void Exit();
-
- // Prepare for returning from the frame by elements in the virtual frame. This
- // avoids generating unnecessary merge code when jumping to the
- // shared return site. No spill code emitted. Value to return should be in r0.
- inline void PrepareForReturn();
-
- // Number of local variables after when we use a loop for allocating.
- static const int kLocalVarBound = 5;
-
- // Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots();
-
- // The current top of the expression stack as an assembly operand.
- MemOperand Top() {
- AssertIsSpilled();
- return MemOperand(sp, 0);
- }
-
- // An element of the expression stack as an assembly operand.
- MemOperand ElementAt(int index) {
- int adjusted_index = index - kVirtualElements[top_of_stack_state_];
- ASSERT(adjusted_index >= 0);
- return MemOperand(sp, adjusted_index * kPointerSize);
- }
-
- bool KnownSmiAt(int index) {
- if (index >= kTOSKnownSmiMapSize) return false;
- return (tos_known_smi_map_ & (1 << index)) != 0;
- }
-
- // A frame-allocated local as an assembly operand.
- inline MemOperand LocalAt(int index);
-
- // Push the address of the receiver slot on the frame.
- void PushReceiverSlotAddress();
-
- // The function frame slot.
- MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
-
- // The context frame slot.
- MemOperand Context() { return MemOperand(fp, kContextOffset); }
-
- // A parameter as an assembly operand.
- inline MemOperand ParameterAt(int index);
-
- // The receiver frame slot.
- inline MemOperand Receiver();
-
- // Push a try-catch or try-finally handler on top of the virtual frame.
- void PushTryHandler(HandlerType type);
-
- // Call stub given the number of arguments it expects on (and
- // removes from) the stack.
- inline void CallStub(CodeStub* stub, int arg_count);
-
- // Call JS function from top of the stack with arguments
- // taken from the stack.
- void CallJSFunction(int arg_count);
-
- // Call runtime given the number of arguments expected on (and
- // removed from) the stack.
- void CallRuntime(Runtime::Function* f, int arg_count);
- void CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- void DebugBreak();
-#endif
-
- // Invoke builtin given the number of arguments it expects on (and
- // removes from) the stack.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flag,
- int arg_count);
-
- // Call load IC. Receiver is on the stack and is consumed. Result is returned
- // in r0.
- void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
-
- // Call store IC. If the load is contextual, value is found on top of the
- // frame. If not, value and receiver are on the frame. Both are consumed.
- // Result is returned in r0.
- void CallStoreIC(Handle<String> name, bool is_contextual,
- StrictModeFlag strict_mode);
-
- // Call keyed load IC. Key and receiver are on the stack. Both are consumed.
- // Result is returned in r0.
- void CallKeyedLoadIC();
-
- // Call keyed store IC. Value, key and receiver are on the stack. All three
- // are consumed. Result is returned in r0.
- void CallKeyedStoreIC(StrictModeFlag strict_mode);
-
- // Call into an IC stub given the number of arguments it removes
- // from the stack. Register arguments to the IC stub are implicit,
- // and depend on the type of IC stub.
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- int dropped_args);
-
- // Drop a number of elements from the top of the expression stack. May
- // emit code to affect the physical frame. Does not clobber any registers
- // excepting possibly the stack pointer.
- void Drop(int count);
-
- // Drop one element.
- void Drop() { Drop(1); }
-
- // Pop an element from the top of the expression stack. Discards
- // the result.
- void Pop();
-
- // Pop an element from the top of the expression stack. The register
- // will be one normally used for the top of stack register allocation
- // so you can't hold on to it if you push on the stack.
- Register PopToRegister(Register but_not_to_this_one = no_reg);
-
- // Look at the top of the stack. The register returned is aliased and
- // must be copied to a scratch register before modification.
- Register Peek();
-
- // Look at the value beneath the top of the stack. The register returned is
- // aliased and must be copied to a scratch register before modification.
- Register Peek2();
-
- // Duplicate the top of stack.
- void Dup();
-
- // Duplicate the two elements on top of stack.
- void Dup2();
-
- // Flushes all registers, but it puts a copy of the top-of-stack in r0.
- void SpillAllButCopyTOSToR0();
-
- // Flushes all registers, but it puts a copy of the top-of-stack in r1.
- void SpillAllButCopyTOSToR1();
-
- // Flushes all registers, but it puts a copy of the top-of-stack in r1
- // and the next value on the stack in r0.
- void SpillAllButCopyTOSToR1R0();
-
- // Pop and save an element from the top of the expression stack and
- // emit a corresponding pop instruction.
- void EmitPop(Register reg);
-
- // Takes the top two elements and puts them in r0 (top element) and r1
- // (second element).
- void PopToR1R0();
-
- // Takes the top element and puts it in r1.
- void PopToR1();
-
- // Takes the top element and puts it in r0.
- void PopToR0();
-
- // Push an element on top of the expression stack and emit a
- // corresponding push instruction.
- void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
- void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
- void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
- void EmitPushRoot(Heap::RootListIndex index);
-
- // Overwrite the nth thing on the stack. If the nth position is in a
- // register then this turns into a mov, otherwise an str. Afterwards
- // you can still use the register even if it is a register that can be
- // used for TOS (r0 or r1).
- void SetElementAt(Register reg, int this_far_down);
-
- // Get a register which is free and which must be immediately used to
- // push on the top of the stack.
- Register GetTOSRegister();
-
- // Push multiple registers on the stack and the virtual frame
- // Register are selected by setting bit in src_regs and
- // are pushed in decreasing order: r15 .. r0.
- void EmitPushMultiple(int count, int src_regs);
-
- static Register scratch0() { return r7; }
- static Register scratch1() { return r9; }
-
- private:
- static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
- static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
- static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
- static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
-
- // 5 states for the top of stack, which can be in memory or in r0 and r1.
- enum TopOfStack {
- NO_TOS_REGISTERS,
- R0_TOS,
- R1_TOS,
- R1_R0_TOS,
- R0_R1_TOS,
- TOS_STATES
- };
-
- static const int kMaxTOSRegisters = 2;
-
- static const bool kR0InUse[TOS_STATES];
- static const bool kR1InUse[TOS_STATES];
- static const int kVirtualElements[TOS_STATES];
- static const TopOfStack kStateAfterPop[TOS_STATES];
- static const TopOfStack kStateAfterPush[TOS_STATES];
- static const Register kTopRegister[TOS_STATES];
- static const Register kBottomRegister[TOS_STATES];
-
- // We allocate up to 5 locals in registers.
- static const int kNumberOfAllocatedRegisters = 5;
- // r2 to r6 are allocated to locals.
- static const int kFirstAllocatedRegister = 2;
-
- static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
-
- static Register AllocatedRegister(int r) {
- ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
- return kAllocatedRegisters[r];
- }
-
- // The number of elements on the stack frame.
- int element_count_;
- TopOfStack top_of_stack_state_:3;
- int register_allocation_map_:kNumberOfAllocatedRegisters;
- static const int kTOSKnownSmiMapSize = 4;
- unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
-
- // The index of the element that is at the processor's stack pointer
- // (the sp register). For now since everything is in memory it is given
- // by the number of elements on the not-very-virtual stack frame.
- int stack_pointer() { return element_count_ - 1; }
-
- // The number of frame-allocated locals and parameters respectively.
- inline int parameter_count() const;
- inline int local_count() const;
-
- // The index of the element that is at the processor's frame pointer
- // (the fp register). The parameters, receiver, function, and context
- // are below the frame pointer.
- inline int frame_pointer() const;
-
- // The index of the first parameter. The receiver lies below the first
- // parameter.
- int param0_index() { return 1; }
-
- // The index of the context slot in the frame. It is immediately
- // below the frame pointer.
- inline int context_index();
-
- // The index of the function slot in the frame. It is below the frame
- // pointer and context slot.
- inline int function_index();
-
- // The index of the first local. Between the frame pointer and the
- // locals lies the return address.
- inline int local0_index() const;
-
- // The index of the base of the expression stack.
- inline int expression_base_index() const;
-
- // Convert a frame index into a frame pointer relative offset into the
- // actual stack.
- inline int fp_relative(int index);
-
- // Spill all elements in registers. Spill the top spilled_args elements
- // on the frame. Sync all other frame elements.
- // Then drop dropped_args elements from the virtual frame, to match
- // the effect of an upcoming call that will drop them from the stack.
- void PrepareForCall(int spilled_args, int dropped_args);
-
- // If all top-of-stack registers are in use then the lowest one is pushed
- // onto the physical stack and made free.
- void EnsureOneFreeTOSRegister();
-
- // Emit instructions to get the top of stack state from where we are to where
- // we want to be.
- void MergeTOSTo(TopOfStack expected_state, Condition cond = al);
-
- inline bool Equals(const VirtualFrame* other);
-
- inline void LowerHeight(int count) {
- element_count_ -= count;
- if (count >= kTOSKnownSmiMapSize) {
- tos_known_smi_map_ = 0;
- } else {
- tos_known_smi_map_ >>= count;
- }
- }
-
- inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
- ASSERT(count >= 32 || known_smi_map < (1u << count));
- element_count_ += count;
- if (count >= kTOSKnownSmiMapSize) {
- tos_known_smi_map_ = known_smi_map;
- } else {
- tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
- }
- }
-
- friend class JumpTarget;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_VIRTUAL_FRAME_ARM_H_
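
One subtlety in the header just removed is the tos_known_smi_map_ bit-field: bit i records whether the element i slots below the top of the expression stack is a known smi, so the map must shift in step with the stack height. A condensed C++ sketch of that bookkeeping (the same logic as LowerHeight, RaiseHeight, and KnownSmiAt above, pulled out of the class for clarity; the struct name is invented):

constexpr int kTOSKnownSmiMapSize = 4;

struct KnownSmiMap {
  unsigned bits = 0;

  void Lower(int count) {  // `count` elements popped off the frame
    bits = (count >= kTOSKnownSmiMapSize) ? 0u : (bits >> count);
  }
  void Raise(int count, unsigned known_smi_map) {  // elements pushed
    bits = (count >= kTOSKnownSmiMapSize)
               ? known_smi_map
               : ((bits << count) | known_smi_map);
  }
  bool KnownSmiAt(int index) const {  // index 0 is the top of the stack
    return index < kTOSKnownSmiMapSize && ((bits >> index) & 1u) != 0;
  }
};
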
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index 0753f1e2a..34ebd4e22 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -33,7 +33,7 @@
// Global list of arrays visited during toString, toLocaleString and
// join invocations.
-var visited_arrays = new $Array();
+var visited_arrays = new InternalArray();
// Gets a sorted array of array keys. Useful for operations on sparse
@@ -67,13 +67,32 @@ function GetSortedArrayKeys(array, intervals) {
}
+function SparseJoinWithSeparator(array, len, convert, separator) {
+ var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
+ var totalLength = 0;
+ var elements = new InternalArray(keys.length * 2);
+ var previousKey = -1;
+ for (var i = 0; i < keys.length; i++) {
+ var key = keys[i];
+ if (key != previousKey) { // keys may contain duplicates.
+ var e = array[key];
+ if (!IS_STRING(e)) e = convert(e);
+ elements[i * 2] = key;
+ elements[i * 2 + 1] = e;
+ previousKey = key;
+ }
+ }
+ return %SparseJoinWithSeparator(elements, len, separator);
+}
+
+
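Editorial note: SparseJoinWithSeparator flattens the sparse array into an interleaved [key, element, key, element, ...] list and hands it to the %SparseJoinWithSeparator runtime function, which must still emit one separator slot per index, holes included. A rough C++ analogue of the required semantics (assumption: std::map stands in for the sparse backing store, and the runtime call is replaced by inline expansion; the real code walks sorted keys instead of probing every index):

    #include <cstdint>
    #include <map>
    #include <string>

    // Sketch: join a sparse "array" of length len with a separator.
    // Every index contributes a separator slot; holes contribute an
    // empty element, matching Array.prototype.join semantics.
    std::string SparseJoin(const std::map<uint32_t, std::string>& elements,
                           uint32_t len, const std::string& sep) {
      std::string result;
      for (uint32_t i = 0; i < len; i++) {
        if (i > 0) result += sep;
        auto it = elements.find(i);
        if (it != elements.end()) result += it->second;
      }
      return result;
    }
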
// Optimized for sparse arrays if separator is ''.
function SparseJoin(array, len, convert) {
var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
var last_key = -1;
var keys_length = keys.length;
- var elements = new $Array(keys_length);
+ var elements = new InternalArray(keys_length);
var elements_length = 0;
for (var i = 0; i < keys_length; i++) {
@@ -110,8 +129,12 @@ function Join(array, length, separator, convert) {
// Attempt to convert the elements.
try {
- if (UseSparseVariant(array, length, is_array) && (separator.length == 0)) {
- return SparseJoin(array, length, convert);
+ if (UseSparseVariant(array, length, is_array)) {
+ if (separator.length == 0) {
+ return SparseJoin(array, length, convert);
+ } else {
+ return SparseJoinWithSeparator(array, length, convert, separator);
+ }
}
// Fast case for one-element arrays.
@@ -122,17 +145,15 @@ function Join(array, length, separator, convert) {
}
// Construct an array for the elements.
- var elements = new $Array(length);
+ var elements = new InternalArray(length);
// We pull the empty separator check outside the loop for speed!
if (separator.length == 0) {
var elements_length = 0;
for (var i = 0; i < length; i++) {
var e = array[i];
- if (!IS_UNDEFINED(e)) {
- if (!IS_STRING(e)) e = convert(e);
- elements[elements_length++] = e;
- }
+ if (!IS_STRING(e)) e = convert(e);
+ elements[elements_length++] = e;
}
elements.length = elements_length;
var result = %_FastAsciiArrayJoin(elements, '');
@@ -140,7 +161,7 @@ function Join(array, length, separator, convert) {
return %StringBuilderConcat(elements, elements_length, '');
}
// Non-empty separator case.
- // If the first element is a number then use the heuristic that the
+ // If the first element is a number then use the heuristic that the
// remaining elements are also likely to be numbers.
if (!IS_NUMBER(array[0])) {
for (var i = 0; i < length; i++) {
@@ -148,18 +169,19 @@ function Join(array, length, separator, convert) {
if (!IS_STRING(e)) e = convert(e);
elements[i] = e;
}
- } else {
+ } else {
for (var i = 0; i < length; i++) {
var e = array[i];
- if (IS_NUMBER(e)) elements[i] = %_NumberToString(e);
- else {
- if (!IS_STRING(e)) e = convert(e);
+ if (IS_NUMBER(e)) {
+ e = %_NumberToString(e);
+ } else if (!IS_STRING(e)) {
+ e = convert(e);
+ }
elements[i] = e;
- }
}
- }
+ }
var result = %_FastAsciiArrayJoin(elements, separator);
- if (!IS_UNDEFINED(result)) return result;
+ if (!IS_UNDEFINED(result)) return result;
return %StringBuilderJoin(elements, length, separator);
} finally {
@@ -171,7 +193,7 @@ function Join(array, length, separator, convert) {
function ConvertToString(x) {
- // Assumes x is a non-string.
+ // Assumes x is a non-string.
if (IS_NUMBER(x)) return %_NumberToString(x);
if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
return (IS_NULL_OR_UNDEFINED(x)) ? '' : %ToString(%DefaultString(x));
@@ -241,7 +263,7 @@ function SmartSlice(array, start_i, del_count, len, deleted_elements) {
// special array operations to handle sparse arrays in a sensible fashion.
function SmartMove(array, start_i, del_count, len, num_additional_args) {
// Move data to new array.
- var new_array = new $Array(len - del_count + num_additional_args);
+ var new_array = new InternalArray(len - del_count + num_additional_args);
var intervals = %GetArrayKeys(array, len);
var length = intervals.length;
for (var k = 0; k < length; k++) {
@@ -375,6 +397,11 @@ function ArrayToLocaleString() {
function ArrayJoin(separator) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.join"]);
+ }
+
if (IS_UNDEFINED(separator)) {
separator = ',';
} else if (!IS_STRING(separator)) {
@@ -391,6 +418,11 @@ function ArrayJoin(separator) {
// Removes the last element from the array and returns it. See
// ECMA-262, section 15.4.4.6.
function ArrayPop() {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.pop"]);
+ }
+
var n = TO_UINT32(this.length);
if (n == 0) {
this.length = n;
@@ -407,6 +439,11 @@ function ArrayPop() {
// Appends the arguments to the end of the array and returns the new
// length of the array. See ECMA-262, section 15.4.4.7.
function ArrayPush() {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.push"]);
+ }
+
var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();
for (var i = 0; i < m; i++) {
@@ -418,8 +455,13 @@ function ArrayPush() {
function ArrayConcat(arg1) { // length == 1
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.concat"]);
+ }
+
var arg_count = %_ArgumentsLength();
- var arrays = new $Array(1 + arg_count);
+ var arrays = new InternalArray(1 + arg_count);
arrays[0] = this;
for (var i = 0; i < arg_count; i++) {
arrays[i + 1] = %_Arguments(i);
@@ -474,6 +516,11 @@ function SparseReverse(array, len) {
function ArrayReverse() {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.reverse"]);
+ }
+
var j = TO_UINT32(this.length) - 1;
if (UseSparseVariant(this, j, IS_ARRAY(this))) {
@@ -505,6 +552,11 @@ function ArrayReverse() {
function ArrayShift() {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.shift"]);
+ }
+
var len = TO_UINT32(this.length);
if (len === 0) {
@@ -526,6 +578,11 @@ function ArrayShift() {
function ArrayUnshift(arg1) { // length == 1
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.unshift"]);
+ }
+
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
@@ -545,6 +602,11 @@ function ArrayUnshift(arg1) { // length == 1
function ArraySlice(start, end) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.slice"]);
+ }
+
var len = TO_UINT32(this.length);
var start_i = TO_INTEGER(start);
var end_i = len;
@@ -569,7 +631,9 @@ function ArraySlice(start, end) {
if (end_i < start_i) return result;
- if (IS_ARRAY(this)) {
+ if (IS_ARRAY(this) &&
+ (end_i > 1000) &&
+ (%EstimateNumberOfElements(this) < end_i)) {
SmartSlice(this, start_i, end_i - start_i, len, result);
} else {
SimpleSlice(this, start_i, end_i - start_i, len, result);
@@ -582,6 +646,11 @@ function ArraySlice(start, end) {
function ArraySplice(start, delete_count) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.splice"]);
+ }
+
var num_arguments = %_ArgumentsLength();
var len = TO_UINT32(this.length);
@@ -653,6 +722,11 @@ function ArraySplice(start, delete_count) {
function ArraySort(comparefn) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.sort"]);
+ }
+
// In-place QuickSort algorithm.
// For short (length <= 22) arrays, insertion sort is used for efficiency.
@@ -914,6 +988,11 @@ function ArraySort(comparefn) {
// preserving the semantics, since the calls to the receiver function can add
// or delete elements from the array.
function ArrayFilter(f, receiver) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.filter"]);
+ }
+
if (!IS_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
@@ -925,7 +1004,9 @@ function ArrayFilter(f, receiver) {
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
- if (f.call(receiver, current, i, this)) result[result_length++] = current;
+ if (f.call(receiver, current, i, this)) {
+ result[result_length++] = current;
+ }
}
}
return result;
@@ -933,6 +1014,11 @@ function ArrayFilter(f, receiver) {
function ArrayForEach(f, receiver) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.forEach"]);
+ }
+
if (!IS_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
@@ -951,6 +1037,11 @@ function ArrayForEach(f, receiver) {
// Executes the function once for each element present in the
// array until it finds one where callback returns true.
function ArraySome(f, receiver) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.some"]);
+ }
+
if (!IS_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
@@ -968,6 +1059,11 @@ function ArraySome(f, receiver) {
function ArrayEvery(f, receiver) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.every"]);
+ }
+
if (!IS_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
@@ -984,24 +1080,36 @@ function ArrayEvery(f, receiver) {
}
function ArrayMap(f, receiver) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.map"]);
+ }
+
if (!IS_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = TO_UINT32(this.length);
- var result = new $Array(length);
+ var result = new $Array();
+ var accumulator = new InternalArray(length);
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
- result[i] = f.call(receiver, current, i, this);
+ accumulator[i] = f.call(receiver, current, i, this);
}
}
+ %MoveArrayContents(accumulator, result);
return result;
}
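Editorial note: ArrayMap now builds into a detached InternalArray and publishes the finished contents into the user-visible result in one step, so a partially-mapped array is never observable and the callback cannot interfere through patched Array.prototype accessors. A loose C++ analogue of that build-privately, publish-atomically shape (illustrative, with std::vector standing in for both arrays):

    #include <utility>
    #include <vector>

    // Sketch: results accumulate in a buffer the caller never sees, then
    // move wholesale into the visible result (cf. %MoveArrayContents).
    template <typename T, typename F>
    std::vector<T> Map(const std::vector<T>& input, F f) {
      std::vector<T> accumulator;
      accumulator.reserve(input.size());
      for (const T& v : input) accumulator.push_back(f(v));
      std::vector<T> result;
      result = std::move(accumulator);  // O(1) hand-off, no element copies
      return result;
    }
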
function ArrayIndexOf(element, index) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.indexOf"]);
+ }
+
var length = TO_UINT32(this.length);
if (length == 0) return -1;
if (IS_UNDEFINED(index)) {
@@ -1059,6 +1167,11 @@ function ArrayIndexOf(element, index) {
function ArrayLastIndexOf(element, index) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.lastIndexOf"]);
+ }
+
var length = TO_UINT32(this.length);
if (length == 0) return -1;
if (%_ArgumentsLength() < 2) {
@@ -1112,6 +1225,11 @@ function ArrayLastIndexOf(element, index) {
function ArrayReduce(callback, current) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.reduce"]);
+ }
+
if (!IS_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
@@ -1134,13 +1252,18 @@ function ArrayReduce(callback, current) {
for (; i < length; i++) {
var element = this[i];
if (!IS_UNDEFINED(element) || i in this) {
- current = callback.call(null, current, element, i, this);
+ current = callback.call(void 0, current, element, i, this);
}
}
return current;
}
function ArrayReduceRight(callback, current) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.reduceRight"]);
+ }
+
if (!IS_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
@@ -1160,7 +1283,7 @@ function ArrayReduceRight(callback, current) {
for (; i >= 0; i--) {
var element = this[i];
if (!IS_UNDEFINED(element) || i in this) {
- current = callback.call(null, current, element, i, this);
+ current = callback.call(void 0, current, element, i, this);
}
}
return current;
@@ -1225,6 +1348,20 @@ function SetupArray() {
));
%FinishArrayPrototypeSetup($Array.prototype);
+
+ // The internal Array prototype doesn't need to be fancy, since it's never
+ // exposed to user code, so no hidden prototypes or DONT_ENUM attributes
+ // are necessary.
+ // The null __proto__ ensures that we never inherit any user created
+ // getters or setters from, e.g., Object.prototype.
+ InternalArray.prototype.__proto__ = null;
+ // Adding only the functions that are actually used, and a toString.
+ InternalArray.prototype.join = getFunction("join", ArrayJoin);
+ InternalArray.prototype.pop = getFunction("pop", ArrayPop);
+ InternalArray.prototype.push = getFunction("push", ArrayPush);
+ InternalArray.prototype.toString = function() {
+ return "Internal Array, length " + this.length;
+ };
}
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 68f01dec0..3c7fc1c63 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -30,7 +30,7 @@
// The original source code covered by the above license has been
// modified significantly by Google Inc.
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -55,6 +55,8 @@
#include "x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/regexp-macro-assembler-mips.h"
#else // Unknown architecture.
#error "Unknown architecture."
#endif // Target architecture.
@@ -67,10 +69,25 @@ namespace internal {
const double DoubleConstant::min_int = kMinInt;
const double DoubleConstant::one_half = 0.5;
const double DoubleConstant::minus_zero = -0.0;
+const double DoubleConstant::uint8_max_value = 255;
+const double DoubleConstant::zero = 0.0;
+const double DoubleConstant::nan = OS::nan_value();
const double DoubleConstant::negative_infinity = -V8_INFINITY;
const char* RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
// -----------------------------------------------------------------------------
+// Implementation of AssemblerBase
+
+AssemblerBase::AssemblerBase(Isolate* isolate)
+ : isolate_(isolate),
+ jit_cookie_(0) {
+ if (FLAG_mask_constants_with_cookie && isolate != NULL) {
+ jit_cookie_ = V8::RandomPrivate(isolate);
+ }
+}
+
+
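Editorial note: FLAG_mask_constants_with_cookie gives each assembler a random per-isolate cookie; on platforms that use it, immediates can be emitted XOR-masked with the cookie and unmasked just before use, so attacker-chosen constants never appear verbatim in JIT-generated code (a mitigation for JIT spraying). A minimal sketch of the masking idea (assumption: illustrative only, not V8's exact emission path):

    #include <cstdint>

    // Emit side: store imm ^ cookie instead of imm.
    int32_t MaskConstant(int32_t imm, int32_t jit_cookie) {
      return imm ^ jit_cookie;
    }

    // Use side: XOR again at runtime to recover the original immediate.
    int32_t UnmaskConstant(int32_t masked, int32_t jit_cookie) {
      return masked ^ jit_cookie;
    }
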
+// -----------------------------------------------------------------------------
// Implementation of Label
int Label::pos() const {
@@ -84,58 +101,85 @@ int Label::pos() const {
// -----------------------------------------------------------------------------
// Implementation of RelocInfoWriter and RelocIterator
//
+// Relocation information is written backwards in memory, from high addresses
+// towards low addresses, byte by byte. Therefore, in the encodings listed
+// below, the first byte listed it at the highest address, and successive
+// bytes in the record are at progressively lower addresses.
+//
// Encoding
//
// The most common modes are given single-byte encodings. Also, it is
// easy to identify the type of reloc info and skip unwanted modes in
// an iteration.
//
-// The encoding relies on the fact that there are less than 14
-// different relocation modes.
-//
-// embedded_object: [6 bits pc delta] 00
-//
-// code_taget: [6 bits pc delta] 01
+// The encoding relies on the fact that there are fewer than 14
+// different non-compactly encoded relocation modes.
//
-// position: [6 bits pc delta] 10,
-// [7 bits signed data delta] 0
+// The first byte of a relocation record has a tag in its low 2 bits:
+// Here are the record schemes, depending on the low tag and optional higher
+// tags.
//
-// statement_position: [6 bits pc delta] 10,
-// [7 bits signed data delta] 1
+// Low tag:
+// 00: embedded_object: [6-bit pc delta] 00
//
-// any nondata mode: 00 [4 bits rmode] 11, // rmode: 0..13 only
-// 00 [6 bits pc delta]
+// 01: code_target: [6-bit pc delta] 01
//
-// pc-jump: 00 1111 11,
-// 00 [6 bits pc delta]
+// 10: short_data_record: [6-bit pc delta] 10 followed by
+// [6-bit data delta] [2-bit data type tag]
//
-// pc-jump: 01 1111 11,
-// (variable length) 7 - 26 bit pc delta, written in chunks of 7
-// bits, the lowest 7 bits written first.
+// 11: long_record [2-bit high tag][4 bit middle_tag] 11
+// followed by variable data depending on type.
//
-// data-jump + pos: 00 1110 11,
-// signed intptr_t, lowest byte written first
+// 2-bit data type tags, used in short_data_record and data_jump long_record:
+// code_target_with_id: 00
+// position: 01
+// statement_position: 10
+// comment: 11 (not used in short_data_record)
//
-// data-jump + st.pos: 01 1110 11,
-// signed intptr_t, lowest byte written first
+// Long record format:
+// 4-bit middle_tag:
+// 0000 - 1100 : Short record for RelocInfo::Mode middle_tag + 2
+// (The middle_tag encodes rmode - RelocInfo::LAST_COMPACT_ENUM,
+// and is between 0000 and 1100)
+// The format is:
+// 00 [4 bit middle_tag] 11 followed by
+// 00 [6 bit pc delta]
//
-// data-jump + comm.: 10 1110 11,
-// signed intptr_t, lowest byte written first
+// 1101: not used (would allow one more relocation mode to be added)
+// 1110: long_data_record
+// The format is: [2-bit data_type_tag] 1110 11
+// signed intptr_t, lowest byte written first
+// (except data_type code_target_with_id, which
+// is followed by a signed int, not intptr_t.)
//
+// 1111: long_pc_jump
+// The format is:
+// pc-jump: 00 1111 11,
+// 00 [6 bits pc delta]
+// or
+// pc-jump (variable length):
+// 01 1111 11,
+// [7 bits data] 0
+// ...
+// [7 bits data] 1
+// (Bits 6..31 of pc delta, with leading zeroes
+// dropped, and last non-zero chunk tagged with 1.)
+
+
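Editorial note: putting the tag layout above into code, the low two bits of a record's first (highest-addressed) byte select the record kind, and for the one-byte encodings the remaining six bits are the pc delta. A decoding sketch (illustrative; it uses the kTag* constants defined just below):

    // Sketch: classify a record by the 2-bit low tag of its first byte.
    int LowTag(unsigned char first_byte) { return first_byte & kTagMask; }
    int SmallPCDelta(unsigned char first_byte) { return first_byte >> kTagBits; }
    // LowTag == kEmbeddedObjectTag : embedded object record
    // LowTag == kCodeTargetTag     : code target record
    // LowTag == kLocatableTag      : short data record; the next byte packs
    //                                a 6-bit delta over a 2-bit type tag
    // LowTag == kDefaultTag        : long record, dispatched on the extra tag
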
const int kMaxRelocModes = 14;
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
const int kExtraTagBits = 4;
-const int kPositionTypeTagBits = 1;
-const int kSmallDataBits = kBitsPerByte - kPositionTypeTagBits;
+const int kLocatableTypeTagBits = 2;
+const int kSmallDataBits = kBitsPerByte - kLocatableTypeTagBits;
const int kEmbeddedObjectTag = 0;
const int kCodeTargetTag = 1;
-const int kPositionTag = 2;
+const int kLocatableTag = 2;
const int kDefaultTag = 3;
-const int kPCJumpTag = (1 << kExtraTagBits) - 1;
+const int kPCJumpExtraTag = (1 << kExtraTagBits) - 1;
const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
@@ -149,11 +193,12 @@ const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;
-const int kDataJumpTag = kPCJumpTag - 1;
+const int kDataJumpExtraTag = kPCJumpExtraTag - 1;
-const int kNonstatementPositionTag = 0;
-const int kStatementPositionTag = 1;
-const int kCommentTag = 2;
+const int kCodeWithIdTag = 0;
+const int kNonstatementPositionTag = 1;
+const int kStatementPositionTag = 2;
+const int kCommentTag = 3;
uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
@@ -161,7 +206,7 @@ uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
// Otherwise write a variable length PC jump for the bits that do
// not fit in the kSmallPCDeltaBits bits.
if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
- WriteExtraTag(kPCJumpTag, kVariableLengthPCJumpTopTag);
+ WriteExtraTag(kPCJumpExtraTag, kVariableLengthPCJumpTopTag);
uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
ASSERT(pc_jump > 0);
// Write kChunkBits size chunks of the pc_jump.
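Editorial note: the chunk-writing loop is elided from this hunk. It emits pc_jump in kChunkBits-sized (7-bit) chunks, lowest bits first, with only the final chunk tagged so the reader knows where to stop. A standalone sketch of that chunking (assumption: constants mirror kChunkBits/kLastChunkTag; bytes here are appended forward rather than written backwards):

    #include <cstdint>
    #include <vector>

    // Sketch: emit the overflow bits of a pc delta in 7-bit chunks,
    // lowest chunk first; bit 0 of each byte is 1 only on the last chunk.
    void WriteChunks(uint32_t pc_jump, std::vector<uint8_t>* out) {
      const int kChunkBits = 7;
      const uint32_t kChunkMask = (1u << kChunkBits) - 1;
      while (pc_jump != 0) {
        uint32_t chunk = pc_jump & kChunkMask;
        pc_jump >>= kChunkBits;
        uint8_t last = (pc_jump == 0) ? 1 : 0;  // kLastChunkTag
        out->push_back(static_cast<uint8_t>((chunk << 1) | last));
      }
    }
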
@@ -184,7 +229,7 @@ void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) {
void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
- *--pos_ = static_cast<byte>(data_delta << kPositionTypeTagBits | tag);
+ *--pos_ = static_cast<byte>(data_delta << kLocatableTypeTagBits | tag);
}
@@ -203,11 +248,20 @@ void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) {
}
+void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
+ WriteExtraTag(kDataJumpExtraTag, top_tag);
+ for (int i = 0; i < kIntSize; i++) {
+ *--pos_ = static_cast<byte>(data_delta);
+ // Signed right shift is arithmetic shift. Tested in test-utils.cc.
+ data_delta = data_delta >> kBitsPerByte;
+ }
+}
+
void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
- WriteExtraTag(kDataJumpTag, top_tag);
+ WriteExtraTag(kDataJumpExtraTag, top_tag);
for (int i = 0; i < kIntptrSize; i++) {
*--pos_ = static_cast<byte>(data_delta);
- // Signed right shift is arithmetic shift. Tested in test-utils.cc.
+ // Signed right shift is arithmetic shift. Tested in test-utils.cc.
data_delta = data_delta >> kBitsPerByte;
}
}
@@ -217,9 +271,9 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
#ifdef DEBUG
byte* begin_pos = pos_;
#endif
- Counters::reloc_info_count.Increment();
ASSERT(rinfo->pc() - last_pc_ >= 0);
- ASSERT(RelocInfo::NUMBER_OF_MODES <= kMaxRelocModes);
+ ASSERT(RelocInfo::NUMBER_OF_MODES - RelocInfo::LAST_COMPACT_ENUM <=
+ kMaxRelocModes);
// Use unsigned delta-encoding for pc.
uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
RelocInfo::Mode rmode = rinfo->rmode();
@@ -230,35 +284,48 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
} else if (rmode == RelocInfo::CODE_TARGET) {
WriteTaggedPC(pc_delta, kCodeTargetTag);
ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
+ } else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+ // Use signed delta-encoding for id.
+ ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
+ int id_delta = static_cast<int>(rinfo->data()) - last_id_;
+ // Check if delta is small enough to fit in a tagged byte.
+ if (is_intn(id_delta, kSmallDataBits)) {
+ WriteTaggedPC(pc_delta, kLocatableTag);
+ WriteTaggedData(id_delta, kCodeWithIdTag);
+ } else {
+ // Otherwise, use costly encoding.
+ WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
+ WriteExtraTaggedIntData(id_delta, kCodeWithIdTag);
+ }
+ last_id_ = static_cast<int>(rinfo->data());
} else if (RelocInfo::IsPosition(rmode)) {
- // Use signed delta-encoding for data.
- intptr_t data_delta = rinfo->data() - last_data_;
- int pos_type_tag = rmode == RelocInfo::POSITION ? kNonstatementPositionTag
- : kStatementPositionTag;
- // Check if data is small enough to fit in a tagged byte.
- // We cannot use is_intn because data_delta is not an int32_t.
- if (data_delta >= -(1 << (kSmallDataBits-1)) &&
- data_delta < 1 << (kSmallDataBits-1)) {
- WriteTaggedPC(pc_delta, kPositionTag);
- WriteTaggedData(data_delta, pos_type_tag);
- last_data_ = rinfo->data();
+ // Use signed delta-encoding for position.
+ ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
+ int pos_delta = static_cast<int>(rinfo->data()) - last_position_;
+ int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
+ : kStatementPositionTag;
+ // Check if delta is small enough to fit in a tagged byte.
+ if (is_intn(pos_delta, kSmallDataBits)) {
+ WriteTaggedPC(pc_delta, kLocatableTag);
+ WriteTaggedData(pos_delta, pos_type_tag);
} else {
// Otherwise, use costly encoding.
- WriteExtraTaggedPC(pc_delta, kPCJumpTag);
- WriteExtraTaggedData(data_delta, pos_type_tag);
- last_data_ = rinfo->data();
+ WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
+ WriteExtraTaggedIntData(pos_delta, pos_type_tag);
}
+ last_position_ = static_cast<int>(rinfo->data());
} else if (RelocInfo::IsComment(rmode)) {
// Comments are normally not generated, so we use the costly encoding.
- WriteExtraTaggedPC(pc_delta, kPCJumpTag);
- WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag);
- last_data_ = rinfo->data();
+ WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
+ WriteExtraTaggedData(rinfo->data(), kCommentTag);
ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
} else {
+ ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
+ int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
// For all other modes we simply use the mode as the extra tag.
// None of these modes need a data component.
- ASSERT(rmode < kPCJumpTag && rmode < kDataJumpTag);
- WriteExtraTaggedPC(pc_delta, rmode);
+ ASSERT(saved_mode < kPCJumpExtraTag && saved_mode < kDataJumpExtraTag);
+ WriteExtraTaggedPC(pc_delta, saved_mode);
}
last_pc_ = rinfo->pc();
#ifdef DEBUG
@@ -292,12 +359,32 @@ inline void RelocIterator::AdvanceReadPC() {
}
+void RelocIterator::AdvanceReadId() {
+ int x = 0;
+ for (int i = 0; i < kIntSize; i++) {
+ x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
+ }
+ last_id_ += x;
+ rinfo_.data_ = last_id_;
+}
+
+
+void RelocIterator::AdvanceReadPosition() {
+ int x = 0;
+ for (int i = 0; i < kIntSize; i++) {
+ x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
+ }
+ last_position_ += x;
+ rinfo_.data_ = last_position_;
+}
+
+
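Editorial note: AdvanceReadId and AdvanceReadPosition both rebuild an int the writer emitted lowest byte first, then fold it into a running value, so records only ever store deltas against last_id_/last_position_. A sketch of the reconstruction (illustrative; the real iterator walks pos_ backwards because the stream is written backwards):

    #include <cstdint>

    // Sketch: reassemble a 4-byte little-endian signed int and apply it
    // as a delta to a running value.
    int ReadIntDelta(const uint8_t* bytes, int* running) {
      int x = 0;
      for (int i = 0; i < 4; i++) {
        x |= static_cast<int>(bytes[i]) << (i * 8);
      }
      *running += x;
      return *running;
    }
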
void RelocIterator::AdvanceReadData() {
intptr_t x = 0;
for (int i = 0; i < kIntptrSize; i++) {
x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
}
- rinfo_.data_ += x;
+ rinfo_.data_ = x;
}
@@ -317,27 +404,33 @@ void RelocIterator::AdvanceReadVariableLengthPCJump() {
}
-inline int RelocIterator::GetPositionTypeTag() {
- return *pos_ & ((1 << kPositionTypeTagBits) - 1);
+inline int RelocIterator::GetLocatableTypeTag() {
+ return *pos_ & ((1 << kLocatableTypeTagBits) - 1);
}
-inline void RelocIterator::ReadTaggedData() {
+inline void RelocIterator::ReadTaggedId() {
int8_t signed_b = *pos_;
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
- rinfo_.data_ += signed_b >> kPositionTypeTagBits;
+ last_id_ += signed_b >> kLocatableTypeTagBits;
+ rinfo_.data_ = last_id_;
}
-inline RelocInfo::Mode RelocIterator::DebugInfoModeFromTag(int tag) {
- if (tag == kStatementPositionTag) {
- return RelocInfo::STATEMENT_POSITION;
- } else if (tag == kNonstatementPositionTag) {
- return RelocInfo::POSITION;
- } else {
- ASSERT(tag == kCommentTag);
- return RelocInfo::COMMENT;
- }
+inline void RelocIterator::ReadTaggedPosition() {
+ int8_t signed_b = *pos_;
+ // Signed right shift is arithmetic shift. Tested in test-utils.cc.
+ last_position_ += signed_b >> kLocatableTypeTagBits;
+ rinfo_.data_ = last_position_;
+}
+
+
+static inline RelocInfo::Mode GetPositionModeFromTag(int tag) {
+ ASSERT(tag == kNonstatementPositionTag ||
+ tag == kStatementPositionTag);
+ return (tag == kNonstatementPositionTag) ?
+ RelocInfo::POSITION :
+ RelocInfo::STATEMENT_POSITION;
}
@@ -356,37 +449,64 @@ void RelocIterator::next() {
} else if (tag == kCodeTargetTag) {
ReadTaggedPC();
if (SetMode(RelocInfo::CODE_TARGET)) return;
- } else if (tag == kPositionTag) {
+ } else if (tag == kLocatableTag) {
ReadTaggedPC();
Advance();
- // Check if we want source positions.
- if (mode_mask_ & RelocInfo::kPositionMask) {
- ReadTaggedData();
- if (SetMode(DebugInfoModeFromTag(GetPositionTypeTag()))) return;
+ int locatable_tag = GetLocatableTypeTag();
+ if (locatable_tag == kCodeWithIdTag) {
+ if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
+ ReadTaggedId();
+ return;
+ }
+ } else {
+ // Compact encoding is never used for comments,
+ // so it must be a position.
+ ASSERT(locatable_tag == kNonstatementPositionTag ||
+ locatable_tag == kStatementPositionTag);
+ if (mode_mask_ & RelocInfo::kPositionMask) {
+ ReadTaggedPosition();
+ if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
+ }
}
} else {
ASSERT(tag == kDefaultTag);
int extra_tag = GetExtraTag();
- if (extra_tag == kPCJumpTag) {
+ if (extra_tag == kPCJumpExtraTag) {
int top_tag = GetTopTag();
if (top_tag == kVariableLengthPCJumpTopTag) {
AdvanceReadVariableLengthPCJump();
} else {
AdvanceReadPC();
}
- } else if (extra_tag == kDataJumpTag) {
- // Check if we want debug modes (the only ones with data).
- if (mode_mask_ & RelocInfo::kDebugMask) {
- int top_tag = GetTopTag();
- AdvanceReadData();
- if (SetMode(DebugInfoModeFromTag(top_tag))) return;
+ } else if (extra_tag == kDataJumpExtraTag) {
+ int locatable_tag = GetTopTag();
+ if (locatable_tag == kCodeWithIdTag) {
+ if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
+ AdvanceReadId();
+ return;
+ }
+ Advance(kIntSize);
+ } else if (locatable_tag != kCommentTag) {
+ ASSERT(locatable_tag == kNonstatementPositionTag ||
+ locatable_tag == kStatementPositionTag);
+ if (mode_mask_ & RelocInfo::kPositionMask) {
+ AdvanceReadPosition();
+ if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
+ } else {
+ Advance(kIntSize);
+ }
} else {
- // Otherwise, just skip over the data.
+ ASSERT(locatable_tag == kCommentTag);
+ if (SetMode(RelocInfo::COMMENT)) {
+ AdvanceReadData();
+ return;
+ }
Advance(kIntptrSize);
}
} else {
AdvanceReadPC();
- if (SetMode(static_cast<RelocInfo::Mode>(extra_tag))) return;
+ int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM;
+ if (SetMode(static_cast<RelocInfo::Mode>(rmode))) return;
}
}
}
@@ -402,6 +522,8 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
end_ = code->relocation_start();
done_ = false;
mode_mask_ = mode_mask;
+ last_id_ = 0;
+ last_position_ = 0;
if (mode_mask_ == 0) pos_ = end_;
next();
}
@@ -415,6 +537,8 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
end_ = pos_ - desc.reloc_size;
done_ = false;
mode_mask_ = mode_mask;
+ last_id_ = 0;
+ last_position_ = 0;
if (mode_mask_ == 0) pos_ = end_;
next();
}
@@ -442,6 +566,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "debug break";
case RelocInfo::CODE_TARGET:
return "code target";
+ case RelocInfo::CODE_TARGET_WITH_ID:
+ return "code target with id";
case RelocInfo::GLOBAL_PROPERTY_CELL:
return "global property cell";
case RelocInfo::RUNTIME_ENTRY:
@@ -488,9 +614,13 @@ void RelocInfo::Print(FILE* out) {
Code* code = Code::GetCodeFromTargetAddress(target_address());
PrintF(out, " (%s) (%p)", Code::Kind2String(code->kind()),
target_address());
+ if (rmode_ == CODE_TARGET_WITH_ID) {
+      PrintF(out, " (id=%d)", static_cast<int>(data_));
+ }
} else if (IsPosition(rmode_)) {
PrintF(out, " (%" V8_PTR_PREFIX "d)", data());
- } else if (rmode_ == RelocInfo::RUNTIME_ENTRY) {
+ } else if (rmode_ == RelocInfo::RUNTIME_ENTRY &&
+ Isolate::Current()->deoptimizer_data() != NULL) {
// Deoptimization bailouts are stored as runtime entries.
int id = Deoptimizer::GetDeoptimizationId(
target_address(), Deoptimizer::EAGER);
@@ -520,13 +650,14 @@ void RelocInfo::Verify() {
#endif
case CONSTRUCT_CALL:
case CODE_TARGET_CONTEXT:
+ case CODE_TARGET_WITH_ID:
case CODE_TARGET: {
// convert inline target address to code object
Address addr = target_address();
ASSERT(addr != NULL);
// Check that we can find the right code object.
Code* code = Code::GetCodeFromTargetAddress(addr);
- Object* found = Heap::FindCodeObject(addr);
+ Object* found = HEAP->FindCodeObject(addr);
ASSERT(found->IsCode());
ASSERT(code->address() == HeapObject::cast(found)->address());
break;
@@ -552,153 +683,184 @@ void RelocInfo::Verify() {
// -----------------------------------------------------------------------------
// Implementation of ExternalReference
-ExternalReference::ExternalReference(Builtins::CFunctionId id)
- : address_(Redirect(Builtins::c_function_address(id))) {}
+ExternalReference::ExternalReference(Builtins::CFunctionId id, Isolate* isolate)
+ : address_(Redirect(isolate, Builtins::c_function_address(id))) {}
ExternalReference::ExternalReference(
- ApiFunction* fun, Type type = ExternalReference::BUILTIN_CALL)
- : address_(Redirect(fun->address(), type)) {}
+ ApiFunction* fun,
+ Type type = ExternalReference::BUILTIN_CALL,
+ Isolate* isolate = NULL)
+ : address_(Redirect(isolate, fun->address(), type)) {}
-ExternalReference::ExternalReference(Builtins::Name name)
- : address_(Builtins::builtin_address(name)) {}
+ExternalReference::ExternalReference(Builtins::Name name, Isolate* isolate)
+ : address_(isolate->builtins()->builtin_address(name)) {}
-ExternalReference::ExternalReference(Runtime::FunctionId id)
- : address_(Redirect(Runtime::FunctionForId(id)->entry)) {}
+ExternalReference::ExternalReference(Runtime::FunctionId id,
+ Isolate* isolate)
+ : address_(Redirect(isolate, Runtime::FunctionForId(id)->entry)) {}
-ExternalReference::ExternalReference(Runtime::Function* f)
- : address_(Redirect(f->entry)) {}
+ExternalReference::ExternalReference(const Runtime::Function* f,
+ Isolate* isolate)
+ : address_(Redirect(isolate, f->entry)) {}
-ExternalReference::ExternalReference(const IC_Utility& ic_utility)
- : address_(Redirect(ic_utility.address())) {}
+ExternalReference ExternalReference::isolate_address() {
+ return ExternalReference(Isolate::Current());
+}
+
+
+ExternalReference::ExternalReference(const IC_Utility& ic_utility,
+ Isolate* isolate)
+ : address_(Redirect(isolate, ic_utility.address())) {}
#ifdef ENABLE_DEBUGGER_SUPPORT
-ExternalReference::ExternalReference(const Debug_Address& debug_address)
- : address_(debug_address.address()) {}
+ExternalReference::ExternalReference(const Debug_Address& debug_address,
+ Isolate* isolate)
+ : address_(debug_address.address(isolate)) {}
#endif
ExternalReference::ExternalReference(StatsCounter* counter)
: address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
-ExternalReference::ExternalReference(Top::AddressId id)
- : address_(Top::get_address_from_id(id)) {}
+ExternalReference::ExternalReference(Isolate::AddressId id, Isolate* isolate)
+ : address_(isolate->get_address_from_id(id)) {}
ExternalReference::ExternalReference(const SCTableReference& table_ref)
: address_(table_ref.address()) {}
-ExternalReference ExternalReference::perform_gc_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(Runtime::PerformGC)));
+ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
+ return ExternalReference(Redirect(isolate,
+ FUNCTION_ADDR(Runtime::PerformGC)));
}
-ExternalReference ExternalReference::fill_heap_number_with_random_function() {
- return
- ExternalReference(Redirect(FUNCTION_ADDR(V8::FillHeapNumberWithRandom)));
+ExternalReference ExternalReference::fill_heap_number_with_random_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate,
+ FUNCTION_ADDR(V8::FillHeapNumberWithRandom)));
}
-ExternalReference ExternalReference::delete_handle_scope_extensions() {
- return ExternalReference(Redirect(FUNCTION_ADDR(
- HandleScope::DeleteExtensions)));
+ExternalReference ExternalReference::delete_handle_scope_extensions(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate,
+ FUNCTION_ADDR(HandleScope::DeleteExtensions)));
}
-ExternalReference ExternalReference::random_uint32_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(V8::Random)));
+ExternalReference ExternalReference::random_uint32_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(V8::Random)));
}
-ExternalReference ExternalReference::transcendental_cache_array_address() {
- return ExternalReference(TranscendentalCache::cache_array_address());
+ExternalReference ExternalReference::transcendental_cache_array_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->transcendental_cache()->cache_array_address());
}
-ExternalReference ExternalReference::new_deoptimizer_function() {
+ExternalReference ExternalReference::new_deoptimizer_function(
+ Isolate* isolate) {
return ExternalReference(
- Redirect(FUNCTION_ADDR(Deoptimizer::New)));
+ Redirect(isolate, FUNCTION_ADDR(Deoptimizer::New)));
}
-ExternalReference ExternalReference::compute_output_frames_function() {
+ExternalReference ExternalReference::compute_output_frames_function(
+ Isolate* isolate) {
return ExternalReference(
- Redirect(FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
+ Redirect(isolate, FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
}
-ExternalReference ExternalReference::global_contexts_list() {
- return ExternalReference(Heap::global_contexts_list_address());
+ExternalReference ExternalReference::global_contexts_list(Isolate* isolate) {
+ return ExternalReference(isolate->heap()->global_contexts_list_address());
}
-ExternalReference ExternalReference::keyed_lookup_cache_keys() {
- return ExternalReference(KeyedLookupCache::keys_address());
+ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
+ return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
}
-ExternalReference ExternalReference::keyed_lookup_cache_field_offsets() {
- return ExternalReference(KeyedLookupCache::field_offsets_address());
+ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->keyed_lookup_cache()->field_offsets_address());
}
-ExternalReference ExternalReference::the_hole_value_location() {
- return ExternalReference(Factory::the_hole_value().location());
+ExternalReference ExternalReference::the_hole_value_location(Isolate* isolate) {
+ return ExternalReference(isolate->factory()->the_hole_value().location());
}
-ExternalReference ExternalReference::arguments_marker_location() {
- return ExternalReference(Factory::arguments_marker().location());
+ExternalReference ExternalReference::arguments_marker_location(
+ Isolate* isolate) {
+ return ExternalReference(isolate->factory()->arguments_marker().location());
}
-ExternalReference ExternalReference::roots_address() {
- return ExternalReference(Heap::roots_address());
+ExternalReference ExternalReference::roots_address(Isolate* isolate) {
+ return ExternalReference(isolate->heap()->roots_address());
}
-ExternalReference ExternalReference::address_of_stack_limit() {
- return ExternalReference(StackGuard::address_of_jslimit());
+ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) {
+ return ExternalReference(isolate->stack_guard()->address_of_jslimit());
}
-ExternalReference ExternalReference::address_of_real_stack_limit() {
- return ExternalReference(StackGuard::address_of_real_jslimit());
+ExternalReference ExternalReference::address_of_real_stack_limit(
+ Isolate* isolate) {
+ return ExternalReference(isolate->stack_guard()->address_of_real_jslimit());
}
-ExternalReference ExternalReference::address_of_regexp_stack_limit() {
- return ExternalReference(RegExpStack::limit_address());
+ExternalReference ExternalReference::address_of_regexp_stack_limit(
+ Isolate* isolate) {
+ return ExternalReference(isolate->regexp_stack()->limit_address());
}
-ExternalReference ExternalReference::new_space_start() {
- return ExternalReference(Heap::NewSpaceStart());
+ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
+ return ExternalReference(isolate->heap()->NewSpaceStart());
}
-ExternalReference ExternalReference::new_space_mask() {
- return ExternalReference(reinterpret_cast<Address>(Heap::NewSpaceMask()));
+ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
+ Address mask = reinterpret_cast<Address>(isolate->heap()->NewSpaceMask());
+ return ExternalReference(mask);
}
-ExternalReference ExternalReference::new_space_allocation_top_address() {
- return ExternalReference(Heap::NewSpaceAllocationTopAddress());
+ExternalReference ExternalReference::new_space_allocation_top_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->heap()->NewSpaceAllocationTopAddress());
}
-ExternalReference ExternalReference::heap_always_allocate_scope_depth() {
- return ExternalReference(Heap::always_allocate_scope_depth_address());
+ExternalReference ExternalReference::heap_always_allocate_scope_depth(
+ Isolate* isolate) {
+ Heap* heap = isolate->heap();
+ return ExternalReference(heap->always_allocate_scope_depth_address());
}
-ExternalReference ExternalReference::new_space_allocation_limit_address() {
- return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
+ExternalReference ExternalReference::new_space_allocation_limit_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->heap()->NewSpaceAllocationLimitAddress());
}
@@ -717,8 +879,9 @@ ExternalReference ExternalReference::handle_scope_limit_address() {
}
-ExternalReference ExternalReference::scheduled_exception_address() {
- return ExternalReference(Top::scheduled_exception_address());
+ExternalReference ExternalReference::scheduled_exception_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->scheduled_exception_address());
}
@@ -740,15 +903,34 @@ ExternalReference ExternalReference::address_of_minus_zero() {
}
+ExternalReference ExternalReference::address_of_zero() {
+ return ExternalReference(reinterpret_cast<void*>(
+ const_cast<double*>(&DoubleConstant::zero)));
+}
+
+
+ExternalReference ExternalReference::address_of_uint8_max_value() {
+ return ExternalReference(reinterpret_cast<void*>(
+ const_cast<double*>(&DoubleConstant::uint8_max_value)));
+}
+
+
ExternalReference ExternalReference::address_of_negative_infinity() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::negative_infinity)));
}
+ExternalReference ExternalReference::address_of_nan() {
+ return ExternalReference(reinterpret_cast<void*>(
+ const_cast<double*>(&DoubleConstant::nan)));
+}
+
+
#ifndef V8_INTERPRETED_REGEXP
-ExternalReference ExternalReference::re_check_stack_guard_state() {
+ExternalReference ExternalReference::re_check_stack_guard_state(
+ Isolate* isolate) {
Address function;
#ifdef V8_TARGET_ARCH_X64
function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
@@ -756,19 +938,23 @@ ExternalReference ExternalReference::re_check_stack_guard_state() {
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
+#elif V8_TARGET_ARCH_MIPS
+ function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#else
UNREACHABLE();
#endif
- return ExternalReference(Redirect(function));
+ return ExternalReference(Redirect(isolate, function));
}
-ExternalReference ExternalReference::re_grow_stack() {
+ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) {
return ExternalReference(
- Redirect(FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
+ Redirect(isolate, FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
}
-ExternalReference ExternalReference::re_case_insensitive_compare_uc16() {
+ExternalReference ExternalReference::re_case_insensitive_compare_uc16(
+ Isolate* isolate) {
return ExternalReference(Redirect(
+ isolate,
FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
}
@@ -777,16 +963,21 @@ ExternalReference ExternalReference::re_word_character_map() {
NativeRegExpMacroAssembler::word_character_map_address());
}
-ExternalReference ExternalReference::address_of_static_offsets_vector() {
- return ExternalReference(OffsetsVector::static_offsets_vector_address());
+ExternalReference ExternalReference::address_of_static_offsets_vector(
+ Isolate* isolate) {
+ return ExternalReference(
+ OffsetsVector::static_offsets_vector_address(isolate));
}
-ExternalReference ExternalReference::address_of_regexp_stack_memory_address() {
- return ExternalReference(RegExpStack::memory_address());
+ExternalReference ExternalReference::address_of_regexp_stack_memory_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->regexp_stack()->memory_address());
}
-ExternalReference ExternalReference::address_of_regexp_stack_memory_size() {
- return ExternalReference(RegExpStack::memory_size_address());
+ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
+ Isolate* isolate) {
+ return ExternalReference(isolate->regexp_stack()->memory_size_address());
}
#endif // V8_INTERPRETED_REGEXP
@@ -817,6 +1008,45 @@ static double mod_two_doubles(double x, double y) {
}
+static double math_sin_double(double x) {
+ return sin(x);
+}
+
+
+static double math_cos_double(double x) {
+ return cos(x);
+}
+
+
+static double math_log_double(double x) {
+ return log(x);
+}
+
+
+ExternalReference ExternalReference::math_sin_double_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate,
+ FUNCTION_ADDR(math_sin_double),
+ BUILTIN_FP_CALL));
+}
+
+
+ExternalReference ExternalReference::math_cos_double_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate,
+ FUNCTION_ADDR(math_cos_double),
+ BUILTIN_FP_CALL));
+}
+
+
+ExternalReference ExternalReference::math_log_double_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate,
+ FUNCTION_ADDR(math_log_double),
+ BUILTIN_FP_CALL));
+}
+
+
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
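Editorial note: the body of power_double_int is elided from this hunk; the binary decomposition the comment refers to is ordinary exponentiation by squaring. A sketch of that technique (not the V8 implementation):

    #include <cstdint>

    // Sketch: compute x^y for integer y using O(log |y|) multiplications.
    double PowerDoubleInt(double x, int y) {
      uint32_t n = (y < 0) ? 0u - static_cast<uint32_t>(y)
                           : static_cast<uint32_t>(y);
      double result = 1.0;
      for (double base = x; n != 0; n >>= 1) {
        if (n & 1) result *= base;  // this bit of y contributes base^(2^i)
        base *= base;
      }
      return (y < 0) ? 1.0 / result : result;
    }
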
@@ -852,15 +1082,19 @@ double power_double_double(double x, double y) {
}
-ExternalReference ExternalReference::power_double_double_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double),
- FP_RETURN_CALL));
+ExternalReference ExternalReference::power_double_double_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate,
+ FUNCTION_ADDR(power_double_double),
+ BUILTIN_FP_FP_CALL));
}
-ExternalReference ExternalReference::power_double_int_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int),
- FP_RETURN_CALL));
+ExternalReference ExternalReference::power_double_int_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate,
+ FUNCTION_ADDR(power_double_int),
+ BUILTIN_FP_INT_CALL));
}
@@ -871,7 +1105,7 @@ static int native_compare_doubles(double y, double x) {
ExternalReference ExternalReference::double_fp_operation(
- Token::Value operation) {
+ Token::Value operation, Isolate* isolate) {
typedef double BinaryFPOperation(double x, double y);
BinaryFPOperation* function = NULL;
switch (operation) {
@@ -893,29 +1127,28 @@ ExternalReference ExternalReference::double_fp_operation(
default:
UNREACHABLE();
}
- // Passing true as 2nd parameter indicates that they return an fp value.
- return ExternalReference(Redirect(FUNCTION_ADDR(function), FP_RETURN_CALL));
+ return ExternalReference(Redirect(isolate,
+ FUNCTION_ADDR(function),
+ BUILTIN_FP_FP_CALL));
}
-ExternalReference ExternalReference::compare_doubles() {
- return ExternalReference(Redirect(FUNCTION_ADDR(native_compare_doubles),
- BUILTIN_CALL));
+ExternalReference ExternalReference::compare_doubles(Isolate* isolate) {
+ return ExternalReference(Redirect(isolate,
+ FUNCTION_ADDR(native_compare_doubles),
+ BUILTIN_COMPARE_CALL));
}
-ExternalReference::ExternalReferenceRedirector*
- ExternalReference::redirector_ = NULL;
-
-
#ifdef ENABLE_DEBUGGER_SUPPORT
-ExternalReference ExternalReference::debug_break() {
- return ExternalReference(Redirect(FUNCTION_ADDR(Debug::Break)));
+ExternalReference ExternalReference::debug_break(Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug_Break)));
}
-ExternalReference ExternalReference::debug_step_in_fp_address() {
- return ExternalReference(Debug::step_in_fp_addr());
+ExternalReference ExternalReference::debug_step_in_fp_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->debug()->step_in_fp_addr());
}
#endif
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 9b9eea6c8..bced11fec 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -30,19 +30,34 @@
// The original source code covered by the above license has been
// modified significantly by Google Inc.
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_
+#include "allocation.h"
#include "gdb-jit.h"
#include "runtime.h"
-#include "top.h"
#include "token.h"
namespace v8 {
namespace internal {
+const unsigned kNoASTId = -1;
+// -----------------------------------------------------------------------------
+// Platform independent assembler base class.
+
+class AssemblerBase: public Malloced {
+ public:
+ explicit AssemblerBase(Isolate* isolate);
+
+ Isolate* isolate() const { return isolate_; }
+ int jit_cookie() { return jit_cookie_; }
+
+ private:
+ Isolate* isolate_;
+ int jit_cookie_;
+};
// -----------------------------------------------------------------------------
// Common double constants.
@@ -52,7 +67,10 @@ class DoubleConstant: public AllStatic {
static const double min_int;
static const double one_half;
static const double minus_zero;
+ static const double zero;
+ static const double uint8_max_value;
static const double negative_infinity;
+ static const double nan;
};
@@ -64,18 +82,32 @@ class DoubleConstant: public AllStatic {
class Label BASE_EMBEDDED {
public:
- INLINE(Label()) { Unuse(); }
- INLINE(~Label()) { ASSERT(!is_linked()); }
+ enum Distance {
+ kNear, kFar
+ };
+
+ INLINE(Label()) {
+ Unuse();
+ UnuseNear();
+ }
- INLINE(void Unuse()) { pos_ = 0; }
+ INLINE(~Label()) {
+ ASSERT(!is_linked());
+ ASSERT(!is_near_linked());
+ }
- INLINE(bool is_bound() const) { return pos_ < 0; }
- INLINE(bool is_unused() const) { return pos_ == 0; }
- INLINE(bool is_linked() const) { return pos_ > 0; }
+ INLINE(void Unuse()) { pos_ = 0; }
+ INLINE(void UnuseNear()) { near_link_pos_ = 0; }
+
+ INLINE(bool is_bound() const) { return pos_ < 0; }
+ INLINE(bool is_unused() const) { return pos_ == 0 && near_link_pos_ == 0; }
+ INLINE(bool is_linked() const) { return pos_ > 0; }
+ INLINE(bool is_near_linked() const) { return near_link_pos_ > 0; }
// Returns the position of bound or linked labels. Cannot be used
// for unused labels.
int pos() const;
+ int near_link_pos() const { return near_link_pos_ - 1; }
private:
// pos_ encodes both the binding state (via its sign)
@@ -86,75 +118,31 @@ class Label BASE_EMBEDDED {
// pos_ > 0 linked label, pos() returns the last reference position
int pos_;
+ // Behaves like |pos_| in the "> 0" case, but for near jumps to this label.
+ int near_link_pos_;
+
void bind_to(int pos) {
pos_ = -pos - 1;
ASSERT(is_bound());
}
- void link_to(int pos) {
- pos_ = pos + 1;
- ASSERT(is_linked());
+ void link_to(int pos, Distance distance = kFar) {
+ if (distance == kNear) {
+ near_link_pos_ = pos + 1;
+ ASSERT(is_near_linked());
+ } else {
+ pos_ = pos + 1;
+ ASSERT(is_linked());
+ }
}
friend class Assembler;
friend class RegexpAssembler;
friend class Displacement;
- friend class ShadowTarget;
friend class RegExpMacroAssemblerIrregexp;
};
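
Editorial note: the sign trick in pos_ is worth spelling out. All three states fit in one int, and a zero-initialized label is correctly "unused": 0 means unused, p + 1 means linked with last reference at p, and -p - 1 means bound at p. A sketch of the encoding (illustrative; Label::pos() in assembler.cc decodes accordingly):

    // Sketch of Label's pos_ encoding.
    int EncodeLinked(int p) { return p + 1; }   // pos_ > 0
    int EncodeBound(int p)  { return -p - 1; }  // pos_ < 0
    int DecodePos(int pos) {
      return (pos < 0) ? -pos - 1   // bound
                       : pos - 1;   // linked (meaningless when unused)
    }
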
// -----------------------------------------------------------------------------
-// NearLabels are labels used for short jumps (in Intel jargon).
-// NearLabels should be used if it can be guaranteed that the jump range is
-// within -128 to +127. We already use short jumps when jumping backwards,
-// so using a NearLabel will only have performance impact if used for forward
-// jumps.
-class NearLabel BASE_EMBEDDED {
- public:
- NearLabel() { Unuse(); }
- ~NearLabel() { ASSERT(!is_linked()); }
-
- void Unuse() {
- pos_ = -1;
- unresolved_branches_ = 0;
-#ifdef DEBUG
- for (int i = 0; i < kMaxUnresolvedBranches; i++) {
- unresolved_positions_[i] = -1;
- }
-#endif
- }
-
- int pos() {
- ASSERT(is_bound());
- return pos_;
- }
-
- bool is_bound() { return pos_ >= 0; }
- bool is_linked() { return !is_bound() && unresolved_branches_ > 0; }
- bool is_unused() { return !is_bound() && unresolved_branches_ == 0; }
-
- void bind_to(int position) {
- ASSERT(!is_bound());
- pos_ = position;
- }
-
- void link_to(int position) {
- ASSERT(!is_bound());
- ASSERT(unresolved_branches_ < kMaxUnresolvedBranches);
- unresolved_positions_[unresolved_branches_++] = position;
- }
-
- private:
- static const int kMaxUnresolvedBranches = 8;
- int pos_;
- int unresolved_branches_;
- int unresolved_positions_[kMaxUnresolvedBranches];
-
- friend class Assembler;
-};
-
-
-// -----------------------------------------------------------------------------
// Relocation information
@@ -197,10 +185,11 @@ class RelocInfo BASE_EMBEDDED {
enum Mode {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
+ CODE_TARGET, // Code target which is not any of the above.
+ CODE_TARGET_WITH_ID,
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
CODE_TARGET_CONTEXT, // Code target used for contextual loads and stores.
DEBUG_BREAK, // Code target for the debugger statement.
- CODE_TARGET, // Code target which is not any of the above.
EMBEDDED_OBJECT,
GLOBAL_PROPERTY_CELL,
@@ -216,10 +205,12 @@ class RelocInfo BASE_EMBEDDED {
// add more as needed
// Pseudo-types
- NUMBER_OF_MODES, // must be no greater than 14 - see RelocInfoWriter
+ NUMBER_OF_MODES, // There are at most 14 modes with noncompact encoding.
NONE, // never recorded
- LAST_CODE_ENUM = CODE_TARGET,
- LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL
+ LAST_CODE_ENUM = DEBUG_BREAK,
+ LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL,
+ // Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
+ LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID
};
@@ -320,7 +311,7 @@ class RelocInfo BASE_EMBEDDED {
INLINE(void set_call_object(Object* target));
INLINE(Object** call_object_address());
- template<typename StaticVisitor> inline void Visit();
+ template<typename StaticVisitor> inline void Visit(Heap* heap);
inline void Visit(ObjectVisitor* v);
// Patch the code with some other code.
@@ -349,7 +340,8 @@ class RelocInfo BASE_EMBEDDED {
static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
static const int kPositionMask = 1 << POSITION | 1 << STATEMENT_POSITION;
- static const int kDebugMask = kPositionMask | 1 << COMMENT;
+ static const int kDataMask =
+ (1 << CODE_TARGET_WITH_ID) | kPositionMask | (1 << COMMENT);
static const int kApplyMask; // Modes affected by apply. Depends on arch.
private:
@@ -360,6 +352,19 @@ class RelocInfo BASE_EMBEDDED {
byte* pc_;
Mode rmode_;
intptr_t data_;
+#ifdef V8_TARGET_ARCH_MIPS
+ // Code and Embedded Object pointers in mips are stored split
+ // across two consecutive 32-bit instructions. Heap management
+ // routines expect to access these pointers indirectly. The following
+ // location provides a place for these pointers to exist natually
+ // when accessed via the Iterator.
+  // location provides a place for these pointers to exist naturally
+ // External-reference pointers are also split across instruction-pairs
+ // in mips, but are accessed via indirect pointers. This location
+ // provides a place for that pointer to exist naturally. Its address
+ // is returned by RelocInfo::target_reference_address().
+ Address reconstructed_adr_ptr_;
+#endif // V8_TARGET_ARCH_MIPS
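Editorial note: on 32-bit MIPS a full pointer cannot fit in one instruction's immediate field, so embedded pointers are materialized by an instruction pair (typically lui for the high half, ori for the low half); the fields added above give heap-management code a real word to point at once the halves are recombined. A sketch of the recombination (assumption: standard lui/ori constant loading, not V8's exact accessor):

    #include <cstdint>

    // Sketch: rebuild a 32-bit pointer split across a lui/ori pair's
    // 16-bit immediates.
    uint32_t ReconstructPointer(uint16_t hi_imm, uint16_t lo_imm) {
      return (static_cast<uint32_t>(hi_imm) << 16) | lo_imm;
    }
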
friend class RelocIterator;
};
@@ -368,9 +373,14 @@ class RelocInfo BASE_EMBEDDED {
// lower addresses.
class RelocInfoWriter BASE_EMBEDDED {
public:
- RelocInfoWriter() : pos_(NULL), last_pc_(NULL), last_data_(0) {}
- RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc),
- last_data_(0) {}
+ RelocInfoWriter() : pos_(NULL),
+ last_pc_(NULL),
+ last_id_(0),
+ last_position_(0) {}
+ RelocInfoWriter(byte* pos, byte* pc) : pos_(pos),
+ last_pc_(pc),
+ last_id_(0),
+ last_position_(0) {}
byte* pos() const { return pos_; }
byte* last_pc() const { return last_pc_; }
@@ -395,13 +405,15 @@ class RelocInfoWriter BASE_EMBEDDED {
inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);
inline void WriteTaggedPC(uint32_t pc_delta, int tag);
inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
+ inline void WriteExtraTaggedIntData(int data_delta, int top_tag);
inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
inline void WriteTaggedData(intptr_t data_delta, int tag);
inline void WriteExtraTag(int extra_tag, int top_tag);
byte* pos_;
byte* last_pc_;
- intptr_t last_data_;
+ int last_id_;
+ int last_position_;
DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
};
@@ -443,12 +455,13 @@ class RelocIterator: public Malloced {
int GetTopTag();
void ReadTaggedPC();
void AdvanceReadPC();
+ void AdvanceReadId();
+ void AdvanceReadPosition();
void AdvanceReadData();
void AdvanceReadVariableLengthPCJump();
- int GetPositionTypeTag();
- void ReadTaggedData();
-
- static RelocInfo::Mode DebugInfoModeFromTag(int tag);
+ int GetLocatableTypeTag();
+ void ReadTaggedId();
+ void ReadTaggedPosition();
// If the given mode is wanted, set it in rinfo_ and return true.
// Else return false. Used for efficiently skipping unwanted modes.
@@ -461,6 +474,8 @@ class RelocIterator: public Malloced {
RelocInfo rinfo_;
bool done_;
int mode_mask_;
+ int last_id_;
+ int last_position_;
DISALLOW_COPY_AND_ASSIGN(RelocIterator);
};
@@ -489,9 +504,21 @@ class ExternalReference BASE_EMBEDDED {
// MaybeObject* f(v8::internal::Arguments).
BUILTIN_CALL, // default
+    // Builtin that takes floating-point arguments and returns an int.
+ // int f(double, double).
+ BUILTIN_COMPARE_CALL,
+
// Builtin call that returns floating point.
// double f(double, double).
- FP_RETURN_CALL,
+ BUILTIN_FP_FP_CALL,
+
+ // Builtin call that returns floating point.
+ // double f(double).
+ BUILTIN_FP_CALL,
+
+ // Builtin call that returns floating point.
+ // double f(double, int).
+ BUILTIN_FP_INT_CALL,
// Direct call to API function callback.
// Handle<Value> f(v8::Arguments&)
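
The new call types distinguish C calling conventions so that a simulator can marshal arguments correctly. As a sketch of how a factory might tag one of them (assumed code, not part of this patch; FUNCTION_ADDR and power_double_int come from elsewhere in V8, and the exact factory body is an assumption):

  ExternalReference ExternalReference::power_double_int_function(
      Isolate* isolate) {
    // double f(double, int): tagged BUILTIN_FP_INT_CALL so a redirector
    // can marshal the mixed double/int arguments.
    return ExternalReference(Redirect(isolate,
                                      FUNCTION_ADDR(power_double_int),
                                      BUILTIN_FP_INT_CALL));
  }
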
@@ -504,117 +531,131 @@ class ExternalReference BASE_EMBEDDED {
typedef void* ExternalReferenceRedirector(void* original, Type type);
- explicit ExternalReference(Builtins::CFunctionId id);
+ ExternalReference(Builtins::CFunctionId id, Isolate* isolate);
- explicit ExternalReference(ApiFunction* ptr, Type type);
+ ExternalReference(ApiFunction* ptr, Type type, Isolate* isolate);
- explicit ExternalReference(Builtins::Name name);
+ ExternalReference(Builtins::Name name, Isolate* isolate);
- explicit ExternalReference(Runtime::FunctionId id);
+ ExternalReference(Runtime::FunctionId id, Isolate* isolate);
- explicit ExternalReference(Runtime::Function* f);
+ ExternalReference(const Runtime::Function* f, Isolate* isolate);
- explicit ExternalReference(const IC_Utility& ic_utility);
+ ExternalReference(const IC_Utility& ic_utility, Isolate* isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
- explicit ExternalReference(const Debug_Address& debug_address);
+ ExternalReference(const Debug_Address& debug_address, Isolate* isolate);
#endif
explicit ExternalReference(StatsCounter* counter);
- explicit ExternalReference(Top::AddressId id);
+ ExternalReference(Isolate::AddressId id, Isolate* isolate);
explicit ExternalReference(const SCTableReference& table_ref);
+ // Isolate::Current() as an external reference.
+ static ExternalReference isolate_address();
+
// One-of-a-kind references. These references are not part of a general
// pattern. This means that they have to be added to the
// ExternalReferenceTable in serialize.cc manually.
- static ExternalReference perform_gc_function();
- static ExternalReference fill_heap_number_with_random_function();
- static ExternalReference random_uint32_function();
- static ExternalReference transcendental_cache_array_address();
- static ExternalReference delete_handle_scope_extensions();
+ static ExternalReference perform_gc_function(Isolate* isolate);
+ static ExternalReference fill_heap_number_with_random_function(
+ Isolate* isolate);
+ static ExternalReference random_uint32_function(Isolate* isolate);
+ static ExternalReference transcendental_cache_array_address(Isolate* isolate);
+ static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
// Deoptimization support.
- static ExternalReference new_deoptimizer_function();
- static ExternalReference compute_output_frames_function();
- static ExternalReference global_contexts_list();
+ static ExternalReference new_deoptimizer_function(Isolate* isolate);
+ static ExternalReference compute_output_frames_function(Isolate* isolate);
+ static ExternalReference global_contexts_list(Isolate* isolate);
// Static data in the keyed lookup cache.
- static ExternalReference keyed_lookup_cache_keys();
- static ExternalReference keyed_lookup_cache_field_offsets();
+ static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
+ static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
// Static variable Factory::the_hole_value.location()
- static ExternalReference the_hole_value_location();
+ static ExternalReference the_hole_value_location(Isolate* isolate);
// Static variable Factory::arguments_marker.location()
- static ExternalReference arguments_marker_location();
+ static ExternalReference arguments_marker_location(Isolate* isolate);
// Static variable Heap::roots_address()
- static ExternalReference roots_address();
+ static ExternalReference roots_address(Isolate* isolate);
// Static variable StackGuard::address_of_jslimit()
- static ExternalReference address_of_stack_limit();
+ static ExternalReference address_of_stack_limit(Isolate* isolate);
// Static variable StackGuard::address_of_real_jslimit()
- static ExternalReference address_of_real_stack_limit();
+ static ExternalReference address_of_real_stack_limit(Isolate* isolate);
// Static variable RegExpStack::limit_address()
- static ExternalReference address_of_regexp_stack_limit();
+ static ExternalReference address_of_regexp_stack_limit(Isolate* isolate);
// Static variables for RegExp.
- static ExternalReference address_of_static_offsets_vector();
- static ExternalReference address_of_regexp_stack_memory_address();
- static ExternalReference address_of_regexp_stack_memory_size();
+ static ExternalReference address_of_static_offsets_vector(Isolate* isolate);
+ static ExternalReference address_of_regexp_stack_memory_address(
+ Isolate* isolate);
+ static ExternalReference address_of_regexp_stack_memory_size(
+ Isolate* isolate);
// Static variable Heap::NewSpaceStart()
- static ExternalReference new_space_start();
- static ExternalReference new_space_mask();
- static ExternalReference heap_always_allocate_scope_depth();
+ static ExternalReference new_space_start(Isolate* isolate);
+ static ExternalReference new_space_mask(Isolate* isolate);
+ static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate);
// Used for fast allocation in generated code.
- static ExternalReference new_space_allocation_top_address();
- static ExternalReference new_space_allocation_limit_address();
+ static ExternalReference new_space_allocation_top_address(Isolate* isolate);
+ static ExternalReference new_space_allocation_limit_address(Isolate* isolate);
- static ExternalReference double_fp_operation(Token::Value operation);
- static ExternalReference compare_doubles();
- static ExternalReference power_double_double_function();
- static ExternalReference power_double_int_function();
+ static ExternalReference double_fp_operation(Token::Value operation,
+ Isolate* isolate);
+ static ExternalReference compare_doubles(Isolate* isolate);
+ static ExternalReference power_double_double_function(Isolate* isolate);
+ static ExternalReference power_double_int_function(Isolate* isolate);
static ExternalReference handle_scope_next_address();
static ExternalReference handle_scope_limit_address();
static ExternalReference handle_scope_level_address();
- static ExternalReference scheduled_exception_address();
+ static ExternalReference scheduled_exception_address(Isolate* isolate);
// Static variables containing common double constants.
static ExternalReference address_of_min_int();
static ExternalReference address_of_one_half();
static ExternalReference address_of_minus_zero();
+ static ExternalReference address_of_zero();
+ static ExternalReference address_of_uint8_max_value();
static ExternalReference address_of_negative_infinity();
+ static ExternalReference address_of_nan();
+
+ static ExternalReference math_sin_double_function(Isolate* isolate);
+ static ExternalReference math_cos_double_function(Isolate* isolate);
+ static ExternalReference math_log_double_function(Isolate* isolate);
Address address() const {return reinterpret_cast<Address>(address_);}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Function Debug::Break()
- static ExternalReference debug_break();
+ static ExternalReference debug_break(Isolate* isolate);
// Used to check if single stepping is enabled in generated code.
- static ExternalReference debug_step_in_fp_address();
+ static ExternalReference debug_step_in_fp_address(Isolate* isolate);
#endif
#ifndef V8_INTERPRETED_REGEXP
// C functions called from RegExp generated code.
// Function NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()
- static ExternalReference re_case_insensitive_compare_uc16();
+ static ExternalReference re_case_insensitive_compare_uc16(Isolate* isolate);
// Function RegExpMacroAssembler*::CheckStackGuardState()
- static ExternalReference re_check_stack_guard_state();
+ static ExternalReference re_check_stack_guard_state(Isolate* isolate);
// Function NativeRegExpMacroAssembler::GrowStack()
- static ExternalReference re_grow_stack();
+ static ExternalReference re_grow_stack(Isolate* isolate);
// byte NativeRegExpMacroAssembler::word_character_bitmap
static ExternalReference re_word_character_map();
@@ -623,30 +664,39 @@ class ExternalReference BASE_EMBEDDED {
// This lets you register a function that rewrites all external references.
// Used by the ARM simulator to catch calls to external references.
- static void set_redirector(ExternalReferenceRedirector* redirector) {
- ASSERT(redirector_ == NULL); // We can't stack them.
- redirector_ = redirector;
+ static void set_redirector(Isolate* isolate,
+ ExternalReferenceRedirector* redirector) {
+ // We can't stack them.
+ ASSERT(isolate->external_reference_redirector() == NULL);
+ isolate->set_external_reference_redirector(
+ reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
}
private:
explicit ExternalReference(void* address)
: address_(address) {}
- static ExternalReferenceRedirector* redirector_;
-
- static void* Redirect(void* address,
+ static void* Redirect(Isolate* isolate,
+ void* address,
Type type = ExternalReference::BUILTIN_CALL) {
- if (redirector_ == NULL) return address;
- void* answer = (*redirector_)(address, type);
+ ExternalReferenceRedirector* redirector =
+ reinterpret_cast<ExternalReferenceRedirector*>(
+ isolate->external_reference_redirector());
+ if (redirector == NULL) return address;
+ void* answer = (*redirector)(address, type);
return answer;
}
- static void* Redirect(Address address_arg,
+ static void* Redirect(Isolate* isolate,
+ Address address_arg,
Type type = ExternalReference::BUILTIN_CALL) {
+ ExternalReferenceRedirector* redirector =
+ reinterpret_cast<ExternalReferenceRedirector*>(
+ isolate->external_reference_redirector());
void* address = reinterpret_cast<void*>(address_arg);
- void* answer = (redirector_ == NULL) ?
+ void* answer = (redirector == NULL) ?
address :
- (*redirector_)(address, type);
+ (*redirector)(address, type);
return answer;
}
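
As a usage sketch for the reworked redirection API (assumed code, not from this patch; SimulatorRedirectCall and WrapForSimulator are hypothetical names), a simulator would now install its redirector per isolate:

  static void* SimulatorRedirectCall(void* original,
                                     ExternalReference::Type type) {
    // Hypothetical: wrap the C function so simulated code traps into
    // the simulator instead of calling native code directly.
    return WrapForSimulator(original, type);
  }

  void InstallSimulatorRedirector(Isolate* isolate) {
    // Must run before any ExternalReference is created for this isolate;
    // redirectors cannot be stacked (see the ASSERT in set_redirector).
    ExternalReference::set_redirector(isolate, &SimulatorRedirectCall);
  }
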
@@ -785,6 +835,28 @@ static inline int NumberOfBitsSet(uint32_t x) {
double power_double_int(double x, int y);
double power_double_double(double x, double y);
+// Helper class for generating code or data associated with the code
+// right after a call instruction. For example, this can be used to
+// generate safepoint data after calls for Crankshaft.
+class CallWrapper {
+ public:
+ CallWrapper() { }
+ virtual ~CallWrapper() { }
+ // Called just before emitting a call. Argument is the size of the generated
+ // call code.
+ virtual void BeforeCall(int call_size) const = 0;
+ // Called just after emitting a call, i.e., at the return site for the call.
+ virtual void AfterCall() const = 0;
+};
+
+class NullCallWrapper : public CallWrapper {
+ public:
+ NullCallWrapper() { }
+ virtual ~NullCallWrapper() { }
+ virtual void BeforeCall(int call_size) const { }
+ virtual void AfterCall() const { }
+};
+
} } // namespace v8::internal
#endif // V8_ASSEMBLER_H_
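
The CallWrapper hook introduced above lets a caller attach bookkeeping to every emitted call. A minimal sketch of a subclass (assumed code, not part of this patch; the List<int> storage is an arbitrary choice) that records the size of each call, e.g. as a starting point for safepoint-style bookkeeping:

  class RecordingCallWrapper : public CallWrapper {
   public:
    explicit RecordingCallWrapper(List<int>* call_sizes)
        : call_sizes_(call_sizes) { }
    virtual ~RecordingCallWrapper() { }
    // Invoked just before the call is emitted; remember its code size.
    virtual void BeforeCall(int call_size) const {
      call_sizes_->Add(call_size);
    }
    // Invoked at the return site; safepoint data would be emitted here.
    virtual void AfterCall() const { }
   private:
    List<int>* call_sizes_;  // owned by the caller
  };
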
diff --git a/deps/v8/src/ast-inl.h b/deps/v8/src/ast-inl.h
index eb81c3a83..c2bd61344 100644
--- a/deps/v8/src/ast-inl.h
+++ b/deps/v8/src/ast-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,7 +31,7 @@
#include "v8.h"
#include "ast.h"
-#include "jump-target-inl.h"
+#include "scopes.h"
namespace v8 {
namespace internal {
@@ -62,7 +62,7 @@ BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type)
IterationStatement::IterationStatement(ZoneStringList* labels)
: BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
body_(NULL),
- continue_target_(JumpTarget::BIDIRECTIONAL),
+ continue_target_(),
osr_entry_id_(GetNextId()) {
}
@@ -102,6 +102,11 @@ ForInStatement::ForInStatement(ZoneStringList* labels)
}
+bool FunctionLiteral::strict_mode() const {
+ return scope()->is_strict_mode();
+}
+
+
} } // namespace v8::internal
#endif // V8_AST_INL_H_
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 772684cf9..8b6cdcee9 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,21 +28,21 @@
#include "v8.h"
#include "ast.h"
-#include "jump-target-inl.h"
#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
+#include "type-info.h"
namespace v8 {
namespace internal {
-unsigned AstNode::current_id_ = 0;
-unsigned AstNode::count_ = 0;
-VariableProxySentinel VariableProxySentinel::this_proxy_(true);
-VariableProxySentinel VariableProxySentinel::identifier_proxy_(false);
-ValidLeftHandSideSentinel ValidLeftHandSideSentinel::instance_;
-Property Property::this_property_(VariableProxySentinel::this_proxy(), NULL, 0);
-Call Call::sentinel_(NULL, NULL, 0);
+AstSentinels::AstSentinels()
+ : this_proxy_(true),
+ identifier_proxy_(false),
+ valid_left_hand_side_sentinel_(),
+ this_property_(&this_proxy_, NULL, 0),
+ call_sentinel_(NULL, NULL, 0) {
+}
// ----------------------------------------------------------------------------
@@ -77,20 +77,23 @@ VariableProxy::VariableProxy(Variable* var)
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
inside_with_(false),
- is_trivial_(false) {
+ is_trivial_(false),
+ position_(RelocInfo::kNoPosition) {
BindTo(var);
}
VariableProxy::VariableProxy(Handle<String> name,
bool is_this,
- bool inside_with)
+ bool inside_with,
+ int position)
: name_(name),
var_(NULL),
is_this_(is_this),
inside_with_(inside_with),
- is_trivial_(false) {
- // names must be canonicalized for fast equality checks
+ is_trivial_(false),
+ position_(position) {
+ // Names must be canonicalized for fast equality checks.
ASSERT(name->IsSymbol());
}
@@ -170,7 +173,7 @@ ObjectLiteral::Property::Property(Literal* key, Expression* value) {
key_ = key;
value_ = value;
Object* k = *key->handle();
- if (k->IsSymbol() && Heap::Proto_symbol()->Equals(String::cast(k))) {
+ if (k->IsSymbol() && HEAP->Proto_symbol()->Equals(String::cast(k))) {
kind_ = PROTOTYPE;
} else if (value_->AsMaterializedLiteral() != NULL) {
kind_ = MATERIALIZED_LITERAL;
@@ -249,10 +252,11 @@ void ObjectLiteral::CalculateEmitStore() {
uint32_t hash;
HashMap* table;
void* key;
+ Factory* factory = Isolate::Current()->factory();
if (handle->IsSymbol()) {
Handle<String> name(String::cast(*handle));
if (name->AsArrayIndex(&hash)) {
- Handle<Object> key_handle = Factory::NewNumberFromUint(hash);
+ Handle<Object> key_handle = factory->NewNumberFromUint(hash);
key = key_handle.location();
table = &elements;
} else {
@@ -269,7 +273,7 @@ void ObjectLiteral::CalculateEmitStore() {
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
const char* str = DoubleToCString(num, buffer);
- Handle<String> name = Factory::NewStringFromAscii(CStrVector(str));
+ Handle<String> name = factory->NewStringFromAscii(CStrVector(str));
key = name.location();
hash = name->Hash();
table = &properties;
@@ -287,86 +291,13 @@ void ObjectLiteral::CalculateEmitStore() {
}
-void TargetCollector::AddTarget(BreakTarget* target) {
+void TargetCollector::AddTarget(Label* target) {
// Add the label to the collector, but discard duplicates.
- int length = targets_->length();
+ int length = targets_.length();
for (int i = 0; i < length; i++) {
- if (targets_->at(i) == target) return;
- }
- targets_->Add(target);
-}
-
-
-bool Expression::GuaranteedSmiResult() {
- BinaryOperation* node = AsBinaryOperation();
- if (node == NULL) return false;
- Token::Value op = node->op();
- switch (op) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- case Token::BIT_XOR:
- case Token::SHL:
- return false;
- break;
- case Token::BIT_OR:
- case Token::BIT_AND: {
- Literal* left = node->left()->AsLiteral();
- Literal* right = node->right()->AsLiteral();
- if (left != NULL && left->handle()->IsSmi()) {
- int value = Smi::cast(*left->handle())->value();
- if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) {
- // Result of bitwise or is always a negative Smi.
- return true;
- }
- if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) {
- // Result of bitwise and is always a positive Smi.
- return true;
- }
- }
- if (right != NULL && right->handle()->IsSmi()) {
- int value = Smi::cast(*right->handle())->value();
- if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) {
- // Result of bitwise or is always a negative Smi.
- return true;
- }
- if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) {
- // Result of bitwise and is always a positive Smi.
- return true;
- }
- }
- return false;
- break;
- }
- case Token::SAR:
- case Token::SHR: {
- Literal* right = node->right()->AsLiteral();
- if (right != NULL && right->handle()->IsSmi()) {
- int value = Smi::cast(*right->handle())->value();
- if ((value & 0x1F) > 1 ||
- (op == Token::SAR && (value & 0x1F) == 1)) {
- return true;
- }
- }
- return false;
- break;
- }
- default:
- UNREACHABLE();
- break;
+ if (targets_[i] == target) return;
}
- return false;
-}
-
-
-void Expression::CopyAnalysisResultsFrom(Expression* other) {
- bitfields_ = other->bitfields_;
- type_ = other->type_;
+ targets_.Add(target);
}
@@ -406,19 +337,192 @@ bool BinaryOperation::ResultOverwriteAllowed() {
}
-BinaryOperation::BinaryOperation(Assignment* assignment) {
- ASSERT(assignment->is_compound());
- op_ = assignment->binary_op();
- left_ = assignment->target();
- right_ = assignment->value();
- pos_ = assignment->position();
- CopyAnalysisResultsFrom(assignment);
+bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
+ Handle<String>* check) {
+ if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false;
+
+ UnaryOperation* left_unary = left_->AsUnaryOperation();
+ UnaryOperation* right_unary = right_->AsUnaryOperation();
+ Literal* left_literal = left_->AsLiteral();
+ Literal* right_literal = right_->AsLiteral();
+
+ // Check for the pattern: typeof <expression> == <string literal>.
+ if (left_unary != NULL && left_unary->op() == Token::TYPEOF &&
+ right_literal != NULL && right_literal->handle()->IsString()) {
+ *expr = left_unary->expression();
+ *check = Handle<String>::cast(right_literal->handle());
+ return true;
+ }
+
+ // Check for the pattern: <string literal> == typeof <expression>.
+ if (right_unary != NULL && right_unary->op() == Token::TYPEOF &&
+ left_literal != NULL && left_literal->handle()->IsString()) {
+ *expr = right_unary->expression();
+ *check = Handle<String>::cast(left_literal->handle());
+ return true;
+ }
+
+ return false;
+}
+
+
+bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
+ if (op_ != Token::EQ_STRICT) return false;
+
+ UnaryOperation* left_unary = left_->AsUnaryOperation();
+ UnaryOperation* right_unary = right_->AsUnaryOperation();
+
+ // Check for the pattern: <expression> === void <literal>.
+ if (right_unary != NULL && right_unary->op() == Token::VOID &&
+ right_unary->expression()->AsLiteral() != NULL) {
+ *expr = left_;
+ return true;
+ }
+
+ // Check for the pattern: void <literal> === <expression>.
+ if (left_unary != NULL && left_unary->op() == Token::VOID &&
+ left_unary->expression()->AsLiteral() != NULL) {
+ *expr = right_;
+ return true;
+ }
+
+ return false;
}
// ----------------------------------------------------------------------------
// Inlining support
+bool Declaration::IsInlineable() const {
+ return proxy()->var()->IsStackAllocated() && fun() == NULL;
+}
+
+
+bool TargetCollector::IsInlineable() const {
+ UNREACHABLE();
+ return false;
+}
+
+
+bool Slot::IsInlineable() const {
+ UNREACHABLE();
+ return false;
+}
+
+
+bool ForInStatement::IsInlineable() const {
+ return false;
+}
+
+
+bool EnterWithContextStatement::IsInlineable() const {
+ return false;
+}
+
+
+bool ExitContextStatement::IsInlineable() const {
+ return false;
+}
+
+
+bool SwitchStatement::IsInlineable() const {
+ return false;
+}
+
+
+bool TryStatement::IsInlineable() const {
+ return false;
+}
+
+
+bool TryCatchStatement::IsInlineable() const {
+ return false;
+}
+
+
+bool TryFinallyStatement::IsInlineable() const {
+ return false;
+}
+
+
+bool DebuggerStatement::IsInlineable() const {
+ return false;
+}
+
+
+bool Throw::IsInlineable() const {
+ return exception()->IsInlineable();
+}
+
+
+bool MaterializedLiteral::IsInlineable() const {
+ // TODO(1322): Allow materialized literals.
+ return false;
+}
+
+
+bool FunctionLiteral::IsInlineable() const {
+ // TODO(1322): Allow materialized literals.
+ return false;
+}
+
+
+bool ThisFunction::IsInlineable() const {
+ return false;
+}
+
+
+bool SharedFunctionInfoLiteral::IsInlineable() const {
+ return false;
+}
+
+
+bool ValidLeftHandSideSentinel::IsInlineable() const {
+ UNREACHABLE();
+ return false;
+}
+
+
+bool ForStatement::IsInlineable() const {
+ return (init() == NULL || init()->IsInlineable())
+ && (cond() == NULL || cond()->IsInlineable())
+ && (next() == NULL || next()->IsInlineable())
+ && body()->IsInlineable();
+}
+
+
+bool WhileStatement::IsInlineable() const {
+ return cond()->IsInlineable()
+ && body()->IsInlineable();
+}
+
+
+bool DoWhileStatement::IsInlineable() const {
+ return cond()->IsInlineable()
+ && body()->IsInlineable();
+}
+
+
+bool ContinueStatement::IsInlineable() const {
+ return true;
+}
+
+
+bool BreakStatement::IsInlineable() const {
+ return true;
+}
+
+
+bool EmptyStatement::IsInlineable() const {
+ return true;
+}
+
+
+bool Literal::IsInlineable() const {
+ return true;
+}
+
+
bool Block::IsInlineable() const {
const int count = statements_.length();
for (int i = 0; i < count; ++i) {
@@ -434,8 +538,9 @@ bool ExpressionStatement::IsInlineable() const {
bool IfStatement::IsInlineable() const {
- return condition()->IsInlineable() && then_statement()->IsInlineable() &&
- else_statement()->IsInlineable();
+ return condition()->IsInlineable()
+ && then_statement()->IsInlineable()
+ && else_statement()->IsInlineable();
}
@@ -486,6 +591,17 @@ bool CallNew::IsInlineable() const {
bool CallRuntime::IsInlineable() const {
+ // Don't try to inline JS runtime calls because we don't (currently) even
+ // optimize them.
+ if (is_jsruntime()) return false;
+ // Don't inline %_ArgumentsLength or %_Arguments because their
+ // implementations will not work: there is no stack frame from which
+ // to get them.
+ if (function()->intrinsic_type == Runtime::INLINE &&
+ (name()->IsEqualTo(CStrVector("_ArgumentsLength")) ||
+ name()->IsEqualTo(CStrVector("_Arguments")))) {
+ return false;
+ }
const int count = arguments()->length();
for (int i = 0; i < count; ++i) {
if (!arguments()->at(i)->IsInlineable()) return false;
@@ -524,14 +640,14 @@ bool CountOperation::IsInlineable() const {
void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
// Record type feedback from the oracle in the AST.
- is_monomorphic_ = oracle->LoadIsMonomorphic(this);
+ is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
if (key()->IsPropertyName()) {
- if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_ArrayLength)) {
+ if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_ArrayLength)) {
is_array_length_ = true;
- } else if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_StringLength)) {
+ } else if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_StringLength)) {
is_string_length_ = true;
} else if (oracle->LoadIsBuiltin(this,
- Builtins::LoadIC_FunctionPrototype)) {
+ Builtins::kLoadIC_FunctionPrototype)) {
is_function_prototype_ = true;
} else {
Literal* lit_key = key()->AsLiteral();
@@ -540,8 +656,13 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
ZoneMapList* types = oracle->LoadReceiverTypes(this, name);
receiver_types_ = types;
}
+ } else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
+ is_string_access_ = true;
} else if (is_monomorphic_) {
monomorphic_receiver_type_ = oracle->LoadMonomorphicReceiverType(this);
+ } else if (oracle->LoadIsMegamorphicWithTypeInfo(this)) {
+ receiver_types_ = new ZoneMapList(kMaxKeyedPolymorphism);
+ oracle->CollectKeyedReceiverTypes(this->id(), receiver_types_);
}
}
@@ -549,7 +670,7 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
Property* prop = target()->AsProperty();
ASSERT(prop != NULL);
- is_monomorphic_ = oracle->StoreIsMonomorphic(this);
+ is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
if (prop->key()->IsPropertyName()) {
Literal* lit_key = prop->key()->AsLiteral();
ASSERT(lit_key != NULL && lit_key->handle()->IsString());
@@ -557,8 +678,23 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
ZoneMapList* types = oracle->StoreReceiverTypes(this, name);
receiver_types_ = types;
} else if (is_monomorphic_) {
- // Record receiver type for monomorphic keyed loads.
+ // Record receiver type for monomorphic keyed stores.
monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
+ } else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
+ receiver_types_ = new ZoneMapList(kMaxKeyedPolymorphism);
+ oracle->CollectKeyedReceiverTypes(this->id(), receiver_types_);
+ }
+}
+
+
+void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+ is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
+ if (is_monomorphic_) {
+ // Record receiver type for monomorphic keyed stores.
+ monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
+ } else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
+ receiver_types_ = new ZoneMapList(kMaxKeyedPolymorphism);
+ oracle->CollectKeyedReceiverTypes(this->id(), receiver_types_);
}
}
@@ -613,38 +749,36 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
- Handle<String> name) {
+ LookupResult* lookup) {
target_ = Handle<JSFunction>::null();
cell_ = Handle<JSGlobalPropertyCell>::null();
- LookupResult lookup;
- global->Lookup(*name, &lookup);
- if (lookup.IsProperty() &&
- lookup.type() == NORMAL &&
- lookup.holder() == *global) {
- cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(&lookup));
- if (cell_->value()->IsJSFunction()) {
- Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
- // If the function is in new space we assume it's more likely to
- // change and thus prefer the general IC code.
- if (!Heap::InNewSpace(*candidate) &&
- CanCallWithoutIC(candidate, arguments()->length())) {
- target_ = candidate;
- return true;
- }
+ ASSERT(lookup->IsProperty() &&
+ lookup->type() == NORMAL &&
+ lookup->holder() == *global);
+ cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(lookup));
+ if (cell_->value()->IsJSFunction()) {
+ Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
+ // If the function is in new space we assume it's more likely to
+ // change and thus prefer the general IC code.
+ if (!HEAP->InNewSpace(*candidate) &&
+ CanCallWithoutIC(candidate, arguments()->length())) {
+ target_ = candidate;
+ return true;
}
}
return false;
}
-void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
+ CallKind call_kind) {
Property* property = expression()->AsProperty();
ASSERT(property != NULL);
// Specialize for the receiver types seen at runtime.
Literal* key = property->key()->AsLiteral();
ASSERT(key != NULL && key->handle()->IsString());
Handle<String> name = Handle<String>::cast(key->handle());
- receiver_types_ = oracle->CallReceiverTypes(this, name);
+ receiver_types_ = oracle->CallReceiverTypes(this, name, call_kind);
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
if (receiver_types_ != NULL) {
@@ -691,7 +825,7 @@ void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
bool AstVisitor::CheckStackOverflow() {
if (stack_overflow_) return true;
- StackLimitCheck check;
+ StackLimitCheck check(isolate_);
if (!check.HasOverflowed()) return false;
return (stack_overflow_ = true);
}
@@ -1062,6 +1196,9 @@ CaseClause::CaseClause(Expression* label,
: label_(label),
statements_(statements),
position_(pos),
- compare_type_(NONE) {}
+ compare_type_(NONE),
+ compare_id_(AstNode::GetNextId()),
+ entry_id_(AstNode::GetNextId()) {
+}
} } // namespace v8::internal
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 2aee5d72a..045404a49 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,10 +28,10 @@
#ifndef V8_AST_H_
#define V8_AST_H_
+#include "allocation.h"
#include "execution.h"
#include "factory.h"
#include "jsregexp.h"
-#include "jump-target.h"
#include "runtime.h"
#include "token.h"
#include "variables.h"
@@ -60,8 +60,8 @@ namespace internal {
V(ContinueStatement) \
V(BreakStatement) \
V(ReturnStatement) \
- V(WithEnterStatement) \
- V(WithExitStatement) \
+ V(EnterWithContextStatement) \
+ V(ExitContextStatement) \
V(SwitchStatement) \
V(DoWhileStatement) \
V(WhileStatement) \
@@ -80,7 +80,6 @@ namespace internal {
V(RegExpLiteral) \
V(ObjectLiteral) \
V(ArrayLiteral) \
- V(CatchExtensionObject) \
V(Assignment) \
V(Throw) \
V(Property) \
@@ -88,7 +87,6 @@ namespace internal {
V(CallNew) \
V(CallRuntime) \
V(UnaryOperation) \
- V(IncrementOperation) \
V(CountOperation) \
V(BinaryOperation) \
V(CompareOperation) \
@@ -134,8 +132,12 @@ class AstNode: public ZoneObject {
#undef DECLARE_TYPE_ENUM
static const int kNoNumber = -1;
+ static const int kFunctionEntryId = 2; // Using 0 could disguise errors.
- AstNode() : id_(GetNextId()) { count_++; }
+ AstNode() {
+ Isolate* isolate = Isolate::Current();
+ isolate->set_ast_node_count(isolate->ast_node_count() + 1);
+ }
virtual ~AstNode() { }
@@ -157,24 +159,21 @@ class AstNode: public ZoneObject {
virtual Slot* AsSlot() { return NULL; }
// True if the node is simple enough for us to inline calls containing it.
- virtual bool IsInlineable() const { return false; }
+ virtual bool IsInlineable() const = 0;
- static int Count() { return count_; }
- static void ResetIds() { current_id_ = 0; }
- unsigned id() const { return id_; }
+ static int Count() { return Isolate::Current()->ast_node_count(); }
+ static void ResetIds() { Isolate::Current()->set_ast_node_id(0); }
protected:
- static unsigned GetNextId() { return current_id_++; }
+ static unsigned GetNextId() { return ReserveIdRange(1); }
static unsigned ReserveIdRange(int n) {
- unsigned tmp = current_id_;
- current_id_ += n;
+ Isolate* isolate = Isolate::Current();
+ unsigned tmp = isolate->ast_node_id();
+ isolate->set_ast_node_id(tmp + n);
return tmp;
}
- private:
- static unsigned current_id_;
- static unsigned count_;
- unsigned id_;
+ friend class CaseClause; // Generates AST IDs.
};
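
With the counters on the isolate, GetNextId() is simply ReserveIdRange(1), and a node that needs several consecutive IDs can reserve them in one step. A short sketch of the pattern (assumption; CaseClause reaches these protected helpers through its friendship with AstNode):

  // Both IDs come from one reservation, so they are consecutive even if
  // other nodes are being constructed against the same isolate counter.
  unsigned base = ReserveIdRange(2);
  unsigned compare_id = base;      // first reserved ID
  unsigned entry_id = base + 1;    // counter has advanced past both
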
@@ -211,7 +210,12 @@ class Expression: public AstNode {
kTest
};
- Expression() : bitfields_(0) {}
+ Expression() : id_(GetNextId()), test_id_(GetNextId()) {}
+
+ virtual int position() const {
+ UNREACHABLE();
+ return 0;
+ }
virtual Expression* AsExpression() { return this; }
@@ -257,70 +261,12 @@ class Expression: public AstNode {
return Handle<Map>();
}
- // Static type information for this expression.
- StaticType* type() { return &type_; }
-
- // True if the expression is a loop condition.
- bool is_loop_condition() const {
- return LoopConditionField::decode(bitfields_);
- }
- void set_is_loop_condition(bool flag) {
- bitfields_ = (bitfields_ & ~LoopConditionField::mask()) |
- LoopConditionField::encode(flag);
- }
-
- // The value of the expression is guaranteed to be a smi, because the
- // top operation is a bit operation with a mask, or a shift.
- bool GuaranteedSmiResult();
-
- // AST analysis results.
- void CopyAnalysisResultsFrom(Expression* other);
-
- // True if the expression rooted at this node can be compiled by the
- // side-effect free compiler.
- bool side_effect_free() { return SideEffectFreeField::decode(bitfields_); }
- void set_side_effect_free(bool is_side_effect_free) {
- bitfields_ &= ~SideEffectFreeField::mask();
- bitfields_ |= SideEffectFreeField::encode(is_side_effect_free);
- }
-
- // Will the use of this expression treat -0 the same as 0 in all cases?
- // If so, we can return 0 instead of -0 if we want to, to optimize code.
- bool no_negative_zero() { return NoNegativeZeroField::decode(bitfields_); }
- void set_no_negative_zero(bool no_negative_zero) {
- bitfields_ &= ~NoNegativeZeroField::mask();
- bitfields_ |= NoNegativeZeroField::encode(no_negative_zero);
- }
-
- // Will ToInt32 (ECMA 262-3 9.5) or ToUint32 (ECMA 262-3 9.6)
- // be applied to the value of this expression?
- // If so, we may be able to optimize the calculation of the value.
- bool to_int32() { return ToInt32Field::decode(bitfields_); }
- void set_to_int32(bool to_int32) {
- bitfields_ &= ~ToInt32Field::mask();
- bitfields_ |= ToInt32Field::encode(to_int32);
- }
-
- // How many bitwise logical or shift operators are used in this expression?
- int num_bit_ops() { return NumBitOpsField::decode(bitfields_); }
- void set_num_bit_ops(int num_bit_ops) {
- bitfields_ &= ~NumBitOpsField::mask();
- num_bit_ops = Min(num_bit_ops, kMaxNumBitOps);
- bitfields_ |= NumBitOpsField::encode(num_bit_ops);
- }
+ unsigned id() const { return id_; }
+ unsigned test_id() const { return test_id_; }
private:
- static const int kMaxNumBitOps = (1 << 5) - 1;
-
- uint32_t bitfields_;
- StaticType type_;
-
- // Using template BitField<type, start, size>.
- class SideEffectFreeField : public BitField<bool, 0, 1> {};
- class NoNegativeZeroField : public BitField<bool, 1, 1> {};
- class ToInt32Field : public BitField<bool, 2, 1> {};
- class NumBitOpsField : public BitField<int, 3, 5> {};
- class LoopConditionField: public BitField<bool, 8, 1> {};
+ unsigned id_;
+ unsigned test_id_;
};
@@ -333,10 +279,7 @@ class ValidLeftHandSideSentinel: public Expression {
public:
virtual bool IsValidLeftHandSide() { return true; }
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
- static ValidLeftHandSideSentinel* instance() { return &instance_; }
-
- private:
- static ValidLeftHandSideSentinel instance_;
+ virtual bool IsInlineable() const;
};
@@ -355,7 +298,7 @@ class BreakableStatement: public Statement {
virtual BreakableStatement* AsBreakableStatement() { return this; }
// Code generation
- BreakTarget* break_target() { return &break_target_; }
+ Label* break_target() { return &break_target_; }
// Testers.
bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
@@ -370,7 +313,7 @@ class BreakableStatement: public Statement {
private:
ZoneStringList* labels_;
Type type_;
- BreakTarget break_target_;
+ Label break_target_;
int entry_id_;
int exit_id_;
};
@@ -421,6 +364,7 @@ class Declaration: public AstNode {
VariableProxy* proxy() const { return proxy_; }
Variable::Mode mode() const { return mode_; }
FunctionLiteral* fun() const { return fun_; } // may be NULL
+ virtual bool IsInlineable() const;
private:
VariableProxy* proxy_;
@@ -441,7 +385,7 @@ class IterationStatement: public BreakableStatement {
virtual int ContinueId() const = 0;
// Code generation
- BreakTarget* continue_target() { return &continue_target_; }
+ Label* continue_target() { return &continue_target_; }
protected:
explicit inline IterationStatement(ZoneStringList* labels);
@@ -452,7 +396,7 @@ class IterationStatement: public BreakableStatement {
private:
Statement* body_;
- BreakTarget continue_target_;
+ Label continue_target_;
int osr_entry_id_;
};
@@ -479,6 +423,8 @@ class DoWhileStatement: public IterationStatement {
virtual int ContinueId() const { return continue_id_; }
int BackEdgeId() const { return back_edge_id_; }
+ virtual bool IsInlineable() const;
+
private:
Expression* cond_;
int condition_position_;
@@ -505,6 +451,7 @@ class WhileStatement: public IterationStatement {
void set_may_have_function_literal(bool value) {
may_have_function_literal_ = value;
}
+ virtual bool IsInlineable() const;
// Bailout support.
virtual int ContinueId() const { return EntryId(); }
@@ -552,6 +499,7 @@ class ForStatement: public IterationStatement {
bool is_fast_smi_loop() { return loop_variable_ != NULL; }
Variable* loop_variable() { return loop_variable_; }
void set_loop_variable(Variable* var) { loop_variable_ = var; }
+ virtual bool IsInlineable() const;
private:
Statement* init_;
@@ -579,6 +527,7 @@ class ForInStatement: public IterationStatement {
Expression* each() const { return each_; }
Expression* enumerable() const { return enumerable_; }
+ virtual bool IsInlineable() const;
// Bailout support.
int AssignmentId() const { return assignment_id_; }
@@ -619,6 +568,7 @@ class ContinueStatement: public Statement {
DECLARE_NODE_TYPE(ContinueStatement)
IterationStatement* target() const { return target_; }
+ virtual bool IsInlineable() const;
private:
IterationStatement* target_;
@@ -633,6 +583,7 @@ class BreakStatement: public Statement {
DECLARE_NODE_TYPE(BreakStatement)
BreakableStatement* target() const { return target_; }
+ virtual bool IsInlineable() const;
private:
BreakableStatement* target_;
@@ -654,28 +605,27 @@ class ReturnStatement: public Statement {
};
-class WithEnterStatement: public Statement {
+class EnterWithContextStatement: public Statement {
public:
- explicit WithEnterStatement(Expression* expression, bool is_catch_block)
- : expression_(expression), is_catch_block_(is_catch_block) { }
+ explicit EnterWithContextStatement(Expression* expression)
+ : expression_(expression) { }
- DECLARE_NODE_TYPE(WithEnterStatement)
+ DECLARE_NODE_TYPE(EnterWithContextStatement)
Expression* expression() const { return expression_; }
- bool is_catch_block() const { return is_catch_block_; }
+ virtual bool IsInlineable() const;
private:
Expression* expression_;
- bool is_catch_block_;
};
-class WithExitStatement: public Statement {
+class ExitContextStatement: public Statement {
public:
- WithExitStatement() { }
+ virtual bool IsInlineable() const;
- DECLARE_NODE_TYPE(WithExitStatement)
+ DECLARE_NODE_TYPE(ExitContextStatement)
};
@@ -688,12 +638,15 @@ class CaseClause: public ZoneObject {
CHECK(!is_default());
return label_;
}
- JumpTarget* body_target() { return &body_target_; }
+ Label* body_target() { return &body_target_; }
ZoneList<Statement*>* statements() const { return statements_; }
- int position() { return position_; }
+ int position() const { return position_; }
void set_position(int pos) { position_ = pos; }
+ int EntryId() { return entry_id_; }
+ int CompareId() { return compare_id_; }
+
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
@@ -701,11 +654,13 @@ class CaseClause: public ZoneObject {
private:
Expression* label_;
- JumpTarget body_target_;
+ Label body_target_;
ZoneList<Statement*>* statements_;
int position_;
enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
CompareTypeFeedback compare_type_;
+ int compare_id_;
+ int entry_id_;
};
@@ -722,6 +677,7 @@ class SwitchStatement: public BreakableStatement {
Expression* tag() const { return tag_; }
ZoneList<CaseClause*>* cases() const { return cases_; }
+ virtual bool IsInlineable() const;
private:
Expression* tag_;
@@ -742,6 +698,7 @@ class IfStatement: public Statement {
: condition_(condition),
then_statement_(then_statement),
else_statement_(else_statement),
+ if_id_(GetNextId()),
then_id_(GetNextId()),
else_id_(GetNextId()) {
}
@@ -757,6 +714,7 @@ class IfStatement: public Statement {
Statement* then_statement() const { return then_statement_; }
Statement* else_statement() const { return else_statement_; }
+ int IfId() const { return if_id_; }
int ThenId() const { return then_id_; }
int ElseId() const { return else_id_; }
@@ -764,6 +722,7 @@ class IfStatement: public Statement {
Expression* condition_;
Statement* then_statement_;
Statement* else_statement_;
+ int if_id_;
int then_id_;
int else_id_;
};
@@ -773,23 +732,22 @@ class IfStatement: public Statement {
// stack in the compiler; this should probably be reworked.
class TargetCollector: public AstNode {
public:
- explicit TargetCollector(ZoneList<BreakTarget*>* targets)
- : targets_(targets) {
- }
+ TargetCollector(): targets_(0) { }
// Adds a jump target to the collector. The collector stores a pointer to
// the target, not a copy, to make binding work, so make sure not to pass
// in references to something on the stack.
- void AddTarget(BreakTarget* target);
+ void AddTarget(Label* target);
// Virtual behaviour. TargetCollectors are never part of the AST.
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
virtual TargetCollector* AsTargetCollector() { return this; }
- ZoneList<BreakTarget*>* targets() { return targets_; }
+ ZoneList<Label*>* targets() { return &targets_; }
+ virtual bool IsInlineable() const;
private:
- ZoneList<BreakTarget*>* targets_;
+ ZoneList<Label*> targets_;
};
@@ -798,36 +756,36 @@ class TryStatement: public Statement {
explicit TryStatement(Block* try_block)
: try_block_(try_block), escaping_targets_(NULL) { }
- void set_escaping_targets(ZoneList<BreakTarget*>* targets) {
+ void set_escaping_targets(ZoneList<Label*>* targets) {
escaping_targets_ = targets;
}
Block* try_block() const { return try_block_; }
- ZoneList<BreakTarget*>* escaping_targets() const { return escaping_targets_; }
+ ZoneList<Label*>* escaping_targets() const { return escaping_targets_; }
+ virtual bool IsInlineable() const;
private:
Block* try_block_;
- ZoneList<BreakTarget*>* escaping_targets_;
+ ZoneList<Label*>* escaping_targets_;
};
class TryCatchStatement: public TryStatement {
public:
- TryCatchStatement(Block* try_block,
- VariableProxy* catch_var,
- Block* catch_block)
+ TryCatchStatement(Block* try_block, Handle<String> name, Block* catch_block)
: TryStatement(try_block),
- catch_var_(catch_var),
+ name_(name),
catch_block_(catch_block) {
}
DECLARE_NODE_TYPE(TryCatchStatement)
- VariableProxy* catch_var() const { return catch_var_; }
Block* catch_block() const { return catch_block_; }
+ Handle<String> name() const { return name_; }
+ virtual bool IsInlineable() const;
private:
- VariableProxy* catch_var_;
+ Handle<String> name_;
Block* catch_block_;
};
@@ -841,6 +799,7 @@ class TryFinallyStatement: public TryStatement {
DECLARE_NODE_TYPE(TryFinallyStatement)
Block* finally_block() const { return finally_block_; }
+ virtual bool IsInlineable() const;
private:
Block* finally_block_;
@@ -850,6 +809,7 @@ class TryFinallyStatement: public TryStatement {
class DebuggerStatement: public Statement {
public:
DECLARE_NODE_TYPE(DebuggerStatement)
+ virtual bool IsInlineable() const;
};
@@ -857,7 +817,7 @@ class EmptyStatement: public Statement {
public:
DECLARE_NODE_TYPE(EmptyStatement)
- virtual bool IsInlineable() const { return true; }
+ virtual bool IsInlineable() const;
};
@@ -868,7 +828,6 @@ class Literal: public Expression {
DECLARE_NODE_TYPE(Literal)
virtual bool IsTrivial() { return true; }
- virtual bool IsInlineable() const { return true; }
virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
// Check if this literal is identical to the other literal.
@@ -893,13 +852,21 @@ class Literal: public Expression {
virtual bool ToBooleanIsFalse() { return handle_->ToBoolean()->IsFalse(); }
// Identity testers.
- bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
- bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
+ bool IsNull() const {
+ ASSERT(!handle_.is_null());
+ return handle_->IsNull();
+ }
+ bool IsTrue() const {
+ ASSERT(!handle_.is_null());
+ return handle_->IsTrue();
+ }
bool IsFalse() const {
- return handle_.is_identical_to(Factory::false_value());
+ ASSERT(!handle_.is_null());
+ return handle_->IsFalse();
}
Handle<Object> handle() const { return handle_; }
+ virtual bool IsInlineable() const;
private:
Handle<Object> handle_;
@@ -921,6 +888,7 @@ class MaterializedLiteral: public Expression {
bool is_simple() const { return is_simple_; }
int depth() const { return depth_; }
+ virtual bool IsInlineable() const;
private:
int literal_index_;
@@ -970,11 +938,13 @@ class ObjectLiteral: public MaterializedLiteral {
int literal_index,
bool is_simple,
bool fast_elements,
- int depth)
+ int depth,
+ bool has_function)
: MaterializedLiteral(literal_index, is_simple, depth),
constant_properties_(constant_properties),
properties_(properties),
- fast_elements_(fast_elements) {}
+ fast_elements_(fast_elements),
+ has_function_(has_function) {}
DECLARE_NODE_TYPE(ObjectLiteral)
@@ -985,16 +955,24 @@ class ObjectLiteral: public MaterializedLiteral {
bool fast_elements() const { return fast_elements_; }
+ bool has_function() { return has_function_; }
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
void CalculateEmitStore();
+ enum Flags {
+ kNoFlags = 0,
+ kFastElements = 1,
+ kHasFunction = 1 << 1
+ };
+
private:
Handle<FixedArray> constant_properties_;
ZoneList<Property*>* properties_;
bool fast_elements_;
+ bool has_function_;
};
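
The new Flags enum packs the two literal properties into a single integer. A sketch of a helper a code generator might use before calling into the object-literal runtime path (assumed code, not part of this patch):

  static int ComputeObjectLiteralFlags(ObjectLiteral* expr) {
    int flags = expr->fast_elements()
        ? ObjectLiteral::kFastElements
        : ObjectLiteral::kNoFlags;
    flags |= expr->has_function()
        ? ObjectLiteral::kHasFunction
        : ObjectLiteral::kNoFlags;
    return flags;
  }
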
@@ -1047,26 +1025,6 @@ class ArrayLiteral: public MaterializedLiteral {
};
-// Node for constructing a context extension object for a catch block.
-// The catch context extension object has one property, the catch
-// variable, which should be DontDelete.
-class CatchExtensionObject: public Expression {
- public:
- CatchExtensionObject(Literal* key, VariableProxy* value)
- : key_(key), value_(value) {
- }
-
- DECLARE_NODE_TYPE(CatchExtensionObject)
-
- Literal* key() const { return key_; }
- VariableProxy* value() const { return value_; }
-
- private:
- Literal* key_;
- VariableProxy* value_;
-};
-
-
class VariableProxy: public Expression {
public:
explicit VariableProxy(Variable* var);
@@ -1074,16 +1032,7 @@ class VariableProxy: public Expression {
DECLARE_NODE_TYPE(VariableProxy)
// Type testing & conversion
- virtual Property* AsProperty() {
- return var_ == NULL ? NULL : var_->AsProperty();
- }
-
- Variable* AsVariable() {
- if (this == NULL || var_ == NULL) return NULL;
- Expression* rewrite = var_->rewrite();
- if (rewrite == NULL || rewrite->AsSlot() != NULL) return var_;
- return NULL;
- }
+ Variable* AsVariable() { return (this == NULL) ? NULL : var_; }
virtual bool IsValidLeftHandSide() {
return var_ == NULL ? true : var_->IsValidLeftHandSide();
@@ -1110,6 +1059,7 @@ class VariableProxy: public Expression {
Variable* var() const { return var_; }
bool is_this() const { return is_this_; }
bool inside_with() const { return inside_with_; }
+ int position() const { return position_; }
void MarkAsTrivial() { is_trivial_ = true; }
@@ -1122,8 +1072,12 @@ class VariableProxy: public Expression {
bool is_this_;
bool inside_with_;
bool is_trivial_;
+ int position_;
- VariableProxy(Handle<String> name, bool is_this, bool inside_with);
+ VariableProxy(Handle<String> name,
+ bool is_this,
+ bool inside_with,
+ int position = RelocInfo::kNoPosition);
explicit VariableProxy(bool is_this);
friend class Scope;
@@ -1133,15 +1087,11 @@ class VariableProxy: public Expression {
class VariableProxySentinel: public VariableProxy {
public:
virtual bool IsValidLeftHandSide() { return !is_this(); }
- static VariableProxySentinel* this_proxy() { return &this_proxy_; }
- static VariableProxySentinel* identifier_proxy() {
- return &identifier_proxy_;
- }
private:
explicit VariableProxySentinel(bool is_this) : VariableProxy(is_this) { }
- static VariableProxySentinel this_proxy_;
- static VariableProxySentinel identifier_proxy_;
+
+ friend class AstSentinels;
};
@@ -1185,6 +1135,7 @@ class Slot: public Expression {
Type type() const { return type_; }
int index() const { return index_; }
bool is_arguments() const { return var_->is_arguments(); }
+ virtual bool IsInlineable() const;
private:
Variable* var_;
@@ -1209,8 +1160,8 @@ class Property: public Expression {
is_monomorphic_(false),
is_array_length_(false),
is_string_length_(false),
- is_function_prototype_(false),
- is_arguments_access_(false) { }
+ is_string_access_(false),
+ is_function_prototype_(false) { }
DECLARE_NODE_TYPE(Property)
@@ -1219,19 +1170,13 @@ class Property: public Expression {
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
- int position() const { return pos_; }
+ virtual int position() const { return pos_; }
bool is_synthetic() const { return type_ == SYNTHETIC; }
bool IsStringLength() const { return is_string_length_; }
+ bool IsStringAccess() const { return is_string_access_; }
bool IsFunctionPrototype() const { return is_function_prototype_; }
- // Marks that this is actually an argument rewritten to a keyed property
- // accessing the argument through the arguments shadow object.
- void set_is_arguments_access(bool is_arguments_access) {
- is_arguments_access_ = is_arguments_access;
- }
- bool is_arguments_access() const { return is_arguments_access_; }
-
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() { return is_monomorphic_; }
@@ -1241,10 +1186,6 @@ class Property: public Expression {
return monomorphic_receiver_type_;
}
- // Returns a property singleton property access on 'this'. Used
- // during preparsing.
- static Property* this_property() { return &this_property_; }
-
private:
Expression* obj_;
Expression* key_;
@@ -1255,12 +1196,9 @@ class Property: public Expression {
bool is_monomorphic_ : 1;
bool is_array_length_ : 1;
bool is_string_length_ : 1;
+ bool is_string_access_ : 1;
bool is_function_prototype_ : 1;
- bool is_arguments_access_ : 1;
Handle<Map> monomorphic_receiver_type_;
-
- // Dummy property used during preparsing.
- static Property this_property_;
};
@@ -1282,9 +1220,10 @@ class Call: public Expression {
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
- int position() { return pos_; }
+ virtual int position() const { return pos_; }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle,
+ CallKind call_kind);
virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
virtual bool IsMonomorphic() { return is_monomorphic_; }
CheckType check_type() const { return check_type_; }
@@ -1293,13 +1232,11 @@ class Call: public Expression {
Handle<JSGlobalPropertyCell> cell() { return cell_; }
bool ComputeTarget(Handle<Map> type, Handle<String> name);
- bool ComputeGlobalTarget(Handle<GlobalObject> global, Handle<String> name);
+ bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupResult* lookup);
// Bailout support.
int ReturnId() const { return return_id_; }
- static Call* sentinel() { return &sentinel_; }
-
#ifdef DEBUG
// Used to assert that the FullCodeGenerator records the return site.
bool return_is_recorded_;
@@ -1318,8 +1255,36 @@ class Call: public Expression {
Handle<JSGlobalPropertyCell> cell_;
int return_id_;
+};
+
+
+class AstSentinels {
+ public:
+ ~AstSentinels() { }
+
+ // Returns a singleton property access on 'this'. Used during
+ // preparsing.
+ Property* this_property() { return &this_property_; }
+ VariableProxySentinel* this_proxy() { return &this_proxy_; }
+ VariableProxySentinel* identifier_proxy() { return &identifier_proxy_; }
+ ValidLeftHandSideSentinel* valid_left_hand_side_sentinel() {
+ return &valid_left_hand_side_sentinel_;
+ }
+ Call* call_sentinel() { return &call_sentinel_; }
+ EmptyStatement* empty_statement() { return &empty_statement_; }
+
+ private:
+ AstSentinels();
+ VariableProxySentinel this_proxy_;
+ VariableProxySentinel identifier_proxy_;
+ ValidLeftHandSideSentinel valid_left_hand_side_sentinel_;
+ Property this_property_;
+ Call call_sentinel_;
+ EmptyStatement empty_statement_;
+
+ friend class Isolate;
- static Call sentinel_;
+ DISALLOW_COPY_AND_ASSIGN(AstSentinels);
};
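
Former static singletons such as Call::sentinel() are now reached through this per-isolate holder. A sketch of the updated call-site pattern (the ast_sentinels() accessor on Isolate is an assumption here):

  // Before: Call* s = Call::sentinel();
  Call* s = isolate->ast_sentinels()->call_sentinel();
  Property* p = isolate->ast_sentinels()->this_property();
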
@@ -1334,7 +1299,7 @@ class CallNew: public Expression {
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
- int position() { return pos_; }
+ virtual int position() const { return pos_; }
private:
Expression* expression_;
@@ -1350,7 +1315,7 @@ class CallNew: public Expression {
class CallRuntime: public Expression {
public:
CallRuntime(Handle<String> name,
- Runtime::Function* function,
+ const Runtime::Function* function,
ZoneList<Expression*>* arguments)
: name_(name), function_(function), arguments_(arguments) { }
@@ -1359,21 +1324,21 @@ class CallRuntime: public Expression {
virtual bool IsInlineable() const;
Handle<String> name() const { return name_; }
- Runtime::Function* function() const { return function_; }
+ const Runtime::Function* function() const { return function_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
bool is_jsruntime() const { return function_ == NULL; }
private:
Handle<String> name_;
- Runtime::Function* function_;
+ const Runtime::Function* function_;
ZoneList<Expression*>* arguments_;
};
class UnaryOperation: public Expression {
public:
- UnaryOperation(Token::Value op, Expression* expression)
- : op_(op), expression_(expression) {
+ UnaryOperation(Token::Value op, Expression* expression, int pos)
+ : op_(op), expression_(expression), pos_(pos) {
ASSERT(Token::IsUnaryOp(op));
}
@@ -1385,10 +1350,12 @@ class UnaryOperation: public Expression {
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
+ virtual int position() const { return pos_; }
private:
Token::Value op_;
Expression* expression_;
+ int pos_;
};
@@ -1405,9 +1372,6 @@ class BinaryOperation: public Expression {
: AstNode::kNoNumber;
}
- // Create the binary operation corresponding to a compound assignment.
- explicit BinaryOperation(Assignment* assignment);
-
DECLARE_NODE_TYPE(BinaryOperation)
virtual bool IsInlineable() const;
@@ -1417,7 +1381,7 @@ class BinaryOperation: public Expression {
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
- int position() const { return pos_; }
+ virtual int position() const { return pos_; }
// Bailout support.
int RightId() const { return right_id_; }
@@ -1433,59 +1397,55 @@ class BinaryOperation: public Expression {
};
-class IncrementOperation: public Expression {
- public:
- IncrementOperation(Token::Value op, Expression* expr)
- : op_(op), expression_(expr) {
- ASSERT(Token::IsCountOp(op));
- }
-
- DECLARE_NODE_TYPE(IncrementOperation)
-
- Token::Value op() const { return op_; }
- bool is_increment() { return op_ == Token::INC; }
- Expression* expression() const { return expression_; }
-
- private:
- Token::Value op_;
- Expression* expression_;
- int pos_;
-};
-
-
class CountOperation: public Expression {
public:
- CountOperation(bool is_prefix, IncrementOperation* increment, int pos)
- : is_prefix_(is_prefix), increment_(increment), pos_(pos),
- assignment_id_(GetNextId()) {
- }
+ CountOperation(Token::Value op, bool is_prefix, Expression* expr, int pos)
+ : op_(op),
+ is_prefix_(is_prefix),
+ expression_(expr),
+ pos_(pos),
+ assignment_id_(GetNextId()),
+ count_id_(GetNextId()),
+ receiver_types_(NULL) { }
DECLARE_NODE_TYPE(CountOperation)
bool is_prefix() const { return is_prefix_; }
bool is_postfix() const { return !is_prefix_; }
- Token::Value op() const { return increment_->op(); }
+ Token::Value op() const { return op_; }
Token::Value binary_op() {
return (op() == Token::INC) ? Token::ADD : Token::SUB;
}
- Expression* expression() const { return increment_->expression(); }
- IncrementOperation* increment() const { return increment_; }
- int position() const { return pos_; }
+ Expression* expression() const { return expression_; }
+ virtual int position() const { return pos_; }
virtual void MarkAsStatement() { is_prefix_ = true; }
virtual bool IsInlineable() const;
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ virtual bool IsMonomorphic() { return is_monomorphic_; }
+ virtual Handle<Map> GetMonomorphicReceiverType() {
+ return monomorphic_receiver_type_;
+ }
+ virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
+
// Bailout support.
int AssignmentId() const { return assignment_id_; }
+ int CountId() const { return count_id_; }
private:
+ Token::Value op_;
bool is_prefix_;
- IncrementOperation* increment_;
+ bool is_monomorphic_;
+ Expression* expression_;
int pos_;
int assignment_id_;
+ int count_id_;
+ Handle<Map> monomorphic_receiver_type_;
+ ZoneMapList* receiver_types_;
};
@@ -1504,7 +1464,7 @@ class CompareOperation: public Expression {
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
- int position() const { return pos_; }
+ virtual int position() const { return pos_; }
virtual bool IsInlineable() const;
@@ -1513,6 +1473,10 @@ class CompareOperation: public Expression {
bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
+ // Match special cases.
+ bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
+ bool IsLiteralCompareUndefined(Expression** expr);
+
private:
Token::Value op_;
Expression* left_;
@@ -1599,7 +1563,7 @@ class Assignment: public Expression {
Token::Value op() const { return op_; }
Expression* target() const { return target_; }
Expression* value() const { return value_; }
- int position() { return pos_; }
+ virtual int position() const { return pos_; }
BinaryOperation* binary_operation() const { return binary_operation_; }
// This check relies on the definition order of token in token.h.
@@ -1652,7 +1616,8 @@ class Throw: public Expression {
DECLARE_NODE_TYPE(Throw)
Expression* exception() const { return exception_; }
- int position() const { return pos_; }
+ virtual int position() const { return pos_; }
+ virtual bool IsInlineable() const;
private:
Expression* exception_;
@@ -1673,8 +1638,7 @@ class FunctionLiteral: public Expression {
int start_position,
int end_position,
bool is_expression,
- bool contains_loops,
- bool strict_mode)
+ bool has_duplicate_parameters)
: name_(name),
scope_(scope),
body_(body),
@@ -1686,13 +1650,12 @@ class FunctionLiteral: public Expression {
num_parameters_(num_parameters),
start_position_(start_position),
end_position_(end_position),
- is_expression_(is_expression),
- contains_loops_(contains_loops),
- strict_mode_(strict_mode),
function_token_position_(RelocInfo::kNoPosition),
- inferred_name_(Heap::empty_string()),
- try_full_codegen_(false),
- pretenure_(false) { }
+ inferred_name_(HEAP->empty_string()),
+ is_expression_(is_expression),
+ pretenure_(false),
+ has_duplicate_parameters_(has_duplicate_parameters) {
+ }
DECLARE_NODE_TYPE(FunctionLiteral)
@@ -1704,8 +1667,7 @@ class FunctionLiteral: public Expression {
int start_position() const { return start_position_; }
int end_position() const { return end_position_; }
bool is_expression() const { return is_expression_; }
- bool contains_loops() const { return contains_loops_; }
- bool strict_mode() const { return strict_mode_; }
+ bool strict_mode() const;
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
@@ -1729,11 +1691,11 @@ class FunctionLiteral: public Expression {
inferred_name_ = inferred_name;
}
- bool try_full_codegen() { return try_full_codegen_; }
- void set_try_full_codegen(bool flag) { try_full_codegen_ = flag; }
-
bool pretenure() { return pretenure_; }
void set_pretenure(bool value) { pretenure_ = value; }
+ virtual bool IsInlineable() const;
+
+ bool has_duplicate_parameters() { return has_duplicate_parameters_; }
private:
Handle<String> name_;
@@ -1746,13 +1708,11 @@ class FunctionLiteral: public Expression {
int num_parameters_;
int start_position_;
int end_position_;
- bool is_expression_;
- bool contains_loops_;
- bool strict_mode_;
int function_token_position_;
Handle<String> inferred_name_;
- bool try_full_codegen_;
+ bool is_expression_;
bool pretenure_;
+ bool has_duplicate_parameters_;
};
@@ -1767,6 +1727,7 @@ class SharedFunctionInfoLiteral: public Expression {
Handle<SharedFunctionInfo> shared_function_info() const {
return shared_function_info_;
}
+ virtual bool IsInlineable() const;
private:
Handle<SharedFunctionInfo> shared_function_info_;
@@ -1776,6 +1737,7 @@ class SharedFunctionInfoLiteral: public Expression {
class ThisFunction: public Expression {
public:
DECLARE_NODE_TYPE(ThisFunction)
+ virtual bool IsInlineable() const;
};
@@ -1944,6 +1906,7 @@ class RegExpCharacterClass: public RegExpTree {
uc16 standard_type() { return set_.standard_set_type(); }
ZoneList<CharacterRange>* ranges() { return set_.ranges(); }
bool is_negated() { return is_negated_; }
+
private:
CharacterSet set_;
bool is_negated_;
@@ -2028,6 +1991,7 @@ class RegExpQuantifier: public RegExpTree {
bool is_non_greedy() { return type_ == NON_GREEDY; }
bool is_greedy() { return type_ == GREEDY; }
RegExpTree* body() { return body_; }
+
private:
RegExpTree* body_;
int min_;
@@ -2060,6 +2024,7 @@ class RegExpCapture: public RegExpTree {
int index() { return index_; }
static int StartRegister(int index) { return index * 2; }
static int EndRegister(int index) { return index * 2 + 1; }
+
private:
RegExpTree* body_;
int index_;
@@ -2090,6 +2055,7 @@ class RegExpLookahead: public RegExpTree {
bool is_positive() { return is_positive_; }
int capture_count() { return capture_count_; }
int capture_from() { return capture_from_; }
+
private:
RegExpTree* body_;
bool is_positive_;
@@ -2138,7 +2104,7 @@ class RegExpEmpty: public RegExpTree {
class AstVisitor BASE_EMBEDDED {
public:
- AstVisitor() : stack_overflow_(false) { }
+ AstVisitor() : isolate_(Isolate::Current()), stack_overflow_(false) { }
virtual ~AstVisitor() { }
// Stack overflow check and dynamic dispatch.
@@ -2168,10 +2134,15 @@ class AstVisitor BASE_EMBEDDED {
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
+ protected:
+ Isolate* isolate() { return isolate_; }
+
private:
+ Isolate* isolate_;
bool stack_overflow_;
};
+
} } // namespace v8::internal
#endif // V8_AST_H_
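[Editor's note] The ast.h hunks above do two things worth calling out: the IncrementOperation wrapper is folded into CountOperation (one node now carries the operator, the operand, the position, and the new CountId()/type-feedback slots), and position() becomes a virtual accessor overridden by UnaryOperation, BinaryOperation, CountOperation, CompareOperation, Assignment and Throw. A minimal self-contained sketch, not V8 code, of why the virtual accessor pays off: generic passes can report source positions without downcasting to each concrete node type.

#include <cstdio>

// Stand-in for v8::internal::Expression; kNoPosition mirrors
// RelocInfo::kNoPosition in spirit only.
class Expression {
 public:
  virtual ~Expression() { }
  static const int kNoPosition = -1;
  virtual int position() const { return kNoPosition; }
};

// Stand-in for a node like CountOperation that records its position.
class CountOp : public Expression {
 public:
  explicit CountOp(int pos) : pos_(pos) { }
  virtual int position() const { return pos_; }
 private:
  int pos_;
};

// A generic pass (error reporting, deopt bookkeeping) needs only the
// base interface.
static void ReportAt(const Expression* expr) {
  std::printf("node at source position %d\n", expr->position());
}

int main() {
  CountOp op(42);
  ReportAt(&op);  // prints 42 without knowing the concrete node type
  return 0;
}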
diff --git a/deps/v8/src/atomicops.h b/deps/v8/src/atomicops.h
index 72a0d0fb5..e2057ed07 100644
--- a/deps/v8/src/atomicops.h
+++ b/deps/v8/src/atomicops.h
@@ -158,6 +158,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
#include "atomicops_internals_arm_gcc.h"
+#elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS)
+#include "atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif
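[Editor's note] atomicops.h now dispatches to a MIPS backend alongside the existing x86 and ARM ones. For orientation, a self-contained sketch of the calling convention this header family exposes; the names mirror the V8 API, but the bodies here use GCC's __sync builtins rather than V8's hand-written per-platform assembly.

#include <stdint.h>
#include <stdio.h>

typedef int32_t Atomic32;

// If *ptr == old_value, set *ptr = new_value; always return the old value.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}

// Add increment to *ptr and return the new value, with full barrier
// semantics.
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return __sync_add_and_fetch(ptr, increment);
}

int main() {
  volatile Atomic32 counter = 0;
  Barrier_AtomicIncrement(&counter, 1);
  Atomic32 prev = NoBarrier_CompareAndSwap(&counter, 1, 5);
  printf("prev=%d now=%d\n", prev, counter);  // prev=1 now=5
  return 0;
}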
diff --git a/deps/v8/src/atomicops_internals_mips_gcc.h b/deps/v8/src/atomicops_internals_mips_gcc.h
new file mode 100644
index 000000000..5113de289
--- /dev/null
+++ b/deps/v8/src/atomicops_internals_mips_gcc.h
@@ -0,0 +1,169 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation; use atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")
+
+namespace v8 {
+namespace internal {
+
+// Atomically execute:
+// result = *ptr;
+// if (*ptr == old_value)
+// *ptr = new_value;
+// return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr".
+//
+// This routine implies no memory barriers.
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ __asm__ __volatile__("1:\n"
+ "ll %0, %1\n" // prev = *ptr
+ "bne %0, %3, 2f\n" // if (prev != old_value) goto 2
+ "nop\n" // delay slot nop
+ "sc %2, %1\n" // *ptr = new_value (with atomic check)
+ "beqz %2, 1b\n" // start again on atomic error
+ "nop\n" // delay slot nop
+ "2:\n"
+ : "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
+ : "Ir" (old_value), "r" (new_value), "m" (*ptr)
+ : "memory");
+ return prev;
+}
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr. This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 temp, old;
+ __asm__ __volatile__("1:\n"
+ "ll %1, %2\n" // old = *ptr
+ "move %0, %3\n" // temp = new_value
+ "sc %0, %2\n" // *ptr = temp (with atomic check)
+ "beqz %0, 1b\n" // start again on atomic error
+ "nop\n" // delay slot nop
+ : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+ : "r" (new_value), "m" (*ptr)
+ : "memory");
+
+ return old;
+}
+
+// Atomically increment *ptr by "increment". Returns the new value of
+// *ptr with the increment applied. This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 temp, temp2;
+
+ __asm__ __volatile__("1:\n"
+ "ll %0, %2\n" // temp = *ptr
+ "addu %0, %3\n" // temp = temp + increment
+ "move %1, %0\n" // temp2 = temp
+ "sc %0, %2\n" // *ptr = temp (with atomic check)
+ "beqz %0, 1b\n" // start again on atomic error
+ "nop\n" // delay slot nop
+ : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+ : "Ir" (increment), "m" (*ptr)
+ : "memory");
+ // temp2 now holds the final value.
+ return temp2;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
+ ATOMICOPS_COMPILER_BARRIER();
+ return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ ATOMICOPS_COMPILER_BARRIER();
+ return x;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ ATOMICOPS_COMPILER_BARRIER();
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void MemoryBarrier() {
+ ATOMICOPS_COMPILER_BARRIER();
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+} } // namespace v8::internal
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
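[Editor's note] Two notes on the new MIPS port. First, despite its name, ATOMICOPS_COMPILER_BARRIER() here emits a sync instruction, a full hardware memory barrier, which is what gives the Acquire_/Release_/Barrier_ variants their ordering guarantees. Second, every primitive is built from the same ll/sc retry loop: ll loads a word and opens a link, sc stores only if nothing else touched the word in between, and on failure sc writes 0 into its register so the beqz branches back to label 1 and retries. Below is a single-threaded C++ model of the loop in NoBarrier_CompareAndSwap; store_conditional is a hypothetical stand-in for sc and always succeeds here, so the model is illustrative only and not itself atomic.

typedef int Atomic32;

// Hypothetical stand-in for the sc instruction: in this single-threaded
// model the link is never broken, so the store always succeeds.
static bool store_conditional(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  return true;
}

Atomic32 ModelCompareAndSwap(volatile Atomic32* ptr,
                             Atomic32 old_value,
                             Atomic32 new_value) {
  Atomic32 prev;
  bool stored;
  do {                                           // 1:
    prev = *ptr;                                 // ll   %0, %1
    if (prev != old_value) return prev;          // bne  %0, %3, 2f
    stored = store_conditional(ptr, new_value);  // sc   %2, %1
  } while (!stored);                             // beqz %2, 1b
  return prev;                                   // 2:
}

int main() {
  volatile Atomic32 x = 7;
  ModelCompareAndSwap(&x, 7, 9);  // x becomes 9; the call returns 7
  return x == 9 ? 0 : 1;
}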
diff --git a/deps/v8/src/atomicops_internals_x86_gcc.cc b/deps/v8/src/atomicops_internals_x86_gcc.cc
index a57256476..181c20247 100644
--- a/deps/v8/src/atomicops_internals_x86_gcc.cc
+++ b/deps/v8/src/atomicops_internals_x86_gcc.cc
@@ -57,6 +57,9 @@
#if defined(cpuid) // initialize the struct only on x86
+namespace v8 {
+namespace internal {
+
// Set the flags so that code will run correctly and conservatively, so even
// if we haven't been initialized yet, we're probably single threaded, and our
// default values should hopefully be pretty safe.
@@ -65,8 +68,14 @@ struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
false, // no SSE2
};
+} } // namespace v8::internal
+
+namespace {
+
// Initialize the AtomicOps_Internalx86CPUFeatures struct.
-static void AtomicOps_Internalx86CPUFeaturesInit() {
+void AtomicOps_Internalx86CPUFeaturesInit() {
+ using v8::internal::AtomicOps_Internalx86CPUFeatures;
+
uint32_t eax;
uint32_t ebx;
uint32_t ecx;
@@ -107,8 +116,6 @@ static void AtomicOps_Internalx86CPUFeaturesInit() {
AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
}
-namespace {
-
class AtomicOpsx86Initializer {
public:
AtomicOpsx86Initializer() {
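[Editor's note] The hunk above moves the feature struct into v8::internal and the init routine into the anonymous namespace, but the underlying pattern is unchanged: a namespace-scope object whose constructor runs cpuid probing during static initialization, with conservative defaults in case code runs before that (the header's "values may not be correct before main() is run" caveat). A reduced sketch of that pattern, with the cpuid probe faked:

#include <cstdio>

namespace {

struct CpuFeatures {
  bool has_amd_lock_mb_bug;
  bool has_sse2;
};

// Conservative defaults: safe even if consulted before initialization.
CpuFeatures g_features = { false, false };

void DetectFeatures() {
  // Real code issues cpuid here; hard-coded for the sketch.
  g_features.has_sse2 = true;
}

// Constructing this object at static-initialization time triggers
// detection, mirroring AtomicOpsx86Initializer above.
class FeaturesInitializer {
 public:
  FeaturesInitializer() { DetectFeatures(); }
};
FeaturesInitializer g_initializer;

}  // namespace

int main() {
  std::printf("sse2: %d\n", g_features.has_sse2 ? 1 : 0);  // prints 1
  return 0;
}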
diff --git a/deps/v8/src/atomicops_internals_x86_gcc.h b/deps/v8/src/atomicops_internals_x86_gcc.h
index 3f17fa0dc..6e55b5018 100644
--- a/deps/v8/src/atomicops_internals_x86_gcc.h
+++ b/deps/v8/src/atomicops_internals_x86_gcc.h
@@ -30,6 +30,9 @@
#ifndef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
#define V8_ATOMICOPS_INTERNALS_X86_GCC_H_
+namespace v8 {
+namespace internal {
+
// This struct is not part of the public API of this module; clients may not
// use it.
// Features of this x86. Values may not be correct before main() is run,
@@ -43,9 +46,6 @@ extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-namespace v8 {
-namespace internal {
-
// 32-bit low-level operations on any platform.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
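[Editor's note] The x86 header keeps the same Acquire_/Release_ discipline spelled out in the MIPS file above: Release_Store publishes data (no earlier access may be reordered past the store), and Acquire_Load consumes it (no later access may be reordered before the load). A self-contained sketch of the classic publish/consume use, with GCC's __sync_synchronize standing in for V8's MemoryBarrier(); build with g++ -pthread.

#include <pthread.h>
#include <stdio.h>

typedef int Atomic32;

static int payload = 0;              // data being published
static volatile Atomic32 ready = 0;  // publication flag

static void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __sync_synchronize();  // earlier writes may not sink below the store
  *ptr = value;
}

static Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  __sync_synchronize();  // later reads may not hoist above the load
  return value;
}

static void* Consumer(void*) {
  while (Acquire_Load(&ready) == 0) { }  // spin until published
  printf("payload=%d\n", payload);       // guaranteed to observe 42
  return NULL;
}

int main() {
  pthread_t thread;
  pthread_create(&thread, NULL, Consumer, NULL);
  payload = 42;              // write the data first...
  Release_Store(&ready, 1);  // ...then publish the flag
  pthread_join(thread, NULL);
  return 0;
}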
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 415d2dd8c..8e34b9cf5 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,100 +44,60 @@
namespace v8 {
namespace internal {
-// A SourceCodeCache uses a FixedArray to store pairs of
-// (AsciiString*, JSFunction*), mapping names of native code files
-// (runtime.js, etc.) to precompiled functions. Instead of mapping
-// names to functions it might make sense to let the JS2C tool
-// generate an index for each native JS file.
-class SourceCodeCache BASE_EMBEDDED {
- public:
- explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
-
- void Initialize(bool create_heap_objects) {
- cache_ = create_heap_objects ? Heap::empty_fixed_array() : NULL;
- }
-
- void Iterate(ObjectVisitor* v) {
- v->VisitPointer(BitCast<Object**>(&cache_));
- }
-
-
- bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
- for (int i = 0; i < cache_->length(); i+=2) {
- SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
- if (str->IsEqualTo(name)) {
- *handle = Handle<SharedFunctionInfo>(
- SharedFunctionInfo::cast(cache_->get(i + 1)));
- return true;
- }
- }
- return false;
- }
-
-
- void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
- HandleScope scope;
- int length = cache_->length();
- Handle<FixedArray> new_array =
- Factory::NewFixedArray(length + 2, TENURED);
- cache_->CopyTo(0, *new_array, 0, cache_->length());
- cache_ = *new_array;
- Handle<String> str = Factory::NewStringFromAscii(name, TENURED);
- cache_->set(length, *str);
- cache_->set(length + 1, *shared);
- Script::cast(shared->script())->set_type(Smi::FromInt(type_));
- }
-
- private:
- Script::Type type_;
- FixedArray* cache_;
- DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
-};
-
-static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
-// This is for delete, not delete[].
-static List<char*>* delete_these_non_arrays_on_tear_down = NULL;
-// This is for delete[]
-static List<char*>* delete_these_arrays_on_tear_down = NULL;
-
-NativesExternalStringResource::NativesExternalStringResource(const char* source)
- : data_(source), length_(StrLength(source)) {
- if (delete_these_non_arrays_on_tear_down == NULL) {
- delete_these_non_arrays_on_tear_down = new List<char*>(2);
+NativesExternalStringResource::NativesExternalStringResource(
+ Bootstrapper* bootstrapper,
+ const char* source,
+ size_t length)
+ : data_(source), length_(length) {
+ if (bootstrapper->delete_these_non_arrays_on_tear_down_ == NULL) {
+ bootstrapper->delete_these_non_arrays_on_tear_down_ = new List<char*>(2);
}
// The resources are small objects and we only make a fixed number of
// them, but let's clean them up on exit for neatness.
- delete_these_non_arrays_on_tear_down->
+ bootstrapper->delete_these_non_arrays_on_tear_down_->
Add(reinterpret_cast<char*>(this));
}
+Bootstrapper::Bootstrapper()
+ : nesting_(0),
+ extensions_cache_(Script::TYPE_EXTENSION),
+ delete_these_non_arrays_on_tear_down_(NULL),
+ delete_these_arrays_on_tear_down_(NULL) {
+}
+
+
Handle<String> Bootstrapper::NativesSourceLookup(int index) {
ASSERT(0 <= index && index < Natives::GetBuiltinsCount());
- if (Heap::natives_source_cache()->get(index)->IsUndefined()) {
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ if (heap->natives_source_cache()->get(index)->IsUndefined()) {
if (!Snapshot::IsEnabled() || FLAG_new_snapshot) {
// We can use external strings for the natives.
+ Vector<const char> source = Natives::GetRawScriptSource(index);
NativesExternalStringResource* resource =
- new NativesExternalStringResource(
- Natives::GetScriptSource(index).start());
+ new NativesExternalStringResource(this,
+ source.start(),
+ source.length());
Handle<String> source_code =
- Factory::NewExternalStringFromAscii(resource);
- Heap::natives_source_cache()->set(index, *source_code);
+ factory->NewExternalStringFromAscii(resource);
+ heap->natives_source_cache()->set(index, *source_code);
} else {
// Old snapshot code can't cope with external strings at all.
Handle<String> source_code =
- Factory::NewStringFromAscii(Natives::GetScriptSource(index));
- Heap::natives_source_cache()->set(index, *source_code);
+ factory->NewStringFromAscii(Natives::GetRawScriptSource(index));
+ heap->natives_source_cache()->set(index, *source_code);
}
}
- Handle<Object> cached_source(Heap::natives_source_cache()->get(index));
+ Handle<Object> cached_source(heap->natives_source_cache()->get(index));
return Handle<String>::cast(cached_source);
}
void Bootstrapper::Initialize(bool create_heap_objects) {
- extensions_cache.Initialize(create_heap_objects);
+ extensions_cache_.Initialize(create_heap_objects);
GCExtension::Register();
ExternalizeStringExtension::Register();
}
@@ -146,45 +106,46 @@ void Bootstrapper::Initialize(bool create_heap_objects) {
char* Bootstrapper::AllocateAutoDeletedArray(int bytes) {
char* memory = new char[bytes];
if (memory != NULL) {
- if (delete_these_arrays_on_tear_down == NULL) {
- delete_these_arrays_on_tear_down = new List<char*>(2);
+ if (delete_these_arrays_on_tear_down_ == NULL) {
+ delete_these_arrays_on_tear_down_ = new List<char*>(2);
}
- delete_these_arrays_on_tear_down->Add(memory);
+ delete_these_arrays_on_tear_down_->Add(memory);
}
return memory;
}
void Bootstrapper::TearDown() {
- if (delete_these_non_arrays_on_tear_down != NULL) {
- int len = delete_these_non_arrays_on_tear_down->length();
+ if (delete_these_non_arrays_on_tear_down_ != NULL) {
+ int len = delete_these_non_arrays_on_tear_down_->length();
ASSERT(len < 20); // Don't use this mechanism for unbounded allocations.
for (int i = 0; i < len; i++) {
- delete delete_these_non_arrays_on_tear_down->at(i);
- delete_these_non_arrays_on_tear_down->at(i) = NULL;
+ delete delete_these_non_arrays_on_tear_down_->at(i);
+ delete_these_non_arrays_on_tear_down_->at(i) = NULL;
}
- delete delete_these_non_arrays_on_tear_down;
- delete_these_non_arrays_on_tear_down = NULL;
+ delete delete_these_non_arrays_on_tear_down_;
+ delete_these_non_arrays_on_tear_down_ = NULL;
}
- if (delete_these_arrays_on_tear_down != NULL) {
- int len = delete_these_arrays_on_tear_down->length();
+ if (delete_these_arrays_on_tear_down_ != NULL) {
+ int len = delete_these_arrays_on_tear_down_->length();
ASSERT(len < 1000); // Don't use this mechanism for unbounded allocations.
for (int i = 0; i < len; i++) {
- delete[] delete_these_arrays_on_tear_down->at(i);
- delete_these_arrays_on_tear_down->at(i) = NULL;
+ delete[] delete_these_arrays_on_tear_down_->at(i);
+ delete_these_arrays_on_tear_down_->at(i) = NULL;
}
- delete delete_these_arrays_on_tear_down;
- delete_these_arrays_on_tear_down = NULL;
+ delete delete_these_arrays_on_tear_down_;
+ delete_these_arrays_on_tear_down_ = NULL;
}
- extensions_cache.Initialize(false); // Yes, symmetrical
+ extensions_cache_.Initialize(false); // Yes, symmetrical
}
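[Editor's note] TearDown above drains two bookkeeping lists that this patch turns from file-level statics into Bootstrapper members: one holds single objects released with delete, the other holds char arrays released with delete[] (AllocateAutoDeletedArray feeds the latter). Keeping them separate matters because freeing memory with the wrong form is undefined behavior. A reduced sketch of the scheme, with std::vector standing in for V8's List<char*>:

#include <vector>

class Registry {
 public:
  // Memory handed out here is released with delete[] at tear-down.
  char* AllocateAutoDeletedArray(int bytes) {
    char* memory = new char[bytes];
    arrays_.push_back(memory);
    return memory;
  }

  // Single objects registered here are released with plain delete.
  void AddObject(char* object) { non_arrays_.push_back(object); }

  void TearDown() {
    for (size_t i = 0; i < non_arrays_.size(); i++) delete non_arrays_[i];
    non_arrays_.clear();
    for (size_t i = 0; i < arrays_.size(); i++) delete[] arrays_[i];
    arrays_.clear();
  }

 private:
  std::vector<char*> non_arrays_;  // freed with delete
  std::vector<char*> arrays_;      // freed with delete[]
};

int main() {
  Registry registry;
  registry.AllocateAutoDeletedArray(128);
  registry.AddObject(new char('x'));
  registry.TearDown();  // both allocations released with the right form
  return 0;
}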
class Genesis BASE_EMBEDDED {
public:
- Genesis(Handle<Object> global_object,
+ Genesis(Isolate* isolate,
+ Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
v8::ExtensionConfiguration* extensions);
~Genesis() { }
@@ -193,8 +154,13 @@ class Genesis BASE_EMBEDDED {
Genesis* previous() { return previous_; }
+ Isolate* isolate() const { return isolate_; }
+ Factory* factory() const { return isolate_->factory(); }
+ Heap* heap() const { return isolate_->heap(); }
+
private:
Handle<Context> global_context_;
+ Isolate* isolate_;
// There may be more than one active genesis object: When GC is
// triggered during environment creation there may be weak handle
@@ -206,7 +172,11 @@ class Genesis BASE_EMBEDDED {
// Creates some basic objects. Used for creating a context from scratch.
void CreateRoots();
// Creates the empty function. Used for creating a context from scratch.
- Handle<JSFunction> CreateEmptyFunction();
+ Handle<JSFunction> CreateEmptyFunction(Isolate* isolate);
+ // Creates the ThrowTypeError function. ECMA 5th Ed. 13.2.3
+ Handle<JSFunction> GetThrowTypeErrorFunction();
+
+ void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
// Creates the global objects using the global and the template passed in
// through the API. We call this regardless of whether we are building a
// context from scratch or using a deserialized one from the partial snapshot
@@ -232,7 +202,9 @@ class Genesis BASE_EMBEDDED {
// Installs the contents of the native .js files on the global objects.
// Used for creating a context from scratch.
void InstallNativeFunctions();
+ void InstallExperimentalNativeFunctions();
bool InstallNatives();
+ bool InstallExperimentalNatives();
void InstallBuiltinFunctionIds();
void InstallJSFunctionResultCaches();
void InitializeNormalizedMapCaches();
@@ -260,11 +232,26 @@ class Genesis BASE_EMBEDDED {
ADD_READONLY_PROTOTYPE,
ADD_WRITEABLE_PROTOTYPE
};
+
+ Handle<Map> CreateFunctionMap(PrototypePropertyMode prototype_mode);
+
Handle<DescriptorArray> ComputeFunctionInstanceDescriptor(
PrototypePropertyMode prototypeMode);
void MakeFunctionInstancePrototypeWritable();
- static bool CompileBuiltin(int index);
+ Handle<Map> CreateStrictModeFunctionMap(
+ PrototypePropertyMode prototype_mode,
+ Handle<JSFunction> empty_function,
+ Handle<FixedArray> arguments_callbacks,
+ Handle<FixedArray> caller_callbacks);
+
+ Handle<DescriptorArray> ComputeStrictFunctionInstanceDescriptor(
+ PrototypePropertyMode propertyMode,
+ Handle<FixedArray> arguments,
+ Handle<FixedArray> caller);
+
+ static bool CompileBuiltin(Isolate* isolate, int index);
+ static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
static bool CompileNative(Vector<const char> name, Handle<String> source);
static bool CompileScriptCached(Vector<const char> name,
Handle<String> source,
@@ -274,25 +261,34 @@ class Genesis BASE_EMBEDDED {
bool use_runtime_context);
Handle<Context> result_;
- Handle<JSFunction> empty_function_;
+
+  // Function instance maps. Function literal maps are created initially with
+  // a read-only prototype for the processing of JS builtins. Later the
+  // function instance maps are replaced in order to make the prototype
+  // writable. These are the final maps, with a writable prototype.
+ Handle<Map> function_instance_map_writable_prototype_;
+ Handle<Map> strict_mode_function_instance_map_writable_prototype_;
+ Handle<JSFunction> throw_type_error_function;
+
BootstrapperActive active_;
friend class Bootstrapper;
};
void Bootstrapper::Iterate(ObjectVisitor* v) {
- extensions_cache.Iterate(v);
+ extensions_cache_.Iterate(v);
v->Synchronize("Extensions");
}
Handle<Context> Bootstrapper::CreateEnvironment(
+ Isolate* isolate,
Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
v8::ExtensionConfiguration* extensions) {
HandleScope scope;
Handle<Context> env;
- Genesis genesis(global_object, global_template, extensions);
+ Genesis genesis(isolate, global_object, global_template, extensions);
env = genesis.result();
if (!env.is_null()) {
if (InstallExtensions(env, extensions)) {
@@ -305,17 +301,19 @@ Handle<Context> Bootstrapper::CreateEnvironment(
static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
// object.__proto__ = proto;
+ Factory* factory = object->GetIsolate()->factory();
Handle<Map> old_to_map = Handle<Map>(object->map());
- Handle<Map> new_to_map = Factory::CopyMapDropTransitions(old_to_map);
+ Handle<Map> new_to_map = factory->CopyMapDropTransitions(old_to_map);
new_to_map->set_prototype(*proto);
object->set_map(*new_to_map);
}
void Bootstrapper::DetachGlobal(Handle<Context> env) {
- JSGlobalProxy::cast(env->global_proxy())->set_context(*Factory::null_value());
+ Factory* factory = env->GetIsolate()->factory();
+ JSGlobalProxy::cast(env->global_proxy())->set_context(*factory->null_value());
SetObjectPrototype(Handle<JSObject>(env->global_proxy()),
- Factory::null_value());
+ factory->null_value());
env->set_global_proxy(env->global());
env->global()->set_global_receiver(env->global());
}
@@ -339,11 +337,13 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
Handle<JSObject> prototype,
Builtins::Name call,
bool is_ecma_native) {
- Handle<String> symbol = Factory::LookupAsciiSymbol(name);
- Handle<Code> call_code = Handle<Code>(Builtins::builtin(call));
+ Isolate* isolate = target->GetIsolate();
+ Factory* factory = isolate->factory();
+ Handle<String> symbol = factory->LookupAsciiSymbol(name);
+ Handle<Code> call_code = Handle<Code>(isolate->builtins()->builtin(call));
Handle<JSFunction> function = prototype.is_null() ?
- Factory::NewFunctionWithoutPrototype(symbol, call_code) :
- Factory::NewFunctionWithPrototype(symbol,
+ factory->NewFunctionWithoutPrototype(symbol, call_code) :
+ factory->NewFunctionWithPrototype(symbol,
type,
instance_size,
prototype,
@@ -359,161 +359,300 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
PrototypePropertyMode prototypeMode) {
- Handle<DescriptorArray> result = Factory::empty_descriptor_array();
+ Handle<DescriptorArray> descriptors =
+ factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE
+ ? 4
+ : 5);
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ { // Add length.
+ Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
+ CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
+ descriptors->Set(0, &d);
+ }
+ { // Add name.
+ Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
+ CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
+ descriptors->Set(1, &d);
+ }
+ { // Add arguments.
+ Handle<Foreign> foreign =
+ factory()->NewForeign(&Accessors::FunctionArguments);
+ CallbacksDescriptor d(*factory()->arguments_symbol(), *foreign, attributes);
+ descriptors->Set(2, &d);
+ }
+ { // Add caller.
+ Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionCaller);
+ CallbacksDescriptor d(*factory()->caller_symbol(), *foreign, attributes);
+ descriptors->Set(3, &d);
+ }
if (prototypeMode != DONT_ADD_PROTOTYPE) {
- PropertyAttributes attributes = static_cast<PropertyAttributes>(
- DONT_ENUM |
- DONT_DELETE |
- (prototypeMode == ADD_READONLY_PROTOTYPE ? READ_ONLY : 0));
- result =
- Factory::CopyAppendProxyDescriptor(
- result,
- Factory::prototype_symbol(),
- Factory::NewProxy(&Accessors::FunctionPrototype),
- attributes);
+ // Add prototype.
+ if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
+ attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
+ }
+ Handle<Foreign> foreign =
+ factory()->NewForeign(&Accessors::FunctionPrototype);
+ CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
+ descriptors->Set(4, &d);
}
+ descriptors->Sort();
+ return descriptors;
+}
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- // Add length.
- result =
- Factory::CopyAppendProxyDescriptor(
- result,
- Factory::length_symbol(),
- Factory::NewProxy(&Accessors::FunctionLength),
- attributes);
-
- // Add name.
- result =
- Factory::CopyAppendProxyDescriptor(
- result,
- Factory::name_symbol(),
- Factory::NewProxy(&Accessors::FunctionName),
- attributes);
-
- // Add arguments.
- result =
- Factory::CopyAppendProxyDescriptor(
- result,
- Factory::arguments_symbol(),
- Factory::NewProxy(&Accessors::FunctionArguments),
- attributes);
-
- // Add caller.
- result =
- Factory::CopyAppendProxyDescriptor(
- result,
- Factory::caller_symbol(),
- Factory::NewProxy(&Accessors::FunctionCaller),
- attributes);
- return result;
+Handle<Map> Genesis::CreateFunctionMap(PrototypePropertyMode prototype_mode) {
+ Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+ Handle<DescriptorArray> descriptors =
+ ComputeFunctionInstanceDescriptor(prototype_mode);
+ map->set_instance_descriptors(*descriptors);
+ map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
+ return map;
}
-Handle<JSFunction> Genesis::CreateEmptyFunction() {
- // Allocate the map for function instances.
- Handle<Map> fm = Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- global_context()->set_function_instance_map(*fm);
+Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
+ // Allocate the map for function instances. Maps are allocated first and their
+  // prototypes patched later, once the empty function is created.
+
// Please note that the prototype property for function instances must be
// writable.
- Handle<DescriptorArray> function_map_descriptors =
- ComputeFunctionInstanceDescriptor(ADD_WRITEABLE_PROTOTYPE);
- fm->set_instance_descriptors(*function_map_descriptors);
- fm->set_function_with_prototype(true);
+ Handle<Map> function_instance_map =
+ CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
+ global_context()->set_function_instance_map(*function_instance_map);
// Functions with this map will not have a 'prototype' property, and
// can not be used as constructors.
Handle<Map> function_without_prototype_map =
- Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+ CreateFunctionMap(DONT_ADD_PROTOTYPE);
global_context()->set_function_without_prototype_map(
*function_without_prototype_map);
- Handle<DescriptorArray> function_without_prototype_map_descriptors =
- ComputeFunctionInstanceDescriptor(DONT_ADD_PROTOTYPE);
- function_without_prototype_map->set_instance_descriptors(
- *function_without_prototype_map_descriptors);
- function_without_prototype_map->set_function_with_prototype(false);
- // Allocate the function map first and then patch the prototype later
- fm = Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- global_context()->set_function_map(*fm);
- function_map_descriptors =
- ComputeFunctionInstanceDescriptor(ADD_READONLY_PROTOTYPE);
- fm->set_instance_descriptors(*function_map_descriptors);
- fm->set_function_with_prototype(true);
+ // Allocate the function map. This map is temporary, used only for processing
+ // of builtins.
+  // Later the map is replaced with the writable prototype map, allocated below.
+ Handle<Map> function_map = CreateFunctionMap(ADD_READONLY_PROTOTYPE);
+ global_context()->set_function_map(*function_map);
- Handle<String> object_name = Handle<String>(Heap::Object_symbol());
+ // The final map for functions. Writeable prototype.
+ // This map is installed in MakeFunctionInstancePrototypeWritable.
+ function_instance_map_writable_prototype_ =
+ CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
+
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+
+ Handle<String> object_name = Handle<String>(heap->Object_symbol());
{ // --- O b j e c t ---
Handle<JSFunction> object_fun =
- Factory::NewFunction(object_name, Factory::null_value());
+ factory->NewFunction(object_name, factory->null_value());
Handle<Map> object_function_map =
- Factory::NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
object_fun->set_initial_map(*object_function_map);
object_function_map->set_constructor(*object_fun);
global_context()->set_object_function(*object_fun);
// Allocate a new prototype for the object function.
- Handle<JSObject> prototype = Factory::NewJSObject(Top::object_function(),
- TENURED);
+ Handle<JSObject> prototype = factory->NewJSObject(
+ isolate->object_function(),
+ TENURED);
global_context()->set_initial_object_prototype(*prototype);
SetPrototype(object_fun, prototype);
object_function_map->
- set_instance_descriptors(Heap::empty_descriptor_array());
+ set_instance_descriptors(heap->empty_descriptor_array());
}
// Allocate the empty function as the prototype for function ECMAScript
// 262 15.3.4.
- Handle<String> symbol = Factory::LookupAsciiSymbol("Empty");
+ Handle<String> symbol = factory->LookupAsciiSymbol("Empty");
Handle<JSFunction> empty_function =
- Factory::NewFunctionWithoutPrototype(symbol);
+ factory->NewFunctionWithoutPrototype(symbol, kNonStrictMode);
// --- E m p t y ---
Handle<Code> code =
- Handle<Code>(Builtins::builtin(Builtins::EmptyFunction));
+ Handle<Code>(isolate->builtins()->builtin(
+ Builtins::kEmptyFunction));
empty_function->set_code(*code);
empty_function->shared()->set_code(*code);
- Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
- Handle<Script> script = Factory::NewScript(source);
+ Handle<String> source = factory->NewStringFromAscii(CStrVector("() {}"));
+ Handle<Script> script = factory->NewScript(source);
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
empty_function->shared()->set_script(*script);
empty_function->shared()->set_start_position(0);
empty_function->shared()->set_end_position(source->length());
empty_function->shared()->DontAdaptArguments();
+
+ // Set prototypes for the function maps.
global_context()->function_map()->set_prototype(*empty_function);
global_context()->function_instance_map()->set_prototype(*empty_function);
global_context()->function_without_prototype_map()->
set_prototype(*empty_function);
+ function_instance_map_writable_prototype_->set_prototype(*empty_function);
// Allocate the function map first and then patch the prototype later
- Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(
+ Handle<Map> empty_fm = factory->CopyMapDropDescriptors(
function_without_prototype_map);
empty_fm->set_instance_descriptors(
- *function_without_prototype_map_descriptors);
+ function_without_prototype_map->instance_descriptors());
empty_fm->set_prototype(global_context()->object_function()->prototype());
empty_function->set_map(*empty_fm);
return empty_function;
}
+Handle<DescriptorArray> Genesis::ComputeStrictFunctionInstanceDescriptor(
+ PrototypePropertyMode prototypeMode,
+ Handle<FixedArray> arguments,
+ Handle<FixedArray> caller) {
+ Handle<DescriptorArray> descriptors =
+ factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE
+ ? 4
+ : 5);
+ PropertyAttributes attributes = static_cast<PropertyAttributes>(
+ DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+ { // length
+ Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
+ CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
+ descriptors->Set(0, &d);
+ }
+ { // name
+ Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
+ CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
+ descriptors->Set(1, &d);
+ }
+ { // arguments
+ CallbacksDescriptor d(*factory()->arguments_symbol(),
+ *arguments,
+ attributes);
+ descriptors->Set(2, &d);
+ }
+ { // caller
+ CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attributes);
+ descriptors->Set(3, &d);
+ }
+
+ // prototype
+ if (prototypeMode != DONT_ADD_PROTOTYPE) {
+ if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
+ attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
+ }
+ Handle<Foreign> foreign =
+ factory()->NewForeign(&Accessors::FunctionPrototype);
+ CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
+ descriptors->Set(4, &d);
+ }
+
+ descriptors->Sort();
+ return descriptors;
+}
+
+
+// ECMAScript 5th Edition, 13.2.3
+Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
+ if (throw_type_error_function.is_null()) {
+ Handle<String> name = factory()->LookupAsciiSymbol("ThrowTypeError");
+ throw_type_error_function =
+ factory()->NewFunctionWithoutPrototype(name, kNonStrictMode);
+ Handle<Code> code(isolate()->builtins()->builtin(
+ Builtins::kStrictModePoisonPill));
+ throw_type_error_function->set_map(
+ global_context()->function_map());
+ throw_type_error_function->set_code(*code);
+ throw_type_error_function->shared()->set_code(*code);
+ throw_type_error_function->shared()->DontAdaptArguments();
+
+ PreventExtensions(throw_type_error_function);
+ }
+ return throw_type_error_function;
+}
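[Editor's note] GetThrowTypeErrorFunction() is a lazy singleton: the poison-pill function is built on first use, made non-extensible, and the same handle is then shared by every strict-mode accessor that needs to throw (the arguments/caller callback arrays below install it in both the getter and setter slots). The shape of that pattern, reduced to a self-contained sketch with stand-in types:

#include <cstdio>

// Stand-in for the poison-pill JSFunction.
class PoisonPill {
 public:
  void Call() const { std::printf("TypeError: poisoned accessor\n"); }
};

class Genesis {
 public:
  Genesis() : throw_type_error_(NULL) { }
  ~Genesis() { delete throw_type_error_; }

  // Built once on first request; every later caller gets the same object.
  PoisonPill* GetThrowTypeErrorFunction() {
    if (throw_type_error_ == NULL) {
      throw_type_error_ = new PoisonPill();
    }
    return throw_type_error_;
  }

 private:
  PoisonPill* throw_type_error_;
};

int main() {
  Genesis genesis;
  // The same instance serves as both getter and setter callback.
  PoisonPill* getter = genesis.GetThrowTypeErrorFunction();
  PoisonPill* setter = genesis.GetThrowTypeErrorFunction();
  getter->Call();
  return getter == setter ? 0 : 1;
}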
+
+
+Handle<Map> Genesis::CreateStrictModeFunctionMap(
+ PrototypePropertyMode prototype_mode,
+ Handle<JSFunction> empty_function,
+ Handle<FixedArray> arguments_callbacks,
+ Handle<FixedArray> caller_callbacks) {
+ Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+ Handle<DescriptorArray> descriptors =
+ ComputeStrictFunctionInstanceDescriptor(prototype_mode,
+ arguments_callbacks,
+ caller_callbacks);
+ map->set_instance_descriptors(*descriptors);
+ map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
+ map->set_prototype(*empty_function);
+ return map;
+}
+
+
+void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
+ // Create the callbacks arrays for ThrowTypeError functions.
+  // The get/set callbacks are filled in after the maps are created below.
+ Factory* factory = empty->GetIsolate()->factory();
+ Handle<FixedArray> arguments = factory->NewFixedArray(2, TENURED);
+ Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
+
+  // Allocate the map for the strict mode function instances.
+ Handle<Map> strict_mode_function_instance_map =
+ CreateStrictModeFunctionMap(
+ ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller);
+ global_context()->set_strict_mode_function_instance_map(
+ *strict_mode_function_instance_map);
+
+  // Allocate the map for the prototype-less strict mode instances.
+ Handle<Map> strict_mode_function_without_prototype_map =
+ CreateStrictModeFunctionMap(
+ DONT_ADD_PROTOTYPE, empty, arguments, caller);
+ global_context()->set_strict_mode_function_without_prototype_map(
+ *strict_mode_function_without_prototype_map);
+
+  // Allocate the map for the strict mode functions. This map is temporary,
+  // used only for processing of builtins.
+  // Later the map is replaced with the writable prototype map, allocated below.
+ Handle<Map> strict_mode_function_map =
+ CreateStrictModeFunctionMap(
+ ADD_READONLY_PROTOTYPE, empty, arguments, caller);
+ global_context()->set_strict_mode_function_map(
+ *strict_mode_function_map);
+
+ // The final map for the strict mode functions. Writeable prototype.
+ // This map is installed in MakeFunctionInstancePrototypeWritable.
+ strict_mode_function_instance_map_writable_prototype_ =
+ CreateStrictModeFunctionMap(
+ ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller);
+
+ // Create the ThrowTypeError function instance.
+ Handle<JSFunction> throw_function =
+ GetThrowTypeErrorFunction();
+
+ // Complete the callback fixed arrays.
+ arguments->set(0, *throw_function);
+ arguments->set(1, *throw_function);
+ caller->set(0, *throw_function);
+ caller->set(1, *throw_function);
+}
+
+
static void AddToWeakGlobalContextList(Context* context) {
ASSERT(context->IsGlobalContext());
+ Heap* heap = context->GetIsolate()->heap();
#ifdef DEBUG
{ // NOLINT
ASSERT(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined());
// Check that context is not in the list yet.
- for (Object* current = Heap::global_contexts_list();
+ for (Object* current = heap->global_contexts_list();
!current->IsUndefined();
current = Context::cast(current)->get(Context::NEXT_CONTEXT_LINK)) {
ASSERT(current != context);
}
}
#endif
- context->set(Context::NEXT_CONTEXT_LINK, Heap::global_contexts_list());
- Heap::set_global_contexts_list(context);
+ context->set(Context::NEXT_CONTEXT_LINK, heap->global_contexts_list());
+ heap->set_global_contexts_list(context);
}
@@ -522,11 +661,10 @@ void Genesis::CreateRoots() {
// closure and extension object later (we need the empty function
// and the global object, but in order to create those, we need the
// global context).
- global_context_ =
- Handle<Context>::cast(
- GlobalHandles::Create(*Factory::NewGlobalContext()));
+ global_context_ = Handle<Context>::cast(isolate()->global_handles()->Create(
+ *factory()->NewGlobalContext()));
AddToWeakGlobalContextList(*global_context_);
- Top::set_context(*global_context());
+ isolate()->set_context(*global_context());
// Allocate the message listeners object.
{
@@ -570,29 +708,33 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
}
if (js_global_template.is_null()) {
- Handle<String> name = Handle<String>(Heap::empty_symbol());
- Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+ Handle<String> name = Handle<String>(heap()->empty_symbol());
+ Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
+ Builtins::kIllegal));
js_global_function =
- Factory::NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
- JSGlobalObject::kSize, code, true);
+ factory()->NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
+ JSGlobalObject::kSize, code, true);
// Change the constructor property of the prototype of the
// hidden global function to refer to the Object function.
Handle<JSObject> prototype =
Handle<JSObject>(
JSObject::cast(js_global_function->instance_prototype()));
SetLocalPropertyNoThrow(
- prototype, Factory::constructor_symbol(), Top::object_function(), NONE);
+ prototype,
+ factory()->constructor_symbol(),
+ isolate()->object_function(),
+ NONE);
} else {
Handle<FunctionTemplateInfo> js_global_constructor(
FunctionTemplateInfo::cast(js_global_template->constructor()));
js_global_function =
- Factory::CreateApiFunction(js_global_constructor,
- Factory::InnerGlobalObject);
+ factory()->CreateApiFunction(js_global_constructor,
+ factory()->InnerGlobalObject);
}
js_global_function->initial_map()->set_is_hidden_prototype();
Handle<GlobalObject> inner_global =
- Factory::NewGlobalObject(js_global_function);
+ factory()->NewGlobalObject(js_global_function);
if (inner_global_out != NULL) {
*inner_global_out = inner_global;
}
@@ -600,22 +742,23 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
// Step 2: create or re-initialize the global proxy object.
Handle<JSFunction> global_proxy_function;
if (global_template.IsEmpty()) {
- Handle<String> name = Handle<String>(Heap::empty_symbol());
- Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+ Handle<String> name = Handle<String>(heap()->empty_symbol());
+ Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
+ Builtins::kIllegal));
global_proxy_function =
- Factory::NewFunction(name, JS_GLOBAL_PROXY_TYPE,
- JSGlobalProxy::kSize, code, true);
+ factory()->NewFunction(name, JS_GLOBAL_PROXY_TYPE,
+ JSGlobalProxy::kSize, code, true);
} else {
Handle<ObjectTemplateInfo> data =
v8::Utils::OpenHandle(*global_template);
Handle<FunctionTemplateInfo> global_constructor(
FunctionTemplateInfo::cast(data->constructor()));
global_proxy_function =
- Factory::CreateApiFunction(global_constructor,
- Factory::OuterGlobalObject);
+ factory()->CreateApiFunction(global_constructor,
+ factory()->OuterGlobalObject);
}
- Handle<String> global_name = Factory::LookupAsciiSymbol("global");
+ Handle<String> global_name = factory()->LookupAsciiSymbol("global");
global_proxy_function->shared()->set_instance_class_name(*global_name);
global_proxy_function->initial_map()->set_is_access_check_needed(true);
@@ -629,7 +772,7 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
Handle<JSGlobalProxy>::cast(global_object));
} else {
return Handle<JSGlobalProxy>::cast(
- Factory::NewJSObject(global_proxy_function, TENURED));
+ factory()->NewJSObject(global_proxy_function, TENURED));
}
}
@@ -654,7 +797,7 @@ void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
ForceSetProperty(builtins_global,
- Factory::LookupAsciiSymbol("global"),
+ factory()->LookupAsciiSymbol("global"),
inner_global,
attributes);
// Setup the reference from the global object to the builtins object.
@@ -671,7 +814,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// --- G l o b a l C o n t e x t ---
// Use the empty function as closure (no scope info).
global_context()->set_closure(*empty_function);
- global_context()->set_fcontext(*global_context());
global_context()->set_previous(NULL);
// Set extension and global object.
global_context()->set_extension(*inner_global);
@@ -682,33 +824,37 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// object reinitialization.
global_context()->set_security_token(*inner_global);
- Handle<String> object_name = Handle<String>(Heap::Object_symbol());
+ Isolate* isolate = inner_global->GetIsolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+
+ Handle<String> object_name = Handle<String>(heap->Object_symbol());
SetLocalPropertyNoThrow(inner_global, object_name,
- Top::object_function(), DONT_ENUM);
+ isolate->object_function(), DONT_ENUM);
Handle<JSObject> global = Handle<JSObject>(global_context()->global());
// Install global Function object
InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
- empty_function, Builtins::Illegal, true); // ECMA native.
+ empty_function, Builtins::kIllegal, true); // ECMA native.
{ // --- A r r a y ---
Handle<JSFunction> array_function =
InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize,
- Top::initial_object_prototype(), Builtins::ArrayCode,
- true);
+ isolate->initial_object_prototype(),
+ Builtins::kArrayCode, true);
array_function->shared()->set_construct_stub(
- Builtins::builtin(Builtins::ArrayConstructCode));
+ isolate->builtins()->builtin(Builtins::kArrayConstructCode));
array_function->shared()->DontAdaptArguments();
// This seems a bit hackish, but we need to make sure Array.length
// is 1.
array_function->shared()->set_length(1);
Handle<DescriptorArray> array_descriptors =
- Factory::CopyAppendProxyDescriptor(
- Factory::empty_descriptor_array(),
- Factory::length_symbol(),
- Factory::NewProxy(&Accessors::ArrayLength),
+ factory->CopyAppendForeignDescriptor(
+ factory->empty_descriptor_array(),
+ factory->length_symbol(),
+ factory->NewForeign(&Accessors::ArrayLength),
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
// Cache the fast JavaScript array map
@@ -725,33 +871,33 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
{ // --- N u m b e r ---
Handle<JSFunction> number_fun =
InstallFunction(global, "Number", JS_VALUE_TYPE, JSValue::kSize,
- Top::initial_object_prototype(), Builtins::Illegal,
- true);
+ isolate->initial_object_prototype(),
+ Builtins::kIllegal, true);
global_context()->set_number_function(*number_fun);
}
{ // --- B o o l e a n ---
Handle<JSFunction> boolean_fun =
InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
- Top::initial_object_prototype(), Builtins::Illegal,
- true);
+ isolate->initial_object_prototype(),
+ Builtins::kIllegal, true);
global_context()->set_boolean_function(*boolean_fun);
}
{ // --- S t r i n g ---
Handle<JSFunction> string_fun =
InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize,
- Top::initial_object_prototype(), Builtins::Illegal,
- true);
+ isolate->initial_object_prototype(),
+ Builtins::kIllegal, true);
string_fun->shared()->set_construct_stub(
- Builtins::builtin(Builtins::StringConstructCode));
+ isolate->builtins()->builtin(Builtins::kStringConstructCode));
global_context()->set_string_function(*string_fun);
// Add 'length' property to strings.
Handle<DescriptorArray> string_descriptors =
- Factory::CopyAppendProxyDescriptor(
- Factory::empty_descriptor_array(),
- Factory::length_symbol(),
- Factory::NewProxy(&Accessors::StringLength),
+ factory->CopyAppendForeignDescriptor(
+ factory->empty_descriptor_array(),
+ factory->length_symbol(),
+ factory->NewForeign(&Accessors::StringLength),
static_cast<PropertyAttributes>(DONT_ENUM |
DONT_DELETE |
READ_ONLY));
@@ -765,8 +911,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Builtin functions for Date.prototype.
Handle<JSFunction> date_fun =
InstallFunction(global, "Date", JS_VALUE_TYPE, JSValue::kSize,
- Top::initial_object_prototype(), Builtins::Illegal,
- true);
+ isolate->initial_object_prototype(),
+ Builtins::kIllegal, true);
global_context()->set_date_function(*date_fun);
}
@@ -776,8 +922,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Builtin functions for RegExp.prototype.
Handle<JSFunction> regexp_fun =
InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
- Top::initial_object_prototype(), Builtins::Illegal,
- true);
+ isolate->initial_object_prototype(),
+ Builtins::kIllegal, true);
global_context()->set_regexp_function(*regexp_fun);
ASSERT(regexp_fun->has_initial_map());
@@ -785,13 +931,13 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
ASSERT_EQ(0, initial_map->inobject_properties());
- Handle<DescriptorArray> descriptors = Factory::NewDescriptorArray(5);
+ Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(5);
PropertyAttributes final =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
int enum_index = 0;
{
// ECMA-262, section 15.10.7.1.
- FieldDescriptor field(Heap::source_symbol(),
+ FieldDescriptor field(heap->source_symbol(),
JSRegExp::kSourceFieldIndex,
final,
enum_index++);
@@ -799,7 +945,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}
{
// ECMA-262, section 15.10.7.2.
- FieldDescriptor field(Heap::global_symbol(),
+ FieldDescriptor field(heap->global_symbol(),
JSRegExp::kGlobalFieldIndex,
final,
enum_index++);
@@ -807,7 +953,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}
{
// ECMA-262, section 15.10.7.3.
- FieldDescriptor field(Heap::ignore_case_symbol(),
+ FieldDescriptor field(heap->ignore_case_symbol(),
JSRegExp::kIgnoreCaseFieldIndex,
final,
enum_index++);
@@ -815,7 +961,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}
{
// ECMA-262, section 15.10.7.4.
- FieldDescriptor field(Heap::multiline_symbol(),
+ FieldDescriptor field(heap->multiline_symbol(),
JSRegExp::kMultilineFieldIndex,
final,
enum_index++);
@@ -825,7 +971,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// ECMA-262, section 15.10.7.5.
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- FieldDescriptor field(Heap::last_index_symbol(),
+ FieldDescriptor field(heap->last_index_symbol(),
JSRegExp::kLastIndexFieldIndex,
writable,
enum_index++);
@@ -844,13 +990,13 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}
{ // -- J S O N
- Handle<String> name = Factory::NewStringFromAscii(CStrVector("JSON"));
- Handle<JSFunction> cons = Factory::NewFunction(
+ Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
+ Handle<JSFunction> cons = factory->NewFunction(
name,
- Factory::the_hole_value());
+ factory->the_hole_value());
cons->SetInstancePrototype(global_context()->initial_object_prototype());
cons->SetInstanceClassName(*name);
- Handle<JSObject> json_object = Factory::NewJSObject(cons, TENURED);
+ Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
ASSERT(json_object->IsJSObject());
SetLocalPropertyNoThrow(global, name, json_object, DONT_ENUM);
global_context()->set_json_object(*json_object);
@@ -860,14 +1006,15 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Make sure we can recognize argument objects at runtime.
// This is done by introducing an anonymous function with
// class_name equals 'Arguments'.
- Handle<String> symbol = Factory::LookupAsciiSymbol("Arguments");
- Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+ Handle<String> symbol = factory->LookupAsciiSymbol("Arguments");
+ Handle<Code> code = Handle<Code>(
+ isolate->builtins()->builtin(Builtins::kIllegal));
Handle<JSObject> prototype =
Handle<JSObject>(
JSObject::cast(global_context()->object_function()->prototype()));
Handle<JSFunction> function =
- Factory::NewFunctionWithPrototype(symbol,
+ factory->NewFunctionWithPrototype(symbol,
JS_OBJECT_TYPE,
JSObject::kHeaderSize,
prototype,
@@ -876,30 +1023,117 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
ASSERT(!function->has_initial_map());
function->shared()->set_instance_class_name(*symbol);
function->shared()->set_expected_nof_properties(2);
- Handle<JSObject> result = Factory::NewJSObject(function);
+ Handle<JSObject> result = factory->NewJSObject(function);
global_context()->set_arguments_boilerplate(*result);
- // Note: callee must be added as the first property and
- // length must be added as the second property.
- SetLocalPropertyNoThrow(result, Factory::callee_symbol(),
- Factory::undefined_value(),
+ // Note: length must be added as the first property and
+ // callee must be added as the second property.
+ SetLocalPropertyNoThrow(result, factory->length_symbol(),
+ factory->undefined_value(),
DONT_ENUM);
- SetLocalPropertyNoThrow(result, Factory::length_symbol(),
- Factory::undefined_value(),
+ SetLocalPropertyNoThrow(result, factory->callee_symbol(),
+ factory->undefined_value(),
DONT_ENUM);
#ifdef DEBUG
LookupResult lookup;
- result->LocalLookup(Heap::callee_symbol(), &lookup);
+ result->LocalLookup(heap->callee_symbol(), &lookup);
+ ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
+ ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
+
+ result->LocalLookup(heap->length_symbol(), &lookup);
ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
- ASSERT(lookup.GetFieldIndex() == Heap::arguments_callee_index);
+ ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
+
+ ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex);
+ ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
+
+ // Check the state of the object.
+ ASSERT(result->HasFastProperties());
+ ASSERT(result->HasFastElements());
+#endif
+ }
+
+ { // --- aliased_arguments_boilerplate_
+ Handle<Map> old_map(global_context()->arguments_boilerplate()->map());
+ Handle<Map> new_map = factory->CopyMapDropTransitions(old_map);
+ new_map->set_pre_allocated_property_fields(2);
+ Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
+ new_map->set_elements_kind(JSObject::NON_STRICT_ARGUMENTS_ELEMENTS);
+ // Set up a well-formed parameter map to make assertions happy.
+ Handle<FixedArray> elements = factory->NewFixedArray(2);
+ elements->set_map(heap->non_strict_arguments_elements_map());
+ Handle<FixedArray> array;
+ array = factory->NewFixedArray(0);
+ elements->set(0, *array);
+ array = factory->NewFixedArray(0);
+ elements->set(1, *array);
+ result->set_elements(*elements);
+ global_context()->set_aliased_arguments_boilerplate(*result);
+ }
+
+ { // --- strict mode arguments boilerplate
+ const PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+ // Create the ThrowTypeError functions.
+ Handle<FixedArray> callee = factory->NewFixedArray(2, TENURED);
+ Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
+
+ Handle<JSFunction> throw_function =
+ GetThrowTypeErrorFunction();
+
+ // Install the ThrowTypeError functions.
+ callee->set(0, *throw_function);
+ callee->set(1, *throw_function);
+ caller->set(0, *throw_function);
+ caller->set(1, *throw_function);
+
+ // Create the descriptor array for the arguments object.
+ Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(3);
+ { // length
+ FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM);
+ descriptors->Set(0, &d);
+ }
+ { // callee
+ CallbacksDescriptor d(*factory->callee_symbol(), *callee, attributes);
+ descriptors->Set(1, &d);
+ }
+ { // caller
+ CallbacksDescriptor d(*factory->caller_symbol(), *caller, attributes);
+ descriptors->Set(2, &d);
+ }
+ descriptors->Sort();
+
+ // Create the map. Allocate one in-object field for length.
+ Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
+ Heap::kArgumentsObjectSizeStrict);
+ map->set_instance_descriptors(*descriptors);
+ map->set_function_with_prototype(true);
+ map->set_prototype(global_context()->object_function()->prototype());
+ map->set_pre_allocated_property_fields(1);
+ map->set_inobject_properties(1);
+
+ // Copy constructor from the non-strict arguments boilerplate.
+ map->set_constructor(
+ global_context()->arguments_boilerplate()->map()->constructor());
+
+ // Allocate the arguments boilerplate object.
+ Handle<JSObject> result = factory->NewJSObjectFromMap(map);
+ global_context()->set_strict_mode_arguments_boilerplate(*result);
+
+ // Add length property only for strict mode boilerplate.
+ SetLocalPropertyNoThrow(result, factory->length_symbol(),
+ factory->undefined_value(),
+ DONT_ENUM);
- result->LocalLookup(Heap::length_symbol(), &lookup);
+#ifdef DEBUG
+ LookupResult lookup;
+ result->LocalLookup(heap->length_symbol(), &lookup);
ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
- ASSERT(lookup.GetFieldIndex() == Heap::arguments_length_index);
+ ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
- ASSERT(result->map()->inobject_properties() > Heap::arguments_callee_index);
- ASSERT(result->map()->inobject_properties() > Heap::arguments_length_index);
+ ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
// Check the state of the object.
ASSERT(result->HasFastProperties());
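
Net effect of the strict-mode block: one real in-object field plus two
poisoned accessors. A sketch of the resulting shape (reading the
two-element callback arrays as [getter, setter] pairs is an assumption
about how CallbacksDescriptor consumes them):

    // map: 1 in-object field, descriptors sorted as:
    //   "length" -> field 0                      (DONT_ENUM)
    //   "callee" -> {get, set} = ThrowTypeError  (DONT_ENUM|DONT_DELETE|READ_ONLY)
    //   "caller" -> {get, set} = ThrowTypeError  (DONT_ENUM|DONT_DELETE|READ_ONLY)
    // so strict-mode reads or writes of arguments.callee and
    // arguments.caller throw a TypeError.
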
@@ -909,15 +1143,16 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
{ // --- context extension
// Create a function for the context extension objects.
- Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+ Handle<Code> code = Handle<Code>(
+ isolate->builtins()->builtin(Builtins::kIllegal));
Handle<JSFunction> context_extension_fun =
- Factory::NewFunction(Factory::empty_symbol(),
+ factory->NewFunction(factory->empty_symbol(),
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JSObject::kHeaderSize,
code,
true);
- Handle<String> name = Factory::LookupAsciiSymbol("context_extension");
+ Handle<String> name = factory->LookupAsciiSymbol("context_extension");
context_extension_fun->shared()->set_instance_class_name(*name);
global_context()->set_context_extension_function(*context_extension_fun);
}
@@ -926,9 +1161,10 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
{
// Set up the call-as-function delegate.
Handle<Code> code =
- Handle<Code>(Builtins::builtin(Builtins::HandleApiCallAsFunction));
+ Handle<Code>(isolate->builtins()->builtin(
+ Builtins::kHandleApiCallAsFunction));
Handle<JSFunction> delegate =
- Factory::NewFunction(Factory::empty_symbol(), JS_OBJECT_TYPE,
+ factory->NewFunction(factory->empty_symbol(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, code, true);
global_context()->set_call_as_function_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
@@ -937,44 +1173,57 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
{
// Set up the call-as-constructor delegate.
Handle<Code> code =
- Handle<Code>(Builtins::builtin(Builtins::HandleApiCallAsConstructor));
+ Handle<Code>(isolate->builtins()->builtin(
+ Builtins::kHandleApiCallAsConstructor));
Handle<JSFunction> delegate =
- Factory::NewFunction(Factory::empty_symbol(), JS_OBJECT_TYPE,
+ factory->NewFunction(factory->empty_symbol(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, code, true);
global_context()->set_call_as_constructor_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
}
// Initialize the out of memory slot.
- global_context()->set_out_of_memory(Heap::false_value());
+ global_context()->set_out_of_memory(heap->false_value());
// Initialize the data slot.
- global_context()->set_data(Heap::undefined_value());
+ global_context()->set_data(heap->undefined_value());
}
-bool Genesis::CompileBuiltin(int index) {
+bool Genesis::CompileBuiltin(Isolate* isolate, int index) {
Vector<const char> name = Natives::GetScriptName(index);
- Handle<String> source_code = Bootstrapper::NativesSourceLookup(index);
+ Handle<String> source_code =
+ isolate->bootstrapper()->NativesSourceLookup(index);
+ return CompileNative(name, source_code);
+}
+
+
+bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) {
+ Vector<const char> name = ExperimentalNatives::GetScriptName(index);
+ Factory* factory = isolate->factory();
+ Handle<String> source_code =
+ factory->NewStringFromAscii(
+ ExperimentalNatives::GetRawScriptSource(index));
return CompileNative(name, source_code);
}
bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) {
HandleScope scope;
+ Isolate* isolate = source->GetIsolate();
#ifdef ENABLE_DEBUGGER_SUPPORT
- Debugger::set_compiling_natives(true);
+ isolate->debugger()->set_compiling_natives(true);
#endif
bool result = CompileScriptCached(name,
source,
NULL,
NULL,
- Handle<Context>(Top::context()),
+ Handle<Context>(isolate->context()),
true);
- ASSERT(Top::has_pending_exception() != result);
- if (!result) Top::clear_pending_exception();
+ ASSERT(isolate->has_pending_exception() != result);
+ if (!result) isolate->clear_pending_exception();
#ifdef ENABLE_DEBUGGER_SUPPORT
- Debugger::set_compiling_natives(false);
+ isolate->debugger()->set_compiling_natives(false);
#endif
return result;
}
@@ -986,6 +1235,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
v8::Extension* extension,
Handle<Context> top_context,
bool use_runtime_context) {
+ Factory* factory = source->GetIsolate()->factory();
HandleScope scope;
Handle<SharedFunctionInfo> function_info;
@@ -993,7 +1243,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
// function and insert it into the cache.
if (cache == NULL || !cache->Lookup(name, &function_info)) {
ASSERT(source->IsAsciiRepresentation());
- Handle<String> script_name = Factory::NewStringFromUtf8(name);
+ Handle<String> script_name = factory->NewStringFromUtf8(name);
function_info = Compiler::Compile(
source,
script_name,
@@ -1016,7 +1266,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
? Handle<Context>(top_context->runtime_context())
: top_context);
Handle<JSFunction> fun =
- Factory::NewFunctionFromSharedFunctionInfo(function_info, context);
+ factory->NewFunctionFromSharedFunctionInfo(function_info, context);
// Call function using either the runtime object or the global
// object as the receiver. Provide no parameters.
@@ -1025,17 +1275,19 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
? top_context->builtins()
: top_context->global());
bool has_pending_exception;
- Handle<Object> result =
- Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
+ Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
if (has_pending_exception) return false;
return true;
}
-#define INSTALL_NATIVE(Type, name, var) \
- Handle<String> var##_name = Factory::LookupAsciiSymbol(name); \
- global_context()->set_##var(Type::cast( \
- global_context()->builtins()->GetPropertyNoExceptionThrown(*var##_name)));
+#define INSTALL_NATIVE(Type, name, var) \
+ Handle<String> var##_name = factory()->LookupAsciiSymbol(name); \
+ Object* var##_native = \
+ global_context()->builtins()->GetPropertyNoExceptionThrown( \
+ *var##_name); \
+ global_context()->set_##var(Type::cast(var##_native));
+
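
For concreteness, the rewritten macro expands an entry such as the
functionCache line below into isolate-aware code (expansion written out
by hand from the macro definition above):

    Handle<String> function_cache_name =
        factory()->LookupAsciiSymbol("functionCache");
    Object* function_cache_native =
        global_context()->builtins()->GetPropertyNoExceptionThrown(
            *function_cache_name);
    global_context()->set_function_cache(
        JSObject::cast(function_cache_native));

The named intermediate is the visible change: the old expansion nested
the property lookup inside the cast and setter call, while the new one
separates the lookup so it is easier to read and step through.
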
void Genesis::InstallNativeFunctions() {
HandleScope scope;
@@ -1055,6 +1307,13 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(JSObject, "functionCache", function_cache);
}
+void Genesis::InstallExperimentalNativeFunctions() {
+ if (FLAG_harmony_proxies) {
+ INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
+ INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
+ }
+}
+
#undef INSTALL_NATIVE
@@ -1064,17 +1323,19 @@ bool Genesis::InstallNatives() {
// Create a function for the builtins object. Allocate space for the
// JavaScript builtins, a reference to the builtins object
// (itself) and a reference to the global_context directly in the object.
- Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+ Handle<Code> code = Handle<Code>(
+ isolate()->builtins()->builtin(Builtins::kIllegal));
Handle<JSFunction> builtins_fun =
- Factory::NewFunction(Factory::empty_symbol(), JS_BUILTINS_OBJECT_TYPE,
- JSBuiltinsObject::kSize, code, true);
+ factory()->NewFunction(factory()->empty_symbol(),
+ JS_BUILTINS_OBJECT_TYPE,
+ JSBuiltinsObject::kSize, code, true);
- Handle<String> name = Factory::LookupAsciiSymbol("builtins");
+ Handle<String> name = factory()->LookupAsciiSymbol("builtins");
builtins_fun->shared()->set_instance_class_name(*name);
// Allocate the builtins object.
Handle<JSBuiltinsObject> builtins =
- Handle<JSBuiltinsObject>::cast(Factory::NewGlobalObject(builtins_fun));
+ Handle<JSBuiltinsObject>::cast(factory()->NewGlobalObject(builtins_fun));
builtins->set_builtins(*builtins);
builtins->set_global_context(*global_context());
builtins->set_global_receiver(*builtins);
@@ -1085,7 +1346,7 @@ bool Genesis::InstallNatives() {
// global object.
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- Handle<String> global_symbol = Factory::LookupAsciiSymbol("global");
+ Handle<String> global_symbol = factory()->LookupAsciiSymbol("global");
Handle<Object> global_obj(global_context()->global());
SetLocalPropertyNoThrow(builtins, global_symbol, global_obj, attributes);
@@ -1094,12 +1355,13 @@ bool Genesis::InstallNatives() {
// Create a bridge function that has context in the global context.
Handle<JSFunction> bridge =
- Factory::NewFunction(Factory::empty_symbol(), Factory::undefined_value());
- ASSERT(bridge->context() == *Top::global_context());
+ factory()->NewFunction(factory()->empty_symbol(),
+ factory()->undefined_value());
+ ASSERT(bridge->context() == *isolate()->global_context());
// Allocate the builtins context.
Handle<Context> context =
- Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
+ factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
context->set_global(*builtins); // override builtins global object
global_context()->set_runtime_context(*context);
@@ -1108,123 +1370,127 @@ bool Genesis::InstallNatives() {
// Builtin functions for Script.
Handle<JSFunction> script_fun =
InstallFunction(builtins, "Script", JS_VALUE_TYPE, JSValue::kSize,
- Top::initial_object_prototype(), Builtins::Illegal,
- false);
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal, false);
Handle<JSObject> prototype =
- Factory::NewJSObject(Top::object_function(), TENURED);
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(script_fun, prototype);
global_context()->set_script_function(*script_fun);
// Add 'source' and 'data' property to scripts.
PropertyAttributes common_attributes =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- Handle<Proxy> proxy_source = Factory::NewProxy(&Accessors::ScriptSource);
+ Handle<Foreign> foreign_source =
+ factory()->NewForeign(&Accessors::ScriptSource);
Handle<DescriptorArray> script_descriptors =
- Factory::CopyAppendProxyDescriptor(
- Factory::empty_descriptor_array(),
- Factory::LookupAsciiSymbol("source"),
- proxy_source,
+ factory()->CopyAppendForeignDescriptor(
+ factory()->empty_descriptor_array(),
+ factory()->LookupAsciiSymbol("source"),
+ foreign_source,
common_attributes);
- Handle<Proxy> proxy_name = Factory::NewProxy(&Accessors::ScriptName);
+ Handle<Foreign> foreign_name =
+ factory()->NewForeign(&Accessors::ScriptName);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory()->CopyAppendForeignDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("name"),
- proxy_name,
+ factory()->LookupAsciiSymbol("name"),
+ foreign_name,
common_attributes);
- Handle<Proxy> proxy_id = Factory::NewProxy(&Accessors::ScriptId);
+ Handle<Foreign> foreign_id = factory()->NewForeign(&Accessors::ScriptId);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory()->CopyAppendForeignDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("id"),
- proxy_id,
+ factory()->LookupAsciiSymbol("id"),
+ foreign_id,
common_attributes);
- Handle<Proxy> proxy_line_offset =
- Factory::NewProxy(&Accessors::ScriptLineOffset);
+ Handle<Foreign> foreign_line_offset =
+ factory()->NewForeign(&Accessors::ScriptLineOffset);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory()->CopyAppendForeignDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("line_offset"),
- proxy_line_offset,
+ factory()->LookupAsciiSymbol("line_offset"),
+ foreign_line_offset,
common_attributes);
- Handle<Proxy> proxy_column_offset =
- Factory::NewProxy(&Accessors::ScriptColumnOffset);
+ Handle<Foreign> foreign_column_offset =
+ factory()->NewForeign(&Accessors::ScriptColumnOffset);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory()->CopyAppendForeignDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("column_offset"),
- proxy_column_offset,
+ factory()->LookupAsciiSymbol("column_offset"),
+ foreign_column_offset,
common_attributes);
- Handle<Proxy> proxy_data = Factory::NewProxy(&Accessors::ScriptData);
+ Handle<Foreign> foreign_data =
+ factory()->NewForeign(&Accessors::ScriptData);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory()->CopyAppendForeignDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("data"),
- proxy_data,
+ factory()->LookupAsciiSymbol("data"),
+ foreign_data,
common_attributes);
- Handle<Proxy> proxy_type = Factory::NewProxy(&Accessors::ScriptType);
+ Handle<Foreign> foreign_type =
+ factory()->NewForeign(&Accessors::ScriptType);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory()->CopyAppendForeignDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("type"),
- proxy_type,
+ factory()->LookupAsciiSymbol("type"),
+ foreign_type,
common_attributes);
- Handle<Proxy> proxy_compilation_type =
- Factory::NewProxy(&Accessors::ScriptCompilationType);
+ Handle<Foreign> foreign_compilation_type =
+ factory()->NewForeign(&Accessors::ScriptCompilationType);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory()->CopyAppendForeignDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("compilation_type"),
- proxy_compilation_type,
+ factory()->LookupAsciiSymbol("compilation_type"),
+ foreign_compilation_type,
common_attributes);
- Handle<Proxy> proxy_line_ends =
- Factory::NewProxy(&Accessors::ScriptLineEnds);
+ Handle<Foreign> foreign_line_ends =
+ factory()->NewForeign(&Accessors::ScriptLineEnds);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory()->CopyAppendForeignDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("line_ends"),
- proxy_line_ends,
+ factory()->LookupAsciiSymbol("line_ends"),
+ foreign_line_ends,
common_attributes);
- Handle<Proxy> proxy_context_data =
- Factory::NewProxy(&Accessors::ScriptContextData);
+ Handle<Foreign> foreign_context_data =
+ factory()->NewForeign(&Accessors::ScriptContextData);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory()->CopyAppendForeignDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("context_data"),
- proxy_context_data,
+ factory()->LookupAsciiSymbol("context_data"),
+ foreign_context_data,
common_attributes);
- Handle<Proxy> proxy_eval_from_script =
- Factory::NewProxy(&Accessors::ScriptEvalFromScript);
+ Handle<Foreign> foreign_eval_from_script =
+ factory()->NewForeign(&Accessors::ScriptEvalFromScript);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory()->CopyAppendForeignDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("eval_from_script"),
- proxy_eval_from_script,
+ factory()->LookupAsciiSymbol("eval_from_script"),
+ foreign_eval_from_script,
common_attributes);
- Handle<Proxy> proxy_eval_from_script_position =
- Factory::NewProxy(&Accessors::ScriptEvalFromScriptPosition);
+ Handle<Foreign> foreign_eval_from_script_position =
+ factory()->NewForeign(&Accessors::ScriptEvalFromScriptPosition);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory()->CopyAppendForeignDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("eval_from_script_position"),
- proxy_eval_from_script_position,
+ factory()->LookupAsciiSymbol("eval_from_script_position"),
+ foreign_eval_from_script_position,
common_attributes);
- Handle<Proxy> proxy_eval_from_function_name =
- Factory::NewProxy(&Accessors::ScriptEvalFromFunctionName);
+ Handle<Foreign> foreign_eval_from_function_name =
+ factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName);
script_descriptors =
- Factory::CopyAppendProxyDescriptor(
+ factory()->CopyAppendForeignDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("eval_from_function_name"),
- proxy_eval_from_function_name,
+ factory()->LookupAsciiSymbol("eval_from_function_name"),
+ foreign_eval_from_function_name,
common_attributes);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
script_map->set_instance_descriptors(*script_descriptors);
// Allocate the empty script.
- Handle<Script> script = Factory::NewScript(Factory::empty_string());
+ Handle<Script> script = factory()->NewScript(factory()->empty_string());
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
- Heap::public_set_empty_script(*script);
+ heap()->public_set_empty_script(*script);
}
{
// Builtin function for OpaqueReference -- a JSValue-based object,
@@ -1232,14 +1498,52 @@ bool Genesis::InstallNatives() {
// objects, that JavaScript code may not access.
Handle<JSFunction> opaque_reference_fun =
InstallFunction(builtins, "OpaqueReference", JS_VALUE_TYPE,
- JSValue::kSize, Top::initial_object_prototype(),
- Builtins::Illegal, false);
+ JSValue::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal, false);
Handle<JSObject> prototype =
- Factory::NewJSObject(Top::object_function(), TENURED);
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(opaque_reference_fun, prototype);
global_context()->set_opaque_reference_function(*opaque_reference_fun);
}
+ { // --- I n t e r n a l A r r a y ---
+ // An array constructor on the builtins object that works like
+ // the public Array constructor, except that its prototype
+ // doesn't inherit from Object.prototype.
+ // To be used only for internal work by builtins. Instances
+ // must not be leaked to user code.
+ // Only works correctly when called as a constructor. The normal
+ // Array code uses Array.prototype as prototype when called as
+ // a function.
+ Handle<JSFunction> array_function =
+ InstallFunction(builtins,
+ "InternalArray",
+ JS_ARRAY_TYPE,
+ JSArray::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kArrayCode,
+ true);
+ Handle<JSObject> prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ SetPrototype(array_function, prototype);
+
+ array_function->shared()->set_construct_stub(
+ isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
+ array_function->shared()->DontAdaptArguments();
+
+ // Make "length" magic on instances.
+ Handle<DescriptorArray> array_descriptors =
+ factory()->CopyAppendForeignDescriptor(
+ factory()->empty_descriptor_array(),
+ factory()->length_symbol(),
+ factory()->NewForeign(&Accessors::ArrayLength),
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
+
+ array_function->initial_map()->set_instance_descriptors(
+ *array_descriptors);
+ }
+
if (FLAG_disable_native_files) {
PrintF("Warning: Running without installed natives!\n");
return true;
@@ -1249,8 +1553,7 @@ bool Genesis::InstallNatives() {
for (int i = Natives::GetDebuggerCount();
i < Natives::GetBuiltinsCount();
i++) {
- Vector<const char> name = Natives::GetScriptName(i);
- if (!CompileBuiltin(i)) return false;
+ if (!CompileBuiltin(isolate(), i)) return false;
// TODO(ager): We really only need to install the JS builtin
// functions on the builtins object after compiling and running
// runtime.js.
@@ -1270,9 +1573,9 @@ bool Genesis::InstallNatives() {
InstallBuiltinFunctionIds();
// Install Function.prototype.call and apply.
- { Handle<String> key = Factory::function_class_symbol();
+ { Handle<String> key = factory()->function_class_symbol();
Handle<JSFunction> function =
- Handle<JSFunction>::cast(GetProperty(Top::global(), key));
+ Handle<JSFunction>::cast(GetProperty(isolate()->global(), key));
Handle<JSObject> proto =
Handle<JSObject>(JSObject::cast(function->instance_prototype()));
@@ -1280,12 +1583,12 @@ bool Genesis::InstallNatives() {
Handle<JSFunction> call =
InstallFunction(proto, "call", JS_OBJECT_TYPE, JSObject::kHeaderSize,
Handle<JSObject>::null(),
- Builtins::FunctionCall,
+ Builtins::kFunctionCall,
false);
Handle<JSFunction> apply =
InstallFunction(proto, "apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
Handle<JSObject>::null(),
- Builtins::FunctionApply,
+ Builtins::kFunctionApply,
false);
// Make sure that Function.prototype.call appears to be compiled.
@@ -1314,7 +1617,7 @@ bool Genesis::InstallNatives() {
// Add initial map.
Handle<Map> initial_map =
- Factory::NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
+ factory()->NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
initial_map->set_constructor(*array_constructor);
// Set prototype on map.
@@ -1328,13 +1631,13 @@ bool Genesis::InstallNatives() {
ASSERT_EQ(1, array_descriptors->number_of_descriptors());
Handle<DescriptorArray> reresult_descriptors =
- Factory::NewDescriptorArray(3);
+ factory()->NewDescriptorArray(3);
reresult_descriptors->CopyFrom(0, *array_descriptors, 0);
int enum_index = 0;
{
- FieldDescriptor index_field(Heap::index_symbol(),
+ FieldDescriptor index_field(heap()->index_symbol(),
JSRegExpResult::kIndexIndex,
NONE,
enum_index++);
@@ -1342,7 +1645,7 @@ bool Genesis::InstallNatives() {
}
{
- FieldDescriptor input_field(Heap::input_symbol(),
+ FieldDescriptor input_field(heap()->input_symbol(),
JSRegExpResult::kInputIndex,
NONE,
enum_index++);
@@ -1358,6 +1661,7 @@ bool Genesis::InstallNatives() {
global_context()->set_regexp_result_map(*initial_map);
}
+
#ifdef DEBUG
builtins->Verify();
#endif
@@ -1366,20 +1670,38 @@ bool Genesis::InstallNatives() {
}
+bool Genesis::InstallExperimentalNatives() {
+ for (int i = ExperimentalNatives::GetDebuggerCount();
+ i < ExperimentalNatives::GetBuiltinsCount();
+ i++) {
+ if (FLAG_harmony_proxies &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native proxy.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ }
+ }
+
+ InstallExperimentalNativeFunctions();
+
+ return true;
+}
+
+
static Handle<JSObject> ResolveBuiltinIdHolder(
Handle<Context> global_context,
const char* holder_expr) {
+ Factory* factory = global_context->GetIsolate()->factory();
Handle<GlobalObject> global(global_context->global());
const char* period_pos = strchr(holder_expr, '.');
if (period_pos == NULL) {
return Handle<JSObject>::cast(
- GetProperty(global, Factory::LookupAsciiSymbol(holder_expr)));
+ GetProperty(global, factory->LookupAsciiSymbol(holder_expr)));
}
ASSERT_EQ(".prototype", period_pos);
Vector<const char> property(holder_expr,
static_cast<int>(period_pos - holder_expr));
Handle<JSFunction> function = Handle<JSFunction>::cast(
- GetProperty(global, Factory::LookupSymbol(property)));
+ GetProperty(global, factory->LookupSymbol(property)));
return Handle<JSObject>(JSObject::cast(function->prototype()));
}
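
Both branches of ResolveBuiltinIdHolder fall out of typical holder
expressions used by the builtin-id tables (the two strings below are
illustrative, not taken from this diff):

    // No '.': the holder is a property of the global object itself.
    Handle<JSObject> math =
        ResolveBuiltinIdHolder(global_context, "Math");
    // "<Name>.prototype": look up the function, take its prototype.
    Handle<JSObject> string_prototype =
        ResolveBuiltinIdHolder(global_context, "String.prototype");
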
@@ -1387,7 +1709,8 @@ static Handle<JSObject> ResolveBuiltinIdHolder(
static void InstallBuiltinFunctionId(Handle<JSObject> holder,
const char* function_name,
BuiltinFunctionId id) {
- Handle<String> name = Factory::LookupAsciiSymbol(function_name);
+ Factory* factory = holder->GetIsolate()->factory();
+ Handle<String> name = factory->LookupAsciiSymbol(function_name);
Object* function_object = holder->GetProperty(*name)->ToObjectUnchecked();
Handle<JSFunction> function(JSFunction::cast(function_object));
function->shared()->set_function_data(Smi::FromInt(id));
@@ -1414,13 +1737,14 @@ void Genesis::InstallBuiltinFunctionIds() {
F(16, global_context()->regexp_function())
-static FixedArray* CreateCache(int size, JSFunction* factory) {
+static FixedArray* CreateCache(int size, Handle<JSFunction> factory_function) {
+ Factory* factory = factory_function->GetIsolate()->factory();
// Caches are supposed to live for a long time, allocate in old space.
int array_size = JSFunctionResultCache::kEntriesIndex + 2 * size;
// Cannot use cast as object is not fully initialized yet.
JSFunctionResultCache* cache = reinterpret_cast<JSFunctionResultCache*>(
- *Factory::NewFixedArrayWithHoles(array_size, TENURED));
- cache->set(JSFunctionResultCache::kFactoryIndex, factory);
+ *factory->NewFixedArrayWithHoles(array_size, TENURED));
+ cache->set(JSFunctionResultCache::kFactoryIndex, *factory_function);
cache->MakeZeroSize();
return cache;
}
@@ -1433,13 +1757,13 @@ void Genesis::InstallJSFunctionResultCaches() {
#undef F
;
- Handle<FixedArray> caches = Factory::NewFixedArray(kNumberOfCaches, TENURED);
+ Handle<FixedArray> caches = FACTORY->NewFixedArray(kNumberOfCaches, TENURED);
int index = 0;
-#define F(size, func) do { \
- FixedArray* cache = CreateCache((size), (func)); \
- caches->set(index++, cache); \
+#define F(size, func) do { \
+ FixedArray* cache = CreateCache((size), Handle<JSFunction>(func)); \
+ caches->set(index++, cache); \
} while (false)
JSFUNCTION_RESULT_CACHE_LIST(F);
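
Expanding one list entry makes the new handle-based plumbing concrete;
this uses the regexp-function entry visible in the surrounding context
(the size 16 comes from that context line):

    do {
      FixedArray* cache = CreateCache(
          16, Handle<JSFunction>(global_context()->regexp_function()));
      caches->set(index++, cache);
    } while (false);

Wrapping the raw JSFunction* in a Handle before the call is the likely
point of the signature change: CreateCache now allocates through the
factory, which may trigger GC, so the callee must reach the function
through a handle rather than a raw pointer.
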
@@ -1452,19 +1776,17 @@ void Genesis::InstallJSFunctionResultCaches() {
void Genesis::InitializeNormalizedMapCaches() {
Handle<FixedArray> array(
- Factory::NewFixedArray(NormalizedMapCache::kEntries, TENURED));
+ FACTORY->NewFixedArray(NormalizedMapCache::kEntries, TENURED));
global_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array));
}
-int BootstrapperActive::nesting_ = 0;
-
-
bool Bootstrapper::InstallExtensions(Handle<Context> global_context,
v8::ExtensionConfiguration* extensions) {
+ Isolate* isolate = global_context->GetIsolate();
BootstrapperActive active;
- SaveContext saved_context;
- Top::set_context(*global_context);
+ SaveContext saved_context(isolate);
+ isolate->set_context(*global_context);
if (!Genesis::InstallExtensions(global_context, extensions)) return false;
Genesis::InstallSpecialObjects(global_context);
return true;
@@ -1472,20 +1794,21 @@ bool Bootstrapper::InstallExtensions(Handle<Context> global_context,
void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
+ Factory* factory = global_context->GetIsolate()->factory();
HandleScope scope;
Handle<JSGlobalObject> js_global(
JSGlobalObject::cast(global_context->global()));
// Expose the natives in global if a name for it is specified.
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
Handle<String> natives_string =
- Factory::LookupAsciiSymbol(FLAG_expose_natives_as);
+ factory->LookupAsciiSymbol(FLAG_expose_natives_as);
SetLocalPropertyNoThrow(js_global, natives_string,
Handle<JSObject>(js_global->builtins()), DONT_ENUM);
}
Handle<Object> Error = GetProperty(js_global, "Error");
if (Error->IsJSObject()) {
- Handle<String> name = Factory::LookupAsciiSymbol("stackTraceLimit");
+ Handle<String> name = factory->LookupAsciiSymbol("stackTraceLimit");
SetLocalPropertyNoThrow(Handle<JSObject>::cast(Error),
name,
Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
@@ -1495,18 +1818,19 @@ void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Expose the debug global object in global if a name for it is specified.
if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
+ Debug* debug = Isolate::Current()->debug();
// If loading fails we just bail out without installing the
// debugger, but without tanking the whole context.
- if (!Debug::Load()) return;
+ if (!debug->Load()) return;
// Set the security token for the debugger context to the same as
// the shell global context to allow calling between these (otherwise
// exposing debug global object doesn't make much sense).
- Debug::debug_context()->set_security_token(
+ debug->debug_context()->set_security_token(
global_context->security_token());
Handle<String> debug_string =
- Factory::LookupAsciiSymbol(FLAG_expose_debug_as);
- Handle<Object> global_proxy(Debug::debug_context()->global_proxy());
+ factory->LookupAsciiSymbol(FLAG_expose_debug_as);
+ Handle<Object> global_proxy(debug->debug_context()->global_proxy());
SetLocalPropertyNoThrow(js_global, debug_string, global_proxy, DONT_ENUM);
}
#endif
@@ -1515,6 +1839,10 @@ void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
bool Genesis::InstallExtensions(Handle<Context> global_context,
v8::ExtensionConfiguration* extensions) {
+ // TODO(isolates): Extensions on multiple isolates may take a little more
+ // effort. (The external API reads 'ignore'-- does that mean
+ // we can break the interface?)
+
// Clear coloring of extension list
v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
while (current != NULL) {
@@ -1582,17 +1910,18 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
for (int i = 0; i < extension->dependency_count(); i++) {
if (!InstallExtension(extension->dependencies()[i])) return false;
}
+ Isolate* isolate = Isolate::Current();
Vector<const char> source = CStrVector(extension->source());
- Handle<String> source_code = Factory::NewStringFromAscii(source);
+ Handle<String> source_code = isolate->factory()->NewStringFromAscii(source);
bool result = CompileScriptCached(CStrVector(extension->name()),
source_code,
- &extensions_cache,
+ isolate->bootstrapper()->extensions_cache(),
extension,
- Handle<Context>(Top::context()),
+ Handle<Context>(isolate->context()),
false);
- ASSERT(Top::has_pending_exception() != result);
+ ASSERT(isolate->has_pending_exception() != result);
if (!result) {
- Top::clear_pending_exception();
+ isolate->clear_pending_exception();
}
current->set_state(v8::INSTALLED);
return result;
@@ -1601,9 +1930,10 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
HandleScope scope;
+ Factory* factory = builtins->GetIsolate()->factory();
for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
- Handle<String> name = Factory::LookupAsciiSymbol(Builtins::GetName(id));
+ Handle<String> name = factory->LookupAsciiSymbol(Builtins::GetName(id));
Object* function_object = builtins->GetPropertyNoExceptionThrown(*name);
Handle<JSFunction> function
= Handle<JSFunction>(JSFunction::cast(function_object));
@@ -1656,8 +1986,8 @@ bool Genesis::ConfigureApiObject(Handle<JSObject> object,
Handle<JSObject> obj =
Execution::InstantiateObject(object_template, &pending_exception);
if (pending_exception) {
- ASSERT(Top::has_pending_exception());
- Top::clear_pending_exception();
+ ASSERT(isolate()->has_pending_exception());
+ isolate()->clear_pending_exception();
return false;
}
TransferObject(obj, object);
@@ -1705,14 +2035,16 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
break;
}
case MAP_TRANSITION:
+ case EXTERNAL_ARRAY_TRANSITION:
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
// Ignore non-properties.
break;
case NORMAL:
// Do not occur since the from object has fast properties.
+ case HANDLER:
case INTERCEPTOR:
- // No element in instance descriptors have interceptor type.
+ // No element in instance descriptors has proxy or interceptor type.
UNREACHABLE();
break;
}
@@ -1748,13 +2080,14 @@ void Genesis::TransferIndexedProperties(Handle<JSObject> from,
// Cloning the elements array is sufficient.
Handle<FixedArray> from_elements =
Handle<FixedArray>(FixedArray::cast(from->elements()));
- Handle<FixedArray> to_elements = Factory::CopyFixedArray(from_elements);
+ Handle<FixedArray> to_elements = FACTORY->CopyFixedArray(from_elements);
to->set_elements(*to_elements);
}
void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
HandleScope outer;
+ Factory* factory = from->GetIsolate()->factory();
ASSERT(!from->IsJSArray());
ASSERT(!to->IsJSArray());
@@ -1764,29 +2097,31 @@ void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
// Transfer the prototype (new map is needed).
Handle<Map> old_to_map = Handle<Map>(to->map());
- Handle<Map> new_to_map = Factory::CopyMapDropTransitions(old_to_map);
+ Handle<Map> new_to_map = factory->CopyMapDropTransitions(old_to_map);
new_to_map->set_prototype(from->map()->prototype());
to->set_map(*new_to_map);
}
void Genesis::MakeFunctionInstancePrototypeWritable() {
- // Make a new function map so all future functions
- // will have settable and enumerable prototype properties.
- HandleScope scope;
-
- Handle<DescriptorArray> function_map_descriptors =
- ComputeFunctionInstanceDescriptor(ADD_WRITEABLE_PROTOTYPE);
- Handle<Map> fm = Factory::CopyMapDropDescriptors(Top::function_map());
- fm->set_instance_descriptors(*function_map_descriptors);
- fm->set_function_with_prototype(true);
- Top::context()->global_context()->set_function_map(*fm);
+ // The maps with writable prototype are created in CreateEmptyFunction
+ // and CreateStrictModeFunctionMaps respectively. Initially the maps are
+ // created with read-only prototype for JS builtins processing.
+ ASSERT(!function_instance_map_writable_prototype_.is_null());
+ ASSERT(!strict_mode_function_instance_map_writable_prototype_.is_null());
+
+ // Replace function instance maps to make prototype writable.
+ global_context()->set_function_map(
+ *function_instance_map_writable_prototype_);
+ global_context()->set_strict_mode_function_map(
+ *strict_mode_function_instance_map_writable_prototype_);
}
-Genesis::Genesis(Handle<Object> global_object,
+Genesis::Genesis(Isolate* isolate,
+ Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
- v8::ExtensionConfiguration* extensions) {
+ v8::ExtensionConfiguration* extensions) : isolate_(isolate) {
result_ = Handle<Context>::null();
// If V8 isn't running and cannot be initialized, just return.
if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
@@ -1794,18 +2129,15 @@ Genesis::Genesis(Handle<Object> global_object,
// Before creating the roots we must save the context and restore it
// on all function exits.
HandleScope scope;
- SaveContext saved_context;
+ SaveContext saved_context(isolate);
Handle<Context> new_context = Snapshot::NewContextFromSnapshot();
if (!new_context.is_null()) {
global_context_ =
- Handle<Context>::cast(GlobalHandles::Create(*new_context));
+ Handle<Context>::cast(isolate->global_handles()->Create(*new_context));
AddToWeakGlobalContextList(*global_context_);
- Top::set_context(*global_context_);
- i::Counters::contexts_created_by_snapshot.Increment();
- JSFunction* empty_function =
- JSFunction::cast(global_context_->function_map()->prototype());
- empty_function_ = Handle<JSFunction>(empty_function);
+ isolate->set_context(*global_context_);
+ isolate->counters()->contexts_created_by_snapshot()->Increment();
Handle<GlobalObject> inner_global;
Handle<JSGlobalProxy> global_proxy =
CreateNewGlobals(global_template,
@@ -1819,7 +2151,8 @@ Genesis::Genesis(Handle<Object> global_object,
} else {
// We get here if there was no context snapshot.
CreateRoots();
- Handle<JSFunction> empty_function = CreateEmptyFunction();
+ Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
+ CreateStrictModeFunctionMaps(empty_function);
Handle<GlobalObject> inner_global;
Handle<JSGlobalProxy> global_proxy =
CreateNewGlobals(global_template, global_object, &inner_global);
@@ -1832,9 +2165,12 @@ Genesis::Genesis(Handle<Object> global_object,
MakeFunctionInstancePrototypeWritable();
if (!ConfigureGlobalObjects(global_template)) return;
- i::Counters::contexts_created_from_scratch.Increment();
+ isolate->counters()->contexts_created_from_scratch()->Increment();
}
+ // Install experimental natives.
+ if (!InstallExperimentalNatives()) return;
+
result_ = global_context_;
}
@@ -1843,46 +2179,28 @@ Genesis::Genesis(Handle<Object> global_object,
// Reserve space for statics needing saving and restoring.
int Bootstrapper::ArchiveSpacePerThread() {
- return BootstrapperActive::ArchiveSpacePerThread();
+ return sizeof(NestingCounterType);
}
// Archive statics that are thread local.
char* Bootstrapper::ArchiveState(char* to) {
- return BootstrapperActive::ArchiveState(to);
+ *reinterpret_cast<NestingCounterType*>(to) = nesting_;
+ nesting_ = 0;
+ return to + sizeof(NestingCounterType);
}
// Restore statics that are thread local.
char* Bootstrapper::RestoreState(char* from) {
- return BootstrapperActive::RestoreState(from);
+ nesting_ = *reinterpret_cast<NestingCounterType*>(from);
+ return from + sizeof(NestingCounterType);
}
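
A sketch of the preemption contract these methods implement per
bootstrapper (the caller shown is hypothetical; the real driver lives
in the thread-manager code):

    char* p = buffer;  // at least ArchiveSpacePerThread() bytes long
    p = bootstrapper->ArchiveState(p);   // saves nesting_, then zeroes it
    // ... another thread runs; when this one is scheduled back in:
    char* q = buffer;
    q = bootstrapper->RestoreState(q);   // reinstates the saved nesting_

Zeroing nesting_ in ArchiveState matters: the next thread must observe
IsActive() == false unless it restores a nonzero count of its own.
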
// Called when the top-level V8 mutex is destroyed.
void Bootstrapper::FreeThreadResources() {
- ASSERT(!BootstrapperActive::IsActive());
-}
-
-
-// Reserve space for statics needing saving and restoring.
-int BootstrapperActive::ArchiveSpacePerThread() {
- return sizeof(nesting_);
-}
-
-
-// Archive statics that are thread local.
-char* BootstrapperActive::ArchiveState(char* to) {
- *reinterpret_cast<int*>(to) = nesting_;
- nesting_ = 0;
- return to + sizeof(nesting_);
-}
-
-
-// Restore statics that are thread local.
-char* BootstrapperActive::RestoreState(char* from) {
- nesting_ = *reinterpret_cast<int*>(from);
- return from + sizeof(nesting_);
+ ASSERT(!IsActive());
}
} } // namespace v8::internal
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 2b789e28e..abf61b9fe 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -29,77 +29,148 @@
#ifndef V8_BOOTSTRAPPER_H_
#define V8_BOOTSTRAPPER_H_
+#include "allocation.h"
+
namespace v8 {
namespace internal {
-class BootstrapperActive BASE_EMBEDDED {
+// A SourceCodeCache uses a FixedArray to store pairs of
+// (AsciiString*, SharedFunctionInfo*), mapping names of native code files
+// (runtime.js, etc.) to precompiled functions. Instead of mapping
+// names to functions, it might make sense to let the JS2C tool
+// generate an index for each native JS file.
+class SourceCodeCache BASE_EMBEDDED {
public:
- BootstrapperActive() { nesting_++; }
- ~BootstrapperActive() { nesting_--; }
+ explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
- // Support for thread preemption.
- static int ArchiveSpacePerThread();
- static char* ArchiveState(char* to);
- static char* RestoreState(char* from);
+ void Initialize(bool create_heap_objects) {
+ cache_ = create_heap_objects ? HEAP->empty_fixed_array() : NULL;
+ }
+
+ void Iterate(ObjectVisitor* v) {
+ v->VisitPointer(BitCast<Object**, FixedArray**>(&cache_));
+ }
+
+ bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
+ for (int i = 0; i < cache_->length(); i += 2) {
+ SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
+ if (str->IsEqualTo(name)) {
+ *handle = Handle<SharedFunctionInfo>(
+ SharedFunctionInfo::cast(cache_->get(i + 1)));
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
+ HandleScope scope;
+ int length = cache_->length();
+ Handle<FixedArray> new_array =
+ FACTORY->NewFixedArray(length + 2, TENURED);
+ cache_->CopyTo(0, *new_array, 0, cache_->length());
+ cache_ = *new_array;
+ Handle<String> str = FACTORY->NewStringFromAscii(name, TENURED);
+ cache_->set(length, *str);
+ cache_->set(length + 1, *shared);
+ Script::cast(shared->script())->set_type(Smi::FromInt(type_));
+ }
private:
- static bool IsActive() { return nesting_ != 0; }
- static int nesting_;
- friend class Bootstrapper;
+ Script::Type type_;
+ FixedArray* cache_;
+ DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
};
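
A usage sketch of the cache (the Script::TYPE_EXTENSION argument and
the compile step are assumptions; the bootstrapper's extensions_cache_
is the real client):

    SourceCodeCache cache(Script::TYPE_EXTENSION);
    cache.Initialize(true);  // start from the empty fixed array
    Handle<SharedFunctionInfo> info;
    if (!cache.Lookup(name, &info)) {
      Handle<SharedFunctionInfo> compiled = CompileIt(name);  // hypothetical
      cache.Add(name, compiled);
    }

Add grows the FixedArray by copying into a new, two-slots-larger array
on every insertion; that is acceptable because the set of native and
extension scripts is tiny and insertions stop after bootstrapping.
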
// The Bootstrapper is the public interface for creating a JavaScript global
// context.
-class Bootstrapper : public AllStatic {
+class Bootstrapper {
public:
// Requires: Heap::Setup has been called.
- static void Initialize(bool create_heap_objects);
- static void TearDown();
+ void Initialize(bool create_heap_objects);
+ void TearDown();
// Creates a JavaScript Global Context with initial object graph.
// The returned value is a global handle cast to V8Environment*.
- static Handle<Context> CreateEnvironment(
+ Handle<Context> CreateEnvironment(
+ Isolate* isolate,
Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
v8::ExtensionConfiguration* extensions);
// Detach the environment from its outer global object.
- static void DetachGlobal(Handle<Context> env);
+ void DetachGlobal(Handle<Context> env);
// Reattach an outer global object to an environment.
- static void ReattachGlobal(Handle<Context> env, Handle<Object> global_object);
+ void ReattachGlobal(Handle<Context> env, Handle<Object> global_object);
// Traverses the pointers for memory management.
- static void Iterate(ObjectVisitor* v);
+ void Iterate(ObjectVisitor* v);
// Accessor for the native scripts source code.
- static Handle<String> NativesSourceLookup(int index);
+ Handle<String> NativesSourceLookup(int index);
// Tells whether bootstrapping is active.
- static bool IsActive() { return BootstrapperActive::IsActive(); }
+ bool IsActive() const { return nesting_ != 0; }
// Support for thread preemption.
static int ArchiveSpacePerThread();
- static char* ArchiveState(char* to);
- static char* RestoreState(char* from);
- static void FreeThreadResources();
+ char* ArchiveState(char* to);
+ char* RestoreState(char* from);
+ void FreeThreadResources();
// This will allocate a char array that is deleted when V8 is shut down.
// It should only be used for strictly finite allocations.
- static char* AllocateAutoDeletedArray(int bytes);
+ char* AllocateAutoDeletedArray(int bytes);
// Used for new context creation.
- static bool InstallExtensions(Handle<Context> global_context,
- v8::ExtensionConfiguration* extensions);
+ bool InstallExtensions(Handle<Context> global_context,
+ v8::ExtensionConfiguration* extensions);
+
+ SourceCodeCache* extensions_cache() { return &extensions_cache_; }
+
+ private:
+ typedef int NestingCounterType;
+ NestingCounterType nesting_;
+ SourceCodeCache extensions_cache_;
+ // This is for delete, not delete[].
+ List<char*>* delete_these_non_arrays_on_tear_down_;
+ // This is for delete[]
+ List<char*>* delete_these_arrays_on_tear_down_;
+
+ friend class BootstrapperActive;
+ friend class Isolate;
+ friend class NativesExternalStringResource;
+
+ Bootstrapper();
+
+ DISALLOW_COPY_AND_ASSIGN(Bootstrapper);
+};
+
+
+class BootstrapperActive BASE_EMBEDDED {
+ public:
+ BootstrapperActive() {
+ ++Isolate::Current()->bootstrapper()->nesting_;
+ }
+
+ ~BootstrapperActive() {
+ --Isolate::Current()->bootstrapper()->nesting_;
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BootstrapperActive);
};
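
BootstrapperActive is now a plain RAII guard over the isolate's
counter; a sketch of the interaction with IsActive(), assuming no
enclosing guard is live:

    {
      BootstrapperActive active;  // ++nesting_ on the current isolate
      ASSERT(Isolate::Current()->bootstrapper()->IsActive());
      // ... create the environment, install natives, etc. ...
    }                             // --nesting_ in the destructor
    ASSERT(!Isolate::Current()->bootstrapper()->IsActive());

Guards nest, so recursive bootstrap entry points each bump the counter
and IsActive() drops to false only when the outermost guard unwinds.
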
class NativesExternalStringResource
: public v8::String::ExternalAsciiStringResource {
public:
- explicit NativesExternalStringResource(const char* source);
+ NativesExternalStringResource(Bootstrapper* bootstrapper,
+ const char* source,
+ size_t length);
const char* data() const {
return data_;
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 0f9d152f5..6a44d8cae 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -107,7 +107,6 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
} // namespace
-
// ----------------------------------------------------------------------------
// Support macro for defining builtins in C++.
// ----------------------------------------------------------------------------
@@ -123,26 +122,27 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
#ifdef DEBUG
-#define BUILTIN(name) \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
- name##ArgumentsType args); \
- MUST_USE_RESULT static MaybeObject* Builtin_##name( \
- name##ArgumentsType args) { \
- args.Verify(); \
- return Builtin_Impl_##name(args); \
- } \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
- name##ArgumentsType args)
+#define BUILTIN(name) \
+ MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
+ name##ArgumentsType args, Isolate* isolate); \
+ MUST_USE_RESULT static MaybeObject* Builtin_##name( \
+ name##ArgumentsType args, Isolate* isolate) { \
+ ASSERT(isolate == Isolate::Current()); \
+ args.Verify(); \
+ return Builtin_Impl_##name(args, isolate); \
+ } \
+ MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
+ name##ArgumentsType args, Isolate* isolate)
#else // For release mode.
-#define BUILTIN(name) \
- static MaybeObject* Builtin_##name(name##ArgumentsType args)
+#define BUILTIN(name) \
+ static MaybeObject* Builtin_##name(name##ArgumentsType args, Isolate* isolate)
#endif
-static inline bool CalledAsConstructor() {
+static inline bool CalledAsConstructor(Isolate* isolate) {
#ifdef DEBUG
// Calculate the result using a full stack frame iterator and check
// that the state of the stack is as we assume it to be in the
@@ -153,7 +153,7 @@ static inline bool CalledAsConstructor() {
StackFrame* frame = it.frame();
bool reference_result = frame->is_construct();
#endif
- Address fp = Top::c_entry_fp(Top::GetCurrentThread());
+ Address fp = Isolate::c_entry_fp(isolate->thread_local_top());
// Because we know fp points to an exit frame we can use the relevant
// part of ExitFrame::ComputeCallerState directly.
const int kCallerOffset = ExitFrameConstants::kCallerFPOffset;
@@ -172,30 +172,30 @@ static inline bool CalledAsConstructor() {
// ----------------------------------------------------------------------------
-
BUILTIN(Illegal) {
UNREACHABLE();
- return Heap::undefined_value(); // Make compiler happy.
+ return isolate->heap()->undefined_value(); // Make compiler happy.
}
BUILTIN(EmptyFunction) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
BUILTIN(ArrayCodeGeneric) {
- Counters::array_function_runtime.Increment();
+ Heap* heap = isolate->heap();
+ isolate->counters()->array_function_runtime()->Increment();
JSArray* array;
- if (CalledAsConstructor()) {
+ if (CalledAsConstructor(isolate)) {
array = JSArray::cast(*args.receiver());
} else {
// Allocate the JS Array
JSFunction* constructor =
- Top::context()->global_context()->array_function();
+ isolate->context()->global_context()->array_function();
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateJSObject(constructor);
+ { MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
array = JSArray::cast(obj);
@@ -212,7 +212,7 @@ BUILTIN(ArrayCodeGeneric) {
int len = Smi::cast(obj)->value();
if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArrayWithHoles(len);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
array->SetContent(FixedArray::cast(obj));
@@ -235,7 +235,7 @@ BUILTIN(ArrayCodeGeneric) {
int number_of_elements = args.length() - 1;
Smi* len = Smi::FromInt(number_of_elements);
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArrayWithHoles(len->value());
+ { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len->value());
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
@@ -255,77 +255,81 @@ BUILTIN(ArrayCodeGeneric) {
}
-MUST_USE_RESULT static MaybeObject* AllocateJSArray() {
+MUST_USE_RESULT static MaybeObject* AllocateJSArray(Heap* heap) {
JSFunction* array_function =
- Top::context()->global_context()->array_function();
+ heap->isolate()->context()->global_context()->array_function();
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateJSObject(array_function);
+ { MaybeObject* maybe_result = heap->AllocateJSObject(array_function);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
return result;
}
-MUST_USE_RESULT static MaybeObject* AllocateEmptyJSArray() {
+MUST_USE_RESULT static MaybeObject* AllocateEmptyJSArray(Heap* heap) {
Object* result;
- { MaybeObject* maybe_result = AllocateJSArray();
+ { MaybeObject* maybe_result = AllocateJSArray(heap);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
JSArray* result_array = JSArray::cast(result);
result_array->set_length(Smi::FromInt(0));
- result_array->set_elements(Heap::empty_fixed_array());
+ result_array->set_elements(heap->empty_fixed_array());
return result_array;
}
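
Both helpers use the MaybeObject protocol that threads through all of
builtins.cc; the shape is worth spelling out once:

    Object* result;
    { MaybeObject* maybe_result = AllocateEmptyJSArray(heap);
      // On success, ToObject() stores the real Object* and returns true.
      // On failure (the allocation wants a GC retry) it returns false
      // and the failure marker itself is propagated up unchanged.
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }

This is also why the helpers are MUST_USE_RESULT: dropping the returned
MaybeObject* would silently swallow a retry request.
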
-static void CopyElements(AssertNoAllocation* no_gc,
+static void CopyElements(Heap* heap,
+ AssertNoAllocation* no_gc,
FixedArray* dst,
int dst_index,
FixedArray* src,
int src_index,
int len) {
ASSERT(dst != src); // Use MoveElements instead.
- ASSERT(dst->map() != Heap::fixed_cow_array_map());
+ ASSERT(dst->map() != HEAP->fixed_cow_array_map());
ASSERT(len > 0);
CopyWords(dst->data_start() + dst_index,
src->data_start() + src_index,
len);
WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
if (mode == UPDATE_WRITE_BARRIER) {
- Heap::RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
+ heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
}
}
-static void MoveElements(AssertNoAllocation* no_gc,
+static void MoveElements(Heap* heap,
+ AssertNoAllocation* no_gc,
FixedArray* dst,
int dst_index,
FixedArray* src,
int src_index,
int len) {
- ASSERT(dst->map() != Heap::fixed_cow_array_map());
+ ASSERT(dst->map() != HEAP->fixed_cow_array_map());
memmove(dst->data_start() + dst_index,
src->data_start() + src_index,
len * kPointerSize);
WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
if (mode == UPDATE_WRITE_BARRIER) {
- Heap::RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
+ heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
}
}
-static void FillWithHoles(FixedArray* dst, int from, int to) {
- ASSERT(dst->map() != Heap::fixed_cow_array_map());
- MemsetPointer(dst->data_start() + from, Heap::the_hole_value(), to - from);
+static void FillWithHoles(Heap* heap, FixedArray* dst, int from, int to) {
+ ASSERT(dst->map() != heap->fixed_cow_array_map());
+ MemsetPointer(dst->data_start() + from, heap->the_hole_value(), to - from);
}
-static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
- ASSERT(elms->map() != Heap::fixed_cow_array_map());
+static FixedArray* LeftTrimFixedArray(Heap* heap,
+ FixedArray* elms,
+ int to_trim) {
+ ASSERT(elms->map() != HEAP->fixed_cow_array_map());
// For now this trick is only applied to fixed arrays in new and paged space.
// In large object space the object's start must coincide with the chunk
// start, and thus the trick is just not applicable.
- ASSERT(!Heap::lo_space()->Contains(elms));
+ ASSERT(!HEAP->lo_space()->Contains(elms));
STATIC_ASSERT(FixedArray::kMapOffset == 0);
STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
@@ -336,7 +340,7 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
const int len = elms->length();
if (to_trim > FixedArray::kHeaderSize / kPointerSize &&
- !Heap::new_space()->Contains(elms)) {
+ !heap->new_space()->Contains(elms)) {
// If we are doing a big trim in old space then we zap the space that was
// formerly part of the array so that the GC (aided by the card-based
// remembered set) won't find pointers to new-space there.
@@ -349,9 +353,9 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
- Heap::CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
+ heap->CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
- former_start[to_trim] = Heap::fixed_array_map();
+ former_start[to_trim] = heap->fixed_array_map();
former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
return FixedArray::cast(HeapObject::FromAddress(
@@ -359,55 +363,59 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
}
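
The trick above is easiest to see as a picture of word-sized slots
(sketch only; each slot is kPointerSize):

    // before:  [ map ][ len ][ e0 ][ e1 ][ e2 ][ e3 ] ...
    // trim 2:  [ filler, 2 words ][ map ][ len-2 ][ e2 ][ e3 ] ...
    //                             ^ returned FixedArray starts here

No element is copied: a filler object (plus zapping in old space for
the card-based remembered set) covers the abandoned prefix, and a fresh
map/length pair is written immediately before the surviving elements.
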
-static bool ArrayPrototypeHasNoElements(Context* global_context,
+static bool ArrayPrototypeHasNoElements(Heap* heap,
+ Context* global_context,
JSObject* array_proto) {
// This method depends on the non-writability of Object and Array prototype
// fields.
- if (array_proto->elements() != Heap::empty_fixed_array()) return false;
+ if (array_proto->elements() != heap->empty_fixed_array()) return false;
// Hidden prototype
array_proto = JSObject::cast(array_proto->GetPrototype());
- ASSERT(array_proto->elements() == Heap::empty_fixed_array());
+ ASSERT(array_proto->elements() == heap->empty_fixed_array());
// Object.prototype
Object* proto = array_proto->GetPrototype();
- if (proto == Heap::null_value()) return false;
+ if (proto == heap->null_value()) return false;
array_proto = JSObject::cast(proto);
if (array_proto != global_context->initial_object_prototype()) return false;
- if (array_proto->elements() != Heap::empty_fixed_array()) return false;
+ if (array_proto->elements() != heap->empty_fixed_array()) return false;
return array_proto->GetPrototype()->IsNull();
}
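
The walk encodes the exact chain the array fast paths rely on:

    // receiver -> Array.prototype      (elements must be empty)
    //          -> hidden prototype     (asserted empty)
    //          -> Object.prototype     (the initial one, empty)
    //          -> null
    // Any indexed property installed on either prototype breaks the
    // invariant and callers fall back to the generic JS builtins.

Threading heap through, instead of the old static Heap:: calls, is what
lets the check run against the correct isolate's roots.
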
MUST_USE_RESULT
static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
- Object* receiver) {
+ Heap* heap, Object* receiver) {
if (!receiver->IsJSArray()) return NULL;
JSArray* array = JSArray::cast(receiver);
HeapObject* elms = array->elements();
- if (elms->map() == Heap::fixed_array_map()) return elms;
- if (elms->map() == Heap::fixed_cow_array_map()) {
+ if (elms->map() == heap->fixed_array_map()) return elms;
+ if (elms->map() == heap->fixed_cow_array_map()) {
return array->EnsureWritableFastElements();
}
return NULL;
}
-static inline bool IsJSArrayFastElementMovingAllowed(JSArray* receiver) {
- Context* global_context = Top::context()->global_context();
+static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
+ JSArray* receiver) {
+ Context* global_context = heap->isolate()->context()->global_context();
JSObject* array_proto =
JSObject::cast(global_context->array_function()->prototype());
return receiver->GetPrototype() == array_proto &&
- ArrayPrototypeHasNoElements(global_context, array_proto);
+ ArrayPrototypeHasNoElements(heap, global_context, array_proto);
}
MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
+ Isolate* isolate,
const char* name,
BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
- HandleScope handleScope;
+ HandleScope handleScope(isolate);
Handle<Object> js_builtin =
- GetProperty(Handle<JSObject>(Top::global_context()->builtins()),
- name);
+ GetProperty(Handle<JSObject>(
+ isolate->global_context()->builtins()),
+ name);
ASSERT(js_builtin->IsJSFunction());
Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
ScopedVector<Object**> argv(args.length() - 1);
@@ -427,11 +435,14 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
BUILTIN(ArrayPush) {
+ Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(receiver);
- if (maybe_elms_obj == NULL) return CallJsBuiltin("ArrayPush", args);
+ EnsureJSArrayWithWritableFastElements(heap, receiver);
+ if (maybe_elms_obj == NULL) {
+ return CallJsBuiltin(isolate, "ArrayPush", args);
+ }
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
FixedArray* elms = FixedArray::cast(elms_obj);
@@ -452,16 +463,16 @@ BUILTIN(ArrayPush) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateUninitializedFixedArray(capacity);
+ { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
if (len > 0) {
- CopyElements(&no_gc, new_elms, 0, elms, 0, len);
+ CopyElements(heap, &no_gc, new_elms, 0, elms, 0, len);
}
- FillWithHoles(new_elms, new_length, capacity);
+ FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
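
The capacity formula grows the new backing store to roughly 1.5x the required length plus a constant pad of 16 slots, so tiny arrays do not reallocate on every push while large ones grow geometrically:

// Growth policy from the hunk above: ~1.5x plus a pad of 16 slots.
int NewCapacity(int new_length) {
  return new_length + (new_length >> 1) + 16;
}
// NewCapacity(1) == 17, NewCapacity(100) == 166, NewCapacity(1000) == 1516.
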
@@ -481,18 +492,19 @@ BUILTIN(ArrayPush) {
BUILTIN(ArrayPop) {
+ Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(receiver);
- if (maybe_elms_obj == NULL) return CallJsBuiltin("ArrayPop", args);
+ EnsureJSArrayWithWritableFastElements(heap, receiver);
+ if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
int len = Smi::cast(array->length())->value();
- if (len == 0) return Heap::undefined_value();
+ if (len == 0) return heap->undefined_value();
// Get top element
MaybeObject* top = elms->get(len - 1);
@@ -513,38 +525,40 @@ BUILTIN(ArrayPop) {
BUILTIN(ArrayShift) {
+ Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(receiver);
- if (maybe_elms_obj == NULL) return CallJsBuiltin("ArrayShift", args);
+ EnsureJSArrayWithWritableFastElements(heap, receiver);
+ if (maybe_elms_obj == NULL)
+ return CallJsBuiltin(isolate, "ArrayShift", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
- if (!IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
- return CallJsBuiltin("ArrayShift", args);
+ if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+ return CallJsBuiltin(isolate, "ArrayShift", args);
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
- if (len == 0) return Heap::undefined_value();
+ if (len == 0) return heap->undefined_value();
// Get first element
Object* first = elms->get(0);
if (first->IsTheHole()) {
- first = Heap::undefined_value();
+ first = heap->undefined_value();
}
- if (!Heap::lo_space()->Contains(elms)) {
+ if (!heap->lo_space()->Contains(elms)) {
// Because elms is still in the same space it used to be in,
// there is no need to update the region dirty mark.
- array->set_elements(LeftTrimFixedArray(elms, 1), SKIP_WRITE_BARRIER);
+ array->set_elements(LeftTrimFixedArray(heap, elms, 1), SKIP_WRITE_BARRIER);
} else {
// Shift the elements.
AssertNoAllocation no_gc;
- MoveElements(&no_gc, elms, 0, elms, 1, len - 1);
- elms->set(len - 1, Heap::the_hole_value());
+ MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1);
+ elms->set(len - 1, heap->the_hole_value());
}
// Set the length.
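
Shift picks between two strategies: outside large-object space the store is left-trimmed in place, which is effectively constant time because only the array's start and length words change, while lo_space arrays fall back to moving every element down one slot and writing a hole into the vacated last position. The move path, sketched over a plain int array:

#include <cstring>

// Illustrative only: memmove len-1 slots down, then hole out the tail,
// mirroring MoveElements + set(len - 1, the_hole) above.
void ShiftByMove(int* elms, int len, int hole_marker) {
  std::memmove(elms, elms + 1, (len - 1) * sizeof(int));
  elms[len - 1] = hole_marker;
}
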
@@ -555,15 +569,17 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) {
+ Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(receiver);
- if (maybe_elms_obj == NULL) return CallJsBuiltin("ArrayUnshift", args);
+ EnsureJSArrayWithWritableFastElements(heap, receiver);
+ if (maybe_elms_obj == NULL)
+ return CallJsBuiltin(isolate, "ArrayUnshift", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
- if (!IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
- return CallJsBuiltin("ArrayUnshift", args);
+ if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+ return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
@@ -580,22 +596,22 @@ BUILTIN(ArrayUnshift) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateUninitializedFixedArray(capacity);
+ { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
if (len > 0) {
- CopyElements(&no_gc, new_elms, to_add, elms, 0, len);
+ CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len);
}
- FillWithHoles(new_elms, new_length, capacity);
+ FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
} else {
AssertNoAllocation no_gc;
- MoveElements(&no_gc, elms, to_add, elms, 0, len);
+ MoveElements(heap, &no_gc, elms, to_add, elms, 0, len);
}
// Add the provided values.
@@ -612,14 +628,15 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
+ Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
FixedArray* elms;
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
if (!array->HasFastElements() ||
- !IsJSArrayFastElementMovingAllowed(array)) {
- return CallJsBuiltin("ArraySlice", args);
+ !IsJSArrayFastElementMovingAllowed(heap, array)) {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
}
elms = FixedArray::cast(array->elements());
@@ -628,28 +645,28 @@ BUILTIN(ArraySlice) {
// Array.slice(arguments, ...) is quite a common idiom (notably more
// than 50% of invocations in Web apps). Handle it in C++ as well.
Map* arguments_map =
- Top::context()->global_context()->arguments_boilerplate()->map();
+ isolate->context()->global_context()->arguments_boilerplate()->map();
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map
&& JSObject::cast(receiver)->HasFastElements();
if (!is_arguments_object_with_fast_elements) {
- return CallJsBuiltin("ArraySlice", args);
+ return CallJsBuiltin(isolate, "ArraySlice", args);
}
elms = FixedArray::cast(JSObject::cast(receiver)->elements());
Object* len_obj = JSObject::cast(receiver)
- ->InObjectPropertyAt(Heap::arguments_length_index);
+ ->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
if (!len_obj->IsSmi()) {
- return CallJsBuiltin("ArraySlice", args);
+ return CallJsBuiltin(isolate, "ArraySlice", args);
}
len = Smi::cast(len_obj)->value();
if (len > elms->length()) {
- return CallJsBuiltin("ArraySlice", args);
+ return CallJsBuiltin(isolate, "ArraySlice", args);
}
for (int i = 0; i < len; i++) {
- if (elms->get(i) == Heap::the_hole_value()) {
- return CallJsBuiltin("ArraySlice", args);
+ if (elms->get(i) == heap->the_hole_value()) {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
}
}
}
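
The arguments-object fast path is gated by a ladder of cheap checks: the receiver's map must equal the boilerplate arguments map, the length read from the fixed in-object slot (renamed to the constant style Heap::kArgumentsLengthIndex) must be a Smi no larger than the backing store, and no element may be a hole, since holes would require prototype-chain lookups. Sketched with simplified parameters:

// Every failed check routes to the generic JS implementation.
bool CanSliceArgumentsFast(bool is_arguments_map, bool has_fast_elements,
                           int len, int backing_length, bool has_hole) {
  if (!is_arguments_map || !has_fast_elements) return false;
  if (len > backing_length) return false;  // length out of sync with store
  if (has_hole) return false;              // holes need a prototype walk
  return true;
}
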
@@ -666,14 +683,14 @@ BUILTIN(ArraySlice) {
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
} else if (!arg1->IsUndefined()) {
- return CallJsBuiltin("ArraySlice", args);
+ return CallJsBuiltin(isolate, "ArraySlice", args);
}
if (n_arguments > 1) {
Object* arg2 = args[2];
if (arg2->IsSmi()) {
relative_end = Smi::cast(arg2)->value();
} else if (!arg2->IsUndefined()) {
- return CallJsBuiltin("ArraySlice", args);
+ return CallJsBuiltin(isolate, "ArraySlice", args);
}
}
}
@@ -689,23 +706,23 @@ BUILTIN(ArraySlice) {
// Calculate the length of the result array.
int result_len = final - k;
if (result_len <= 0) {
- return AllocateEmptyJSArray();
+ return AllocateEmptyJSArray(heap);
}
Object* result;
- { MaybeObject* maybe_result = AllocateJSArray();
+ { MaybeObject* maybe_result = AllocateJSArray(heap);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
JSArray* result_array = JSArray::cast(result);
{ MaybeObject* maybe_result =
- Heap::AllocateUninitializedFixedArray(result_len);
+ heap->AllocateUninitializedFixedArray(result_len);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
FixedArray* result_elms = FixedArray::cast(result);
AssertNoAllocation no_gc;
- CopyElements(&no_gc, result_elms, 0, elms, k, result_len);
+ CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len);
// Set elements.
result_array->set_elements(result_elms);
@@ -717,15 +734,17 @@ BUILTIN(ArraySlice) {
BUILTIN(ArraySplice) {
+ Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(receiver);
- if (maybe_elms_obj == NULL) return CallJsBuiltin("ArraySplice", args);
+ EnsureJSArrayWithWritableFastElements(heap, receiver);
+ if (maybe_elms_obj == NULL)
+ return CallJsBuiltin(isolate, "ArraySplice", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
- if (!IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
- return CallJsBuiltin("ArraySplice", args);
+ if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+ return CallJsBuiltin(isolate, "ArraySplice", args);
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
@@ -741,7 +760,7 @@ BUILTIN(ArraySplice) {
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
} else if (!arg1->IsUndefined()) {
- return CallJsBuiltin("ArraySplice", args);
+ return CallJsBuiltin(isolate, "ArraySplice", args);
}
}
int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
@@ -763,7 +782,7 @@ BUILTIN(ArraySplice) {
if (arg2->IsSmi()) {
value = Smi::cast(arg2)->value();
} else {
- return CallJsBuiltin("ArraySplice", args);
+ return CallJsBuiltin(isolate, "ArraySplice", args);
}
}
actual_delete_count = Min(Max(value, 0), len - actual_start);
@@ -772,27 +791,28 @@ BUILTIN(ArraySplice) {
JSArray* result_array = NULL;
if (actual_delete_count == 0) {
Object* result;
- { MaybeObject* maybe_result = AllocateEmptyJSArray();
+ { MaybeObject* maybe_result = AllocateEmptyJSArray(heap);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
result_array = JSArray::cast(result);
} else {
// Allocate result array.
Object* result;
- { MaybeObject* maybe_result = AllocateJSArray();
+ { MaybeObject* maybe_result = AllocateJSArray(heap);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
result_array = JSArray::cast(result);
{ MaybeObject* maybe_result =
- Heap::AllocateUninitializedFixedArray(actual_delete_count);
+ heap->AllocateUninitializedFixedArray(actual_delete_count);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
FixedArray* result_elms = FixedArray::cast(result);
AssertNoAllocation no_gc;
// Fill newly created array.
- CopyElements(&no_gc,
+ CopyElements(heap,
+ &no_gc,
result_elms, 0,
elms, actual_start,
actual_delete_count);
@@ -810,7 +830,7 @@ BUILTIN(ArraySplice) {
if (item_count < actual_delete_count) {
// Shrink the array.
- const bool trim_array = !Heap::lo_space()->Contains(elms) &&
+ const bool trim_array = !heap->lo_space()->Contains(elms) &&
((actual_start + item_count) <
(len - actual_delete_count - actual_start));
if (trim_array) {
@@ -818,18 +838,18 @@ BUILTIN(ArraySplice) {
if (actual_start > 0) {
AssertNoAllocation no_gc;
- MoveElements(&no_gc, elms, delta, elms, 0, actual_start);
+ MoveElements(heap, &no_gc, elms, delta, elms, 0, actual_start);
}
- elms = LeftTrimFixedArray(elms, delta);
+ elms = LeftTrimFixedArray(heap, elms, delta);
array->set_elements(elms, SKIP_WRITE_BARRIER);
} else {
AssertNoAllocation no_gc;
- MoveElements(&no_gc,
+ MoveElements(heap, &no_gc,
elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
- FillWithHoles(elms, new_length, len);
+ FillWithHoles(heap, elms, new_length, len);
}
} else if (item_count > actual_delete_count) {
// Currently fixed arrays cannot grow too big, so
@@ -842,7 +862,7 @@ BUILTIN(ArraySplice) {
int capacity = new_length + (new_length >> 1) + 16;
Object* obj;
{ MaybeObject* maybe_obj =
- Heap::AllocateUninitializedFixedArray(capacity);
+ heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
@@ -850,22 +870,22 @@ BUILTIN(ArraySplice) {
AssertNoAllocation no_gc;
// Copy the part before actual_start as is.
if (actual_start > 0) {
- CopyElements(&no_gc, new_elms, 0, elms, 0, actual_start);
+ CopyElements(heap, &no_gc, new_elms, 0, elms, 0, actual_start);
}
const int to_copy = len - actual_delete_count - actual_start;
if (to_copy > 0) {
- CopyElements(&no_gc,
+ CopyElements(heap, &no_gc,
new_elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
to_copy);
}
- FillWithHoles(new_elms, new_length, capacity);
+ FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
} else {
AssertNoAllocation no_gc;
- MoveElements(&no_gc,
+ MoveElements(heap, &no_gc,
elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
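
Taken together, the splice paths fall into three regimes: shrinking (move or left-trim the surviving tail, then fill vacated slots with holes), growing past the current capacity (allocate a ~1.5x + 16 store and copy the head and tail around the insertion gap), and otherwise moving the tail within the existing store. A schematic classifier with hypothetical names:

enum SpliceRegime { kShrink, kGrowRealloc, kMoveInPlace };

SpliceRegime Classify(int item_count, int delete_count,
                      int new_length, int capacity) {
  if (item_count < delete_count) return kShrink;
  if (item_count > delete_count && new_length > capacity) return kGrowRealloc;
  return kMoveInPlace;  // equal counts, or growth fits the current store
}
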
@@ -886,11 +906,12 @@ BUILTIN(ArraySplice) {
BUILTIN(ArrayConcat) {
- Context* global_context = Top::context()->global_context();
+ Heap* heap = isolate->heap();
+ Context* global_context = isolate->context()->global_context();
JSObject* array_proto =
JSObject::cast(global_context->array_function()->prototype());
- if (!ArrayPrototypeHasNoElements(global_context, array_proto)) {
- return CallJsBuiltin("ArrayConcat", args);
+ if (!ArrayPrototypeHasNoElements(heap, global_context, array_proto)) {
+ return CallJsBuiltin(isolate, "ArrayConcat", args);
}
// Iterate through all the arguments performing checks
@@ -901,7 +922,7 @@ BUILTIN(ArrayConcat) {
Object* arg = args[i];
if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()
|| JSArray::cast(arg)->GetPrototype() != array_proto) {
- return CallJsBuiltin("ArrayConcat", args);
+ return CallJsBuiltin(isolate, "ArrayConcat", args);
}
int len = Smi::cast(JSArray::cast(arg)->length())->value();
@@ -914,23 +935,23 @@ BUILTIN(ArrayConcat) {
ASSERT(result_len >= 0);
if (result_len > FixedArray::kMaxLength) {
- return CallJsBuiltin("ArrayConcat", args);
+ return CallJsBuiltin(isolate, "ArrayConcat", args);
}
}
if (result_len == 0) {
- return AllocateEmptyJSArray();
+ return AllocateEmptyJSArray(heap);
}
// Allocate result.
Object* result;
- { MaybeObject* maybe_result = AllocateJSArray();
+ { MaybeObject* maybe_result = AllocateJSArray(heap);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
JSArray* result_array = JSArray::cast(result);
{ MaybeObject* maybe_result =
- Heap::AllocateUninitializedFixedArray(result_len);
+ heap->AllocateUninitializedFixedArray(result_len);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
FixedArray* result_elms = FixedArray::cast(result);
@@ -943,7 +964,7 @@ BUILTIN(ArrayConcat) {
int len = Smi::cast(array->length())->value();
if (len > 0) {
FixedArray* elms = FixedArray::cast(array->elements());
- CopyElements(&no_gc, result_elms, start_pos, elms, 0, len);
+ CopyElements(heap, &no_gc, result_elms, start_pos, elms, 0, len);
start_pos += len;
}
}
@@ -958,6 +979,16 @@ BUILTIN(ArrayConcat) {
// -----------------------------------------------------------------------------
+// Strict mode poison pills
+
+
+BUILTIN(StrictModePoisonPill) {
+ HandleScope scope;
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "strict_poison_pill", HandleVector<Object>(NULL, 0)));
+}
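
The poison pill backs the accessors ES5 strict mode forbids, such as arguments.callee and arguments.caller in strict code: it never produces a value and always raises a TypeError built from the "strict_poison_pill" message template. Its shape, with simplified stand-in types:

struct ThrowingIsolateSketch {
  int Throw(const char* message_template) {
    (void)message_template;  // would build and schedule a TypeError here
    return -1;               // stand-in for the failure sentinel
  }
};

int StrictModePoisonPillSketch(ThrowingIsolateSketch* isolate) {
  return isolate->Throw("strict_poison_pill");  // unconditional throw
}
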
+
+// -----------------------------------------------------------------------------
//
@@ -967,10 +998,13 @@ BUILTIN(ArrayConcat) {
// overwritten with undefined. Arguments that do fit the expected
// type are overwritten with the object in the prototype chain that
// actually has that type.
-static inline Object* TypeCheck(int argc,
+static inline Object* TypeCheck(Heap* heap,
+ int argc,
Object** argv,
FunctionTemplateInfo* info) {
Object* recv = argv[0];
+ // API calls are only supported with JSObject receivers.
+ if (!recv->IsJSObject()) return heap->null_value();
Object* sig_obj = info->signature();
if (sig_obj->IsUndefined()) return recv;
SignatureInfo* sig = SignatureInfo::cast(sig_obj);
@@ -979,12 +1013,12 @@ static inline Object* TypeCheck(int argc,
Object* holder = recv;
if (!recv_type->IsUndefined()) {
- for (; holder != Heap::null_value(); holder = holder->GetPrototype()) {
+ for (; holder != heap->null_value(); holder = holder->GetPrototype()) {
if (holder->IsInstanceOf(FunctionTemplateInfo::cast(recv_type))) {
break;
}
}
- if (holder == Heap::null_value()) return holder;
+ if (holder == heap->null_value()) return holder;
}
Object* args_obj = sig->args();
// If there is no argument signature we're done
@@ -997,13 +1031,13 @@ static inline Object* TypeCheck(int argc,
if (argtype->IsUndefined()) continue;
Object** arg = &argv[-1 - i];
Object* current = *arg;
- for (; current != Heap::null_value(); current = current->GetPrototype()) {
+ for (; current != heap->null_value(); current = current->GetPrototype()) {
if (current->IsInstanceOf(FunctionTemplateInfo::cast(argtype))) {
*arg = current;
break;
}
}
- if (current == Heap::null_value()) *arg = Heap::undefined_value();
+ if (current == heap->null_value()) *arg = heap->undefined_value();
}
return holder;
}
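
TypeCheck walks prototype chains against the function template's signature: if the receiver chain never reaches an object created from the expected template the result is null and the caller raises illegal_invocation, while a mismatched argument is quietly downgraded to undefined. The core walk, sketched with simplified types:

#include <cstddef>

struct ObjSketch {
  ObjSketch* proto;   // next object on the prototype chain
  bool matches;       // created from the expected FunctionTemplateInfo?
};

// Follow the prototype links until a match or the end of the chain.
ObjSketch* FindCompatible(ObjSketch* start) {
  for (ObjSketch* o = start; o != NULL; o = o->proto) {
    if (o->matches) return o;
  }
  return NULL;  // receiver: illegal invocation; argument: undefined
}
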
@@ -1011,31 +1045,33 @@ static inline Object* TypeCheck(int argc,
template <bool is_construct>
MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
- BuiltinArguments<NEEDS_CALLED_FUNCTION> args) {
- ASSERT(is_construct == CalledAsConstructor());
+ BuiltinArguments<NEEDS_CALLED_FUNCTION> args, Isolate* isolate) {
+ ASSERT(is_construct == CalledAsConstructor(isolate));
+ Heap* heap = isolate->heap();
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<JSFunction> function = args.called_function();
ASSERT(function->shared()->IsApiFunction());
FunctionTemplateInfo* fun_data = function->shared()->get_api_func_data();
if (is_construct) {
- Handle<FunctionTemplateInfo> desc(fun_data);
+ Handle<FunctionTemplateInfo> desc(fun_data, isolate);
bool pending_exception = false;
- Factory::ConfigureInstance(desc, Handle<JSObject>::cast(args.receiver()),
- &pending_exception);
- ASSERT(Top::has_pending_exception() == pending_exception);
+ isolate->factory()->ConfigureInstance(
+ desc, Handle<JSObject>::cast(args.receiver()), &pending_exception);
+ ASSERT(isolate->has_pending_exception() == pending_exception);
if (pending_exception) return Failure::Exception();
fun_data = *desc;
}
- Object* raw_holder = TypeCheck(args.length(), &args[0], fun_data);
+ Object* raw_holder = TypeCheck(heap, args.length(), &args[0], fun_data);
if (raw_holder->IsNull()) {
// This function cannot be called with the given receiver. Abort!
Handle<Object> obj =
- Factory::NewTypeError("illegal_invocation", HandleVector(&function, 1));
- return Top::Throw(*obj);
+ isolate->factory()->NewTypeError(
+ "illegal_invocation", HandleVector(&function, 1));
+ return isolate->Throw(*obj);
}
Object* raw_call_data = fun_data->call_code();
@@ -1047,10 +1083,10 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
Object* data_obj = call_data->data();
Object* result;
- LOG(ApiObjectAccess("call", JSObject::cast(*args.receiver())));
+ LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver())));
ASSERT(raw_holder->IsJSObject());
- CustomArguments custom;
+ CustomArguments custom(isolate);
v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
data_obj, *function, raw_holder);
@@ -1063,17 +1099,18 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
v8::Handle<v8::Value> value;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
- ExternalCallbackScope call_scope(v8::ToCData<Address>(callback_obj));
+ VMState state(isolate, EXTERNAL);
+ ExternalCallbackScope call_scope(isolate,
+ v8::ToCData<Address>(callback_obj));
value = callback(new_args);
}
if (value.IsEmpty()) {
- result = Heap::undefined_value();
+ result = heap->undefined_value();
} else {
result = *reinterpret_cast<Object**>(*value);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!is_construct || result->IsJSObject()) return result;
}
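
VMState and ExternalCallbackScope bracket the embedder callback so profiler samples taken inside it are attributed to EXTERNAL native code rather than JavaScript; both are RAII scopes, and both now record into a specific isolate instead of static state. An illustrative RAII sketch with hypothetical names, not the real API:

struct VMStateSketch {
  explicit VMStateSketch(const char* tag) { (void)tag; /* push EXTERNAL */ }
  ~VMStateSketch() { /* pop back to the previous (JS) state */ }
};

int CallEmbedderCallback(int (*callback)(int), int arg) {
  VMStateSketch state("EXTERNAL");  // restored even on early return
  return callback(arg);
}
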
@@ -1082,12 +1119,12 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
BUILTIN(HandleApiCall) {
- return HandleApiCallHelper<false>(args);
+ return HandleApiCallHelper<false>(args, isolate);
}
BUILTIN(HandleApiCallConstruct) {
- return HandleApiCallHelper<true>(args);
+ return HandleApiCallHelper<true>(args, isolate);
}
@@ -1109,7 +1146,8 @@ static void VerifyTypeCheck(Handle<JSObject> object,
BUILTIN(FastHandleApiCall) {
- ASSERT(!CalledAsConstructor());
+ ASSERT(!CalledAsConstructor(isolate));
+ Heap* heap = isolate->heap();
const bool is_construct = false;
// We expect four more arguments: callback, function, call data, and holder.
@@ -1128,25 +1166,26 @@ BUILTIN(FastHandleApiCall) {
VerifyTypeCheck(Utils::OpenHandle(*new_args.Holder()),
Utils::OpenHandle(*new_args.Callee()));
#endif
- HandleScope scope;
+ HandleScope scope(isolate);
Object* result;
v8::Handle<v8::Value> value;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
- ExternalCallbackScope call_scope(v8::ToCData<Address>(callback_obj));
+ VMState state(isolate, EXTERNAL);
+ ExternalCallbackScope call_scope(isolate,
+ v8::ToCData<Address>(callback_obj));
v8::InvocationCallback callback =
v8::ToCData<v8::InvocationCallback>(callback_obj);
value = callback(new_args);
}
if (value.IsEmpty()) {
- result = Heap::undefined_value();
+ result = heap->undefined_value();
} else {
result = *reinterpret_cast<Object**>(*value);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return result;
}
@@ -1155,11 +1194,13 @@ BUILTIN(FastHandleApiCall) {
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).
MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
+ Isolate* isolate,
bool is_construct_call,
BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
// Non-functions are never called as constructors. Even if this is an object
// called as a constructor, the delegate call is not a construct call.
- ASSERT(!CalledAsConstructor());
+ ASSERT(!CalledAsConstructor(isolate));
+ Heap* heap = isolate->heap();
Handle<Object> receiver = args.at<Object>(0);
@@ -1182,11 +1223,10 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
// Get the data for the call and perform the callback.
Object* result;
{
- HandleScope scope;
+ HandleScope scope(isolate);
+ LOG(isolate, ApiObjectAccess("call non-function", obj));
- LOG(ApiObjectAccess("call non-function", obj));
-
- CustomArguments custom;
+ CustomArguments custom(isolate);
v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
call_data->data(), constructor, obj);
v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
@@ -1197,18 +1237,19 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
v8::Handle<v8::Value> value;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
- ExternalCallbackScope call_scope(v8::ToCData<Address>(callback_obj));
+ VMState state(isolate, EXTERNAL);
+ ExternalCallbackScope call_scope(isolate,
+ v8::ToCData<Address>(callback_obj));
value = callback(new_args);
}
if (value.IsEmpty()) {
- result = Heap::undefined_value();
+ result = heap->undefined_value();
} else {
result = *reinterpret_cast<Object**>(*value);
}
}
// Check for exceptions and return result.
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return result;
}
@@ -1216,14 +1257,14 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
// Handle calls to non-function objects created through the API. This delegate
// function is used when the call is a normal function call.
BUILTIN(HandleApiCallAsFunction) {
- return HandleApiCallAsFunctionOrConstructor(false, args);
+ return HandleApiCallAsFunctionOrConstructor(isolate, false, args);
}
// Handle calls to non-function objects created through the API. This delegate
// function is used when the call is a construct call.
BUILTIN(HandleApiCallAsConstructor) {
- return HandleApiCallAsFunctionOrConstructor(true, args);
+ return HandleApiCallAsFunctionOrConstructor(isolate, true, args);
}
@@ -1277,8 +1318,18 @@ static void Generate_KeyedLoadIC_Initialize(MacroAssembler* masm) {
}
+static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateRuntimeGetProperty(masm);
+}
+
+
static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMiss(masm);
+ KeyedLoadIC::GenerateMiss(masm, false);
+}
+
+
+static void Generate_KeyedLoadIC_MissForceGeneric(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateMiss(masm, true);
}
@@ -1300,6 +1351,9 @@ static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
KeyedLoadIC::GenerateIndexedInterceptor(masm);
}
+static void Generate_KeyedLoadIC_NonStrictArguments(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateNonStrictArguments(masm);
+}
static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
StoreIC::GenerateInitialize(masm);
@@ -1367,7 +1421,17 @@ static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMiss(masm);
+ KeyedStoreIC::GenerateMiss(masm, false);
+}
+
+
+static void Generate_KeyedStoreIC_MissForceGeneric(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateMiss(masm, true);
+}
+
+
+static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateSlow(masm);
}
@@ -1380,6 +1444,9 @@ static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) {
KeyedStoreIC::GenerateInitialize(masm);
}
+static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateNonStrictArguments(masm);
+}
#ifdef ENABLE_DEBUGGER_SUPPORT
static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
@@ -1432,73 +1499,113 @@ static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
}
#endif
-Object* Builtins::builtins_[builtin_count] = { NULL, };
-const char* Builtins::names_[builtin_count] = { NULL, };
+
+Builtins::Builtins() : initialized_(false) {
+ memset(builtins_, 0, sizeof(builtins_[0]) * builtin_count);
+ memset(names_, 0, sizeof(names_[0]) * builtin_count);
+}
+
+
+Builtins::~Builtins() {
+}
+
#define DEF_ENUM_C(name, ignore) FUNCTION_ADDR(Builtin_##name),
- Address Builtins::c_functions_[cfunction_count] = {
- BUILTIN_LIST_C(DEF_ENUM_C)
- };
+Address const Builtins::c_functions_[cfunction_count] = {
+ BUILTIN_LIST_C(DEF_ENUM_C)
+};
#undef DEF_ENUM_C
#define DEF_JS_NAME(name, ignore) #name,
#define DEF_JS_ARGC(ignore, argc) argc,
-const char* Builtins::javascript_names_[id_count] = {
+const char* const Builtins::javascript_names_[id_count] = {
BUILTINS_LIST_JS(DEF_JS_NAME)
};
-int Builtins::javascript_argc_[id_count] = {
+int const Builtins::javascript_argc_[id_count] = {
BUILTINS_LIST_JS(DEF_JS_ARGC)
};
#undef DEF_JS_NAME
#undef DEF_JS_ARGC
-static bool is_initialized = false;
-void Builtins::Setup(bool create_heap_objects) {
- ASSERT(!is_initialized);
+struct BuiltinDesc {
+ byte* generator;
+ byte* c_code;
+ const char* s_name; // name is only used for generating log information.
+ int name;
+ Code::Flags flags;
+ BuiltinExtraArguments extra_args;
+};
- // Create a scope for the handles in the builtins.
- HandleScope scope;
+class BuiltinFunctionTable {
+ public:
+ BuiltinFunctionTable() {
+ Builtins::InitBuiltinFunctionTable();
+ }
+
+ static const BuiltinDesc* functions() { return functions_; }
- struct BuiltinDesc {
- byte* generator;
- byte* c_code;
- const char* s_name; // name is only used for generating log information.
- int name;
- Code::Flags flags;
- BuiltinExtraArguments extra_args;
- };
-
-#define DEF_FUNCTION_PTR_C(name, extra_args) \
- { FUNCTION_ADDR(Generate_Adaptor), \
- FUNCTION_ADDR(Builtin_##name), \
- #name, \
- c_##name, \
- Code::ComputeFlags(Code::BUILTIN), \
- extra_args \
- },
-
-#define DEF_FUNCTION_PTR_A(name, kind, state, extra) \
- { FUNCTION_ADDR(Generate_##name), \
- NULL, \
- #name, \
- name, \
- Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state, extra), \
- NO_EXTRA_ARGUMENTS \
- },
-
- // Define array of pointers to generators and C builtin functions.
- static BuiltinDesc functions[] = {
- BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
- BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
- BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
- // Terminator:
- { NULL, NULL, NULL, builtin_count, static_cast<Code::Flags>(0),
- NO_EXTRA_ARGUMENTS }
- };
+ private:
+ static BuiltinDesc functions_[Builtins::builtin_count + 1];
+
+ friend class Builtins;
+};
+
+BuiltinDesc BuiltinFunctionTable::functions_[Builtins::builtin_count + 1];
+
+static const BuiltinFunctionTable builtin_function_table_init;
+
+// Define array of pointers to generators and C builtin functions.
+// We do this in a sort of roundabout way so that we can do the initialization
+// within the lexical scope of Builtins:: and within a context where
+// Code::Flags names a non-abstract type.
+void Builtins::InitBuiltinFunctionTable() {
+ BuiltinDesc* functions = BuiltinFunctionTable::functions_;
+ functions[builtin_count].generator = NULL;
+ functions[builtin_count].c_code = NULL;
+ functions[builtin_count].s_name = NULL;
+ functions[builtin_count].name = builtin_count;
+ functions[builtin_count].flags = static_cast<Code::Flags>(0);
+ functions[builtin_count].extra_args = NO_EXTRA_ARGUMENTS;
+
+#define DEF_FUNCTION_PTR_C(aname, aextra_args) \
+ functions->generator = FUNCTION_ADDR(Generate_Adaptor); \
+ functions->c_code = FUNCTION_ADDR(Builtin_##aname); \
+ functions->s_name = #aname; \
+ functions->name = c_##aname; \
+ functions->flags = Code::ComputeFlags(Code::BUILTIN); \
+ functions->extra_args = aextra_args; \
+ ++functions;
+
+#define DEF_FUNCTION_PTR_A(aname, kind, state, extra) \
+ functions->generator = FUNCTION_ADDR(Generate_##aname); \
+ functions->c_code = NULL; \
+ functions->s_name = #aname; \
+ functions->name = k##aname; \
+ functions->flags = Code::ComputeFlags(Code::kind, \
+ NOT_IN_LOOP, \
+ state, \
+ extra); \
+ functions->extra_args = NO_EXTRA_ARGUMENTS; \
+ ++functions;
+
+ BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
+ BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
+ BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
#undef DEF_FUNCTION_PTR_C
#undef DEF_FUNCTION_PTR_A
+}
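
The descriptor table is filled exactly once, before main, by a file-scope static object whose constructor runs InitBuiltinFunctionTable; the macros append one descriptor per builtin, and the extra slot at builtin_count is a terminator. A minimal model of that static-initializer trick, valid within a single translation unit:

struct DescSketch { const char* name; };
static DescSketch g_table[3];

struct TableInitSketch {
  TableInitSketch() {
    int i = 0;
    g_table[i++].name = "ArrayPush";
    g_table[i++].name = "ArrayPop";
    g_table[i].name = 0;  // terminator, like functions[builtin_count]
  }
};
static const TableInitSketch g_table_init;  // runs during static init
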
+
+void Builtins::Setup(bool create_heap_objects) {
+ ASSERT(!initialized_);
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+
+ // Create a scope for the handles in the builtins.
+ HandleScope scope(isolate);
+
+ const BuiltinDesc* functions = BuiltinFunctionTable::functions();
// For now we generate builtin adaptor code into a stack-allocated
// buffer, before copying it into individual code objects.
@@ -1508,7 +1615,7 @@ void Builtins::Setup(bool create_heap_objects) {
// separate code object for each one.
for (int i = 0; i < builtin_count; i++) {
if (create_heap_objects) {
- MacroAssembler masm(buffer, sizeof buffer);
+ MacroAssembler masm(isolate, buffer, sizeof buffer);
// Generate the code/adaptor.
typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
@@ -1526,14 +1633,15 @@ void Builtins::Setup(bool create_heap_objects) {
// This simplifies things because we don't need to retry.
AlwaysAllocateScope __scope__;
{ MaybeObject* maybe_code =
- Heap::CreateCode(desc, flags, masm.CodeObject());
+ heap->CreateCode(desc, flags, masm.CodeObject());
if (!maybe_code->ToObject(&code)) {
v8::internal::V8::FatalProcessOutOfMemory("CreateCode");
}
}
}
// Log the event and add the code to the builtins array.
- PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
+ PROFILE(isolate,
+ CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code),
functions[i].s_name));
GDBJIT(AddCode(GDBJITInterface::BUILTIN,
@@ -1555,12 +1663,12 @@ void Builtins::Setup(bool create_heap_objects) {
}
// Mark as initialized.
- is_initialized = true;
+ initialized_ = true;
}
void Builtins::TearDown() {
- is_initialized = false;
+ initialized_ = false;
}
@@ -1570,7 +1678,8 @@ void Builtins::IterateBuiltins(ObjectVisitor* v) {
const char* Builtins::Lookup(byte* pc) {
- if (is_initialized) { // may be called during initialization (disassembler!)
+ // May be called during initialization (disassembler!)
+ if (initialized_) {
for (int i = 0; i < builtin_count; i++) {
Code* entry = Code::cast(builtins_[i]);
if (entry->contains(pc)) {
@@ -1582,4 +1691,23 @@ const char* Builtins::Lookup(byte* pc) {
}
+#define DEFINE_BUILTIN_ACCESSOR_C(name, ignore) \
+Handle<Code> Builtins::name() { \
+ Code** code_address = \
+ reinterpret_cast<Code**>(builtin_address(k##name)); \
+ return Handle<Code>(code_address); \
+}
+#define DEFINE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
+Handle<Code> Builtins::name() { \
+ Code** code_address = \
+ reinterpret_cast<Code**>(builtin_address(k##name)); \
+ return Handle<Code>(code_address); \
+}
+BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C)
+BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A)
+BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A)
+#undef DEFINE_BUILTIN_ACCESSOR_C
+#undef DEFINE_BUILTIN_ACCESSOR_A
+
+
} } // namespace v8::internal
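
The generated accessors return Handle<Code> wrapping the address of the builtins_ slot rather than the raw Code* currently stored there: if the collector moves the code object and rewrites the slot, every outstanding handle observes the new location. A sketch of why a handle is a pointer to a slot, simplified and not the real handle machinery:

template <typename T>
struct HandleSketch {
  T** location;  // points at a slot the GC keeps up to date
  T* operator*() const { return *location; }  // always the current object
};
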
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index 5ea466513..f9a5a13bd 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -58,119 +58,131 @@ enum BuiltinExtraArguments {
V(FastHandleApiCall, NO_EXTRA_ARGUMENTS) \
V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION) \
V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \
- V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS)
-
+ V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS) \
+ \
+ V(StrictModePoisonPill, NO_EXTRA_ARGUMENTS)
// Define list of builtins implemented in assembly.
-#define BUILTIN_LIST_A(V) \
- V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSConstructCall, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(LazyCompile, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(LazyRecompile, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(NotifyOSR, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- \
- V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- \
- V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- \
- V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- \
- V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
- kStrictMode) \
- V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \
- kStrictMode) \
- V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
- kStrictMode) \
- V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
- kStrictMode) \
- V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \
- kStrictMode) \
- \
- V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
- \
- V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
- kStrictMode) \
- V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
- kStrictMode) \
- \
- /* Uses KeyedLoadIC_Initialize; must be after in list. */ \
- V(FunctionCall, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(FunctionApply, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- \
- V(ArrayCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- \
- V(StringConstructCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- \
- V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
+#define BUILTIN_LIST_A(V) \
+ V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(JSConstructCall, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(LazyCompile, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(LazyRecompile, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(NotifyOSR, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ \
+ V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(KeyedLoadIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(KeyedLoadIC_Slow, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(KeyedStoreIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(KeyedStoreIC_Slow, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
+ \
+ V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
+ V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
+ V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
+ V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
+ \
+ V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
+ V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
+ V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
+ kStrictMode) \
+ V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \
+ kStrictMode) \
+ V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
+ kStrictMode) \
+ V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
+ kStrictMode) \
+ V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \
+ kStrictMode) \
+ \
+ V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
+ \
+ V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
+ kStrictMode) \
+ V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
+ kStrictMode) \
+ V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
+ \
+ /* Uses KeyedLoadIC_Initialize; must be after in list. */ \
+ V(FunctionCall, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(FunctionApply, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ \
+ V(ArrayCode, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ \
+ V(StringConstructCode, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ \
+ V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState)
@@ -235,25 +247,28 @@ enum BuiltinExtraArguments {
V(APPLY_OVERFLOW, 1)
+class BuiltinFunctionTable;
class ObjectVisitor;
-class Builtins : public AllStatic {
+class Builtins {
public:
+ ~Builtins();
+
// Generate all builtin code objects. Should be called once during
- // VM initialization.
- static void Setup(bool create_heap_objects);
- static void TearDown();
+ // isolate initialization.
+ void Setup(bool create_heap_objects);
+ void TearDown();
// Garbage collection support.
- static void IterateBuiltins(ObjectVisitor* v);
+ void IterateBuiltins(ObjectVisitor* v);
// Disassembler support.
- static const char* Lookup(byte* pc);
+ const char* Lookup(byte* pc);
enum Name {
-#define DEF_ENUM_C(name, ignore) name,
-#define DEF_ENUM_A(name, kind, state, extra) name,
+#define DEF_ENUM_C(name, ignore) k##name,
+#define DEF_ENUM_A(name, kind, state, extra) k##name,
BUILTIN_LIST_C(DEF_ENUM_C)
BUILTIN_LIST_A(DEF_ENUM_A)
BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
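
The enum constants gain a k prefix because the class now also declares a per-builtin accessor method with the plain name, so kArrayPush (the table index) and ArrayPush() (the Handle<Code> accessor) can coexist without colliding. An illustrative sketch:

struct BuiltinsSketch {
  enum Name { kArrayPush, kArrayPop, builtin_count };
  void* builtins_[builtin_count];
  void* ArrayPush() { return builtins_[kArrayPush]; }  // name reused safely
  void* ArrayPop() { return builtins_[kArrayPop]; }
};
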
@@ -276,13 +291,22 @@ class Builtins : public AllStatic {
id_count
};
- static Code* builtin(Name name) {
+#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
+#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
+ Handle<Code> name();
+ BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
+ BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
+ BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A)
+#undef DECLARE_BUILTIN_ACCESSOR_C
+#undef DECLARE_BUILTIN_ACCESSOR_A
+
+ Code* builtin(Name name) {
// Code::cast cannot be used here since we access builtins
// during the marking phase of mark sweep. See IC::Clear.
return reinterpret_cast<Code*>(builtins_[name]);
}
- static Address builtin_address(Name name) {
+ Address builtin_address(Name name) {
return reinterpret_cast<Address>(&builtins_[name]);
}
@@ -292,20 +316,24 @@ class Builtins : public AllStatic {
static const char* GetName(JavaScript id) { return javascript_names_[id]; }
static int GetArgumentsCount(JavaScript id) { return javascript_argc_[id]; }
- static Handle<Code> GetCode(JavaScript id, bool* resolved);
+ Handle<Code> GetCode(JavaScript id, bool* resolved);
static int NumberOfJavaScriptBuiltins() { return id_count; }
+ bool is_initialized() const { return initialized_; }
+
private:
+ Builtins();
+
// The external C++ functions called from the code.
- static Address c_functions_[cfunction_count];
+ static Address const c_functions_[cfunction_count];
// Note: These are always Code objects, but to conform with
// IterateBuiltins() above which assumes Object**'s for the callback
// function f, we use an Object* array here.
- static Object* builtins_[builtin_count];
- static const char* names_[builtin_count];
- static const char* javascript_names_[id_count];
- static int javascript_argc_[id_count];
+ Object* builtins_[builtin_count];
+ const char* names_[builtin_count];
+ static const char* const javascript_names_[id_count];
+ static int const javascript_argc_[id_count];
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
@@ -330,8 +358,16 @@ class Builtins : public AllStatic {
static void Generate_ArrayConstructCode(MacroAssembler* masm);
static void Generate_StringConstructCode(MacroAssembler* masm);
-
static void Generate_OnStackReplacement(MacroAssembler* masm);
+
+ static void InitBuiltinFunctionTable();
+
+ bool initialized_;
+
+ friend class BuiltinFunctionTable;
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(Builtins);
};
} } // namespace v8::internal
diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/char-predicates.h
index dac1eb8fe..5a901a26a 100644
--- a/deps/v8/src/char-predicates.h
+++ b/deps/v8/src/char-predicates.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,8 @@
#ifndef V8_CHAR_PREDICATES_H_
#define V8_CHAR_PREDICATES_H_
+#include "unicode.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc
index 3c3d940be..320fd6b5e 100644
--- a/deps/v8/src/checks.cc
+++ b/deps/v8/src/checks.cc
@@ -30,8 +30,8 @@
#include "v8.h"
#include "platform.h"
-#include "top.h"
+// TODO(isolates): is it necessary to lift this?
static int fatal_error_handler_nesting_depth = 0;
// Contains protection against recursive calls (faults while handling faults).
@@ -52,7 +52,7 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
if (fatal_error_handler_nesting_depth < 3) {
if (i::FLAG_stack_trace_on_abort) {
// Call this one twice on double fault
- i::Top::PrintStack();
+ i::Isolate::Current()->PrintStack();
}
}
i::OS::Abort();
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index 2bb94bb08..a560b2fb1 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -271,6 +271,8 @@ bool EnableSlowAsserts();
#define ASSERT_EQ(v1, v2) CHECK_EQ(v1, v2)
#define ASSERT_NE(v1, v2) CHECK_NE(v1, v2)
#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
+#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
+#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
#define SLOW_ASSERT(condition) if (EnableSlowAsserts()) CHECK(condition)
#else
#define ASSERT_RESULT(expr) (expr)
@@ -278,6 +280,8 @@ bool EnableSlowAsserts();
#define ASSERT_EQ(v1, v2) ((void) 0)
#define ASSERT_NE(v1, v2) ((void) 0)
#define ASSERT_GE(v1, v2) ((void) 0)
+#define ASSERT_LT(v1, v2) ((void) 0)
+#define ASSERT_LE(v1, v2) ((void) 0)
#define SLOW_ASSERT(condition) ((void) 0)
#endif
// Static asserts have no impact on runtime performance, so they can be
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index ba77b21c6..db57280f4 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "code-stubs.h"
+#include "stub-cache.h"
#include "factory.h"
#include "gdb-jit.h"
#include "macro-assembler.h"
@@ -37,9 +38,10 @@ namespace v8 {
namespace internal {
bool CodeStub::FindCodeInCache(Code** code_out) {
- int index = Heap::code_stubs()->FindEntry(GetKey());
+ Heap* heap = Isolate::Current()->heap();
+ int index = heap->code_stubs()->FindEntry(GetKey());
if (index != NumberDictionary::kNotFound) {
- *code_out = Code::cast(Heap::code_stubs()->ValueAt(index));
+ *code_out = Code::cast(heap->code_stubs()->ValueAt(index));
return true;
}
return false;
@@ -48,7 +50,7 @@ bool CodeStub::FindCodeInCache(Code** code_out) {
void CodeStub::GenerateCode(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
- Counters::code_stubs.Increment();
+ masm->isolate()->counters()->code_stubs()->Increment();
// Nested stubs are not allowed for leafs.
AllowStubCallsScope allow_scope(masm, AllowsStubCalls());
@@ -62,9 +64,11 @@ void CodeStub::GenerateCode(MacroAssembler* masm) {
void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
code->set_major_key(MajorKey());
- PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
+ Isolate* isolate = masm->isolate();
+ PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
GDBJIT(AddCode(GDBJITInterface::STUB, GetName(), code));
- Counters::total_stubs_code_size.Increment(code->instruction_size());
+ Counters* counters = isolate->counters();
+ counters->total_stubs_code_size()->Increment(code->instruction_size());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
@@ -84,12 +88,15 @@ int CodeStub::GetCodeKind() {
Handle<Code> CodeStub::GetCode() {
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
Code* code;
if (!FindCodeInCache(&code)) {
- v8::HandleScope scope;
+ HandleScope scope(isolate);
// Generate the new code.
- MacroAssembler masm(NULL, 256);
+ MacroAssembler masm(isolate, NULL, 256);
GenerateCode(&masm);
// Create the code object.
@@ -101,22 +108,24 @@ Handle<Code> CodeStub::GetCode() {
static_cast<Code::Kind>(GetCodeKind()),
InLoop(),
GetICState());
- Handle<Code> new_object = Factory::NewCode(desc, flags, masm.CodeObject());
+ Handle<Code> new_object = factory->NewCode(
+ desc, flags, masm.CodeObject(), NeedsImmovableCode());
RecordCodeGeneration(*new_object, &masm);
FinishCode(*new_object);
// Update the dictionary and the root in Heap.
Handle<NumberDictionary> dict =
- Factory::DictionaryAtNumberPut(
- Handle<NumberDictionary>(Heap::code_stubs()),
+ factory->DictionaryAtNumberPut(
+ Handle<NumberDictionary>(heap->code_stubs()),
GetKey(),
new_object);
- Heap::public_set_code_stubs(*dict);
+ heap->public_set_code_stubs(*dict);
code = *new_object;
}
- return Handle<Code>(code);
+ ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
+ return Handle<Code>(code, isolate);
}
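
GetCode memoizes compiled stubs in a heap-resident NumberDictionary keyed by GetKey(), the combined major/minor key, so each distinct stub variant is compiled at most once per isolate and reused thereafter. A conceptual compile-on-miss cache with the same shape, illustrative only:

#include <unordered_map>

struct CodeSketch { int id; };
static std::unordered_map<unsigned, CodeSketch> stub_cache;

CodeSketch GetStub(unsigned key) {
  std::unordered_map<unsigned, CodeSketch>::iterator it = stub_cache.find(key);
  if (it != stub_cache.end()) return it->second;  // cache hit
  CodeSketch fresh = { static_cast<int>(key) };   // "compile" on a miss
  stub_cache.insert(std::make_pair(key, fresh));
  return fresh;
}
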
@@ -124,8 +133,9 @@ MaybeObject* CodeStub::TryGetCode() {
Code* code;
if (!FindCodeInCache(&code)) {
// Generate the new code.
- MacroAssembler masm(NULL, 256);
+ MacroAssembler masm(Isolate::Current(), NULL, 256);
GenerateCode(&masm);
+ Heap* heap = masm.isolate()->heap();
// Create the code object.
CodeDesc desc;
@@ -138,7 +148,7 @@ MaybeObject* CodeStub::TryGetCode() {
GetICState());
Object* new_object;
{ MaybeObject* maybe_new_object =
- Heap::CreateCode(desc, flags, masm.CodeObject());
+ heap->CreateCode(desc, flags, masm.CodeObject());
if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
}
code = Code::cast(new_object);
@@ -147,9 +157,9 @@ MaybeObject* CodeStub::TryGetCode() {
// Try to update the code cache but do not fail if unable.
MaybeObject* maybe_new_object =
- Heap::code_stubs()->AtNumberPut(GetKey(), code);
+ heap->code_stubs()->AtNumberPut(GetKey(), code);
if (maybe_new_object->ToObject(&new_object)) {
- Heap::public_set_code_stubs(NumberDictionary::cast(new_object));
+ heap->public_set_code_stubs(NumberDictionary::cast(new_object));
}
}
@@ -188,6 +198,12 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
case CompareIC::HEAP_NUMBERS:
GenerateHeapNumbers(masm);
break;
+ case CompareIC::STRINGS:
+ GenerateStrings(masm);
+ break;
+ case CompareIC::SYMBOLS:
+ GenerateSymbols(masm);
+ break;
case CompareIC::OBJECTS:
GenerateObjects(masm);
break;
@@ -200,7 +216,8 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
const char* InstanceofStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* args = "";
@@ -227,4 +244,24 @@ const char* InstanceofStub::GetName() {
}
+void KeyedLoadFastElementStub::Generate(MacroAssembler* masm) {
+ KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
+}
+
+
+void KeyedStoreFastElementStub::Generate(MacroAssembler* masm) {
+ KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
+}
+
+
+void KeyedLoadExternalArrayStub::Generate(MacroAssembler* masm) {
+ KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
+}
+
+
+void KeyedStoreExternalArrayStub::Generate(MacroAssembler* masm) {
+ KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_);
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 96ac7335c..3a756585e 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -28,35 +28,38 @@
#ifndef V8_CODE_STUBS_H_
#define V8_CODE_STUBS_H_
+#include "allocation.h"
#include "globals.h"
namespace v8 {
namespace internal {
-// List of code stubs used on all platforms. The order in this list is important
-// as only the stubs up to and including Instanceof allows nested stub calls.
+// List of code stubs used on all platforms.
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
V(CallFunction) \
- V(GenericBinaryOp) \
- V(TypeRecordingBinaryOp) \
+ V(UnaryOp) \
+ V(BinaryOp) \
V(StringAdd) \
- V(StringCharAt) \
V(SubString) \
V(StringCompare) \
- V(SmiOp) \
V(Compare) \
V(CompareIC) \
V(MathPow) \
V(TranscendentalCache) \
V(Instanceof) \
+ /* All stubs above this line only exist in a few versions, which are */ \
+ /* generated ahead of time. Therefore compiling a call to one of */ \
+ /* them can't cause a new stub to be compiled, so compiling a call to */ \
+ /* them is GC safe. The ones below this line exist in many variants */ \
+ /* so code compiling a call to one can cause a GC. This means they */ \
+ /* can't be called from other stubs, since stub generation code is */ \
+ /* not GC safe. */ \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
- V(IntegerMod) \
V(StackCheck) \
V(FastNewClosure) \
V(FastNewContext) \
V(FastCloneShallowArray) \
- V(GenericUnaryOp) \
V(RevertToNumber) \
V(ToBoolean) \
V(ToNumber) \
@@ -67,7 +70,12 @@ namespace internal {
V(NumberToString) \
V(CEntry) \
V(JSEntry) \
- V(DebuggerStatement)
+ V(KeyedLoadFastElement) \
+ V(KeyedStoreFastElement) \
+ V(KeyedLoadExternalArray) \
+ V(KeyedStoreExternalArray) \
+ V(DebuggerStatement) \
+ V(StringDictionaryNegativeLookup)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
@@ -81,10 +89,20 @@ namespace internal {
#define CODE_STUB_LIST_ARM(V)
#endif
+// List of code stubs only used on MIPS platforms.
+#ifdef V8_TARGET_ARCH_MIPS
+#define CODE_STUB_LIST_MIPS(V) \
+ V(RegExpCEntry) \
+ V(DirectCEntry)
+#else
+#define CODE_STUB_LIST_MIPS(V)
+#endif
+
// Combined list of code stubs.
#define CODE_STUB_LIST(V) \
CODE_STUB_LIST_ALL_PLATFORMS(V) \
- CODE_STUB_LIST_ARM(V)
+ CODE_STUB_LIST_ARM(V) \
+ CODE_STUB_LIST_MIPS(V)
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
@@ -156,10 +174,10 @@ class CodeStub BASE_EMBEDDED {
// lazily generated function should be fully optimized or not.
virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
- // GenericBinaryOpStub needs to override this.
+ // BinaryOpStub needs to override this.
virtual int GetCodeKind();
- // GenericBinaryOpStub needs to override this.
+ // BinaryOpStub needs to override this.
virtual InlineCacheState GetICState() {
return UNINITIALIZED;
}
@@ -167,7 +185,11 @@ class CodeStub BASE_EMBEDDED {
// Returns a name for logging/debugging purposes.
virtual const char* GetName() { return MajorName(MajorKey(), false); }
-#ifdef DEBUG
+ // Returns whether the code generated for this stub needs to be allocated as
+ // a fixed (non-moveable) code object.
+ virtual bool NeedsImmovableCode() { return false; }
+
+#ifdef DEBUG
virtual void Print() { PrintF("%s\n", GetName()); }
#endif
@@ -178,6 +200,7 @@ class CodeStub BASE_EMBEDDED {
MajorKeyBits::encode(MajorKey());
}
+ // See comment above, where Instanceof is defined.
bool AllowsStubCalls() { return MajorKey() <= Instanceof; }
class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
@@ -251,7 +274,6 @@ class StackCheckStub : public CodeStub {
void Generate(MacroAssembler* masm);
private:
-
const char* GetName() { return "StackCheckStub"; }
Major MajorKey() { return StackCheck; }
@@ -274,12 +296,17 @@ class ToNumberStub: public CodeStub {
class FastNewClosureStub : public CodeStub {
public:
+ explicit FastNewClosureStub(StrictModeFlag strict_mode)
+ : strict_mode_(strict_mode) { }
+
void Generate(MacroAssembler* masm);
private:
const char* GetName() { return "FastNewClosureStub"; }
Major MajorKey() { return FastNewClosure; }
- int MinorKey() { return 0; }
+ int MinorKey() { return strict_mode_; }
+
+ StrictModeFlag strict_mode_;
};
@@ -373,54 +400,6 @@ class InstanceofStub: public CodeStub {
};
-enum NegativeZeroHandling {
- kStrictNegativeZero,
- kIgnoreNegativeZero
-};
-
-
-enum UnaryOpFlags {
- NO_UNARY_FLAGS = 0,
- NO_UNARY_SMI_CODE_IN_STUB = 1 << 0
-};
-
-
-class GenericUnaryOpStub : public CodeStub {
- public:
- GenericUnaryOpStub(Token::Value op,
- UnaryOverwriteMode overwrite,
- UnaryOpFlags flags,
- NegativeZeroHandling negative_zero = kStrictNegativeZero)
- : op_(op),
- overwrite_(overwrite),
- include_smi_code_((flags & NO_UNARY_SMI_CODE_IN_STUB) == 0),
- negative_zero_(negative_zero) { }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode overwrite_;
- bool include_smi_code_;
- NegativeZeroHandling negative_zero_;
-
- class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
- class IncludeSmiCodeField: public BitField<bool, 1, 1> {};
- class NegativeZeroField: public BitField<NegativeZeroHandling, 2, 1> {};
- class OpField: public BitField<Token::Value, 3, kMinorBits - 3> {};
-
- Major MajorKey() { return GenericUnaryOp; }
- int MinorKey() {
- return OpField::encode(op_) |
- OverwriteField::encode(overwrite_) |
- IncludeSmiCodeField::encode(include_smi_code_) |
- NegativeZeroField::encode(negative_zero_);
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName();
-};
-
-
class MathPowStub: public CodeStub {
public:
MathPowStub() {}
@@ -434,18 +413,6 @@ class MathPowStub: public CodeStub {
};
-class StringCharAtStub: public CodeStub {
- public:
- StringCharAtStub() {}
-
- private:
- Major MajorKey() { return StringCharAt; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class ICCompareStub: public CodeStub {
public:
ICCompareStub(Token::Value op, CompareIC::State state)
@@ -468,6 +435,8 @@ class ICCompareStub: public CodeStub {
void GenerateSmis(MacroAssembler* masm);
void GenerateHeapNumbers(MacroAssembler* masm);
+ void GenerateSymbols(MacroAssembler* masm);
+ void GenerateStrings(MacroAssembler* masm);
void GenerateObjects(MacroAssembler* masm);
void GenerateMiss(MacroAssembler* masm);
@@ -623,6 +592,8 @@ class CEntryStub : public CodeStub {
Major MajorKey() { return CEntry; }
int MinorKey();
+ bool NeedsImmovableCode();
+
const char* GetName() { return "CEntryStub"; }
};
@@ -661,7 +632,9 @@ class ArgumentsAccessStub: public CodeStub {
public:
enum Type {
READ_ELEMENT,
- NEW_OBJECT
+ NEW_NON_STRICT_FAST,
+ NEW_NON_STRICT_SLOW,
+ NEW_STRICT
};
explicit ArgumentsAccessStub(Type type) : type_(type) { }
@@ -674,7 +647,9 @@ class ArgumentsAccessStub: public CodeStub {
void Generate(MacroAssembler* masm);
void GenerateReadElement(MacroAssembler* masm);
- void GenerateNewObject(MacroAssembler* masm);
+ void GenerateNewStrict(MacroAssembler* masm);
+ void GenerateNewNonStrictFast(MacroAssembler* masm);
+ void GenerateNewNonStrictSlow(MacroAssembler* masm);
const char* GetName() { return "ArgumentsAccessStub"; }
@@ -765,8 +740,9 @@ class CallFunctionStub: public CodeStub {
}
InLoopFlag InLoop() { return in_loop_; }
- bool ReceiverMightBeValue() {
- return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0;
+
+ bool ReceiverMightBeImplicit() {
+ return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
}
};
@@ -945,6 +921,98 @@ class AllowStubCallsScope {
DISALLOW_COPY_AND_ASSIGN(AllowStubCallsScope);
};
+#ifdef DEBUG
+#define DECLARE_ARRAY_STUB_PRINT(name) void Print() { PrintF(#name); }
+#else
+#define DECLARE_ARRAY_STUB_PRINT(name)
+#endif
+
+
+class KeyedLoadFastElementStub : public CodeStub {
+ public:
+ KeyedLoadFastElementStub() { }
+
+ Major MajorKey() { return KeyedLoadFastElement; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "KeyedLoadFastElementStub"; }
+
+ DECLARE_ARRAY_STUB_PRINT(KeyedLoadFastElementStub)
+};
+
+
+class KeyedStoreFastElementStub : public CodeStub {
+ public:
+ explicit KeyedStoreFastElementStub(bool is_js_array)
+ : is_js_array_(is_js_array) { }
+
+ Major MajorKey() { return KeyedStoreFastElement; }
+ int MinorKey() { return is_js_array_ ? 1 : 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "KeyedStoreFastElementStub"; }
+
+ DECLARE_ARRAY_STUB_PRINT(KeyedStoreFastElementStub)
+
+ private:
+ bool is_js_array_;
+};
+
+
+class KeyedLoadExternalArrayStub : public CodeStub {
+ public:
+ explicit KeyedLoadExternalArrayStub(JSObject::ElementsKind elements_kind)
+ : elements_kind_(elements_kind) { }
+
+ Major MajorKey() { return KeyedLoadExternalArray; }
+ int MinorKey() { return elements_kind_; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "KeyedLoadExternalArrayStub"; }
+
+ DECLARE_ARRAY_STUB_PRINT(KeyedLoadExternalArrayStub)
+
+ protected:
+ JSObject::ElementsKind elements_kind_;
+};
+
+
+class KeyedStoreExternalArrayStub : public CodeStub {
+ public:
+ explicit KeyedStoreExternalArrayStub(JSObject::ElementsKind elements_kind)
+ : elements_kind_(elements_kind) { }
+
+ Major MajorKey() { return KeyedStoreExternalArray; }
+ int MinorKey() { return elements_kind_; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "KeyedStoreExternalArrayStub"; }
+
+ DECLARE_ARRAY_STUB_PRINT(KeyedStoreExternalArrayStub)
+
+ protected:
+ JSObject::ElementsKind elements_kind_;
+};
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+ explicit ToBooleanStub(Register tos) : tos_(tos) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register tos_;
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return tos_.code(); }
+};
+
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_
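
The point of MajorKey/MinorKey for the stubs added above: generated stub code is cached under the combined key, so a stub like KeyedLoadExternalArrayStub is compiled once per elements kind and then reused. A toy model of that contract (not V8's actual cache, which lives in the heap):

    #include <cstdio>
    #include <map>
    #include <utility>

    struct Code { int id; };              // stand-in for a generated code object

    typedef std::pair<int, int> StubKey;  // (MajorKey, MinorKey)

    // Toy stub cache: one code object per distinct key, generated on the
    // first request and reused afterwards. (Process-global here purely
    // for brevity.)
    static Code* GetCode(int major, int minor) {
      static std::map<StubKey, Code*> cache;
      static int next_id = 0;
      StubKey key(major, minor);
      std::map<StubKey, Code*>::iterator it = cache.find(key);
      if (it != cache.end()) return it->second;  // reuse existing stub code
      Code* code = new Code;                     // "generate" the stub once
      code->id = next_id++;
      cache[key] = code;
      return code;
    }

    int main() {
      // Same elements kind (same MinorKey) shares one compiled stub; a
      // different kind gets its own. Prints "0 0 1".
      std::printf("%d %d %d\n",
                  GetCode(7, 0)->id, GetCode(7, 0)->id, GetCode(7, 1)->id);
      return 0;
    }
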
diff --git a/deps/v8/src/code.h b/deps/v8/src/code.h
index 072344b67..766c932e0 100644
--- a/deps/v8/src/code.h
+++ b/deps/v8/src/code.h
@@ -28,6 +28,8 @@
#ifndef V8_CODE_H_
#define V8_CODE_H_
+#include "allocation.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index e6fcecde7..4e5c78136 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,16 +28,13 @@
#include "v8.h"
#include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "compiler.h"
#include "debug.h"
#include "prettyprinter.h"
-#include "register-allocator-inl.h"
#include "rewriter.h"
#include "runtime.h"
-#include "scopeinfo.h"
#include "stub-cache.h"
-#include "virtual-frame-inl.h"
namespace v8 {
namespace internal {
@@ -61,67 +58,6 @@ Comment::~Comment() {
#undef __
-CodeGenerator* CodeGeneratorScope::top_ = NULL;
-
-
-void CodeGenerator::ProcessDeferred() {
- while (!deferred_.is_empty()) {
- DeferredCode* code = deferred_.RemoveLast();
- ASSERT(masm_ == code->masm());
- // Record position of deferred code stub.
- masm_->positions_recorder()->RecordStatementPosition(
- code->statement_position());
- if (code->position() != RelocInfo::kNoPosition) {
- masm_->positions_recorder()->RecordPosition(code->position());
- }
- // Generate the code.
- Comment cmnt(masm_, code->comment());
- masm_->bind(code->entry_label());
- if (code->AutoSaveAndRestore()) {
- code->SaveRegisters();
- }
- code->Generate();
- if (code->AutoSaveAndRestore()) {
- code->RestoreRegisters();
- code->Exit();
- }
- }
-}
-
-
-void DeferredCode::Exit() {
- masm_->jmp(exit_label());
-}
-
-
-void CodeGenerator::SetFrame(VirtualFrame* new_frame,
- RegisterFile* non_frame_registers) {
- RegisterFile saved_counts;
- if (has_valid_frame()) {
- frame_->DetachFromCodeGenerator();
- // The remaining register reference counts are the non-frame ones.
- allocator_->SaveTo(&saved_counts);
- }
-
- if (new_frame != NULL) {
- // Restore the non-frame register references that go with the new frame.
- allocator_->RestoreFrom(non_frame_registers);
- new_frame->AttachToCodeGenerator();
- }
-
- frame_ = new_frame;
- saved_counts.CopyTo(non_frame_registers);
-}
-
-
-void CodeGenerator::DeleteFrame() {
- if (has_valid_frame()) {
- frame_->DetachFromCodeGenerator();
- frame_ = NULL;
- }
-}
-
-
void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
#ifdef DEBUG
bool print_source = false;
@@ -129,7 +65,7 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
bool print_json_ast = false;
const char* ftype;
- if (Bootstrapper::IsActive()) {
+ if (Isolate::Current()->bootstrapper()->IsActive()) {
print_source = FLAG_print_builtin_source;
print_ast = FLAG_print_builtin_ast;
print_json_ast = FLAG_print_builtin_json_ast;
@@ -178,13 +114,17 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
Code::Flags flags,
CompilationInfo* info) {
+ Isolate* isolate = info->isolate();
+
// Allocate and install the code.
CodeDesc desc;
masm->GetCode(&desc);
- Handle<Code> code = Factory::NewCode(desc, flags, masm->CodeObject());
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, flags, masm->CodeObject());
if (!code.is_null()) {
- Counters::total_compiled_code_size.Increment(code->instruction_size());
+ isolate->counters()->total_compiled_code_size()->Increment(
+ code->instruction_size());
}
return code;
}
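
The two-line change to MakeCodeEpilogue shows the refactoring pattern that runs through this whole patch: process-wide statics such as Factory::NewCode and the Counters fields become members reached through an Isolate. A self-contained sketch of the shape of that migration, with stand-in types:

    #include <cstdio>

    struct Counter {
      Counter() : value(0) {}
      int value;
      void Increment(int by) { value += by; }
    };

    struct Counters {
      Counter* total_compiled_code_size() { return &total_compiled_code_size_; }
      Counter total_compiled_code_size_;
    };

    class Isolate {
     public:
      Counters* counters() { return &counters_; }
     private:
      Counters counters_;
    };

    // Old style (one global set, shared and raced on by every VM in the
    // process):
    //   Counters::total_compiled_code_size.Increment(n);
    // New style (each isolate owns its own counters):
    void RecordCompiledSize(Isolate* isolate, int instruction_size) {
      isolate->counters()->total_compiled_code_size()->Increment(
          instruction_size);
    }

    int main() {
      Isolate isolate;
      RecordCompiledSize(&isolate, 128);
      std::printf("%d\n", isolate.counters()->total_compiled_code_size()->value);
      return 0;
    }
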
@@ -192,7 +132,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
#ifdef ENABLE_DISASSEMBLER
- bool print_code = Bootstrapper::IsActive()
+ bool print_code = Isolate::Current()->bootstrapper()->IsActive()
? FLAG_print_builtin_code
: (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code));
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
@@ -229,62 +169,18 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
#endif // ENABLE_DISASSEMBLER
}
-
-// Generate the code. Compile the AST and assemble all the pieces into a
-// Code object.
-bool CodeGenerator::MakeCode(CompilationInfo* info) {
- // When using Crankshaft the classic backend should never be used.
- ASSERT(!V8::UseCrankshaft());
- Handle<Script> script = info->script();
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- int len = String::cast(script->source())->length();
- Counters::total_old_codegen_source_size.Increment(len);
- }
- if (FLAG_trace_codegen) {
- PrintF("Classic Compiler - ");
- }
- MakeCodePrologue(info);
- // Generate code.
- const int kInitialBufferSize = 4 * KB;
- MacroAssembler masm(NULL, kInitialBufferSize);
-#ifdef ENABLE_GDB_JIT_INTERFACE
- masm.positions_recorder()->StartGDBJITLineInfoRecording();
-#endif
- CodeGenerator cgen(&masm);
- CodeGeneratorScope scope(&cgen);
- cgen.Generate(info);
- if (cgen.HasStackOverflow()) {
- ASSERT(!Top::has_pending_exception());
- return false;
- }
-
- InLoopFlag in_loop = info->is_in_loop() ? IN_LOOP : NOT_IN_LOOP;
- Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
- Handle<Code> code = MakeCodeEpilogue(cgen.masm(), flags, info);
- // There is no stack check table in code generated by the classic backend.
- code->SetNoStackCheckTable();
- CodeGenerator::PrintCode(code, info);
- info->SetCode(code); // May be an empty handle.
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (FLAG_gdbjit && !code.is_null()) {
- GDBJITLineInfo* lineinfo =
- masm.positions_recorder()->DetachGDBJITLineInfo();
-
- GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
- }
-#endif
- return !code.is_null();
-}
-
-
#ifdef ENABLE_LOGGING_AND_PROFILING
+static Vector<const char> kRegexp = CStrVector("regexp");
+
bool CodeGenerator::ShouldGenerateLog(Expression* type) {
ASSERT(type != NULL);
- if (!Logger::is_logging() && !CpuProfiler::is_profiling()) return false;
+ Isolate* isolate = Isolate::Current();
+ if (!isolate->logger()->is_logging() && !CpuProfiler::is_profiling(isolate)) {
+ return false;
+ }
Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
if (FLAG_log_regexp) {
- static Vector<const char> kRegexp = CStrVector("regexp");
if (name->IsEqualTo(kRegexp))
return true;
}
@@ -294,120 +190,6 @@ bool CodeGenerator::ShouldGenerateLog(Expression* type) {
#endif
-void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
- int length = declarations->length();
- int globals = 0;
- for (int i = 0; i < length; i++) {
- Declaration* node = declarations->at(i);
- Variable* var = node->proxy()->var();
- Slot* slot = var->AsSlot();
-
- // If it was not possible to allocate the variable at compile
- // time, we need to "declare" it at runtime to make sure it
- // actually exists in the local context.
- if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
- VisitDeclaration(node);
- } else {
- // Count global variables and functions for later processing
- globals++;
- }
- }
-
- // Return in case of no declared global functions or variables.
- if (globals == 0) return;
-
- // Compute array of global variable and function declarations.
- Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
- for (int j = 0, i = 0; i < length; i++) {
- Declaration* node = declarations->at(i);
- Variable* var = node->proxy()->var();
- Slot* slot = var->AsSlot();
-
- if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
- // Skip - already processed.
- } else {
- array->set(j++, *(var->name()));
- if (node->fun() == NULL) {
- if (var->mode() == Variable::CONST) {
- // In case this is const property use the hole.
- array->set_the_hole(j++);
- } else {
- array->set_undefined(j++);
- }
- } else {
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(node->fun(), script());
- // Check for stack-overflow exception.
- if (function.is_null()) {
- SetStackOverflow();
- return;
- }
- array->set(j++, *function);
- }
- }
- }
-
- // Invoke the platform-dependent code generator to do the actual
- // declaration the global variables and functions.
- DeclareGlobals(array);
-}
-
-
-void CodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
- UNREACHABLE();
-}
-
-
-// Lookup table for code generators for special runtime calls which are
-// generated inline.
-#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
- &CodeGenerator::Generate##Name,
-
-const CodeGenerator::InlineFunctionGenerator
- CodeGenerator::kInlineFunctionGenerators[] = {
- INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
-};
-#undef INLINE_FUNCTION_GENERATOR_ADDRESS
-
-
-bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
- ZoneList<Expression*>* args = node->arguments();
- Handle<String> name = node->name();
- Runtime::Function* function = node->function();
- if (function != NULL && function->intrinsic_type == Runtime::INLINE) {
- int lookup_index = static_cast<int>(function->function_id) -
- static_cast<int>(Runtime::kFirstInlineFunction);
- ASSERT(lookup_index >= 0);
- ASSERT(static_cast<size_t>(lookup_index) <
- ARRAY_SIZE(kInlineFunctionGenerators));
- InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
- (this->*generator)(args);
- return true;
- }
- return false;
-}
-
-
-// Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
-// known result for the test expression, with no side effects.
-CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
- Expression* cond) {
- if (cond == NULL) return ALWAYS_TRUE;
-
- Literal* lit = cond->AsLiteral();
- if (lit == NULL) return DONT_KNOW;
-
- if (lit->IsTrue()) {
- return ALWAYS_TRUE;
- } else if (lit->IsFalse()) {
- return ALWAYS_FALSE;
- }
-
- return DONT_KNOW;
-}
-
-
bool CodeGenerator::RecordPositions(MacroAssembler* masm,
int pos,
bool right_here) {
@@ -422,61 +204,20 @@ bool CodeGenerator::RecordPositions(MacroAssembler* masm,
}
-void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) RecordPositions(masm(), fun->start_position(), false);
-}
-
-
-void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) RecordPositions(masm(), fun->end_position() - 1, false);
-}
-
-
-void CodeGenerator::CodeForStatementPosition(Statement* stmt) {
- if (FLAG_debug_info) RecordPositions(masm(), stmt->statement_pos(), false);
-}
-
-
-void CodeGenerator::CodeForDoWhileConditionPosition(DoWhileStatement* stmt) {
- if (FLAG_debug_info)
- RecordPositions(masm(), stmt->condition_position(), false);
-}
-
-
-void CodeGenerator::CodeForSourcePosition(int pos) {
- if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
- masm()->positions_recorder()->RecordPosition(pos);
- }
-}
-
-
-const char* GenericUnaryOpStub::GetName() {
- switch (op_) {
- case Token::SUB:
- if (negative_zero_ == kStrictNegativeZero) {
- return overwrite_ == UNARY_OVERWRITE
- ? "GenericUnaryOpStub_SUB_Overwrite_Strict0"
- : "GenericUnaryOpStub_SUB_Alloc_Strict0";
- } else {
- return overwrite_ == UNARY_OVERWRITE
- ? "GenericUnaryOpStub_SUB_Overwrite_Ignore0"
- : "GenericUnaryOpStub_SUB_Alloc_Ignore0";
- }
- case Token::BIT_NOT:
- return overwrite_ == UNARY_OVERWRITE
- ? "GenericUnaryOpStub_BIT_NOT_Overwrite"
- : "GenericUnaryOpStub_BIT_NOT_Alloc";
- default:
- UNREACHABLE();
- return "<unknown>";
- }
-}
-
-
void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
switch (type_) {
- case READ_ELEMENT: GenerateReadElement(masm); break;
- case NEW_OBJECT: GenerateNewObject(masm); break;
+ case READ_ELEMENT:
+ GenerateReadElement(masm);
+ break;
+ case NEW_NON_STRICT_FAST:
+ GenerateNewNonStrictFast(masm);
+ break;
+ case NEW_NON_STRICT_SLOW:
+ GenerateNewNonStrictSlow(masm);
+ break;
+ case NEW_STRICT:
+ GenerateNewStrict(masm);
+ break;
}
}
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 23b36f07a..e551abfb1 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -54,7 +54,6 @@
// shared code:
// CodeGenerator
// ~CodeGenerator
-// ProcessDeferred
// Generate
// ComputeLazyCompile
// BuildFunctionInfo
@@ -68,7 +67,6 @@
// CodeForDoWhileConditionPosition
// CodeForSourcePosition
-enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#if V8_TARGET_ARCH_IA32
@@ -83,163 +81,4 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#error Unsupported target architecture.
#endif
-#include "register-allocator.h"
-
-namespace v8 {
-namespace internal {
-
-// Code generation can be nested. Code generation scopes form a stack
-// of active code generators.
-class CodeGeneratorScope BASE_EMBEDDED {
- public:
- explicit CodeGeneratorScope(CodeGenerator* cgen) {
- previous_ = top_;
- top_ = cgen;
- }
-
- ~CodeGeneratorScope() {
- top_ = previous_;
- }
-
- static CodeGenerator* Current() {
- ASSERT(top_ != NULL);
- return top_;
- }
-
- private:
- static CodeGenerator* top_;
- CodeGenerator* previous_;
-};
-
-
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
-
-// State of used registers in a virtual frame.
-class FrameRegisterState {
- public:
- // Captures the current state of the given frame.
- explicit FrameRegisterState(VirtualFrame* frame);
-
- // Saves the state in the stack.
- void Save(MacroAssembler* masm) const;
-
- // Restores the state from the stack.
- void Restore(MacroAssembler* masm) const;
-
- private:
- // Constants indicating special actions. They should not be multiples
- // of kPointerSize so they will not collide with valid offsets from
- // the frame pointer.
- static const int kIgnore = -1;
- static const int kPush = 1;
-
- // This flag is ored with a valid offset from the frame pointer, so
- // it should fit in the low zero bits of a valid offset.
- static const int kSyncedFlag = 2;
-
- int registers_[RegisterAllocator::kNumRegisters];
-};
-
-#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
-
-
-class FrameRegisterState {
- public:
- inline FrameRegisterState(VirtualFrame frame) : frame_(frame) { }
-
- inline const VirtualFrame* frame() const { return &frame_; }
-
- private:
- VirtualFrame frame_;
-};
-
-#else
-
-#error Unsupported target architecture.
-
-#endif
-
-
-// RuntimeCallHelper implementation that saves/restores state of a
-// virtual frame.
-class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper {
- public:
- // Does not take ownership of |frame_state|.
- explicit VirtualFrameRuntimeCallHelper(const FrameRegisterState* frame_state)
- : frame_state_(frame_state) {}
-
- virtual void BeforeCall(MacroAssembler* masm) const;
-
- virtual void AfterCall(MacroAssembler* masm) const;
-
- private:
- const FrameRegisterState* frame_state_;
-};
-
-
-// Deferred code objects are small pieces of code that are compiled
-// out of line. They are used to defer the compilation of uncommon
-// paths thereby avoiding expensive jumps around uncommon code parts.
-class DeferredCode: public ZoneObject {
- public:
- DeferredCode();
- virtual ~DeferredCode() { }
-
- virtual void Generate() = 0;
-
- MacroAssembler* masm() { return masm_; }
-
- int statement_position() const { return statement_position_; }
- int position() const { return position_; }
-
- Label* entry_label() { return &entry_label_; }
- Label* exit_label() { return &exit_label_; }
-
-#ifdef DEBUG
- void set_comment(const char* comment) { comment_ = comment; }
- const char* comment() const { return comment_; }
-#else
- void set_comment(const char* comment) { }
- const char* comment() const { return ""; }
-#endif
-
- inline void Jump();
- inline void Branch(Condition cc);
- void BindExit() { masm_->bind(&exit_label_); }
-
- const FrameRegisterState* frame_state() const { return &frame_state_; }
-
- void SaveRegisters();
- void RestoreRegisters();
- void Exit();
-
- // If this returns true then all registers will be saved for the duration
- // of the Generate() call. Otherwise the registers are not saved and the
- // Generate() call must bracket any runtime calls with calls to
- // SaveRegisters() and RestoreRegisters(). In this case the Generate
- // method must also call Exit() in order to return to the non-deferred
- // code.
- virtual bool AutoSaveAndRestore() { return true; }
-
- protected:
- MacroAssembler* masm_;
-
- private:
- int statement_position_;
- int position_;
-
- Label entry_label_;
- Label exit_label_;
-
- FrameRegisterState frame_state_;
-
-#ifdef DEBUG
- const char* comment_;
-#endif
- DISALLOW_COPY_AND_ASSIGN(DeferredCode);
-};
-
-
-} } // namespace v8::internal
-
#endif // V8_CODEGEN_H_
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index cccb7a4f2..5bd8bf31c 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -33,8 +33,6 @@
namespace v8 {
namespace internal {
-// The number of sub caches covering the different types to cache.
-static const int kSubCacheCount = 4;
// The number of generations for each sub cache.
// The number of ScriptGenerations is carefully chosen based on histograms.
@@ -47,162 +45,32 @@ static const int kRegExpGenerations = 2;
// Initial size of each compilation cache table allocated.
static const int kInitialCacheSize = 64;
-// Index for the first generation in the cache.
-static const int kFirstGeneration = 0;
-
-// The compilation cache consists of several generational sub-caches which use
-// this class as a base class. A sub-cache contains a compilation cache table
-// for each generation of the sub-cache. Since the same source code string has
-// different compiled code for scripts and evals, we use separate sub-caches
-// for different compilation modes, to avoid retrieving the wrong result.
-class CompilationSubCache {
- public:
- explicit CompilationSubCache(int generations): generations_(generations) {
- tables_ = NewArray<Object*>(generations);
- }
-
- ~CompilationSubCache() { DeleteArray(tables_); }
-
- // Get the compilation cache tables for a specific generation.
- Handle<CompilationCacheTable> GetTable(int generation);
- // Accessors for first generation.
- Handle<CompilationCacheTable> GetFirstTable() {
- return GetTable(kFirstGeneration);
+CompilationCache::CompilationCache(Isolate* isolate)
+ : isolate_(isolate),
+ script_(isolate, kScriptGenerations),
+ eval_global_(isolate, kEvalGlobalGenerations),
+ eval_contextual_(isolate, kEvalContextualGenerations),
+ reg_exp_(isolate, kRegExpGenerations),
+ enabled_(true),
+ eager_optimizing_set_(NULL) {
+ CompilationSubCache* subcaches[kSubCacheCount] =
+ {&script_, &eval_global_, &eval_contextual_, &reg_exp_};
+ for (int i = 0; i < kSubCacheCount; ++i) {
+ subcaches_[i] = subcaches[i];
}
- void SetFirstTable(Handle<CompilationCacheTable> value) {
- ASSERT(kFirstGeneration < generations_);
- tables_[kFirstGeneration] = *value;
- }
-
- // Age the sub-cache by evicting the oldest generation and creating a new
- // young generation.
- void Age();
-
- // GC support.
- void Iterate(ObjectVisitor* v);
- void IterateFunctions(ObjectVisitor* v);
-
- // Clear this sub-cache evicting all its content.
- void Clear();
-
- // Remove given shared function info from sub-cache.
- void Remove(Handle<SharedFunctionInfo> function_info);
-
- // Number of generations in this sub-cache.
- inline int generations() { return generations_; }
-
- private:
- int generations_; // Number of generations.
- Object** tables_; // Compilation cache tables - one for each generation.
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
-};
-
-
-// Sub-cache for scripts.
-class CompilationCacheScript : public CompilationSubCache {
- public:
- explicit CompilationCacheScript(int generations)
- : CompilationSubCache(generations) { }
-
- Handle<SharedFunctionInfo> Lookup(Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset);
- void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);
-
- private:
- MUST_USE_RESULT MaybeObject* TryTablePut(
- Handle<String> source, Handle<SharedFunctionInfo> function_info);
-
- // Note: Returns a new hash table if operation results in expansion.
- Handle<CompilationCacheTable> TablePut(
- Handle<String> source, Handle<SharedFunctionInfo> function_info);
-
- bool HasOrigin(Handle<SharedFunctionInfo> function_info,
- Handle<Object> name,
- int line_offset,
- int column_offset);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
-};
-
-
-// Sub-cache for eval scripts.
-class CompilationCacheEval: public CompilationSubCache {
- public:
- explicit CompilationCacheEval(int generations)
- : CompilationSubCache(generations) { }
-
- Handle<SharedFunctionInfo> Lookup(Handle<String> source,
- Handle<Context> context,
- StrictModeFlag strict_mode);
-
- void Put(Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
-
- private:
- MUST_USE_RESULT MaybeObject* TryTablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
-
-
- // Note: Returns a new hash table if operation results in expansion.
- Handle<CompilationCacheTable> TablePut(
- Handle<String> source,
- Handle<Context> context,
- Handle<SharedFunctionInfo> function_info);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
-};
-
-
-// Sub-cache for regular expressions.
-class CompilationCacheRegExp: public CompilationSubCache {
- public:
- explicit CompilationCacheRegExp(int generations)
- : CompilationSubCache(generations) { }
-
- Handle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
-
- void Put(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
- private:
- MUST_USE_RESULT MaybeObject* TryTablePut(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
-
- // Note: Returns a new hash table if operation results in expansion.
- Handle<CompilationCacheTable> TablePut(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
-};
-
-
-// Statically allocate all the sub-caches.
-static CompilationCacheScript script(kScriptGenerations);
-static CompilationCacheEval eval_global(kEvalGlobalGenerations);
-static CompilationCacheEval eval_contextual(kEvalContextualGenerations);
-static CompilationCacheRegExp reg_exp(kRegExpGenerations);
-static CompilationSubCache* subcaches[kSubCacheCount] =
- {&script, &eval_global, &eval_contextual, &reg_exp};
+}
-// Current enable state of the compilation cache.
-static bool enabled = true;
-static inline bool IsEnabled() {
- return FLAG_compilation_cache && enabled;
+CompilationCache::~CompilationCache() {
+ delete eager_optimizing_set_;
+ eager_optimizing_set_ = NULL;
}
-static Handle<CompilationCacheTable> AllocateTable(int size) {
- CALL_HEAP_FUNCTION(CompilationCacheTable::Allocate(size),
+static Handle<CompilationCacheTable> AllocateTable(Isolate* isolate, int size) {
+ CALL_HEAP_FUNCTION(isolate,
+ CompilationCacheTable::Allocate(size),
CompilationCacheTable);
}
@@ -211,17 +79,16 @@ Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
ASSERT(generation < generations_);
Handle<CompilationCacheTable> result;
if (tables_[generation]->IsUndefined()) {
- result = AllocateTable(kInitialCacheSize);
+ result = AllocateTable(isolate(), kInitialCacheSize);
tables_[generation] = *result;
} else {
CompilationCacheTable* table =
CompilationCacheTable::cast(tables_[generation]);
- result = Handle<CompilationCacheTable>(table);
+ result = Handle<CompilationCacheTable>(table, isolate());
}
return result;
}
-
void CompilationSubCache::Age() {
// Age the generations implicitly killing off the oldest.
for (int i = generations_ - 1; i > 0; i--) {
@@ -229,12 +96,12 @@ void CompilationSubCache::Age() {
}
// Set the first generation as unborn.
- tables_[0] = Heap::undefined_value();
+ tables_[0] = isolate()->heap()->undefined_value();
}
void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
- Object* undefined = Heap::raw_unchecked_undefined_value();
+ Object* undefined = isolate()->heap()->raw_unchecked_undefined_value();
for (int i = 0; i < generations_; i++) {
if (tables_[i] != undefined) {
reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);
@@ -249,14 +116,14 @@ void CompilationSubCache::Iterate(ObjectVisitor* v) {
void CompilationSubCache::Clear() {
- MemsetPointer(tables_, Heap::undefined_value(), generations_);
+ MemsetPointer(tables_, isolate()->heap()->undefined_value(), generations_);
}
void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
// Probe the script generation tables. Make sure not to leak handles
// into the caller's handle scope.
- { HandleScope scope;
+ { HandleScope scope(isolate());
for (int generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
table->Remove(*function_info);
@@ -265,6 +132,13 @@ void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
}
+CompilationCacheScript::CompilationCacheScript(Isolate* isolate,
+ int generations)
+ : CompilationSubCache(isolate, generations),
+ script_histogram_(NULL),
+ script_histogram_initialized_(false) { }
+
+
// We only re-use a cached function for some script source code if the
// script originates from the same place. This is to avoid issues
// when reporting errors, etc.
@@ -274,7 +148,7 @@ bool CompilationCacheScript::HasOrigin(
int line_offset,
int column_offset) {
Handle<Script> script =
- Handle<Script>(Script::cast(function_info->script()));
+ Handle<Script>(Script::cast(function_info->script()), isolate());
// If the script name isn't set, the boilerplate script should have
// an undefined name to have the same origin.
if (name.is_null()) {
@@ -303,10 +177,10 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
// Probe the script generation tables. Make sure not to leak handles
// into the caller's handle scope.
- { HandleScope scope;
+ { HandleScope scope(isolate());
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
- Handle<Object> probe(table->Lookup(*source));
+ Handle<Object> probe(table->Lookup(*source), isolate());
if (probe->IsSharedFunctionInfo()) {
Handle<SharedFunctionInfo> function_info =
Handle<SharedFunctionInfo>::cast(probe);
@@ -320,30 +194,34 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
}
}
- static void* script_histogram = StatsTable::CreateHistogram(
- "V8.ScriptCache",
- 0,
- kScriptGenerations,
- kScriptGenerations + 1);
+ if (!script_histogram_initialized_) {
+ script_histogram_ = isolate()->stats_table()->CreateHistogram(
+ "V8.ScriptCache",
+ 0,
+ kScriptGenerations,
+ kScriptGenerations + 1);
+ script_histogram_initialized_ = true;
+ }
- if (script_histogram != NULL) {
+ if (script_histogram_ != NULL) {
// The level NUMBER_OF_SCRIPT_GENERATIONS is equivalent to a cache miss.
- StatsTable::AddHistogramSample(script_histogram, generation);
+ isolate()->stats_table()->AddHistogramSample(script_histogram_, generation);
}
// Once outside the manacles of the handle scope, we need to recheck
// to see if we actually found a cached script. If so, we return a
// handle created in the caller's handle scope.
if (result != NULL) {
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
+ Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result),
+ isolate());
ASSERT(HasOrigin(shared, name, line_offset, column_offset));
// If the script was found in a later generation, we promote it to
// the first generation to let it survive longer in the cache.
if (generation != 0) Put(source, shared);
- Counters::compilation_cache_hits.Increment();
+ isolate()->counters()->compilation_cache_hits()->Increment();
return shared;
} else {
- Counters::compilation_cache_misses.Increment();
+ isolate()->counters()->compilation_cache_misses()->Increment();
return Handle<SharedFunctionInfo>::null();
}
}
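
The histogram change above fixes a multi-isolate hazard: a function-local static is created once per process, so every isolate shared one histogram. The replacement is a lazily initialized member; the separate boolean matters because NULL is a legitimate creation result (statistics disabled) and must not trigger a retry on every lookup. A sketch of the pattern, with an illustrative creation hook:

    #include <cstddef>

    // Stand-in for a histogram handle from the stats subsystem; NULL is a
    // legitimate result (e.g. statistics disabled), not an error.
    typedef void* Histogram;

    Histogram CreateScriptCacheHistogram() {
      return NULL;  // pretend stats are disabled in this sketch
    }

    class ScriptCache {
     public:
      ScriptCache() : histogram_(NULL), histogram_initialized_(false) {}

      Histogram histogram() {
        if (!histogram_initialized_) {
          histogram_ = CreateScriptCacheHistogram();
          histogram_initialized_ = true;  // record the attempt, not the
                                          // result, so NULL isn't recreated
        }
        return histogram_;
      }

     private:
      Histogram histogram_;
      bool histogram_initialized_;
    };

    int main() {
      ScriptCache cache;
      return cache.histogram() == NULL ? 0 : 1;
    }
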
@@ -360,13 +238,15 @@ MaybeObject* CompilationCacheScript::TryTablePut(
Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
Handle<String> source,
Handle<SharedFunctionInfo> function_info) {
- CALL_HEAP_FUNCTION(TryTablePut(source, function_info), CompilationCacheTable);
+ CALL_HEAP_FUNCTION(isolate(),
+ TryTablePut(source, function_info),
+ CompilationCacheTable);
}
void CompilationCacheScript::Put(Handle<String> source,
Handle<SharedFunctionInfo> function_info) {
- HandleScope scope;
+ HandleScope scope(isolate());
SetFirstTable(TablePut(source, function_info));
}
@@ -380,7 +260,7 @@ Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
// having cleared the cache.
Object* result = NULL;
int generation;
- { HandleScope scope;
+ { HandleScope scope(isolate());
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
result = table->LookupEval(*source, *context, strict_mode);
@@ -391,14 +271,14 @@ Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
}
if (result->IsSharedFunctionInfo()) {
Handle<SharedFunctionInfo>
- function_info(SharedFunctionInfo::cast(result));
+ function_info(SharedFunctionInfo::cast(result), isolate());
if (generation != 0) {
Put(source, context, function_info);
}
- Counters::compilation_cache_hits.Increment();
+ isolate()->counters()->compilation_cache_hits()->Increment();
return function_info;
} else {
- Counters::compilation_cache_misses.Increment();
+ isolate()->counters()->compilation_cache_misses()->Increment();
return Handle<SharedFunctionInfo>::null();
}
}
@@ -417,7 +297,8 @@ Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
- CALL_HEAP_FUNCTION(TryTablePut(source, context, function_info),
+ CALL_HEAP_FUNCTION(isolate(),
+ TryTablePut(source, context, function_info),
CompilationCacheTable);
}
@@ -425,7 +306,7 @@ Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
void CompilationCacheEval::Put(Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
- HandleScope scope;
+ HandleScope scope(isolate());
SetFirstTable(TablePut(source, context, function_info));
}
@@ -437,7 +318,7 @@ Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
// having cleared the cache.
Object* result = NULL;
int generation;
- { HandleScope scope;
+ { HandleScope scope(isolate());
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
result = table->LookupRegExp(*source, flags);
@@ -447,14 +328,14 @@ Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
}
}
if (result->IsFixedArray()) {
- Handle<FixedArray> data(FixedArray::cast(result));
+ Handle<FixedArray> data(FixedArray::cast(result), isolate());
if (generation != 0) {
Put(source, flags, data);
}
- Counters::compilation_cache_hits.Increment();
+ isolate()->counters()->compilation_cache_hits()->Increment();
return data;
} else {
- Counters::compilation_cache_misses.Increment();
+ isolate()->counters()->compilation_cache_misses()->Increment();
return Handle<FixedArray>::null();
}
}
@@ -473,14 +354,16 @@ Handle<CompilationCacheTable> CompilationCacheRegExp::TablePut(
Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
- CALL_HEAP_FUNCTION(TryTablePut(source, flags, data), CompilationCacheTable);
+ CALL_HEAP_FUNCTION(isolate(),
+ TryTablePut(source, flags, data),
+ CompilationCacheTable);
}
void CompilationCacheRegExp::Put(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
- HandleScope scope;
+ HandleScope scope(isolate());
SetFirstTable(TablePut(source, flags, data));
}
@@ -488,9 +371,9 @@ void CompilationCacheRegExp::Put(Handle<String> source,
void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
if (!IsEnabled()) return;
- eval_global.Remove(function_info);
- eval_contextual.Remove(function_info);
- script.Remove(function_info);
+ eval_global_.Remove(function_info);
+ eval_contextual_.Remove(function_info);
+ script_.Remove(function_info);
}
@@ -502,7 +385,7 @@ Handle<SharedFunctionInfo> CompilationCache::LookupScript(Handle<String> source,
return Handle<SharedFunctionInfo>::null();
}
- return script.Lookup(source, name, line_offset, column_offset);
+ return script_.Lookup(source, name, line_offset, column_offset);
}
@@ -517,9 +400,9 @@ Handle<SharedFunctionInfo> CompilationCache::LookupEval(
Handle<SharedFunctionInfo> result;
if (is_global) {
- result = eval_global.Lookup(source, context, strict_mode);
+ result = eval_global_.Lookup(source, context, strict_mode);
} else {
- result = eval_contextual.Lookup(source, context, strict_mode);
+ result = eval_contextual_.Lookup(source, context, strict_mode);
}
return result;
}
@@ -531,7 +414,7 @@ Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
return Handle<FixedArray>::null();
}
- return reg_exp.Lookup(source, flags);
+ return reg_exp_.Lookup(source, flags);
}
@@ -541,7 +424,7 @@ void CompilationCache::PutScript(Handle<String> source,
return;
}
- script.Put(source, function_info);
+ script_.Put(source, function_info);
}
@@ -553,11 +436,11 @@ void CompilationCache::PutEval(Handle<String> source,
return;
}
- HandleScope scope;
+ HandleScope scope(isolate());
if (is_global) {
- eval_global.Put(source, context, function_info);
+ eval_global_.Put(source, context, function_info);
} else {
- eval_contextual.Put(source, context, function_info);
+ eval_contextual_.Put(source, context, function_info);
}
}
@@ -570,7 +453,7 @@ void CompilationCache::PutRegExp(Handle<String> source,
return;
}
- reg_exp.Put(source, flags, data);
+ reg_exp_.Put(source, flags, data);
}
@@ -579,9 +462,11 @@ static bool SourceHashCompare(void* key1, void* key2) {
}
-static HashMap* EagerOptimizingSet() {
- static HashMap map(&SourceHashCompare);
- return &map;
+HashMap* CompilationCache::EagerOptimizingSet() {
+ if (eager_optimizing_set_ == NULL) {
+ eager_optimizing_set_ = new HashMap(&SourceHashCompare);
+ }
+ return eager_optimizing_set_;
}
@@ -615,38 +500,39 @@ void CompilationCache::ResetEagerOptimizingData() {
void CompilationCache::Clear() {
for (int i = 0; i < kSubCacheCount; i++) {
- subcaches[i]->Clear();
+ subcaches_[i]->Clear();
}
}
+
void CompilationCache::Iterate(ObjectVisitor* v) {
for (int i = 0; i < kSubCacheCount; i++) {
- subcaches[i]->Iterate(v);
+ subcaches_[i]->Iterate(v);
}
}
void CompilationCache::IterateFunctions(ObjectVisitor* v) {
for (int i = 0; i < kSubCacheCount; i++) {
- subcaches[i]->IterateFunctions(v);
+ subcaches_[i]->IterateFunctions(v);
}
}
void CompilationCache::MarkCompactPrologue() {
for (int i = 0; i < kSubCacheCount; i++) {
- subcaches[i]->Age();
+ subcaches_[i]->Age();
}
}
void CompilationCache::Enable() {
- enabled = true;
+ enabled_ = true;
}
void CompilationCache::Disable() {
- enabled = false;
+ enabled_ = false;
Clear();
}
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index f779a23aa..887d4e84e 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -31,6 +31,152 @@
namespace v8 {
namespace internal {
+class HashMap;
+
+// The compilation cache consists of several generational sub-caches which use
+// this class as a base class. A sub-cache contains a compilation cache table
+// for each generation of the sub-cache. Since the same source code string has
+// different compiled code for scripts and evals, we use separate sub-caches
+// for different compilation modes, to avoid retrieving the wrong result.
+class CompilationSubCache {
+ public:
+ CompilationSubCache(Isolate* isolate, int generations)
+ : isolate_(isolate),
+ generations_(generations) {
+ tables_ = NewArray<Object*>(generations);
+ }
+
+ ~CompilationSubCache() { DeleteArray(tables_); }
+
+ // Index for the first generation in the cache.
+ static const int kFirstGeneration = 0;
+
+ // Get the compilation cache tables for a specific generation.
+ Handle<CompilationCacheTable> GetTable(int generation);
+
+ // Accessors for first generation.
+ Handle<CompilationCacheTable> GetFirstTable() {
+ return GetTable(kFirstGeneration);
+ }
+ void SetFirstTable(Handle<CompilationCacheTable> value) {
+ ASSERT(kFirstGeneration < generations_);
+ tables_[kFirstGeneration] = *value;
+ }
+
+ // Age the sub-cache by evicting the oldest generation and creating a new
+ // young generation.
+ void Age();
+
+ // GC support.
+ void Iterate(ObjectVisitor* v);
+ void IterateFunctions(ObjectVisitor* v);
+
+ // Clear this sub-cache evicting all its content.
+ void Clear();
+
+ // Remove given shared function info from sub-cache.
+ void Remove(Handle<SharedFunctionInfo> function_info);
+
+ // Number of generations in this sub-cache.
+ inline int generations() { return generations_; }
+
+ protected:
+ Isolate* isolate() { return isolate_; }
+
+ private:
+ Isolate* isolate_;
+ int generations_; // Number of generations.
+ Object** tables_; // Compilation cache tables - one for each generation.
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
+};
+
+
+// Sub-cache for scripts.
+class CompilationCacheScript : public CompilationSubCache {
+ public:
+ CompilationCacheScript(Isolate* isolate, int generations);
+
+ Handle<SharedFunctionInfo> Lookup(Handle<String> source,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset);
+ void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);
+
+ private:
+ MUST_USE_RESULT MaybeObject* TryTablePut(
+ Handle<String> source, Handle<SharedFunctionInfo> function_info);
+
+ // Note: Returns a new hash table if operation results in expansion.
+ Handle<CompilationCacheTable> TablePut(
+ Handle<String> source, Handle<SharedFunctionInfo> function_info);
+
+ bool HasOrigin(Handle<SharedFunctionInfo> function_info,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset);
+
+ void* script_histogram_;
+ bool script_histogram_initialized_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
+};
+
+
+// Sub-cache for eval scripts.
+class CompilationCacheEval: public CompilationSubCache {
+ public:
+ CompilationCacheEval(Isolate* isolate, int generations)
+ : CompilationSubCache(isolate, generations) { }
+
+ Handle<SharedFunctionInfo> Lookup(Handle<String> source,
+ Handle<Context> context,
+ StrictModeFlag strict_mode);
+
+ void Put(Handle<String> source,
+ Handle<Context> context,
+ Handle<SharedFunctionInfo> function_info);
+
+ private:
+ MUST_USE_RESULT MaybeObject* TryTablePut(
+ Handle<String> source,
+ Handle<Context> context,
+ Handle<SharedFunctionInfo> function_info);
+
+ // Note: Returns a new hash table if operation results in expansion.
+ Handle<CompilationCacheTable> TablePut(
+ Handle<String> source,
+ Handle<Context> context,
+ Handle<SharedFunctionInfo> function_info);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
+};
+
+
+// Sub-cache for regular expressions.
+class CompilationCacheRegExp: public CompilationSubCache {
+ public:
+ CompilationCacheRegExp(Isolate* isolate, int generations)
+ : CompilationSubCache(isolate, generations) { }
+
+ Handle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
+
+ void Put(Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<FixedArray> data);
+ private:
+ MUST_USE_RESULT MaybeObject* TryTablePut(Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<FixedArray> data);
+
+ // Note: Returns a new hash table if operation results in expansion.
+ Handle<CompilationCacheTable> TablePut(Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<FixedArray> data);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
+};
+
// The compilation cache keeps shared function infos for compiled
// scripts and evals. The shared function infos are looked up using
@@ -41,69 +187,98 @@ class CompilationCache {
// Finds the script shared function info for a source
// string. Returns an empty handle if the cache doesn't contain a
// script for the given source string with the right origin.
- static Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset);
+ Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset);
// Finds the shared function info for a source string for eval in a
// given context. Returns an empty handle if the cache doesn't
// contain a script for the given source string.
- static Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- StrictModeFlag strict_mode);
+ Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
+ Handle<Context> context,
+ bool is_global,
+ StrictModeFlag strict_mode);
// Returns the regexp data associated with the given regexp if it
// is in cache, otherwise an empty handle.
- static Handle<FixedArray> LookupRegExp(Handle<String> source,
- JSRegExp::Flags flags);
+ Handle<FixedArray> LookupRegExp(Handle<String> source,
+ JSRegExp::Flags flags);
// Associate the (source, kind) pair with the shared function
// info. This may overwrite an existing mapping.
- static void PutScript(Handle<String> source,
- Handle<SharedFunctionInfo> function_info);
+ void PutScript(Handle<String> source,
+ Handle<SharedFunctionInfo> function_info);
// Associate the (source, context->closure()->shared(), kind) triple
// with the shared function info. This may overwrite an existing mapping.
- static void PutEval(Handle<String> source,
- Handle<Context> context,
- bool is_global,
- Handle<SharedFunctionInfo> function_info);
+ void PutEval(Handle<String> source,
+ Handle<Context> context,
+ bool is_global,
+ Handle<SharedFunctionInfo> function_info);
// Associate the (source, flags) pair with the given regexp data.
// This may overwrite an existing mapping.
- static void PutRegExp(Handle<String> source,
- JSRegExp::Flags flags,
- Handle<FixedArray> data);
+ void PutRegExp(Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<FixedArray> data);
// Support for eager optimization tracking.
- static bool ShouldOptimizeEagerly(Handle<JSFunction> function);
- static void MarkForEagerOptimizing(Handle<JSFunction> function);
- static void MarkForLazyOptimizing(Handle<JSFunction> function);
+ bool ShouldOptimizeEagerly(Handle<JSFunction> function);
+ void MarkForEagerOptimizing(Handle<JSFunction> function);
+ void MarkForLazyOptimizing(Handle<JSFunction> function);
// Reset the eager optimization tracking data.
- static void ResetEagerOptimizingData();
+ void ResetEagerOptimizingData();
// Clear the cache - also used to initialize the cache at startup.
- static void Clear();
+ void Clear();
// Remove given shared function info from all caches.
- static void Remove(Handle<SharedFunctionInfo> function_info);
+ void Remove(Handle<SharedFunctionInfo> function_info);
// GC support.
- static void Iterate(ObjectVisitor* v);
- static void IterateFunctions(ObjectVisitor* v);
+ void Iterate(ObjectVisitor* v);
+ void IterateFunctions(ObjectVisitor* v);
// Notify the cache that a mark-sweep garbage collection is about to
// take place. This is used to retire entries from the cache to
// avoid keeping them alive too long without using them.
- static void MarkCompactPrologue();
+ void MarkCompactPrologue();
// Enable/disable compilation cache. Used by debugger to disable compilation
// cache during debugging to make sure new scripts are always compiled.
- static void Enable();
- static void Disable();
+ void Enable();
+ void Disable();
+ private:
+ explicit CompilationCache(Isolate* isolate);
+ ~CompilationCache();
+
+ HashMap* EagerOptimizingSet();
+
+ // The number of sub caches covering the different types to cache.
+ static const int kSubCacheCount = 4;
+
+ bool IsEnabled() { return FLAG_compilation_cache && enabled_; }
+
+ Isolate* isolate() { return isolate_; }
+
+ Isolate* isolate_;
+
+ CompilationCacheScript script_;
+ CompilationCacheEval eval_global_;
+ CompilationCacheEval eval_contextual_;
+ CompilationCacheRegExp reg_exp_;
+ CompilationSubCache* subcaches_[kSubCacheCount];
+
+ // Current enable state of the compilation cache.
+ bool enabled_;
+
+ HashMap* eager_optimizing_set_;
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(CompilationCache);
};
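
Note the new private section at the bottom of the class: the constructor and destructor are private and Isolate is a friend, so a CompilationCache can only exist as part of the isolate that owns it. A sketch of that ownership pattern and the resulting call-site change:

    class Isolate;

    class CompilationCache {
     public:
      void Clear() { /* drop all generations */ }

     private:
      CompilationCache() {}   // construction reserved for Isolate
      ~CompilationCache() {}
      friend class Isolate;   // the sole class allowed to create/destroy one
    };

    class Isolate {
     public:
      CompilationCache* compilation_cache() { return &compilation_cache_; }

     private:
      CompilationCache compilation_cache_;  // one cache per isolate
    };

    // Call sites change accordingly:
    //   before: CompilationCache::Clear();             (static, process-wide)
    //   after:  isolate->compilation_cache()->Clear(); (instance, per-isolate)
    void ClearCaches(Isolate* isolate) {
      isolate->compilation_cache()->Clear();
    }

    int main() {
      Isolate isolate;
      ClearCaches(&isolate);
      return 0;
    }
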
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 18f54c2af..f8d1b3de6 100755
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -30,9 +30,8 @@
#include "compiler.h"
#include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "compilation-cache.h"
-#include "data-flow.h"
#include "debug.h"
#include "full-codegen.h"
#include "gdb-jit.h"
@@ -51,7 +50,8 @@ namespace internal {
CompilationInfo::CompilationInfo(Handle<Script> script)
- : flags_(0),
+ : isolate_(script->GetIsolate()),
+ flags_(0),
function_(NULL),
scope_(NULL),
script_(script),
@@ -64,7 +64,8 @@ CompilationInfo::CompilationInfo(Handle<Script> script)
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
- : flags_(IsLazy::encode(true)),
+ : isolate_(shared_info->GetIsolate()),
+ flags_(IsLazy::encode(true)),
function_(NULL),
scope_(NULL),
shared_info_(shared_info),
@@ -78,7 +79,8 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
- : flags_(IsLazy::encode(true)),
+ : isolate_(closure->GetIsolate()),
+ flags_(IsLazy::encode(true)),
function_(NULL),
scope_(NULL),
closure_(closure),
@@ -92,22 +94,23 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
}
+// Disable optimization for the rest of the compilation pipeline.
void CompilationInfo::DisableOptimization() {
- if (FLAG_optimize_closures) {
- // If we allow closures optimizations and it's an optimizable closure
- // mark it correspondingly.
- bool is_closure = closure_.is_null() && !scope_->HasTrivialOuterContext();
- if (is_closure) {
- bool is_optimizable_closure =
- !scope_->outer_scope_calls_eval() && !scope_->inside_with();
- if (is_optimizable_closure) {
- SetMode(BASE);
- return;
- }
- }
- }
+ bool is_optimizable_closure =
+ FLAG_optimize_closures &&
+ closure_.is_null() &&
+ !scope_->HasTrivialOuterContext() &&
+ !scope_->outer_scope_calls_non_strict_eval() &&
+ !scope_->inside_with();
+ SetMode(is_optimizable_closure ? BASE : NONOPT);
+}
- SetMode(NONOPT);
+
+void CompilationInfo::AbortOptimization() {
+ Handle<Code> code(shared_info()->code());
+ SetCode(code);
+ Isolate* isolate = code->GetIsolate();
+ isolate->compilation_cache()->MarkForLazyOptimizing(closure());
}
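
AbortOptimization, added above, captures a contract the rest of compiler.cc relies on: when Crankshaft bails out, the pipeline still reports success by reinstalling the existing unoptimized code, and only permanent failures disable optimization for good. A rough sketch of that control flow, with stand-in types:

    #include <cstddef>

    struct Code { bool optimized; };

    struct CompilationInfo {
      Code* unoptimized;       // the shared function's existing code
      Code* result;            // what the pipeline hands back
      bool permanent_failure;  // e.g. argument limits or opt count exceeded

      void AbortOptimization() { result = unoptimized; }
    };

    // Returning true means "the pipeline is still going", not "we
    // optimized": the caller runs the function either way.
    bool MakeOptimizedCode(CompilationInfo* info, Code* maybe_optimized) {
      if (maybe_optimized != NULL) {
        info->result = maybe_optimized;
        return true;
      }
      info->AbortOptimization();     // fall back to the unoptimized code
      if (info->permanent_failure) {
        // Mirrors shared_info()->DisableOptimization(): never retry this
        // function, instead of burning time on repeated bailouts.
      }
      return true;
    }

    int main() {
      Code unopt = { false };
      CompilationInfo info = { &unopt, NULL, false };
      MakeOptimizedCode(&info, NULL);        // bailout path
      return info.result == &unopt ? 0 : 1;  // still have runnable code
    }
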
@@ -119,19 +122,23 @@ void CompilationInfo::DisableOptimization() {
// all. However, Crankshaft supports recompilation of functions, so in this case
// the full compiler need not be used if a debugger is attached, but only if
// break points have actually been set.
-static bool AlwaysFullCompiler() {
+static bool is_debugging_active() {
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (V8::UseCrankshaft()) {
- return FLAG_always_full_compiler || Debug::has_break_points();
- } else {
- return FLAG_always_full_compiler || Debugger::IsDebuggerActive();
- }
+ Isolate* isolate = Isolate::Current();
+ return V8::UseCrankshaft() ?
+ isolate->debug()->has_break_points() :
+ isolate->debugger()->IsDebuggerActive();
#else
- return FLAG_always_full_compiler;
+ return false;
#endif
}
+static bool AlwaysFullCompiler() {
+ return FLAG_always_full_compiler || is_debugging_active();
+}
+
+
static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
@@ -158,29 +165,6 @@ static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
}
-static void AbortAndDisable(CompilationInfo* info) {
- // Disable optimization for the shared function info and mark the
- // code as non-optimizable. The marker on the shared function info
- // is there because we flush non-optimized code, thereby losing the
- // non-optimizable information for the code. When the code is
- // regenerated and set on the shared function info it is marked as
- // non-optimizable if optimization is disabled for the shared
- // function info.
- Handle<SharedFunctionInfo> shared = info->shared_info();
- shared->set_optimization_disabled(true);
- Handle<Code> code = Handle<Code>(shared->code());
- ASSERT(code->kind() == Code::FUNCTION);
- code->set_optimizable(false);
- info->SetCode(code);
- if (FLAG_trace_opt) {
- PrintF("[disabled optimization for: ");
- info->closure()->PrintName();
- PrintF(" / %" V8PRIxPTR "]\n",
- reinterpret_cast<intptr_t>(*info->closure()));
- }
-}
-
-
static bool MakeCrankshaftCode(CompilationInfo* info) {
// Test if we can optimize this function when asked to. We can only
// do this after the scopes are computed.
@@ -197,6 +181,10 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
Handle<Code> code(info->shared_info()->code());
ASSERT(code->kind() == Code::FUNCTION);
+ // We should never arrive here if optimization has been disabled on the
+ // shared function info.
+ ASSERT(!info->shared_info()->optimization_disabled());
+
// Fall back to using the full code generator if it's not possible
// to use the Hydrogen-based optimizing compiler. We already have
// generated code for this from the shared function object.
@@ -210,7 +198,9 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
const int kMaxOptCount =
FLAG_deopt_every_n_times == 0 ? Compiler::kDefaultMaxOptCount : 1000;
if (info->shared_info()->opt_count() > kMaxOptCount) {
- AbortAndDisable(info);
+ info->AbortOptimization();
+ Handle<JSFunction> closure = info->closure();
+ info->shared_info()->DisableOptimization(*closure);
// True indicates the compilation pipeline is still going, not
// necessarily that we optimized the code.
return true;
@@ -229,7 +219,9 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
if ((scope->num_parameters() + 1) > parameter_limit ||
(info->osr_ast_id() != AstNode::kNoNumber &&
scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit)) {
- AbortAndDisable(info);
+ info->AbortOptimization();
+ Handle<JSFunction> closure = info->closure();
+ info->shared_info()->DisableOptimization(*closure);
// True indicates the compilation pipeline is still going, not
// necessarily that we optimized the code.
return true;
@@ -250,7 +242,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
// performance of the hydrogen-based compiler.
int64_t start = OS::Ticks();
bool should_recompile = !info->shared_info()->has_deoptimization_support();
- if (should_recompile || FLAG_time_hydrogen) {
+ if (should_recompile || FLAG_hydrogen_stats) {
HPhase phase(HPhase::kFullCodeGen);
CompilationInfo unoptimized(info->shared_info());
// Note that we use the same AST that we will use for generating the
@@ -283,18 +275,18 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
HTracer::Instance()->TraceCompilation(info->function());
}
- TypeFeedbackOracle oracle(
- code, Handle<Context>(info->closure()->context()->global_context()));
- HGraphBuilder builder(&oracle);
+ Handle<Context> global_context(info->closure()->context()->global_context());
+ TypeFeedbackOracle oracle(code, global_context);
+ HGraphBuilder builder(info, &oracle);
HPhase phase(HPhase::kTotal);
- HGraph* graph = builder.CreateGraph(info);
- if (Top::has_pending_exception()) {
+ HGraph* graph = builder.CreateGraph();
+ if (info->isolate()->has_pending_exception()) {
info->SetCode(Handle<Code>::null());
return false;
}
if (graph != NULL && FLAG_build_lithium) {
- Handle<Code> optimized_code = graph->Compile();
+ Handle<Code> optimized_code = graph->Compile(info);
if (!optimized_code.is_null()) {
info->SetCode(optimized_code);
FinishOptimization(info->closure(), start);
@@ -302,49 +294,32 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
}
}
- // Compilation with the Hydrogen compiler failed. Keep using the
- // shared code but mark it as unoptimizable.
- AbortAndDisable(info);
+ // Keep using the shared code.
+ info->AbortOptimization();
+ if (!builder.inline_bailout()) {
+ // Mark the shared code as unoptimizable unless it was an inlined
+ // function that bailed out.
+ Handle<JSFunction> closure = info->closure();
+ info->shared_info()->DisableOptimization(*closure);
+ }
// True indicates the compilation pipeline is still going, not necessarily
// that we optimized the code.
return true;
}
+static bool GenerateCode(CompilationInfo* info) {
+ return V8::UseCrankshaft() ?
+ MakeCrankshaftCode(info) :
+ FullCodeGenerator::MakeCode(info);
+}
+
+
static bool MakeCode(CompilationInfo* info) {
// Precondition: code has been parsed. Postcondition: the code field in
// the compilation info is set if compilation succeeded.
ASSERT(info->function() != NULL);
-
- if (Rewriter::Rewrite(info) && Scope::Analyze(info)) {
- if (V8::UseCrankshaft()) return MakeCrankshaftCode(info);
-
- // Generate code and return it. Code generator selection is governed by
- // which backends are enabled and whether the function is considered
- // run-once code or not.
- //
- // --full-compiler enables the dedicated backend for code we expect to
- // be run once
- //
- // The normal choice of backend can be overridden with the flags
- // --always-full-compiler.
- if (Rewriter::Analyze(info)) {
- Handle<SharedFunctionInfo> shared = info->shared_info();
- bool is_run_once = (shared.is_null())
- ? info->scope()->is_global_scope()
- : (shared->is_toplevel() || shared->try_full_codegen());
- bool can_use_full =
- FLAG_full_compiler && !info->function()->contains_loops();
- if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
- return FullCodeGenerator::MakeCode(info);
- } else {
- return AssignedVariablesAnalyzer::Analyze(info) &&
- CodeGenerator::MakeCode(info);
- }
- }
- }
-
- return false;
+ return Rewriter::Rewrite(info) && Scope::Analyze(info) && GenerateCode(info);
}
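
The rewritten MakeCode collapses backend selection into one short-circuit chain: each phase returns false on failure, so the first failing phase aborts the whole pipeline. A toy sketch of that shape (hypothetical names, not V8 API):

// Toy pipeline: each phase reports success, and && stops at the first failure.
struct ToyInfo { bool rewritten; bool analyzed; };

static bool ToyRewrite(ToyInfo* info) { info->rewritten = true; return true; }
static bool ToyAnalyze(ToyInfo* info) {
  info->analyzed = info->rewritten;  // depends on the previous phase
  return info->analyzed;
}
static bool ToyGenerate(ToyInfo* info) { return info->analyzed; }

static bool MakeToyCode(ToyInfo* info) {
  return ToyRewrite(info) && ToyAnalyze(info) && ToyGenerate(info);
}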
@@ -364,13 +339,13 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
- CompilationZoneScope zone_scope(DELETE_ON_EXIT);
-
- PostponeInterruptsScope postpone;
+ Isolate* isolate = info->isolate();
+ ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+ PostponeInterruptsScope postpone(isolate);
- ASSERT(!i::Top::global_context().is_null());
+ ASSERT(!isolate->global_context().is_null());
Handle<Script> script = info->script();
- script->set_context_data((*i::Top::global_context())->data());
+ script->set_context_data((*isolate->global_context())->data());
#ifdef ENABLE_DEBUGGER_SUPPORT
if (info->is_eval()) {
@@ -379,19 +354,20 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
// For eval scripts add information on the function from which eval was
// called.
if (info->is_eval()) {
- StackTraceFrameIterator it;
+ StackTraceFrameIterator it(isolate);
if (!it.done()) {
script->set_eval_from_shared(
JSFunction::cast(it.frame()->function())->shared());
+ Code* code = it.frame()->LookupCode();
int offset = static_cast<int>(
- it.frame()->pc() - it.frame()->code()->instruction_start());
+ it.frame()->pc() - code->instruction_start());
script->set_eval_from_instructions_offset(Smi::FromInt(offset));
}
}
}
// Notify debugger
- Debugger::OnBeforeCompile(script);
+ isolate->debugger()->OnBeforeCompile(script);
#endif
// Only allow non-global compiles for eval.
@@ -403,22 +379,22 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
// rest of the function into account to avoid overlap with the
// parsing statistics.
HistogramTimer* rate = info->is_eval()
- ? &Counters::compile_eval
- : &Counters::compile;
+ ? info->isolate()->counters()->compile_eval()
+ : info->isolate()->counters()->compile();
HistogramTimerScope timer(rate);
// Compile the code.
FunctionLiteral* lit = info->function();
- LiveEditFunctionTracker live_edit_tracker(lit);
+ LiveEditFunctionTracker live_edit_tracker(isolate, lit);
if (!MakeCode(info)) {
- Top::StackOverflow();
+ isolate->StackOverflow();
return Handle<SharedFunctionInfo>::null();
}
// Allocate function.
ASSERT(!info->code().is_null());
Handle<SharedFunctionInfo> result =
- Factory::NewSharedFunctionInfo(
+ isolate->factory()->NewSharedFunctionInfo(
lit->name(),
lit->materialized_literal_count(),
info->code(),
@@ -428,7 +404,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
Compiler::SetFunctionInfo(result, lit, true, script);
if (script->name()->IsString()) {
- PROFILE(CodeCreateEvent(
+ PROFILE(isolate, CodeCreateEvent(
info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
@@ -439,13 +415,13 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
script,
info->code()));
} else {
- PROFILE(CodeCreateEvent(
+ PROFILE(isolate, CodeCreateEvent(
info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
*result,
- Heap::empty_string()));
+ isolate->heap()->empty_string()));
GDBJIT(AddCode(Handle<String>(), script, info->code()));
}
@@ -456,7 +432,8 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger
- Debugger::OnAfterCompile(script, Debugger::NO_AFTER_COMPILE_FLAGS);
+ isolate->debugger()->OnAfterCompile(
+ script, Debugger::NO_AFTER_COMPILE_FLAGS);
#endif
live_edit_tracker.RecordFunctionInfo(result, lit);
@@ -473,20 +450,23 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
ScriptDataImpl* input_pre_data,
Handle<Object> script_data,
NativesFlag natives) {
+ Isolate* isolate = source->GetIsolate();
int source_length = source->length();
- Counters::total_load_size.Increment(source_length);
- Counters::total_compile_size.Increment(source_length);
+ isolate->counters()->total_load_size()->Increment(source_length);
+ isolate->counters()->total_compile_size()->Increment(source_length);
// The VM is in the COMPILER state until exiting this function.
- VMState state(COMPILER);
+ VMState state(isolate, COMPILER);
+
+ CompilationCache* compilation_cache = isolate->compilation_cache();
// Do a lookup in the compilation cache but not for extensions.
Handle<SharedFunctionInfo> result;
if (extension == NULL) {
- result = CompilationCache::LookupScript(source,
- script_name,
- line_offset,
- column_offset);
+ result = compilation_cache->LookupScript(source,
+ script_name,
+ line_offset,
+ column_offset);
}
if (result.is_null()) {
@@ -512,7 +492,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
}
// Create a script object describing the script to be compiled.
- Handle<Script> script = Factory::NewScript(source);
+ Handle<Script> script = FACTORY->NewScript(source);
if (natives == NATIVES_CODE) {
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
}
@@ -522,7 +502,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
script->set_column_offset(Smi::FromInt(column_offset));
}
- script->set_data(script_data.is_null() ? Heap::undefined_value()
+ script->set_data(script_data.is_null() ? HEAP->undefined_value()
: *script_data);
// Compile the function and add it to the cache.
@@ -530,9 +510,13 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
info.MarkAsGlobal();
info.SetExtension(extension);
info.SetPreParseData(pre_data);
+ if (natives == NATIVES_CODE) {
+ info.MarkAsAllowingNativesSyntax();
+ info.MarkAsNative();
+ }
result = MakeFunctionInfo(&info);
if (extension == NULL && !result.is_null()) {
- CompilationCache::PutScript(source, result);
+ compilation_cache->PutScript(source, result);
}
// Get rid of the pre-parsing data (if necessary).
@@ -541,7 +525,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
}
}
- if (result.is_null()) Top::ReportPendingMessages();
+ if (result.is_null()) isolate->ReportPendingMessages();
return result;
}
@@ -550,36 +534,39 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
Handle<Context> context,
bool is_global,
StrictModeFlag strict_mode) {
+ Isolate* isolate = source->GetIsolate();
int source_length = source->length();
- Counters::total_eval_size.Increment(source_length);
- Counters::total_compile_size.Increment(source_length);
+ isolate->counters()->total_eval_size()->Increment(source_length);
+ isolate->counters()->total_compile_size()->Increment(source_length);
// The VM is in the COMPILER state until exiting this function.
- VMState state(COMPILER);
+ VMState state(isolate, COMPILER);
// Do a lookup in the compilation cache; if the entry is not there, invoke
// the compiler and add the result to the cache.
Handle<SharedFunctionInfo> result;
- result = CompilationCache::LookupEval(source,
- context,
- is_global,
- strict_mode);
+ CompilationCache* compilation_cache = isolate->compilation_cache();
+ result = compilation_cache->LookupEval(source,
+ context,
+ is_global,
+ strict_mode);
if (result.is_null()) {
// Create a script object describing the script to be compiled.
- Handle<Script> script = Factory::NewScript(source);
+ Handle<Script> script = isolate->factory()->NewScript(source);
CompilationInfo info(script);
info.MarkAsEval();
if (is_global) info.MarkAsGlobal();
- if (strict_mode == kStrictMode) info.MarkAsStrict();
+ if (strict_mode == kStrictMode) info.MarkAsStrictMode();
info.SetCallingContext(context);
result = MakeFunctionInfo(&info);
if (!result.is_null()) {
+ CompilationCache* compilation_cache = isolate->compilation_cache();
// If caller is strict mode, the result must be strict as well,
// but not the other way around. Consider:
// eval("'use strict'; ...");
ASSERT(strict_mode == kNonStrictMode || result->strict_mode());
- CompilationCache::PutEval(source, context, is_global, result);
+ compilation_cache->PutEval(source, context, is_global, result);
}
}
@@ -588,32 +575,45 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
bool Compiler::CompileLazy(CompilationInfo* info) {
- CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+ Isolate* isolate = info->isolate();
+
+ ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
// The VM is in the COMPILER state until exiting this function.
- VMState state(COMPILER);
+ VMState state(isolate, COMPILER);
- PostponeInterruptsScope postpone;
+ PostponeInterruptsScope postpone(isolate);
Handle<SharedFunctionInfo> shared = info->shared_info();
int compiled_size = shared->end_position() - shared->start_position();
- Counters::total_compile_size.Increment(compiled_size);
+ isolate->counters()->total_compile_size()->Increment(compiled_size);
// Generate the AST for the lazily compiled function.
if (ParserApi::Parse(info)) {
// Measure how long it takes to do the lazy compilation; only take the
// rest of the function into account to avoid overlap with the lazy
// parsing statistics.
- HistogramTimerScope timer(&Counters::compile_lazy);
+ HistogramTimerScope timer(isolate->counters()->compile_lazy());
+
+ // After parsing we know the function's strict mode. Remember it.
+ if (info->function()->strict_mode()) {
+ shared->set_strict_mode(true);
+ info->MarkAsStrictMode();
+ }
// Compile the code.
if (!MakeCode(info)) {
- if (!Top::has_pending_exception()) {
- Top::StackOverflow();
+ if (!isolate->has_pending_exception()) {
+ isolate->StackOverflow();
}
} else {
ASSERT(!info->code().is_null());
Handle<Code> code = info->code();
+ // Set optimizable to false if this is disallowed by the shared
+ // function info, e.g., we might have flushed the code and must
+ // reset this bit when lazy compiling the code again.
+ if (shared->optimization_disabled()) code->set_optimizable(false);
+
Handle<JSFunction> function = info->closure();
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
@@ -650,16 +650,18 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
ASSERT(shared->is_compiled());
shared->set_code_age(0);
- if (V8::UseCrankshaft() && info->AllowOptimize()) {
+ if (info->AllowOptimize() && !shared->optimization_disabled()) {
// If we're asked to always optimize, we compile the optimized
// version of the function right away - unless the debugger is
// active as it makes no sense to compile optimized code then.
- if (FLAG_always_opt && !Debug::has_break_points()) {
+ if (FLAG_always_opt &&
+ !Isolate::Current()->DebuggerHasBreakPoints()) {
CompilationInfo optimized(function);
optimized.SetOptimizing(AstNode::kNoNumber);
return CompileLazy(&optimized);
- } else if (CompilationCache::ShouldOptimizeEagerly(function)) {
- RuntimeProfiler::OptimizeSoon(*function);
+ } else if (isolate->compilation_cache()->ShouldOptimizeEagerly(
+ function)) {
+ isolate->runtime_profiler()->OptimizeSoon(*function);
}
}
}
@@ -679,56 +681,35 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
CompilationInfo info(script);
info.SetFunction(literal);
info.SetScope(literal->scope());
+ if (literal->scope()->is_strict_mode()) info.MarkAsStrictMode();
+ if (script->type()->value() == Script::TYPE_NATIVE) info.MarkAsNative();
- LiveEditFunctionTracker live_edit_tracker(literal);
+ LiveEditFunctionTracker live_edit_tracker(info.isolate(), literal);
// Determine if the function can be lazily compiled. This is necessary to
// allow some of our builtin JS files to be lazily compiled. These
// builtins cannot be handled lazily by the parser, since we have to know
// if a function uses the special natives syntax, which is something the
// parser records.
bool allow_lazy = literal->AllowsLazyCompilation() &&
- !LiveEditFunctionTracker::IsActive();
+ !LiveEditFunctionTracker::IsActive(info.isolate());
Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
// Generate code
if (FLAG_lazy && allow_lazy) {
- Handle<Code> code(Builtins::builtin(Builtins::LazyCompile));
+ Handle<Code> code = info.isolate()->builtins()->LazyCompile();
info.SetCode(code);
- } else {
- if (V8::UseCrankshaft()) {
- if (!MakeCrankshaftCode(&info)) {
- return Handle<SharedFunctionInfo>::null();
- }
- } else {
- // The bodies of function literals have not yet been visited by the
- // AST optimizer/analyzer.
- if (!Rewriter::Analyze(&info)) return Handle<SharedFunctionInfo>::null();
-
- bool is_run_once = literal->try_full_codegen();
- bool can_use_full = FLAG_full_compiler && !literal->contains_loops();
-
- if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
- if (!FullCodeGenerator::MakeCode(&info)) {
- return Handle<SharedFunctionInfo>::null();
- }
- } else {
- // We fall back to the classic V8 code generator.
- if (!AssignedVariablesAnalyzer::Analyze(&info) ||
- !CodeGenerator::MakeCode(&info)) {
- return Handle<SharedFunctionInfo>::null();
- }
- }
- }
+ } else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
+ (!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
ASSERT(!info.code().is_null());
-
- // Function compilation complete.
scope_info = SerializedScopeInfo::Create(info.scope());
+ } else {
+ return Handle<SharedFunctionInfo>::null();
}
// Create a shared function info object.
Handle<SharedFunctionInfo> result =
- Factory::NewSharedFunctionInfo(literal->name(),
+ FACTORY->NewSharedFunctionInfo(literal->name(),
literal->materialized_literal_count(),
info.code(),
scope_info);
@@ -765,9 +746,10 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
function_info->SetThisPropertyAssignmentsInfo(
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
- function_info->set_try_full_codegen(lit->try_full_codegen());
function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
function_info->set_strict_mode(lit->strict_mode());
+ function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
+ function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
}
@@ -780,27 +762,31 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
// Log the code generation. If source information is available include
// script name and line number. Check explicitly whether logging is
// enabled as finding the line number is not free.
- if (Logger::is_logging() || CpuProfiler::is_profiling()) {
+ if (info->isolate()->logger()->is_logging() ||
+ CpuProfiler::is_profiling(info->isolate())) {
Handle<Script> script = info->script();
Handle<Code> code = info->code();
- if (*code == Builtins::builtin(Builtins::LazyCompile)) return;
+ if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))
+ return;
if (script->name()->IsString()) {
int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
USE(line_num);
- PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
+ PROFILE(info->isolate(),
+ CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*shared,
String::cast(script->name()),
line_num));
} else {
- PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
+ PROFILE(info->isolate(),
+ CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*shared,
shared->DebugName()));
}
}
- GDBJIT(AddCode(name,
+ GDBJIT(AddCode(Handle<String>(shared->DebugName()),
Handle<Script>(info->script()),
Handle<Code>(info->code())));
}
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index e0a437ac6..a77fc8ea4 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,9 +28,8 @@
#ifndef V8_COMPILER_H_
#define V8_COMPILER_H_
+#include "allocation.h"
#include "ast.h"
-#include "frame-element.h"
-#include "register-allocator.h"
#include "zone.h"
namespace v8 {
@@ -46,10 +45,14 @@ class CompilationInfo BASE_EMBEDDED {
explicit CompilationInfo(Handle<SharedFunctionInfo> shared_info);
explicit CompilationInfo(Handle<JSFunction> closure);
+ Isolate* isolate() {
+ ASSERT(Isolate::Current() == isolate_);
+ return isolate_;
+ }
bool is_lazy() const { return (flags_ & IsLazy::mask()) != 0; }
bool is_eval() const { return (flags_ & IsEval::mask()) != 0; }
bool is_global() const { return (flags_ & IsGlobal::mask()) != 0; }
- bool is_strict() const { return (flags_ & IsStrict::mask()) != 0; }
+ bool is_strict_mode() const { return (flags_ & IsStrictMode::mask()) != 0; }
bool is_in_loop() const { return (flags_ & IsInLoop::mask()) != 0; }
FunctionLiteral* function() const { return function_; }
Scope* scope() const { return scope_; }
@@ -70,16 +73,28 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(!is_lazy());
flags_ |= IsGlobal::encode(true);
}
- void MarkAsStrict() {
- flags_ |= IsStrict::encode(true);
+ void MarkAsStrictMode() {
+ flags_ |= IsStrictMode::encode(true);
}
StrictModeFlag StrictMode() {
- return is_strict() ? kStrictMode : kNonStrictMode;
+ return is_strict_mode() ? kStrictMode : kNonStrictMode;
}
void MarkAsInLoop() {
ASSERT(is_lazy());
flags_ |= IsInLoop::encode(true);
}
+ void MarkAsAllowingNativesSyntax() {
+ flags_ |= IsNativesSyntaxAllowed::encode(true);
+ }
+ bool allows_natives_syntax() const {
+ return IsNativesSyntaxAllowed::decode(flags_);
+ }
+ void MarkAsNative() {
+ flags_ |= IsNative::encode(true);
+ }
+ bool is_native() const {
+ return IsNative::decode(flags_);
+ }
void SetFunction(FunctionLiteral* literal) {
ASSERT(function_ == NULL);
function_ = literal;
@@ -135,7 +150,13 @@ class CompilationInfo BASE_EMBEDDED {
return V8::UseCrankshaft() && !closure_.is_null();
}
+ // Disable all optimization attempts of this info for the rest of the
+ // current compilation pipeline.
+ void AbortOptimization();
+
private:
+ Isolate* isolate_;
+
// Compilation mode.
// BASE is generated by the full codegen, optionally prepared for bailouts.
// OPTIMIZE is optimized code generated by the Hydrogen-based backend.
@@ -152,8 +173,9 @@ class CompilationInfo BASE_EMBEDDED {
void Initialize(Mode mode) {
mode_ = V8::UseCrankshaft() ? mode : NONOPT;
- if (!shared_info_.is_null() && shared_info_->strict_mode()) {
- MarkAsStrict();
+ if (!shared_info_.is_null()) {
+ if (shared_info_->strict_mode()) MarkAsStrictMode();
+ if (shared_info_->native()) MarkAsNative();
}
}
@@ -173,7 +195,12 @@ class CompilationInfo BASE_EMBEDDED {
// Flags that can be set for lazy compilation.
class IsInLoop: public BitField<bool, 3, 1> {};
// Strict mode - used in eager compilation.
- class IsStrict: public BitField<bool, 4, 1> {};
+ class IsStrictMode: public BitField<bool, 4, 1> {};
+ // Native syntax (%-stuff) allowed?
+ class IsNativesSyntaxAllowed: public BitField<bool, 5, 1> {};
+ // Is this a function from our natives?
+ class IsNative: public BitField<bool, 6, 1> {};
+
unsigned flags_;
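
The strict-mode, natives-syntax, and native flags above all share one unsigned word through the same shift-and-mask BitField pattern. A minimal reconstruction for illustration (the real template lives elsewhere in the V8 tree):

// Illustrative shift-and-mask flag encoding: each flag owns 'size' bits of a
// shared unsigned word, starting at bit 'shift'.
template <class T, int shift, int size>
class BitFieldSketch {
 public:
  static unsigned mask() { return ((1U << size) - 1) << shift; }
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned flags) {
    return static_cast<T>((flags & mask()) >> shift);
  }
};

// Usage mirroring CompilationInfo:
//   typedef BitFieldSketch<bool, 4, 1> IsStrictModeSketch;
//   flags |= IsStrictModeSketch::encode(true);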
@@ -225,6 +252,8 @@ class Compiler : public AllStatic {
// give up.
static const int kDefaultMaxOptCount = 10;
+ static const int kMaxInliningLevels = 3;
+
// All routines return a SharedFunctionInfo.
// If an error occurs an exception is raised and the return handle
// contains NULL.
@@ -270,21 +299,6 @@ class Compiler : public AllStatic {
};
-// During compilation we need a global list of handles to constants
-// for frame elements. When the zone gets deleted, we make sure to
-// clear this list of handles as well.
-class CompilationZoneScope : public ZoneScope {
- public:
- explicit CompilationZoneScope(ZoneScopeMode mode) : ZoneScope(mode) { }
- virtual ~CompilationZoneScope() {
- if (ShouldDeleteOnExit()) {
- FrameElement::ClearConstantList();
- Result::ClearConstantList();
- }
- }
-};
-
-
} } // namespace v8::internal
#endif // V8_COMPILER_H_
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 3ad72a16b..d066d3476 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -34,6 +34,16 @@
namespace v8 {
namespace internal {
+Context* Context::declaration_context() {
+ Context* current = this;
+ while (!current->IsFunctionContext() && !current->IsGlobalContext()) {
+ current = current->previous();
+ ASSERT(current->closure() == closure());
+ }
+ return current;
+}
+
+
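
The new declaration_context() hops over intermediate contexts until it reaches a function or global context. A toy model of that walk (hypothetical types, not V8's):

// Toy context chain: 'var' declarations hoist past catch/with links to the
// nearest enclosing function (or global) context.
struct ToyContext {
  enum Kind { FUNCTION, GLOBAL, CATCH, WITH };
  Kind kind;
  ToyContext* previous;  // NULL only for the global context

  ToyContext* DeclarationContext() {
    ToyContext* current = this;
    while (current->kind != FUNCTION && current->kind != GLOBAL) {
      current = current->previous;  // catch/with always have an enclosing link
    }
    return current;
  }
};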
JSBuiltinsObject* Context::builtins() {
GlobalObject* object = global();
if (object->IsJSGlobalObject()) {
@@ -55,7 +65,7 @@ Context* Context::global_context() {
// During bootstrapping, the global object might not be set and we
// have to search the context chain to find the global context.
- ASSERT(Bootstrapper::IsActive());
+ ASSERT(Isolate::Current()->bootstrapper()->IsActive());
Context* current = this;
while (!current->IsGlobalContext()) {
JSFunction* closure = JSFunction::cast(current->closure());
@@ -74,9 +84,12 @@ void Context::set_global_proxy(JSObject* object) {
}
-Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
- int* index_, PropertyAttributes* attributes) {
- Handle<Context> context(this);
+Handle<Object> Context::Lookup(Handle<String> name,
+ ContextLookupFlags flags,
+ int* index_,
+ PropertyAttributes* attributes) {
+ Isolate* isolate = GetIsolate();
+ Handle<Context> context(this, isolate);
bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
*index_ = -1;
@@ -95,39 +108,52 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
PrintF("\n");
}
- // check extension/with object
+ // Check extension/with/global object.
if (context->has_extension()) {
- Handle<JSObject> extension = Handle<JSObject>(context->extension());
- // Context extension objects need to behave as if they have no
- // prototype. So even if we want to follow prototype chains, we
- // need to only do a local lookup for context extension objects.
- if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
- extension->IsJSContextExtensionObject()) {
- *attributes = extension->GetLocalPropertyAttribute(*name);
+ if (context->IsCatchContext()) {
+ // Catch contexts have the variable name in the extension slot.
+ if (name->Equals(String::cast(context->extension()))) {
+ if (FLAG_trace_contexts) {
+ PrintF("=> found in catch context\n");
+ }
+ *index_ = Context::THROWN_OBJECT_INDEX;
+ *attributes = NONE;
+ return context;
+ }
} else {
- *attributes = extension->GetPropertyAttribute(*name);
- }
- if (*attributes != ABSENT) {
- // property found
- if (FLAG_trace_contexts) {
- PrintF("=> found property in context object %p\n",
- reinterpret_cast<void*>(*extension));
+ // Global, function, and with contexts may have an object in the
+ // extension slot.
+ Handle<JSObject> extension(JSObject::cast(context->extension()),
+ isolate);
+ // Context extension objects need to behave as if they have no
+ // prototype. So even if we want to follow prototype chains, we
+ // need to only do a local lookup for context extension objects.
+ if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
+ extension->IsJSContextExtensionObject()) {
+ *attributes = extension->GetLocalPropertyAttribute(*name);
+ } else {
+ *attributes = extension->GetPropertyAttribute(*name);
+ }
+ if (*attributes != ABSENT) {
+ // property found
+ if (FLAG_trace_contexts) {
+ PrintF("=> found property in context object %p\n",
+ reinterpret_cast<void*>(*extension));
+ }
+ return extension;
}
- return extension;
}
}
- if (context->is_function_context()) {
- // we have context-local slots
-
- // check non-parameter locals in context
+ // Only functions can have locals, parameters, and a function name.
+ if (context->IsFunctionContext()) {
+ // We may have context-local slots. Check locals in the context.
Handle<SerializedScopeInfo> scope_info(
- context->closure()->shared()->scope_info());
+ context->closure()->shared()->scope_info(), isolate);
Variable::Mode mode;
int index = scope_info->ContextSlotIndex(*name, &mode);
ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
if (index >= 0) {
- // slot found
if (FLAG_trace_contexts) {
PrintF("=> found local in context slot %d (mode = %d)\n",
index, mode);
@@ -140,39 +166,28 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
// declared variables that were introduced through declaration nodes)
// must not appear here.
switch (mode) {
- case Variable::INTERNAL: // fall through
- case Variable::VAR: *attributes = NONE; break;
- case Variable::CONST: *attributes = READ_ONLY; break;
- case Variable::DYNAMIC: UNREACHABLE(); break;
- case Variable::DYNAMIC_GLOBAL: UNREACHABLE(); break;
- case Variable::DYNAMIC_LOCAL: UNREACHABLE(); break;
- case Variable::TEMPORARY: UNREACHABLE(); break;
+ case Variable::INTERNAL: // Fall through.
+ case Variable::VAR:
+ *attributes = NONE;
+ break;
+ case Variable::CONST:
+ *attributes = READ_ONLY;
+ break;
+ case Variable::DYNAMIC:
+ case Variable::DYNAMIC_GLOBAL:
+ case Variable::DYNAMIC_LOCAL:
+ case Variable::TEMPORARY:
+ UNREACHABLE();
+ break;
}
return context;
}
- // check parameter locals in context
- int param_index = scope_info->ParameterIndex(*name);
- if (param_index >= 0) {
- // slot found.
- int index =
- scope_info->ContextSlotIndex(Heap::arguments_shadow_symbol(), NULL);
- ASSERT(index >= 0); // arguments must exist and be in the heap context
- Handle<JSObject> arguments(JSObject::cast(context->get(index)));
- ASSERT(arguments->HasLocalProperty(Heap::length_symbol()));
- if (FLAG_trace_contexts) {
- PrintF("=> found parameter %d in arguments object\n", param_index);
- }
- *index_ = param_index;
- *attributes = NONE;
- return arguments;
- }
-
- // check intermediate context (holding only the function name variable)
+ // Check the slot corresponding to the intermediate context holding
+ // only the function name variable.
if (follow_context_chain) {
int index = scope_info->FunctionContextSlotIndex(*name);
if (index >= 0) {
- // slot found
if (FLAG_trace_contexts) {
PrintF("=> found intermediate function in context slot %d\n",
index);
@@ -184,17 +199,14 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
}
}
- // proceed with enclosing context
+ // Proceed with the previous context.
if (context->IsGlobalContext()) {
follow_context_chain = false;
- } else if (context->is_function_context()) {
- context = Handle<Context>(Context::cast(context->closure()->context()));
} else {
- context = Handle<Context>(context->previous());
+ context = Handle<Context>(context->previous(), isolate);
}
} while (follow_context_chain);
- // slot not found
if (FLAG_trace_contexts) {
PrintF("=> no property/slot found\n");
}
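
For the catch-context case added to Lookup above, the binding is a single name stored where other context kinds keep their extension object, with the caught value in a dedicated slot. A hypothetical standalone version of just that case:

#include <cstring>

// A catch context binds exactly one name; the thrown value sits in slot 0
// (THROWN_OBJECT_INDEX in the real layout).
struct ToyCatchContext {
  const char* name;  // extension slot: the catch variable's name
  void* slots[1];    // slots[0]: the caught value
};

// Returns the slot index of 'name' in this catch context, or -1 if the
// name does not match and the search must continue up the chain.
static int LookupInCatch(const ToyCatchContext* context, const char* name) {
  return std::strcmp(context->name, name) == 0 ? 0 : -1;
}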
@@ -209,11 +221,12 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
// before the global context and check that there are no context
// extension objects (conservative check for with statements).
while (!context->IsGlobalContext()) {
- // Check if the context is a potentially a with context.
+ // Check if the context is a catch or with context, or has introduced
+ // bindings by calling non-strict eval.
if (context->has_extension()) return false;
// Not a with context so it must be a function context.
- ASSERT(context->is_function_context());
+ ASSERT(context->IsFunctionContext());
// Check non-parameter locals.
Handle<SerializedScopeInfo> scope_info(
@@ -230,7 +243,7 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
// Check context only holding the function name variable.
index = scope_info->FunctionContextSlotIndex(*name);
if (index >= 0) return false;
- context = Context::cast(context->closure()->context());
+ context = context->previous();
}
// No local or potential with statement found so the variable is
@@ -239,6 +252,30 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
}
+void Context::ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
+ bool* outer_scope_calls_non_strict_eval) {
+ // Skip up the context chain checking all the function contexts to see
+ // whether they call eval.
+ Context* context = this;
+ while (!context->IsGlobalContext()) {
+ if (context->IsFunctionContext()) {
+ Handle<SerializedScopeInfo> scope_info(
+ context->closure()->shared()->scope_info());
+ if (scope_info->CallsEval()) {
+ *outer_scope_calls_eval = true;
+ if (!scope_info->IsStrictMode()) {
+ // No need to go further since the answers will not change from
+ // here.
+ *outer_scope_calls_non_strict_eval = true;
+ return;
+ }
+ }
+ }
+ context = context->previous();
+ }
+}
+
+
void Context::AddOptimizedFunction(JSFunction* function) {
ASSERT(IsGlobalContext());
#ifdef DEBUG
@@ -252,7 +289,7 @@ void Context::AddOptimizedFunction(JSFunction* function) {
// Check that the context belongs to the weak global contexts list.
bool found = false;
- Object* context = Heap::global_contexts_list();
+ Object* context = GetHeap()->global_contexts_list();
while (!context->IsUndefined()) {
if (context == this) {
found = true;
@@ -281,7 +318,7 @@ void Context::RemoveOptimizedFunction(JSFunction* function) {
} else {
prev->set_next_function_link(element_function->next_function_link());
}
- element_function->set_next_function_link(Heap::undefined_value());
+ element_function->set_next_function_link(GetHeap()->undefined_value());
return;
}
prev = element_function;
@@ -298,7 +335,7 @@ Object* Context::OptimizedFunctionsListHead() {
void Context::ClearOptimizedFunctions() {
- set(OPTIMIZED_FUNCTIONS_LIST, Heap::undefined_value());
+ set(OPTIMIZED_FUNCTIONS_LIST, GetHeap()->undefined_value());
}
@@ -306,14 +343,17 @@ void Context::ClearOptimizedFunctions() {
bool Context::IsBootstrappingOrContext(Object* object) {
// During bootstrapping we allow all objects to pass as
// contexts. This is necessary to fix circular dependencies.
- return Bootstrapper::IsActive() || object->IsContext();
+ return Isolate::Current()->bootstrapper()->IsActive() || object->IsContext();
}
bool Context::IsBootstrappingOrGlobalObject(Object* object) {
// During bootstrapping we allow all objects to pass as global
// objects. This is necessary to fix circular dependencies.
- return Bootstrapper::IsActive() || object->IsGlobalObject();
+ Isolate* isolate = Isolate::Current();
+ return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
+ isolate->bootstrapper()->IsActive() ||
+ object->IsGlobalObject();
}
#endif
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index d0d54d1bd..da6e08875 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -78,11 +78,20 @@ enum ContextLookupFlags {
V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
V(FUNCTION_MAP_INDEX, Map, function_map) \
+ V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \
V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
+ V(STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
+ strict_mode_function_without_prototype_map) \
V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
+ V(STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX, Map, \
+ strict_mode_function_instance_map) \
V(JS_ARRAY_MAP_INDEX, Map, js_array_map) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
+ V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
+ aliased_arguments_boilerplate) \
+ V(STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
+ strict_mode_arguments_boilerplate) \
V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
@@ -99,7 +108,10 @@ enum ContextLookupFlags {
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
V(MAP_CACHE_INDEX, Object, map_cache) \
- V(CONTEXT_DATA_INDEX, Object, data)
+ V(CONTEXT_DATA_INDEX, Object, data) \
+ V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
+ V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
+ V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap)
// JSFunctions are pairs (context, function code), sometimes also called
// closures. A Context object is used to represent function contexts and
@@ -118,13 +130,6 @@ enum ContextLookupFlags {
// statically allocated context slots. The names are needed
// for dynamic lookups in the presence of 'with' or 'eval'.
//
-// [ fcontext ] A pointer to the innermost enclosing function context.
-// It is the same for all contexts *allocated* inside a
-// function, and the function context's fcontext points
-// to itself. It is only needed for fast access of the
-// function context (used for declarations, and static
-// context slot access).
-//
// [ previous ] A pointer to the previous context. It is NULL for
// function contexts, and non-NULL for 'with' contexts.
// Used to implement the 'with' statement.
@@ -146,19 +151,6 @@ enum ContextLookupFlags {
// (via static context addresses) or through 'eval' (dynamic context lookups).
// Finally, the global context contains additional slots for fast access to
// global properties.
-//
-// We may be able to simplify the implementation:
-//
-// - We may be able to get rid of 'fcontext': We can always use the fact that
-// previous == NULL for function contexts and so we can search for them. They
-// are only needed when doing dynamic declarations, and the context chains
-// tend to be very very short (depth of nesting of 'with' statements). At
-// the moment we also use it in generated code for context slot accesses -
-// and there we don't want a loop because of code bloat - but we may not
-// need it there after all (see comment in codegen_*.cc).
-//
-// - If we cannot get rid of fcontext, consider making 'previous' never NULL
-// except for the global context. This could simplify Context::Lookup.
class Context: public FixedArray {
public:
@@ -172,21 +164,31 @@ class Context: public FixedArray {
enum {
// These slots are in all contexts.
CLOSURE_INDEX,
- FCONTEXT_INDEX,
PREVIOUS_INDEX,
+ // The extension slot is used for either the global object (in global
+ // contexts), the eval extension object (function contexts), the subject
+ // of a with statement (with contexts), or the variable name (catch
+ // contexts).
EXTENSION_INDEX,
GLOBAL_INDEX,
MIN_CONTEXT_SLOTS,
+ // This slot holds the thrown value in catch contexts.
+ THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
+
// These slots are only in global contexts.
GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
SECURITY_TOKEN_INDEX,
ARGUMENTS_BOILERPLATE_INDEX,
+ ALIASED_ARGUMENTS_BOILERPLATE_INDEX,
+ STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX,
JS_ARRAY_MAP_INDEX,
REGEXP_RESULT_MAP_INDEX,
FUNCTION_MAP_INDEX,
+ STRICT_MODE_FUNCTION_MAP_INDEX,
FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
+ STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
FUNCTION_INSTANCE_MAP_INDEX,
+ STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX,
INITIAL_OBJECT_PROTOTYPE_INDEX,
BOOLEAN_FUNCTION_INDEX,
NUMBER_FUNCTION_INDEX,
@@ -225,6 +227,9 @@ class Context: public FixedArray {
OUT_OF_MEMORY_INDEX,
MAP_CACHE_INDEX,
CONTEXT_DATA_INDEX,
+ ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
+ DERIVED_GET_TRAP_INDEX,
+ DERIVED_SET_TRAP_INDEX,
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
@@ -241,9 +246,6 @@ class Context: public FixedArray {
JSFunction* closure() { return JSFunction::cast(get(CLOSURE_INDEX)); }
void set_closure(JSFunction* closure) { set(CLOSURE_INDEX, closure); }
- Context* fcontext() { return Context::cast(get(FCONTEXT_INDEX)); }
- void set_fcontext(Context* context) { set(FCONTEXT_INDEX, context); }
-
Context* previous() {
Object* result = unchecked_previous();
ASSERT(IsBootstrappingOrContext(result));
@@ -251,14 +253,17 @@ class Context: public FixedArray {
}
void set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
- bool has_extension() { return unchecked_extension() != NULL; }
- JSObject* extension() { return JSObject::cast(unchecked_extension()); }
- void set_extension(JSObject* object) { set(EXTENSION_INDEX, object); }
+ bool has_extension() { return extension() != NULL; }
+ Object* extension() { return get(EXTENSION_INDEX); }
+ void set_extension(Object* object) { set(EXTENSION_INDEX, object); }
+
+ // Get the context to which var declarations will be hoisted; this may
+ // be the context itself.
+ Context* declaration_context();
GlobalObject* global() {
Object* result = get(GLOBAL_INDEX);
- ASSERT(Heap::gc_state() != Heap::NOT_IN_GC ||
- IsBootstrappingOrGlobalObject(result));
+ ASSERT(IsBootstrappingOrGlobalObject(result));
return reinterpret_cast<GlobalObject*>(result);
}
void set_global(GlobalObject* global) { set(GLOBAL_INDEX, global); }
@@ -273,18 +278,27 @@ class Context: public FixedArray {
// Compute the global context by traversing the context chain.
Context* global_context();
- // Tells if this is a function context (as opposed to a 'with' context).
- bool is_function_context() { return unchecked_previous() == NULL; }
+ // Predicates for context types. IsGlobalContext is defined on Object
+ // because we frequently have to know if arbitrary objects are global
+ // contexts.
+ bool IsFunctionContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->function_context_map();
+ }
+ bool IsCatchContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->catch_context_map();
+ }
+ bool IsWithContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->with_context_map();
+ }
// Tells whether the global context is marked with out of memory.
- bool has_out_of_memory() {
- return global_context()->out_of_memory() == Heap::true_value();
- }
+ inline bool has_out_of_memory();
// Mark the global context with out of memory.
- void mark_out_of_memory() {
- global_context()->set_out_of_memory(Heap::true_value());
- }
+ inline void mark_out_of_memory();
// The exception holder is the object used as a with object in
// the implementation of a catch block.
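
The IsFunctionContext/IsCatchContext/IsWithContext predicates added above classify a context by comparing its map pointer against per-kind singleton maps, replacing the old previous == NULL test. A sketch of the idea with toy types:

// Each context kind gets a distinct singleton map, so a type test is one
// pointer comparison instead of an inspection of the chain structure.
struct ToyMap {};
static ToyMap function_context_map;
static ToyMap catch_context_map;
static ToyMap with_context_map;

struct ToyContext {
  const ToyMap* map;
  bool IsFunctionContext() const { return map == &function_context_map; }
  bool IsCatchContext() const    { return map == &catch_context_map; }
  bool IsWithContext() const     { return map == &with_context_map; }
};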
@@ -343,6 +357,11 @@ class Context: public FixedArray {
// eval.
bool GlobalIfNotShadowedByEval(Handle<String> name);
+ // Determine if any function scope in the context calls eval and if
+ // any of those calls are in non-strict mode.
+ void ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
+ bool* outer_scope_calls_non_strict_eval);
+
// Code generation support.
static int SlotOffset(int index) {
return kHeaderSize + index * kPointerSize - kHeapObjectTag;
@@ -362,7 +381,6 @@ class Context: public FixedArray {
private:
// Unchecked access to the slots.
Object* unchecked_previous() { return get(PREVIOUS_INDEX); }
- Object* unchecked_extension() { return get(EXTENSION_INDEX); }
#ifdef DEBUG
// Bootstrapping-aware type checks.
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index bf0294758..cb7dbf88d 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -60,11 +60,7 @@ static inline unsigned int FastD2UI(double x) {
if (x < k2Pow52) {
x += k2Pow52;
uint32_t result;
-#ifdef BIG_ENDIAN_FLOATING_POINT
- Address mantissa_ptr = reinterpret_cast<Address>(&x) + kIntSize;
-#else
Address mantissa_ptr = reinterpret_cast<Address>(&x);
-#endif
// Copy least significant 32 bits of mantissa.
memcpy(&result, mantissa_ptr, sizeof(result));
return negative ? ~result + 1 : result;
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index a348235d6..353b6810f 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -109,9 +109,11 @@ static const double JUNK_STRING_VALUE = OS::nan_value();
// Returns true if a nonspace is found and false if the end has been reached.
template <class Iterator, class EndMark>
-static inline bool AdvanceToNonspace(Iterator* current, EndMark end) {
+static inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
+ Iterator* current,
+ EndMark end) {
while (*current != end) {
- if (!ScannerConstants::kIsWhiteSpace.get(**current)) return true;
+ if (!unicode_cache->IsWhiteSpace(**current)) return true;
++*current;
}
return false;
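
AdvanceToNonspace is generic over an iterator and an end mark and now consults the per-isolate UnicodeCache rather than the static ScannerConstants table. A standalone approximation that substitutes std::isspace for the UnicodeCache, so it only recognizes ASCII whitespace:

#include <cctype>

// Advances *current past whitespace; returns true if a non-space character
// remains before 'end', false once the end is reached.
template <class Iterator, class EndMark>
static bool AdvanceToNonspaceAscii(Iterator* current, EndMark end) {
  while (*current != end) {
    if (!std::isspace(static_cast<unsigned char>(**current))) return true;
    ++*current;
  }
  return false;
}

// Usage: const char* p = str; AdvanceToNonspaceAscii(&p, str + length);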
@@ -132,7 +134,8 @@ static double SignedZero(bool negative) {
// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
template <int radix_log_2, class Iterator, class EndMark>
-static double InternalStringToIntDouble(Iterator current,
+static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
+ Iterator current,
EndMark end,
bool negative,
bool allow_trailing_junk) {
@@ -157,7 +160,8 @@ static double InternalStringToIntDouble(Iterator current,
} else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) {
digit = static_cast<char>(*current) - 'A' + 10;
} else {
- if (allow_trailing_junk || !AdvanceToNonspace(&current, end)) {
+ if (allow_trailing_junk ||
+ !AdvanceToNonspace(unicode_cache, &current, end)) {
break;
} else {
return JUNK_STRING_VALUE;
@@ -188,7 +192,8 @@ static double InternalStringToIntDouble(Iterator current,
exponent += radix_log_2;
}
- if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE;
}
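
InternalStringToIntDouble is instantiated once per power-of-two radix, so digit accumulation compiles down to a shift. A simplified sketch of that trick, ignoring the overflow, exponent, and trailing-junk handling of the real code:

#include <stdint.h>

// For radix 2^k, 'number * radix + digit' becomes a shift plus an OR; the
// template parameter makes the shift amount a compile-time constant.
template <int radix_log_2>
static double PowerOfTwoRadixToDouble(const char* p, const char* end) {
  const int radix = 1 << radix_log_2;
  int64_t number = 0;
  for (; p != end; ++p) {
    int digit;
    if (*p >= '0' && *p <= '9') {
      digit = *p - '0';
    } else if (radix > 10 && *p >= 'a' && *p < 'a' + radix - 10) {
      digit = *p - 'a' + 10;
    } else if (radix > 10 && *p >= 'A' && *p < 'A' + radix - 10) {
      digit = *p - 'A' + 10;
    } else {
      break;
    }
    if (digit >= radix) break;  // e.g. '7' is junk in base 4
    number = (number << radix_log_2) | digit;
  }
  return static_cast<double>(number);
}

// e.g. PowerOfTwoRadixToDouble<4>(s, s + n) parses a hexadecimal string.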
@@ -232,11 +237,16 @@ static double InternalStringToIntDouble(Iterator current,
template <class Iterator, class EndMark>
-static double InternalStringToInt(Iterator current, EndMark end, int radix) {
+static double InternalStringToInt(UnicodeCache* unicode_cache,
+ Iterator current,
+ EndMark end,
+ int radix) {
const bool allow_trailing_junk = true;
const double empty_string_val = JUNK_STRING_VALUE;
- if (!AdvanceToNonspace(&current, end)) return empty_string_val;
+ if (!AdvanceToNonspace(unicode_cache, &current, end)) {
+ return empty_string_val;
+ }
bool negative = false;
bool leading_zero = false;
@@ -244,10 +254,14 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
if (*current == '+') {
// Ignore leading sign; skip following spaces.
++current;
- if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
+ if (current == end) {
+ return JUNK_STRING_VALUE;
+ }
} else if (*current == '-') {
++current;
- if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
+ if (current == end) {
+ return JUNK_STRING_VALUE;
+ }
negative = true;
}
@@ -298,21 +312,21 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
switch (radix) {
case 2:
return InternalStringToIntDouble<1>(
- current, end, negative, allow_trailing_junk);
+ unicode_cache, current, end, negative, allow_trailing_junk);
case 4:
return InternalStringToIntDouble<2>(
- current, end, negative, allow_trailing_junk);
+ unicode_cache, current, end, negative, allow_trailing_junk);
case 8:
return InternalStringToIntDouble<3>(
- current, end, negative, allow_trailing_junk);
+ unicode_cache, current, end, negative, allow_trailing_junk);
case 16:
return InternalStringToIntDouble<4>(
- current, end, negative, allow_trailing_junk);
+ unicode_cache, current, end, negative, allow_trailing_junk);
case 32:
return InternalStringToIntDouble<5>(
- current, end, negative, allow_trailing_junk);
+ unicode_cache, current, end, negative, allow_trailing_junk);
default:
UNREACHABLE();
}
@@ -337,7 +351,8 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
if (current == end) break;
}
- if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE;
}
@@ -402,7 +417,8 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
v = v * multiplier + part;
} while (!done);
- if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE;
}
@@ -416,7 +432,8 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
// 2. *current - gets the current character in the sequence.
// 3. ++current (advances the position).
template <class Iterator, class EndMark>
-static double InternalStringToDouble(Iterator current,
+static double InternalStringToDouble(UnicodeCache* unicode_cache,
+ Iterator current,
EndMark end,
int flags,
double empty_string_val) {
@@ -428,7 +445,9 @@ static double InternalStringToDouble(Iterator current,
// 'parsing_done'.
// 4. 'current' is not dereferenced after the 'parsing_done' label.
// 5. Code before 'parsing_done' may rely on 'current != end'.
- if (!AdvanceToNonspace(&current, end)) return empty_string_val;
+ if (!AdvanceToNonspace(unicode_cache, &current, end)) {
+ return empty_string_val;
+ }
const bool allow_trailing_junk = (flags & ALLOW_TRAILING_JUNK) != 0;
@@ -463,7 +482,8 @@ static double InternalStringToDouble(Iterator current,
return JUNK_STRING_VALUE;
}
- if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE;
}
@@ -485,7 +505,8 @@ static double InternalStringToDouble(Iterator current,
return JUNK_STRING_VALUE; // "0x".
}
- return InternalStringToIntDouble<4>(current,
+ return InternalStringToIntDouble<4>(unicode_cache,
+ current,
end,
negative,
allow_trailing_junk);
@@ -621,7 +642,8 @@ static double InternalStringToDouble(Iterator current,
exponent += (sign == '-' ? -num : num);
}
- if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE;
}
@@ -629,7 +651,8 @@ static double InternalStringToDouble(Iterator current,
exponent += insignificant_digits;
if (octal) {
- return InternalStringToIntDouble<3>(buffer,
+ return InternalStringToIntDouble<3>(unicode_cache,
+ buffer,
buffer + buffer_pos,
negative,
allow_trailing_junk);
@@ -648,19 +671,23 @@ static double InternalStringToDouble(Iterator current,
}
-double StringToDouble(String* str, int flags, double empty_string_val) {
+double StringToDouble(UnicodeCache* unicode_cache,
+ String* str, int flags, double empty_string_val) {
StringShape shape(str);
if (shape.IsSequentialAscii()) {
const char* begin = SeqAsciiString::cast(str)->GetChars();
const char* end = begin + str->length();
- return InternalStringToDouble(begin, end, flags, empty_string_val);
+ return InternalStringToDouble(unicode_cache, begin, end, flags,
+ empty_string_val);
} else if (shape.IsSequentialTwoByte()) {
const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
const uc16* end = begin + str->length();
- return InternalStringToDouble(begin, end, flags, empty_string_val);
+ return InternalStringToDouble(unicode_cache, begin, end, flags,
+ empty_string_val);
} else {
StringInputBuffer buffer(str);
- return InternalStringToDouble(StringInputBufferIterator(&buffer),
+ return InternalStringToDouble(unicode_cache,
+ StringInputBufferIterator(&buffer),
StringInputBufferIterator::EndMarker(),
flags,
empty_string_val);
@@ -668,36 +695,52 @@ double StringToDouble(String* str, int flags, double empty_string_val) {
}
-double StringToInt(String* str, int radix) {
+double StringToInt(UnicodeCache* unicode_cache,
+ String* str,
+ int radix) {
StringShape shape(str);
if (shape.IsSequentialAscii()) {
const char* begin = SeqAsciiString::cast(str)->GetChars();
const char* end = begin + str->length();
- return InternalStringToInt(begin, end, radix);
+ return InternalStringToInt(unicode_cache, begin, end, radix);
} else if (shape.IsSequentialTwoByte()) {
const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
const uc16* end = begin + str->length();
- return InternalStringToInt(begin, end, radix);
+ return InternalStringToInt(unicode_cache, begin, end, radix);
} else {
StringInputBuffer buffer(str);
- return InternalStringToInt(StringInputBufferIterator(&buffer),
+ return InternalStringToInt(unicode_cache,
+ StringInputBufferIterator(&buffer),
StringInputBufferIterator::EndMarker(),
radix);
}
}
-double StringToDouble(const char* str, int flags, double empty_string_val) {
+double StringToDouble(UnicodeCache* unicode_cache,
+ const char* str, int flags, double empty_string_val) {
const char* end = str + StrLength(str);
- return InternalStringToDouble(str, end, flags, empty_string_val);
+ return InternalStringToDouble(unicode_cache, str, end, flags,
+ empty_string_val);
}
-double StringToDouble(Vector<const char> str,
+double StringToDouble(UnicodeCache* unicode_cache,
+ Vector<const char> str,
int flags,
double empty_string_val) {
const char* end = str.start() + str.length();
- return InternalStringToDouble(str.start(), end, flags, empty_string_val);
+ return InternalStringToDouble(unicode_cache, str.start(), end, flags,
+ empty_string_val);
+}
+
+double StringToDouble(UnicodeCache* unicode_cache,
+ Vector<const uc16> str,
+ int flags,
+ double empty_string_val) {
+ const uc16* end = str.start() + str.length();
+ return InternalStringToDouble(unicode_cache, str.start(), end, flags,
+ empty_string_val);
}
@@ -1066,4 +1109,23 @@ char* DoubleToRadixCString(double value, int radix) {
}
+static Mutex* dtoa_lock_one = OS::CreateMutex();
+static Mutex* dtoa_lock_zero = OS::CreateMutex();
+
+
} } // namespace v8::internal
+
+
+extern "C" {
+void ACQUIRE_DTOA_LOCK(int n) {
+ ASSERT(n == 0 || n == 1);
+ (n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->Lock();
+}
+
+
+void FREE_DTOA_LOCK(int n) {
+ ASSERT(n == 0 || n == 1);
+ (n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->
+ Unlock();
+}
+}
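
The extern "C" wrappers above exist because the bundled dtoa code is C and, when compiled with MULTIPLE_THREADS, calls ACQUIRE_DTOA_LOCK/FREE_DTOA_LOCK to guard its shared internal state. A minimal sketch of the same shim pattern using modern std::mutex (hypothetical names, so it does not collide with the real hooks):

#include <mutex>

// Two independent locks, mirroring dtoa's convention of lock 0 and lock 1
// protecting different internal structures.
static std::mutex toy_dtoa_locks[2];

extern "C" void toy_acquire_dtoa_lock(int n) { toy_dtoa_locks[n].lock(); }
extern "C" void toy_free_dtoa_lock(int n)    { toy_dtoa_locks[n].unlock(); }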
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 312e6aee5..4cbeeca82 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,8 @@
#ifndef V8_CONVERSIONS_H_
#define V8_CONVERSIONS_H_
+#include "scanner-base.h"
+
namespace v8 {
namespace internal {
@@ -91,15 +93,26 @@ static inline uint32_t NumberToUint32(Object* number);
// Converts a string into a double value according to ECMA-262 9.3.1
-double StringToDouble(String* str, int flags, double empty_string_val = 0);
-double StringToDouble(Vector<const char> str,
+double StringToDouble(UnicodeCache* unicode_cache,
+ String* str,
+ int flags,
+ double empty_string_val = 0);
+double StringToDouble(UnicodeCache* unicode_cache,
+ Vector<const char> str,
+ int flags,
+ double empty_string_val = 0);
+double StringToDouble(UnicodeCache* unicode_cache,
+ Vector<const uc16> str,
int flags,
double empty_string_val = 0);
// This version expects a zero-terminated character array.
-double StringToDouble(const char* str, int flags, double empty_string_val = 0);
+double StringToDouble(UnicodeCache* unicode_cache,
+ const char* str,
+ int flags,
+ double empty_string_val = 0);
// Converts a string into an integer.
-double StringToInt(String* str, int radix);
+double StringToInt(UnicodeCache* unicode_cache, String* str, int radix);
// Converts a double to a string value according to ECMA-262 9.8.1.
// The buffer should be large enough for any floating point number.
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index 239a5f7a0..faad6d409 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -28,14 +28,22 @@
#include "v8.h"
#include "counters.h"
+#include "isolate.h"
#include "platform.h"
namespace v8 {
namespace internal {
-CounterLookupCallback StatsTable::lookup_function_ = NULL;
-CreateHistogramCallback StatsTable::create_histogram_function_ = NULL;
-AddHistogramSampleCallback StatsTable::add_histogram_sample_function_ = NULL;
+StatsTable::StatsTable()
+ : lookup_function_(NULL),
+ create_histogram_function_(NULL),
+ add_histogram_sample_function_(NULL) {}
+
+
+int* StatsCounter::FindLocationInStatsTable() const {
+ return Isolate::Current()->stats_table()->FindLocation(name_);
+}
+
// Start the timer.
void StatsCounterTimer::Start() {
@@ -71,8 +79,15 @@ void HistogramTimer::Stop() {
// Compute the delta between start and stop, in milliseconds.
int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
- StatsTable::AddHistogramSample(histogram_, milliseconds);
+ Isolate::Current()->stats_table()->
+ AddHistogramSample(histogram_, milliseconds);
}
}
+
+void* HistogramTimer::CreateHistogram() const {
+ return Isolate::Current()->stats_table()->
+ CreateHistogram(name_, 0, 10000, 50);
+}
+
} } // namespace v8::internal
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 048fdaabf..6498a0242 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -38,27 +38,27 @@ namespace internal {
// counters for monitoring. Counters can be looked up and
// manipulated by name.
-class StatsTable : public AllStatic {
+class StatsTable {
public:
// Register an application-defined function where
// counters can be looked up.
- static void SetCounterFunction(CounterLookupCallback f) {
+ void SetCounterFunction(CounterLookupCallback f) {
lookup_function_ = f;
}
// Register an application-defined function to create
// a histogram for passing to the AddHistogramSample function
- static void SetCreateHistogramFunction(CreateHistogramCallback f) {
+ void SetCreateHistogramFunction(CreateHistogramCallback f) {
create_histogram_function_ = f;
}
// Register an application-defined function to add a sample
// to a histogram created with the CreateHistogram function
- static void SetAddHistogramSampleFunction(AddHistogramSampleCallback f) {
+ void SetAddHistogramSampleFunction(AddHistogramSampleCallback f) {
add_histogram_sample_function_ = f;
}
- static bool HasCounterFunction() {
+ bool HasCounterFunction() const {
return lookup_function_ != NULL;
}
@@ -68,7 +68,7 @@ class StatsTable : public AllStatic {
// may receive a different location to store its counter.
// The return value must not be cached and re-used across
// threads, although a single thread is free to cache it.
- static int* FindLocation(const char* name) {
+ int* FindLocation(const char* name) {
if (!lookup_function_) return NULL;
return lookup_function_(name);
}
@@ -78,25 +78,31 @@ class StatsTable : public AllStatic {
// function. min and max define the expected minimum and maximum
// sample values. buckets is the maximum number of buckets
// that the samples will be grouped into.
- static void* CreateHistogram(const char* name,
- int min,
- int max,
- size_t buckets) {
+ void* CreateHistogram(const char* name,
+ int min,
+ int max,
+ size_t buckets) {
if (!create_histogram_function_) return NULL;
return create_histogram_function_(name, min, max, buckets);
}
// Add a sample to a histogram created with the CreateHistogram
// function.
- static void AddHistogramSample(void* histogram, int sample) {
+ void AddHistogramSample(void* histogram, int sample) {
if (!add_histogram_sample_function_) return;
return add_histogram_sample_function_(histogram, sample);
}
private:
- static CounterLookupCallback lookup_function_;
- static CreateHistogramCallback create_histogram_function_;
- static AddHistogramSampleCallback add_histogram_sample_function_;
+ StatsTable();
+
+ CounterLookupCallback lookup_function_;
+ CreateHistogramCallback create_histogram_function_;
+ AddHistogramSampleCallback add_histogram_sample_function_;
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(StatsTable);
};
// StatsCounters are dynamically created values which can be tracked in
@@ -166,9 +172,12 @@ struct StatsCounter {
if (lookup_done_)
return ptr_;
lookup_done_ = true;
- ptr_ = StatsTable::FindLocation(name_);
+ ptr_ = FindLocationInStatsTable();
return ptr_;
}
+
+ private:
+ int* FindLocationInStatsTable() const;
};
// StatsCounterTimer t = { { L"t:foo", NULL, false }, 0, 0 };
@@ -216,10 +225,13 @@ struct HistogramTimer {
void* GetHistogram() {
if (!lookup_done_) {
lookup_done_ = true;
- histogram_ = StatsTable::CreateHistogram(name_, 0, 10000, 50);
+ histogram_ = CreateHistogram();
}
return histogram_;
}
+
+ private:
+ void* CreateHistogram() const;
};
// Helper class for scoping a HistogramTimer.
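
StatsTable is now an instance owned by the Isolate (note the friend declaration), but embedders still feed it through the same callbacks. A hedged sketch of the embedder side — the map and LookupCounter are illustrative; d8 keeps its own CounterMap:

  #include <map>
  #include <string>

  static std::map<std::string, int> counters;  // node-based, so int* stays valid

  static int* LookupCounter(const char* name) {
    return &counters[name];  // stable slot the VM increments directly
  }

  // Registered once at startup:
  //   v8::V8::SetCounterFunction(LookupCounter);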
diff --git a/deps/v8/src/cpu-profiler-inl.h b/deps/v8/src/cpu-profiler-inl.h
index 440dedca6..d7a23a518 100644
--- a/deps/v8/src/cpu-profiler-inl.h
+++ b/deps/v8/src/cpu-profiler-inl.h
@@ -32,6 +32,7 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
+#include <new>
#include "circular-queue-inl.h"
#include "profile-generator-inl.h"
#include "unbound-queue-inl.h"
@@ -41,8 +42,8 @@ namespace internal {
void CodeCreateEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->AddCode(start, entry, size);
- if (sfi_address != NULL) {
- entry->set_shared_id(code_map->GetSFITag(sfi_address));
+ if (shared != NULL) {
+ entry->set_shared_id(code_map->GetSharedId(shared));
}
}
@@ -57,28 +58,15 @@ void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) {
}
-void SFIMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
+void SharedFunctionInfoMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->MoveCode(from, to);
}
-TickSampleEventRecord* TickSampleEventRecord::init(void* value) {
- TickSampleEventRecord* result =
- reinterpret_cast<TickSampleEventRecord*>(value);
- result->filler = 1;
- ASSERT(result->filler != SamplingCircularQueue::kClear);
- // Init the required fields only.
- result->sample.pc = NULL;
- result->sample.frames_count = 0;
- return result;
-}
-
-
TickSample* ProfilerEventsProcessor::TickSampleEvent() {
generator_->Tick();
TickSampleEventRecord* evt =
- TickSampleEventRecord::init(ticks_buffer_.Enqueue());
- evt->order = enqueue_order_; // No increment!
+ new(ticks_buffer_.Enqueue()) TickSampleEventRecord(enqueue_order_);
return &evt->sample;
}
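
The hand-rolled init() helper is replaced by placement new, so the record's real constructor runs in storage the queue already owns and the sampling path stays allocation-free. The pattern in isolation — Record stands in for TickSampleEventRecord:

  #include <new>

  struct Record {
    explicit Record(unsigned order) : order(order) {}
    unsigned order;
  };

  void FillSlot(void* slot) {           // slot: raw bytes handed out by a queue
    Record* r = new (slot) Record(1);   // construct in place, no heap traffic
    (void) r;                           // the queue, not the caller, owns it
  }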
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index ad04a003b..8b10e8188 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -69,7 +69,7 @@ void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, prefix, name);
rec->size = 1;
- rec->sfi_address = NULL;
+ rec->shared = NULL;
events_buffer_.Enqueue(evt_rec);
}
@@ -80,7 +80,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
int line_number,
Address start,
unsigned size,
- Address sfi_address) {
+ Address shared) {
if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec;
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@@ -89,7 +89,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
rec->size = size;
- rec->sfi_address = sfi_address;
+ rec->shared = shared;
events_buffer_.Enqueue(evt_rec);
}
@@ -106,7 +106,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, name);
rec->size = size;
- rec->sfi_address = NULL;
+ rec->shared = NULL;
events_buffer_.Enqueue(evt_rec);
}
@@ -123,7 +123,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, args_count);
rec->size = size;
- rec->sfi_address = NULL;
+ rec->shared = NULL;
events_buffer_.Enqueue(evt_rec);
}
@@ -149,10 +149,12 @@ void ProfilerEventsProcessor::CodeDeleteEvent(Address from) {
}
-void ProfilerEventsProcessor::SFIMoveEvent(Address from, Address to) {
+void ProfilerEventsProcessor::SharedFunctionInfoMoveEvent(Address from,
+ Address to) {
CodeEventsContainer evt_rec;
- SFIMoveEventRecord* rec = &evt_rec.SFIMoveEventRecord_;
- rec->type = CodeEventRecord::SFI_MOVE;
+ SharedFunctionInfoMoveEventRecord* rec =
+ &evt_rec.SharedFunctionInfoMoveEventRecord_;
+ rec->type = CodeEventRecord::SHARED_FUNC_MOVE;
rec->order = ++enqueue_order_;
rec->from = from;
rec->to = to;
@@ -179,18 +181,16 @@ void ProfilerEventsProcessor::RegExpCodeCreateEvent(
void ProfilerEventsProcessor::AddCurrentStack() {
- TickSampleEventRecord record;
+ TickSampleEventRecord record(enqueue_order_);
TickSample* sample = &record.sample;
- sample->state = Top::current_vm_state();
+ Isolate* isolate = Isolate::Current();
+ sample->state = isolate->current_vm_state();
sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
- sample->tos = NULL;
- sample->frames_count = 0;
- for (StackTraceFrameIterator it;
+ for (StackTraceFrameIterator it(isolate);
!it.done() && sample->frames_count < TickSample::kMaxFramesCount;
it.Advance()) {
sample->stack[sample->frames_count++] = it.frame()->pc();
}
- record.order = enqueue_order_;
ticks_from_vm_buffer_.Enqueue(record);
}
@@ -239,7 +239,7 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
// A paranoid check to make sure that we don't get a memory overrun
// in case of frames_count having a wild value.
if (record.sample.frames_count < 0
- || record.sample.frames_count >= TickSample::kMaxFramesCount)
+ || record.sample.frames_count > TickSample::kMaxFramesCount)
record.sample.frames_count = 0;
generator_->RecordTickSample(record.sample);
ticks_buffer_.FinishDequeue();
@@ -270,82 +270,109 @@ void ProfilerEventsProcessor::Run() {
}
-CpuProfiler* CpuProfiler::singleton_ = NULL;
-Atomic32 CpuProfiler::is_profiling_ = false;
-
void CpuProfiler::StartProfiling(const char* title) {
- ASSERT(singleton_ != NULL);
- singleton_->StartCollectingProfile(title);
+ ASSERT(Isolate::Current()->cpu_profiler() != NULL);
+ Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
}
void CpuProfiler::StartProfiling(String* title) {
- ASSERT(singleton_ != NULL);
- singleton_->StartCollectingProfile(title);
+ ASSERT(Isolate::Current()->cpu_profiler() != NULL);
+ Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
}
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
- return is_profiling() ? singleton_->StopCollectingProfile(title) : NULL;
+ Isolate* isolate = Isolate::Current();
+ return is_profiling(isolate) ?
+ isolate->cpu_profiler()->StopCollectingProfile(title) : NULL;
}
CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
- return is_profiling() ?
- singleton_->StopCollectingProfile(security_token, title) : NULL;
+ Isolate* isolate = Isolate::Current();
+ return is_profiling(isolate) ?
+ isolate->cpu_profiler()->StopCollectingProfile(
+ security_token, title) : NULL;
}
int CpuProfiler::GetProfilesCount() {
- ASSERT(singleton_ != NULL);
+ ASSERT(Isolate::Current()->cpu_profiler() != NULL);
// The count of profiles doesn't depend on a security token.
- return singleton_->profiles_->Profiles(
+ return Isolate::Current()->cpu_profiler()->profiles_->Profiles(
TokenEnumerator::kNoSecurityToken)->length();
}
CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
- ASSERT(singleton_ != NULL);
- const int token = singleton_->token_enumerator_->GetTokenId(security_token);
- return singleton_->profiles_->Profiles(token)->at(index);
+ ASSERT(Isolate::Current()->cpu_profiler() != NULL);
+ CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
+ const int token = profiler->token_enumerator_->GetTokenId(security_token);
+ return profiler->profiles_->Profiles(token)->at(index);
}
CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
- ASSERT(singleton_ != NULL);
- const int token = singleton_->token_enumerator_->GetTokenId(security_token);
- return singleton_->profiles_->GetProfile(token, uid);
+ ASSERT(Isolate::Current()->cpu_profiler() != NULL);
+ CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
+ const int token = profiler->token_enumerator_->GetTokenId(security_token);
+ return profiler->profiles_->GetProfile(token, uid);
}
-TickSample* CpuProfiler::TickSampleEvent() {
- if (CpuProfiler::is_profiling()) {
- return singleton_->processor_->TickSampleEvent();
+TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
+ if (CpuProfiler::is_profiling(isolate)) {
+ return isolate->cpu_profiler()->processor_->TickSampleEvent();
} else {
return NULL;
}
}
+void CpuProfiler::DeleteAllProfiles() {
+ Isolate* isolate = Isolate::Current();
+ ASSERT(isolate->cpu_profiler() != NULL);
+ if (is_profiling(isolate)) {
+ isolate->cpu_profiler()->StopProcessor();
+ }
+ isolate->cpu_profiler()->ResetProfiles();
+}
+
+
+void CpuProfiler::DeleteProfile(CpuProfile* profile) {
+ ASSERT(Isolate::Current()->cpu_profiler() != NULL);
+ Isolate::Current()->cpu_profiler()->profiles_->RemoveProfile(profile);
+ delete profile;
+}
+
+
+bool CpuProfiler::HasDetachedProfiles() {
+ ASSERT(Isolate::Current()->cpu_profiler() != NULL);
+ return Isolate::Current()->cpu_profiler()->profiles_->HasDetachedProfiles();
+}
+
+
void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
- singleton_->processor_->CallbackCreateEvent(
+ Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, const char* comment) {
- singleton_->processor_->CodeCreateEvent(
+ Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
tag, comment, code->address(), code->ExecutableSize());
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, String* name) {
- singleton_->processor_->CodeCreateEvent(
+ Isolate* isolate = Isolate::Current();
+ isolate->cpu_profiler()->processor_->CodeCreateEvent(
tag,
name,
- Heap::empty_string(),
+ isolate->heap()->empty_string(),
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize(),
@@ -357,10 +384,11 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* name) {
- singleton_->processor_->CodeCreateEvent(
+ Isolate* isolate = Isolate::Current();
+ isolate->cpu_profiler()->processor_->CodeCreateEvent(
tag,
name,
- Heap::empty_string(),
+ isolate->heap()->empty_string(),
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize(),
@@ -372,7 +400,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* source, int line) {
- singleton_->processor_->CodeCreateEvent(
+ Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
tag,
shared->DebugName(),
source,
@@ -385,7 +413,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count) {
- singleton_->processor_->CodeCreateEvent(
+ Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
tag,
args_count,
code->address(),
@@ -394,28 +422,29 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
void CpuProfiler::CodeMoveEvent(Address from, Address to) {
- singleton_->processor_->CodeMoveEvent(from, to);
+ Isolate::Current()->cpu_profiler()->processor_->CodeMoveEvent(from, to);
}
void CpuProfiler::CodeDeleteEvent(Address from) {
- singleton_->processor_->CodeDeleteEvent(from);
+ Isolate::Current()->cpu_profiler()->processor_->CodeDeleteEvent(from);
}
-void CpuProfiler::SFIMoveEvent(Address from, Address to) {
- singleton_->processor_->SFIMoveEvent(from, to);
+void CpuProfiler::SharedFunctionInfoMoveEvent(Address from, Address to) {
+ CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
+ profiler->processor_->SharedFunctionInfoMoveEvent(from, to);
}
void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
- singleton_->processor_->CallbackCreateEvent(
+ Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
Logger::CALLBACK_TAG, "get ", name, entry_point);
}
void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
- singleton_->processor_->RegExpCodeCreateEvent(
+ Isolate::Current()->cpu_profiler()->processor_->RegExpCodeCreateEvent(
Logger::REG_EXP_TAG,
"RegExp: ",
source,
@@ -425,7 +454,7 @@ void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
- singleton_->processor_->CallbackCreateEvent(
+ Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
Logger::CALLBACK_TAG, "set ", name, entry_point);
}
@@ -435,7 +464,9 @@ CpuProfiler::CpuProfiler()
next_profile_uid_(1),
token_enumerator_(new TokenEnumerator()),
generator_(NULL),
- processor_(NULL) {
+ processor_(NULL),
+ need_to_stop_sampler_(false),
+ is_profiling_(false) {
}
@@ -445,6 +476,11 @@ CpuProfiler::~CpuProfiler() {
}
+void CpuProfiler::ResetProfiles() {
+ delete profiles_;
+ profiles_ = new CpuProfilesCollection();
+}
+
void CpuProfiler::StartCollectingProfile(const char* title) {
if (profiles_->StartProfiling(title, next_profile_uid_++)) {
StartProcessorIfNotStarted();
@@ -460,27 +496,32 @@ void CpuProfiler::StartCollectingProfile(String* title) {
void CpuProfiler::StartProcessorIfNotStarted() {
if (processor_ == NULL) {
+ Isolate* isolate = Isolate::Current();
+
// Disable logging when using the new implementation.
- saved_logging_nesting_ = Logger::logging_nesting_;
- Logger::logging_nesting_ = 0;
+ saved_logging_nesting_ = isolate->logger()->logging_nesting_;
+ isolate->logger()->logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
processor_ = new ProfilerEventsProcessor(generator_);
NoBarrier_Store(&is_profiling_, true);
processor_->Start();
// Enumerate stuff we already have in the heap.
- if (Heap::HasBeenSetup()) {
+ if (isolate->heap()->HasBeenSetup()) {
if (!FLAG_prof_browser_mode) {
bool saved_log_code_flag = FLAG_log_code;
FLAG_log_code = true;
- Logger::LogCodeObjects();
+ isolate->logger()->LogCodeObjects();
FLAG_log_code = saved_log_code_flag;
}
- Logger::LogCompiledFunctions();
- Logger::LogAccessorCallbacks();
+ isolate->logger()->LogCompiledFunctions();
+ isolate->logger()->LogAccessorCallbacks();
}
// Enable stack sampling.
- Sampler* sampler = reinterpret_cast<Sampler*>(Logger::ticker_);
- if (!sampler->IsActive()) sampler->Start();
+ Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
+ if (!sampler->IsActive()) {
+ sampler->Start();
+ need_to_stop_sampler_ = true;
+ }
sampler->IncreaseProfilingDepth();
}
}
@@ -511,19 +552,26 @@ CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
- if (profiles_->IsLastProfile(title)) {
- Sampler* sampler = reinterpret_cast<Sampler*>(Logger::ticker_);
- sampler->DecreaseProfilingDepth();
+ if (profiles_->IsLastProfile(title)) StopProcessor();
+}
+
+
+void CpuProfiler::StopProcessor() {
+ Logger* logger = Isolate::Current()->logger();
+ Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
+ sampler->DecreaseProfilingDepth();
+ if (need_to_stop_sampler_) {
sampler->Stop();
- processor_->Stop();
- processor_->Join();
- delete processor_;
- delete generator_;
- processor_ = NULL;
- NoBarrier_Store(&is_profiling_, false);
- generator_ = NULL;
- Logger::logging_nesting_ = saved_logging_nesting_;
+ need_to_stop_sampler_ = false;
}
+ processor_->Stop();
+ processor_->Join();
+ delete processor_;
+ delete generator_;
+ processor_ = NULL;
+ NoBarrier_Store(&is_profiling_, false);
+ generator_ = NULL;
+ logger->logging_nesting_ = saved_logging_nesting_;
}
} } // namespace v8::internal
@@ -535,8 +583,9 @@ namespace internal {
void CpuProfiler::Setup() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (singleton_ == NULL) {
- singleton_ = new CpuProfiler();
+ Isolate* isolate = Isolate::Current();
+ if (isolate->cpu_profiler() == NULL) {
+ isolate->set_cpu_profiler(new CpuProfiler());
}
#endif
}
@@ -544,10 +593,11 @@ void CpuProfiler::Setup() {
void CpuProfiler::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (singleton_ != NULL) {
- delete singleton_;
+ Isolate* isolate = Isolate::Current();
+ if (isolate->cpu_profiler() != NULL) {
+ delete isolate->cpu_profiler();
}
- singleton_ = NULL;
+ isolate->set_cpu_profiler(NULL);
#endif
}
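
With the singleton gone, every entry point resolves the profiler from the current isolate, and TickSampleEvent(isolate) doubles as the profiling check by returning NULL when idle. A sampler-side sketch, with the register filling left as a comment:

  v8::internal::TickSample* sample =
      v8::internal::CpuProfiler::TickSampleEvent(isolate);
  if (sample != NULL) {
    // copy pc/sp/fp from the interrupted thread's context into *sample
  }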
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 1ebbfebf7..42d79a578 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -30,6 +30,7 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
+#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
#include "unbound-queue.h"
@@ -46,11 +47,11 @@ class HashMap;
class ProfileGenerator;
class TokenEnumerator;
-#define CODE_EVENTS_TYPE_LIST(V) \
- V(CODE_CREATION, CodeCreateEventRecord) \
- V(CODE_MOVE, CodeMoveEventRecord) \
- V(CODE_DELETE, CodeDeleteEventRecord) \
- V(SFI_MOVE, SFIMoveEventRecord)
+#define CODE_EVENTS_TYPE_LIST(V) \
+ V(CODE_CREATION, CodeCreateEventRecord) \
+ V(CODE_MOVE, CodeMoveEventRecord) \
+ V(CODE_DELETE, CodeDeleteEventRecord) \
+ V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)
class CodeEventRecord {
@@ -73,7 +74,7 @@ class CodeCreateEventRecord : public CodeEventRecord {
Address start;
CodeEntry* entry;
unsigned size;
- Address sfi_address;
+ Address shared;
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@@ -96,7 +97,7 @@ class CodeDeleteEventRecord : public CodeEventRecord {
};
-class SFIMoveEventRecord : public CodeEventRecord {
+class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
public:
Address from;
Address to;
@@ -105,10 +106,14 @@ class SFIMoveEventRecord : public CodeEventRecord {
};
-class TickSampleEventRecord BASE_EMBEDDED {
+class TickSampleEventRecord {
public:
- TickSampleEventRecord()
- : filler(1) {
+ // The parameterless constructor is used when we dequeue data from
+ // the ticks buffer.
+ TickSampleEventRecord() { }
+ explicit TickSampleEventRecord(unsigned order)
+ : filler(1),
+ order(order) {
ASSERT(filler != SamplingCircularQueue::kClear);
}
@@ -124,8 +129,6 @@ class TickSampleEventRecord BASE_EMBEDDED {
static TickSampleEventRecord* cast(void* value) {
return reinterpret_cast<TickSampleEventRecord*>(value);
}
-
- INLINE(static TickSampleEventRecord* init(void* value));
};
@@ -149,7 +152,7 @@ class ProfilerEventsProcessor : public Thread {
String* name,
String* resource_name, int line_number,
Address start, unsigned size,
- Address sfi_address);
+ Address shared);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
const char* name,
Address start, unsigned size);
@@ -158,7 +161,7 @@ class ProfilerEventsProcessor : public Thread {
Address start, unsigned size);
void CodeMoveEvent(Address from, Address to);
void CodeDeleteEvent(Address from);
- void SFIMoveEvent(Address from, Address to);
+ void SharedFunctionInfoMoveEvent(Address from, Address to);
void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
const char* prefix, String* name,
Address start, unsigned size);
@@ -196,21 +199,23 @@ class ProfilerEventsProcessor : public Thread {
} } // namespace v8::internal
-#define PROFILE(Call) \
- LOG(Call); \
- do { \
- if (v8::internal::CpuProfiler::is_profiling()) { \
- v8::internal::CpuProfiler::Call; \
- } \
+#define PROFILE(isolate, Call) \
+ LOG(isolate, Call); \
+ do { \
+ if (v8::internal::CpuProfiler::is_profiling(isolate)) { \
+ v8::internal::CpuProfiler::Call; \
+ } \
} while (false)
#else
-#define PROFILE(Call) LOG(Call)
+#define PROFILE(isolate, Call) LOG(isolate, Call)
#endif // ENABLE_LOGGING_AND_PROFILING
namespace v8 {
namespace internal {
+
+// TODO(isolates): isolatify this class.
class CpuProfiler {
public:
static void Setup();
@@ -224,9 +229,12 @@ class CpuProfiler {
static int GetProfilesCount();
static CpuProfile* GetProfile(Object* security_token, int index);
static CpuProfile* FindProfile(Object* security_token, unsigned uid);
+ static void DeleteAllProfiles();
+ static void DeleteProfile(CpuProfile* profile);
+ static bool HasDetachedProfiles();
// Invoked from stack sampler (thread or signal handler.)
- static TickSample* TickSampleEvent();
+ static TickSample* TickSampleEvent(Isolate* isolate);
// Must be called via the PROFILE macro, otherwise it will crash when
// profiling is not enabled.
@@ -251,10 +259,13 @@ class CpuProfiler {
static void GetterCallbackEvent(String* name, Address entry_point);
static void RegExpCodeCreateEvent(Code* code, String* source);
static void SetterCallbackEvent(String* name, Address entry_point);
- static void SFIMoveEvent(Address from, Address to);
+ static void SharedFunctionInfoMoveEvent(Address from, Address to);
- static INLINE(bool is_profiling()) {
- return NoBarrier_Load(&is_profiling_);
+ // TODO(isolates): this doesn't have to use atomics anymore.
+
+ static INLINE(bool is_profiling(Isolate* isolate)) {
+ CpuProfiler* profiler = isolate->cpu_profiler();
+ return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
}
private:
@@ -266,6 +277,8 @@ class CpuProfiler {
CpuProfile* StopCollectingProfile(const char* title);
CpuProfile* StopCollectingProfile(Object* security_token, String* title);
void StopProcessorIfLastProfile(const char* title);
+ void StopProcessor();
+ void ResetProfiles();
CpuProfilesCollection* profiles_;
unsigned next_profile_uid_;
@@ -273,12 +286,11 @@ class CpuProfiler {
ProfileGenerator* generator_;
ProfilerEventsProcessor* processor_;
int saved_logging_nesting_;
-
- static CpuProfiler* singleton_;
- static Atomic32 is_profiling_;
+ bool need_to_stop_sampler_;
+ Atomic32 is_profiling_;
#else
- static INLINE(bool is_profiling()) { return false; }
+ static INLINE(bool is_profiling(Isolate* isolate)) { return false; }
#endif // ENABLE_LOGGING_AND_PROFILING
private:
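
Call sites change accordingly: the macro now needs the isolate in hand so both the LOG and the is_profiling check stay per-isolate. A hedged example — the tag and comment string are illustrative, not taken from this patch:

  PROFILE(isolate, CodeCreateEvent(Logger::BUILTIN_TAG, code, "A builtin"));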
diff --git a/deps/v8/src/cpu.h b/deps/v8/src/cpu.h
index ddc402f7d..2525484a0 100644
--- a/deps/v8/src/cpu.h
+++ b/deps/v8/src/cpu.h
@@ -36,6 +36,8 @@
#ifndef V8_CPU_H_
#define V8_CPU_H_
+#include "allocation.h"
+
namespace v8 {
namespace internal {
@@ -53,6 +55,8 @@ class CPU : public AllStatic {
// Initializes the cpu architecture support. Called once at VM startup.
static void Setup();
+ static bool SupportsCrankshaft();
+
// Flush instruction cache.
static void FlushICache(void* start, size_t size);
diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc
index 8a3886c67..06622057b 100644
--- a/deps/v8/src/d8-debug.cc
+++ b/deps/v8/src/d8-debug.cc
@@ -272,6 +272,7 @@ RemoteDebuggerEvent* RemoteDebugger::GetEvent() {
void RemoteDebugger::HandleMessageReceived(char* message) {
+ Locker lock;
HandleScope scope;
// Print the event details.
@@ -300,6 +301,7 @@ void RemoteDebugger::HandleMessageReceived(char* message) {
void RemoteDebugger::HandleKeyboardCommand(char* command) {
+ Locker lock;
HandleScope scope;
// Convert the debugger command to a JSON debugger request.
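
Both handlers run on the debugger's receiver thread, so they must take the Locker before opening a HandleScope now that V8 enforces per-isolate locking. The general pattern for touching V8 from a secondary thread:

  {
    v8::Locker lock;         // acquire the VM lock for this thread first
    v8::HandleScope scope;   // only then is it safe to create handles
    // call into V8 here
  }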
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index 335bd2b4b..658fd4ff0 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -311,10 +311,6 @@ static Handle<Value> GetStdout(int child_fd,
int read_timeout,
int total_timeout) {
Handle<String> accumulator = String::Empty();
- const char* source = "(function(a, b) { return a + b; })";
- Handle<Value> cons_as_obj(Script::Compile(String::New(source))->Run());
- Handle<Function> cons_function(Function::Cast(*cons_as_obj));
- Handle<Value> cons_args[2];
int fullness = 0;
static const int kStdoutReadBufferSize = 4096;
@@ -350,12 +346,7 @@ static Handle<Value> GetStdout(int child_fd,
bytes_read + fullness :
LengthWithoutIncompleteUtf8(buffer, bytes_read + fullness);
Handle<String> addition = String::New(buffer, length);
- cons_args[0] = accumulator;
- cons_args[1] = addition;
- accumulator = Handle<String>::Cast(cons_function->Call(
- Shell::utility_context()->Global(),
- 2,
- cons_args));
+ accumulator = String::Concat(accumulator, addition);
fullness = bytes_read + fullness - length;
memcpy(buffer, buffer + length, fullness);
}
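
The JS helper compiled at startup is gone; the accumulator loop now leans on the API's cons-string concatenation, which links chunks in O(1) instead of round-tripping through a script call. A hedged sketch — HaveMoreChunks and NextChunk are illustrative stand-ins for the read loop above:

  v8::Handle<v8::String> accumulator = v8::String::Empty();
  while (HaveMoreChunks()) {
    accumulator = v8::String::Concat(accumulator, NextChunk());
  }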
@@ -375,8 +366,10 @@ static Handle<Value> GetStdout(int child_fd,
// a parent process hangs on waiting while a child process is already a zombie.
// See http://code.google.com/p/v8/issues/detail?id=401.
#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__)
+#if !defined(__FreeBSD__)
#define HAS_WAITID 1
#endif
+#endif
// Get exit status of child.
diff --git a/deps/v8/src/d8-readline.cc b/deps/v8/src/d8-readline.cc
index 67fc9eff7..08395e53d 100644
--- a/deps/v8/src/d8-readline.cc
+++ b/deps/v8/src/d8-readline.cc
@@ -30,6 +30,8 @@
#include <readline/readline.h> // NOLINT
#include <readline/history.h> // NOLINT
+// The readline includes leave RETURN defined, which breaks V8 compilation.
+#undef RETURN
#include "d8.h"
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 349ec9041..6f948c6e5 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -26,8 +26,13 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <stdlib.h>
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+#include <bzlib.h>
+#endif
#include <errno.h>
+#include <stdlib.h>
+
+#include "v8.h"
#include "d8.h"
#include "d8-debug.h"
@@ -216,6 +221,103 @@ Handle<Value> Shell::Load(const Arguments& args) {
}
+Handle<Value> Shell::CreateExternalArray(const Arguments& args,
+ ExternalArrayType type,
+ size_t element_size) {
+ ASSERT(element_size == 1 || element_size == 2 || element_size == 4 ||
+ element_size == 8);
+ if (args.Length() != 1) {
+ return ThrowException(
+ String::New("Array constructor needs one parameter."));
+ }
+ size_t length = 0;
+ if (args[0]->IsUint32()) {
+ length = args[0]->Uint32Value();
+ } else if (args[0]->IsNumber()) {
+ double raw_length = args[0]->NumberValue();
+ if (raw_length < 0) {
+ return ThrowException(String::New("Array length must not be negative."));
+ }
+ if (raw_length > v8::internal::ExternalArray::kMaxLength) {
+ return ThrowException(
+ String::New("Array length exceeds maximum length."));
+ }
+ length = static_cast<size_t>(raw_length);
+ } else {
+ return ThrowException(String::New("Array length must be a number."));
+ }
+ if (length > static_cast<size_t>(internal::ExternalArray::kMaxLength)) {
+ return ThrowException(String::New("Array length exceeds maximum length."));
+ }
+ void* data = calloc(length, element_size);
+ if (data == NULL) {
+ return ThrowException(String::New("Memory allocation failed."));
+ }
+ Handle<Object> array = Object::New();
+ Persistent<Object> persistent_array = Persistent<Object>::New(array);
+ persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
+ persistent_array.MarkIndependent();
+ array->SetIndexedPropertiesToExternalArrayData(data, type, length);
+ array->Set(String::New("length"), Int32::New(length), ReadOnly);
+ array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size));
+ return array;
+}
+
+
+void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
+ free(data);
+ object.Dispose();
+}
+
+
+Handle<Value> Shell::Int8Array(const Arguments& args) {
+ return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
+}
+
+
+Handle<Value> Shell::Uint8Array(const Arguments& args) {
+ return CreateExternalArray(args, kExternalUnsignedByteArray, sizeof(uint8_t));
+}
+
+
+Handle<Value> Shell::Int16Array(const Arguments& args) {
+ return CreateExternalArray(args, kExternalShortArray, sizeof(int16_t));
+}
+
+
+Handle<Value> Shell::Uint16Array(const Arguments& args) {
+ return CreateExternalArray(args, kExternalUnsignedShortArray,
+ sizeof(uint16_t));
+}
+
+
+Handle<Value> Shell::Int32Array(const Arguments& args) {
+ return CreateExternalArray(args, kExternalIntArray, sizeof(int32_t));
+}
+
+
+Handle<Value> Shell::Uint32Array(const Arguments& args) {
+ return CreateExternalArray(args, kExternalUnsignedIntArray, sizeof(uint32_t));
+}
+
+
+Handle<Value> Shell::Float32Array(const Arguments& args) {
+ return CreateExternalArray(args, kExternalFloatArray,
+ sizeof(float)); // NOLINT
+}
+
+
+Handle<Value> Shell::Float64Array(const Arguments& args) {
+ return CreateExternalArray(args, kExternalDoubleArray,
+ sizeof(double)); // NOLINT
+}
+
+
+Handle<Value> Shell::PixelArray(const Arguments& args) {
+ return CreateExternalArray(args, kExternalPixelArray, sizeof(uint8_t));
+}
+
+
Handle<Value> Shell::Yield(const Arguments& args) {
v8::Unlocker unlocker;
return Undefined();
@@ -264,6 +366,11 @@ void Shell::ReportException(v8::TryCatch* try_catch) {
printf("^");
}
printf("\n");
+ v8::String::Utf8Value stack_trace(try_catch->StackTrace());
+ if (stack_trace.length() > 0) {
+ const char* stack_trace_string = ToCString(stack_trace);
+ printf("%s\n", stack_trace_string);
+ }
}
}
@@ -401,20 +508,79 @@ void Shell::AddHistogramSample(void* histogram, int sample) {
counter->AddSample(sample);
}
+void Shell::InstallUtilityScript() {
+ Locker lock;
+ HandleScope scope;
+ // If we use the utility context, we have to set the security tokens so that
+ // utility, evaluation and debug context can all access each other.
+ utility_context_->SetSecurityToken(Undefined());
+ evaluation_context_->SetSecurityToken(Undefined());
+ Context::Scope utility_scope(utility_context_);
-void Shell::Initialize() {
- Shell::counter_map_ = new CounterMap();
- // Set up counters
- if (i::StrLength(i::FLAG_map_counters) != 0)
- MapCounters(i::FLAG_map_counters);
- if (i::FLAG_dump_counters) {
- V8::SetCounterFunction(LookupCounter);
- V8::SetCreateHistogramFunction(CreateHistogram);
- V8::SetAddHistogramSampleFunction(AddHistogramSample);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Install the debugger object in the utility scope
+ i::Debug* debug = i::Isolate::Current()->debug();
+ debug->Load();
+ i::Handle<i::JSObject> js_debug
+ = i::Handle<i::JSObject>(debug->debug_context()->global());
+ utility_context_->Global()->Set(String::New("$debug"),
+ Utils::ToLocal(js_debug));
+ debug->debug_context()->set_security_token(HEAP->undefined_value());
+#endif
+
+ // Run the d8 shell utility script in the utility context
+ int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
+ i::Vector<const char> shell_source =
+ i::NativesCollection<i::D8>::GetRawScriptSource(source_index);
+ i::Vector<const char> shell_source_name =
+ i::NativesCollection<i::D8>::GetScriptName(source_index);
+ Handle<String> source = String::New(shell_source.start(),
+ shell_source.length());
+ Handle<String> name = String::New(shell_source_name.start(),
+ shell_source_name.length());
+ Handle<Script> script = Script::Compile(source, name);
+ script->Run();
+
+ // Mark the d8 shell script as native to avoid it showing up as normal source
+ // in the debugger.
+ i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script);
+ i::Handle<i::Script> script_object = compiled_script->IsJSFunction()
+ ? i::Handle<i::Script>(i::Script::cast(
+ i::JSFunction::cast(*compiled_script)->shared()->script()))
+ : i::Handle<i::Script>(i::Script::cast(
+ i::SharedFunctionInfo::cast(*compiled_script)->script()));
+ script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
+}
+
+
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+class BZip2Decompressor : public v8::StartupDataDecompressor {
+ public:
+ virtual ~BZip2Decompressor() { }
+
+ protected:
+ virtual int DecompressData(char* raw_data,
+ int* raw_data_size,
+ const char* compressed_data,
+ int compressed_data_size) {
+ ASSERT_EQ(v8::StartupData::kBZip2,
+ v8::V8::GetCompressedStartupDataAlgorithm());
+ unsigned int decompressed_size = *raw_data_size;
+ int result =
+ BZ2_bzBuffToBuffDecompress(raw_data,
+ &decompressed_size,
+ const_cast<char*>(compressed_data),
+ compressed_data_size,
+ 0, 1);
+ if (result == BZ_OK) {
+ *raw_data_size = decompressed_size;
+ }
+ return result;
}
+};
+#endif
- // Initialize the global objects
- HandleScope scope;
+Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
global_template->Set(String::New("write"), FunctionTemplate::New(Write));
@@ -425,6 +591,26 @@ void Shell::Initialize() {
global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
global_template->Set(String::New("version"), FunctionTemplate::New(Version));
+ // Bind the handlers for external arrays.
+ global_template->Set(String::New("Int8Array"),
+ FunctionTemplate::New(Int8Array));
+ global_template->Set(String::New("Uint8Array"),
+ FunctionTemplate::New(Uint8Array));
+ global_template->Set(String::New("Int16Array"),
+ FunctionTemplate::New(Int16Array));
+ global_template->Set(String::New("Uint16Array"),
+ FunctionTemplate::New(Uint16Array));
+ global_template->Set(String::New("Int32Array"),
+ FunctionTemplate::New(Int32Array));
+ global_template->Set(String::New("Uint32Array"),
+ FunctionTemplate::New(Uint32Array));
+ global_template->Set(String::New("Float32Array"),
+ FunctionTemplate::New(Float32Array));
+ global_template->Set(String::New("Float64Array"),
+ FunctionTemplate::New(Float64Array));
+ global_template->Set(String::New("PixelArray"),
+ FunctionTemplate::New(PixelArray));
+
#ifdef LIVE_OBJECT_LIST
global_template->Set(String::New("lol_is_enabled"), Boolean::New(true));
#else
@@ -435,63 +621,38 @@ void Shell::Initialize() {
AddOSMethods(os_templ);
global_template->Set(String::New("os"), os_templ);
- utility_context_ = Context::New(NULL, global_template);
- utility_context_->SetSecurityToken(Undefined());
- Context::Scope utility_scope(utility_context_);
+ return global_template;
+}
- i::JSArguments js_args = i::FLAG_js_arguments;
- i::Handle<i::FixedArray> arguments_array =
- i::Factory::NewFixedArray(js_args.argc());
- for (int j = 0; j < js_args.argc(); j++) {
- i::Handle<i::String> arg =
- i::Factory::NewStringFromUtf8(i::CStrVector(js_args[j]));
- arguments_array->set(j, *arg);
- }
- i::Handle<i::JSArray> arguments_jsarray =
- i::Factory::NewJSArrayWithElements(arguments_array);
- global_template->Set(String::New("arguments"),
- Utils::ToLocal(arguments_jsarray));
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Install the debugger object in the utility scope
- i::Debug::Load();
- i::Handle<i::JSObject> debug
- = i::Handle<i::JSObject>(i::Debug::debug_context()->global());
- utility_context_->Global()->Set(String::New("$debug"),
- Utils::ToLocal(debug));
+void Shell::Initialize(bool test_shell) {
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+ BZip2Decompressor startup_data_decompressor;
+ int bz2_result = startup_data_decompressor.Decompress();
+ if (bz2_result != BZ_OK) {
+ fprintf(stderr, "bzip error code: %d\n", bz2_result);
+ exit(1);
+ }
#endif
- // Run the d8 shell utility script in the utility context
- int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
- i::Vector<const char> shell_source
- = i::NativesCollection<i::D8>::GetScriptSource(source_index);
- i::Vector<const char> shell_source_name
- = i::NativesCollection<i::D8>::GetScriptName(source_index);
- Handle<String> source = String::New(shell_source.start(),
- shell_source.length());
- Handle<String> name = String::New(shell_source_name.start(),
- shell_source_name.length());
- Handle<Script> script = Script::Compile(source, name);
- script->Run();
+ Shell::counter_map_ = new CounterMap();
+ // Set up counters
+ if (i::StrLength(i::FLAG_map_counters) != 0)
+ MapCounters(i::FLAG_map_counters);
+ if (i::FLAG_dump_counters) {
+ V8::SetCounterFunction(LookupCounter);
+ V8::SetCreateHistogramFunction(CreateHistogram);
+ V8::SetAddHistogramSampleFunction(AddHistogramSample);
+ }
- // Mark the d8 shell script as native to avoid it showing up as normal source
- // in the debugger.
- i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script);
- i::Handle<i::Script> script_object = compiled_script->IsJSFunction()
- ? i::Handle<i::Script>(i::Script::cast(
- i::JSFunction::cast(*compiled_script)->shared()->script()))
- : i::Handle<i::Script>(i::Script::cast(
- i::SharedFunctionInfo::cast(*compiled_script)->script()));
- script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
+ if (test_shell) return;
- // Create the evaluation context
- evaluation_context_ = Context::New(NULL, global_template);
- evaluation_context_->SetSecurityToken(Undefined());
+ Locker lock;
+ HandleScope scope;
+ Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
+ utility_context_ = Context::New(NULL, global_template);
#ifdef ENABLE_DEBUGGER_SUPPORT
- // Set the security token of the debug context to allow access.
- i::Debug::debug_context()->set_security_token(i::Heap::undefined_value());
-
// Start the debugger agent if requested.
if (i::FLAG_debugger_agent) {
v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true);
@@ -505,6 +666,33 @@ void Shell::Initialize() {
}
+void Shell::RenewEvaluationContext() {
+ // Initialize the global objects
+ HandleScope scope;
+ Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
+
+ // (Re-)create the evaluation context
+ if (!evaluation_context_.IsEmpty()) {
+ evaluation_context_.Dispose();
+ }
+ evaluation_context_ = Context::New(NULL, global_template);
+ Context::Scope utility_scope(evaluation_context_);
+
+ i::JSArguments js_args = i::FLAG_js_arguments;
+ i::Handle<i::FixedArray> arguments_array =
+ FACTORY->NewFixedArray(js_args.argc());
+ for (int j = 0; j < js_args.argc(); j++) {
+ i::Handle<i::String> arg =
+ FACTORY->NewStringFromUtf8(i::CStrVector(js_args[j]));
+ arguments_array->set(j, *arg);
+ }
+ i::Handle<i::JSArray> arguments_jsarray =
+ FACTORY->NewJSArrayWithElements(arguments_array);
+ evaluation_context_->Global()->Set(String::New("arguments"),
+ Utils::ToLocal(arguments_jsarray));
+}
+
+
void Shell::OnExit() {
if (i::FLAG_dump_counters) {
::printf("+----------------------------------------+-------------+\n");
@@ -587,6 +775,7 @@ void Shell::RunShell() {
if (i::FLAG_debugger) {
printf("JavaScript debugger enabled\n");
}
+
editor->Open();
while (true) {
Locker locker;
@@ -620,21 +809,7 @@ void ShellThread::Run() {
// Prepare the context for this thread.
Locker locker;
HandleScope scope;
- Handle<ObjectTemplate> global_template = ObjectTemplate::New();
- global_template->Set(String::New("print"),
- FunctionTemplate::New(Shell::Print));
- global_template->Set(String::New("write"),
- FunctionTemplate::New(Shell::Write));
- global_template->Set(String::New("read"),
- FunctionTemplate::New(Shell::Read));
- global_template->Set(String::New("readline"),
- FunctionTemplate::New(Shell::ReadLine));
- global_template->Set(String::New("load"),
- FunctionTemplate::New(Shell::Load));
- global_template->Set(String::New("yield"),
- FunctionTemplate::New(Shell::Yield));
- global_template->Set(String::New("version"),
- FunctionTemplate::New(Shell::Version));
+ Handle<ObjectTemplate> global_template = Shell::CreateGlobalTemplate();
char* ptr = const_cast<char*>(files_.start());
while ((ptr != NULL) && (*ptr != '\0')) {
@@ -648,7 +823,6 @@ void ShellThread::Run() {
}
Persistent<Context> thread_context = Context::New(NULL, global_template);
- thread_context->SetSecurityToken(Undefined());
Context::Scope context_scope(thread_context);
while ((ptr != NULL) && (*ptr != '\0')) {
@@ -674,15 +848,7 @@ void ShellThread::Run() {
}
}
-
-int Shell::Main(int argc, char* argv[]) {
- i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
- if (i::FLAG_help) {
- return 1;
- }
- Initialize();
- bool run_shell = (argc == 1);
-
+int Shell::RunMain(int argc, char* argv[], bool* executed) {
// Default use preemption if threads are created.
bool use_preemption = true;
@@ -693,16 +859,14 @@ int Shell::Main(int argc, char* argv[]) {
i::List<i::Thread*> threads(1);
{
- // Acquire the V8 lock once initialization has finished. Since the thread
- // below may spawn new threads accessing V8 holding the V8 lock here is
- // mandatory.
+ // Since the thread below may spawn new threads accessing V8 holding the
+ // V8 lock here is mandatory.
Locker locker;
+ RenewEvaluationContext();
Context::Scope context_scope(evaluation_context_);
for (int i = 1; i < argc; i++) {
char* str = argv[i];
- if (strcmp(str, "--shell") == 0) {
- run_shell = true;
- } else if (strcmp(str, "--preemption") == 0) {
+ if (strcmp(str, "--preemption") == 0) {
use_preemption = true;
} else if (strcmp(str, "--no-preemption") == 0) {
use_preemption = false;
@@ -717,7 +881,7 @@ int Shell::Main(int argc, char* argv[]) {
} else {
printf("Missing value for --preemption-interval\n");
return 1;
- }
+ }
} else if (strcmp(str, "-f") == 0) {
// Ignore any -f flags for compatibility with other stand-alone
// JavaScript engines.
@@ -728,12 +892,12 @@ int Shell::Main(int argc, char* argv[]) {
// Execute argument given to -e option directly.
v8::HandleScope handle_scope;
v8::Handle<v8::String> file_name = v8::String::New("unnamed");
- v8::Handle<v8::String> source = v8::String::New(argv[i + 1]);
+ v8::Handle<v8::String> source = v8::String::New(argv[++i]);
+ (*executed) = true;
if (!ExecuteString(source, file_name, false, true)) {
OnExit();
return 1;
}
- i++;
} else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
int size = 0;
const char* files = ReadChars(argv[++i], &size);
@@ -743,11 +907,13 @@ int Shell::Main(int argc, char* argv[]) {
i::Vector<const char>(files, size));
thread->Start();
threads.Add(thread);
+ (*executed) = true;
} else {
// Use all other arguments as names of files to load and run.
HandleScope handle_scope;
Handle<String> file_name = v8::String::New(str);
Handle<String> source = ReadFile(str);
+ (*executed) = true;
if (source.IsEmpty()) {
printf("Error reading '%s'\n", str);
return 1;
@@ -763,17 +929,8 @@ int Shell::Main(int argc, char* argv[]) {
if (threads.length() > 0 && use_preemption) {
Locker::StartPreemption(preemption_interval);
}
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Run the remote debugger if requested.
- if (i::FLAG_remote_debugger) {
- RunRemoteDebugger(i::FLAG_debugger_port);
- return 0;
- }
-#endif
}
- if (run_shell)
- RunShell();
+
for (int i = 0; i < threads.length(); i++) {
i::Thread* thread = threads[i];
thread->Join();
@@ -784,9 +941,83 @@ int Shell::Main(int argc, char* argv[]) {
}
+int Shell::Main(int argc, char* argv[]) {
+ // Figure out if we're requested to stress the optimization
+ // infrastructure by running tests multiple times and forcing
+ // optimization in the last run.
+ bool FLAG_stress_opt = false;
+ bool FLAG_stress_deopt = false;
+ bool FLAG_interactive_shell = false;
+ bool FLAG_test_shell = false;
+ bool script_executed = false;
+
+ for (int i = 0; i < argc; i++) {
+ if (strcmp(argv[i], "--stress-opt") == 0) {
+ FLAG_stress_opt = true;
+ argv[i] = NULL;
+ } else if (strcmp(argv[i], "--stress-deopt") == 0) {
+ FLAG_stress_deopt = true;
+ argv[i] = NULL;
+ } else if (strcmp(argv[i], "--noalways-opt") == 0) {
+ // No support for stressing if we can't use --always-opt.
+ FLAG_stress_opt = false;
+ FLAG_stress_deopt = false;
+ } else if (strcmp(argv[i], "--shell") == 0) {
+ FLAG_interactive_shell = true;
+ argv[i] = NULL;
+ } else if (strcmp(argv[i], "--test") == 0) {
+ FLAG_test_shell = true;
+ argv[i] = NULL;
+ }
+ }
+
+ v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+
+ Initialize(FLAG_test_shell);
+
+ int result = 0;
+ if (FLAG_stress_opt || FLAG_stress_deopt) {
+ v8::Testing::SetStressRunType(
+ FLAG_stress_opt ? v8::Testing::kStressTypeOpt
+ : v8::Testing::kStressTypeDeopt);
+ int stress_runs = v8::Testing::GetStressRuns();
+ for (int i = 0; i < stress_runs && result == 0; i++) {
+ printf("============ Stress %d/%d ============\n", i + 1, stress_runs);
+ v8::Testing::PrepareStressRun(i);
+ result = RunMain(argc, argv, &script_executed);
+ }
+ printf("======== Full Deoptimization =======\n");
+ v8::Testing::DeoptimizeAll();
+ } else {
+ result = RunMain(argc, argv, &script_executed);
+ }
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Run remote debugger if requested, but never on --test
+ if (i::FLAG_remote_debugger && !FLAG_test_shell) {
+ InstallUtilityScript();
+ RunRemoteDebugger(i::FLAG_debugger_port);
+ return 0;
+ }
+#endif
+
+ // Run interactive shell if explicitly requested or if no script has been
+ // executed, but never on --test
+ if ((FLAG_interactive_shell || !script_executed) && !FLAG_test_shell) {
+ InstallUtilityScript();
+ RunShell();
+ }
+
+ v8::V8::Dispose();
+
+ return result;
+}
+
} // namespace v8
+#ifndef GOOGLE3
int main(int argc, char* argv[]) {
return v8::Shell::Main(argc, argv);
}
+#endif
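
The driver now strips its own flags before handing the remainder to V8::SetFlagsFromCommandLine, so a few representative invocations after this change look like the following — a sketch of the flag handling above, not an exhaustive list:

  d8 --stress-opt test.js    # repeat the run, forcing optimization on the last pass
  d8 --shell init.js         # run the file, then drop into the interactive shell
  d8 --test regress.js       # test-shell mode: no interactive shell, no debugger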
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index 3283e38ae..8b52ed9a9 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -38,7 +38,10 @@
'../src',
],
'defines': [
+ 'ENABLE_LOGGING_AND_PROFILING',
'ENABLE_DEBUGGER_SUPPORT',
+ 'ENABLE_VMSTATE_TRACKING',
+ 'V8_FAST_TLS',
],
'sources': [
'd8.cc',
@@ -49,6 +52,9 @@
[ 'OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
'sources': [ 'd8-posix.cc', ]
}],
+ [ 'OS=="win"', {
+ 'sources': [ 'd8-windows.cc', ]
+ }],
],
},
{
@@ -58,6 +64,7 @@
'variables': {
'js_files': [
'd8.js',
+ 'macros.py',
],
},
'actions': [
@@ -69,7 +76,6 @@
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
- '<(SHARED_INTERMEDIATE_DIR)/d8-js-empty.cc',
],
'action': [
'python',
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index de1fe0de7..e22546999 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -28,10 +28,10 @@
#ifndef V8_D8_H_
#define V8_D8_H_
+#include "allocation.h"
#include "v8.h"
#include "hashmap.h"
-
namespace v8 {
@@ -104,6 +104,7 @@ class CounterMap {
i::HashMap* map_;
i::HashMap::Entry* entry_;
};
+
private:
static int Hash(const char* name);
static bool Match(void* key1, void* key2);
@@ -119,7 +120,6 @@ class Shell: public i::AllStatic {
bool report_exceptions);
static const char* ToCString(const v8::String::Utf8Value& value);
static void ReportException(TryCatch* try_catch);
- static void Initialize();
static void OnExit();
static int* LookupCounter(const char* name);
static void* CreateHistogram(const char* name,
@@ -129,8 +129,14 @@ class Shell: public i::AllStatic {
static void AddHistogramSample(void* histogram, int sample);
static void MapCounters(const char* name);
static Handle<String> ReadFile(const char* name);
+ static void Initialize(bool test_shell);
+ static void RenewEvaluationContext();
+ static void InstallUtilityScript();
static void RunShell();
+ static int RunScript(char* filename);
+ static int RunMain(int argc, char* argv[], bool* executed);
static int Main(int argc, char* argv[]);
+ static Handle<ObjectTemplate> CreateGlobalTemplate();
static Handle<Array> GetCompletions(Handle<String> text,
Handle<String> full);
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -150,6 +156,15 @@ class Shell: public i::AllStatic {
static Handle<Value> Read(const Arguments& args);
static Handle<Value> ReadLine(const Arguments& args);
static Handle<Value> Load(const Arguments& args);
+ static Handle<Value> Int8Array(const Arguments& args);
+ static Handle<Value> Uint8Array(const Arguments& args);
+ static Handle<Value> Int16Array(const Arguments& args);
+ static Handle<Value> Uint16Array(const Arguments& args);
+ static Handle<Value> Int32Array(const Arguments& args);
+ static Handle<Value> Uint32Array(const Arguments& args);
+ static Handle<Value> Float32Array(const Arguments& args);
+ static Handle<Value> Float64Array(const Arguments& args);
+ static Handle<Value> PixelArray(const Arguments& args);
// The OS object on the global object contains methods for performing
// operating system calls:
//
@@ -187,10 +202,9 @@ class Shell: public i::AllStatic {
static void AddOSMethods(Handle<ObjectTemplate> os_template);
- static Handle<Context> utility_context() { return utility_context_; }
-
static const char* kHistoryFileName;
static const char* kPrompt;
+
private:
static Persistent<Context> utility_context_;
static Persistent<Context> evaluation_context_;
@@ -201,6 +215,10 @@ class Shell: public i::AllStatic {
static CounterCollection* counters_;
static i::OS::MemoryMappedFile* counters_file_;
static Counter* GetCounter(const char* name, bool is_histogram);
+ static Handle<Value> CreateExternalArray(const Arguments& args,
+ ExternalArrayType type,
+ size_t element_size);
+ static void ExternalArrayWeakCallback(Persistent<Value> object, void* data);
};
diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js
index 979807888..033455e9d 100644
--- a/deps/v8/src/d8.js
+++ b/deps/v8/src/d8.js
@@ -977,9 +977,14 @@ DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) {
// specification it is considered a function break point.
pos = target.indexOf(':');
if (pos > 0) {
- type = 'script';
var tmp = target.substring(pos + 1, target.length);
target = target.substring(0, pos);
+ if (target[0] == '/' && target[target.length - 1] == '/') {
+ type = 'scriptRegExp';
+ target = target.substring(1, target.length - 1);
+ } else {
+ type = 'script';
+ }
// Check for both line and column.
pos = tmp.indexOf(':');
@@ -1984,6 +1989,9 @@ function DebugResponseDetails(response) {
if (breakpoint.script_name) {
result += ' script_name=' + breakpoint.script_name;
}
+ if (breakpoint.script_regexp) {
+ result += ' script_regexp=' + breakpoint.script_regexp;
+ }
result += ' line=' + (breakpoint.line + 1);
if (breakpoint.column != null) {
result += ' column=' + (breakpoint.column + 1);
diff --git a/deps/v8/src/data-flow.cc b/deps/v8/src/data-flow.cc
index 9c02ff48e..6a3b05cc8 100644
--- a/deps/v8/src/data-flow.cc
+++ b/deps/v8/src/data-flow.cc
@@ -63,483 +63,4 @@ void BitVector::Iterator::Advance() {
current_value_ = val >> 1;
}
-
-bool AssignedVariablesAnalyzer::Analyze(CompilationInfo* info) {
- Scope* scope = info->scope();
- int size = scope->num_parameters() + scope->num_stack_slots();
- if (size == 0) return true;
- AssignedVariablesAnalyzer analyzer(info, size);
- return analyzer.Analyze();
-}
-
-
-AssignedVariablesAnalyzer::AssignedVariablesAnalyzer(CompilationInfo* info,
- int size)
- : info_(info), av_(size) {
-}
-
-
-bool AssignedVariablesAnalyzer::Analyze() {
- ASSERT(av_.length() > 0);
- VisitStatements(info_->function()->body());
- return !HasStackOverflow();
-}
-
-
-Variable* AssignedVariablesAnalyzer::FindSmiLoopVariable(ForStatement* stmt) {
- // The loop must have all necessary parts.
- if (stmt->init() == NULL || stmt->cond() == NULL || stmt->next() == NULL) {
- return NULL;
- }
- // The initialization statement has to be a simple assignment.
- Assignment* init = stmt->init()->StatementAsSimpleAssignment();
- if (init == NULL) return NULL;
-
- // We only deal with local variables.
- Variable* loop_var = init->target()->AsVariableProxy()->AsVariable();
- if (loop_var == NULL || !loop_var->IsStackAllocated()) return NULL;
-
- // Don't try to get clever with const or dynamic variables.
- if (loop_var->mode() != Variable::VAR) return NULL;
-
- // The initial value has to be a smi.
- Literal* init_lit = init->value()->AsLiteral();
- if (init_lit == NULL || !init_lit->handle()->IsSmi()) return NULL;
- int init_value = Smi::cast(*init_lit->handle())->value();
-
- // The condition must be a compare of variable with <, <=, >, or >=.
- CompareOperation* cond = stmt->cond()->AsCompareOperation();
- if (cond == NULL) return NULL;
- if (cond->op() != Token::LT
- && cond->op() != Token::LTE
- && cond->op() != Token::GT
- && cond->op() != Token::GTE) return NULL;
-
- // The lhs must be the same variable as in the init expression.
- if (cond->left()->AsVariableProxy()->AsVariable() != loop_var) return NULL;
-
- // The rhs must be a smi.
- Literal* term_lit = cond->right()->AsLiteral();
- if (term_lit == NULL || !term_lit->handle()->IsSmi()) return NULL;
- int term_value = Smi::cast(*term_lit->handle())->value();
-
- // The count operation updates the same variable as in the init expression.
- CountOperation* update = stmt->next()->StatementAsCountOperation();
- if (update == NULL) return NULL;
- if (update->expression()->AsVariableProxy()->AsVariable() != loop_var) {
- return NULL;
- }
-
- // The direction of the count operation must agree with the start and the end
- // value. We currently do not allow the initial value to be the same as the
- // terminal value. This _would_ be ok as long as the loop body never executes
- // or executes exactly one time.
- if (init_value == term_value) return NULL;
- if (init_value < term_value && update->op() != Token::INC) return NULL;
- if (init_value > term_value && update->op() != Token::DEC) return NULL;
-
- // Check that the update operation cannot overflow the smi range. This can
- // occur in the two cases where the loop bound is equal to the largest or
- // smallest smi.
- if (update->op() == Token::INC && term_value == Smi::kMaxValue) return NULL;
- if (update->op() == Token::DEC && term_value == Smi::kMinValue) return NULL;
-
- // Found a smi loop variable.
- return loop_var;
-}
-
-int AssignedVariablesAnalyzer::BitIndex(Variable* var) {
- ASSERT(var != NULL);
- ASSERT(var->IsStackAllocated());
- Slot* slot = var->AsSlot();
- if (slot->type() == Slot::PARAMETER) {
- return slot->index();
- } else {
- return info_->scope()->num_parameters() + slot->index();
- }
-}
-
-
-void AssignedVariablesAnalyzer::RecordAssignedVar(Variable* var) {
- ASSERT(var != NULL);
- if (var->IsStackAllocated()) {
- av_.Add(BitIndex(var));
- }
-}
-
-
-void AssignedVariablesAnalyzer::MarkIfTrivial(Expression* expr) {
- Variable* var = expr->AsVariableProxy()->AsVariable();
- if (var != NULL &&
- var->IsStackAllocated() &&
- !var->is_arguments() &&
- var->mode() != Variable::CONST &&
- (var->is_this() || !av_.Contains(BitIndex(var)))) {
- expr->AsVariableProxy()->MarkAsTrivial();
- }
-}
-
-
-void AssignedVariablesAnalyzer::ProcessExpression(Expression* expr) {
- BitVector saved_av(av_);
- av_.Clear();
- Visit(expr);
- av_.Union(saved_av);
-}
-
-void AssignedVariablesAnalyzer::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void AssignedVariablesAnalyzer::VisitExpressionStatement(
- ExpressionStatement* stmt) {
- ProcessExpression(stmt->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitEmptyStatement(EmptyStatement* stmt) {
- // Do nothing.
-}
-
-
-void AssignedVariablesAnalyzer::VisitIfStatement(IfStatement* stmt) {
- ProcessExpression(stmt->condition());
- Visit(stmt->then_statement());
- Visit(stmt->else_statement());
-}
-
-
-void AssignedVariablesAnalyzer::VisitContinueStatement(
- ContinueStatement* stmt) {
- // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitBreakStatement(BreakStatement* stmt) {
- // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitReturnStatement(ReturnStatement* stmt) {
- ProcessExpression(stmt->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitWithEnterStatement(
- WithEnterStatement* stmt) {
- ProcessExpression(stmt->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitWithExitStatement(
- WithExitStatement* stmt) {
- // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitSwitchStatement(SwitchStatement* stmt) {
- BitVector result(av_);
- av_.Clear();
- Visit(stmt->tag());
- result.Union(av_);
- for (int i = 0; i < stmt->cases()->length(); i++) {
- CaseClause* clause = stmt->cases()->at(i);
- if (!clause->is_default()) {
- av_.Clear();
- Visit(clause->label());
- result.Union(av_);
- }
- VisitStatements(clause->statements());
- }
- av_.Union(result);
-}
-
-
-void AssignedVariablesAnalyzer::VisitDoWhileStatement(DoWhileStatement* stmt) {
- ProcessExpression(stmt->cond());
- Visit(stmt->body());
-}
-
-
-void AssignedVariablesAnalyzer::VisitWhileStatement(WhileStatement* stmt) {
- ProcessExpression(stmt->cond());
- Visit(stmt->body());
-}
-
-
-void AssignedVariablesAnalyzer::VisitForStatement(ForStatement* stmt) {
- if (stmt->init() != NULL) Visit(stmt->init());
- if (stmt->cond() != NULL) ProcessExpression(stmt->cond());
- if (stmt->next() != NULL) Visit(stmt->next());
-
- // Process loop body. After visiting the loop body av_ contains
- // the assigned variables of the loop body.
- BitVector saved_av(av_);
- av_.Clear();
- Visit(stmt->body());
-
- Variable* var = FindSmiLoopVariable(stmt);
- if (var != NULL && !av_.Contains(BitIndex(var))) {
- stmt->set_loop_variable(var);
- }
- av_.Union(saved_av);
-}
-
-
-void AssignedVariablesAnalyzer::VisitForInStatement(ForInStatement* stmt) {
- ProcessExpression(stmt->each());
- ProcessExpression(stmt->enumerable());
- Visit(stmt->body());
-}
-
-
-void AssignedVariablesAnalyzer::VisitTryCatchStatement(
- TryCatchStatement* stmt) {
- Visit(stmt->try_block());
- Visit(stmt->catch_block());
-}
-
-
-void AssignedVariablesAnalyzer::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- Visit(stmt->try_block());
- Visit(stmt->finally_block());
-}
-
-
-void AssignedVariablesAnalyzer::VisitDebuggerStatement(
- DebuggerStatement* stmt) {
- // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitFunctionLiteral(FunctionLiteral* expr) {
- // Nothing to do.
- ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- // Nothing to do.
- ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitConditional(Conditional* expr) {
- ASSERT(av_.IsEmpty());
-
- Visit(expr->condition());
-
- BitVector result(av_);
- av_.Clear();
- Visit(expr->then_expression());
- result.Union(av_);
-
- av_.Clear();
- Visit(expr->else_expression());
- av_.Union(result);
-}
-
-
-void AssignedVariablesAnalyzer::VisitVariableProxy(VariableProxy* expr) {
- // Nothing to do.
- ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitLiteral(Literal* expr) {
- // Nothing to do.
- ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitRegExpLiteral(RegExpLiteral* expr) {
- // Nothing to do.
- ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitObjectLiteral(ObjectLiteral* expr) {
- ASSERT(av_.IsEmpty());
- BitVector result(av_.length());
- for (int i = 0; i < expr->properties()->length(); i++) {
- Visit(expr->properties()->at(i)->value());
- result.Union(av_);
- av_.Clear();
- }
- av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitArrayLiteral(ArrayLiteral* expr) {
- ASSERT(av_.IsEmpty());
- BitVector result(av_.length());
- for (int i = 0; i < expr->values()->length(); i++) {
- Visit(expr->values()->at(i));
- result.Union(av_);
- av_.Clear();
- }
- av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- ASSERT(av_.IsEmpty());
- Visit(expr->key());
- ProcessExpression(expr->value());
-}
-
-
-void AssignedVariablesAnalyzer::VisitAssignment(Assignment* expr) {
- ASSERT(av_.IsEmpty());
-
- // There are three kinds of assignments: variable assignments, property
- // assignments, and reference errors (invalid left-hand sides).
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- Property* prop = expr->target()->AsProperty();
- ASSERT(var == NULL || prop == NULL);
-
- if (var != NULL) {
- MarkIfTrivial(expr->value());
- Visit(expr->value());
- if (expr->is_compound()) {
- // Left-hand side occurs also as an rvalue.
- MarkIfTrivial(expr->target());
- ProcessExpression(expr->target());
- }
- RecordAssignedVar(var);
-
- } else if (prop != NULL) {
- MarkIfTrivial(expr->value());
- Visit(expr->value());
- if (!prop->key()->IsPropertyName()) {
- MarkIfTrivial(prop->key());
- ProcessExpression(prop->key());
- }
- MarkIfTrivial(prop->obj());
- ProcessExpression(prop->obj());
-
- } else {
- Visit(expr->target());
- }
-}
-
-
-void AssignedVariablesAnalyzer::VisitThrow(Throw* expr) {
- ASSERT(av_.IsEmpty());
- Visit(expr->exception());
-}
-
-
-void AssignedVariablesAnalyzer::VisitProperty(Property* expr) {
- ASSERT(av_.IsEmpty());
- if (!expr->key()->IsPropertyName()) {
- MarkIfTrivial(expr->key());
- Visit(expr->key());
- }
- MarkIfTrivial(expr->obj());
- ProcessExpression(expr->obj());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCall(Call* expr) {
- ASSERT(av_.IsEmpty());
- Visit(expr->expression());
- BitVector result(av_);
- for (int i = 0; i < expr->arguments()->length(); i++) {
- av_.Clear();
- Visit(expr->arguments()->at(i));
- result.Union(av_);
- }
- av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitCallNew(CallNew* expr) {
- ASSERT(av_.IsEmpty());
- Visit(expr->expression());
- BitVector result(av_);
- for (int i = 0; i < expr->arguments()->length(); i++) {
- av_.Clear();
- Visit(expr->arguments()->at(i));
- result.Union(av_);
- }
- av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitCallRuntime(CallRuntime* expr) {
- ASSERT(av_.IsEmpty());
- BitVector result(av_);
- for (int i = 0; i < expr->arguments()->length(); i++) {
- av_.Clear();
- Visit(expr->arguments()->at(i));
- result.Union(av_);
- }
- av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitUnaryOperation(UnaryOperation* expr) {
- ASSERT(av_.IsEmpty());
- MarkIfTrivial(expr->expression());
- Visit(expr->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitIncrementOperation(
- IncrementOperation* expr) {
- UNREACHABLE();
-}
-
-
-void AssignedVariablesAnalyzer::VisitCountOperation(CountOperation* expr) {
- ASSERT(av_.IsEmpty());
- if (expr->is_prefix()) MarkIfTrivial(expr->expression());
- Visit(expr->expression());
-
- Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- if (var != NULL) RecordAssignedVar(var);
-}
-
-
-void AssignedVariablesAnalyzer::VisitBinaryOperation(BinaryOperation* expr) {
- ASSERT(av_.IsEmpty());
- MarkIfTrivial(expr->right());
- Visit(expr->right());
- MarkIfTrivial(expr->left());
- ProcessExpression(expr->left());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCompareOperation(CompareOperation* expr) {
- ASSERT(av_.IsEmpty());
- MarkIfTrivial(expr->right());
- Visit(expr->right());
- MarkIfTrivial(expr->left());
- ProcessExpression(expr->left());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCompareToNull(CompareToNull* expr) {
- ASSERT(av_.IsEmpty());
- MarkIfTrivial(expr->expression());
- Visit(expr->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitThisFunction(ThisFunction* expr) {
- // Nothing to do.
- ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/data-flow.h b/deps/v8/src/data-flow.h
index 79d760f5a..d69d6c7a5 100644
--- a/deps/v8/src/data-flow.h
+++ b/deps/v8/src/data-flow.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,6 +30,7 @@
#include "v8.h"
+#include "allocation.h"
#include "ast.h"
#include "compiler.h"
#include "zone-inl.h"
@@ -37,9 +38,6 @@
namespace v8 {
namespace internal {
-// Forward declarations.
-class Node;
-
class BitVector: public ZoneObject {
public:
// Iterator for the elements of this BitVector.
@@ -90,7 +88,7 @@ class BitVector: public ZoneObject {
explicit BitVector(int length)
: length_(length),
data_length_(SizeFor(length)),
- data_(Zone::NewArray<uint32_t>(data_length_)) {
+ data_(ZONE->NewArray<uint32_t>(data_length_)) {
ASSERT(length > 0);
Clear();
}
@@ -98,7 +96,7 @@ class BitVector: public ZoneObject {
BitVector(const BitVector& other)
: length_(other.length()),
data_length_(SizeFor(length_)),
- data_(Zone::NewArray<uint32_t>(data_length_)) {
+ data_(ZONE->NewArray<uint32_t>(data_length_)) {
CopyFrom(other);
}
@@ -201,178 +199,6 @@ class BitVector: public ZoneObject {
uint32_t* data_;
};
-
-// An implementation of a sparse set whose elements are drawn from integers
-// in the range [0..universe_size). It supports constant-time Contains,
-// destructive Add, and destructive Remove operations, and linear-time (in
-// the number of elements) destructive Union.
-class SparseSet: public ZoneObject {
- public:
- // Iterator for sparse set elements. Elements should not be added or
- // removed during iteration.
- class Iterator BASE_EMBEDDED {
- public:
- explicit Iterator(SparseSet* target) : target_(target), current_(0) {
- ASSERT(++target->iterator_count_ > 0);
- }
- ~Iterator() {
- ASSERT(target_->iterator_count_-- > 0);
- }
- bool Done() const { return current_ >= target_->dense_.length(); }
- void Advance() {
- ASSERT(!Done());
- ++current_;
- }
- int Current() {
- ASSERT(!Done());
- return target_->dense_[current_];
- }
-
- private:
- SparseSet* target_;
- int current_;
-
- friend class SparseSet;
- };
-
- explicit SparseSet(int universe_size)
- : dense_(4),
- sparse_(Zone::NewArray<int>(universe_size)) {
-#ifdef DEBUG
- size_ = universe_size;
- iterator_count_ = 0;
-#endif
- }
-
- bool Contains(int n) const {
- ASSERT(0 <= n && n < size_);
- int dense_index = sparse_[n];
- return (0 <= dense_index) &&
- (dense_index < dense_.length()) &&
- (dense_[dense_index] == n);
- }
-
- void Add(int n) {
- ASSERT(0 <= n && n < size_);
- ASSERT(iterator_count_ == 0);
- if (!Contains(n)) {
- sparse_[n] = dense_.length();
- dense_.Add(n);
- }
- }
-
- void Remove(int n) {
- ASSERT(0 <= n && n < size_);
- ASSERT(iterator_count_ == 0);
- if (Contains(n)) {
- int dense_index = sparse_[n];
- int last = dense_.RemoveLast();
- if (dense_index < dense_.length()) {
- dense_[dense_index] = last;
- sparse_[last] = dense_index;
- }
- }
- }
-
- void Union(const SparseSet& other) {
- for (int i = 0; i < other.dense_.length(); ++i) {
- Add(other.dense_[i]);
- }
- }
-
- private:
- // The set is implemented as a pair of a growable dense list and an
- // uninitialized sparse array.
- ZoneList<int> dense_;
- int* sparse_;
-#ifdef DEBUG
- int size_;
- int iterator_count_;
-#endif
-};
-
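The removed SparseSet is the classic sparse/dense two-array set (often attributed to Briggs and Torczon): sparse_ is deliberately left uninitialized, and Contains cross-checks it against the dense list, so stale garbage can never produce a false positive. A minimal standalone sketch of the same idea, in JavaScript for brevity (all names illustrative):

  function SparseSet(universeSize) {
    this.dense = [];                        // the elements, in insertion order
    this.sparse = new Array(universeSize);  // may hold stale or missing entries
  }
  SparseSet.prototype.contains = function (n) {
    var d = this.sparse[n];
    // A stale or uninitialized entry fails one of these three checks.
    return d >= 0 && d < this.dense.length && this.dense[d] === n;
  };
  SparseSet.prototype.add = function (n) {
    if (!this.contains(n)) {
      this.sparse[n] = this.dense.length;
      this.dense.push(n);
    }
  };
  SparseSet.prototype.remove = function (n) {
    if (this.contains(n)) {
      var d = this.sparse[n];
      var last = this.dense.pop();
      if (d < this.dense.length) {  // removed element was not the last one
        this.dense[d] = last;       // swap the last element into the hole
        this.sparse[last] = d;
      }
    }
  };

Remove swaps the last dense element into the vacated slot, which is what keeps both Add and Remove constant-time, at the cost of not preserving insertion order.
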
-
-// Simple fixed-capacity list-based worklist (managed as a queue) of
-// pointers to T.
-template<typename T>
-class WorkList BASE_EMBEDDED {
- public:
- // The worklist cannot grow bigger than size. We keep one item empty to
- // distinguish between empty and full.
- explicit WorkList(int size)
- : capacity_(size + 1), head_(0), tail_(0), queue_(capacity_) {
- for (int i = 0; i < capacity_; i++) queue_.Add(NULL);
- }
-
- bool is_empty() { return head_ == tail_; }
-
- bool is_full() {
- // The worklist is full if head is at 0 and tail is at capacity - 1:
- // head == 0 && tail == capacity-1 ==> tail - head == capacity - 1
- // or if tail is immediately to the left of head:
- // tail+1 == head ==> tail - head == -1
- int diff = tail_ - head_;
- return (diff == -1 || diff == capacity_ - 1);
- }
-
- void Insert(T* item) {
- ASSERT(!is_full());
- queue_[tail_++] = item;
- if (tail_ == capacity_) tail_ = 0;
- }
-
- T* Remove() {
- ASSERT(!is_empty());
- T* item = queue_[head_++];
- if (head_ == capacity_) head_ = 0;
- return item;
- }
-
- private:
- int capacity_; // Including one empty slot.
- int head_; // Where the first item is.
- int tail_; // Where the next inserted item will go.
- List<T*> queue_;
-};
-
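The removed WorkList is a fixed-capacity ring buffer that allocates size + 1 slots and keeps one permanently empty, so head_ == tail_ can unambiguously mean "empty" rather than "maybe full". The same scheme as a standalone JavaScript sketch (illustrative names):

  function WorkList(size) {
    this.capacity = size + 1;               // one slot always stays empty
    this.head = 0;                          // where the first item is
    this.tail = 0;                          // where the next item will go
    this.queue = new Array(this.capacity);
  }
  WorkList.prototype.isEmpty = function () { return this.head === this.tail; };
  WorkList.prototype.isFull = function () {
    var diff = this.tail - this.head;
    return diff === -1 || diff === this.capacity - 1;
  };
  WorkList.prototype.insert = function (item) {
    this.queue[this.tail++] = item;
    if (this.tail === this.capacity) this.tail = 0;
  };
  WorkList.prototype.remove = function () {
    var item = this.queue[this.head++];
    if (this.head === this.capacity) this.head = 0;
    return item;
  };

With size == 3 the buffer has 4 slots; after three inserts tail - head is 3 == capacity - 1 (or -1 once the indices have wrapped), so isFull() reports full while the fourth slot is still unused.
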
-
-// Computes the set of assigned variables and annotates variables proxies
-// that are trivial sub-expressions and for-loops where the loop variable
-// is guaranteed to be a smi.
-class AssignedVariablesAnalyzer : public AstVisitor {
- public:
- static bool Analyze(CompilationInfo* info);
-
- private:
- AssignedVariablesAnalyzer(CompilationInfo* info, int bits);
- bool Analyze();
-
- Variable* FindSmiLoopVariable(ForStatement* stmt);
-
- int BitIndex(Variable* var);
-
- void RecordAssignedVar(Variable* var);
-
- void MarkIfTrivial(Expression* expr);
-
-  // Visits an expression, saving the accumulator before, clearing it
-  // before visiting, and restoring it after visiting.
- void ProcessExpression(Expression* expr);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- CompilationInfo* info_;
-
- // Accumulator for assigned variables set.
- BitVector av_;
-
- DISALLOW_COPY_AND_ASSIGN(AssignedVariablesAnalyzer);
-};
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index 242ab7bbc..5a2e9a234 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -684,7 +684,7 @@ function DateGetUTCDate() {
// ECMA 262 - 15.9.5.16
function DateGetDay() {
- var t = %_ValueOf(this);
+ var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
return WeekDay(LocalTimeNoCheck(t));
}
@@ -692,7 +692,7 @@ function DateGetDay() {
// ECMA 262 - 15.9.5.17
function DateGetUTCDay() {
- var t = %_ValueOf(this);
+ var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
return WeekDay(t);
}
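
%_ValueOf returns the wrapped primitive of whatever receiver it is given, so the old code silently computed a weekday from a non-Date receiver. DATE_VALUE, the macro used throughout the rest of this file, also verifies that the receiver is actually a Date and throws a TypeError otherwise, as ECMA-262 15.9.5.16/17 require. Illustrative effect of the change:

  Date.prototype.getUTCDay.call(new Date(0));  // 4 -- Jan 1, 1970 was a Thursday
  Date.prototype.getUTCDay.call({});           // now throws TypeError instead of
                                               // returning a bogus value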
diff --git a/deps/v8/src/dateparser-inl.h b/deps/v8/src/dateparser-inl.h
index ac28c6225..7f8fac83e 100644
--- a/deps/v8/src/dateparser-inl.h
+++ b/deps/v8/src/dateparser-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -34,9 +34,11 @@ namespace v8 {
namespace internal {
template <typename Char>
-bool DateParser::Parse(Vector<Char> str, FixedArray* out) {
+bool DateParser::Parse(Vector<Char> str,
+ FixedArray* out,
+ UnicodeCache* unicode_cache) {
ASSERT(out->length() >= OUTPUT_SIZE);
- InputReader<Char> in(str);
+ InputReader<Char> in(unicode_cache, str);
TimeZoneComposer tz;
TimeComposer time;
DayComposer day;
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index 40e56f302..6e87c3418 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,7 @@
#ifndef V8_DATEPARSER_H_
#define V8_DATEPARSER_H_
+#include "allocation.h"
#include "char-predicates-inl.h"
#include "scanner-base.h"
@@ -49,7 +50,7 @@ class DateParser : public AllStatic {
// [7]: UTC offset in seconds, or null value if no timezone specified
// If parsing fails, return false (content of output array is not defined).
template <typename Char>
- static bool Parse(Vector<Char> str, FixedArray* output);
+ static bool Parse(Vector<Char> str, FixedArray* output, UnicodeCache* cache);
enum {
YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, MILLISECOND, UTC_OFFSET, OUTPUT_SIZE
@@ -67,10 +68,11 @@ class DateParser : public AllStatic {
template <typename Char>
class InputReader BASE_EMBEDDED {
public:
- explicit InputReader(Vector<Char> s)
+ InputReader(UnicodeCache* unicode_cache, Vector<Char> s)
: index_(0),
buffer_(s),
- has_read_number_(false) {
+ has_read_number_(false),
+ unicode_cache_(unicode_cache) {
Next();
}
@@ -121,7 +123,7 @@ class DateParser : public AllStatic {
}
bool SkipWhiteSpace() {
- if (ScannerConstants::kIsWhiteSpace.get(ch_)) {
+ if (unicode_cache_->IsWhiteSpace(ch_)) {
Next();
return true;
}
@@ -157,6 +159,7 @@ class DateParser : public AllStatic {
Vector<Char> buffer_;
bool has_read_number_;
uint32_t ch_;
+ UnicodeCache* unicode_cache_;
};
enum KeywordType { INVALID, MONTH_NAME, TIME_ZONE_NAME, AM_PM };
diff --git a/deps/v8/src/debug-agent.cc b/deps/v8/src/debug-agent.cc
index 6901079b9..520bc6292 100644
--- a/deps/v8/src/debug-agent.cc
+++ b/deps/v8/src/debug-agent.cc
@@ -38,11 +38,11 @@ namespace internal {
// Public V8 debugger API message handler function. This function just delegates
// to the debugger agent through its data parameter.
void DebuggerAgentMessageHandler(const v8::Debug::Message& message) {
- DebuggerAgent::instance_->DebuggerMessage(message);
+ DebuggerAgent* agent = Isolate::Current()->debugger_agent_instance();
+ ASSERT(agent != NULL);
+ agent->DebuggerMessage(message);
}
-// static
-DebuggerAgent* DebuggerAgent::instance_ = NULL;
// Debugger agent main thread.
void DebuggerAgent::Run() {
@@ -102,21 +102,22 @@ void DebuggerAgent::WaitUntilListening() {
listening_->Wait();
}
+static const char* kCreateSessionMessage =
+ "Remote debugging session already active\r\n";
+
void DebuggerAgent::CreateSession(Socket* client) {
ScopedLock with(session_access_);
// If another session is already established terminate this one.
if (session_ != NULL) {
- static const char* message = "Remote debugging session already active\r\n";
-
- client->Send(message, StrLength(message));
+ client->Send(kCreateSessionMessage, StrLength(kCreateSessionMessage));
delete client;
return;
}
// Create a new session and hook up the debug message handler.
session_ = new DebuggerAgentSession(this, client);
- v8::Debug::SetMessageHandler2(DebuggerAgentMessageHandler);
+ isolate_->debugger()->SetMessageHandler(DebuggerAgentMessageHandler);
session_->Start();
}
@@ -202,7 +203,9 @@ void DebuggerAgentSession::Run() {
// Send the request received to the debugger.
v8::Debug::SendCommand(reinterpret_cast<const uint16_t *>(temp.start()),
- len);
+ len,
+ NULL,
+ reinterpret_cast<v8::Isolate*>(agent_->isolate()));
if (is_closing_session) {
// Session is closed.
@@ -224,8 +227,8 @@ void DebuggerAgentSession::Shutdown() {
}
-const char* DebuggerAgentUtil::kContentLength = "Content-Length";
-int DebuggerAgentUtil::kContentLengthSize =
+const char* const DebuggerAgentUtil::kContentLength = "Content-Length";
+const int DebuggerAgentUtil::kContentLengthSize =
StrLength(kContentLength);
diff --git a/deps/v8/src/debug-agent.h b/deps/v8/src/debug-agent.h
index 4cedb8318..e16787197 100644
--- a/deps/v8/src/debug-agent.h
+++ b/deps/v8/src/debug-agent.h
@@ -43,24 +43,27 @@ class DebuggerAgentSession;
// handles connection from a remote debugger.
class DebuggerAgent: public Thread {
public:
- explicit DebuggerAgent(const char* name, int port)
+ DebuggerAgent(const char* name, int port)
: Thread(name),
+ isolate_(Isolate::Current()),
name_(StrDup(name)), port_(port),
server_(OS::CreateSocket()), terminate_(false),
session_access_(OS::CreateMutex()), session_(NULL),
terminate_now_(OS::CreateSemaphore(0)),
listening_(OS::CreateSemaphore(0)) {
- ASSERT(instance_ == NULL);
- instance_ = this;
+ ASSERT(isolate_->debugger_agent_instance() == NULL);
+ isolate_->set_debugger_agent_instance(this);
}
~DebuggerAgent() {
- instance_ = NULL;
+ isolate_->set_debugger_agent_instance(NULL);
delete server_;
}
void Shutdown();
void WaitUntilListening();
+ Isolate* isolate() { return isolate_; }
+
private:
void Run();
void CreateSession(Socket* socket);
@@ -68,6 +71,7 @@ class DebuggerAgent: public Thread {
void CloseSession();
void OnSessionClosed(DebuggerAgentSession* session);
+ Isolate* isolate_;
SmartPointer<const char> name_; // Name of the embedding application.
int port_; // Port to use for the agent.
Socket* server_; // Server socket for listen/accept.
@@ -77,8 +81,6 @@ class DebuggerAgent: public Thread {
Semaphore* terminate_now_; // Semaphore to signal termination.
Semaphore* listening_;
- static DebuggerAgent* instance_;
-
friend class DebuggerAgentSession;
friend void DebuggerAgentMessageHandler(const v8::Debug::Message& message);
@@ -112,8 +114,8 @@ class DebuggerAgentSession: public Thread {
// Utility methods factored out to be used by the D8 shell as well.
class DebuggerAgentUtil {
public:
- static const char* kContentLength;
- static int kContentLengthSize;
+ static const char* const kContentLength;
+ static const int kContentLengthSize;
static SmartPointer<char> ReceiveMessage(const Socket* conn);
static bool SendConnectMessage(const Socket* conn,
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js
index bc0f966fb..c632e4771 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug-debugger.js
@@ -68,7 +68,8 @@ Debug.ScriptCompilationType = { Host: 0,
// The different script break point types.
Debug.ScriptBreakPointType = { ScriptId: 0,
- ScriptName: 1 };
+ ScriptName: 1,
+ ScriptRegExp: 2 };
function ScriptTypeFlag(type) {
return (1 << type);
@@ -255,8 +256,12 @@ function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
this.type_ = type;
if (type == Debug.ScriptBreakPointType.ScriptId) {
this.script_id_ = script_id_or_name;
- } else { // type == Debug.ScriptBreakPointType.ScriptName
+ } else if (type == Debug.ScriptBreakPointType.ScriptName) {
this.script_name_ = script_id_or_name;
+ } else if (type == Debug.ScriptBreakPointType.ScriptRegExp) {
+ this.script_regexp_object_ = new RegExp(script_id_or_name);
+ } else {
+ throw new Error("Unexpected breakpoint type " + type);
}
this.line_ = opt_line || 0;
this.column_ = opt_column;
@@ -309,6 +314,11 @@ ScriptBreakPoint.prototype.script_name = function() {
};
+ScriptBreakPoint.prototype.script_regexp_object = function() {
+ return this.script_regexp_object_;
+};
+
+
ScriptBreakPoint.prototype.line = function() {
return this.line_;
};
@@ -384,10 +394,19 @@ ScriptBreakPoint.prototype.setIgnoreCount = function(ignoreCount) {
ScriptBreakPoint.prototype.matchesScript = function(script) {
if (this.type_ == Debug.ScriptBreakPointType.ScriptId) {
return this.script_id_ == script.id;
- } else { // this.type_ == Debug.ScriptBreakPointType.ScriptName
- return this.script_name_ == script.nameOrSourceURL() &&
- script.line_offset <= this.line_ &&
- this.line_ < script.line_offset + script.lineCount();
+ } else {
+    // We might want to account for columns here as well.
+ if (!(script.line_offset <= this.line_ &&
+ this.line_ < script.line_offset + script.lineCount())) {
+ return false;
+ }
+ if (this.type_ == Debug.ScriptBreakPointType.ScriptName) {
+ return this.script_name_ == script.nameOrSourceURL();
+ } else if (this.type_ == Debug.ScriptBreakPointType.ScriptRegExp) {
+ return this.script_regexp_object_.test(script.nameOrSourceURL());
+ } else {
+ throw new Error("Unexpected breakpoint type " + this.type_);
+ }
}
};
@@ -431,7 +450,8 @@ ScriptBreakPoint.prototype.set = function (script) {
}
var actual_location = script.locationFromPosition(actual_position, true);
break_point.actual_location = { line: actual_location.line,
- column: actual_location.column };
+ column: actual_location.column,
+ script_id: script.id };
this.break_points_.push(break_point);
return break_point;
};
@@ -644,7 +664,8 @@ Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
actual_position += this.sourcePosition(func);
var actual_location = script.locationFromPosition(actual_position, true);
break_point.actual_location = { line: actual_location.line,
- column: actual_location.column };
+ column: actual_location.column,
+ script_id: script.id };
break_point.setCondition(opt_condition);
return break_point.number();
}
@@ -799,6 +820,15 @@ Debug.setScriptBreakPointByName = function(script_name,
}
+Debug.setScriptBreakPointByRegExp = function(script_regexp,
+ opt_line, opt_column,
+ opt_condition, opt_groupId) {
+ return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptRegExp,
+ script_regexp, opt_line, opt_column,
+ opt_condition, opt_groupId);
+}
+
+
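A hypothetical call to the new entry point (Debug here is the debug-debugger.js Debug object); the pattern string is handed straight to new RegExp(...) in the ScriptBreakPoint constructor above, so ordinary anchors and escapes apply:

  // Break at line 10 of every script whose name ends in "/app.js".
  var bp = Debug.setScriptBreakPointByRegExp("/app\\.js$", 10);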
Debug.enableScriptBreakPoint = function(break_point_number) {
var script_break_point = this.findScriptBreakPoint(break_point_number, false);
script_break_point.enable();
@@ -1335,7 +1365,7 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
try {
try {
// Convert the JSON string to an object.
- request = %CompileString('(' + json_request + ')')();
+ request = JSON.parse(json_request);
// Create an initial response.
response = this.createResponse(request);
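
This swap is effectively a hardening fix as much as a simplification: the old %CompileString path evaluated the incoming request as a parenthesized expression, so text arriving over the debugger socket could run as code in the debugger context, whereas JSON.parse accepts only data. A sketch of the difference:

  JSON.parse('{"seq":1,"type":"request","command":"version"}'); // plain data: ok
  JSON.parse('(function(){ /* arbitrary */ })()');  // throws SyntaxError --
                                                    // the old path would have
                                                    // compiled and run it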
@@ -1549,12 +1579,7 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
response.failed('Missing argument "type" or "target"');
return;
}
- if (type != 'function' && type != 'handle' &&
- type != 'script' && type != 'scriptId') {
- response.failed('Illegal type "' + type + '"');
- return;
- }
-
+
// Either function or script break point.
var break_point_number;
if (type == 'function') {
@@ -1598,9 +1623,16 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
break_point_number =
Debug.setScriptBreakPointByName(target, line, column, condition,
groupId);
- } else { // type == 'scriptId.
+ } else if (type == 'scriptId') {
break_point_number =
Debug.setScriptBreakPointById(target, line, column, condition, groupId);
+ } else if (type == 'scriptRegExp') {
+ break_point_number =
+ Debug.setScriptBreakPointByRegExp(target, line, column, condition,
+ groupId);
+ } else {
+ response.failed('Illegal type "' + type + '"');
+ return;
}
// Set additional break point properties.
@@ -1621,9 +1653,14 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
response.body.type = 'scriptId';
response.body.script_id = break_point.script_id();
- } else {
+ } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptName) {
response.body.type = 'scriptName';
response.body.script_name = break_point.script_name();
+ } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) {
+ response.body.type = 'scriptRegExp';
+ response.body.script_regexp = break_point.script_regexp_object().source;
+ } else {
+ throw new Error("Internal error: Unexpected breakpoint type: " + break_point.type());
}
response.body.line = break_point.line();
response.body.column = break_point.column();
@@ -1753,9 +1790,14 @@ DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(request, resp
if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
description.type = 'scriptId';
description.script_id = break_point.script_id();
- } else {
+ } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptName) {
description.type = 'scriptName';
description.script_name = break_point.script_name();
+ } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) {
+ description.type = 'scriptRegExp';
+ description.script_regexp = break_point.script_regexp_object().source;
+ } else {
+ throw new Error("Internal error: Unexpected breakpoint type: " + break_point.type());
}
array.push(description);
}
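
Both response sites above now serialize a regexp break point the same way; a response body entry would look roughly like this (field values illustrative):

  var description = {
    type: "scriptRegExp",
    script_regexp: "/app\\.js$",  // .source of the RegExp the break point holds
    line: 10,
    column: 0
  };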
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index c47394121..f341fc6f1 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -51,6 +51,26 @@ namespace v8 {
namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
+
+
+Debug::Debug(Isolate* isolate)
+ : has_break_points_(false),
+ script_cache_(NULL),
+ debug_info_list_(NULL),
+ disable_break_(false),
+ break_on_exception_(false),
+ break_on_uncaught_exception_(false),
+ debug_break_return_(NULL),
+ debug_break_slot_(NULL),
+ isolate_(isolate) {
+ memset(registers_, 0, sizeof(JSCallerSavedBuffer));
+}
+
+
+Debug::~Debug() {
+}
+
+
static void PrintLn(v8::Local<v8::Value> value) {
v8::Local<v8::String> s = value->ToString();
ScopedVector<char> data(s->Length() + 1);
@@ -64,22 +84,28 @@ static void PrintLn(v8::Local<v8::Value> value) {
static Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind) {
- CALL_HEAP_FUNCTION(StubCache::ComputeCallDebugBreak(argc, kind), Code);
+ Isolate* isolate = Isolate::Current();
+ CALL_HEAP_FUNCTION(
+ isolate,
+ isolate->stub_cache()->ComputeCallDebugBreak(argc, kind),
+ Code);
}
static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
+ Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(
- StubCache::ComputeCallDebugPrepareStepIn(argc, kind), Code);
+ isolate,
+ isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind),
+ Code);
}
-static v8::Handle<v8::Context> GetDebugEventContext() {
- Handle<Context> context = Debug::debugger_entry()->GetContext();
-  // Top::context() may have been NULL when "script collected" event occurred.
- if (*context == NULL) {
- return v8::Local<v8::Context>();
- }
+static v8::Handle<v8::Context> GetDebugEventContext(Isolate* isolate) {
+ Handle<Context> context = isolate->debug()->debugger_entry()->GetContext();
+ // Isolate::context() may have been NULL when "script collected" event
+  // occurred.
+ if (context.is_null()) return v8::Local<v8::Context>();
Handle<Context> global_context(context->global_context());
return v8::Utils::ToLocal(global_context);
}
@@ -142,7 +168,7 @@ void BreakLocationIterator::Next() {
Code* code = Code::GetCodeFromTargetAddress(target);
if ((code->is_inline_cache_stub() &&
!code->is_binary_op_stub() &&
- !code->is_type_recording_binary_op_stub() &&
+ !code->is_unary_op_stub() &&
!code->is_compare_ic_stub()) ||
RelocInfo::IsConstructCall(rmode())) {
break_point_++;
@@ -452,21 +478,6 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
// calling convention used by the call site.
Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
rinfo()->set_target_address(dbgbrk_code->entry());
-
- // For stubs that refer back to an inlined version clear the cached map for
- // the inlined case to always go through the IC. As long as the break point
- // is set the patching performed by the runtime system will take place in
- // the code copy and will therefore have no effect on the running code
- // keeping it from using the inlined code.
- if (code->is_keyed_load_stub()) {
- KeyedLoadIC::ClearInlinedVersion(pc());
- } else if (code->is_keyed_store_stub()) {
- KeyedStoreIC::ClearInlinedVersion(pc());
- } else if (code->is_load_stub()) {
- LoadIC::ClearInlinedVersion(pc());
- } else if (code->is_store_stub()) {
- StoreIC::ClearInlinedVersion(pc());
- }
}
}
@@ -474,20 +485,6 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
void BreakLocationIterator::ClearDebugBreakAtIC() {
// Patch the code to the original invoke.
rinfo()->set_target_address(original_rinfo()->target_address());
-
- RelocInfo::Mode mode = rmode();
- if (RelocInfo::IsCodeTarget(mode)) {
- AssertNoAllocation nogc;
- Address target = original_rinfo()->target_address();
- Code* code = Code::GetCodeFromTargetAddress(target);
-
- // Restore the inlined version of keyed stores to get back to the
- // fast case. We need to patch back the keyed store because no
- // patching happens when running normally. For keyed loads, the
- // map check will get patched back when running normally after ICs
- // have been cleared at GC.
- if (code->is_keyed_store_stub()) KeyedStoreIC::RestoreInlinedVersion(pc());
- }
}
@@ -535,11 +532,6 @@ void BreakLocationIterator::RinfoNext() {
}
-bool Debug::has_break_points_ = false;
-ScriptCache* Debug::script_cache_ = NULL;
-DebugInfoListNode* Debug::debug_info_list_ = NULL;
-
-
// Threading support.
void Debug::ThreadInit() {
thread_local_.break_count_ = 0;
@@ -552,16 +544,13 @@ void Debug::ThreadInit() {
thread_local_.step_into_fp_ = 0;
thread_local_.step_out_fp_ = 0;
thread_local_.after_break_target_ = 0;
+ // TODO(isolates): frames_are_dropped_?
thread_local_.debugger_entry_ = NULL;
thread_local_.pending_interrupts_ = 0;
thread_local_.restarter_frame_function_pointer_ = NULL;
}
-JSCallerSavedBuffer Debug::registers_;
-Debug::ThreadLocal Debug::thread_local_;
-
-
char* Debug::ArchiveDebug(char* storage) {
char* to = storage;
memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
@@ -584,7 +573,7 @@ char* Debug::RestoreDebug(char* storage) {
int Debug::ArchiveSpacePerThread() {
- return sizeof(ThreadLocal) + sizeof(registers_);
+ return sizeof(ThreadLocal) + sizeof(JSCallerSavedBuffer);
}
@@ -614,22 +603,8 @@ Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
const int Debug::kFrameDropperFrameSize = 4;
-
-
-
-// Default break enabled.
-bool Debug::disable_break_ = false;
-
-// Default call debugger on uncaught exception.
-bool Debug::break_on_exception_ = false;
-bool Debug::break_on_uncaught_exception_ = false;
-
-Handle<Context> Debug::debug_context_ = Handle<Context>();
-Code* Debug::debug_break_return_ = NULL;
-Code* Debug::debug_break_slot_ = NULL;
-
-
void ScriptCache::Add(Handle<Script> script) {
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
// Create an entry in the hash map for the script.
int id = Smi::cast(script->id())->value();
HashMap::Entry* entry =
@@ -642,15 +617,18 @@ void ScriptCache::Add(Handle<Script> script) {
// Globalize the script object, make it weak and use the location of the
// global handle as the value in the hash map.
Handle<Script> script_ =
- Handle<Script>::cast((GlobalHandles::Create(*script)));
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()),
- this, ScriptCache::HandleWeakScript);
+ Handle<Script>::cast(
+ (global_handles->Create(*script)));
+ global_handles->MakeWeak(
+ reinterpret_cast<Object**>(script_.location()),
+ this,
+ ScriptCache::HandleWeakScript);
entry->value = script_.location();
}
Handle<FixedArray> ScriptCache::GetScripts() {
- Handle<FixedArray> instances = Factory::NewFixedArray(occupancy());
+ Handle<FixedArray> instances = FACTORY->NewFixedArray(occupancy());
int count = 0;
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
ASSERT(entry->value != NULL);
@@ -664,21 +642,23 @@ Handle<FixedArray> ScriptCache::GetScripts() {
void ScriptCache::ProcessCollectedScripts() {
+ Debugger* debugger = Isolate::Current()->debugger();
for (int i = 0; i < collected_scripts_.length(); i++) {
- Debugger::OnScriptCollected(collected_scripts_[i]);
+ debugger->OnScriptCollected(collected_scripts_[i]);
}
collected_scripts_.Clear();
}
void ScriptCache::Clear() {
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
// Iterate the script cache to get rid of all the weak handles.
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
ASSERT(entry != NULL);
Object** location = reinterpret_cast<Object**>(entry->value);
ASSERT((*location)->IsScript());
- GlobalHandles::ClearWeakness(location);
- GlobalHandles::Destroy(location);
+ global_handles->ClearWeakness(location);
+ global_handles->Destroy(location);
}
// Clear the content of the hash map.
HashMap::Clear();
@@ -708,17 +688,18 @@ void Debug::Setup(bool create_heap_objects) {
if (create_heap_objects) {
// Get code to handle debug break on return.
debug_break_return_ =
- Builtins::builtin(Builtins::Return_DebugBreak);
+ isolate_->builtins()->builtin(Builtins::kReturn_DebugBreak);
ASSERT(debug_break_return_->IsCode());
// Get code to handle debug break in debug break slots.
debug_break_slot_ =
- Builtins::builtin(Builtins::Slot_DebugBreak);
+ isolate_->builtins()->builtin(Builtins::kSlot_DebugBreak);
ASSERT(debug_break_slot_->IsCode());
}
}
void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
+ Debug* debug = Isolate::Current()->debug();
DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
// We need to clear all breakpoints associated with the function to restore
// original code and avoid patching the code twice later because
@@ -726,9 +707,9 @@ void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
// Runtime::FindSharedFunctionInfoInScript.
BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
it.ClearAllDebugBreak();
- RemoveDebugInfo(node->debug_info());
+ debug->RemoveDebugInfo(node->debug_info());
#ifdef DEBUG
- node = Debug::debug_info_list_;
+ node = debug->debug_info_list_;
while (node != NULL) {
ASSERT(node != reinterpret_cast<DebugInfoListNode*>(data));
node = node->next();
@@ -738,20 +719,27 @@ void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
// Globalize the request debug info object and make it weak.
- debug_info_ = Handle<DebugInfo>::cast((GlobalHandles::Create(debug_info)));
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
- this, Debug::HandleWeakDebugInfo);
+ debug_info_ = Handle<DebugInfo>::cast(
+ (global_handles->Create(debug_info)));
+ global_handles->MakeWeak(
+ reinterpret_cast<Object**>(debug_info_.location()),
+ this,
+ Debug::HandleWeakDebugInfo);
}
DebugInfoListNode::~DebugInfoListNode() {
- GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_info_.location()));
+ Isolate::Current()->global_handles()->Destroy(
+ reinterpret_cast<Object**>(debug_info_.location()));
}
bool Debug::CompileDebuggerScript(int index) {
- HandleScope scope;
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
// Bail out if the index is invalid.
if (index == -1) {
@@ -759,33 +747,31 @@ bool Debug::CompileDebuggerScript(int index) {
}
// Find source and name for the requested script.
- Handle<String> source_code = Bootstrapper::NativesSourceLookup(index);
+ Handle<String> source_code =
+ isolate->bootstrapper()->NativesSourceLookup(index);
Vector<const char> name = Natives::GetScriptName(index);
- Handle<String> script_name = Factory::NewStringFromAscii(name);
+ Handle<String> script_name = factory->NewStringFromAscii(name);
// Compile the script.
- bool allow_natives_syntax = FLAG_allow_natives_syntax;
- FLAG_allow_natives_syntax = true;
Handle<SharedFunctionInfo> function_info;
function_info = Compiler::Compile(source_code,
script_name,
0, 0, NULL, NULL,
Handle<String>::null(),
NATIVES_CODE);
- FLAG_allow_natives_syntax = allow_natives_syntax;
// Silently ignore stack overflows during compilation.
if (function_info.is_null()) {
- ASSERT(Top::has_pending_exception());
- Top::clear_pending_exception();
+ ASSERT(isolate->has_pending_exception());
+ isolate->clear_pending_exception();
return false;
}
// Execute the shared function in the debugger context.
- Handle<Context> context = Top::global_context();
+ Handle<Context> context = isolate->global_context();
bool caught_exception = false;
Handle<JSFunction> function =
- Factory::NewFunctionFromSharedFunctionInfo(function_info, context);
+ factory->NewFunctionFromSharedFunctionInfo(function_info, context);
Handle<Object> result =
Execution::TryCall(function, Handle<Object>(context->global()),
0, NULL, &caught_exception);
@@ -795,7 +781,7 @@ bool Debug::CompileDebuggerScript(int index) {
Handle<Object> message = MessageHandler::MakeMessageObject(
"error_loading_debugger", NULL, Vector<Handle<Object> >::empty(),
Handle<String>(), Handle<JSArray>());
- MessageHandler::ReportMessage(NULL, message);
+ MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
return false;
}
@@ -810,38 +796,44 @@ bool Debug::Load() {
// Return if debugger is already loaded.
if (IsLoaded()) return true;
+ Debugger* debugger = isolate_->debugger();
+
// Bail out if we're already in the process of compiling the native
// JavaScript source code for the debugger.
- if (Debugger::compiling_natives() || Debugger::is_loading_debugger())
+ if (debugger->compiling_natives() ||
+ debugger->is_loading_debugger())
return false;
- Debugger::set_loading_debugger(true);
+ debugger->set_loading_debugger(true);
// Disable breakpoints and interrupts while compiling and running the
// debugger scripts including the context creation code.
DisableBreak disable(true);
- PostponeInterruptsScope postpone;
+ PostponeInterruptsScope postpone(isolate_);
// Create the debugger context.
- HandleScope scope;
+ HandleScope scope(isolate_);
Handle<Context> context =
- Bootstrapper::CreateEnvironment(Handle<Object>::null(),
- v8::Handle<ObjectTemplate>(),
- NULL);
+ isolate_->bootstrapper()->CreateEnvironment(
+ isolate_,
+ Handle<Object>::null(),
+ v8::Handle<ObjectTemplate>(),
+ NULL);
// Use the debugger context.
- SaveContext save;
- Top::set_context(*context);
+ SaveContext save(isolate_);
+ isolate_->set_context(*context);
// Expose the builtins object in the debugger context.
- Handle<String> key = Factory::LookupAsciiSymbol("builtins");
+ Handle<String> key = isolate_->factory()->LookupAsciiSymbol("builtins");
Handle<GlobalObject> global = Handle<GlobalObject>(context->global());
RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate_,
SetProperty(global, key, Handle<Object>(global->builtins()),
NONE, kNonStrictMode),
false);
// Compile the JavaScript for the debugger in the debugger context.
- Debugger::set_compiling_natives(true);
+ debugger->set_compiling_natives(true);
bool caught_exception =
!CompileDebuggerScript(Natives::GetIndex("mirror")) ||
!CompileDebuggerScript(Natives::GetIndex("debug"));
@@ -851,11 +843,11 @@ bool Debug::Load() {
!CompileDebuggerScript(Natives::GetIndex("liveedit"));
}
- Debugger::set_compiling_natives(false);
+ debugger->set_compiling_natives(false);
// Make sure we mark the debugger as not loading before we might
// return.
- Debugger::set_loading_debugger(false);
+ debugger->set_loading_debugger(false);
// Check for caught exceptions.
if (caught_exception) return false;
@@ -877,7 +869,8 @@ void Debug::Unload() {
DestroyScriptCache();
// Clear debugger context global handle.
- GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_context_.location()));
+ Isolate::Current()->global_handles()->Destroy(
+ reinterpret_cast<Object**>(debug_context_.location()));
debug_context_ = Handle<Context>();
}
@@ -896,29 +889,30 @@ void Debug::Iterate(ObjectVisitor* v) {
Object* Debug::Break(Arguments args) {
- HandleScope scope;
+ Heap* heap = isolate_->heap();
+ HandleScope scope(isolate_);
ASSERT(args.length() == 0);
thread_local_.frame_drop_mode_ = FRAMES_UNTOUCHED;
// Get the top-most JavaScript frame.
- JavaScriptFrameIterator it;
+ JavaScriptFrameIterator it(isolate_);
JavaScriptFrame* frame = it.frame();
// Just continue if breaks are disabled or debugger cannot be loaded.
if (disable_break() || !Load()) {
SetAfterBreakTarget(frame);
- return Heap::undefined_value();
+ return heap->undefined_value();
}
// Enter the debugger.
EnterDebugger debugger;
if (debugger.FailedToEnter()) {
- return Heap::undefined_value();
+ return heap->undefined_value();
}
// Postpone interrupt during breakpoint processing.
- PostponeInterruptsScope postpone;
+ PostponeInterruptsScope postpone(isolate_);
// Get the debug info (create it if it does not exist).
Handle<SharedFunctionInfo> shared =
@@ -940,7 +934,7 @@ Object* Debug::Break(Arguments args) {
// If there is one or more real break points check whether any of these are
// triggered.
- Handle<Object> break_points_hit(Heap::undefined_value());
+ Handle<Object> break_points_hit(heap->undefined_value());
if (break_location_iterator.HasBreakPoint()) {
Handle<Object> break_point_objects =
Handle<Object>(break_location_iterator.BreakPointObjects());
@@ -949,7 +943,7 @@ Object* Debug::Break(Arguments args) {
// If step out is active skip everything until the frame where we need to step
// out to is reached, unless real breakpoint is hit.
- if (Debug::StepOutActive() && frame->fp() != Debug::step_out_fp() &&
+ if (StepOutActive() && frame->fp() != step_out_fp() &&
break_points_hit->IsUndefined() ) {
// Step count should always be 0 for StepOut.
ASSERT(thread_local_.step_count_ == 0);
@@ -963,7 +957,7 @@ Object* Debug::Break(Arguments args) {
ClearStepping();
// Notify the debug event listeners.
- Debugger::OnDebugBreak(break_points_hit, false);
+ isolate_->debugger()->OnDebugBreak(break_points_hit, false);
} else if (thread_local_.last_step_action_ != StepNone) {
// Hold on to last step action as it is cleared by the call to
// ClearStepping.
@@ -979,23 +973,37 @@ Object* Debug::Break(Arguments args) {
if (thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
SetAfterBreakTarget(frame);
- } else if (thread_local_.frame_drop_mode_ == FRAME_DROPPED_IN_IC_CALL) {
+ } else if (thread_local_.frame_drop_mode_ ==
+ FRAME_DROPPED_IN_IC_CALL) {
// We must have been calling IC stub. Do not go there anymore.
- Code* plain_return = Builtins::builtin(Builtins::PlainReturn_LiveEdit);
+ Code* plain_return = isolate_->builtins()->builtin(
+ Builtins::kPlainReturn_LiveEdit);
thread_local_.after_break_target_ = plain_return->entry();
} else if (thread_local_.frame_drop_mode_ ==
FRAME_DROPPED_IN_DEBUG_SLOT_CALL) {
// Debug break slot stub does not return normally, instead it manually
// cleans the stack and jumps. We should patch the jump address.
- Code* plain_return = Builtins::builtin(Builtins::FrameDropper_LiveEdit);
+ Code* plain_return = isolate_->builtins()->builtin(
+ Builtins::kFrameDropper_LiveEdit);
thread_local_.after_break_target_ = plain_return->entry();
- } else if (thread_local_.frame_drop_mode_ == FRAME_DROPPED_IN_DIRECT_CALL) {
+ } else if (thread_local_.frame_drop_mode_ ==
+ FRAME_DROPPED_IN_DIRECT_CALL) {
// Nothing to do, after_break_target is not used here.
+ } else if (thread_local_.frame_drop_mode_ ==
+ FRAME_DROPPED_IN_RETURN_CALL) {
+ Code* plain_return = isolate_->builtins()->builtin(
+ Builtins::kFrameDropper_LiveEdit);
+ thread_local_.after_break_target_ = plain_return->entry();
} else {
UNREACHABLE();
}
- return Heap::undefined_value();
+ return heap->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Object*, Debug_Break) {
+ return isolate->debug()->Break(args);
}
@@ -1003,52 +1011,58 @@ Object* Debug::Break(Arguments args) {
// triggered. This function returns a JSArray with the break point objects
// which is triggered.
Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
- int break_points_hit_count = 0;
- Handle<JSArray> break_points_hit = Factory::NewJSArray(1);
+ Factory* factory = isolate_->factory();
- // If there are multiple break points they are in a FixedArray.
+  // Count the number of break points hit. If there are multiple break points,
+ // they are in a FixedArray.
+ Handle<FixedArray> break_points_hit;
+ int break_points_hit_count = 0;
ASSERT(!break_point_objects->IsUndefined());
if (break_point_objects->IsFixedArray()) {
Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
+ break_points_hit = factory->NewFixedArray(array->length());
for (int i = 0; i < array->length(); i++) {
Handle<Object> o(array->get(i));
if (CheckBreakPoint(o)) {
- SetElement(break_points_hit, break_points_hit_count++, o);
+ break_points_hit->set(break_points_hit_count++, *o);
}
}
} else {
+ break_points_hit = factory->NewFixedArray(1);
if (CheckBreakPoint(break_point_objects)) {
- SetElement(break_points_hit,
- break_points_hit_count++,
- break_point_objects);
+ break_points_hit->set(break_points_hit_count++, *break_point_objects);
}
}
// Return undefined if no break points were triggered.
if (break_points_hit_count == 0) {
- return Factory::undefined_value();
+ return factory->undefined_value();
}
- return break_points_hit;
+ // Return break points hit as a JSArray.
+ Handle<JSArray> result = factory->NewJSArrayWithElements(break_points_hit);
+ result->set_length(Smi::FromInt(break_points_hit_count));
+ return result;
}
// Check whether a single break point object is triggered.
bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
- HandleScope scope;
+ Factory* factory = isolate_->factory();
+ HandleScope scope(isolate_);
// Ignore check if break point object is not a JSObject.
if (!break_point_object->IsJSObject()) return true;
- // Get the function CheckBreakPoint (defined in debug.js).
+ // Get the function IsBreakPointTriggered (defined in debug-debugger.js).
Handle<String> is_break_point_triggered_symbol =
- Factory::LookupAsciiSymbol("IsBreakPointTriggered");
+ factory->LookupAsciiSymbol("IsBreakPointTriggered");
Handle<JSFunction> check_break_point =
Handle<JSFunction>(JSFunction::cast(
debug_context()->global()->GetPropertyNoExceptionThrown(
*is_break_point_triggered_symbol)));
// Get the break id as an object.
- Handle<Object> break_id = Factory::NewNumberFromInt(Debug::break_id());
+ Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
// Call IsBreakPointTriggered.
bool caught_exception = false;
@@ -1058,8 +1072,7 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
reinterpret_cast<Object**>(break_point_object.location())
};
Handle<Object> result = Execution::TryCall(check_break_point,
- Top::builtins(), argc, argv,
- &caught_exception);
+ isolate_->js_builtins_object(), argc, argv, &caught_exception);
// If exception or non boolean result handle as not triggered
if (caught_exception || !result->IsBoolean()) {
@@ -1067,7 +1080,8 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
}
// Return whether the break point is triggered.
- return *result == Heap::true_value();
+ ASSERT(!result.is_null());
+ return (*result)->IsTrue();
}
@@ -1088,7 +1102,7 @@ Handle<DebugInfo> Debug::GetDebugInfo(Handle<SharedFunctionInfo> shared) {
void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
Handle<Object> break_point_object,
int* source_position) {
- HandleScope scope;
+ HandleScope scope(isolate_);
if (!EnsureDebugInfo(shared)) {
// Return if retrieving debug info failed.
@@ -1112,7 +1126,7 @@ void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
- HandleScope scope;
+ HandleScope scope(isolate_);
DebugInfoListNode* node = debug_info_list_;
while (node != NULL) {
@@ -1185,7 +1199,7 @@ void Debug::FloodHandlerWithOneShot() {
// If there is no JavaScript stack don't do anything.
return;
}
- for (JavaScriptFrameIterator it(id); !it.done(); it.Advance()) {
+ for (JavaScriptFrameIterator it(isolate_, id); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
if (frame->HasHandler()) {
Handle<SharedFunctionInfo> shared =
@@ -1218,7 +1232,7 @@ bool Debug::IsBreakOnException(ExceptionBreakType type) {
void Debug::PrepareStep(StepAction step_action, int step_count) {
- HandleScope scope;
+ HandleScope scope(isolate_);
ASSERT(Debug::InDebugger());
// Remember this step action and count.
@@ -1240,7 +1254,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// If there is no JavaScript stack don't do anything.
return;
}
- JavaScriptFrameIterator frames_it(id);
+ JavaScriptFrameIterator frames_it(isolate_, id);
JavaScriptFrame* frame = frames_it.frame();
// First of all ensure there is one-shot break points in the top handler
@@ -1366,8 +1380,10 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// Reverse lookup required as the minor key cannot be retrieved
// from the code object.
Handle<Object> obj(
- Heap::code_stubs()->SlowReverseLookup(*call_function_stub));
- ASSERT(*obj != Heap::undefined_value());
+ isolate_->heap()->code_stubs()->SlowReverseLookup(
+ *call_function_stub));
+ ASSERT(!obj.is_null());
+ ASSERT(!(*obj)->IsUndefined());
ASSERT(obj->IsSmi());
// Get the STUB key and extract major and minor key.
uint32_t key = Smi::cast(*obj)->value();
@@ -1485,18 +1501,16 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
return ComputeCallDebugBreak(code->arguments_count(), code->kind());
case Code::LOAD_IC:
- return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
+ return Isolate::Current()->builtins()->LoadIC_DebugBreak();
case Code::STORE_IC:
- return Handle<Code>(Builtins::builtin(Builtins::StoreIC_DebugBreak));
+ return Isolate::Current()->builtins()->StoreIC_DebugBreak();
case Code::KEYED_LOAD_IC:
- return Handle<Code>(
- Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
+ return Isolate::Current()->builtins()->KeyedLoadIC_DebugBreak();
case Code::KEYED_STORE_IC:
- return Handle<Code>(
- Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
+ return Isolate::Current()->builtins()->KeyedStoreIC_DebugBreak();
default:
UNREACHABLE();
@@ -1504,13 +1518,13 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
}
if (RelocInfo::IsConstructCall(mode)) {
Handle<Code> result =
- Handle<Code>(Builtins::builtin(Builtins::ConstructCall_DebugBreak));
+ Isolate::Current()->builtins()->ConstructCall_DebugBreak();
return result;
}
if (code->kind() == Code::STUB) {
ASSERT(code->major_key() == CodeStub::CallFunction);
Handle<Code> result =
- Handle<Code>(Builtins::builtin(Builtins::StubNoRegisters_DebugBreak));
+ Isolate::Current()->builtins()->StubNoRegisters_DebugBreak();
return result;
}
@@ -1522,13 +1536,15 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
// Simple function for returning the source positions for active break points.
Handle<Object> Debug::GetSourceBreakLocations(
Handle<SharedFunctionInfo> shared) {
- if (!HasDebugInfo(shared)) return Handle<Object>(Heap::undefined_value());
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+ if (!HasDebugInfo(shared)) return Handle<Object>(heap->undefined_value());
Handle<DebugInfo> debug_info = GetDebugInfo(shared);
if (debug_info->GetBreakPointCount() == 0) {
- return Handle<Object>(Heap::undefined_value());
+ return Handle<Object>(heap->undefined_value());
}
Handle<FixedArray> locations =
- Factory::NewFixedArray(debug_info->GetBreakPointCount());
+ isolate->factory()->NewFixedArray(debug_info->GetBreakPointCount());
int count = 0;
for (int i = 0; i < debug_info->break_points()->length(); i++) {
if (!debug_info->break_points()->get(i)->IsUndefined()) {
@@ -1574,13 +1590,13 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
// Flood the function with one-shot break points if it is called from where
// step into was requested.
- if (fp == Debug::step_in_fp()) {
+ if (fp == step_in_fp()) {
// Don't allow step into functions in the native context.
if (!function->IsBuiltin()) {
if (function->shared()->code() ==
- Builtins::builtin(Builtins::FunctionApply) ||
+ Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply) ||
function->shared()->code() ==
- Builtins::builtin(Builtins::FunctionCall)) {
+ Isolate::Current()->builtins()->builtin(Builtins::kFunctionCall)) {
// Handle function.apply and function.call separately to flood the
// function to be called and not the code for Builtins::FunctionApply or
// Builtins::FunctionCall. The receiver of call/apply is the target
@@ -1674,7 +1690,7 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
}
// Create the debug info object.
- Handle<DebugInfo> debug_info = Factory::NewDebugInfo(shared);
+ Handle<DebugInfo> debug_info = FACTORY->NewDebugInfo(shared);
// Add debug info to the list.
DebugInfoListNode* node = new DebugInfoListNode(*debug_info);
@@ -1701,7 +1717,8 @@ void Debug::RemoveDebugInfo(Handle<DebugInfo> debug_info) {
} else {
prev->set_next(current->next());
}
- current->debug_info()->shared()->set_debug_info(Heap::undefined_value());
+ current->debug_info()->shared()->set_debug_info(
+ isolate_->heap()->undefined_value());
delete current;
// If there are no more debug info objects there are no more break
@@ -1719,7 +1736,7 @@ void Debug::RemoveDebugInfo(Handle<DebugInfo> debug_info) {
void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
- HandleScope scope;
+ HandleScope scope(isolate_);
// Get the executing function in which the debug break occurred.
Handle<SharedFunctionInfo> shared =
@@ -1733,7 +1750,7 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
Handle<Code> original_code(debug_info->original_code());
#ifdef DEBUG
// Get the code which is actually executing.
- Handle<Code> frame_code(frame->code());
+ Handle<Code> frame_code(frame->LookupCode());
ASSERT(frame_code.is_identical_to(code));
#endif
@@ -1802,7 +1819,7 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
- HandleScope scope;
+ HandleScope scope(isolate_);
// Get the executing function in which the debug break occurred.
Handle<SharedFunctionInfo> shared =
@@ -1815,7 +1832,7 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
Handle<Code> code(debug_info->code());
#ifdef DEBUG
// Get the code which is actually executing.
- Handle<Code> frame_code(frame->code());
+ Handle<Code> frame_code(frame->LookupCode());
ASSERT(frame_code.is_identical_to(code));
#endif
@@ -1846,19 +1863,19 @@ void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
bool Debug::IsDebugGlobal(GlobalObject* global) {
- return IsLoaded() && global == Debug::debug_context()->global();
+ return IsLoaded() && global == debug_context()->global();
}
void Debug::ClearMirrorCache() {
- PostponeInterruptsScope postpone;
- HandleScope scope;
- ASSERT(Top::context() == *Debug::debug_context());
+ PostponeInterruptsScope postpone(isolate_);
+ HandleScope scope(isolate_);
+ ASSERT(isolate_->context() == *Debug::debug_context());
// Clear the mirror cache.
Handle<String> function_name =
- Factory::LookupSymbol(CStrVector("ClearMirrorCache"));
- Handle<Object> fun(Top::global()->GetPropertyNoExceptionThrown(
+ isolate_->factory()->LookupSymbol(CStrVector("ClearMirrorCache"));
+ Handle<Object> fun(Isolate::Current()->global()->GetPropertyNoExceptionThrown(
*function_name));
ASSERT(fun->IsJSFunction());
bool caught_exception;
@@ -1870,13 +1887,14 @@ void Debug::ClearMirrorCache() {
void Debug::CreateScriptCache() {
- HandleScope scope;
+ Heap* heap = isolate_->heap();
+ HandleScope scope(isolate_);
// Perform two GCs to get rid of all unreferenced scripts. The first GC gets
// rid of all the cached script wrappers and the second gets rid of the
// scripts which are no longer referenced.
- Heap::CollectAllGarbage(false);
- Heap::CollectAllGarbage(false);
+ heap->CollectAllGarbage(false);
+ heap->CollectAllGarbage(false);
ASSERT(script_cache_ == NULL);
script_cache_ = new ScriptCache();
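The double collection in CreateScriptCache is deliberate: cached script wrappers keep their scripts alive, so the first pass can only reclaim the wrappers, and the scripts they pinned become collectible only on the second pass. A toy model of that two-phase behavior (hypothetical types, not V8's weak-handle machinery):

// Toy two-phase collector (hypothetical, not V8 code). A callback that
// severs an edge during one sweep makes its target garbage only for the
// *next* sweep, hence the back-to-back CollectAllGarbage calls above.
#include <cstdio>
#include <functional>
#include <vector>

struct Heap {
  std::vector<std::function<void()>> weak_callbacks;
  void CollectAllGarbage() {
    auto pending = std::move(weak_callbacks);  // run this cycle's callbacks
    for (auto& cb : pending) cb();
  }
};

int main() {
  Heap heap;
  bool script_alive = true;
  // First sweep clears the cached wrapper; its callback unpins the script.
  heap.weak_callbacks.push_back([&] {
    heap.weak_callbacks.push_back([&] { script_alive = false; });
  });
  heap.CollectAllGarbage();  // wrapper gone, script still pinned
  heap.CollectAllGarbage();  // now the script can be reclaimed too
  std::printf("script alive: %d\n", script_alive);  // prints 0
}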
@@ -1919,12 +1937,12 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
// If the script cache is not active just return an empty array.
ASSERT(script_cache_ != NULL);
if (script_cache_ == NULL) {
- Factory::NewFixedArray(0);
+ return isolate_->factory()->NewFixedArray(0);
}
// Perform GC to get unreferenced scripts evicted from the cache before
// returning the content.
- Heap::CollectAllGarbage(false);
+ isolate_->heap()->CollectAllGarbage(false);
// Get the scripts from the cache.
return script_cache_->GetScripts();
@@ -1939,51 +1957,65 @@ void Debug::AfterGarbageCollection() {
}
-Mutex* Debugger::debugger_access_ = OS::CreateMutex();
-Handle<Object> Debugger::event_listener_ = Handle<Object>();
-Handle<Object> Debugger::event_listener_data_ = Handle<Object>();
-bool Debugger::compiling_natives_ = false;
-bool Debugger::is_loading_debugger_ = false;
-bool Debugger::never_unload_debugger_ = false;
-v8::Debug::MessageHandler2 Debugger::message_handler_ = NULL;
-bool Debugger::debugger_unload_pending_ = false;
-v8::Debug::HostDispatchHandler Debugger::host_dispatch_handler_ = NULL;
-Mutex* Debugger::dispatch_handler_access_ = OS::CreateMutex();
-v8::Debug::DebugMessageDispatchHandler
- Debugger::debug_message_dispatch_handler_ = NULL;
-MessageDispatchHelperThread* Debugger::message_dispatch_helper_thread_ = NULL;
-int Debugger::host_dispatch_micros_ = 100 * 1000;
-DebuggerAgent* Debugger::agent_ = NULL;
-LockingCommandMessageQueue Debugger::command_queue_(kQueueInitialSize);
-Semaphore* Debugger::command_received_ = OS::CreateSemaphore(0);
-LockingCommandMessageQueue Debugger::event_command_queue_(kQueueInitialSize);
+Debugger::Debugger(Isolate* isolate)
+ : debugger_access_(OS::CreateMutex()),
+ event_listener_(Handle<Object>()),
+ event_listener_data_(Handle<Object>()),
+ compiling_natives_(false),
+ is_loading_debugger_(false),
+ never_unload_debugger_(false),
+ message_handler_(NULL),
+ debugger_unload_pending_(false),
+ host_dispatch_handler_(NULL),
+ dispatch_handler_access_(OS::CreateMutex()),
+ debug_message_dispatch_handler_(NULL),
+ message_dispatch_helper_thread_(NULL),
+ host_dispatch_micros_(100 * 1000),
+ agent_(NULL),
+ command_queue_(isolate->logger(), kQueueInitialSize),
+ command_received_(OS::CreateSemaphore(0)),
+ event_command_queue_(isolate->logger(), kQueueInitialSize),
+ isolate_(isolate) {
+}
+
+
+Debugger::~Debugger() {
+ delete debugger_access_;
+ debugger_access_ = NULL;
+ delete dispatch_handler_access_;
+ dispatch_handler_access_ = NULL;
+ delete command_received_;
+ command_received_ = NULL;
+}
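The constructor/destructor pair above replaces the block of static initializers it supersedes: every mutex, semaphore, and queue that used to be process-wide is now owned by one Debugger per isolate and torn down with it. A minimal standalone sketch of the shape of that conversion (hypothetical names, not the real classes):

// Before: process-wide state via class statics initialized at load time.
// After: one instance per Isolate with ordinary member lifetime.
// Hypothetical reduction, not V8 code.
#include <mutex>

class Isolate;  // forward declaration; owner of all per-VM services

class Debugger {
 public:
  explicit Debugger(Isolate* isolate)
      : isolate_(isolate), never_unload_(false) {}
  // Formerly a `static Mutex*` created by a static initializer; now a
  // plain member constructed and destroyed with the instance.
  std::mutex access_;

 private:
  Isolate* isolate_;
  bool never_unload_;
};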
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
int argc, Object*** argv,
bool* caught_exception) {
- ASSERT(Top::context() == *Debug::debug_context());
+ ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
// Create the execution state object.
- Handle<String> constructor_str = Factory::LookupSymbol(constructor_name);
- Handle<Object> constructor(Top::global()->GetPropertyNoExceptionThrown(
- *constructor_str));
+ Handle<String> constructor_str =
+ isolate_->factory()->LookupSymbol(constructor_name);
+ Handle<Object> constructor(
+ isolate_->global()->GetPropertyNoExceptionThrown(*constructor_str));
ASSERT(constructor->IsJSFunction());
if (!constructor->IsJSFunction()) {
*caught_exception = true;
- return Factory::undefined_value();
+ return isolate_->factory()->undefined_value();
}
Handle<Object> js_object = Execution::TryCall(
Handle<JSFunction>::cast(constructor),
- Handle<JSObject>(Debug::debug_context()->global()), argc, argv,
- caught_exception);
+ Handle<JSObject>(isolate_->debug()->debug_context()->global()),
+ argc, argv, caught_exception);
return js_object;
}
Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) {
// Create the execution state object.
- Handle<Object> break_id = Factory::NewNumberFromInt(Debug::break_id());
+ Handle<Object> break_id = isolate_->factory()->NewNumberFromInt(
+ isolate_->debug()->break_id());
const int argc = 1;
Object** argv[argc] = { break_id.location() };
return MakeJSObject(CStrVector("MakeExecutionState"),
@@ -2009,12 +2041,13 @@ Handle<Object> Debugger::MakeExceptionEvent(Handle<Object> exec_state,
Handle<Object> exception,
bool uncaught,
bool* caught_exception) {
+ Factory* factory = isolate_->factory();
// Create the new exception event object.
const int argc = 3;
Object** argv[argc] = { exec_state.location(),
exception.location(),
- uncaught ? Factory::true_value().location() :
- Factory::false_value().location()};
+ uncaught ? factory->true_value().location() :
+ factory->false_value().location()};
return MakeJSObject(CStrVector("MakeExceptionEvent"),
argc, argv, caught_exception);
}
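The Make*Event helpers all share one calling convention: a fixed-size array of Object** slots is built from handle locations and forwarded to a constructor function defined in the debugger's JavaScript. A standalone sketch of that location-array pattern (toy types, not the real Handle machinery):

// Toy version of the argv marshalling used by MakeJSObject callers
// (hypothetical types, not V8's Handle<T>). Each slot holds the *location*
// of a handle, so the callee sees through-pointer updates if objects move.
#include <cstdio>

struct Object {};
template <typename T>
struct Handle {
  T** location_;
  T** location() const { return location_; }
};

void CallConstructor(const char* name, int argc, Object*** argv) {
  std::printf("calling %s with %d arg slot(s)\n", name, argc);
}

int main() {
  Object* exec_state_slot = nullptr;
  Handle<Object> exec_state{&exec_state_slot};
  const int argc = 1;
  Object** argv[argc] = { exec_state.location() };
  CallConstructor("MakeExecutionState", argc, argv);
}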
@@ -2033,14 +2066,15 @@ Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script,
bool before,
bool* caught_exception) {
+ Factory* factory = isolate_->factory();
// Create the compile event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> script_wrapper = GetScriptWrapper(script);
const int argc = 3;
Object** argv[argc] = { exec_state.location(),
script_wrapper.location(),
- before ? Factory::true_value().location() :
- Factory::false_value().location() };
+ before ? factory->true_value().location() :
+ factory->false_value().location() };
return MakeJSObject(CStrVector("MakeCompileEvent"),
argc,
@@ -2065,20 +2099,21 @@ Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
void Debugger::OnException(Handle<Object> exception, bool uncaught) {
- HandleScope scope;
+ HandleScope scope(isolate_);
+ Debug* debug = isolate_->debug();
// Bail out based on state or if there is no listener for this event
- if (Debug::InDebugger()) return;
+ if (debug->InDebugger()) return;
if (!Debugger::EventActive(v8::Exception)) return;
// Bail out if exception breaks are not active
if (uncaught) {
// Uncaught exceptions are reported if either flag is set.
- if (!(Debug::break_on_uncaught_exception() ||
- Debug::break_on_exception())) return;
+ if (!(debug->break_on_uncaught_exception() ||
+ debug->break_on_exception())) return;
} else {
// Caught exceptions are reported only if break on exceptions is activated.
- if (!Debug::break_on_exception()) return;
+ if (!debug->break_on_exception()) return;
}
// Enter the debugger.
@@ -2086,7 +2121,7 @@ void Debugger::OnException(Handle<Object> exception, bool uncaught) {
if (debugger.FailedToEnter()) return;
// Clear all current stepping setup.
- Debug::ClearStepping();
+ debug->ClearStepping();
// Create the event data object.
bool caught_exception = false;
Handle<Object> exec_state = MakeExecutionState(&caught_exception);
@@ -2108,16 +2143,16 @@ void Debugger::OnException(Handle<Object> exception, bool uncaught) {
void Debugger::OnDebugBreak(Handle<Object> break_points_hit,
bool auto_continue) {
- HandleScope scope;
+ HandleScope scope(isolate_);
// Debugger has already been entered by caller.
- ASSERT(Top::context() == *Debug::debug_context());
+ ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
// Bail out if there is no listener for this event
if (!Debugger::EventActive(v8::Break)) return;
// Debugger must be entered in advance.
- ASSERT(Top::context() == *Debug::debug_context());
+ ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
// Create the event data object.
bool caught_exception = false;
@@ -2140,10 +2175,10 @@ void Debugger::OnDebugBreak(Handle<Object> break_points_hit,
void Debugger::OnBeforeCompile(Handle<Script> script) {
- HandleScope scope;
+ HandleScope scope(isolate_);
// Bail out based on state or if there is no listener for this event
- if (Debug::InDebugger()) return;
+ if (isolate_->debug()->InDebugger()) return;
if (compiling_natives()) return;
if (!EventActive(v8::BeforeCompile)) return;
@@ -2169,10 +2204,11 @@ void Debugger::OnBeforeCompile(Handle<Script> script) {
// Handle debugger actions when a new script is compiled.
void Debugger::OnAfterCompile(Handle<Script> script,
AfterCompileFlags after_compile_flags) {
- HandleScope scope;
+ HandleScope scope(isolate_);
+ Debug* debug = isolate_->debug();
// Add the newly compiled script to the script cache.
- Debug::AddScriptToScriptCache(script);
+ debug->AddScriptToScriptCache(script);
// No more to do if not debugging.
if (!IsDebuggerActive()) return;
@@ -2181,7 +2217,7 @@ void Debugger::OnAfterCompile(Handle<Script> script,
if (compiling_natives()) return;
// Store whether in debugger before entering debugger.
- bool in_debugger = Debug::InDebugger();
+ bool in_debugger = debug->InDebugger();
// Enter the debugger.
EnterDebugger debugger;
@@ -2192,9 +2228,9 @@ void Debugger::OnAfterCompile(Handle<Script> script,
// Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
Handle<String> update_script_break_points_symbol =
- Factory::LookupAsciiSymbol("UpdateScriptBreakPoints");
+ isolate_->factory()->LookupAsciiSymbol("UpdateScriptBreakPoints");
Handle<Object> update_script_break_points =
- Handle<Object>(Debug::debug_context()->global()->
+ Handle<Object>(debug->debug_context()->global()->
GetPropertyNoExceptionThrown(*update_script_break_points_symbol));
if (!update_script_break_points->IsJSFunction()) {
return;
@@ -2211,7 +2247,7 @@ void Debugger::OnAfterCompile(Handle<Script> script,
Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) };
Handle<Object> result = Execution::TryCall(
Handle<JSFunction>::cast(update_script_break_points),
- Top::builtins(), argc, argv,
+ Isolate::Current()->js_builtins_object(), argc, argv,
&caught_exception);
if (caught_exception) {
return;
@@ -2236,7 +2272,7 @@ void Debugger::OnAfterCompile(Handle<Script> script,
void Debugger::OnScriptCollected(int id) {
- HandleScope scope;
+ HandleScope scope(isolate_);
// No more to do if not debugging.
if (!IsDebuggerActive()) return;
@@ -2265,11 +2301,11 @@ void Debugger::OnScriptCollected(int id) {
void Debugger::ProcessDebugEvent(v8::DebugEvent event,
Handle<JSObject> event_data,
bool auto_continue) {
- HandleScope scope;
+ HandleScope scope(isolate_);
// Clear any pending debug break if this is a real break.
if (!auto_continue) {
- Debug::clear_interrupt_pending(DEBUGBREAK);
+ isolate_->debug()->clear_interrupt_pending(DEBUGBREAK);
}
// Create the execution state.
@@ -2311,7 +2347,7 @@ void Debugger::CallEventCallback(v8::DebugEvent event,
Handle<Object> exec_state,
Handle<Object> event_data,
v8::Debug::ClientData* client_data) {
- if (event_listener_->IsProxy()) {
+ if (event_listener_->IsForeign()) {
CallCEventCallback(event, exec_state, event_data, client_data);
} else {
CallJSEventCallback(event, exec_state, event_data);
@@ -2323,9 +2359,9 @@ void Debugger::CallCEventCallback(v8::DebugEvent event,
Handle<Object> exec_state,
Handle<Object> event_data,
v8::Debug::ClientData* client_data) {
- Handle<Proxy> callback_obj(Handle<Proxy>::cast(event_listener_));
+ Handle<Foreign> callback_obj(Handle<Foreign>::cast(event_listener_));
v8::Debug::EventCallback2 callback =
- FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->proxy());
+ FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->address());
EventDetailsImpl event_details(
event,
Handle<JSObject>::cast(exec_state),
@@ -2349,25 +2385,27 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event,
Handle<Object>::cast(event_data).location(),
event_listener_data_.location() };
bool caught_exception = false;
- Execution::TryCall(fun, Top::global(), argc, argv, &caught_exception);
+ Execution::TryCall(fun, isolate_->global(), argc, argv, &caught_exception);
// Silently ignore exceptions from debug event listeners.
}
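Note the contract at the end of CallJSEventCallback: an exception thrown by a JavaScript debug event listener is deliberately recorded and dropped, so a faulty listener cannot derail the debugger itself. A hedged sketch of that guard (hypothetical wrapper, not the real Execution::TryCall signature):

// Sketch of the "silently ignore listener exceptions" policy.
#include <exception>
#include <functional>

void InvokeListener(const std::function<void()>& listener) {
  bool caught_exception = false;
  try {
    listener();
  } catch (const std::exception&) {
    caught_exception = true;  // recorded, but intentionally not rethrown
  }
  (void)caught_exception;  // the debugger keeps running either way
}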
Handle<Context> Debugger::GetDebugContext() {
- never_unload_debugger_ = true;
- EnterDebugger debugger;
- return Debug::debug_context();
+ never_unload_debugger_ = true;
+ EnterDebugger debugger;
+ return isolate_->debug()->debug_context();
}
void Debugger::UnloadDebugger() {
+ Debug* debug = isolate_->debug();
+
// Make sure that there are no breakpoints left.
- Debug::ClearAllBreakPoints();
+ debug->ClearAllBreakPoints();
// Unload the debugger if feasible.
if (!never_unload_debugger_) {
- Debug::Unload();
+ debug->Unload();
}
// Clear the flag indicating that the debugger should be unloaded.
@@ -2379,9 +2417,9 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
bool auto_continue) {
- HandleScope scope;
+ HandleScope scope(isolate_);
- if (!Debug::Load()) return;
+ if (!isolate_->debug()->Load()) return;
// Process the individual events.
bool sendEventMessage = false;
@@ -2410,8 +2448,8 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
// The debug command interrupt flag might have been set when the command was
// added. It should be enough to clear the flag only once while we are in the
// debugger.
- ASSERT(Debug::InDebugger());
- StackGuard::Continue(DEBUGCOMMAND);
+ ASSERT(isolate_->debug()->InDebugger());
+ isolate_->stack_guard()->Continue(DEBUGCOMMAND);
// Notify the debugger that a debug event has occurred unless auto continue is
// active, in which case no event is sent.
@@ -2474,7 +2512,8 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
// Get the command from the queue.
CommandMessage command = command_queue_.Get();
- Logger::DebugTag("Got request from command queue, in interactive loop.");
+ isolate_->logger()->DebugTag(
+ "Got request from command queue, in interactive loop.");
if (!Debugger::IsDebuggerActive()) {
// Delete command text and user data.
command.Dispose();
@@ -2548,17 +2587,18 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
void Debugger::SetEventListener(Handle<Object> callback,
Handle<Object> data) {
- HandleScope scope;
+ HandleScope scope(isolate_);
+ GlobalHandles* global_handles = isolate_->global_handles();
// Clear the global handles for the event listener and the event listener data
// object.
if (!event_listener_.is_null()) {
- GlobalHandles::Destroy(
+ global_handles->Destroy(
reinterpret_cast<Object**>(event_listener_.location()));
event_listener_ = Handle<Object>();
}
if (!event_listener_data_.is_null()) {
- GlobalHandles::Destroy(
+ global_handles->Destroy(
reinterpret_cast<Object**>(event_listener_data_.location()));
event_listener_data_ = Handle<Object>();
}
@@ -2566,11 +2606,13 @@ void Debugger::SetEventListener(Handle<Object> callback,
// If there is a new debug event listener register it together with its data
// object.
if (!callback->IsUndefined() && !callback->IsNull()) {
- event_listener_ = Handle<Object>::cast(GlobalHandles::Create(*callback));
+ event_listener_ = Handle<Object>::cast(
+ global_handles->Create(*callback));
if (data.is_null()) {
- data = Factory::undefined_value();
+ data = isolate_->factory()->undefined_value();
}
- event_listener_data_ = Handle<Object>::cast(GlobalHandles::Create(*data));
+ event_listener_data_ = Handle<Object>::cast(
+ global_handles->Create(*data));
}
ListenersChanged();
@@ -2585,7 +2627,7 @@ void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
if (handler == NULL) {
// Send an empty command to the debugger if in a break to make JavaScript
// run again if the debugger is closed.
- if (Debug::InDebugger()) {
+ if (isolate_->debug()->InDebugger()) {
ProcessCommand(Vector<const uint16_t>::empty());
}
}
@@ -2595,10 +2637,10 @@ void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
void Debugger::ListenersChanged() {
if (IsDebuggerActive()) {
// Disable the compilation cache when the debugger is active.
- CompilationCache::Disable();
+ isolate_->compilation_cache()->Disable();
debugger_unload_pending_ = false;
} else {
- CompilationCache::Enable();
+ isolate_->compilation_cache()->Enable();
// Unload the debugger if event listener and message handler cleared.
// Schedule this for later, because we may be in non-V8 thread.
debugger_unload_pending_ = true;
@@ -2619,7 +2661,7 @@ void Debugger::SetDebugMessageDispatchHandler(
debug_message_dispatch_handler_ = handler;
if (provide_locker && message_dispatch_helper_thread_ == NULL) {
- message_dispatch_helper_thread_ = new MessageDispatchHelperThread;
+ message_dispatch_helper_thread_ = new MessageDispatchHelperThread(isolate_);
message_dispatch_helper_thread_->Start();
}
}
@@ -2647,13 +2689,13 @@ void Debugger::ProcessCommand(Vector<const uint16_t> command,
Vector<uint16_t>(const_cast<uint16_t*>(command.start()),
command.length()),
client_data);
- Logger::DebugTag("Put command on command_queue.");
+ isolate_->logger()->DebugTag("Put command on command_queue.");
command_queue_.Put(message);
command_received_->Signal();
// Set the debug command break flag to have the command processed.
- if (!Debug::InDebugger()) {
- StackGuard::DebugCommand();
+ if (!isolate_->debug()->InDebugger()) {
+ isolate_->stack_guard()->DebugCommand();
}
MessageDispatchHelperThread* dispatch_thread;
@@ -2680,8 +2722,8 @@ void Debugger::EnqueueDebugCommand(v8::Debug::ClientData* client_data) {
event_command_queue_.Put(message);
// Set the debug command break flag to have the command processed.
- if (!Debug::InDebugger()) {
- StackGuard::DebugCommand();
+ if (!isolate_->debug()->InDebugger()) {
+ isolate_->stack_guard()->DebugCommand();
}
}
@@ -2702,21 +2744,21 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
// Enter the debugger.
EnterDebugger debugger;
if (debugger.FailedToEnter()) {
- return Factory::undefined_value();
+ return isolate_->factory()->undefined_value();
}
// Create the execution state.
bool caught_exception = false;
Handle<Object> exec_state = MakeExecutionState(&caught_exception);
if (caught_exception) {
- return Factory::undefined_value();
+ return isolate_->factory()->undefined_value();
}
static const int kArgc = 2;
Object** argv[kArgc] = { exec_state.location(), data.location() };
Handle<Object> result = Execution::Call(
fun,
- Handle<Object>(Debug::debug_context_->global_proxy()),
+ Handle<Object>(isolate_->debug()->debug_context_->global_proxy()),
kArgc,
argv,
pending_exception);
@@ -2731,6 +2773,7 @@ static void StubMessageHandler2(const v8::Debug::Message& message) {
bool Debugger::StartAgent(const char* name, int port,
bool wait_for_connection) {
+ ASSERT(Isolate::Current() == isolate_);
if (wait_for_connection) {
// Suspend V8 if it is already running or set V8 to suspend whenever
// it starts.
@@ -2755,6 +2798,7 @@ bool Debugger::StartAgent(const char* name, int port,
void Debugger::StopAgent() {
+ ASSERT(Isolate::Current() == isolate_);
if (agent_ != NULL) {
agent_->Shutdown();
agent_->Join();
@@ -2765,6 +2809,7 @@ void Debugger::StopAgent() {
void Debugger::WaitForAgent() {
+ ASSERT(Isolate::Current() == isolate_);
if (agent_ != NULL)
agent_->WaitUntilListening();
}
@@ -2874,10 +2919,11 @@ v8::Handle<v8::String> MessageImpl::GetJSON() const {
v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
- v8::Handle<v8::Context> context = GetDebugEventContext();
- // Top::context() may be NULL when "script collected" event occures.
+ Isolate* isolate = Isolate::Current();
+ v8::Handle<v8::Context> context = GetDebugEventContext(isolate);
+ // Isolate::context() may be NULL when the "script collected" event occurs.
ASSERT(!context.IsEmpty() || event_ == v8::ScriptCollected);
- return GetDebugEventContext();
+ return GetDebugEventContext(isolate);
}
@@ -2914,7 +2960,7 @@ v8::Handle<v8::Object> EventDetailsImpl::GetEventData() const {
v8::Handle<v8::Context> EventDetailsImpl::GetEventContext() const {
- return GetDebugEventContext();
+ return GetDebugEventContext(Isolate::Current());
}
@@ -3003,8 +3049,8 @@ void CommandMessageQueue::Expand() {
}
-LockingCommandMessageQueue::LockingCommandMessageQueue(int size)
- : queue_(size) {
+LockingCommandMessageQueue::LockingCommandMessageQueue(Logger* logger, int size)
+ : logger_(logger), queue_(size) {
lock_ = OS::CreateMutex();
}
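LockingCommandMessageQueue now receives its Logger explicitly instead of calling the formerly static Logger::DebugEvent, which is what lets queues belonging to different isolates log independently. A compact sketch of the same dependency-injection move (standalone, hypothetical types):

// Hypothetical reduction of the Logger injection (not the real classes):
// the queue logs through the instance it was handed at construction.
#include <cstdio>
#include <mutex>
#include <queue>
#include <string>

struct Logger {
  void DebugEvent(const char* what, const std::string& text) {
    std::printf("%s: %s\n", what, text.c_str());
  }
};

class LockingQueue {
 public:
  LockingQueue(Logger* logger, size_t) : logger_(logger) {}
  void Put(std::string msg) {
    std::lock_guard<std::mutex> lock(mutex_);
    queue_.push(msg);
    logger_->DebugEvent("Put", msg);  // was: a static Logger::DebugEvent call
  }
 private:
  Logger* logger_;
  std::queue<std::string> queue_;
  std::mutex mutex_;
};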
@@ -3023,7 +3069,7 @@ bool LockingCommandMessageQueue::IsEmpty() const {
CommandMessage LockingCommandMessageQueue::Get() {
ScopedLock sl(lock_);
CommandMessage result = queue_.Get();
- Logger::DebugEvent("Get", result.text());
+ logger_->DebugEvent("Get", result.text());
return result;
}
@@ -3031,7 +3077,7 @@ CommandMessage LockingCommandMessageQueue::Get() {
void LockingCommandMessageQueue::Put(const CommandMessage& message) {
ScopedLock sl(lock_);
queue_.Put(message);
- Logger::DebugEvent("Put", message.text());
+ logger_->DebugEvent("Put", message.text());
}
@@ -3041,7 +3087,7 @@ void LockingCommandMessageQueue::Clear() {
}
-MessageDispatchHelperThread::MessageDispatchHelperThread()
+MessageDispatchHelperThread::MessageDispatchHelperThread(Isolate* isolate)
: Thread("v8:MsgDispHelpr"),
sem_(OS::CreateSemaphore(0)), mutex_(OS::CreateMutex()),
already_signalled_(false) {
@@ -3075,7 +3121,7 @@ void MessageDispatchHelperThread::Run() {
}
{
Locker locker;
- Debugger::CallMessageDispatchHandler();
+ Isolate::Current()->debugger()->CallMessageDispatchHandler();
}
}
}
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index 85c4d534f..c4d3c7e37 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -28,6 +28,8 @@
#ifndef V8_DEBUG_H_
#define V8_DEBUG_H_
+#include "allocation.h"
+#include "arguments.h"
#include "assembler.h"
#include "debug-agent.h"
#include "execution.h"
@@ -210,7 +212,6 @@ class DebugInfoListNode {
DebugInfoListNode* next_;
};
-
// This class contains the debugger support. The main purpose is to handle
// setting break points in the code.
//
@@ -220,33 +221,33 @@ class DebugInfoListNode {
// DebugInfo.
class Debug {
public:
- static void Setup(bool create_heap_objects);
- static bool Load();
- static void Unload();
- static bool IsLoaded() { return !debug_context_.is_null(); }
- static bool InDebugger() { return thread_local_.debugger_entry_ != NULL; }
- static void PreemptionWhileInDebugger();
- static void Iterate(ObjectVisitor* v);
-
- static Object* Break(Arguments args);
- static void SetBreakPoint(Handle<SharedFunctionInfo> shared,
- Handle<Object> break_point_object,
- int* source_position);
- static void ClearBreakPoint(Handle<Object> break_point_object);
- static void ClearAllBreakPoints();
- static void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
- static void FloodHandlerWithOneShot();
- static void ChangeBreakOnException(ExceptionBreakType type, bool enable);
- static bool IsBreakOnException(ExceptionBreakType type);
- static void PrepareStep(StepAction step_action, int step_count);
- static void ClearStepping();
- static bool StepNextContinue(BreakLocationIterator* break_location_iterator,
- JavaScriptFrame* frame);
+ void Setup(bool create_heap_objects);
+ bool Load();
+ void Unload();
+ bool IsLoaded() { return !debug_context_.is_null(); }
+ bool InDebugger() { return thread_local_.debugger_entry_ != NULL; }
+ void PreemptionWhileInDebugger();
+ void Iterate(ObjectVisitor* v);
+
+ Object* Break(Arguments args);
+ void SetBreakPoint(Handle<SharedFunctionInfo> shared,
+ Handle<Object> break_point_object,
+ int* source_position);
+ void ClearBreakPoint(Handle<Object> break_point_object);
+ void ClearAllBreakPoints();
+ void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
+ void FloodHandlerWithOneShot();
+ void ChangeBreakOnException(ExceptionBreakType type, bool enable);
+ bool IsBreakOnException(ExceptionBreakType type);
+ void PrepareStep(StepAction step_action, int step_count);
+ void ClearStepping();
+ bool StepNextContinue(BreakLocationIterator* break_location_iterator,
+ JavaScriptFrame* frame);
static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
static bool HasDebugInfo(Handle<SharedFunctionInfo> shared);
// Returns whether the operation succeeded.
- static bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared);
+ bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared);
// Returns true if the current stub call is patched to call the debugger.
static bool IsDebugBreak(Address addr);
@@ -266,66 +267,66 @@ class Debug {
Handle<SharedFunctionInfo> shared);
// Getter for the debug_context.
- inline static Handle<Context> debug_context() { return debug_context_; }
+ inline Handle<Context> debug_context() { return debug_context_; }
// Check whether a global object is the debug global object.
- static bool IsDebugGlobal(GlobalObject* global);
+ bool IsDebugGlobal(GlobalObject* global);
// Check whether this frame is just about to return.
- static bool IsBreakAtReturn(JavaScriptFrame* frame);
+ bool IsBreakAtReturn(JavaScriptFrame* frame);
// Fast check to see if any break points are active.
- inline static bool has_break_points() { return has_break_points_; }
+ inline bool has_break_points() { return has_break_points_; }
- static void NewBreak(StackFrame::Id break_frame_id);
- static void SetBreak(StackFrame::Id break_frame_id, int break_id);
- static StackFrame::Id break_frame_id() {
+ void NewBreak(StackFrame::Id break_frame_id);
+ void SetBreak(StackFrame::Id break_frame_id, int break_id);
+ StackFrame::Id break_frame_id() {
return thread_local_.break_frame_id_;
}
- static int break_id() { return thread_local_.break_id_; }
+ int break_id() { return thread_local_.break_id_; }
- static bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
- static void HandleStepIn(Handle<JSFunction> function,
- Handle<Object> holder,
- Address fp,
- bool is_constructor);
- static Address step_in_fp() { return thread_local_.step_into_fp_; }
- static Address* step_in_fp_addr() { return &thread_local_.step_into_fp_; }
+ bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
+ void HandleStepIn(Handle<JSFunction> function,
+ Handle<Object> holder,
+ Address fp,
+ bool is_constructor);
+ Address step_in_fp() { return thread_local_.step_into_fp_; }
+ Address* step_in_fp_addr() { return &thread_local_.step_into_fp_; }
- static bool StepOutActive() { return thread_local_.step_out_fp_ != 0; }
- static Address step_out_fp() { return thread_local_.step_out_fp_; }
+ bool StepOutActive() { return thread_local_.step_out_fp_ != 0; }
+ Address step_out_fp() { return thread_local_.step_out_fp_; }
- static EnterDebugger* debugger_entry() {
+ EnterDebugger* debugger_entry() {
return thread_local_.debugger_entry_;
}
- static void set_debugger_entry(EnterDebugger* entry) {
+ void set_debugger_entry(EnterDebugger* entry) {
thread_local_.debugger_entry_ = entry;
}
// Check whether any of the specified interrupts are pending.
- static bool is_interrupt_pending(InterruptFlag what) {
+ bool is_interrupt_pending(InterruptFlag what) {
return (thread_local_.pending_interrupts_ & what) != 0;
}
// Set specified interrupts as pending.
- static void set_interrupts_pending(InterruptFlag what) {
+ void set_interrupts_pending(InterruptFlag what) {
thread_local_.pending_interrupts_ |= what;
}
// Clear specified interrupts from pending.
- static void clear_interrupt_pending(InterruptFlag what) {
+ void clear_interrupt_pending(InterruptFlag what) {
thread_local_.pending_interrupts_ &= ~static_cast<int>(what);
}
// Getter and setter for the disable break state.
- static bool disable_break() { return disable_break_; }
- static void set_disable_break(bool disable_break) {
+ bool disable_break() { return disable_break_; }
+ void set_disable_break(bool disable_break) {
disable_break_ = disable_break;
}
// Getters for the current exception break state.
- static bool break_on_exception() { return break_on_exception_; }
- static bool break_on_uncaught_exception() {
+ bool break_on_exception() { return break_on_exception_; }
+ bool break_on_uncaught_exception() {
return break_on_uncaught_exception_;
}
@@ -337,34 +338,35 @@ class Debug {
};
// Support for setting the address to jump to when returning from break point.
- static Address* after_break_target_address() {
+ Address* after_break_target_address() {
return reinterpret_cast<Address*>(&thread_local_.after_break_target_);
}
- static Address* restarter_frame_function_pointer_address() {
+ Address* restarter_frame_function_pointer_address() {
Object*** address = &thread_local_.restarter_frame_function_pointer_;
return reinterpret_cast<Address*>(address);
}
// Support for saving/restoring registers when handling debug break calls.
- static Object** register_address(int r) {
+ Object** register_address(int r) {
return &registers_[r];
}
// Access to the debug break on return code.
- static Code* debug_break_return() { return debug_break_return_; }
- static Code** debug_break_return_address() {
+ Code* debug_break_return() { return debug_break_return_; }
+ Code** debug_break_return_address() {
return &debug_break_return_;
}
// Access to the debug break in debug break slot code.
- static Code* debug_break_slot() { return debug_break_slot_; }
- static Code** debug_break_slot_address() {
+ Code* debug_break_slot() { return debug_break_slot_; }
+ Code** debug_break_slot_address() {
return &debug_break_slot_;
}
static const int kEstimatedNofDebugInfoEntries = 16;
static const int kEstimatedNofBreakPointsInFunction = 16;
+ // Passed to MakeWeak.
static void HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data);
friend class Debugger;
@@ -372,22 +374,22 @@ class Debug {
friend void CheckDebuggerUnloaded(bool check_functions); // In test-debug.cc
// Threading support.
- static char* ArchiveDebug(char* to);
- static char* RestoreDebug(char* from);
+ char* ArchiveDebug(char* to);
+ char* RestoreDebug(char* from);
static int ArchiveSpacePerThread();
- static void FreeThreadResources() { }
+ void FreeThreadResources() { }
// Mirror cache handling.
- static void ClearMirrorCache();
+ void ClearMirrorCache();
// Script cache handling.
- static void CreateScriptCache();
- static void DestroyScriptCache();
- static void AddScriptToScriptCache(Handle<Script> script);
- static Handle<FixedArray> GetLoadedScripts();
+ void CreateScriptCache();
+ void DestroyScriptCache();
+ void AddScriptToScriptCache(Handle<Script> script);
+ Handle<FixedArray> GetLoadedScripts();
// Garbage collection notifications.
- static void AfterGarbageCollection();
+ void AfterGarbageCollection();
// Code generator routines.
static void GenerateSlot(MacroAssembler* masm);
@@ -421,10 +423,11 @@ class Debug {
FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
// The top JS frame had been calling some C++ function. The return address
// gets patched automatically.
- FRAME_DROPPED_IN_DIRECT_CALL
+ FRAME_DROPPED_IN_DIRECT_CALL,
+ FRAME_DROPPED_IN_RETURN_CALL
};
- static void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
+ void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
FrameDropMode mode,
Object** restarter_frame_function_pointer);
@@ -445,35 +448,38 @@ class Debug {
static const bool kFrameDropperSupported;
private:
+ explicit Debug(Isolate* isolate);
+ ~Debug();
+
static bool CompileDebuggerScript(int index);
- static void ClearOneShot();
- static void ActivateStepIn(StackFrame* frame);
- static void ClearStepIn();
- static void ActivateStepOut(StackFrame* frame);
- static void ClearStepOut();
- static void ClearStepNext();
+ void ClearOneShot();
+ void ActivateStepIn(StackFrame* frame);
+ void ClearStepIn();
+ void ActivateStepOut(StackFrame* frame);
+ void ClearStepOut();
+ void ClearStepNext();
// Returns whether the compile succeeded.
- static void RemoveDebugInfo(Handle<DebugInfo> debug_info);
- static void SetAfterBreakTarget(JavaScriptFrame* frame);
- static Handle<Object> CheckBreakPoints(Handle<Object> break_point);
- static bool CheckBreakPoint(Handle<Object> break_point_object);
+ void RemoveDebugInfo(Handle<DebugInfo> debug_info);
+ void SetAfterBreakTarget(JavaScriptFrame* frame);
+ Handle<Object> CheckBreakPoints(Handle<Object> break_point);
+ bool CheckBreakPoint(Handle<Object> break_point_object);
// Global handle to debug context where all the debugger JavaScript code is
// loaded.
- static Handle<Context> debug_context_;
+ Handle<Context> debug_context_;
// Boolean state indicating whether any break points are set.
- static bool has_break_points_;
+ bool has_break_points_;
// Cache of all scripts in the heap.
- static ScriptCache* script_cache_;
+ ScriptCache* script_cache_;
// List of active debug info objects.
- static DebugInfoListNode* debug_info_list_;
+ DebugInfoListNode* debug_info_list_;
- static bool disable_break_;
- static bool break_on_exception_;
- static bool break_on_uncaught_exception_;
+ bool disable_break_;
+ bool break_on_exception_;
+ bool break_on_uncaught_exception_;
// Per-thread data.
class ThreadLocal {
@@ -526,20 +532,27 @@ class Debug {
};
// Storage location for registers when handling debug break calls
- static JSCallerSavedBuffer registers_;
- static ThreadLocal thread_local_;
- static void ThreadInit();
+ JSCallerSavedBuffer registers_;
+ ThreadLocal thread_local_;
+ void ThreadInit();
// Code to call for handling debug break on return.
- static Code* debug_break_return_;
+ Code* debug_break_return_;
// Code to call for handling debug break in debug break slots.
- static Code* debug_break_slot_;
+ Code* debug_break_slot_;
+
+ Isolate* isolate_;
+
+ friend class Isolate;
DISALLOW_COPY_AND_ASSIGN(Debug);
};
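Making the constructor and destructor private with `friend class Isolate` is how the patch enforces that a Debug (and, below, a Debugger) can only exist as part of an Isolate, never free-standing. The idiom in isolation (hypothetical names, not the real classes):

// Sketch of the friend-constructor idiom used for Debug/Debugger.
// Only Isolate may create or destroy Service.
class Isolate;

class Service {
 private:
  explicit Service(Isolate* isolate) : isolate_(isolate) {}
  ~Service() {}
  Isolate* isolate_;
  friend class Isolate;  // sole party allowed to manage the lifetime
};

class Isolate {
 public:
  Isolate() : service_(new Service(this)) {}
  ~Isolate() { delete service_; }
 private:
  Service* service_;
};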
+DECLARE_RUNTIME_FUNCTION(Object*, Debug_Break);
+
+
// Message delivered to the message handler callback. This is either a debugger
// event or the response to a command.
class MessageImpl: public v8::Debug::Message {
@@ -665,13 +678,14 @@ class MessageDispatchHelperThread;
// Mutex to CommandMessageQueue. Includes logging of all puts and gets.
class LockingCommandMessageQueue BASE_EMBEDDED {
public:
- explicit LockingCommandMessageQueue(int size);
+ LockingCommandMessageQueue(Logger* logger, int size);
~LockingCommandMessageQueue();
bool IsEmpty() const;
CommandMessage Get();
void Put(const CommandMessage& message);
void Clear();
private:
+ Logger* logger_;
CommandMessageQueue queue_;
Mutex* lock_;
DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
@@ -680,95 +694,97 @@ class LockingCommandMessageQueue BASE_EMBEDDED {
class Debugger {
public:
- static void DebugRequest(const uint16_t* json_request, int length);
-
- static Handle<Object> MakeJSObject(Vector<const char> constructor_name,
- int argc, Object*** argv,
- bool* caught_exception);
- static Handle<Object> MakeExecutionState(bool* caught_exception);
- static Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
- Handle<Object> break_points_hit,
- bool* caught_exception);
- static Handle<Object> MakeExceptionEvent(Handle<Object> exec_state,
- Handle<Object> exception,
- bool uncaught,
- bool* caught_exception);
- static Handle<Object> MakeNewFunctionEvent(Handle<Object> func,
- bool* caught_exception);
- static Handle<Object> MakeCompileEvent(Handle<Script> script,
- bool before,
- bool* caught_exception);
- static Handle<Object> MakeScriptCollectedEvent(int id,
- bool* caught_exception);
- static void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
- static void OnException(Handle<Object> exception, bool uncaught);
- static void OnBeforeCompile(Handle<Script> script);
+ ~Debugger();
+
+ void DebugRequest(const uint16_t* json_request, int length);
+
+ Handle<Object> MakeJSObject(Vector<const char> constructor_name,
+ int argc, Object*** argv,
+ bool* caught_exception);
+ Handle<Object> MakeExecutionState(bool* caught_exception);
+ Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
+ Handle<Object> break_points_hit,
+ bool* caught_exception);
+ Handle<Object> MakeExceptionEvent(Handle<Object> exec_state,
+ Handle<Object> exception,
+ bool uncaught,
+ bool* caught_exception);
+ Handle<Object> MakeNewFunctionEvent(Handle<Object> func,
+ bool* caught_exception);
+ Handle<Object> MakeCompileEvent(Handle<Script> script,
+ bool before,
+ bool* caught_exception);
+ Handle<Object> MakeScriptCollectedEvent(int id,
+ bool* caught_exception);
+ void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
+ void OnException(Handle<Object> exception, bool uncaught);
+ void OnBeforeCompile(Handle<Script> script);
enum AfterCompileFlags {
NO_AFTER_COMPILE_FLAGS,
SEND_WHEN_DEBUGGING
};
- static void OnAfterCompile(Handle<Script> script,
- AfterCompileFlags after_compile_flags);
- static void OnNewFunction(Handle<JSFunction> fun);
- static void OnScriptCollected(int id);
- static void ProcessDebugEvent(v8::DebugEvent event,
- Handle<JSObject> event_data,
- bool auto_continue);
- static void NotifyMessageHandler(v8::DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- bool auto_continue);
- static void SetEventListener(Handle<Object> callback, Handle<Object> data);
- static void SetMessageHandler(v8::Debug::MessageHandler2 handler);
- static void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
- int period);
- static void SetDebugMessageDispatchHandler(
+ void OnAfterCompile(Handle<Script> script,
+ AfterCompileFlags after_compile_flags);
+ void OnNewFunction(Handle<JSFunction> fun);
+ void OnScriptCollected(int id);
+ void ProcessDebugEvent(v8::DebugEvent event,
+ Handle<JSObject> event_data,
+ bool auto_continue);
+ void NotifyMessageHandler(v8::DebugEvent event,
+ Handle<JSObject> exec_state,
+ Handle<JSObject> event_data,
+ bool auto_continue);
+ void SetEventListener(Handle<Object> callback, Handle<Object> data);
+ void SetMessageHandler(v8::Debug::MessageHandler2 handler);
+ void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
+ int period);
+ void SetDebugMessageDispatchHandler(
v8::Debug::DebugMessageDispatchHandler handler,
bool provide_locker);
// Invoke the message handler function.
- static void InvokeMessageHandler(MessageImpl message);
+ void InvokeMessageHandler(MessageImpl message);
// Add a debugger command to the command queue.
- static void ProcessCommand(Vector<const uint16_t> command,
- v8::Debug::ClientData* client_data = NULL);
+ void ProcessCommand(Vector<const uint16_t> command,
+ v8::Debug::ClientData* client_data = NULL);
// Check whether there are commands in the command queue.
- static bool HasCommands();
+ bool HasCommands();
// Enqueue a debugger command to the command queue for event listeners.
- static void EnqueueDebugCommand(v8::Debug::ClientData* client_data = NULL);
+ void EnqueueDebugCommand(v8::Debug::ClientData* client_data = NULL);
- static Handle<Object> Call(Handle<JSFunction> fun,
- Handle<Object> data,
- bool* pending_exception);
+ Handle<Object> Call(Handle<JSFunction> fun,
+ Handle<Object> data,
+ bool* pending_exception);
// Start the debugger agent listening on the provided port.
- static bool StartAgent(const char* name, int port,
- bool wait_for_connection = false);
+ bool StartAgent(const char* name, int port,
+ bool wait_for_connection = false);
// Stop the debugger agent.
- static void StopAgent();
+ void StopAgent();
// Blocks until the agent has started listening for connections
- static void WaitForAgent();
+ void WaitForAgent();
- static void CallMessageDispatchHandler();
+ void CallMessageDispatchHandler();
- static Handle<Context> GetDebugContext();
+ Handle<Context> GetDebugContext();
// Unload the debugger if possible. Only called when no debugger is currently
// active.
- static void UnloadDebugger();
+ void UnloadDebugger();
friend void ForceUnloadDebugger(); // In test-debug.cc
- inline static bool EventActive(v8::DebugEvent event) {
+ inline bool EventActive(v8::DebugEvent event) {
ScopedLock with(debugger_access_);
// Check whether the message handler has been cleared.
if (debugger_unload_pending_) {
- if (Debug::debugger_entry() == NULL) {
+ if (isolate_->debug()->debugger_entry() == NULL) {
UnloadDebugger();
}
}
@@ -786,52 +802,58 @@ class Debugger {
return !compiling_natives_ && Debugger::IsDebuggerActive();
}
- static void set_compiling_natives(bool compiling_natives) {
+ void set_compiling_natives(bool compiling_natives) {
Debugger::compiling_natives_ = compiling_natives;
}
- static bool compiling_natives() { return Debugger::compiling_natives_; }
- static void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
- static bool is_loading_debugger() { return Debugger::is_loading_debugger_; }
+ bool compiling_natives() const { return compiling_natives_; }
+ void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
+ bool is_loading_debugger() const { return is_loading_debugger_; }
- static bool IsDebuggerActive();
+ bool IsDebuggerActive();
private:
- static void CallEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data);
- static void CallCEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- v8::Debug::ClientData* client_data);
- static void CallJSEventCallback(v8::DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data);
- static void ListenersChanged();
-
- static Mutex* debugger_access_; // Mutex guarding debugger variables.
- static Handle<Object> event_listener_; // Global handle to listener.
- static Handle<Object> event_listener_data_;
- static bool compiling_natives_; // Are we compiling natives?
- static bool is_loading_debugger_; // Are we loading the debugger?
- static bool never_unload_debugger_; // Can we unload the debugger?
- static v8::Debug::MessageHandler2 message_handler_;
- static bool debugger_unload_pending_; // Was message handler cleared?
- static v8::Debug::HostDispatchHandler host_dispatch_handler_;
- static Mutex* dispatch_handler_access_; // Mutex guarding dispatch handler.
- static v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
- static MessageDispatchHelperThread* message_dispatch_helper_thread_;
- static int host_dispatch_micros_;
-
- static DebuggerAgent* agent_;
+ explicit Debugger(Isolate* isolate);
+
+ void CallEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data,
+ v8::Debug::ClientData* client_data);
+ void CallCEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data,
+ v8::Debug::ClientData* client_data);
+ void CallJSEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data);
+ void ListenersChanged();
+
+ Mutex* debugger_access_; // Mutex guarding debugger variables.
+ Handle<Object> event_listener_; // Global handle to listener.
+ Handle<Object> event_listener_data_;
+ bool compiling_natives_; // Are we compiling natives?
+ bool is_loading_debugger_; // Are we loading the debugger?
+ bool never_unload_debugger_; // Can we unload the debugger?
+ v8::Debug::MessageHandler2 message_handler_;
+ bool debugger_unload_pending_; // Was message handler cleared?
+ v8::Debug::HostDispatchHandler host_dispatch_handler_;
+ Mutex* dispatch_handler_access_; // Mutex guarding dispatch handler.
+ v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
+ MessageDispatchHelperThread* message_dispatch_helper_thread_;
+ int host_dispatch_micros_;
+
+ DebuggerAgent* agent_;
static const int kQueueInitialSize = 4;
- static LockingCommandMessageQueue command_queue_;
- static Semaphore* command_received_; // Signaled for each command received.
+ LockingCommandMessageQueue command_queue_;
+ Semaphore* command_received_; // Signaled for each command received.
+ LockingCommandMessageQueue event_command_queue_;
- static LockingCommandMessageQueue event_command_queue_;
+ Isolate* isolate_;
friend class EnterDebugger;
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(Debugger);
};
@@ -842,38 +864,45 @@ class Debugger {
class EnterDebugger BASE_EMBEDDED {
public:
EnterDebugger()
- : prev_(Debug::debugger_entry()),
- has_js_frames_(!it_.done()) {
- ASSERT(prev_ != NULL || !Debug::is_interrupt_pending(PREEMPT));
- ASSERT(prev_ != NULL || !Debug::is_interrupt_pending(DEBUGBREAK));
+ : isolate_(Isolate::Current()),
+ prev_(isolate_->debug()->debugger_entry()),
+ it_(isolate_),
+ has_js_frames_(!it_.done()),
+ save_(isolate_) {
+ Debug* debug = isolate_->debug();
+ ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
+ ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
// Link recursive debugger entry.
- Debug::set_debugger_entry(this);
+ debug->set_debugger_entry(this);
// Store the previous break id and frame id.
- break_id_ = Debug::break_id();
- break_frame_id_ = Debug::break_frame_id();
+ break_id_ = debug->break_id();
+ break_frame_id_ = debug->break_frame_id();
// Create the new break info. If there are no JavaScript frames there is no
// break frame id.
if (has_js_frames_) {
- Debug::NewBreak(it_.frame()->id());
+ debug->NewBreak(it_.frame()->id());
} else {
- Debug::NewBreak(StackFrame::NO_ID);
+ debug->NewBreak(StackFrame::NO_ID);
}
// Make sure that debugger is loaded and enter the debugger context.
- load_failed_ = !Debug::Load();
+ load_failed_ = !debug->Load();
if (!load_failed_) {
// NOTE: the member variable save_ saves the previous context before
// this change.
- Top::set_context(*Debug::debug_context());
+ isolate_->set_context(*debug->debug_context());
}
}
~EnterDebugger() {
+ ASSERT(Isolate::Current() == isolate_);
+ Debug* debug = isolate_->debug();
+
// Restore to the previous break state.
- Debug::SetBreak(break_frame_id_, break_id_);
+ debug->SetBreak(break_frame_id_, break_id_);
// Check for leaving the debugger.
if (prev_ == NULL) {
@@ -881,43 +910,43 @@ class EnterDebugger BASE_EMBEDDED {
// pending exception as clearing the mirror cache calls back into
// JavaScript. This can happen if the v8::Debug::Call is used in which
// case the exception should end up in the calling code.
- if (!Top::has_pending_exception()) {
+ if (!isolate_->has_pending_exception()) {
// Try to avoid any pending debug break breaking in the clear mirror
// cache JavaScript code.
- if (StackGuard::IsDebugBreak()) {
- Debug::set_interrupts_pending(DEBUGBREAK);
- StackGuard::Continue(DEBUGBREAK);
+ if (isolate_->stack_guard()->IsDebugBreak()) {
+ debug->set_interrupts_pending(DEBUGBREAK);
+ isolate_->stack_guard()->Continue(DEBUGBREAK);
}
- Debug::ClearMirrorCache();
+ debug->ClearMirrorCache();
}
// Request preemption and debug break when leaving the last debugger entry
// if any of these were recorded while debugging.
- if (Debug::is_interrupt_pending(PREEMPT)) {
+ if (debug->is_interrupt_pending(PREEMPT)) {
// This re-scheduling of preemption is to avoid starvation in some
// debugging scenarios.
- Debug::clear_interrupt_pending(PREEMPT);
- StackGuard::Preempt();
+ debug->clear_interrupt_pending(PREEMPT);
+ isolate_->stack_guard()->Preempt();
}
- if (Debug::is_interrupt_pending(DEBUGBREAK)) {
- Debug::clear_interrupt_pending(DEBUGBREAK);
- StackGuard::DebugBreak();
+ if (debug->is_interrupt_pending(DEBUGBREAK)) {
+ debug->clear_interrupt_pending(DEBUGBREAK);
+ isolate_->stack_guard()->DebugBreak();
}
// If there are commands in the queue when leaving the debugger, request
// that these commands be processed.
- if (Debugger::HasCommands()) {
- StackGuard::DebugCommand();
+ if (isolate_->debugger()->HasCommands()) {
+ isolate_->stack_guard()->DebugCommand();
}
// If leaving the debugger with the debugger no longer active unload it.
- if (!Debugger::IsDebuggerActive()) {
- Debugger::UnloadDebugger();
+ if (!isolate_->debugger()->IsDebuggerActive()) {
+ isolate_->debugger()->UnloadDebugger();
}
}
// Leaving this debugger entry.
- Debug::set_debugger_entry(prev_);
+ debug->set_debugger_entry(prev_);
}
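EnterDebugger is a stack-allocated scope object: the constructor links a new debugger entry, and the destructor restores break state and, on leaving the outermost entry, replays any PREEMPT/DEBUGBREAK interrupts postponed while the debugger was active. A bare-bones model of that enter/leave discipline (hypothetical, heavily simplified):

// Minimal RAII model of EnterDebugger (hypothetical, not V8 code):
// constructor pushes an entry, destructor pops it and replays interrupts
// that were postponed while inside the debugger.
struct DebugState {
  void* entry = nullptr;
  int pending_interrupts = 0;  // bit set of postponed interrupts
};

class EnterDebuggerScope {
 public:
  explicit EnterDebuggerScope(DebugState* debug)
      : debug_(debug), prev_(debug->entry) {
    debug_->entry = this;  // link recursive debugger entry
  }
  ~EnterDebuggerScope() {
    if (prev_ == nullptr && debug_->pending_interrupts != 0) {
      // Leaving the outermost entry: re-request postponed interrupts here.
      debug_->pending_interrupts = 0;
    }
    debug_->entry = prev_;  // leaving this debugger entry
  }
 private:
  DebugState* debug_;
  void* prev_;
};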
// Check whether the debugger could be entered.
@@ -930,6 +959,7 @@ class EnterDebugger BASE_EMBEDDED {
inline Handle<Context> GetContext() { return save_.context(); }
private:
+ Isolate* isolate_;
EnterDebugger* prev_; // Previous debugger entry if entered recursively.
JavaScriptFrameIterator it_;
const bool has_js_frames_; // Were there any JavaScript frames?
@@ -943,15 +973,17 @@ class EnterDebugger BASE_EMBEDDED {
// Stack allocated class for disabling break.
class DisableBreak BASE_EMBEDDED {
public:
- explicit DisableBreak(bool disable_break) {
- prev_disable_break_ = Debug::disable_break();
- Debug::set_disable_break(disable_break);
+ explicit DisableBreak(bool disable_break) : isolate_(Isolate::Current()) {
+ prev_disable_break_ = isolate_->debug()->disable_break();
+ isolate_->debug()->set_disable_break(disable_break);
}
~DisableBreak() {
- Debug::set_disable_break(prev_disable_break_);
+ ASSERT(Isolate::Current() == isolate_);
+ isolate_->debug()->set_disable_break(prev_disable_break_);
}
private:
+ Isolate* isolate_;
// The previous state of the disable break used to restore the value when this
// object is destructed.
bool prev_disable_break_;
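DisableBreak likewise captures the isolate at construction and asserts in its destructor that it is destroyed against the same isolate; these scope objects must unwind on the thread that created them. A sketch of that isolate-pinning check (hypothetical CurrentIsolate(), standing in for the real thread-local accessor):

// Sketch of the isolate-pinning assertion added to scope destructors.
#include <cassert>

struct Isolate {};

Isolate* CurrentIsolate() {  // stand-in for Isolate::Current() TLS lookup
  static Isolate isolate;
  return &isolate;
}

class DisableBreakScope {
 public:
  DisableBreakScope() : isolate_(CurrentIsolate()) {}
  ~DisableBreakScope() {
    // Must be destroyed on the isolate (and thread) that created the scope.
    assert(CurrentIsolate() == isolate_);
  }
 private:
  Isolate* isolate_;
};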
@@ -976,17 +1008,18 @@ class Debug_Address {
return Debug_Address(Debug::k_restarter_frame_function_pointer);
}
- Address address() const {
+ Address address(Isolate* isolate) const {
+ Debug* debug = isolate->debug();
switch (id_) {
case Debug::k_after_break_target_address:
- return reinterpret_cast<Address>(Debug::after_break_target_address());
+ return reinterpret_cast<Address>(debug->after_break_target_address());
case Debug::k_debug_break_return_address:
- return reinterpret_cast<Address>(Debug::debug_break_return_address());
+ return reinterpret_cast<Address>(debug->debug_break_return_address());
case Debug::k_debug_break_slot_address:
- return reinterpret_cast<Address>(Debug::debug_break_slot_address());
+ return reinterpret_cast<Address>(debug->debug_break_slot_address());
case Debug::k_restarter_frame_function_pointer:
return reinterpret_cast<Address>(
- Debug::restarter_frame_function_pointer_address());
+ debug->restarter_frame_function_pointer_address());
default:
UNREACHABLE();
return NULL;
@@ -1002,7 +1035,7 @@ class Debug_Address {
// to do this via v8::Debug::HostDispatchHandler
class MessageDispatchHelperThread: public Thread {
public:
- MessageDispatchHelperThread();
+ explicit MessageDispatchHelperThread(Isolate* isolate);
~MessageDispatchHelperThread();
void Schedule();
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 9db812b3e..e2e8a65e2 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -39,30 +39,50 @@
namespace v8 {
namespace internal {
-LargeObjectChunk* Deoptimizer::eager_deoptimization_entry_code_ = NULL;
-LargeObjectChunk* Deoptimizer::lazy_deoptimization_entry_code_ = NULL;
-Deoptimizer* Deoptimizer::current_ = NULL;
-DeoptimizingCodeListNode* Deoptimizer::deoptimizing_code_list_ = NULL;
+DeoptimizerData::DeoptimizerData() {
+ eager_deoptimization_entry_code_ = NULL;
+ lazy_deoptimization_entry_code_ = NULL;
+ current_ = NULL;
+ deoptimizing_code_list_ = NULL;
+}
+
+DeoptimizerData::~DeoptimizerData() {
+ if (eager_deoptimization_entry_code_ != NULL) {
+ eager_deoptimization_entry_code_->Free(EXECUTABLE);
+ eager_deoptimization_entry_code_ = NULL;
+ }
+ if (lazy_deoptimization_entry_code_ != NULL) {
+ lazy_deoptimization_entry_code_->Free(EXECUTABLE);
+ lazy_deoptimization_entry_code_ = NULL;
+ }
+}
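The static Deoptimizer::Setup/TearDown pair (deleted further down in this file) is replaced by this destructor: the executable entry-code chunks are now owned by the per-isolate DeoptimizerData and die with it, instead of requiring an explicit global teardown call. A sketch of that ownership move (hypothetical chunk type, not V8 code):

// Hypothetical reduction of the TearDown-to-destructor move: executable
// chunks are owned by per-isolate data and freed when it is destroyed.
struct ExecutableChunk {
  void Free() { /* unmap the executable memory */ }
};

struct DeoptimizerData {
  ExecutableChunk* eager_entry_code = nullptr;
  ExecutableChunk* lazy_entry_code = nullptr;
  ~DeoptimizerData() {  // replaces the old static TearDown()
    if (eager_entry_code) { eager_entry_code->Free(); eager_entry_code = nullptr; }
    if (lazy_entry_code) { lazy_entry_code->Free(); lazy_entry_code = nullptr; }
  }
};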
Deoptimizer* Deoptimizer::New(JSFunction* function,
BailoutType type,
unsigned bailout_id,
Address from,
- int fp_to_sp_delta) {
- Deoptimizer* deoptimizer =
- new Deoptimizer(function, type, bailout_id, from, fp_to_sp_delta);
- ASSERT(current_ == NULL);
- current_ = deoptimizer;
+ int fp_to_sp_delta,
+ Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ Deoptimizer* deoptimizer = new Deoptimizer(isolate,
+ function,
+ type,
+ bailout_id,
+ from,
+ fp_to_sp_delta);
+ ASSERT(isolate->deoptimizer_data()->current_ == NULL);
+ isolate->deoptimizer_data()->current_ = deoptimizer;
return deoptimizer;
}
-Deoptimizer* Deoptimizer::Grab() {
- Deoptimizer* result = current_;
+Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ Deoptimizer* result = isolate->deoptimizer_data()->current_;
ASSERT(result != NULL);
result->DeleteFrameDescriptions();
- current_ = NULL;
+ isolate->deoptimizer_data()->current_ = NULL;
return result;
}
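New and Grab implement a single-slot handoff through the isolate: New parks the freshly built Deoptimizer in deoptimizer_data()->current_, and the generated deoptimization entry later Grabs it back, asserting the slot was occupied. A toy model of that slot protocol (hypothetical types, not V8 code):

// Toy model of the New/Grab handoff slot.
#include <cassert>

struct Deoptimizer {};
struct IsolateData { Deoptimizer* current = nullptr; };

Deoptimizer* New(IsolateData* data) {
  Deoptimizer* d = new Deoptimizer();
  assert(data->current == nullptr);  // slot must be free
  data->current = d;                 // park it for the entry stub
  return d;
}

Deoptimizer* Grab(IsolateData* data) {
  Deoptimizer* result = data->current;
  assert(result != nullptr);  // New must have run first
  data->current = nullptr;    // the slot is single-use per deopt
  return result;
}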
@@ -155,7 +175,7 @@ void Deoptimizer::VisitAllOptimizedFunctions(
AssertNoAllocation no_allocation;
// Run through the list of all global contexts and deoptimize.
- Object* global = Heap::global_contexts_list();
+ Object* global = Isolate::Current()->heap()->global_contexts_list();
while (!global->IsUndefined()) {
VisitAllOptimizedFunctionsForGlobalObject(Context::cast(global)->global(),
visitor);
@@ -170,7 +190,7 @@ void Deoptimizer::HandleWeakDeoptimizedCode(
reinterpret_cast<DeoptimizingCodeListNode*>(data);
RemoveDeoptimizingCode(*node->code());
#ifdef DEBUG
- node = Deoptimizer::deoptimizing_code_list_;
+ node = Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
while (node != NULL) {
ASSERT(node != reinterpret_cast<DeoptimizingCodeListNode*>(data));
node = node->next();
@@ -184,12 +204,14 @@ void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
}
-Deoptimizer::Deoptimizer(JSFunction* function,
+Deoptimizer::Deoptimizer(Isolate* isolate,
+ JSFunction* function,
BailoutType type,
unsigned bailout_id,
Address from,
int fp_to_sp_delta)
- : function_(function),
+ : isolate_(isolate),
+ function_(function),
bailout_id_(bailout_id),
bailout_type_(type),
from_(from),
@@ -227,7 +249,7 @@ Deoptimizer::Deoptimizer(JSFunction* function,
ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
ASSERT(!optimized_code_->contains(from));
}
- ASSERT(Heap::allow_allocation(false));
+ ASSERT(HEAP->allow_allocation(false));
unsigned size = ComputeInputFrameSize();
input_ = new(size) FrameDescription(size, function);
}
@@ -246,7 +268,7 @@ void Deoptimizer::DeleteFrameDescriptions() {
delete[] output_;
input_ = NULL;
output_ = NULL;
- ASSERT(!Heap::allow_allocation(true));
+ ASSERT(!HEAP->allow_allocation(true));
}
@@ -254,16 +276,17 @@ Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
ASSERT(id >= 0);
if (id >= kNumberOfEntries) return NULL;
LargeObjectChunk* base = NULL;
+ DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
- if (eager_deoptimization_entry_code_ == NULL) {
- eager_deoptimization_entry_code_ = CreateCode(type);
+ if (data->eager_deoptimization_entry_code_ == NULL) {
+ data->eager_deoptimization_entry_code_ = CreateCode(type);
}
- base = eager_deoptimization_entry_code_;
+ base = data->eager_deoptimization_entry_code_;
} else {
- if (lazy_deoptimization_entry_code_ == NULL) {
- lazy_deoptimization_entry_code_ = CreateCode(type);
+ if (data->lazy_deoptimization_entry_code_ == NULL) {
+ data->lazy_deoptimization_entry_code_ = CreateCode(type);
}
- base = lazy_deoptimization_entry_code_;
+ base = data->lazy_deoptimization_entry_code_;
}
return
static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
@@ -272,10 +295,11 @@ Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
LargeObjectChunk* base = NULL;
+ DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
- base = eager_deoptimization_entry_code_;
+ base = data->eager_deoptimization_entry_code_;
} else {
- base = lazy_deoptimization_entry_code_;
+ base = data->lazy_deoptimization_entry_code_;
}
if (base == NULL ||
addr < base->GetStartAddress() ||
@@ -289,23 +313,6 @@ int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
}
-void Deoptimizer::Setup() {
- // Do nothing yet.
-}
-
-
-void Deoptimizer::TearDown() {
- if (eager_deoptimization_entry_code_ != NULL) {
- eager_deoptimization_entry_code_->Free(EXECUTABLE);
- eager_deoptimization_entry_code_ = NULL;
- }
- if (lazy_deoptimization_entry_code_ != NULL) {
- lazy_deoptimization_entry_code_->Free(EXECUTABLE);
- lazy_deoptimization_entry_code_ = NULL;
- }
-}
-
-
int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
unsigned id,
SharedFunctionInfo* shared) {
@@ -332,9 +339,10 @@ int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
}
-int Deoptimizer::GetDeoptimizedCodeCount() {
+int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
int length = 0;
- DeoptimizingCodeListNode* node = Deoptimizer::deoptimizing_code_list_;
+ DeoptimizingCodeListNode* node =
+ isolate->deoptimizer_data()->deoptimizing_code_list_;
while (node != NULL) {
length++;
node = node->next();
@@ -411,7 +419,7 @@ void Deoptimizer::DoComputeOutputFrames() {
void Deoptimizer::MaterializeHeapNumbers() {
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
- Handle<Object> num = Factory::NewNumber(d.value());
+ Handle<Object> num = isolate_->factory()->NewNumber(d.value());
if (FLAG_trace_deopt) {
PrintF("Materializing a new heap number %p [%e] in slot %p\n",
reinterpret_cast<void*>(*num),
@@ -593,10 +601,11 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
- Heap::arguments_marker()->ShortPrint();
+ isolate_->heap()->arguments_marker()->ShortPrint();
PrintF(" ; arguments object\n");
}
- intptr_t value = reinterpret_cast<intptr_t>(Heap::arguments_marker());
+ intptr_t value = reinterpret_cast<intptr_t>(
+ isolate_->heap()->arguments_marker());
output_[frame_index]->SetFrameSlot(output_offset, value);
return;
}
@@ -883,25 +892,27 @@ LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
// references. This is fine because the deoptimizer's code section
// isn't meant to be serialized at all.
ASSERT(!Serializer::enabled());
- bool old_debug_code = FLAG_debug_code;
- FLAG_debug_code = false;
- MacroAssembler masm(NULL, 16 * KB);
+ MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
+ masm.set_emit_debug_code(false);
GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(desc.reloc_size == 0);
LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
+ if (chunk == NULL) {
+ V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
+ }
memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
- FLAG_debug_code = old_debug_code;
return chunk;
}
Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) {
- DeoptimizingCodeListNode* node = Deoptimizer::deoptimizing_code_list_;
+ DeoptimizingCodeListNode* node =
+ Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
while (node != NULL) {
if (node->code()->contains(addr)) return *node->code();
node = node->next();
@@ -911,15 +922,16 @@ Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) {
void Deoptimizer::RemoveDeoptimizingCode(Code* code) {
- ASSERT(deoptimizing_code_list_ != NULL);
+ DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
+ ASSERT(data->deoptimizing_code_list_ != NULL);
// Run through the code objects to find this one and remove it.
DeoptimizingCodeListNode* prev = NULL;
- DeoptimizingCodeListNode* current = deoptimizing_code_list_;
+ DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
while (current != NULL) {
if (*current->code() == code) {
// Unlink from list. If prev is NULL we are looking at the first element.
if (prev == NULL) {
- deoptimizing_code_list_ = current->next();
+ data->deoptimizing_code_list_ = current->next();
} else {
prev->set_next(current->next());
}
@@ -1006,7 +1018,8 @@ int32_t TranslationIterator::Next() {
Handle<ByteArray> TranslationBuffer::CreateByteArray() {
int length = contents_.length();
- Handle<ByteArray> result = Factory::NewByteArray(length, TENURED);
+ Handle<ByteArray> result =
+ Isolate::Current()->factory()->NewByteArray(length, TENURED);
memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
return result;
}
@@ -1094,7 +1107,7 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
}
-#ifdef OBJECT_PRINT
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
const char* Translation::StringFor(Opcode opcode) {
switch (opcode) {
@@ -1129,16 +1142,117 @@ const char* Translation::StringFor(Opcode opcode) {
DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) {
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
// Globalize the code object and make it weak.
- code_ = Handle<Code>::cast((GlobalHandles::Create(code)));
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(code_.location()),
- this,
- Deoptimizer::HandleWeakDeoptimizedCode);
+ code_ = Handle<Code>::cast(global_handles->Create(code));
+ global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
+ this,
+ Deoptimizer::HandleWeakDeoptimizedCode);
}
DeoptimizingCodeListNode::~DeoptimizingCodeListNode() {
- GlobalHandles::Destroy(reinterpret_cast<Object**>(code_.location()));
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ global_handles->Destroy(reinterpret_cast<Object**>(code_.location()));
+}
+
+
+// We can't intermix stack decoding and allocations because the
+// deoptimization infrastructure is not GC safe.
+// Thus we build a temporary structure in malloced space.
+SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
+ DeoptimizationInputData* data,
+ JavaScriptFrame* frame) {
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+
+ switch (opcode) {
+ case Translation::BEGIN:
+ case Translation::FRAME:
+ // Peeled off before getting here.
+ break;
+
+ case Translation::ARGUMENTS_OBJECT:
+ // This can only be emitted for local slots, not for argument slots.
+ break;
+
+ case Translation::REGISTER:
+ case Translation::INT32_REGISTER:
+ case Translation::DOUBLE_REGISTER:
+ case Translation::DUPLICATE:
+ // We are at a safepoint which corresponds to a call. All registers
+ // are saved by the caller, so there are no live registers at this
+ // point. Thus these translation commands should not be used.
+ break;
+
+ case Translation::STACK_SLOT: {
+ int slot_index = iterator->Next();
+ Address slot_addr = SlotAddress(frame, slot_index);
+ return SlotRef(slot_addr, SlotRef::TAGGED);
+ }
+
+ case Translation::INT32_STACK_SLOT: {
+ int slot_index = iterator->Next();
+ Address slot_addr = SlotAddress(frame, slot_index);
+ return SlotRef(slot_addr, SlotRef::INT32);
+ }
+
+ case Translation::DOUBLE_STACK_SLOT: {
+ int slot_index = iterator->Next();
+ Address slot_addr = SlotAddress(frame, slot_index);
+ return SlotRef(slot_addr, SlotRef::DOUBLE);
+ }
+
+ case Translation::LITERAL: {
+ int literal_index = iterator->Next();
+ return SlotRef(data->LiteralArray()->get(literal_index));
+ }
+ }
+
+ UNREACHABLE();
+ return SlotRef();
+}
+
+
+void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame,
+ int inlined_frame_index,
+ Vector<SlotRef>* args_slots) {
+ AssertNoAllocation no_gc;
+ int deopt_index = AstNode::kNoNumber;
+ DeoptimizationInputData* data =
+ static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
+ TranslationIterator it(data->TranslationByteArray(),
+ data->TranslationIndex(deopt_index)->value());
+ Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+ ASSERT(opcode == Translation::BEGIN);
+ int frame_count = it.Next();
+ USE(frame_count);
+ ASSERT(frame_count > inlined_frame_index);
+ int frames_to_skip = inlined_frame_index;
+ while (true) {
+ opcode = static_cast<Translation::Opcode>(it.Next());
+ // Skip over operands to advance to the next opcode.
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
+ if (opcode == Translation::FRAME) {
+ if (frames_to_skip == 0) {
+ // We reached the frame corresponding to the inlined function
+ // in question. Process the translation commands for the
+ // arguments.
+ //
+ // Skip the translation command for the receiver.
+ it.Skip(Translation::NumberOfOperandsFor(
+ static_cast<Translation::Opcode>(it.Next())));
+ // Compute slots for arguments.
+ for (int i = 0; i < args_slots->length(); ++i) {
+ (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
+ }
+ return;
+ }
+ frames_to_skip--;
+ }
+ }
+
+ UNREACHABLE();
}
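
Every hunk in this file applies the same transformation: process-global static state on Deoptimizer (the entry-code chunks, current_, the deoptimizing-code list) moves into a DeoptimizerData instance owned by the isolate, with the old Setup()/TearDown() pair collapsing into that object's constructor and destructor. Below is a minimal, self-contained sketch of the pattern; all names are simplified stand-ins, not the real V8 declarations:

    #include <cassert>
    #include <cstddef>

    struct Worker;

    // The per-isolate data bag; its destructor replaces an explicit TearDown().
    struct WorkerData {
      WorkerData() : current(NULL) {}
      Worker* current;  // was: static Worker* current_;
    };

    struct Isolate {
      WorkerData* worker_data() { return &data_; }
     private:
      WorkerData data_;
    };

    struct Worker {
      // Callers now thread the isolate through instead of touching a global.
      static Worker* New(Isolate* isolate) {
        Worker* w = new Worker();
        assert(isolate->worker_data()->current == NULL);
        isolate->worker_data()->current = w;
        return w;
      }
      static Worker* Grab(Isolate* isolate) {
        Worker* w = isolate->worker_data()->current;
        assert(w != NULL);
        isolate->worker_data()->current = NULL;
        return w;
      }
    };

Two isolates each get their own WorkerData, so the ASSERT on current_ being NULL now holds per isolate rather than per process.
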
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index f4cd409be..91030e2d8 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -30,6 +30,7 @@
#include "v8.h"
+#include "allocation.h"
#include "macro-assembler.h"
#include "zone-inl.h"
@@ -72,6 +73,31 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
};
+class Deoptimizer;
+
+
+class DeoptimizerData {
+ public:
+ DeoptimizerData();
+ ~DeoptimizerData();
+
+ private:
+ LargeObjectChunk* eager_deoptimization_entry_code_;
+ LargeObjectChunk* lazy_deoptimization_entry_code_;
+ Deoptimizer* current_;
+
+ // List of deoptimized code objects that still have references from active
+ // stack frames. These code objects are needed by the deoptimizer when
+ // deoptimizing a frame for which the code object for the function has been
+ // changed from the code present when deoptimizing was done.
+ DeoptimizingCodeListNode* deoptimizing_code_list_;
+
+ friend class Deoptimizer;
+
+ DISALLOW_COPY_AND_ASSIGN(DeoptimizerData);
+};
+
+
class Deoptimizer : public Malloced {
public:
enum BailoutType {
@@ -86,8 +112,9 @@ class Deoptimizer : public Malloced {
BailoutType type,
unsigned bailout_id,
Address from,
- int fp_to_sp_delta);
- static Deoptimizer* Grab();
+ int fp_to_sp_delta,
+ Isolate* isolate);
+ static Deoptimizer* Grab(Isolate* isolate);
// Makes sure that there is enough room in the relocation
// information of a code object to perform lazy deoptimization
@@ -153,9 +180,6 @@ class Deoptimizer : public Malloced {
unsigned node_id,
SharedFunctionInfo* shared);
- static void Setup();
- static void TearDown();
-
// Code generation support.
static int input_offset() { return OFFSET_OF(Deoptimizer, input_); }
static int output_count_offset() {
@@ -163,7 +187,7 @@ class Deoptimizer : public Malloced {
}
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
- static int GetDeoptimizedCodeCount();
+ static int GetDeoptimizedCodeCount(Isolate* isolate);
static const int kNotDeoptimizationEntry = -1;
@@ -204,7 +228,8 @@ class Deoptimizer : public Malloced {
private:
static const int kNumberOfEntries = 4096;
- Deoptimizer(JSFunction* function,
+ Deoptimizer(Isolate* isolate,
+ JSFunction* function,
BailoutType type,
unsigned bailout_id,
Address from,
@@ -244,16 +269,7 @@ class Deoptimizer : public Malloced {
static Code* FindDeoptimizingCodeFromAddress(Address addr);
static void RemoveDeoptimizingCode(Code* code);
- static LargeObjectChunk* eager_deoptimization_entry_code_;
- static LargeObjectChunk* lazy_deoptimization_entry_code_;
- static Deoptimizer* current_;
-
- // List of deoptimized code which still have references from active stack
- // frames. These code objects are needed by the deoptimizer when deoptimizing
- // a frame for which the code object for the function function has been
- // changed from the code present when deoptimizing was done.
- static DeoptimizingCodeListNode* deoptimizing_code_list_;
-
+ Isolate* isolate_;
JSFunction* function_;
Code* optimized_code_;
unsigned bailout_id_;
@@ -283,7 +299,9 @@ class FrameDescription {
JSFunction* function);
void* operator new(size_t size, uint32_t frame_size) {
- return malloc(size + frame_size);
+ // Subtract kPointerSize, as the member frame_content_ already supplies
+ // the first element of the area used to store the frame.
+ return malloc(size + frame_size - kPointerSize);
}
void operator delete(void* description) {
@@ -367,7 +385,7 @@ class FrameDescription {
}
static int frame_content_offset() {
- return sizeof(FrameDescription);
+ return OFFSET_OF(FrameDescription, frame_content_);
}
private:
@@ -386,6 +404,10 @@ class FrameDescription {
// deoptimizing.
intptr_t continuation_;
+ // This must be at the end of the object, as the object is allocated larger
+ // than its definition indicates in order to extend this array.
+ intptr_t frame_content_[1];
+
intptr_t* GetFrameSlotPointer(unsigned offset) {
ASSERT(offset < frame_size_);
return reinterpret_cast<intptr_t*>(
@@ -473,7 +495,7 @@ class Translation BASE_EMBEDDED {
static int NumberOfOperandsFor(Opcode opcode);
-#ifdef OBJECT_PRINT
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
static const char* StringFor(Opcode opcode);
#endif
@@ -503,6 +525,78 @@ class DeoptimizingCodeListNode : public Malloced {
};
+class SlotRef BASE_EMBEDDED {
+ public:
+ enum SlotRepresentation {
+ UNKNOWN,
+ TAGGED,
+ INT32,
+ DOUBLE,
+ LITERAL
+ };
+
+ SlotRef()
+ : addr_(NULL), representation_(UNKNOWN) { }
+
+ SlotRef(Address addr, SlotRepresentation representation)
+ : addr_(addr), representation_(representation) { }
+
+ explicit SlotRef(Object* literal)
+ : literal_(literal), representation_(LITERAL) { }
+
+ Handle<Object> GetValue() {
+ switch (representation_) {
+ case TAGGED:
+ return Handle<Object>(Memory::Object_at(addr_));
+
+ case INT32: {
+ int value = Memory::int32_at(addr_);
+ if (Smi::IsValid(value)) {
+ return Handle<Object>(Smi::FromInt(value));
+ } else {
+ return Isolate::Current()->factory()->NewNumberFromInt(value);
+ }
+ }
+
+ case DOUBLE: {
+ double value = Memory::double_at(addr_);
+ return Isolate::Current()->factory()->NewNumber(value);
+ }
+
+ case LITERAL:
+ return literal_;
+
+ default:
+ UNREACHABLE();
+ return Handle<Object>::null();
+ }
+ }
+
+ static void ComputeSlotMappingForArguments(JavaScriptFrame* frame,
+ int inlined_frame_index,
+ Vector<SlotRef>* args_slots);
+
+ private:
+ Address addr_;
+ Handle<Object> literal_;
+ SlotRepresentation representation_;
+
+ static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
+ if (slot_index >= 0) {
+ const int offset = JavaScriptFrameConstants::kLocal0Offset;
+ return frame->fp() + offset - (slot_index * kPointerSize);
+ } else {
+ const int offset = JavaScriptFrameConstants::kLastParameterOffset;
+ return frame->fp() + offset - ((slot_index + 1) * kPointerSize);
+ }
+ }
+
+ static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
+ DeoptimizationInputData* data,
+ JavaScriptFrame* frame);
+};
+
+
} } // namespace v8::internal
#endif // V8_DEOPTIMIZER_H_
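
The FrameDescription change above is the classic over-allocation idiom: the final member names the first slot of a variable-length tail, so the placement operator new adds the requested frame size minus the one element already counted in sizeof, and frame_content_offset() switches from sizeof(FrameDescription) to OFFSET_OF because the slots now begin inside the object. A minimal sketch of the idiom, with a hypothetical Frame type standing in for FrameDescription:

    #include <stdint.h>
    #include <stdlib.h>

    struct Frame {
      // frame_content_[0] is already included in sizeof(Frame), so only the
      // remaining frame_size - sizeof(intptr_t) bytes need to be added.
      void* operator new(size_t size, uint32_t frame_size) {
        return malloc(size + frame_size - sizeof(intptr_t));
      }
      void operator delete(void* description) { free(description); }

      uint32_t frame_size_;
      intptr_t frame_content_[1];  // must remain the last member
    };

    // Usage: allocates room for 32 slots after the fixed header.
    //   Frame* f = new (32 * sizeof(intptr_t)) Frame;
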
diff --git a/deps/v8/src/disasm.h b/deps/v8/src/disasm.h
index 6ecd1c8f3..f7f2d4120 100644
--- a/deps/v8/src/disasm.h
+++ b/deps/v8/src/disasm.h
@@ -44,6 +44,9 @@ class NameConverter {
virtual const char* NameOfAddress(byte* addr) const;
virtual const char* NameOfConstant(byte* addr) const;
virtual const char* NameInCode(byte* addr) const;
+
+ protected:
+ v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
};
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 243abf079..368c3a89c 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -28,7 +28,7 @@
#include "v8.h"
#include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
#include "deoptimizer.h"
#include "disasm.h"
@@ -65,24 +65,24 @@ class V8NameConverter: public disasm::NameConverter {
Code* code() const { return code_; }
private:
Code* code_;
+
+ EmbeddedVector<char, 128> v8_buffer_;
};
const char* V8NameConverter::NameOfAddress(byte* pc) const {
- static v8::internal::EmbeddedVector<char, 128> buffer;
-
- const char* name = Builtins::Lookup(pc);
+ const char* name = Isolate::Current()->builtins()->Lookup(pc);
if (name != NULL) {
- OS::SNPrintF(buffer, "%s (%p)", name, pc);
- return buffer.start();
+ OS::SNPrintF(v8_buffer_, "%s (%p)", name, pc);
+ return v8_buffer_.start();
}
if (code_ != NULL) {
int offs = static_cast<int>(pc - code_->instruction_start());
// print as code offset, if it seems reasonable
if (0 <= offs && offs < code_->instruction_size()) {
- OS::SNPrintF(buffer, "%d (%p)", offs, pc);
- return buffer.start();
+ OS::SNPrintF(v8_buffer_, "%d (%p)", offs, pc);
+ return v8_buffer_.start();
}
}
@@ -115,6 +115,7 @@ static int DecodeIt(FILE* f,
NoHandleAllocation ha;
AssertNoAllocation no_alloc;
ExternalReferenceEncoder ref_encoder;
+ Heap* heap = HEAP;
v8::internal::EmbeddedVector<char, 128> decode_buffer;
v8::internal::EmbeddedVector<char, kOutBufferSize> out_buffer;
@@ -256,8 +257,8 @@ static int DecodeIt(FILE* f,
} else if (kind == Code::STUB) {
// Reverse lookup required as the minor key cannot be retrieved
// from the code object.
- Object* obj = Heap::code_stubs()->SlowReverseLookup(code);
- if (obj != Heap::undefined_value()) {
+ Object* obj = heap->code_stubs()->SlowReverseLookup(code);
+ if (obj != heap->undefined_value()) {
ASSERT(obj->IsSmi());
// Get the STUB key and extract major and minor key.
uint32_t key = Smi::cast(obj)->value();
@@ -281,7 +282,11 @@ static int DecodeIt(FILE* f,
} else {
out.AddFormatted(" %s", Code::Kind2String(kind));
}
- } else if (rmode == RelocInfo::RUNTIME_ENTRY) {
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+ out.AddFormatted(" (id = %d)", static_cast<int>(relocinfo.data()));
+ }
+ } else if (rmode == RelocInfo::RUNTIME_ENTRY &&
+ Isolate::Current()->deoptimizer_data() != NULL) {
// A runtime entry relocinfo might be a deoptimization bailout.
Address addr = relocinfo.target_address();
int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
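
The disasm.h and disassembler.cc hunks make the same repair: a function-local static buffer is shared by every caller in the process, which breaks once multiple isolates (or threads) can disassemble concurrently, so the scratch space becomes a member of the converter object (tmp_buffer_, v8_buffer_). A rough sketch of the before/after shape, with placeholder names:

    #include <stdio.h>

    class Converter {
     public:
      // Before: `static char buffer[128]` inside this function meant every
      // caller in the process shared, and could clobber, the same bytes.
      const char* NameOfAddress(const void* addr) {
        snprintf(buffer_, sizeof(buffer_), "%p", addr);
        return buffer_;  // valid until the next call on this instance
      }

     private:
      char buffer_[128];  // per-instance scratch buffer
    };
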
diff --git a/deps/v8/src/disassembler.h b/deps/v8/src/disassembler.h
index 68a338d18..4a87dca67 100644
--- a/deps/v8/src/disassembler.h
+++ b/deps/v8/src/disassembler.h
@@ -28,6 +28,8 @@
#ifndef V8_DISASSEMBLER_H_
#define V8_DISASSEMBLER_H_
+#include "allocation.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index de8f0a466..990eca2e5 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,7 +31,7 @@
#include "api.h"
#include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
#include "runtime-profiler.h"
#include "simulator.h"
@@ -42,14 +42,39 @@ namespace v8 {
namespace internal {
+StackGuard::StackGuard()
+ : isolate_(NULL) {
+}
+
+
+void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
+ ASSERT(isolate_ != NULL);
+ // Ignore attempts to interrupt when interrupts are postponed.
+ if (should_postpone_interrupts(lock)) return;
+ thread_local_.jslimit_ = kInterruptLimit;
+ thread_local_.climit_ = kInterruptLimit;
+ isolate_->heap()->SetStackLimits();
+}
+
+
+void StackGuard::reset_limits(const ExecutionAccess& lock) {
+ ASSERT(isolate_ != NULL);
+ thread_local_.jslimit_ = thread_local_.real_jslimit_;
+ thread_local_.climit_ = thread_local_.real_climit_;
+ isolate_->heap()->SetStackLimits();
+}
+
+
static Handle<Object> Invoke(bool construct,
Handle<JSFunction> func,
Handle<Object> receiver,
int argc,
Object*** args,
bool* has_pending_exception) {
+ Isolate* isolate = func->GetIsolate();
+
// Entering JavaScript.
- VMState state(JS);
+ VMState state(isolate, JS);
// Placeholder for return value.
MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
@@ -85,7 +110,7 @@ static Handle<Object> Invoke(bool construct,
{
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
- SaveContext save;
+ SaveContext save(isolate);
NoHandleAllocation na;
JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
@@ -103,35 +128,41 @@ static Handle<Object> Invoke(bool construct,
// Update the pending exception flag and return the value.
*has_pending_exception = value->IsException();
- ASSERT(*has_pending_exception == Top::has_pending_exception());
+ ASSERT(*has_pending_exception == Isolate::Current()->has_pending_exception());
if (*has_pending_exception) {
- Top::ReportPendingMessages();
- if (Top::pending_exception() == Failure::OutOfMemoryException()) {
- if (!HandleScopeImplementer::instance()->ignore_out_of_memory()) {
+ isolate->ReportPendingMessages();
+ if (isolate->pending_exception() == Failure::OutOfMemoryException()) {
+ if (!isolate->handle_scope_implementer()->ignore_out_of_memory()) {
V8::FatalProcessOutOfMemory("JS", true);
}
}
return Handle<Object>();
} else {
- Top::clear_pending_message();
+ isolate->clear_pending_message();
}
- return Handle<Object>(value->ToObjectUnchecked());
+ return Handle<Object>(value->ToObjectUnchecked(), isolate);
}
-Handle<Object> Execution::Call(Handle<JSFunction> func,
+Handle<Object> Execution::Call(Handle<Object> callable,
Handle<Object> receiver,
int argc,
Object*** args,
bool* pending_exception) {
+ if (!callable->IsJSFunction()) {
+ callable = TryGetFunctionDelegate(callable, pending_exception);
+ if (*pending_exception) return callable;
+ }
+ Handle<JSFunction> func = Handle<JSFunction>::cast(callable);
return Invoke(false, func, receiver, argc, args, pending_exception);
}
Handle<Object> Execution::New(Handle<JSFunction> func, int argc,
Object*** args, bool* pending_exception) {
- return Invoke(true, func, Top::global(), argc, args, pending_exception);
+ return Invoke(true, func, Isolate::Current()->global(), argc, args,
+ pending_exception);
}
@@ -153,59 +184,71 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
if (*caught_exception) {
ASSERT(catcher.HasCaught());
- ASSERT(Top::has_pending_exception());
- ASSERT(Top::external_caught_exception());
- if (Top::pending_exception() == Heap::termination_exception()) {
- result = Factory::termination_exception();
+ Isolate* isolate = Isolate::Current();
+ ASSERT(isolate->has_pending_exception());
+ ASSERT(isolate->external_caught_exception());
+ if (isolate->pending_exception() ==
+ isolate->heap()->termination_exception()) {
+ result = isolate->factory()->termination_exception();
} else {
result = v8::Utils::OpenHandle(*catcher.Exception());
}
- Top::OptionalRescheduleException(true);
+ isolate->OptionalRescheduleException(true);
}
- ASSERT(!Top::has_pending_exception());
- ASSERT(!Top::external_caught_exception());
+ ASSERT(!Isolate::Current()->has_pending_exception());
+ ASSERT(!Isolate::Current()->external_caught_exception());
return result;
}
Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
ASSERT(!object->IsJSFunction());
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
// If you return a function from here, it will be called when an
// attempt is made to call the given object as a function.
- // Regular expressions can be called as functions in both Firefox
- // and Safari so we allow it too.
- if (object->IsJSRegExp()) {
- Handle<String> exec = Factory::exec_symbol();
- // TODO(lrn): Bug 617. We should use the default function here, not the
- // one on the RegExp object.
- Object* exec_function;
- { MaybeObject* maybe_exec_function = object->GetProperty(*exec);
- // This can lose an exception, but the alternative is to put a failure
- // object in a handle, which is not GC safe.
- if (!maybe_exec_function->ToObject(&exec_function)) {
- return Factory::undefined_value();
- }
- }
- return Handle<Object>(exec_function);
+ // Objects created through the API can have an instance-call handler
+ // that should be used when calling the object as a function.
+ if (object->IsHeapObject() &&
+ HeapObject::cast(*object)->map()->has_instance_call_handler()) {
+ return Handle<JSFunction>(
+ isolate->global_context()->call_as_function_delegate());
}
+ return factory->undefined_value();
+}
+
+
+Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
+ bool* has_pending_exception) {
+ ASSERT(!object->IsJSFunction());
+ Isolate* isolate = Isolate::Current();
+
// Objects created through the API can have an instance-call handler
// that should be used when calling the object as a function.
if (object->IsHeapObject() &&
HeapObject::cast(*object)->map()->has_instance_call_handler()) {
return Handle<JSFunction>(
- Top::global_context()->call_as_function_delegate());
+ isolate->global_context()->call_as_function_delegate());
}
- return Factory::undefined_value();
+ // If the object doesn't have an instance-call handler, we should
+ // throw a non-callable exception.
+ i::Handle<i::Object> error_obj = isolate->factory()->NewTypeError(
+ "called_non_callable", i::HandleVector<i::Object>(&object, 1));
+ isolate->Throw(*error_obj);
+ *has_pending_exception = true;
+
+ return isolate->factory()->undefined_value();
}
Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
ASSERT(!object->IsJSFunction());
+ Isolate* isolate = Isolate::Current();
// If you return a function from here, it will be called when an
// attempt is made to call the given object as a constructor.
@@ -215,26 +258,50 @@ Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
if (object->IsHeapObject() &&
HeapObject::cast(*object)->map()->has_instance_call_handler()) {
return Handle<JSFunction>(
- Top::global_context()->call_as_constructor_delegate());
+ isolate->global_context()->call_as_constructor_delegate());
}
- return Factory::undefined_value();
+ return isolate->factory()->undefined_value();
}
-// Static state for stack guards.
-StackGuard::ThreadLocal StackGuard::thread_local_;
+Handle<Object> Execution::TryGetConstructorDelegate(
+ Handle<Object> object,
+ bool* has_pending_exception) {
+ ASSERT(!object->IsJSFunction());
+ Isolate* isolate = Isolate::Current();
+
+ // If you return a function from here, it will be called when an
+ // attempt is made to call the given object as a constructor.
+
+ // Objects created through the API can have an instance-call handler
+ // that should be used when calling the object as a function.
+ if (object->IsHeapObject() &&
+ HeapObject::cast(*object)->map()->has_instance_call_handler()) {
+ return Handle<JSFunction>(
+ isolate->global_context()->call_as_constructor_delegate());
+ }
+
+ // If the object doesn't have an instance-call handler, we should
+ // throw a non-callable exception.
+ i::Handle<i::Object> error_obj = isolate->factory()->NewTypeError(
+ "called_non_callable", i::HandleVector<i::Object>(&object, 1));
+ isolate->Throw(*error_obj);
+ *has_pending_exception = true;
+
+ return isolate->factory()->undefined_value();
+}
bool StackGuard::IsStackOverflow() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
return (thread_local_.jslimit_ != kInterruptLimit &&
thread_local_.climit_ != kInterruptLimit);
}
void StackGuard::EnableInterrupts() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
if (has_pending_interrupts(access)) {
set_interrupt_limits(access);
}
@@ -242,10 +309,10 @@ void StackGuard::EnableInterrupts() {
void StackGuard::SetStackLimit(uintptr_t limit) {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
// If the current limits are special (eg due to a pending interrupt) then
// leave them alone.
- uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(limit);
+ uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
if (thread_local_.jslimit_ == thread_local_.real_jslimit_) {
thread_local_.jslimit_ = jslimit;
}
@@ -258,92 +325,92 @@ void StackGuard::SetStackLimit(uintptr_t limit) {
void StackGuard::DisableInterrupts() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
reset_limits(access);
}
bool StackGuard::IsInterrupted() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & INTERRUPT;
}
void StackGuard::Interrupt() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= INTERRUPT;
set_interrupt_limits(access);
}
bool StackGuard::IsPreempted() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & PREEMPT;
}
void StackGuard::Preempt() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= PREEMPT;
set_interrupt_limits(access);
}
bool StackGuard::IsTerminateExecution() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & TERMINATE;
}
void StackGuard::TerminateExecution() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= TERMINATE;
set_interrupt_limits(access);
}
bool StackGuard::IsRuntimeProfilerTick() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK;
}
void StackGuard::RequestRuntimeProfilerTick() {
// Ignore calls if we're not optimizing or if we can't get the lock.
- if (FLAG_opt && ExecutionAccess::TryLock()) {
+ if (FLAG_opt && ExecutionAccess::TryLock(isolate_)) {
thread_local_.interrupt_flags_ |= RUNTIME_PROFILER_TICK;
if (thread_local_.postpone_interrupts_nesting_ == 0) {
thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
- Heap::SetStackLimits();
+ isolate_->heap()->SetStackLimits();
}
- ExecutionAccess::Unlock();
+ ExecutionAccess::Unlock(isolate_);
}
}
#ifdef ENABLE_DEBUGGER_SUPPORT
bool StackGuard::IsDebugBreak() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & DEBUGBREAK;
}
void StackGuard::DebugBreak() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= DEBUGBREAK;
set_interrupt_limits(access);
}
bool StackGuard::IsDebugCommand() {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & DEBUGCOMMAND;
}
void StackGuard::DebugCommand() {
if (FLAG_debugger_auto_break) {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= DEBUGCOMMAND;
set_interrupt_limits(access);
}
@@ -351,7 +418,7 @@ void StackGuard::DebugCommand() {
#endif
void StackGuard::Continue(InterruptFlag after_what) {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ &= ~static_cast<int>(after_what);
if (!should_postpone_interrupts(access) && !has_pending_interrupts(access)) {
reset_limits(access);
@@ -359,36 +426,34 @@ void StackGuard::Continue(InterruptFlag after_what) {
}
-int StackGuard::ArchiveSpacePerThread() {
- return sizeof(ThreadLocal);
-}
-
-
char* StackGuard::ArchiveStackGuard(char* to) {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
ThreadLocal blank;
+
+ // Set the stack limits using the old thread_local_.
+ // TODO(isolates): This was the old semantics of constructing a ThreadLocal
+ // (as the ctor called SetStackLimits, which looked at the
+ // current thread_local_ from StackGuard)-- but is this
+ // really what was intended?
+ isolate_->heap()->SetStackLimits();
thread_local_ = blank;
+
return to + sizeof(ThreadLocal);
}
char* StackGuard::RestoreStackGuard(char* from) {
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
- Heap::SetStackLimits();
+ isolate_->heap()->SetStackLimits();
return from + sizeof(ThreadLocal);
}
-static internal::Thread::LocalStorageKey stack_limit_key =
- internal::Thread::CreateThreadLocalKey();
-
-
void StackGuard::FreeThreadResources() {
- Thread::SetThreadLocal(
- stack_limit_key,
- reinterpret_cast<void*>(thread_local_.real_climit_));
+ Isolate::CurrentPerIsolateThreadData()->set_stack_limit(
+ thread_local_.real_climit_);
}
@@ -400,52 +465,58 @@ void StackGuard::ThreadLocal::Clear() {
nesting_ = 0;
postpone_interrupts_nesting_ = 0;
interrupt_flags_ = 0;
- Heap::SetStackLimits();
}
-void StackGuard::ThreadLocal::Initialize() {
+bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
+ bool should_set_stack_limits = false;
if (real_climit_ == kIllegalLimit) {
// Takes the address of the limit variable in order to find out where
// the top of stack is right now.
const uintptr_t kLimitSize = FLAG_stack_size * KB;
uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize;
ASSERT(reinterpret_cast<uintptr_t>(&limit) > kLimitSize);
- real_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
- jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
+ real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
+ jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
real_climit_ = limit;
climit_ = limit;
- Heap::SetStackLimits();
+ should_set_stack_limits = true;
}
nesting_ = 0;
postpone_interrupts_nesting_ = 0;
interrupt_flags_ = 0;
+ return should_set_stack_limits;
}
void StackGuard::ClearThread(const ExecutionAccess& lock) {
thread_local_.Clear();
+ isolate_->heap()->SetStackLimits();
}
void StackGuard::InitThread(const ExecutionAccess& lock) {
- thread_local_.Initialize();
- void* stored_limit = Thread::GetThreadLocal(stack_limit_key);
+ if (thread_local_.Initialize(isolate_)) isolate_->heap()->SetStackLimits();
+ Isolate::PerIsolateThreadData* per_thread =
+ isolate_->FindOrAllocatePerThreadDataForThisThread();
+ uintptr_t stored_limit = per_thread->stack_limit();
// You should hold the ExecutionAccess lock when you call this.
- if (stored_limit != NULL) {
- StackGuard::SetStackLimit(reinterpret_cast<intptr_t>(stored_limit));
+ if (stored_limit != 0) {
+ StackGuard::SetStackLimit(stored_limit);
}
}
// --- C a l l s t o n a t i v e s ---
-#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception) \
- do { \
- Object** args[argc] = argv; \
- ASSERT(has_pending_exception != NULL); \
- return Call(Top::name##_fun(), Top::builtins(), argc, args, \
- has_pending_exception); \
+#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception) \
+ do { \
+ Isolate* isolate = Isolate::Current(); \
+ Object** args[argc] = argv; \
+ ASSERT(has_pending_exception != NULL); \
+ return Call(isolate->name##_fun(), \
+ isolate->js_builtins_object(), argc, args, \
+ has_pending_exception); \
} while (false)
@@ -461,7 +532,7 @@ Handle<Object> Execution::ToBoolean(Handle<Object> obj) {
double value = obj->Number();
result = !((value == 0) || isnan(value));
}
- return Handle<Object>(Heap::ToBoolean(result));
+ return Handle<Object>(HEAP->ToBoolean(result));
}
@@ -502,7 +573,7 @@ Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
Handle<Object> Execution::NewDate(double time, bool* exc) {
- Handle<Object> time_obj = Factory::NewNumber(time);
+ Handle<Object> time_obj = FACTORY->NewNumber(time);
RETURN_NATIVE_CALL(create_date, 1, { time_obj.location() }, exc);
}
@@ -513,30 +584,33 @@ Handle<Object> Execution::NewDate(double time, bool* exc) {
Handle<JSRegExp> Execution::NewJSRegExp(Handle<String> pattern,
Handle<String> flags,
bool* exc) {
+ Handle<JSFunction> function = Handle<JSFunction>(
+ pattern->GetIsolate()->global_context()->regexp_function());
Handle<Object> re_obj = RegExpImpl::CreateRegExpLiteral(
- Handle<JSFunction>(Top::global_context()->regexp_function()),
- pattern,
- flags,
- exc);
+ function, pattern, flags, exc);
if (*exc) return Handle<JSRegExp>();
return Handle<JSRegExp>::cast(re_obj);
}
Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
+ Isolate* isolate = string->GetIsolate();
+ Factory* factory = isolate->factory();
+
int int_index = static_cast<int>(index);
if (int_index < 0 || int_index >= string->length()) {
- return Factory::undefined_value();
+ return factory->undefined_value();
}
Handle<Object> char_at =
- GetProperty(Top::builtins(), Factory::char_at_symbol());
+ GetProperty(isolate->js_builtins_object(),
+ factory->char_at_symbol());
if (!char_at->IsJSFunction()) {
- return Factory::undefined_value();
+ return factory->undefined_value();
}
bool caught_exception;
- Handle<Object> index_object = Factory::NewNumberFromInt(int_index);
+ Handle<Object> index_object = factory->NewNumberFromInt(int_index);
Object** index_arg[] = { index_object.location() };
Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
string,
@@ -544,7 +618,7 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
index_arg,
&caught_exception);
if (caught_exception) {
- return Factory::undefined_value();
+ return factory->undefined_value();
}
return result;
}
@@ -552,15 +626,18 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
Handle<JSFunction> Execution::InstantiateFunction(
Handle<FunctionTemplateInfo> data, bool* exc) {
+ Isolate* isolate = data->GetIsolate();
// Fast case: see if the function has already been instantiated
int serial_number = Smi::cast(data->serial_number())->value();
- Object* elm = Top::global_context()->function_cache()->
- GetElementNoExceptionThrown(serial_number);
+ Object* elm =
+ isolate->global_context()->function_cache()->
+ GetElementNoExceptionThrown(serial_number);
if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
// The function has not yet been instantiated in this context; do it.
Object** args[1] = { Handle<Object>::cast(data).location() };
Handle<Object> result =
- Call(Top::instantiate_fun(), Top::builtins(), 1, args, exc);
+ Call(isolate->instantiate_fun(),
+ isolate->js_builtins_object(), 1, args, exc);
if (*exc) return Handle<JSFunction>::null();
return Handle<JSFunction>::cast(result);
}
@@ -568,12 +645,13 @@ Handle<JSFunction> Execution::InstantiateFunction(
Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
bool* exc) {
+ Isolate* isolate = data->GetIsolate();
if (data->property_list()->IsUndefined() &&
!data->constructor()->IsUndefined()) {
// Initialization to make gcc happy.
Object* result = NULL;
{
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<FunctionTemplateInfo> cons_template =
Handle<FunctionTemplateInfo>(
FunctionTemplateInfo::cast(data->constructor()));
@@ -588,7 +666,8 @@ Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
} else {
Object** args[1] = { Handle<Object>::cast(data).location() };
Handle<Object> result =
- Call(Top::instantiate_fun(), Top::builtins(), 1, args, exc);
+ Call(isolate->instantiate_fun(),
+ isolate->js_builtins_object(), 1, args, exc);
if (*exc) return Handle<JSObject>::null();
return Handle<JSObject>::cast(result);
}
@@ -598,8 +677,10 @@ Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
void Execution::ConfigureInstance(Handle<Object> instance,
Handle<Object> instance_template,
bool* exc) {
+ Isolate* isolate = Isolate::Current();
Object** args[2] = { instance.location(), instance_template.location() };
- Execution::Call(Top::configure_instance_fun(), Top::builtins(), 2, args, exc);
+ Execution::Call(isolate->configure_instance_fun(),
+ isolate->js_builtins_object(), 2, args, exc);
}
@@ -607,93 +688,106 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
Handle<JSFunction> fun,
Handle<Object> pos,
Handle<Object> is_global) {
+ Isolate* isolate = fun->GetIsolate();
const int argc = 4;
Object** args[argc] = { recv.location(),
Handle<Object>::cast(fun).location(),
pos.location(),
is_global.location() };
bool caught_exception = false;
- Handle<Object> result = TryCall(Top::get_stack_trace_line_fun(),
- Top::builtins(), argc, args,
- &caught_exception);
- if (caught_exception || !result->IsString()) return Factory::empty_symbol();
+ Handle<Object> result =
+ TryCall(isolate->get_stack_trace_line_fun(),
+ isolate->js_builtins_object(), argc, args,
+ &caught_exception);
+ if (caught_exception || !result->IsString()) {
+ return isolate->factory()->empty_symbol();
+ }
+
return Handle<String>::cast(result);
}
static Object* RuntimePreempt() {
+ Isolate* isolate = Isolate::Current();
+
// Clear the preempt request flag.
- StackGuard::Continue(PREEMPT);
+ isolate->stack_guard()->Continue(PREEMPT);
ContextSwitcher::PreemptionReceived();
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (Debug::InDebugger()) {
+ if (isolate->debug()->InDebugger()) {
// If currently in the debugger, don't do any actual preemption but record
// that preemption occurred while in the debugger.
- Debug::PreemptionWhileInDebugger();
+ isolate->debug()->PreemptionWhileInDebugger();
} else {
// Perform preemption.
- v8::Unlocker unlocker;
+ v8::Unlocker unlocker(reinterpret_cast<v8::Isolate*>(isolate));
Thread::YieldCPU();
}
#else
- // Perform preemption.
- v8::Unlocker unlocker;
- Thread::YieldCPU();
+ { // NOLINT
+ // Perform preemption.
+ v8::Unlocker unlocker(reinterpret_cast<v8::Isolate*>(isolate));
+ Thread::YieldCPU();
+ }
#endif
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
#ifdef ENABLE_DEBUGGER_SUPPORT
Object* Execution::DebugBreakHelper() {
+ Isolate* isolate = Isolate::Current();
+
// Just continue if breaks are disabled.
- if (Debug::disable_break()) {
- return Heap::undefined_value();
+ if (isolate->debug()->disable_break()) {
+ return isolate->heap()->undefined_value();
}
// Ignore debug break during bootstrapping.
- if (Bootstrapper::IsActive()) {
- return Heap::undefined_value();
+ if (isolate->bootstrapper()->IsActive()) {
+ return isolate->heap()->undefined_value();
}
{
- JavaScriptFrameIterator it;
+ JavaScriptFrameIterator it(isolate);
ASSERT(!it.done());
Object* fun = it.frame()->function();
if (fun && fun->IsJSFunction()) {
// Don't stop in builtin functions.
if (JSFunction::cast(fun)->IsBuiltin()) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
GlobalObject* global = JSFunction::cast(fun)->context()->global();
// Don't stop in debugger functions.
- if (Debug::IsDebugGlobal(global)) {
- return Heap::undefined_value();
+ if (isolate->debug()->IsDebugGlobal(global)) {
+ return isolate->heap()->undefined_value();
}
}
}
// Collect the break state before clearing the flags.
bool debug_command_only =
- StackGuard::IsDebugCommand() && !StackGuard::IsDebugBreak();
+ isolate->stack_guard()->IsDebugCommand() &&
+ !isolate->stack_guard()->IsDebugBreak();
// Clear the debug break request flag.
- StackGuard::Continue(DEBUGBREAK);
+ isolate->stack_guard()->Continue(DEBUGBREAK);
ProcessDebugMesssages(debug_command_only);
// Return to continue execution.
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
void Execution::ProcessDebugMesssages(bool debug_command_only) {
+ Isolate* isolate = Isolate::Current();
// Clear the debug command request flag.
- StackGuard::Continue(DEBUGCOMMAND);
+ isolate->stack_guard()->Continue(DEBUGCOMMAND);
- HandleScope scope;
+ HandleScope scope(isolate);
// Enter the debugger. Just continue if we fail to enter the debugger.
EnterDebugger debugger;
if (debugger.FailedToEnter()) {
@@ -702,34 +796,37 @@ void Execution::ProcessDebugMesssages(bool debug_command_only) {
// Notify the debug event listeners. Indicate auto continue if the break was
// a debug command break.
- Debugger::OnDebugBreak(Factory::undefined_value(), debug_command_only);
+ isolate->debugger()->OnDebugBreak(isolate->factory()->undefined_value(),
+ debug_command_only);
}
#endif
MaybeObject* Execution::HandleStackGuardInterrupt() {
- Counters::stack_interrupts.Increment();
- if (StackGuard::IsRuntimeProfilerTick()) {
- Counters::runtime_profiler_ticks.Increment();
- StackGuard::Continue(RUNTIME_PROFILER_TICK);
- RuntimeProfiler::OptimizeNow();
+ Isolate* isolate = Isolate::Current();
+ StackGuard* stack_guard = isolate->stack_guard();
+ isolate->counters()->stack_interrupts()->Increment();
+ if (stack_guard->IsRuntimeProfilerTick()) {
+ isolate->counters()->runtime_profiler_ticks()->Increment();
+ stack_guard->Continue(RUNTIME_PROFILER_TICK);
+ isolate->runtime_profiler()->OptimizeNow();
}
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (StackGuard::IsDebugBreak() || StackGuard::IsDebugCommand()) {
+ if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) {
DebugBreakHelper();
}
#endif
- if (StackGuard::IsPreempted()) RuntimePreempt();
- if (StackGuard::IsTerminateExecution()) {
- StackGuard::Continue(TERMINATE);
- return Top::TerminateExecution();
+ if (stack_guard->IsPreempted()) RuntimePreempt();
+ if (stack_guard->IsTerminateExecution()) {
+ stack_guard->Continue(TERMINATE);
+ return isolate->TerminateExecution();
}
- if (StackGuard::IsInterrupted()) {
- StackGuard::Continue(INTERRUPT);
- return Top::StackOverflow();
+ if (stack_guard->IsInterrupted()) {
+ stack_guard->Continue(INTERRUPT);
+ return isolate->StackOverflow();
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
} } // namespace v8::internal
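
Worth spelling out, since set_interrupt_limits() and reset_limits() above are the whole mechanism: an interrupt is delivered by recording a flag bit and clobbering both stack limits with a sentinel, so the very next stack check in generated code fails and control reaches HandleStackGuardInterrupt, which dispatches on the flags and restores the real limits. A toy model of that handshake, with simplified types rather than the real StackGuard:

    #include <stdint.h>

    static const uintptr_t kInterruptSentinel = ~static_cast<uintptr_t>(1);

    struct ToyGuard {
      uintptr_t real_limit_;
      uintptr_t limit_;
      int interrupt_flags_;

      explicit ToyGuard(uintptr_t real_limit)
          : real_limit_(real_limit), limit_(real_limit), interrupt_flags_(0) {}

      // Like Interrupt()/Preempt(): set a flag, poison the limit.
      void Request(int flag) {
        interrupt_flags_ |= flag;
        limit_ = kInterruptSentinel;
      }

      // The stack check emitted in generated code: with the sentinel
      // installed, any plausible stack pointer is "below" the limit.
      bool StackCheckFails(uintptr_t sp) const { return sp < limit_; }

      // Like Continue(): clear one flag; restore the real limit only when
      // nothing else is pending.
      void Continue(int flag) {
        interrupt_flags_ &= ~flag;
        if (interrupt_flags_ == 0) limit_ = real_limit_;
      }
    };
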
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index cb07807c0..bb5f80450 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -28,6 +28,8 @@
#ifndef V8_EXECUTION_H_
#define V8_EXECUTION_H_
+#include "allocation.h"
+
namespace v8 {
namespace internal {
@@ -51,7 +53,7 @@ class Execution : public AllStatic {
// *pending_exception tells whether the invoke resulted in
// a pending exception.
//
- static Handle<Object> Call(Handle<JSFunction> func,
+ static Handle<Object> Call(Handle<Object> callable,
Handle<Object> receiver,
int argc,
Object*** args,
@@ -138,79 +140,86 @@ class Execution : public AllStatic {
// Get a function delegate (or undefined) for the given non-function
// object. Used for support calling objects as functions.
static Handle<Object> GetFunctionDelegate(Handle<Object> object);
+ static Handle<Object> TryGetFunctionDelegate(Handle<Object> object,
+ bool* has_pending_exception);
// Get a function delegate (or undefined) for the given non-function
// object. Used for support calling objects as constructors.
static Handle<Object> GetConstructorDelegate(Handle<Object> object);
+ static Handle<Object> TryGetConstructorDelegate(Handle<Object> object,
+ bool* has_pending_exception);
};
class ExecutionAccess;
+class Isolate;
// StackGuard contains the handling of the limits that are used to limit the
// number of nested invocations of JavaScript and the stack size used in each
// invocation.
-class StackGuard : public AllStatic {
+class StackGuard {
public:
// Pass the address beyond which the stack should not grow. The stack
// is assumed to grow downwards.
- static void SetStackLimit(uintptr_t limit);
+ void SetStackLimit(uintptr_t limit);
// Threading support.
- static char* ArchiveStackGuard(char* to);
- static char* RestoreStackGuard(char* from);
- static int ArchiveSpacePerThread();
- static void FreeThreadResources();
+ char* ArchiveStackGuard(char* to);
+ char* RestoreStackGuard(char* from);
+ static int ArchiveSpacePerThread() { return sizeof(ThreadLocal); }
+ void FreeThreadResources();
// Sets up the default stack guard for this thread if it has not
// already been set up.
- static void InitThread(const ExecutionAccess& lock);
+ void InitThread(const ExecutionAccess& lock);
// Clears the stack guard for this thread so it does not look as if
// it has been set up.
- static void ClearThread(const ExecutionAccess& lock);
-
- static bool IsStackOverflow();
- static bool IsPreempted();
- static void Preempt();
- static bool IsInterrupted();
- static void Interrupt();
- static bool IsTerminateExecution();
- static void TerminateExecution();
- static bool IsRuntimeProfilerTick();
- static void RequestRuntimeProfilerTick();
+ void ClearThread(const ExecutionAccess& lock);
+
+ bool IsStackOverflow();
+ bool IsPreempted();
+ void Preempt();
+ bool IsInterrupted();
+ void Interrupt();
+ bool IsTerminateExecution();
+ void TerminateExecution();
+ bool IsRuntimeProfilerTick();
+ void RequestRuntimeProfilerTick();
#ifdef ENABLE_DEBUGGER_SUPPORT
- static bool IsDebugBreak();
- static void DebugBreak();
- static bool IsDebugCommand();
- static void DebugCommand();
+ bool IsDebugBreak();
+ void DebugBreak();
+ bool IsDebugCommand();
+ void DebugCommand();
#endif
- static void Continue(InterruptFlag after_what);
+ void Continue(InterruptFlag after_what);
// This provides an asynchronous read of the stack limits for the current
// thread. There are no locks protecting this, but it is assumed that you
// have the global V8 lock if you are using multiple V8 threads.
- static uintptr_t climit() {
+ uintptr_t climit() {
return thread_local_.climit_;
}
- static uintptr_t real_climit() {
+ uintptr_t real_climit() {
return thread_local_.real_climit_;
}
- static uintptr_t jslimit() {
+ uintptr_t jslimit() {
return thread_local_.jslimit_;
}
- static uintptr_t real_jslimit() {
+ uintptr_t real_jslimit() {
return thread_local_.real_jslimit_;
}
- static Address address_of_jslimit() {
+ Address address_of_jslimit() {
return reinterpret_cast<Address>(&thread_local_.jslimit_);
}
- static Address address_of_real_jslimit() {
+ Address address_of_real_jslimit() {
return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
}
private:
+ StackGuard();
+
// You should hold the ExecutionAccess lock when calling this method.
- static bool has_pending_interrupts(const ExecutionAccess& lock) {
+ bool has_pending_interrupts(const ExecutionAccess& lock) {
// Sanity check: We shouldn't be asking about pending interrupts
// unless we're not postponing them anymore.
ASSERT(!should_postpone_interrupts(lock));
@@ -218,30 +227,20 @@ class StackGuard : public AllStatic {
}
// You should hold the ExecutionAccess lock when calling this method.
- static bool should_postpone_interrupts(const ExecutionAccess& lock) {
+ bool should_postpone_interrupts(const ExecutionAccess& lock) {
return thread_local_.postpone_interrupts_nesting_ > 0;
}
// You should hold the ExecutionAccess lock when calling this method.
- static void set_interrupt_limits(const ExecutionAccess& lock) {
- // Ignore attempts to interrupt when interrupts are postponed.
- if (should_postpone_interrupts(lock)) return;
- thread_local_.jslimit_ = kInterruptLimit;
- thread_local_.climit_ = kInterruptLimit;
- Heap::SetStackLimits();
- }
+ inline void set_interrupt_limits(const ExecutionAccess& lock);
// Reset limits to actual values. For example after handling interrupt.
// You should hold the ExecutionAccess lock when calling this method.
- static void reset_limits(const ExecutionAccess& lock) {
- thread_local_.jslimit_ = thread_local_.real_jslimit_;
- thread_local_.climit_ = thread_local_.real_climit_;
- Heap::SetStackLimits();
- }
+ inline void reset_limits(const ExecutionAccess& lock);
// Enable or disable interrupts.
- static void EnableInterrupts();
- static void DisableInterrupts();
+ void EnableInterrupts();
+ void DisableInterrupts();
#ifdef V8_TARGET_ARCH_X64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
@@ -256,9 +255,11 @@ class StackGuard : public AllStatic {
ThreadLocal() { Clear(); }
// You should hold the ExecutionAccess lock when you call Initialize or
// Clear.
- void Initialize();
void Clear();
+ // Returns true if the heap's stack limits should be set, false if not.
+ bool Initialize(Isolate* isolate);
+
// The stack limit is split into a JavaScript and a C++ stack limit. These
// two are the same except when running on a simulator where the C++ and
// JavaScript stacks are separate. Each of the two stack limits have two
@@ -278,45 +279,19 @@ class StackGuard : public AllStatic {
int interrupt_flags_;
};
- static ThreadLocal thread_local_;
+ // TODO(isolates): Technically this could be calculated directly from a
+ // pointer to StackGuard.
+ Isolate* isolate_;
+ ThreadLocal thread_local_;
+ friend class Isolate;
friend class StackLimitCheck;
friend class PostponeInterruptsScope;
-};
-
-// Support for checking for stack-overflows in C++ code.
-class StackLimitCheck BASE_EMBEDDED {
- public:
- bool HasOverflowed() const {
- // Stack has overflowed in C++ code only if stack pointer exceeds the C++
- // stack guard and the limits are not set to interrupt values.
- // TODO(214): Stack overflows are ignored if a interrupt is pending. This
- // code should probably always use the initial C++ limit.
- return (reinterpret_cast<uintptr_t>(this) < StackGuard::climit()) &&
- StackGuard::IsStackOverflow();
- }
+ DISALLOW_COPY_AND_ASSIGN(StackGuard);
};
-// Support for temporarily postponing interrupts. When the outermost
-// postpone scope is left the interrupts will be re-enabled and any
-// interrupts that occurred while in the scope will be taken into
-// account.
-class PostponeInterruptsScope BASE_EMBEDDED {
- public:
- PostponeInterruptsScope() {
- StackGuard::thread_local_.postpone_interrupts_nesting_++;
- StackGuard::DisableInterrupts();
- }
-
- ~PostponeInterruptsScope() {
- if (--StackGuard::thread_local_.postpone_interrupts_nesting_ == 0) {
- StackGuard::EnableInterrupts();
- }
- }
-};
-
} } // namespace v8::internal
#endif // V8_EXECUTION_H_
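
The StackLimitCheck and PostponeInterruptsScope helpers removed above are presumably relocated rather than deleted, since they now need a per-isolate StackGuard instance instead of static methods. The postpone scope itself is the usual RAII nesting counter; a self-contained sketch with stand-in names:

    struct ToyInterrupts {
      int postpone_nesting;
      bool enabled;
      ToyInterrupts() : postpone_nesting(0), enabled(true) {}
    };

    // Interrupts stay off while any scope is alive; only the outermost
    // destructor re-enables them, mirroring postpone_interrupts_nesting_.
    class PostponeScope {
     public:
      explicit PostponeScope(ToyInterrupts* state) : state_(state) {
        state_->postpone_nesting++;
        state_->enabled = false;
      }
      ~PostponeScope() {
        if (--state_->postpone_nesting == 0) state_->enabled = true;
      }

     private:
      ToyInterrupts* state_;
    };
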
diff --git a/deps/v8/src/extensions/experimental/break-iterator.cc b/deps/v8/src/extensions/experimental/break-iterator.cc
new file mode 100644
index 000000000..e695a3e97
--- /dev/null
+++ b/deps/v8/src/extensions/experimental/break-iterator.cc
@@ -0,0 +1,252 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/break-iterator.h"
+
+#include <string.h>
+
+#include "unicode/brkiter.h"
+#include "unicode/locid.h"
+#include "unicode/rbbi.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Persistent<v8::FunctionTemplate> BreakIterator::break_iterator_template_;
+
+icu::BreakIterator* BreakIterator::UnpackBreakIterator(
+ v8::Handle<v8::Object> obj) {
+ if (break_iterator_template_->HasInstance(obj)) {
+ return static_cast<icu::BreakIterator*>(
+ obj->GetPointerFromInternalField(0));
+ }
+
+ return NULL;
+}
+
+icu::UnicodeString* BreakIterator::ResetAdoptedText(
+ v8::Handle<v8::Object> obj, v8::Handle<v8::Value> value) {
+ // Get the previous value from the internal field.
+ icu::UnicodeString* text = static_cast<icu::UnicodeString*>(
+ obj->GetPointerFromInternalField(1));
+ delete text;
+
+ // Assign new value to the internal pointer.
+ v8::String::Value text_value(value);
+ text = new icu::UnicodeString(
+ reinterpret_cast<const UChar*>(*text_value), text_value.length());
+ obj->SetPointerInInternalField(1, text);
+
+ // Return new unicode string pointer.
+ return text;
+}
+
+void BreakIterator::DeleteBreakIterator(v8::Persistent<v8::Value> object,
+ void* param) {
+ v8::Persistent<v8::Object> persistent_object =
+ v8::Persistent<v8::Object>::Cast(object);
+
+ // First delete the hidden C++ object.
+ // Unpacking should never return NULL here. That would only happen if
+ // this method is used as the weak callback for persistent handles not
+ // pointing to a break iterator.
+ delete UnpackBreakIterator(persistent_object);
+
+ delete static_cast<icu::UnicodeString*>(
+ persistent_object->GetPointerFromInternalField(1));
+
+ // Then dispose of the persistent handle to JS object.
+ persistent_object.Dispose();
+}
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+ // Returns undefined, and schedules an exception to be thrown.
+ return v8::ThrowException(v8::Exception::Error(
+ v8::String::New("BreakIterator method called on an object "
+ "that is not a BreakIterator.")));
+}
+
+v8::Handle<v8::Value> BreakIterator::BreakIteratorAdoptText(
+ const v8::Arguments& args) {
+ if (args.Length() != 1 || !args[0]->IsString()) {
+ return v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Text input is required.")));
+ }
+
+ icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
+ if (!break_iterator) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ break_iterator->setText(*ResetAdoptedText(args.Holder(), args[0]));
+
+ return v8::Undefined();
+}
+
+v8::Handle<v8::Value> BreakIterator::BreakIteratorFirst(
+ const v8::Arguments& args) {
+ icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
+ if (!break_iterator) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ return v8::Int32::New(break_iterator->first());
+}
+
+v8::Handle<v8::Value> BreakIterator::BreakIteratorNext(
+ const v8::Arguments& args) {
+ icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
+ if (!break_iterator) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ return v8::Int32::New(break_iterator->next());
+}
+
+v8::Handle<v8::Value> BreakIterator::BreakIteratorCurrent(
+ const v8::Arguments& args) {
+ icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
+ if (!break_iterator) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ return v8::Int32::New(break_iterator->current());
+}
+
+v8::Handle<v8::Value> BreakIterator::BreakIteratorBreakType(
+ const v8::Arguments& args) {
+ icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
+ if (!break_iterator) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
+ icu::RuleBasedBreakIterator* rule_based_iterator =
+ static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
+ int32_t status = rule_based_iterator->getRuleStatus();
+ // Keep return values in sync with JavaScript BreakType enum.
+ if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
+ return v8::Int32::New(UBRK_WORD_NONE);
+ } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
+ return v8::Int32::New(UBRK_WORD_NUMBER);
+ } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
+ return v8::Int32::New(UBRK_WORD_LETTER);
+ } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
+ return v8::Int32::New(UBRK_WORD_KANA);
+ } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
+ return v8::Int32::New(UBRK_WORD_IDEO);
+ } else {
+ return v8::Int32::New(-1);
+ }
+}
+
+v8::Handle<v8::Value> BreakIterator::JSBreakIterator(
+ const v8::Arguments& args) {
+ v8::HandleScope handle_scope;
+
+ if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
+ return v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Locale and iterator type are required.")));
+ }
+
+ v8::String::Utf8Value locale(args[0]);
+ icu::Locale icu_locale(*locale);
+
+ UErrorCode status = U_ZERO_ERROR;
+ icu::BreakIterator* break_iterator = NULL;
+ v8::String::Utf8Value type(args[1]);
+ if (!strcmp(*type, "character")) {
+ break_iterator =
+ icu::BreakIterator::createCharacterInstance(icu_locale, status);
+ } else if (!strcmp(*type, "word")) {
+ break_iterator =
+ icu::BreakIterator::createWordInstance(icu_locale, status);
+ } else if (!strcmp(*type, "sentence")) {
+ break_iterator =
+ icu::BreakIterator::createSentenceInstance(icu_locale, status);
+ } else if (!strcmp(*type, "line")) {
+ break_iterator =
+ icu::BreakIterator::createLineInstance(icu_locale, status);
+ } else {
+ return v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Invalid iterator type.")));
+ }
+
+ if (U_FAILURE(status)) {
+ delete break_iterator;
+ return v8::ThrowException(v8::Exception::Error(
+ v8::String::New("Failed to create break iterator.")));
+ }
+
+ if (break_iterator_template_.IsEmpty()) {
+ v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
+
+ raw_template->SetClassName(v8::String::New("v8Locale.v8BreakIterator"));
+
+ // Define internal field count on instance template.
+ v8::Local<v8::ObjectTemplate> object_template =
+ raw_template->InstanceTemplate();
+
+ // Set aside internal fields for icu break iterator and adopted text.
+ object_template->SetInternalFieldCount(2);
+
+ // Define all of the prototype methods on prototype template.
+ v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
+ proto->Set(v8::String::New("adoptText"),
+ v8::FunctionTemplate::New(BreakIteratorAdoptText));
+ proto->Set(v8::String::New("first"),
+ v8::FunctionTemplate::New(BreakIteratorFirst));
+ proto->Set(v8::String::New("next"),
+ v8::FunctionTemplate::New(BreakIteratorNext));
+ proto->Set(v8::String::New("current"),
+ v8::FunctionTemplate::New(BreakIteratorCurrent));
+ proto->Set(v8::String::New("breakType"),
+ v8::FunctionTemplate::New(BreakIteratorBreakType));
+
+ break_iterator_template_ =
+ v8::Persistent<v8::FunctionTemplate>::New(raw_template);
+ }
+
+ // Create an empty object wrapper.
+ v8::Local<v8::Object> local_object =
+ break_iterator_template_->GetFunction()->NewInstance();
+ v8::Persistent<v8::Object> wrapper =
+ v8::Persistent<v8::Object>::New(local_object);
+
+ // Set break iterator as internal field of the resulting JS object.
+ wrapper->SetPointerInInternalField(0, break_iterator);
+ // Make sure that the pointer to adopted text is NULL.
+ wrapper->SetPointerInInternalField(1, NULL);
+
+ // Make object handle weak so we can delete iterator once GC kicks in.
+ wrapper.MakeWeak(NULL, DeleteBreakIterator);
+
+ return wrapper;
+}
+
+} } // namespace v8::internal
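
The breakType() bucketing above is plain ICU: word-break rule statuses fall into the UBRK_WORD_* ranges, which identify what kind of token ends at each boundary. A minimal ICU-only sketch of the same loop, independent of V8 (the locale and sample text are illustrative):

#include <stdio.h>

#include "unicode/brkiter.h"
#include "unicode/locid.h"
#include "unicode/rbbi.h"
#include "unicode/ubrk.h"
#include "unicode/unistr.h"

int main() {
  UErrorCode status = U_ZERO_ERROR;
  icu::BreakIterator* iterator =
      icu::BreakIterator::createWordInstance(icu::Locale("en_US"), status);
  if (U_FAILURE(status)) return 1;

  icu::UnicodeString text("4 cats");
  iterator->setText(text);

  // getRuleStatus() describes the segment that ends at the current
  // boundary, so classify after each next().
  for (int32_t end = iterator->next(); end != icu::BreakIterator::DONE;
       end = iterator->next()) {
    int32_t rule_status =
        static_cast<icu::RuleBasedBreakIterator*>(iterator)->getRuleStatus();
    const char* type = "none";
    if (rule_status >= UBRK_WORD_NUMBER &&
        rule_status < UBRK_WORD_NUMBER_LIMIT) {
      type = "number";  // "4"
    } else if (rule_status >= UBRK_WORD_LETTER &&
               rule_status < UBRK_WORD_LETTER_LIMIT) {
      type = "letter";  // "cats"
    }
    printf("boundary at %d: %s\n", end, type);
  }
  delete iterator;
  return 0;
}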
diff --git a/deps/v8/src/extensions/experimental/break-iterator.h b/deps/v8/src/extensions/experimental/break-iterator.h
new file mode 100644
index 000000000..73b9bbd56
--- /dev/null
+++ b/deps/v8/src/extensions/experimental/break-iterator.h
@@ -0,0 +1,89 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
+
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class BreakIterator;
+class UnicodeString;
+}
+
+namespace v8 {
+namespace internal {
+
+class BreakIterator {
+ public:
+ static v8::Handle<v8::Value> JSBreakIterator(const v8::Arguments& args);
+
+ // Helper methods for various bindings.
+
+ // Unpacks break iterator object from corresponding JavaScript object.
+ static icu::BreakIterator* UnpackBreakIterator(v8::Handle<v8::Object> obj);
+
+ // Deletes the old value and sets the adopted text in
+ // corresponding JavaScript object.
+ static icu::UnicodeString* ResetAdoptedText(v8::Handle<v8::Object> obj,
+ v8::Handle<v8::Value> text_value);
+
+ // Release memory we allocated for the BreakIterator once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteBreakIterator(v8::Persistent<v8::Value> object,
+ void* param);
+
+ // Assigns new text to the iterator.
+ static v8::Handle<v8::Value> BreakIteratorAdoptText(
+ const v8::Arguments& args);
+
+ // Moves iterator to the beginning of the string and returns new position.
+ static v8::Handle<v8::Value> BreakIteratorFirst(const v8::Arguments& args);
+
+ // Moves iterator to the next position and returns it.
+ static v8::Handle<v8::Value> BreakIteratorNext(const v8::Arguments& args);
+
+ // Returns the iterator's current position.
+ static v8::Handle<v8::Value> BreakIteratorCurrent(
+ const v8::Arguments& args);
+
+ // Returns the break type at the current position.
+ // This call is only valid for word break iterators. Others just return 0.
+ static v8::Handle<v8::Value> BreakIteratorBreakType(
+ const v8::Arguments& args);
+
+ private:
+ BreakIterator() {}
+
+ static v8::Persistent<v8::FunctionTemplate> break_iterator_template_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
diff --git a/deps/v8/src/extensions/experimental/collator.cc b/deps/v8/src/extensions/experimental/collator.cc
new file mode 100644
index 000000000..5cf219256
--- /dev/null
+++ b/deps/v8/src/extensions/experimental/collator.cc
@@ -0,0 +1,222 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/collator.h"
+
+#include "unicode/coll.h"
+#include "unicode/locid.h"
+#include "unicode/ucol.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Persistent<v8::FunctionTemplate> Collator::collator_template_;
+
+icu::Collator* Collator::UnpackCollator(v8::Handle<v8::Object> obj) {
+ if (collator_template_->HasInstance(obj)) {
+ return static_cast<icu::Collator*>(obj->GetPointerFromInternalField(0));
+ }
+
+ return NULL;
+}
+
+void Collator::DeleteCollator(v8::Persistent<v8::Value> object, void* param) {
+ v8::Persistent<v8::Object> persistent_object =
+ v8::Persistent<v8::Object>::Cast(object);
+
+ // First delete the hidden C++ object.
+ // Unpacking should never return NULL here. That would only happen if
+ // this method is used as the weak callback for persistent handles not
+ // pointing to a collator.
+ delete UnpackCollator(persistent_object);
+
+ // Then dispose of the persistent handle to JS object.
+ persistent_object.Dispose();
+}
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+ // Returns undefined, and schedules an exception to be thrown.
+ return v8::ThrowException(v8::Exception::Error(
+ v8::String::New("Collator method called on an object "
+ "that is not a Collator.")));
+}
+
+// Extract a boolean option named in |option| and set it to |result|.
+// Return true if it's specified. Otherwise, return false.
+static bool ExtractBooleanOption(const v8::Local<v8::Object>& options,
+ const char* option,
+ bool* result) {
+ v8::HandleScope handle_scope;
+ v8::TryCatch try_catch;
+ v8::Handle<v8::Value> value = options->Get(v8::String::New(option));
+ if (try_catch.HasCaught()) {
+ return false;
+ }
+ // No need to check if |value| is empty because it's taken care of
+ // by TryCatch above.
+ if (!value->IsUndefined() && !value->IsNull()) {
+ if (value->IsBoolean()) {
+ *result = value->BooleanValue();
+ return true;
+ }
+ }
+ return false;
+}
+
+// When there's an ICU error, throw a JavaScript error with |message|.
+static v8::Handle<v8::Value> ThrowExceptionForICUError(const char* message) {
+ return v8::ThrowException(v8::Exception::Error(v8::String::New(message)));
+}
+
+v8::Handle<v8::Value> Collator::CollatorCompare(const v8::Arguments& args) {
+ if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
+ return v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Two string arguments are required.")));
+ }
+
+ icu::Collator* collator = UnpackCollator(args.Holder());
+ if (!collator) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ v8::String::Value string_value1(args[0]);
+ v8::String::Value string_value2(args[1]);
+ const UChar* string1 = reinterpret_cast<const UChar*>(*string_value1);
+ const UChar* string2 = reinterpret_cast<const UChar*>(*string_value2);
+ UErrorCode status = U_ZERO_ERROR;
+ UCollationResult result = collator->compare(
+ string1, string_value1.length(), string2, string_value2.length(), status);
+
+ if (U_FAILURE(status)) {
+ return ThrowExceptionForICUError(
+ "Unexpected failure in Collator.compare.");
+ }
+
+ return v8::Int32::New(result);
+}
+
+v8::Handle<v8::Value> Collator::JSCollator(const v8::Arguments& args) {
+ v8::HandleScope handle_scope;
+
+ if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsObject()) {
+ return v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Locale and collation options are required.")));
+ }
+
+ v8::String::AsciiValue locale(args[0]);
+ icu::Locale icu_locale(*locale);
+
+ icu::Collator* collator = NULL;
+ UErrorCode status = U_ZERO_ERROR;
+ collator = icu::Collator::createInstance(icu_locale, status);
+
+ if (U_FAILURE(status)) {
+ delete collator;
+ return ThrowExceptionForICUError("Failed to create collator.");
+ }
+
+ v8::Local<v8::Object> options(args[1]->ToObject());
+
+ // Below, we only change collation options that the JavaScript caller
+ // specified explicitly; otherwise we leave them alone so we don't
+ // clobber the locale-dependent defaults.
+ // The three options below are very likely to have the same default
+ // across locales, but I haven't checked them all. Options we may add
+ // in the future certainly have locale-dependent defaults (e.g.
+ // caseFirst is upperFirst for Danish but off for most other locales).
+
+ bool ignore_case, ignore_accents, numeric;
+
+ if (ExtractBooleanOption(options, "ignoreCase", &ignore_case)) {
+ // We need to explicitly set the level to secondary to get case ignored.
+ // The default L3 strength ignores UCOL_CASE_LEVEL == UCOL_OFF!
+ if (ignore_case) {
+ collator->setStrength(icu::Collator::SECONDARY);
+ }
+ collator->setAttribute(UCOL_CASE_LEVEL, ignore_case ? UCOL_OFF : UCOL_ON,
+ status);
+ if (U_FAILURE(status)) {
+ delete collator;
+ return ThrowExceptionForICUError("Failed to set ignoreCase.");
+ }
+ }
+
+ // Accents are taken into account with strength secondary or higher.
+ if (ExtractBooleanOption(options, "ignoreAccents", &ignore_accents)) {
+ if (!ignore_accents) {
+ collator->setStrength(icu::Collator::SECONDARY);
+ } else {
+ collator->setStrength(icu::Collator::PRIMARY);
+ }
+ }
+
+ if (ExtractBooleanOption(options, "numeric", &numeric)) {
+ collator->setAttribute(UCOL_NUMERIC_COLLATION,
+ numeric ? UCOL_ON : UCOL_OFF, status);
+ if (U_FAILURE(status)) {
+ delete collator;
+ return ThrowExceptionForICUError("Failed to set numeric sort option.");
+ }
+ }
+
+ if (collator_template_.IsEmpty()) {
+ v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
+ raw_template->SetClassName(v8::String::New("v8Locale.Collator"));
+
+ // Define internal field count on instance template.
+ v8::Local<v8::ObjectTemplate> object_template =
+ raw_template->InstanceTemplate();
+
+ // Set aside internal fields for icu collator.
+ object_template->SetInternalFieldCount(1);
+
+ // Define all of the prototype methods on prototype template.
+ v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
+ proto->Set(v8::String::New("compare"),
+ v8::FunctionTemplate::New(CollatorCompare));
+
+ collator_template_ =
+ v8::Persistent<v8::FunctionTemplate>::New(raw_template);
+ }
+
+ // Create an empty object wrapper.
+ v8::Local<v8::Object> local_object =
+ collator_template_->GetFunction()->NewInstance();
+ v8::Persistent<v8::Object> wrapper =
+ v8::Persistent<v8::Object>::New(local_object);
+
+ // Set collator as internal field of the resulting JS object.
+ wrapper->SetPointerInInternalField(0, collator);
+
+ // Make object handle weak so we can delete the collator once GC kicks in.
+ wrapper.MakeWeak(NULL, DeleteCollator);
+
+ return wrapper;
+}
+
+} } // namespace v8::internal
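
The ignoreCase handling above is the standard ICU recipe: drop the strength to secondary and turn the case level off. A standalone ICU sketch of that configuration and a compare() call (locale and strings are illustrative):

#include <stdio.h>

#include "unicode/coll.h"
#include "unicode/locid.h"
#include "unicode/ucol.h"
#include "unicode/unistr.h"

int main() {
  UErrorCode status = U_ZERO_ERROR;
  icu::Collator* collator =
      icu::Collator::createInstance(icu::Locale("en_US"), status);
  if (U_FAILURE(status)) return 1;

  // Same settings JSCollator applies for ignoreCase == true.
  collator->setStrength(icu::Collator::SECONDARY);
  collator->setAttribute(UCOL_CASE_LEVEL, UCOL_OFF, status);

  icu::UnicodeString s1("Apple");
  icu::UnicodeString s2("apple");
  // Prints 0 (UCOL_EQUAL): only case distinguishes the two strings.
  UCollationResult result = collator->compare(s1, s2, status);
  printf("compare result: %d\n", result);

  delete collator;
  return U_FAILURE(status) ? 1 : 0;
}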
diff --git a/deps/v8/src/extensions/experimental/collator.h b/deps/v8/src/extensions/experimental/collator.h
new file mode 100644
index 000000000..ca7e4dc9d
--- /dev/null
+++ b/deps/v8/src/extensions/experimental/collator.h
@@ -0,0 +1,68 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
+
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class Collator;
+class UnicodeString;
+}
+
+namespace v8 {
+namespace internal {
+
+class Collator {
+ public:
+ static v8::Handle<v8::Value> JSCollator(const v8::Arguments& args);
+
+ // Helper methods for various bindings.
+
+ // Unpacks collator object from corresponding JavaScript object.
+ static icu::Collator* UnpackCollator(v8::Handle<v8::Object> obj);
+
+ // Release memory we allocated for the Collator once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteCollator(v8::Persistent<v8::Value> object, void* param);
+
+ // Compares two strings and returns -1, 0 or 1 depending on
+ // whether string1 is smaller than, equal to, or larger than string2.
+ static v8::Handle<v8::Value> CollatorCompare(const v8::Arguments& args);
+
+ private:
+ Collator() {}
+
+ static v8::Persistent<v8::FunctionTemplate> collator_template_;
+};
+
+} } // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
diff --git a/deps/v8/src/extensions/experimental/datetime-format.cc b/deps/v8/src/extensions/experimental/datetime-format.cc
new file mode 100644
index 000000000..7f4630297
--- /dev/null
+++ b/deps/v8/src/extensions/experimental/datetime-format.cc
@@ -0,0 +1,384 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/datetime-format.h"
+
+#include <string.h>
+
+#include "src/extensions/experimental/i18n-utils.h"
+#include "unicode/dtfmtsym.h"
+#include "unicode/dtptngen.h"
+#include "unicode/locid.h"
+#include "unicode/smpdtfmt.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Persistent<v8::FunctionTemplate> DateTimeFormat::datetime_format_template_;
+
+static icu::DateFormat* CreateDateTimeFormat(v8::Handle<v8::String>,
+ v8::Handle<v8::Object>);
+static v8::Handle<v8::Value> GetSymbols(
+ const v8::Arguments&,
+ const icu::UnicodeString*, int32_t,
+ const icu::UnicodeString*, int32_t,
+ const icu::UnicodeString*, int32_t);
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError();
+static icu::DateFormat::EStyle GetDateTimeStyle(const icu::UnicodeString&);
+
+icu::SimpleDateFormat* DateTimeFormat::UnpackDateTimeFormat(
+ v8::Handle<v8::Object> obj) {
+ if (datetime_format_template_->HasInstance(obj)) {
+ return static_cast<icu::SimpleDateFormat*>(
+ obj->GetPointerFromInternalField(0));
+ }
+
+ return NULL;
+}
+
+void DateTimeFormat::DeleteDateTimeFormat(v8::Persistent<v8::Value> object,
+ void* param) {
+ v8::Persistent<v8::Object> persistent_object =
+ v8::Persistent<v8::Object>::Cast(object);
+
+ // First delete the hidden C++ object.
+ // Unpacking should never return NULL here. That would only happen if
+ // this method is used as the weak callback for persistent handles not
+ // pointing to a date time formatter.
+ delete UnpackDateTimeFormat(persistent_object);
+
+ // Then dispose of the persistent handle to JS object.
+ persistent_object.Dispose();
+}
+
+v8::Handle<v8::Value> DateTimeFormat::Format(const v8::Arguments& args) {
+ v8::HandleScope handle_scope;
+
+ double millis = 0.0;
+ if (args.Length() != 1 || !args[0]->IsDate()) {
+ // Create a new date.
+ v8::TryCatch try_catch;
+ v8::Local<v8::Script> date_script =
+ v8::Script::Compile(v8::String::New("eval('new Date()')"));
+ v8::Local<v8::Value> date_value = date_script->Run();
+ if (try_catch.HasCaught()) {
+ return try_catch.ReThrow();
+ }
+ // Only use the result after the exception check; Run() returns an
+ // empty handle on failure.
+ millis = date_value->NumberValue();
+ } else {
+ millis = v8::Date::Cast(*args[0])->NumberValue();
+ }
+
+ icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+ if (!date_format) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ icu::UnicodeString result;
+ date_format->format(millis, result);
+
+ return v8::String::New(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
+}
+
+v8::Handle<v8::Value> DateTimeFormat::GetMonths(const v8::Arguments& args) {
+ icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+ if (!date_format) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
+
+ int32_t narrow_count;
+ const icu::UnicodeString* narrow = symbols->getMonths(
+ narrow_count,
+ icu::DateFormatSymbols::STANDALONE,
+ icu::DateFormatSymbols::NARROW);
+ int32_t abbrev_count;
+ const icu::UnicodeString* abbrev = symbols->getMonths(
+ abbrev_count,
+ icu::DateFormatSymbols::STANDALONE,
+ icu::DateFormatSymbols::ABBREVIATED);
+ int32_t wide_count;
+ const icu::UnicodeString* wide = symbols->getMonths(
+ wide_count,
+ icu::DateFormatSymbols::STANDALONE,
+ icu::DateFormatSymbols::WIDE);
+
+ return GetSymbols(
+ args, narrow, narrow_count, abbrev, abbrev_count, wide, wide_count);
+}
+
+v8::Handle<v8::Value> DateTimeFormat::GetWeekdays(const v8::Arguments& args) {
+ icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+ if (!date_format) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
+
+ int32_t narrow_count;
+ const icu::UnicodeString* narrow = symbols->getWeekdays(
+ narrow_count,
+ icu::DateFormatSymbols::STANDALONE,
+ icu::DateFormatSymbols::NARROW);
+ int32_t abbrev_count;
+ const icu::UnicodeString* abbrev = symbols->getWeekdays(
+ abbrev_count,
+ icu::DateFormatSymbols::STANDALONE,
+ icu::DateFormatSymbols::ABBREVIATED);
+ int32_t wide_count;
+ const icu::UnicodeString* wide = symbols->getWeekdays(
+ wide_count,
+ icu::DateFormatSymbols::STANDALONE,
+ icu::DateFormatSymbols::WIDE);
+
+ // getXXXWeekdays always returns 8 elements - ICU stable API.
+ // We can't use ASSERT_EQ(8, narrow_count) because ASSERT is internal to v8.
+ if (narrow_count != 8 || abbrev_count != 8 || wide_count != 8) {
+ return v8::ThrowException(v8::Exception::Error(
+ v8::String::New("Failed to get weekday information.")));
+ }
+
+ // ICU documentation says we should ignore element 0 of the returned array.
+ return GetSymbols(args, narrow + 1, narrow_count - 1, abbrev + 1,
+ abbrev_count - 1, wide + 1, wide_count - 1);
+}
+
+v8::Handle<v8::Value> DateTimeFormat::GetEras(const v8::Arguments& args) {
+ icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+ if (!date_format) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
+
+ int32_t narrow_count;
+ const icu::UnicodeString* narrow = symbols->getNarrowEras(narrow_count);
+ int32_t abbrev_count;
+ const icu::UnicodeString* abbrev = symbols->getEras(abbrev_count);
+ int32_t wide_count;
+ const icu::UnicodeString* wide = symbols->getEraNames(wide_count);
+
+ return GetSymbols(
+ args, narrow, narrow_count, abbrev, abbrev_count, wide, wide_count);
+}
+
+v8::Handle<v8::Value> DateTimeFormat::GetAmPm(const v8::Arguments& args) {
+ icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+ if (!date_format) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
+
+ // In this case narrow == abbreviated == wide
+ int32_t count;
+ const icu::UnicodeString* wide = symbols->getAmPmStrings(count);
+
+ return GetSymbols(args, wide, count, wide, count, wide, count);
+}
+
+v8::Handle<v8::Value> DateTimeFormat::JSDateTimeFormat(
+ const v8::Arguments& args) {
+ v8::HandleScope handle_scope;
+
+ if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsObject()) {
+ return v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Locale and date/time options are required.")));
+ }
+
+ icu::SimpleDateFormat* date_format = static_cast<icu::SimpleDateFormat*>(
+ CreateDateTimeFormat(args[0]->ToString(), args[1]->ToObject()));
+
+ if (datetime_format_template_.IsEmpty()) {
+ v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
+
+ raw_template->SetClassName(v8::String::New("v8Locale.DateTimeFormat"));
+
+ // Define internal field count on instance template.
+ v8::Local<v8::ObjectTemplate> object_template =
+ raw_template->InstanceTemplate();
+
+ // Set aside internal field for icu date time formatter.
+ object_template->SetInternalFieldCount(1);
+
+ // Define all of the prototype methods on prototype template.
+ v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
+ proto->Set(v8::String::New("format"),
+ v8::FunctionTemplate::New(Format));
+ proto->Set(v8::String::New("getMonths"),
+ v8::FunctionTemplate::New(GetMonths));
+ proto->Set(v8::String::New("getWeekdays"),
+ v8::FunctionTemplate::New(GetWeekdays));
+ proto->Set(v8::String::New("getEras"),
+ v8::FunctionTemplate::New(GetEras));
+ proto->Set(v8::String::New("getAmPm"),
+ v8::FunctionTemplate::New(GetAmPm));
+
+ datetime_format_template_ =
+ v8::Persistent<v8::FunctionTemplate>::New(raw_template);
+ }
+
+ // Create an empty object wrapper.
+ v8::Local<v8::Object> local_object =
+ datetime_format_template_->GetFunction()->NewInstance();
+ v8::Persistent<v8::Object> wrapper =
+ v8::Persistent<v8::Object>::New(local_object);
+
+ // Set date time formatter as internal field of the resulting JS object.
+ wrapper->SetPointerInInternalField(0, date_format);
+
+ // Set resolved pattern in options.pattern.
+ icu::UnicodeString pattern;
+ date_format->toPattern(pattern);
+ v8::Local<v8::Object> options = v8::Object::New();
+ options->Set(v8::String::New("pattern"),
+ v8::String::New(reinterpret_cast<const uint16_t*>(
+ pattern.getBuffer()), pattern.length()));
+ wrapper->Set(v8::String::New("options"), options);
+
+ // Make object handle weak so we can delete the formatter once GC kicks in.
+ wrapper.MakeWeak(NULL, DeleteDateTimeFormat);
+
+ return wrapper;
+}
+
+// Returns SimpleDateFormat.
+static icu::DateFormat* CreateDateTimeFormat(
+ v8::Handle<v8::String> locale, v8::Handle<v8::Object> settings) {
+ v8::HandleScope handle_scope;
+
+ v8::String::AsciiValue ascii_locale(locale);
+ icu::Locale icu_locale(*ascii_locale);
+
+ // Make formatter from skeleton.
+ icu::SimpleDateFormat* date_format = NULL;
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString skeleton;
+ if (I18NUtils::ExtractStringSetting(settings, "skeleton", &skeleton)) {
+ // Use a plain pointer here: v8::Local is for V8 handles, not ICU
+ // objects, and the generator must be deleted to avoid a leak.
+ icu::DateTimePatternGenerator* generator =
+ icu::DateTimePatternGenerator::createInstance(icu_locale, status);
+ icu::UnicodeString pattern =
+ generator->getBestPattern(skeleton, status);
+ delete generator;
+
+ date_format = new icu::SimpleDateFormat(pattern, icu_locale, status);
+ if (U_SUCCESS(status)) {
+ return date_format;
+ } else {
+ delete date_format;
+ }
+ }
+
+ // Extract date style and time style from settings.
+ icu::UnicodeString date_style;
+ icu::DateFormat::EStyle icu_date_style = icu::DateFormat::kNone;
+ if (I18NUtils::ExtractStringSetting(settings, "dateStyle", &date_style)) {
+ icu_date_style = GetDateTimeStyle(date_style);
+ }
+
+ icu::UnicodeString time_style;
+ icu::DateFormat::EStyle icu_time_style = icu::DateFormat::kNone;
+ if (I18NUtils::ExtractStringSetting(settings, "timeStyle", &time_style)) {
+ icu_time_style = GetDateTimeStyle(time_style);
+ }
+
+ // Try all combinations of date/time styles.
+ if (icu_date_style == icu::DateFormat::kNone &&
+ icu_time_style == icu::DateFormat::kNone) {
+ // Return default short date, short time format.
+ return icu::DateFormat::createDateTimeInstance(
+ icu::DateFormat::kShort, icu::DateFormat::kShort, icu_locale);
+ } else if (icu_date_style != icu::DateFormat::kNone &&
+ icu_time_style != icu::DateFormat::kNone) {
+ return icu::DateFormat::createDateTimeInstance(
+ icu_date_style, icu_time_style, icu_locale);
+ } else if (icu_date_style != icu::DateFormat::kNone) {
+ return icu::DateFormat::createDateInstance(icu_date_style, icu_locale);
+ } else {
+ // icu_time_style != icu::DateFormat::kNone
+ return icu::DateFormat::createTimeInstance(icu_time_style, icu_locale);
+ }
+}
+
+// Creates a v8::Array of narrow, abbrev or wide symbols.
+static v8::Handle<v8::Value> GetSymbols(const v8::Arguments& args,
+ const icu::UnicodeString* narrow,
+ int32_t narrow_count,
+ const icu::UnicodeString* abbrev,
+ int32_t abbrev_count,
+ const icu::UnicodeString* wide,
+ int32_t wide_count) {
+ v8::HandleScope handle_scope;
+
+ // Make wide width default.
+ const icu::UnicodeString* result = wide;
+ int32_t count = wide_count;
+
+ if (args.Length() == 1 && args[0]->IsString()) {
+ v8::String::AsciiValue ascii_value(args[0]);
+ if (strcmp(*ascii_value, "abbreviated") == 0) {
+ result = abbrev;
+ count = abbrev_count;
+ } else if (strcmp(*ascii_value, "narrow") == 0) {
+ result = narrow;
+ count = narrow_count;
+ }
+ }
+
+ v8::Handle<v8::Array> symbols = v8::Array::New();
+ for (int32_t i = 0; i < count; ++i) {
+ symbols->Set(i, v8::String::New(
+ reinterpret_cast<const uint16_t*>(result[i].getBuffer()),
+ result[i].length()));
+ }
+
+ return handle_scope.Close(symbols);
+}
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+ // Returns undefined, and schedules an exception to be thrown.
+ return v8::ThrowException(v8::Exception::Error(
+ v8::String::New("DateTimeFormat method called on an object "
+ "that is not a DateTimeFormat.")));
+}
+
+// Returns icu date/time style.
+static icu::DateFormat::EStyle GetDateTimeStyle(
+ const icu::UnicodeString& type) {
+ if (type == UNICODE_STRING_SIMPLE("medium")) {
+ return icu::DateFormat::kMedium;
+ } else if (type == UNICODE_STRING_SIMPLE("long")) {
+ return icu::DateFormat::kLong;
+ } else if (type == UNICODE_STRING_SIMPLE("full")) {
+ return icu::DateFormat::kFull;
+ }
+
+ return icu::DateFormat::kShort;
+}
+
+} } // namespace v8::internal
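
The skeleton branch of CreateDateTimeFormat is the usual ICU pairing of DateTimePatternGenerator (skeleton to locale-appropriate pattern) and SimpleDateFormat. A standalone sketch of that flow (the skeleton, locale, and output handling are illustrative):

#include <stdio.h>

#include <string>

#include "unicode/calendar.h"
#include "unicode/dtptngen.h"
#include "unicode/locid.h"
#include "unicode/smpdtfmt.h"
#include "unicode/unistr.h"

int main() {
  UErrorCode status = U_ZERO_ERROR;
  icu::Locale locale("en_US");

  // "yMMMd" becomes something like "MMM d, y" for en_US.
  icu::DateTimePatternGenerator* generator =
      icu::DateTimePatternGenerator::createInstance(locale, status);
  if (U_FAILURE(status)) return 1;
  icu::UnicodeString pattern =
      generator->getBestPattern(UNICODE_STRING_SIMPLE("yMMMd"), status);
  delete generator;

  icu::SimpleDateFormat formatter(pattern, locale, status);
  if (U_FAILURE(status)) return 1;

  // UDate is milliseconds since the epoch, the same scale JS Date uses,
  // which is why Format() above can pass NumberValue() straight through.
  icu::UnicodeString result;
  formatter.format(icu::Calendar::getNow(), result);

  std::string utf8;
  result.toUTF8String(utf8);
  printf("%s\n", utf8.c_str());
  return 0;
}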
diff --git a/deps/v8/src/extensions/experimental/datetime-format.h b/deps/v8/src/extensions/experimental/datetime-format.h
new file mode 100644
index 000000000..a6a228c77
--- /dev/null
+++ b/deps/v8/src/extensions/experimental/datetime-format.h
@@ -0,0 +1,83 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
+
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class SimpleDateFormat;
+}
+
+namespace v8 {
+namespace internal {
+
+class DateTimeFormat {
+ public:
+ static v8::Handle<v8::Value> JSDateTimeFormat(const v8::Arguments& args);
+
+ // Helper methods for various bindings.
+
+ // Unpacks date format object from corresponding JavaScript object.
+ static icu::SimpleDateFormat* UnpackDateTimeFormat(
+ v8::Handle<v8::Object> obj);
+
+ // Release memory we allocated for the DateFormat once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteDateTimeFormat(v8::Persistent<v8::Value> object,
+ void* param);
+
+ // Formats date and returns corresponding string.
+ static v8::Handle<v8::Value> Format(const v8::Arguments& args);
+
+ // All date time symbol methods below return stand-alone names in
+ // either narrow, abbreviated or wide width.
+
+ // Get list of months.
+ static v8::Handle<v8::Value> GetMonths(const v8::Arguments& args);
+
+ // Get list of weekdays.
+ static v8::Handle<v8::Value> GetWeekdays(const v8::Arguments& args);
+
+ // Get list of eras.
+ static v8::Handle<v8::Value> GetEras(const v8::Arguments& args);
+
+ // Get list of day periods.
+ static v8::Handle<v8::Value> GetAmPm(const v8::Arguments& args);
+
+ private:
+ DateTimeFormat();
+
+ static v8::Persistent<v8::FunctionTemplate> datetime_format_template_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
diff --git a/deps/v8/src/extensions/experimental/experimental.gyp b/deps/v8/src/extensions/experimental/experimental.gyp
index 4d7a9363b..24fb68316 100644
--- a/deps/v8/src/extensions/experimental/experimental.gyp
+++ b/deps/v8/src/extensions/experimental/experimental.gyp
@@ -37,17 +37,69 @@
'target_name': 'i18n_api',
'type': 'static_library',
'sources': [
+ 'break-iterator.cc',
+ 'break-iterator.h',
+ 'collator.cc',
+ 'collator.h',
+ 'datetime-format.cc',
+ 'datetime-format.h',
'i18n-extension.cc',
'i18n-extension.h',
+ 'i18n-locale.cc',
+ 'i18n-locale.h',
+ 'i18n-natives.h',
+ 'i18n-utils.cc',
+ 'i18n-utils.h',
+ 'language-matcher.cc',
+ 'language-matcher.h',
+ 'number-format.cc',
+ 'number-format.h',
+ '<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
],
'include_dirs': [
'<(icu_src_dir)/public/common',
- '../..',
+ # v8/ is the root for all includes.
+ '../../..'
],
'dependencies': [
'<(icu_src_dir)/icu.gyp:*',
+ 'js2c_i18n#host',
'../../../tools/gyp/v8.gyp:v8',
],
+ 'direct_dependent_settings': {
+ # Adds -Iv8 for embedders.
+ 'include_dirs': [
+ '../../..'
+ ],
+ },
+ },
+ {
+ 'target_name': 'js2c_i18n',
+ 'type': 'none',
+ 'toolsets': ['host'],
+ 'variables': {
+ 'js_files': [
+ 'i18n.js'
+ ],
+ },
+ 'actions': [
+ {
+ 'action_name': 'js2c_i18n',
+ 'inputs': [
+ 'i18n-js2c.py',
+ '<@(js_files)',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
+ ],
+ 'action': [
+ 'python',
+ 'i18n-js2c.py',
+ '<@(_outputs)',
+ '<@(js_files)'
+ ],
+ },
+ ],
},
], # targets
}
diff --git a/deps/v8/src/extensions/experimental/i18n-extension.cc b/deps/v8/src/extensions/experimental/i18n-extension.cc
index a721ba5ec..c5afcf0bf 100644
--- a/deps/v8/src/extensions/experimental/i18n-extension.cc
+++ b/deps/v8/src/extensions/experimental/i18n-extension.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,231 +25,41 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "i18n-extension.h"
+#include "src/extensions/experimental/i18n-extension.h"
-#include <algorithm>
-#include <string>
-
-#include "unicode/locid.h"
-#include "unicode/uloc.h"
+#include "src/extensions/experimental/break-iterator.h"
+#include "src/extensions/experimental/collator.h"
+#include "src/extensions/experimental/datetime-format.h"
+#include "src/extensions/experimental/i18n-locale.h"
+#include "src/extensions/experimental/i18n-natives.h"
+#include "src/extensions/experimental/number-format.h"
namespace v8 {
namespace internal {
I18NExtension* I18NExtension::extension_ = NULL;
-// TODO(cira): maybe move JS code to a .js file and generate cc files from it?
-// TODO(cira): Remove v8 prefix from v8Locale once we have stable API.
-const char* const I18NExtension::kSource =
- "v8Locale = function(optLocale) {"
- " native function NativeJSLocale();"
- " var properties = NativeJSLocale(optLocale);"
- " this.locale = properties.locale;"
- " this.language = properties.language;"
- " this.script = properties.script;"
- " this.region = properties.region;"
- "};"
- "v8Locale.availableLocales = function() {"
- " native function NativeJSAvailableLocales();"
- " return NativeJSAvailableLocales();"
- "};"
- "v8Locale.prototype.maximizedLocale = function() {"
- " native function NativeJSMaximizedLocale();"
- " return new v8Locale(NativeJSMaximizedLocale(this.locale));"
- "};"
- "v8Locale.prototype.minimizedLocale = function() {"
- " native function NativeJSMinimizedLocale();"
- " return new v8Locale(NativeJSMinimizedLocale(this.locale));"
- "};"
- "v8Locale.prototype.displayLocale_ = function(displayLocale) {"
- " var result = this.locale;"
- " if (displayLocale !== undefined) {"
- " result = displayLocale.locale;"
- " }"
- " return result;"
- "};"
- "v8Locale.prototype.displayLanguage = function(optDisplayLocale) {"
- " var displayLocale = this.displayLocale_(optDisplayLocale);"
- " native function NativeJSDisplayLanguage();"
- " return NativeJSDisplayLanguage(this.locale, displayLocale);"
- "};"
- "v8Locale.prototype.displayScript = function(optDisplayLocale) {"
- " var displayLocale = this.displayLocale_(optDisplayLocale);"
- " native function NativeJSDisplayScript();"
- " return NativeJSDisplayScript(this.locale, displayLocale);"
- "};"
- "v8Locale.prototype.displayRegion = function(optDisplayLocale) {"
- " var displayLocale = this.displayLocale_(optDisplayLocale);"
- " native function NativeJSDisplayRegion();"
- " return NativeJSDisplayRegion(this.locale, displayLocale);"
- "};"
- "v8Locale.prototype.displayName = function(optDisplayLocale) {"
- " var displayLocale = this.displayLocale_(optDisplayLocale);"
- " native function NativeJSDisplayName();"
- " return NativeJSDisplayName(this.locale, displayLocale);"
- "};";
+I18NExtension::I18NExtension()
+ : v8::Extension("v8/i18n", I18Natives::GetScriptSource()) {
+}
v8::Handle<v8::FunctionTemplate> I18NExtension::GetNativeFunction(
v8::Handle<v8::String> name) {
if (name->Equals(v8::String::New("NativeJSLocale"))) {
- return v8::FunctionTemplate::New(JSLocale);
- } else if (name->Equals(v8::String::New("NativeJSAvailableLocales"))) {
- return v8::FunctionTemplate::New(JSAvailableLocales);
- } else if (name->Equals(v8::String::New("NativeJSMaximizedLocale"))) {
- return v8::FunctionTemplate::New(JSMaximizedLocale);
- } else if (name->Equals(v8::String::New("NativeJSMinimizedLocale"))) {
- return v8::FunctionTemplate::New(JSMinimizedLocale);
- } else if (name->Equals(v8::String::New("NativeJSDisplayLanguage"))) {
- return v8::FunctionTemplate::New(JSDisplayLanguage);
- } else if (name->Equals(v8::String::New("NativeJSDisplayScript"))) {
- return v8::FunctionTemplate::New(JSDisplayScript);
- } else if (name->Equals(v8::String::New("NativeJSDisplayRegion"))) {
- return v8::FunctionTemplate::New(JSDisplayRegion);
- } else if (name->Equals(v8::String::New("NativeJSDisplayName"))) {
- return v8::FunctionTemplate::New(JSDisplayName);
+ return v8::FunctionTemplate::New(I18NLocale::JSLocale);
+ } else if (name->Equals(v8::String::New("NativeJSBreakIterator"))) {
+ return v8::FunctionTemplate::New(BreakIterator::JSBreakIterator);
+ } else if (name->Equals(v8::String::New("NativeJSCollator"))) {
+ return v8::FunctionTemplate::New(Collator::JSCollator);
+ } else if (name->Equals(v8::String::New("NativeJSDateTimeFormat"))) {
+ return v8::FunctionTemplate::New(DateTimeFormat::JSDateTimeFormat);
+ } else if (name->Equals(v8::String::New("NativeJSNumberFormat"))) {
+ return v8::FunctionTemplate::New(NumberFormat::JSNumberFormat);
}
return v8::Handle<v8::FunctionTemplate>();
}
-v8::Handle<v8::Value> I18NExtension::JSLocale(const v8::Arguments& args) {
- // TODO(cira): Fetch browser locale. Accept en-US as good default for now.
- // We could possibly pass browser locale as a parameter in the constructor.
- std::string locale_name("en-US");
- if (args.Length() == 1 && args[0]->IsString()) {
- locale_name = *v8::String::Utf8Value(args[0]->ToString());
- }
-
- v8::Local<v8::Object> locale = v8::Object::New();
- locale->Set(v8::String::New("locale"), v8::String::New(locale_name.c_str()));
-
- icu::Locale icu_locale(locale_name.c_str());
-
- const char* language = icu_locale.getLanguage();
- locale->Set(v8::String::New("language"), v8::String::New(language));
-
- const char* script = icu_locale.getScript();
- if (strlen(script)) {
- locale->Set(v8::String::New("script"), v8::String::New(script));
- }
-
- const char* region = icu_locale.getCountry();
- if (strlen(region)) {
- locale->Set(v8::String::New("region"), v8::String::New(region));
- }
-
- return locale;
-}
-
-// TODO(cira): Filter out locales that Chrome doesn't support.
-v8::Handle<v8::Value> I18NExtension::JSAvailableLocales(
- const v8::Arguments& args) {
- v8::Local<v8::Array> all_locales = v8::Array::New();
-
- int count = 0;
- const Locale* icu_locales = icu::Locale::getAvailableLocales(count);
- for (int i = 0; i < count; ++i) {
- all_locales->Set(i, v8::String::New(icu_locales[i].getName()));
- }
-
- return all_locales;
-}
-
-// Use - as tag separator, not _ that ICU uses.
-static std::string NormalizeLocale(const std::string& locale) {
- std::string result(locale);
- // TODO(cira): remove STL dependency.
- std::replace(result.begin(), result.end(), '_', '-');
- return result;
-}
-
-v8::Handle<v8::Value> I18NExtension::JSMaximizedLocale(
- const v8::Arguments& args) {
- if (!args.Length() || !args[0]->IsString()) {
- return v8::Undefined();
- }
-
- UErrorCode status = U_ZERO_ERROR;
- std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
- char max_locale[ULOC_FULLNAME_CAPACITY];
- uloc_addLikelySubtags(locale_name.c_str(), max_locale,
- sizeof(max_locale), &status);
- if (U_FAILURE(status)) {
- return v8::Undefined();
- }
-
- return v8::String::New(NormalizeLocale(max_locale).c_str());
-}
-
-v8::Handle<v8::Value> I18NExtension::JSMinimizedLocale(
- const v8::Arguments& args) {
- if (!args.Length() || !args[0]->IsString()) {
- return v8::Undefined();
- }
-
- UErrorCode status = U_ZERO_ERROR;
- std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
- char min_locale[ULOC_FULLNAME_CAPACITY];
- uloc_minimizeSubtags(locale_name.c_str(), min_locale,
- sizeof(min_locale), &status);
- if (U_FAILURE(status)) {
- return v8::Undefined();
- }
-
- return v8::String::New(NormalizeLocale(min_locale).c_str());
-}
-
-// Common code for JSDisplayXXX methods.
-static v8::Handle<v8::Value> GetDisplayItem(const v8::Arguments& args,
- const std::string& item) {
- if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
- return v8::Undefined();
- }
-
- std::string base_locale = *v8::String::Utf8Value(args[0]->ToString());
- icu::Locale icu_locale(base_locale.c_str());
- icu::Locale display_locale =
- icu::Locale(*v8::String::Utf8Value(args[1]->ToString()));
- UnicodeString result;
- if (item == "language") {
- icu_locale.getDisplayLanguage(display_locale, result);
- } else if (item == "script") {
- icu_locale.getDisplayScript(display_locale, result);
- } else if (item == "region") {
- icu_locale.getDisplayCountry(display_locale, result);
- } else if (item == "name") {
- icu_locale.getDisplayName(display_locale, result);
- } else {
- return v8::Undefined();
- }
-
- if (result.length()) {
- return v8::String::New(
- reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
- }
-
- return v8::Undefined();
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayLanguage(
- const v8::Arguments& args) {
- return GetDisplayItem(args, "language");
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayScript(
- const v8::Arguments& args) {
- return GetDisplayItem(args, "script");
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayRegion(
- const v8::Arguments& args) {
- return GetDisplayItem(args, "region");
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayName(const v8::Arguments& args) {
- return GetDisplayItem(args, "name");
-}
-
I18NExtension* I18NExtension::get() {
if (!extension_) {
extension_ = new I18NExtension();
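
For embedders, the Register()/get() pair hooks into V8's standard extension mechanism of this era: register the extension once, then name it in the ExtensionConfiguration passed to Context::New(). A hedged sketch of the host-side wiring, against the V8 3.x API; the helper function is an assumption, and only "v8/i18n" and I18NExtension come from the diff:

#include "include/v8.h"

#include "src/extensions/experimental/i18n-extension.h"

// Hypothetical embedder helper: creates a context with the i18n
// extension enabled, so scripts in it can use v8Locale and friends.
v8::Persistent<v8::Context> CreateContextWithI18N() {
  // Registers the singleton instance with v8::RegisterExtension().
  v8::internal::I18NExtension::Register();

  // Request the extension by the name given to the v8::Extension ctor.
  const char* extension_names[] = { "v8/i18n" };
  v8::ExtensionConfiguration extensions(1, extension_names);

  return v8::Context::New(&extensions);
}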
diff --git a/deps/v8/src/extensions/experimental/i18n-extension.h b/deps/v8/src/extensions/experimental/i18n-extension.h
index 629332bab..5401f2504 100644
--- a/deps/v8/src/extensions/experimental/i18n-extension.h
+++ b/deps/v8/src/extensions/experimental/i18n-extension.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,7 +28,7 @@
#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
#define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
-#include <v8.h>
+#include "include/v8.h"
namespace v8 {
namespace internal {
@@ -36,26 +36,16 @@ namespace internal {
class I18NExtension : public v8::Extension {
public:
- I18NExtension() : v8::Extension("v8/i18n", kSource) {}
+ I18NExtension();
+
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
v8::Handle<v8::String> name);
- // Implementations of window.Locale methods.
- static v8::Handle<v8::Value> JSLocale(const v8::Arguments& args);
- static v8::Handle<v8::Value> JSAvailableLocales(const v8::Arguments& args);
- static v8::Handle<v8::Value> JSMaximizedLocale(const v8::Arguments& args);
- static v8::Handle<v8::Value> JSMinimizedLocale(const v8::Arguments& args);
- static v8::Handle<v8::Value> JSDisplayLanguage(const v8::Arguments& args);
- static v8::Handle<v8::Value> JSDisplayScript(const v8::Arguments& args);
- static v8::Handle<v8::Value> JSDisplayRegion(const v8::Arguments& args);
- static v8::Handle<v8::Value> JSDisplayName(const v8::Arguments& args);
-
// V8 code prefers Register, while Chrome and WebKit use get-style methods.
static void Register();
static I18NExtension* get();
private:
- static const char* const kSource;
static I18NExtension* extension_;
};
diff --git a/deps/v8/src/extensions/experimental/i18n-js2c.py b/deps/v8/src/extensions/experimental/i18n-js2c.py
new file mode 100644
index 000000000..9c3128bd2
--- /dev/null
+++ b/deps/v8/src/extensions/experimental/i18n-js2c.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This is a utility for converting I18N JavaScript source code into C-style
+# char arrays. It is used for embedded JavaScript code in the V8
+# library.
+# This is a pared down copy of v8/tools/js2c.py that avoids use of
+# v8/src/natives.h and produces different cc template.
+
+import re, sys
+
+
+def ToCArray(lines):
+ result = []
+ for chr in lines:
+ value = ord(chr)
+ assert value < 128
+ result.append(str(value))
+ result.append("0")
+ return ", ".join(result)
+
+
+def RemoveCommentsAndTrailingWhitespace(lines):
+ lines = re.sub(r'//.*\n', '\n', lines) # end-of-line comments
+ lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments.
+ lines = re.sub(r'\s+\n+', '\n', lines) # trailing whitespace
+ return lines
+
+
+def ReadFile(filename):
+ file = open(filename, "rt")
+ try:
+ lines = file.read()
+ finally:
+ file.close()
+ return lines
+
+
+EVAL_PATTERN = re.compile(r'\beval\s*\(')
+WITH_PATTERN = re.compile(r'\bwith\s*\(')
+
+
+def Validate(lines, file):
+ lines = RemoveCommentsAndTrailingWhitespace(lines)
+ # Because of the simplified context setup, eval and with are not
+ # allowed in the natives files.
+ eval_match = EVAL_PATTERN.search(lines)
+ if eval_match:
+ raise ("Eval disallowed in natives: %s" % file)
+ with_match = WITH_PATTERN.search(lines)
+ if with_match:
+ raise ("With statements disallowed in natives: %s" % file)
+
+
+HEADER_TEMPLATE = """\
+// Copyright 2011 Google Inc. All Rights Reserved.
+
+// This file was generated from .js source files by gyp. If you
+// want to make changes to this file you should either change the
+// javascript source files or the i18n-js2c.py script.
+
+#include "src/extensions/experimental/i18n-natives.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+const char* I18Natives::GetScriptSource() {
+ // JavaScript source gets injected here.
+ static const char i18n_source[] = {%s};
+
+ return i18n_source;
+}
+
+} // internal
+} // v8
+"""
+
+
+def JS2C(source, target):
+ filename = str(source)
+
+ lines = ReadFile(filename)
+ Validate(lines, filename)
+ data = ToCArray(lines)
+
+ # Emit result
+ output = open(target, "w")
+ output.write(HEADER_TEMPLATE % data)
+ output.close()
+
+
+def main():
+ target = sys.argv[1]
+ source = sys.argv[2]
+ JS2C(source, target)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/deps/v8/src/extensions/experimental/i18n-locale.cc b/deps/v8/src/extensions/experimental/i18n-locale.cc
new file mode 100644
index 000000000..46a5f87e1
--- /dev/null
+++ b/deps/v8/src/extensions/experimental/i18n-locale.cc
@@ -0,0 +1,111 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/i18n-locale.h"
+
+#include "src/extensions/experimental/i18n-utils.h"
+#include "src/extensions/experimental/language-matcher.h"
+#include "unicode/locid.h"
+#include "unicode/uloc.h"
+
+namespace v8 {
+namespace internal {
+
+const char* const I18NLocale::kLocaleID = "localeID";
+const char* const I18NLocale::kRegionID = "regionID";
+const char* const I18NLocale::kICULocaleID = "icuLocaleID";
+
+v8::Handle<v8::Value> I18NLocale::JSLocale(const v8::Arguments& args) {
+ v8::HandleScope handle_scope;
+
+ if (args.Length() != 1 || !args[0]->IsObject()) {
+ return v8::Undefined();
+ }
+
+ v8::Local<v8::Object> settings = args[0]->ToObject();
+
+ // Get best match for locale.
+ v8::TryCatch try_catch;
+ v8::Handle<v8::Value> locale_id = settings->Get(v8::String::New(kLocaleID));
+ if (try_catch.HasCaught()) {
+ return v8::Undefined();
+ }
+
+ LocaleIDMatch result;
+ if (locale_id->IsArray()) {
+ LanguageMatcher::GetBestMatchForPriorityList(
+ v8::Handle<v8::Array>::Cast(locale_id), &result);
+ } else if (locale_id->IsString()) {
+ LanguageMatcher::GetBestMatchForString(locale_id->ToString(), &result);
+ } else {
+ LanguageMatcher::GetBestMatchForString(v8::String::New(""), &result);
+ }
+
+ // Get best match for region.
+ char region_id[ULOC_COUNTRY_CAPACITY];
+ I18NUtils::StrNCopy(region_id, ULOC_COUNTRY_CAPACITY, "");
+
+ v8::Handle<v8::Value> region = settings->Get(v8::String::New(kRegionID));
+ if (try_catch.HasCaught()) {
+ return v8::Undefined();
+ }
+
+ if (!GetBestMatchForRegionID(result.icu_id, region, region_id)) {
+ // Set region id to empty string because region couldn't be inferred.
+ I18NUtils::StrNCopy(region_id, ULOC_COUNTRY_CAPACITY, "");
+ }
+
+ // Build JavaScript object that contains bcp and icu locale ID and region ID.
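+ // e.g. {localeID: 'de-AT', icuLocaleID: 'de_AT', regionID: 'AT'}
+ // (illustrative values).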
+ v8::Handle<v8::Object> locale = v8::Object::New();
+ locale->Set(v8::String::New(kLocaleID), v8::String::New(result.bcp47_id));
+ locale->Set(v8::String::New(kICULocaleID), v8::String::New(result.icu_id));
+ locale->Set(v8::String::New(kRegionID), v8::String::New(region_id));
+
+ return handle_scope.Close(locale);
+}
+
+bool I18NLocale::GetBestMatchForRegionID(
+ const char* locale_id, v8::Handle<v8::Value> region_id, char* result) {
+ if (region_id->IsString() && region_id->ToString()->Length() != 0) {
+ icu::Locale user_locale(
+ icu::Locale("und", *v8::String::Utf8Value(region_id->ToString())));
+ I18NUtils::StrNCopy(
+ result, ULOC_COUNTRY_CAPACITY, user_locale.getCountry());
+ return true;
+ }
+ // Maximize locale_id to infer the region (e.g. expand "de" to "de-Latn-DE"
+ // and grab "DE" from the result).
+ UErrorCode status = U_ZERO_ERROR;
+ char maximized_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_addLikelySubtags(
+ locale_id, maximized_locale, ULOC_FULLNAME_CAPACITY, &status);
+ uloc_getCountry(maximized_locale, result, ULOC_COUNTRY_CAPACITY, &status);
+
+ return !U_FAILURE(status);
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/mips/register-allocator-mips.h b/deps/v8/src/extensions/experimental/i18n-locale.h
index e056fb807..607818ce5 100644
--- a/deps/v8/src/mips/register-allocator-mips.h
+++ b/deps/v8/src/extensions/experimental/i18n-locale.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,22 +25,36 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
-#include "mips/constants-mips.h"
+#include "include/v8.h"
namespace v8 {
namespace internal {
-class RegisterAllocatorConstants : public AllStatic {
+class I18NLocale {
public:
- static const int kNumRegisters = assembler::mips::kNumRegisters;
- static const int kInvalidRegister = assembler::mips::kInvalidRegister;
+ I18NLocale() {}
+
+ // Implementations of window.Locale methods.
+ static v8::Handle<v8::Value> JSLocale(const v8::Arguments& args);
+
+ // Infers the region id from the locale id, or uses the user-specified
+ // region id. The result is canonicalized.
+ // Returns the status of the ICU operation (maximize locale or get
+ // region call).
+ static bool GetBestMatchForRegionID(
+ const char* locale_id, v8::Handle<v8::Value> region_id, char* result);
+
+ private:
+ // Key name for localeID parameter.
+ static const char* const kLocaleID;
+ // Key name for regionID parameter.
+ static const char* const kRegionID;
+ // Key name for the icuLocaleID result.
+ static const char* const kICULocaleID;
};
-
} } // namespace v8::internal
-#endif // V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-
+#endif // V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
diff --git a/deps/v8/src/ia32/register-allocator-ia32.h b/deps/v8/src/extensions/experimental/i18n-natives.h
index e7ce91f4c..37362d0dd 100644
--- a/deps/v8/src/ia32/register-allocator-ia32.h
+++ b/deps/v8/src/extensions/experimental/i18n-natives.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,19 +25,19 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_H_
-#define V8_IA32_REGISTER_ALLOCATOR_IA32_H_
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
namespace v8 {
namespace internal {
-class RegisterAllocatorConstants : public AllStatic {
+class I18Natives {
public:
- static const int kNumRegisters = 5;
- static const int kInvalidRegister = -1;
+ // Gets script source from generated file.
+ // Source is statically allocated string.
+ static const char* GetScriptSource();
};
-
} } // namespace v8::internal
-#endif // V8_IA32_REGISTER_ALLOCATOR_IA32_H_
+#endif // V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
diff --git a/deps/v8/src/mips/fast-codegen-mips.cc b/deps/v8/src/extensions/experimental/i18n-utils.cc
index 186f9fadb..dc2be1a21 100644
--- a/deps/v8/src/mips/fast-codegen-mips.cc
+++ b/deps/v8/src/extensions/experimental/i18n-utils.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,53 +25,63 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
+#include "src/extensions/experimental/i18n-utils.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#include <string.h>
-#include "codegen-inl.h"
-#include "fast-codegen.h"
+#include "unicode/unistr.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
+// static
+void I18NUtils::StrNCopy(char* dest, int length, const char* src) {
+ if (!dest || !src) return;
-Register FastCodeGenerator::accumulator0() { return no_reg; }
-Register FastCodeGenerator::accumulator1() { return no_reg; }
-Register FastCodeGenerator::scratch0() { return no_reg; }
-Register FastCodeGenerator::scratch1() { return no_reg; }
-Register FastCodeGenerator::receiver_reg() { return no_reg; }
-Register FastCodeGenerator::context_reg() { return no_reg; }
-
-
-void FastCodeGenerator::Generate(CompilationInfo* info) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
- UNIMPLEMENTED_MIPS();
+ strncpy(dest, src, length);
+ dest[length - 1] = '\0';
}
-
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> name) {
- UNIMPLEMENTED_MIPS();
+// static
+bool I18NUtils::ExtractStringSetting(const v8::Handle<v8::Object>& settings,
+ const char* setting,
+ icu::UnicodeString* result) {
+ if (!setting || !result) return false;
+
+ v8::HandleScope handle_scope;
+ v8::TryCatch try_catch;
+ v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
+ if (try_catch.HasCaught()) {
+ return false;
+ }
+ // No need to check if |value| is empty because it's taken care of
+ // by TryCatch above.
+ if (!value->IsUndefined() && !value->IsNull() && value->IsString()) {
+ v8::String::Utf8Value utf8_value(value);
+ if (*utf8_value == NULL) return false;
+ result->setTo(icu::UnicodeString::fromUTF8(*utf8_value));
+ return true;
+ }
+ return false;
}
+// static
+void I18NUtils::AsciiToUChar(const char* source,
+ int32_t source_length,
+ UChar* target,
+ int32_t target_length) {
+ int32_t length =
+ source_length < target_length ? source_length : target_length;
-void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
- UNIMPLEMENTED_MIPS();
-}
+ if (length <= 0) {
+ return;
+ }
+ for (int32_t i = 0; i < length - 1; ++i) {
+ target[i] = static_cast<UChar>(source[i]);
+ }
-void FastCodeGenerator::EmitBitOr() {
- UNIMPLEMENTED_MIPS();
+ target[length - 1] = 0x0u;
}
-#undef __
-
-
} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/extensions/experimental/i18n-utils.h b/deps/v8/src/extensions/experimental/i18n-utils.h
new file mode 100644
index 000000000..7c31528be
--- /dev/null
+++ b/deps/v8/src/extensions/experimental/i18n-utils.h
@@ -0,0 +1,69 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
+
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class UnicodeString;
+}
+
+namespace v8 {
+namespace internal {
+
+class I18NUtils {
+ public:
+ // Safe string copy. Null terminates the destination. Copies at most
+ // (length - 1) bytes.
+ // We can't use snprintf since it's not supported on all relevant platforms.
+ // We can't use OS::SNPrintF, as it's only available to internal code.
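+ // For example, StrNCopy(buf, 4, "abcdef") leaves "abc" in buf.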
+ static void StrNCopy(char* dest, int length, const char* src);
+
+ // Extracts the string setting named |setting| from |settings| and writes
+ // it to |result|. Returns true if the setting is specified, false otherwise.
+ static bool ExtractStringSetting(const v8::Handle<v8::Object>& settings,
+ const char* setting,
+ icu::UnicodeString* result);
+
+ // Converts ASCII array into UChar array.
+ // Target is always \0 terminated.
+ static void AsciiToUChar(const char* source,
+ int32_t source_length,
+ UChar* target,
+ int32_t target_length);
+
+ private:
+ I18NUtils() {}
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
diff --git a/deps/v8/src/extensions/experimental/i18n.js b/deps/v8/src/extensions/experimental/i18n.js
new file mode 100644
index 000000000..56bcf9e47
--- /dev/null
+++ b/deps/v8/src/extensions/experimental/i18n.js
@@ -0,0 +1,380 @@
+// Copyright 2006-2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// TODO(cira): Rename v8Locale into LocaleInfo once we have stable API.
+/**
+ * LocaleInfo class is an aggregate class of all i18n API calls.
+ * @param {Object} settings - localeID and regionID to create LocaleInfo from.
+ * {Array.<string>|string} settings.localeID -
+ * Unicode identifier of the locale.
+ * See http://unicode.org/reports/tr35/#BCP_47_Conformance
+ * {string} settings.regionID - ISO3166 region ID with addition of
+ * invalid, undefined and reserved region codes.
+ * @constructor
+ */
+v8Locale = function(settings) {
+ native function NativeJSLocale();
+
+ // Assume user wanted to do v8Locale("sr");
+ if (typeof(settings) === "string") {
+ settings = {'localeID': settings};
+ }
+
+ var properties = NativeJSLocale(
+ v8Locale.__createSettingsOrDefault(settings, {'localeID': 'root'}));
+
+ // Keep the resolved ICU locale ID around to avoid resolving localeID to
+ // ICU locale ID every time BreakIterator, Collator and so forth are called.
+ this.__icuLocaleID = properties.icuLocaleID;
+ this.options = {'localeID': properties.localeID,
+ 'regionID': properties.regionID};
+};
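+
+// Example: new v8Locale('sr') creates a Serbian locale;
+// new v8Locale({'localeID': 'sr', 'regionID': 'RS'}) also sets the region.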
+
+/**
+ * Clones existing locale with possible overrides for some of the options.
+ * @param {!Object} settings - overrides for current locale settings.
+ * @returns {Object} - new LocaleInfo object.
+ */
+v8Locale.prototype.derive = function(settings) {
+ return new v8Locale(
+ v8Locale.__createSettingsOrDefault(settings, this.options));
+};
+
+/**
+ * v8BreakIterator class implements locale-aware segmentation.
+ * It is not part of the EcmaScript proposal.
+ * @param {Object} locale - locale object to pass to break
+ * iterator implementation.
+ * @param {string} type - type of segmentation:
+ * - character
+ * - word
+ * - sentence
+ * - line
+ * @private
+ * @constructor
+ */
+v8Locale.v8BreakIterator = function(locale, type) {
+ native function NativeJSBreakIterator();
+
+ locale = v8Locale.__createLocaleOrDefault(locale);
+ // BCP47 ID would work in this case, but we use ICU locale for consistency.
+ var iterator = NativeJSBreakIterator(locale.__icuLocaleID, type);
+ iterator.type = type;
+ return iterator;
+};
+
+/**
+ * Type of the break we encountered during previous iteration.
+ * @type{Enum}
+ */
+v8Locale.v8BreakIterator.BreakType = {
+ 'unknown': -1,
+ 'none': 0,
+ 'number': 100,
+ 'word': 200,
+ 'kana': 300,
+ 'ideo': 400
+};
+
+/**
+ * Creates new v8BreakIterator based on current locale.
+ * @param {string} - type of segmentation. See constructor.
+ * @returns {Object} - new v8BreakIterator object.
+ */
+v8Locale.prototype.v8CreateBreakIterator = function(type) {
+ return new v8Locale.v8BreakIterator(this, type);
+};
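+
+// Example: (new v8Locale('ja')).v8CreateBreakIterator('word') returns an
+// iterator whose type property is 'word'.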
+
+// TODO(jungshik): Set |collator.options| to actually recognized / resolved
+// values.
+/**
+ * Collator class implements locale-aware sort.
+ * @param {Object} locale - locale object to pass to collator implementation.
+ * @param {Object} settings - collation flags:
+ * - ignoreCase
+ * - ignoreAccents
+ * - numeric
+ * @private
+ * @constructor
+ */
+v8Locale.Collator = function(locale, settings) {
+ native function NativeJSCollator();
+
+ locale = v8Locale.__createLocaleOrDefault(locale);
+ var collator = NativeJSCollator(
+ locale.__icuLocaleID, v8Locale.__createSettingsOrDefault(settings, {}));
+ return collator;
+};
+
+/**
+ * Creates new Collator based on current locale.
+ * @param {Object} - collation flags. See constructor.
+ * @returns {Object} - new Collator object.
+ */
+v8Locale.prototype.createCollator = function(settings) {
+ return new v8Locale.Collator(this, settings);
+};
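+
+// Example: (new v8Locale('de')).createCollator({'ignoreCase': true}).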
+
+/**
+ * DateTimeFormat class implements locale-aware date and time formatting.
+ * Constructor is not part of public API.
+ * @param {Object} locale - locale object to pass to formatter.
+ * @param {Object} settings - formatting flags:
+ * - skeleton
+ * - dateStyle
+ * - timeStyle
+ * @private
+ * @constructor
+ */
+v8Locale.__DateTimeFormat = function(locale, settings) {
+ native function NativeJSDateTimeFormat();
+
+ settings = v8Locale.__createSettingsOrDefault(settings, {});
+
+ var cleanSettings = {};
+ if (settings.hasOwnProperty('skeleton')) {
+ cleanSettings['skeleton'] = settings['skeleton'];
+ } else {
+ cleanSettings = {};
+ if (settings.hasOwnProperty('dateStyle')) {
+ var ds = settings['dateStyle'];
+ if (!/^(short|medium|long|full)$/.test(ds)) ds = 'short';
+ cleanSettings['dateStyle'] = ds;
+ } else if (settings.hasOwnProperty('dateType')) {
+ // Obsolete. New spec requires dateStyle, but we'll keep this around
+ // for current users.
+ // TODO(cira): Remove when all internal users switch to dateStyle.
+ var dt = settings['dateType'];
+ if (!/^(short|medium|long|full)$/.test(dt)) dt = 'short';
+ cleanSettings['dateStyle'] = dt;
+ }
+
+ if (settings.hasOwnProperty('timeStyle')) {
+ var ts = settings['timeStyle'];
+ if (!/^(short|medium|long|full)$/.test(ts)) ts = 'short';
+ cleanSettings['timeStyle'] = ts;
+ } else if (settings.hasOwnProperty('timeType')) {
+ // TODO(cira): Remove when all internal users switch to timeStyle.
+ var tt = settings['timeType'];
+ if (!/^(short|medium|long|full)$/.test(tt)) tt = 'short';
+ cleanSettings['timeStyle'] = tt;
+ }
+ }
+
+ // Default is to show short date and time.
+ if (!cleanSettings.hasOwnProperty('skeleton') &&
+ !cleanSettings.hasOwnProperty('dateStyle') &&
+ !cleanSettings.hasOwnProperty('timeStyle')) {
+ cleanSettings = {'dateStyle': 'short',
+ 'timeStyle': 'short'};
+ }
+
+ locale = v8Locale.__createLocaleOrDefault(locale);
+ var formatter = NativeJSDateTimeFormat(locale.__icuLocaleID, cleanSettings);
+
+ // NativeJSDateTimeFormat creates formatter.options for us; we just need
+ // to append the actual settings to it.
+ for (var key in cleanSettings) {
+ formatter.options[key] = cleanSettings[key];
+ }
+
+ /**
+ * Clones existing date time format with possible overrides for some
+ * of the options.
+ * @param {!Object} overrideSettings - overrides for current format settings.
+ * @returns {Object} - new DateTimeFormat object.
+ * @public
+ */
+ formatter.derive = function(overrideSettings) {
+ // To remove a setting, the user can specify undefined as its value;
+ // we remove it from the map in that case.
+ for (var prop in overrideSettings) {
+ if (settings.hasOwnProperty(prop) && !overrideSettings[prop]) {
+ delete settings[prop];
+ }
+ }
+ return new v8Locale.__DateTimeFormat(
+ locale, v8Locale.__createSettingsOrDefault(overrideSettings, settings));
+ };
+
+ return formatter;
+};
+
+/**
+ * Creates new DateTimeFormat based on current locale.
+ * @param {Object} - formatting flags. See constructor.
+ * @returns {Object} - new DateTimeFormat object.
+ */
+v8Locale.prototype.createDateTimeFormat = function(settings) {
+ return new v8Locale.__DateTimeFormat(this, settings);
+};
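+
+// Example: locale.createDateTimeFormat({'dateStyle': 'long',
+// 'timeStyle': 'short'}); invalid style values fall back to 'short'.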
+
+/**
+ * NumberFormat class implements locale-aware number formatting.
+ * Constructor is not part of public API.
+ * @param {Object} locale - locale object to pass to formatter.
+ * @param {Object} settings - formatting flags:
+ * - skeleton
+ * - pattern
+ * - style - decimal, currency, percent or scientific
+ * - currencyCode - ISO 4217 3-letter currency code
+ * @private
+ * @constructor
+ */
+v8Locale.__NumberFormat = function(locale, settings) {
+ native function NativeJSNumberFormat();
+
+ settings = v8Locale.__createSettingsOrDefault(settings, {});
+
+ var cleanSettings = {};
+ if (settings.hasOwnProperty('skeleton')) {
+ // Assign skeleton to cleanSettings and fix an invalid currency pattern
+ // if present - a run like '\u00a4\u00a4x\u00a4' collapses to '\u00a4'.
+ cleanSettings['skeleton'] =
+ settings['skeleton'].replace(/\u00a4+[^\u00a4]+\u00a4+/g, '\u00a4');
+ } else if (settings.hasOwnProperty('pattern')) {
+ cleanSettings['pattern'] = settings['pattern'];
+ } else if (settings.hasOwnProperty('style')) {
+ var style = settings['style'];
+ if (!/^(decimal|currency|percent|scientific)$/.test(style)) {
+ style = 'decimal';
+ }
+ cleanSettings['style'] = style;
+ }
+
+ // Default is to show decimal style.
+ if (!cleanSettings.hasOwnProperty('skeleton') &&
+ !cleanSettings.hasOwnProperty('pattern') &&
+ !cleanSettings.hasOwnProperty('style')) {
+ cleanSettings = {'style': 'decimal'};
+ }
+
+ // Add currency code if available and valid (3-letter ASCII code).
+ if (settings.hasOwnProperty('currencyCode') &&
+ /^[a-zA-Z]{3}$/.test(settings['currencyCode'])) {
+ cleanSettings['currencyCode'] = settings['currencyCode'].toUpperCase();
+ }
+
+ locale = v8Locale.__createLocaleOrDefault(locale);
+ // Pass in region ID for proper currency detection. Use ZZ if region is empty.
+ var region = locale.options.regionID !== '' ? locale.options.regionID : 'ZZ';
+ var formatter = NativeJSNumberFormat(
+ locale.__icuLocaleID, 'und_' + region, cleanSettings);
+
+ // ICU doesn't always uppercase the currency code.
+ if (formatter.options.hasOwnProperty('currencyCode')) {
+ formatter.options['currencyCode'] =
+ formatter.options['currencyCode'].toUpperCase();
+ }
+
+ for (var key in cleanSettings) {
+ // Don't overwrite keys that are already in formatter.options.
+ if (formatter.options.hasOwnProperty(key)) continue;
+
+ formatter.options[key] = cleanSettings[key];
+ }
+
+ /**
+ * Clones existing number format with possible overrides for some
+ * of the options.
+ * @param {!Object} overrideSettings - overrides for current format settings.
+ * @returns {Object} - new or cached NumberFormat object.
+ * @public
+ */
+ formatter.derive = function(overrideSettings) {
+ // To remove a setting, the user can specify undefined as its value;
+ // we remove it from the map in that case.
+ for (var prop in overrideSettings) {
+ if (settings.hasOwnProperty(prop) && !overrideSettings[prop]) {
+ delete settings[prop];
+ }
+ }
+ return new v8Locale.__NumberFormat(
+ locale, v8Locale.__createSettingsOrDefault(overrideSettings, settings));
+ };
+
+ return formatter;
+};
+
+/**
+ * Creates new NumberFormat based on current locale.
+ * @param {Object} - formatting flags. See constructor.
+ * @returns {Object} - new or cached NumberFormat object.
+ */
+v8Locale.prototype.createNumberFormat = function(settings) {
+ return new v8Locale.__NumberFormat(this, settings);
+};
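+
+// Example: locale.createNumberFormat({'style': 'currency',
+// 'currencyCode': 'EUR'}); invalid style values fall back to 'decimal'.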
+
+/**
+ * Merges user settings and defaults.
+ * Settings that are not of object type are rejected.
+ * Actual property values are not validated, but whitespace is trimmed if they
+ * are strings.
+ * @param {!Object} settings - user provided settings.
+ * @param {!Object} defaults - default values for this type of settings.
+ * @returns {Object} - valid settings object.
+ * @private
+ */
+v8Locale.__createSettingsOrDefault = function(settings, defaults) {
+ if (!settings || typeof(settings) !== 'object') {
+ return defaults;
+ }
+ for (var key in defaults) {
+ if (!settings.hasOwnProperty(key)) {
+ settings[key] = defaults[key];
+ }
+ }
+ // Clean up settings.
+ for (var key in settings) {
+ // Trim whitespace.
+ if (typeof(settings[key]) === "string") {
+ settings[key] = settings[key].trim();
+ }
+ // Remove all properties that are set to undefined/null. This allows
+ // the derive method to remove a setting we don't need anymore.
+ if (!settings[key]) {
+ delete settings[key];
+ }
+ }
+
+ return settings;
+};
+
+/**
+ * If the locale is valid (defined and of v8Locale type), returns it as-is.
+ * Otherwise creates and returns the default locale.
+ * @param {!Object} locale - user provided locale.
+ * @returns {Object} - v8Locale object.
+ * @private
+ */
+v8Locale.__createLocaleOrDefault = function(locale) {
+ if (!locale || !(locale instanceof v8Locale)) {
+ return new v8Locale();
+ } else {
+ return locale;
+ }
+};
diff --git a/deps/v8/src/extensions/experimental/language-matcher.cc b/deps/v8/src/extensions/experimental/language-matcher.cc
new file mode 100644
index 000000000..127e57178
--- /dev/null
+++ b/deps/v8/src/extensions/experimental/language-matcher.cc
@@ -0,0 +1,252 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// TODO(cira): Remove LanguageMatcher from v8 when ICU implements
+// language matching API.
+
+#include "src/extensions/experimental/language-matcher.h"
+
+#include <string.h>
+
+#include "src/extensions/experimental/i18n-utils.h"
+#include "unicode/datefmt.h" // For getAvailableLocales
+#include "unicode/locid.h"
+#include "unicode/uloc.h"
+
+namespace v8 {
+namespace internal {
+
+const unsigned int LanguageMatcher::kLanguageWeight = 75;
+const unsigned int LanguageMatcher::kScriptWeight = 20;
+const unsigned int LanguageMatcher::kRegionWeight = 5;
+const unsigned int LanguageMatcher::kThreshold = 50;
+const unsigned int LanguageMatcher::kPositionBonus = 1;
+const char* const LanguageMatcher::kDefaultLocale = "root";
+
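+// Scoring sketch: matching "sr-RS" against an available "sr_RS" scores
+// 75 (language) + 20 (script, both empty) + 5 (region) = 100; against a
+// plain "sr" it scores 75 + 20 - 5 = 90. Both clear kThreshold (50), and
+// kPositionBonus favors earlier entries in a priority list.
+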
+static const char* GetLanguageException(const char*);
+static bool BCP47ToICUFormat(const char*, char*);
+static int CompareLocaleSubtags(const char*, const char*);
+static bool BuildLocaleName(const char*, const char*, LocaleIDMatch*);
+
+LocaleIDMatch::LocaleIDMatch()
+ : score(-1) {
+ I18NUtils::StrNCopy(
+ bcp47_id, ULOC_FULLNAME_CAPACITY, LanguageMatcher::kDefaultLocale);
+
+ I18NUtils::StrNCopy(
+ icu_id, ULOC_FULLNAME_CAPACITY, LanguageMatcher::kDefaultLocale);
+}
+
+LocaleIDMatch& LocaleIDMatch::operator=(const LocaleIDMatch& rhs) {
+ I18NUtils::StrNCopy(this->bcp47_id, ULOC_FULLNAME_CAPACITY, rhs.bcp47_id);
+ I18NUtils::StrNCopy(this->icu_id, ULOC_FULLNAME_CAPACITY, rhs.icu_id);
+ this->score = rhs.score;
+
+ return *this;
+}
+
+// static
+void LanguageMatcher::GetBestMatchForPriorityList(
+ v8::Handle<v8::Array> locales, LocaleIDMatch* result) {
+ v8::HandleScope handle_scope;
+
+ unsigned int position_bonus = locales->Length() * kPositionBonus;
+
+ int max_score = 0;
+ LocaleIDMatch match;
+ for (unsigned int i = 0; i < locales->Length(); ++i) {
+ position_bonus -= kPositionBonus;
+
+ v8::TryCatch try_catch;
+ v8::Local<v8::Value> locale_id = locales->Get(v8::Integer::New(i));
+
+ // Return default if exception is raised when reading parameter.
+ if (try_catch.HasCaught()) break;
+
+ // JavaScript arrays can be heterogeneous, so check that each item
+ // is a string.
+ if (!locale_id->IsString()) continue;
+
+ if (!CompareToSupportedLocaleIDList(locale_id->ToString(), &match)) {
+ continue;
+ }
+
+ // Skip items under threshold.
+ if (match.score < kThreshold) continue;
+
+ match.score += position_bonus;
+ if (match.score > max_score) {
+ *result = match;
+
+ max_score = match.score;
+ }
+ }
+}
+
+// static
+void LanguageMatcher::GetBestMatchForString(
+ v8::Handle<v8::String> locale, LocaleIDMatch* result) {
+ LocaleIDMatch match;
+
+ if (CompareToSupportedLocaleIDList(locale, &match) &&
+ match.score >= kThreshold) {
+ *result = match;
+ }
+}
+
+// static
+bool LanguageMatcher::CompareToSupportedLocaleIDList(
+ v8::Handle<v8::String> locale_id, LocaleIDMatch* result) {
+ static int32_t available_count = 0;
+ // Depending on how ICU data is built, locales returned by
+ // Locale::getAvailableLocale() are not guaranteed to support DateFormat,
+ // Collation and other services. We can call getAvailableLocale() of all the
+ // services we want to support and take the intersection of them all, but
+ // using DateFormat::getAvailableLocales() should suffice.
+ // TODO(cira): Maybe make this thread-safe?
+ static const icu::Locale* available_locales =
+ icu::DateFormat::getAvailableLocales(available_count);
+
+ // Skip this locale_id if it's not in ASCII.
+ v8::String::AsciiValue ascii_value(locale_id);
+ if (*ascii_value == NULL) return false;
+
+ char locale[ULOC_FULLNAME_CAPACITY];
+ if (!BCP47ToICUFormat(*ascii_value, locale)) return false;
+
+ icu::Locale input_locale(locale);
+
+ // Position of the best match locale in list of available locales.
+ int position = -1;
+ const char* language = GetLanguageException(input_locale.getLanguage());
+ const char* script = input_locale.getScript();
+ const char* region = input_locale.getCountry();
+ for (int32_t i = 0; i < available_count; ++i) {
+ int current_score = 0;
+ int sign =
+ CompareLocaleSubtags(language, available_locales[i].getLanguage());
+ current_score += sign * kLanguageWeight;
+
+ sign = CompareLocaleSubtags(script, available_locales[i].getScript());
+ current_score += sign * kScriptWeight;
+
+ sign = CompareLocaleSubtags(region, available_locales[i].getCountry());
+ current_score += sign * kRegionWeight;
+
+ if (current_score >= kThreshold && current_score > result->score) {
+ result->score = current_score;
+ position = i;
+ }
+ }
+
+ // Didn't find any good matches so use defaults.
+ if (position == -1) return false;
+
+ return BuildLocaleName(available_locales[position].getBaseName(),
+ input_locale.getName(), result);
+}
+
+// For some unsupported language subtags it is better to fall back to a
+// related supported language than to the default.
+static const char* GetLanguageException(const char* language) {
+ // Serbo-Croatian to Serbian.
+ if (!strcmp(language, "sh")) return "sr";
+
+ // Norwegian to Norwegian Bokmal.
+ if (!strcmp(language, "no")) return "nb";
+
+ // Moldavian to Romanian.
+ if (!strcmp(language, "mo")) return "ro";
+
+ // Tagalog to Filipino.
+ if (!strcmp(language, "tl")) return "fil";
+
+ return language;
+}
+
+// Converts user input from BCP47 locale id format to ICU compatible format.
+// Returns false if uloc_forLanguageTag call fails or if extension is too long.
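+// For example, "de-DE-u-co-phonebk" becomes "de_DE@collation=phonebk".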
+static bool BCP47ToICUFormat(const char* locale_id, char* result) {
+ UErrorCode status = U_ZERO_ERROR;
+ int32_t locale_size = 0;
+
+ char locale[ULOC_FULLNAME_CAPACITY];
+ I18NUtils::StrNCopy(locale, ULOC_FULLNAME_CAPACITY, locale_id);
+
+ // uloc_forLanguageTag has a bug where a long extension can crash the code.
+ // We need to check that the extension part of the language id stays within
+ // the length limit.
+ // ICU bug: http://bugs.icu-project.org/trac/ticket/8519
+ const char* extension = strstr(locale_id, "-u-");
+ if (extension != NULL &&
+ strlen(extension) > ULOC_KEYWORD_AND_VALUES_CAPACITY) {
+ // Truncate to get non-crashing string, but still preserve base language.
+ int base_length = strlen(locale_id) - strlen(extension);
+ locale[base_length] = '\0';
+ }
+
+ uloc_forLanguageTag(locale, result, ULOC_FULLNAME_CAPACITY,
+ &locale_size, &status);
+ return !U_FAILURE(status);
+}
+
+// Compares locale id subtags.
+// Returns 1 for match or -1 for mismatch.
+static int CompareLocaleSubtags(const char* lsubtag, const char* rsubtag) {
+ return strcmp(lsubtag, rsubtag) == 0 ? 1 : -1;
+}
+
+// Builds a BCP47 compliant locale id from base name of matched locale and
+// full user specified locale.
+// Returns false if uloc_toLanguageTag failed to convert locale id.
+// Example:
+// base_name of matched locale (ICU ID): de_DE
+// input_locale_name (ICU ID): de_AT@collation=phonebk
+// result (ICU ID): de_DE@collation=phonebk
+// result (BCP47 ID): de-DE-u-co-phonebk
+static bool BuildLocaleName(const char* base_name,
+ const char* input_locale_name,
+ LocaleIDMatch* result) {
+ I18NUtils::StrNCopy(result->icu_id, ULOC_LANG_CAPACITY, base_name);
+
+ // Append extensions (if any) from the original locale; the base name
+ // was already copied above.
+ const char* extension = strchr(input_locale_name, ULOC_KEYWORD_SEPARATOR);
+ if (extension != NULL) {
+ I18NUtils::StrNCopy(result->icu_id + strlen(base_name),
+ ULOC_KEYWORD_AND_VALUES_CAPACITY, extension);
+ }
+
+ // Convert ICU locale name into BCP47 format.
+ UErrorCode status = U_ZERO_ERROR;
+ uloc_toLanguageTag(result->icu_id, result->bcp47_id,
+ ULOC_FULLNAME_CAPACITY, false, &status);
+ return !U_FAILURE(status);
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/extensions/experimental/language-matcher.h b/deps/v8/src/extensions/experimental/language-matcher.h
new file mode 100644
index 000000000..dd2930458
--- /dev/null
+++ b/deps/v8/src/extensions/experimental/language-matcher.h
@@ -0,0 +1,95 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
+
+#include "include/v8.h"
+
+#include "unicode/uloc.h"
+
+namespace v8 {
+namespace internal {
+
+struct LocaleIDMatch {
+ LocaleIDMatch();
+
+ LocaleIDMatch& operator=(const LocaleIDMatch& rhs);
+
+ // Bcp47 locale id - "de-Latn-DE-u-co-phonebk".
+ char bcp47_id[ULOC_FULLNAME_CAPACITY];
+
+ // ICU locale id - "de_Latn_DE@collation=phonebk".
+ char icu_id[ULOC_FULLNAME_CAPACITY];
+
+ // Score for this locale.
+ int score;
+};
+
+class LanguageMatcher {
+ public:
+ // Default locale.
+ static const char* const kDefaultLocale;
+
+ // Finds the best supported locale for a given list of locale identifiers.
+ // It preserves the extension of the locale id.
+ static void GetBestMatchForPriorityList(
+ v8::Handle<v8::Array> locale_list, LocaleIDMatch* result);
+
+ // Finds the best supported locale for a single locale identifier.
+ // It preserves the extension of the locale id.
+ static void GetBestMatchForString(
+ v8::Handle<v8::String> locale_id, LocaleIDMatch* result);
+
+ private:
+ // If language subtags match, add this amount to the score.
+ static const unsigned int kLanguageWeight;
+
+ // If script subtags match, add this amount to the score.
+ static const unsigned int kScriptWeight;
+
+ // If region subtags match, add this amount to the score.
+ static const unsigned int kRegionWeight;
+
+ // LocaleID match score has to be over this number to accept the match.
+ static const unsigned int kThreshold;
+
+ // For breaking ties in priority queue.
+ static const unsigned int kPositionBonus;
+
+ LanguageMatcher();
+
+ // Compares locale_id to the supported list of locales and returns best
+ // match.
+ // Returns false if it fails to convert locale id from ICU to BCP47 format.
+ static bool CompareToSupportedLocaleIDList(v8::Handle<v8::String> locale_id,
+ LocaleIDMatch* result);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
diff --git a/deps/v8/src/extensions/experimental/number-format.cc b/deps/v8/src/extensions/experimental/number-format.cc
new file mode 100644
index 000000000..51e0b959c
--- /dev/null
+++ b/deps/v8/src/extensions/experimental/number-format.cc
@@ -0,0 +1,356 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/number-format.h"
+
+#include <string.h>
+
+#include "src/extensions/experimental/i18n-utils.h"
+#include "unicode/dcfmtsym.h"
+#include "unicode/decimfmt.h"
+#include "unicode/locid.h"
+#include "unicode/numfmt.h"
+#include "unicode/uchar.h"
+#include "unicode/ucurr.h"
+
+namespace v8 {
+namespace internal {
+
+const int NumberFormat::kCurrencyCodeLength = 4;
+
+v8::Persistent<v8::FunctionTemplate> NumberFormat::number_format_template_;
+
+static icu::DecimalFormat* CreateNumberFormat(v8::Handle<v8::String>,
+ v8::Handle<v8::String>,
+ v8::Handle<v8::Object>);
+static icu::DecimalFormat* CreateFormatterFromSkeleton(
+ const icu::Locale&, const icu::UnicodeString&, UErrorCode*);
+static icu::DecimalFormatSymbols* GetFormatSymbols(const icu::Locale&);
+static bool GetCurrencyCode(const icu::Locale&,
+ const char* const,
+ v8::Handle<v8::Object>,
+ UChar*);
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError();
+
+icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
+ v8::Handle<v8::Object> obj) {
+ if (number_format_template_->HasInstance(obj)) {
+ return static_cast<icu::DecimalFormat*>(
+ obj->GetPointerFromInternalField(0));
+ }
+
+ return NULL;
+}
+
+void NumberFormat::DeleteNumberFormat(v8::Persistent<v8::Value> object,
+ void* param) {
+ v8::Persistent<v8::Object> persistent_object =
+ v8::Persistent<v8::Object>::Cast(object);
+
+ // First delete the hidden C++ object.
+ // Unpacking should never return NULL here. That would only happen if
+ // this method is used as the weak callback for persistent handles not
+ // pointing to a number formatter.
+ delete UnpackNumberFormat(persistent_object);
+
+ // Then dispose of the persistent handle to JS object.
+ persistent_object.Dispose();
+}
+
+v8::Handle<v8::Value> NumberFormat::Format(const v8::Arguments& args) {
+ v8::HandleScope handle_scope;
+
+ if (args.Length() != 1 || !args[0]->IsNumber()) {
+ // Just return NaN on invalid input.
+ return v8::String::New("NaN");
+ }
+
+ icu::DecimalFormat* number_format = UnpackNumberFormat(args.Holder());
+ if (!number_format) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ // ICU handles an actual NaN value properly and returns the NaN string.
+ icu::UnicodeString result;
+ number_format->format(args[0]->NumberValue(), result);
+
+ return v8::String::New(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
+}
+
+v8::Handle<v8::Value> NumberFormat::JSNumberFormat(const v8::Arguments& args) {
+ v8::HandleScope handle_scope;
+
+ // Expect locale id, region id and settings.
+ if (args.Length() != 3 ||
+ !args[0]->IsString() || !args[1]->IsString() || !args[2]->IsObject()) {
+ return v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Locale, region and number settings are required.")));
+ }
+
+ icu::DecimalFormat* number_format = CreateNumberFormat(
+ args[0]->ToString(), args[1]->ToString(), args[2]->ToObject());
+
+ if (number_format_template_.IsEmpty()) {
+ v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
+
+ raw_template->SetClassName(v8::String::New("v8Locale.NumberFormat"));
+
+ // Define internal field count on instance template.
+ v8::Local<v8::ObjectTemplate> object_template =
+ raw_template->InstanceTemplate();
+
+ // Set aside internal field for icu number formatter.
+ object_template->SetInternalFieldCount(1);
+
+ // Define all of the prototype methods on prototype template.
+ v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
+ proto->Set(v8::String::New("format"),
+ v8::FunctionTemplate::New(Format));
+
+ number_format_template_ =
+ v8::Persistent<v8::FunctionTemplate>::New(raw_template);
+ }
+
+ // Create an empty object wrapper.
+ v8::Local<v8::Object> local_object =
+ number_format_template_->GetFunction()->NewInstance();
+ v8::Persistent<v8::Object> wrapper =
+ v8::Persistent<v8::Object>::New(local_object);
+
+ // Set number formatter as internal field of the resulting JS object.
+ wrapper->SetPointerInInternalField(0, number_format);
+
+ // Create options key.
+ v8::Local<v8::Object> options = v8::Object::New();
+
+ // Show what ICU decided to use, for easier problem tracking.
+ // Keep it as a v8-specific extension.
+ icu::UnicodeString pattern;
+ number_format->toPattern(pattern);
+ options->Set(v8::String::New("v8ResolvedPattern"),
+ v8::String::New(reinterpret_cast<const uint16_t*>(
+ pattern.getBuffer()), pattern.length()));
+
+ // Set resolved currency code in options.currency if not empty.
+ icu::UnicodeString currency(number_format->getCurrency());
+ if (!currency.isEmpty()) {
+ options->Set(v8::String::New("currencyCode"),
+ v8::String::New(reinterpret_cast<const uint16_t*>(
+ currency.getBuffer()), currency.length()));
+ }
+
+ wrapper->Set(v8::String::New("options"), options);
+
+ // Make the object handle weak so we can delete the formatter once GC
+ // kicks in.
+ wrapper.MakeWeak(NULL, DeleteNumberFormat);
+
+ return wrapper;
+}
+
+// Creates a DecimalFormat for the given locale, region and settings.
+static icu::DecimalFormat* CreateNumberFormat(v8::Handle<v8::String> locale,
+ v8::Handle<v8::String> region,
+ v8::Handle<v8::Object> settings) {
+ v8::HandleScope handle_scope;
+
+ v8::String::AsciiValue ascii_locale(locale);
+ icu::Locale icu_locale(*ascii_locale);
+
+ // Make formatter from skeleton.
+ icu::DecimalFormat* number_format = NULL;
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString setting;
+
+ if (I18NUtils::ExtractStringSetting(settings, "skeleton", &setting)) {
+ // TODO(cira): Use ICU skeleton once
+ // http://bugs.icu-project.org/trac/ticket/8610 is resolved.
+ number_format = CreateFormatterFromSkeleton(icu_locale, setting, &status);
+ } else if (I18NUtils::ExtractStringSetting(settings, "pattern", &setting)) {
+ number_format =
+ new icu::DecimalFormat(setting, GetFormatSymbols(icu_locale), status);
+ } else if (I18NUtils::ExtractStringSetting(settings, "style", &setting)) {
+ if (setting == UNICODE_STRING_SIMPLE("currency")) {
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createCurrencyInstance(icu_locale, status));
+ } else if (setting == UNICODE_STRING_SIMPLE("percent")) {
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createPercentInstance(icu_locale, status));
+ } else if (setting == UNICODE_STRING_SIMPLE("scientific")) {
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createScientificInstance(icu_locale, status));
+ } else {
+ // Make it decimal in any other case.
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createInstance(icu_locale, status));
+ }
+ }
+
+ if (U_FAILURE(status)) {
+ delete number_format;
+ status = U_ZERO_ERROR;
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createInstance(icu_locale, status));
+ }
+
+ // Attach the appropriate currency code to the formatter; it affects
+ // currency formatters only. The region is a full language identifier
+ // of the form 'und_' + region id.
+ v8::String::AsciiValue ascii_region(region);
+
+ UChar currency_code[NumberFormat::kCurrencyCodeLength];
+ if (GetCurrencyCode(icu_locale, *ascii_region, settings, currency_code)) {
+ number_format->setCurrency(currency_code, status);
+ }
+
+ return number_format;
+}
+
+// Generates ICU number format pattern from given skeleton.
+static icu::DecimalFormat* CreateFormatterFromSkeleton(
+ const icu::Locale& icu_locale,
+ const icu::UnicodeString& skeleton,
+ UErrorCode* status) {
+ icu::DecimalFormat skeleton_format(
+ skeleton, GetFormatSymbols(icu_locale), *status);
+
+ // Find out if skeleton contains currency or percent symbol and create
+ // proper instance to tweak.
+ icu::DecimalFormat* base_format = NULL;
+
+ // UChar representation of U+00A4 currency symbol.
+ const UChar currency_symbol = 0xA4u;
+
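+ // One U+00A4 in the skeleton selects the local currency symbol style
+ // ("$"), two the ISO code style ("USD"), and three or more the plural
+ // name style ("US dollars").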
+ int32_t index = skeleton.indexOf(currency_symbol);
+ if (index != -1) {
+ // Find out how many U+00A4 characters there are; there is at least one.
+ // The case of non-consecutive U+00A4 is taken care of in i18n.js.
+ int32_t end_index = skeleton.lastIndexOf(currency_symbol, index);
+
+ icu::NumberFormat::EStyles style;
+ switch (end_index - index) {
+ case 0:
+ style = icu::NumberFormat::kCurrencyStyle;
+ break;
+ case 1:
+ style = icu::NumberFormat::kIsoCurrencyStyle;
+ break;
+ default:
+ style = icu::NumberFormat::kPluralCurrencyStyle;
+ }
+
+ base_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createInstance(icu_locale, style, *status));
+ } else if (skeleton.indexOf('%') != -1) {
+ base_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createPercentInstance(icu_locale, *status));
+ } else {
+ // TODO(cira): Handle scientific skeleton.
+ base_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createInstance(icu_locale, *status));
+ }
+
+ if (U_FAILURE(*status)) {
+ delete base_format;
+ return NULL;
+ }
+
+ // Copy important information from skeleton to the new formatter.
+ // TODO(cira): copy rounding information from skeleton?
+ base_format->setGroupingUsed(skeleton_format.isGroupingUsed());
+
+ base_format->setMinimumIntegerDigits(
+ skeleton_format.getMinimumIntegerDigits());
+
+ base_format->setMinimumFractionDigits(
+ skeleton_format.getMinimumFractionDigits());
+
+ base_format->setMaximumFractionDigits(
+ skeleton_format.getMaximumFractionDigits());
+
+ return base_format;
+}
+
+// Gets decimal symbols for a locale.
+static icu::DecimalFormatSymbols* GetFormatSymbols(
+ const icu::Locale& icu_locale) {
+ UErrorCode status = U_ZERO_ERROR;
+ icu::DecimalFormatSymbols* symbols =
+ new icu::DecimalFormatSymbols(icu_locale, status);
+
+ if (U_FAILURE(status)) {
+ delete symbols;
+ // Use symbols from default locale.
+ symbols = new icu::DecimalFormatSymbols(status);
+ }
+
+ return symbols;
+}
+
+// Gets the ISO 4217 3-letter currency code.
+// Checks the currencyCode setting first, then @currency=code, and finally
+// tries to infer the currency code from the locale of the form
+// 'und_' + region id. Returns false in case of error.
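+// For example, ucurr_forLocale("und_US", ...) infers "USD".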
+static bool GetCurrencyCode(const icu::Locale& icu_locale,
+ const char* const und_region_locale,
+ v8::Handle<v8::Object> settings,
+ UChar* code) {
+ UErrorCode status = U_ZERO_ERROR;
+
+ // If there is user specified currency code, use it.
+ icu::UnicodeString currency;
+ if (I18NUtils::ExtractStringSetting(settings, "currencyCode", &currency)) {
+ currency.extract(code, NumberFormat::kCurrencyCodeLength, status);
+ return true;
+ }
+
+ // If the ICU locale has a -cu- currency code, use it.
+ char currency_code[NumberFormat::kCurrencyCodeLength];
+ int32_t length = icu_locale.getKeywordValue(
+ "currency", currency_code, NumberFormat::kCurrencyCodeLength, status);
+ if (length != 0) {
+ I18NUtils::AsciiToUChar(currency_code, length + 1,
+ code, NumberFormat::kCurrencyCodeLength);
+ return true;
+ }
+
+ // Otherwise infer currency code from the region id.
+ ucurr_forLocale(
+ und_region_locale, code, NumberFormat::kCurrencyCodeLength, &status);
+
+ return !U_FAILURE(status);
+}
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+ // Returns undefined, and schedules an exception to be thrown.
+ return v8::ThrowException(v8::Exception::Error(
+ v8::String::New("NumberFormat method called on an object "
+ "that is not a NumberFormat.")));
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/extensions/experimental/number-format.h b/deps/v8/src/extensions/experimental/number-format.h
new file mode 100644
index 000000000..bcfaed6fc
--- /dev/null
+++ b/deps/v8/src/extensions/experimental/number-format.h
@@ -0,0 +1,71 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
+
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class DecimalFormat;
+}
+
+namespace v8 {
+namespace internal {
+
+class NumberFormat {
+ public:
+ // 3-letter ISO 4217 currency code plus \0.
+ static const int kCurrencyCodeLength;
+
+ static v8::Handle<v8::Value> JSNumberFormat(const v8::Arguments& args);
+
+ // Helper methods for various bindings.
+
+ // Unpacks a number format object from the corresponding JavaScript object.
+ static icu::DecimalFormat* UnpackNumberFormat(
+ v8::Handle<v8::Object> obj);
+
+ // Release memory we allocated for the NumberFormat once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteNumberFormat(v8::Persistent<v8::Value> object,
+ void* param);
+
+ // Formats number and returns corresponding string.
+ static v8::Handle<v8::Value> Format(const v8::Arguments& args);
+
+ private:
+ NumberFormat();
+
+ static v8::Persistent<v8::FunctionTemplate> number_format_template_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
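The UnpackNumberFormat/DeleteNumberFormat pair declared above follows the
usual v8 idiom for tying a C++ object's lifetime to a JS wrapper: the
wrapper stores the icu::DecimalFormat* in an internal field, and a weak
callback frees it once the wrapper is collected. A sketch of that wiring
with hypothetical names, assuming the v8 3.x Persistent API (the real
implementation lives in number-format.cc):

    static void DeleteFormatSketch(v8::Persistent<v8::Value> object,
                                   void* param) {
      delete static_cast<icu::DecimalFormat*>(param);  // free the ICU object
      object.Dispose();                                // drop the weak handle
    }

    static void WrapSketch(v8::Handle<v8::Object> wrapper,
                           icu::DecimalFormat* format) {
      wrapper->SetPointerInInternalField(0, format);
      v8::Persistent<v8::Object> weak =
          v8::Persistent<v8::Object>::New(wrapper);
      weak.MakeWeak(format, DeleteFormatSketch);  // format returns as param
    }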
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 8b4bdbd88..b3f83fe98 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -100,7 +100,7 @@ v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
data, string->length());
result = string->MakeExternal(resource);
if (result && !string->IsSymbol()) {
- i::ExternalStringTable::AddString(*string);
+ HEAP->external_string_table()->AddString(*string);
}
if (!result) delete resource;
} else {
@@ -110,7 +110,7 @@ v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
data, string->length());
result = string->MakeExternal(resource);
if (result && !string->IsSymbol()) {
- i::ExternalStringTable::AddString(*string);
+ HEAP->external_string_table()->AddString(*string);
}
if (!result) delete resource;
}
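Both hunks above swap a static class call for a per-isolate accessor
reached through HEAP, the transition shorthand from isolate.h, roughly:

    // Approximation of the shorthand defined in isolate.h:
    #define HEAP (v8::internal::Isolate::Current()->heap())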
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index 63daa05b5..3740c27aa 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -45,7 +45,7 @@ v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
if (args.Length() >= 1 && args[0]->IsBoolean()) {
compact = args[0]->BooleanValue();
}
- Heap::CollectAllGarbage(compact);
+ HEAP->CollectAllGarbage(compact);
return v8::Undefined();
}
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 96c757a37..fcf07092b 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -41,35 +41,53 @@ namespace internal {
Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
ASSERT(0 <= size);
- CALL_HEAP_FUNCTION(Heap::AllocateFixedArray(size, pretenure), FixedArray);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFixedArray(size, pretenure),
+ FixedArray);
}
Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
PretenureFlag pretenure) {
ASSERT(0 <= size);
- CALL_HEAP_FUNCTION(Heap::AllocateFixedArrayWithHoles(size, pretenure),
- FixedArray);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFixedArrayWithHoles(size, pretenure),
+ FixedArray);
+}
+
+
+Handle<FixedArray> Factory::NewFixedDoubleArray(int size,
+ PretenureFlag pretenure) {
+ ASSERT(0 <= size);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateUninitializedFixedDoubleArray(size, pretenure),
+ FixedArray);
}
Handle<StringDictionary> Factory::NewStringDictionary(int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(StringDictionary::Allocate(at_least_space_for),
+ CALL_HEAP_FUNCTION(isolate(),
+ StringDictionary::Allocate(at_least_space_for),
StringDictionary);
}
Handle<NumberDictionary> Factory::NewNumberDictionary(int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(NumberDictionary::Allocate(at_least_space_for),
+ CALL_HEAP_FUNCTION(isolate(),
+ NumberDictionary::Allocate(at_least_space_for),
NumberDictionary);
}
Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors) {
ASSERT(0 <= number_of_descriptors);
- CALL_HEAP_FUNCTION(DescriptorArray::Allocate(number_of_descriptors),
+ CALL_HEAP_FUNCTION(isolate(),
+ DescriptorArray::Allocate(number_of_descriptors),
DescriptorArray);
}
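Every CALL_HEAP_FUNCTION use in this file gains a leading isolate argument.
A simplified sketch of the macro's shape under the new signature; the real
definition (in heap-inl.h) additionally collects garbage via the isolate's
heap and retries when the first allocation attempt fails:

    #define CALL_HEAP_FUNCTION_SKETCH(ISOLATE, FUNCTION_CALL, TYPE)      \
      do {                                                               \
        MaybeObject* __maybe_object__ = (FUNCTION_CALL);                 \
        Object* __object__ = NULL;                                       \
        if (!__maybe_object__->ToObject(&__object__)) {                  \
          /* Failed allocation: here the real macro has (ISOLATE) */     \
          /* collect garbage and retries before giving up.        */     \
          return Handle<TYPE>();                                         \
        }                                                                \
        return Handle<TYPE>(TYPE::cast(__object__));                     \
      } while (false)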
@@ -78,7 +96,8 @@ Handle<DeoptimizationInputData> Factory::NewDeoptimizationInputData(
int deopt_entry_count,
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
- CALL_HEAP_FUNCTION(DeoptimizationInputData::Allocate(deopt_entry_count,
+ CALL_HEAP_FUNCTION(isolate(),
+ DeoptimizationInputData::Allocate(deopt_entry_count,
pretenure),
DeoptimizationInputData);
}
@@ -88,7 +107,8 @@ Handle<DeoptimizationOutputData> Factory::NewDeoptimizationOutputData(
int deopt_entry_count,
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
- CALL_HEAP_FUNCTION(DeoptimizationOutputData::Allocate(deopt_entry_count,
+ CALL_HEAP_FUNCTION(isolate(),
+ DeoptimizationOutputData::Allocate(deopt_entry_count,
pretenure),
DeoptimizationOutputData);
}
@@ -96,96 +116,178 @@ Handle<DeoptimizationOutputData> Factory::NewDeoptimizationOutputData(
// Symbols are created in the old generation (data space).
Handle<String> Factory::LookupSymbol(Vector<const char> string) {
- CALL_HEAP_FUNCTION(Heap::LookupSymbol(string), String);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->LookupSymbol(string),
+ String);
+}
+
+// Symbols are created in the old generation (data space).
+Handle<String> Factory::LookupSymbol(Handle<String> string) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->LookupSymbol(*string),
+ String);
}
Handle<String> Factory::LookupAsciiSymbol(Vector<const char> string) {
- CALL_HEAP_FUNCTION(Heap::LookupAsciiSymbol(string), String);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->LookupAsciiSymbol(string),
+ String);
}
+
+Handle<String> Factory::LookupAsciiSymbol(Handle<SeqAsciiString> string,
+ int from,
+ int length) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->LookupAsciiSymbol(string,
+ from,
+ length),
+ String);
+}
+
+
Handle<String> Factory::LookupTwoByteSymbol(Vector<const uc16> string) {
- CALL_HEAP_FUNCTION(Heap::LookupTwoByteSymbol(string), String);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->LookupTwoByteSymbol(string),
+ String);
}
Handle<String> Factory::NewStringFromAscii(Vector<const char> string,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::AllocateStringFromAscii(string, pretenure), String);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateStringFromAscii(string, pretenure),
+ String);
}
Handle<String> Factory::NewStringFromUtf8(Vector<const char> string,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::AllocateStringFromUtf8(string, pretenure), String);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateStringFromUtf8(string, pretenure),
+ String);
}
Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::AllocateStringFromTwoByte(string, pretenure),
- String);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateStringFromTwoByte(string, pretenure),
+ String);
}
-Handle<String> Factory::NewRawAsciiString(int length,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::AllocateRawAsciiString(length, pretenure), String);
+Handle<SeqAsciiString> Factory::NewRawAsciiString(int length,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateRawAsciiString(length, pretenure),
+ SeqAsciiString);
}
-Handle<String> Factory::NewRawTwoByteString(int length,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::AllocateRawTwoByteString(length, pretenure), String);
+Handle<SeqTwoByteString> Factory::NewRawTwoByteString(int length,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateRawTwoByteString(length, pretenure),
+ SeqTwoByteString);
}
Handle<String> Factory::NewConsString(Handle<String> first,
Handle<String> second) {
- CALL_HEAP_FUNCTION(Heap::AllocateConsString(*first, *second), String);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateConsString(*first, *second),
+ String);
}
Handle<String> Factory::NewSubString(Handle<String> str,
int begin,
int end) {
- CALL_HEAP_FUNCTION(str->SubString(begin, end), String);
+ CALL_HEAP_FUNCTION(isolate(),
+ str->SubString(begin, end),
+ String);
+}
+
+
+Handle<String> Factory::NewProperSubString(Handle<String> str,
+ int begin,
+ int end) {
+ ASSERT(begin > 0 || end < str->length());
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateSubString(*str, begin, end),
+ String);
}
Handle<String> Factory::NewExternalStringFromAscii(
ExternalAsciiString::Resource* resource) {
- CALL_HEAP_FUNCTION(Heap::AllocateExternalStringFromAscii(resource), String);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateExternalStringFromAscii(resource),
+ String);
}
Handle<String> Factory::NewExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource) {
- CALL_HEAP_FUNCTION(Heap::AllocateExternalStringFromTwoByte(resource), String);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateExternalStringFromTwoByte(resource),
+ String);
}
Handle<Context> Factory::NewGlobalContext() {
- CALL_HEAP_FUNCTION(Heap::AllocateGlobalContext(), Context);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateGlobalContext(),
+ Context);
}
Handle<Context> Factory::NewFunctionContext(int length,
- Handle<JSFunction> closure) {
- CALL_HEAP_FUNCTION(Heap::AllocateFunctionContext(length, *closure), Context);
+ Handle<JSFunction> function) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFunctionContext(length, *function),
+ Context);
+}
+
+
+Handle<Context> Factory::NewCatchContext(Handle<JSFunction> function,
+ Handle<Context> previous,
+ Handle<String> name,
+ Handle<Object> thrown_object) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateCatchContext(*function,
+ *previous,
+ *name,
+ *thrown_object),
+ Context);
}
-Handle<Context> Factory::NewWithContext(Handle<Context> previous,
- Handle<JSObject> extension,
- bool is_catch_context) {
- CALL_HEAP_FUNCTION(Heap::AllocateWithContext(*previous,
- *extension,
- is_catch_context),
- Context);
+Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
+ Handle<Context> previous,
+ Handle<JSObject> extension) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateWithContext(*function, *previous, *extension),
+ Context);
}
Handle<Struct> Factory::NewStruct(InstanceType type) {
- CALL_HEAP_FUNCTION(Heap::AllocateStruct(type), Struct);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateStruct(type),
+ Struct);
}
@@ -200,63 +302,59 @@ Handle<AccessorInfo> Factory::NewAccessorInfo() {
Handle<Script> Factory::NewScript(Handle<String> source) {
// Generate id for this script.
int id;
- if (Heap::last_script_id()->IsUndefined()) {
+ Heap* heap = isolate()->heap();
+ if (heap->last_script_id()->IsUndefined()) {
// Script ids start from one.
id = 1;
} else {
// Increment id, wrap when positive smi is exhausted.
- id = Smi::cast(Heap::last_script_id())->value();
+ id = Smi::cast(heap->last_script_id())->value();
id++;
if (!Smi::IsValid(id)) {
id = 0;
}
}
- Heap::SetLastScriptId(Smi::FromInt(id));
+ heap->SetLastScriptId(Smi::FromInt(id));
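+  // Note: once the Smi range is exhausted the counter wraps through zero,
+  // so script ids are only unique until the first wrap-around.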
// Create and initialize script object.
- Handle<Proxy> wrapper = Factory::NewProxy(0, TENURED);
+ Handle<Foreign> wrapper = NewForeign(0, TENURED);
Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE));
script->set_source(*source);
- script->set_name(Heap::undefined_value());
- script->set_id(Heap::last_script_id());
+ script->set_name(heap->undefined_value());
+ script->set_id(heap->last_script_id());
script->set_line_offset(Smi::FromInt(0));
script->set_column_offset(Smi::FromInt(0));
- script->set_data(Heap::undefined_value());
- script->set_context_data(Heap::undefined_value());
+ script->set_data(heap->undefined_value());
+ script->set_context_data(heap->undefined_value());
script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST));
script->set_wrapper(*wrapper);
- script->set_line_ends(Heap::undefined_value());
- script->set_eval_from_shared(Heap::undefined_value());
+ script->set_line_ends(heap->undefined_value());
+ script->set_eval_from_shared(heap->undefined_value());
script->set_eval_from_instructions_offset(Smi::FromInt(0));
return script;
}
-Handle<Proxy> Factory::NewProxy(Address addr, PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::AllocateProxy(addr, pretenure), Proxy);
+Handle<Foreign> Factory::NewForeign(Address addr, PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateForeign(addr, pretenure),
+ Foreign);
}
-Handle<Proxy> Factory::NewProxy(const AccessorDescriptor* desc) {
- return NewProxy((Address) desc, TENURED);
+Handle<Foreign> Factory::NewForeign(const AccessorDescriptor* desc) {
+ return NewForeign((Address) desc, TENURED);
}
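+// (The Proxy -> Foreign rename frees the "Proxy" name for the harmony JS
+// proxies this patch introduces elsewhere: see NewJSProxy below and the
+// --harmony_proxies flag added in flag-definitions.h.)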
Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
ASSERT(0 <= length);
- CALL_HEAP_FUNCTION(Heap::AllocateByteArray(length, pretenure), ByteArray);
-}
-
-
-Handle<PixelArray> Factory::NewPixelArray(int length,
- uint8_t* external_pointer,
- PretenureFlag pretenure) {
- ASSERT(0 <= length);
- CALL_HEAP_FUNCTION(Heap::AllocatePixelArray(length,
- external_pointer,
- pretenure), PixelArray);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateByteArray(length, pretenure),
+ ByteArray);
}
@@ -265,32 +363,43 @@ Handle<ExternalArray> Factory::NewExternalArray(int length,
void* external_pointer,
PretenureFlag pretenure) {
ASSERT(0 <= length);
- CALL_HEAP_FUNCTION(Heap::AllocateExternalArray(length,
- array_type,
- external_pointer,
- pretenure), ExternalArray);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateExternalArray(length,
+ array_type,
+ external_pointer,
+ pretenure),
+ ExternalArray);
}
Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
Handle<Object> value) {
- CALL_HEAP_FUNCTION(Heap::AllocateJSGlobalPropertyCell(*value),
- JSGlobalPropertyCell);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSGlobalPropertyCell(*value),
+ JSGlobalPropertyCell);
}
Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
- CALL_HEAP_FUNCTION(Heap::AllocateMap(type, instance_size), Map);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateMap(type, instance_size),
+ Map);
}
Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
- CALL_HEAP_FUNCTION(Heap::AllocateFunctionPrototype(*function), JSObject);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFunctionPrototype(*function),
+ JSObject);
}
Handle<Map> Factory::CopyMapDropDescriptors(Handle<Map> src) {
- CALL_HEAP_FUNCTION(src->CopyDropDescriptors(), Map);
+ CALL_HEAP_FUNCTION(isolate(), src->CopyDropDescriptors(), Map);
}
@@ -320,27 +429,33 @@ Handle<Map> Factory::CopyMap(Handle<Map> src,
Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
- CALL_HEAP_FUNCTION(src->CopyDropTransitions(), Map);
+ CALL_HEAP_FUNCTION(isolate(), src->CopyDropTransitions(), Map);
}
Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) {
- CALL_HEAP_FUNCTION(src->GetFastElementsMap(), Map);
+ CALL_HEAP_FUNCTION(isolate(), src->GetFastElementsMap(), Map);
}
Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
- CALL_HEAP_FUNCTION(src->GetSlowElementsMap(), Map);
+ CALL_HEAP_FUNCTION(isolate(), src->GetSlowElementsMap(), Map);
}
-Handle<Map> Factory::GetPixelArrayElementsMap(Handle<Map> src) {
- CALL_HEAP_FUNCTION(src->GetPixelArrayElementsMap(), Map);
+Handle<Map> Factory::GetExternalArrayElementsMap(
+ Handle<Map> src,
+ ExternalArrayType array_type,
+ bool safe_to_add_transition) {
+ CALL_HEAP_FUNCTION(isolate(),
+ src->GetExternalArrayElementsMap(array_type,
+ safe_to_add_transition),
+ Map);
}
Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
- CALL_HEAP_FUNCTION(array->Copy(), FixedArray);
+ CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedArray);
}
@@ -348,10 +463,12 @@ Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info,
Handle<Map> function_map,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::AllocateFunction(*function_map,
- *function_info,
- Heap::the_hole_value(),
- pretenure),
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFunction(*function_map,
+ *function_info,
+ isolate()->heap()->the_hole_value(),
+ pretenure),
JSFunction);
}
@@ -361,11 +478,15 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<Context> context,
PretenureFlag pretenure) {
Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo(
- function_info, Top::function_map(), pretenure);
+ function_info,
+ function_info->strict_mode()
+ ? isolate()->strict_mode_function_map()
+ : isolate()->function_map(),
+ pretenure);
+
result->set_context(*context);
int number_of_literals = function_info->num_literals();
- Handle<FixedArray> literals =
- Factory::NewFixedArray(number_of_literals, pretenure);
+ Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
if (number_of_literals > 0) {
// Store the object, regexp and array functions in the literals
// array prefix. These functions will be used when creating
@@ -374,7 +495,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
context->global_context());
}
result->set_literals(*literals);
- result->set_next_function_link(Heap::undefined_value());
+ result->set_next_function_link(isolate()->heap()->undefined_value());
if (V8::UseCrankshaft() &&
FLAG_always_opt &&
@@ -389,23 +510,32 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<Object> Factory::NewNumber(double value,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::NumberFromDouble(value, pretenure), Object);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->NumberFromDouble(value, pretenure), Object);
}
Handle<Object> Factory::NewNumberFromInt(int value) {
- CALL_HEAP_FUNCTION(Heap::NumberFromInt32(value), Object);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->NumberFromInt32(value), Object);
}
Handle<Object> Factory::NewNumberFromUint(uint32_t value) {
- CALL_HEAP_FUNCTION(Heap::NumberFromUint32(value), Object);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->NumberFromUint32(value), Object);
}
Handle<JSObject> Factory::NewNeanderObject() {
- CALL_HEAP_FUNCTION(Heap::AllocateJSObjectFromMap(Heap::neander_map()),
- JSObject);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSObjectFromMap(
+ isolate()->heap()->neander_map()),
+ JSObject);
}
@@ -455,11 +585,11 @@ Handle<Object> Factory::NewReferenceError(Handle<String> message) {
Handle<Object> Factory::NewError(const char* maker, const char* type,
Vector< Handle<Object> > args) {
v8::HandleScope scope; // Instantiate a closeable HandleScope for EscapeFrom.
- Handle<FixedArray> array = Factory::NewFixedArray(args.length());
+ Handle<FixedArray> array = NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
array->set(i, *args[i]);
}
- Handle<JSArray> object = Factory::NewJSArrayWithElements(array);
+ Handle<JSArray> object = NewJSArrayWithElements(array);
Handle<Object> result = NewError(maker, type, object);
return result.EscapeFrom(&scope);
}
@@ -480,15 +610,15 @@ Handle<Object> Factory::NewError(const char* type,
Handle<Object> Factory::NewError(const char* maker,
const char* type,
Handle<JSArray> args) {
- Handle<String> make_str = Factory::LookupAsciiSymbol(maker);
- Handle<Object> fun_obj(Top::builtins()->GetPropertyNoExceptionThrown(
- *make_str));
+ Handle<String> make_str = LookupAsciiSymbol(maker);
+ Handle<Object> fun_obj(
+ isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str));
// If the builtins haven't been properly configured yet this error
// constructor may not have been defined. Bail out.
if (!fun_obj->IsJSFunction())
- return Factory::undefined_value();
+ return undefined_value();
Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
- Handle<Object> type_obj = Factory::LookupAsciiSymbol(type);
+ Handle<Object> type_obj = LookupAsciiSymbol(type);
Object** argv[2] = { type_obj.location(),
Handle<Object>::cast(args).location() };
@@ -496,10 +626,7 @@ Handle<Object> Factory::NewError(const char* maker,
// running the factory method, use the exception as the result.
bool caught_exception;
Handle<Object> result = Execution::TryCall(fun,
- Top::builtins(),
- 2,
- argv,
- &caught_exception);
+ isolate()->js_builtins_object(), 2, argv, &caught_exception);
return result;
}
@@ -511,21 +638,17 @@ Handle<Object> Factory::NewError(Handle<String> message) {
Handle<Object> Factory::NewError(const char* constructor,
Handle<String> message) {
- Handle<String> constr = Factory::LookupAsciiSymbol(constructor);
- Handle<JSFunction> fun =
- Handle<JSFunction>(
- JSFunction::cast(
- Top::builtins()->GetPropertyNoExceptionThrown(*constr)));
+ Handle<String> constr = LookupAsciiSymbol(constructor);
+ Handle<JSFunction> fun = Handle<JSFunction>(
+ JSFunction::cast(isolate()->js_builtins_object()->
+ GetPropertyNoExceptionThrown(*constr)));
Object** argv[1] = { Handle<Object>::cast(message).location() };
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
bool caught_exception;
Handle<Object> result = Execution::TryCall(fun,
- Top::builtins(),
- 1,
- argv,
- &caught_exception);
+ isolate()->js_builtins_object(), 1, argv, &caught_exception);
return result;
}
@@ -586,15 +709,15 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
// property that refers to the function.
SetPrototypeProperty(function, prototype);
// Currently safe because it is only invoked from Genesis.
- SetLocalPropertyNoThrow(
- prototype, Factory::constructor_symbol(), function, DONT_ENUM);
+ SetLocalPropertyNoThrow(prototype, constructor_symbol(), function, DONT_ENUM);
return function;
}
Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
Handle<Code> code) {
- Handle<JSFunction> function = NewFunctionWithoutPrototype(name);
+ Handle<JSFunction> function = NewFunctionWithoutPrototype(name,
+ kNonStrictMode);
function->shared()->set_code(*code);
function->set_code(*code);
ASSERT(!function->has_initial_map());
@@ -605,18 +728,26 @@ Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
Handle<Code> Factory::NewCode(const CodeDesc& desc,
Code::Flags flags,
- Handle<Object> self_ref) {
- CALL_HEAP_FUNCTION(Heap::CreateCode(desc, flags, self_ref), Code);
+ Handle<Object> self_ref,
+ bool immovable) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->CreateCode(
+ desc, flags, self_ref, immovable),
+ Code);
}
Handle<Code> Factory::CopyCode(Handle<Code> code) {
- CALL_HEAP_FUNCTION(Heap::CopyCode(*code), Code);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->CopyCode(*code),
+ Code);
}
Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
- CALL_HEAP_FUNCTION(Heap::CopyCode(*code, reloc_info), Code);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->CopyCode(*code, reloc_info),
+ Code);
}
@@ -632,18 +763,20 @@ MUST_USE_RESULT static inline MaybeObject* DoCopyInsert(
// Allocate the new array.
-Handle<DescriptorArray> Factory::CopyAppendProxyDescriptor(
+Handle<DescriptorArray> Factory::CopyAppendForeignDescriptor(
Handle<DescriptorArray> array,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION(DoCopyInsert(*array, *key, *value, attributes),
+ CALL_HEAP_FUNCTION(isolate(),
+ DoCopyInsert(*array, *key, *value, attributes),
DescriptorArray);
}
Handle<String> Factory::SymbolFromString(Handle<String> value) {
- CALL_HEAP_FUNCTION(Heap::LookupSymbol(*value), String);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->LookupSymbol(*value), String);
}
@@ -708,40 +841,57 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(Heap::AllocateJSObject(*constructor, pretenure), JSObject);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSObject(*constructor, pretenure), JSObject);
}
Handle<GlobalObject> Factory::NewGlobalObject(
Handle<JSFunction> constructor) {
- CALL_HEAP_FUNCTION(Heap::AllocateGlobalObject(*constructor),
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateGlobalObject(*constructor),
GlobalObject);
}
Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) {
- CALL_HEAP_FUNCTION(Heap::AllocateJSObjectFromMap(*map, NOT_TENURED),
- JSObject);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSObjectFromMap(*map, NOT_TENURED),
+ JSObject);
}
-Handle<JSArray> Factory::NewJSArray(int length,
+Handle<JSArray> Factory::NewJSArray(int capacity,
PretenureFlag pretenure) {
- Handle<JSObject> obj = NewJSObject(Top::array_function(), pretenure);
- CALL_HEAP_FUNCTION(Handle<JSArray>::cast(obj)->Initialize(length), JSArray);
+ Handle<JSObject> obj = NewJSObject(isolate()->array_function(), pretenure);
+ CALL_HEAP_FUNCTION(isolate(),
+ Handle<JSArray>::cast(obj)->Initialize(capacity),
+ JSArray);
}
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
PretenureFlag pretenure) {
Handle<JSArray> result =
- Handle<JSArray>::cast(NewJSObject(Top::array_function(), pretenure));
+ Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
+ pretenure));
result->SetContent(*elements);
return result;
}
+Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
+ Handle<Object> prototype) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSProxy(*handler, *prototype),
+ JSProxy);
+}
+
+
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name,
int number_of_literals,
@@ -770,24 +920,27 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
Handle<Object> script,
Handle<Object> stack_trace,
Handle<Object> stack_frames) {
- CALL_HEAP_FUNCTION(Heap::AllocateJSMessageObject(*type,
- *arguments,
- start_position,
- end_position,
- *script,
- *stack_trace,
- *stack_frames),
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateJSMessageObject(*type,
+ *arguments,
+ start_position,
+ end_position,
+ *script,
+ *stack_trace,
+ *stack_frames),
JSMessageObject);
}
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
- CALL_HEAP_FUNCTION(Heap::AllocateSharedFunctionInfo(*name),
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateSharedFunctionInfo(*name),
SharedFunctionInfo);
}
Handle<String> Factory::NumberToString(Handle<Object> number) {
- CALL_HEAP_FUNCTION(Heap::NumberToString(*number), String);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->NumberToString(*number), String);
}
@@ -795,54 +948,65 @@ Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
Handle<NumberDictionary> dictionary,
uint32_t key,
Handle<Object> value) {
- CALL_HEAP_FUNCTION(dictionary->AtNumberPut(key, *value), NumberDictionary);
+ CALL_HEAP_FUNCTION(isolate(),
+ dictionary->AtNumberPut(key, *value),
+ NumberDictionary);
}
Handle<JSFunction> Factory::NewFunctionHelper(Handle<String> name,
Handle<Object> prototype) {
Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
- CALL_HEAP_FUNCTION(Heap::AllocateFunction(*Top::function_map(),
- *function_share,
- *prototype),
- JSFunction);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFunction(*isolate()->function_map(),
+ *function_share,
+ *prototype),
+ JSFunction);
}
Handle<JSFunction> Factory::NewFunction(Handle<String> name,
Handle<Object> prototype) {
Handle<JSFunction> fun = NewFunctionHelper(name, prototype);
- fun->set_context(Top::context()->global_context());
+ fun->set_context(isolate()->context()->global_context());
return fun;
}
Handle<JSFunction> Factory::NewFunctionWithoutPrototypeHelper(
- Handle<String> name) {
+ Handle<String> name,
+ StrictModeFlag strict_mode) {
Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
- CALL_HEAP_FUNCTION(Heap::AllocateFunction(
- *Top::function_without_prototype_map(),
+ Handle<Map> map = strict_mode == kStrictMode
+ ? isolate()->strict_mode_function_without_prototype_map()
+ : isolate()->function_without_prototype_map();
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateFunction(
+ *map,
*function_share,
*the_hole_value()),
JSFunction);
}
-Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name) {
- Handle<JSFunction> fun = NewFunctionWithoutPrototypeHelper(name);
- fun->set_context(Top::context()->global_context());
+Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
+ Handle<String> name,
+ StrictModeFlag strict_mode) {
+ Handle<JSFunction> fun = NewFunctionWithoutPrototypeHelper(name, strict_mode);
+ fun->set_context(isolate()->context()->global_context());
return fun;
}
Handle<Object> Factory::ToObject(Handle<Object> object) {
- CALL_HEAP_FUNCTION(object->ToObject(), Object);
+ CALL_HEAP_FUNCTION(isolate(), object->ToObject(), Object);
}
Handle<Object> Factory::ToObject(Handle<Object> object,
Handle<Context> global_context) {
- CALL_HEAP_FUNCTION(object->ToObject(*global_context), Object);
+ CALL_HEAP_FUNCTION(isolate(), object->ToObject(*global_context), Object);
}
@@ -859,13 +1023,13 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
// debug info object to avoid allocation while setting up the debug info
// object.
Handle<FixedArray> break_points(
- Factory::NewFixedArray(Debug::kEstimatedNofBreakPointsInFunction));
+ NewFixedArray(Debug::kEstimatedNofBreakPointsInFunction));
// Create and set up the debug info object. Debug info contains function, a
// copy of the original code, the executing code and initial fixed array for
// active break points.
Handle<DebugInfo> debug_info =
- Handle<DebugInfo>::cast(Factory::NewStruct(DEBUG_INFO_TYPE));
+ Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE));
debug_info->set_shared(*shared);
debug_info->set_original_code(*original_code);
debug_info->set_code(*code);
@@ -881,15 +1045,16 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
Handle<JSObject> Factory::NewArgumentsObject(Handle<Object> callee,
int length) {
- CALL_HEAP_FUNCTION(Heap::AllocateArgumentsObject(*callee, length), JSObject);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateArgumentsObject(*callee, length), JSObject);
}
Handle<JSFunction> Factory::CreateApiFunction(
Handle<FunctionTemplateInfo> obj, ApiInstanceType instance_type) {
- Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::HandleApiCall));
- Handle<Code> construct_stub =
- Handle<Code>(Builtins::builtin(Builtins::JSConstructStubApi));
+ Handle<Code> code = isolate()->builtins()->HandleApiCall();
+ Handle<Code> construct_stub = isolate()->builtins()->JSConstructStubApi();
int internal_field_count = 0;
if (!obj->instance_template()->IsUndefined()) {
@@ -921,11 +1086,11 @@ Handle<JSFunction> Factory::CreateApiFunction(
ASSERT(type != INVALID_TYPE);
Handle<JSFunction> result =
- Factory::NewFunction(Factory::empty_symbol(),
- type,
- instance_size,
- code,
- true);
+ NewFunction(Factory::empty_symbol(),
+ type,
+ instance_size,
+ code,
+ true);
// Set class name.
Handle<Object> class_name = Handle<Object>(obj->class_name());
if (class_name->IsString()) {
@@ -973,7 +1138,7 @@ Handle<JSFunction> Factory::CreateApiFunction(
while (true) {
Handle<Object> props = Handle<Object>(obj->property_accessors());
if (!props->IsUndefined()) {
- array = Factory::CopyAppendCallbackDescriptors(array, props);
+ array = CopyAppendCallbackDescriptors(array, props);
}
Handle<Object> parent = Handle<Object>(obj->parent_template());
if (parent->IsUndefined()) break;
@@ -989,7 +1154,8 @@ Handle<JSFunction> Factory::CreateApiFunction(
Handle<MapCache> Factory::NewMapCache(int at_least_space_for) {
- CALL_HEAP_FUNCTION(MapCache::Allocate(at_least_space_for), MapCache);
+ CALL_HEAP_FUNCTION(isolate(),
+ MapCache::Allocate(at_least_space_for), MapCache);
}
@@ -1009,7 +1175,8 @@ MUST_USE_RESULT static MaybeObject* UpdateMapCacheWith(Context* context,
Handle<MapCache> Factory::AddToMapCache(Handle<Context> context,
Handle<FixedArray> keys,
Handle<Map> map) {
- CALL_HEAP_FUNCTION(UpdateMapCacheWith(*context, *keys, *map), MapCache);
+ CALL_HEAP_FUNCTION(isolate(),
+ UpdateMapCacheWith(*context, *keys, *map), MapCache);
}
@@ -1058,8 +1225,8 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
store->set(JSRegExp::kSourceIndex, *source);
store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
- store->set(JSRegExp::kIrregexpASCIICodeIndex, Heap::the_hole_value());
- store->set(JSRegExp::kIrregexpUC16CodeIndex, Heap::the_hole_value());
+ store->set(JSRegExp::kIrregexpASCIICodeIndex, HEAP->the_hole_value());
+ store->set(JSRegExp::kIrregexpUC16CodeIndex, HEAP->the_hole_value());
store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(0));
store->set(JSRegExp::kIrregexpCaptureCountIndex,
Smi::FromInt(capture_count));
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 7547f7c45..19f09a15c 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,6 +29,7 @@
#define V8_FACTORY_H_
#include "globals.h"
+#include "handles.h"
#include "heap.h"
namespace v8 {
@@ -36,34 +37,43 @@ namespace internal {
// Interface for handle based allocation.
-class Factory : public AllStatic {
+class Factory {
public:
- // Allocate a new fixed array with undefined entries.
- static Handle<FixedArray> NewFixedArray(
+ // Allocate a new uninitialized fixed array.
+ Handle<FixedArray> NewFixedArray(
int size,
PretenureFlag pretenure = NOT_TENURED);
// Allocate a new fixed array with non-existing entries (the hole).
- static Handle<FixedArray> NewFixedArrayWithHoles(
+ Handle<FixedArray> NewFixedArrayWithHoles(
int size,
PretenureFlag pretenure = NOT_TENURED);
- static Handle<NumberDictionary> NewNumberDictionary(int at_least_space_for);
+ // Allocate a new uninitialized fixed double array.
+ Handle<FixedArray> NewFixedDoubleArray(
+ int size,
+ PretenureFlag pretenure = NOT_TENURED);
- static Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
+ Handle<NumberDictionary> NewNumberDictionary(int at_least_space_for);
- static Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
- static Handle<DeoptimizationInputData> NewDeoptimizationInputData(
+ Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
+
+ Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
+ Handle<DeoptimizationInputData> NewDeoptimizationInputData(
int deopt_entry_count,
PretenureFlag pretenure);
- static Handle<DeoptimizationOutputData> NewDeoptimizationOutputData(
+ Handle<DeoptimizationOutputData> NewDeoptimizationOutputData(
int deopt_entry_count,
PretenureFlag pretenure);
- static Handle<String> LookupSymbol(Vector<const char> str);
- static Handle<String> LookupAsciiSymbol(Vector<const char> str);
- static Handle<String> LookupTwoByteSymbol(Vector<const uc16> str);
- static Handle<String> LookupAsciiSymbol(const char* str) {
+ Handle<String> LookupSymbol(Vector<const char> str);
+ Handle<String> LookupSymbol(Handle<String> str);
+ Handle<String> LookupAsciiSymbol(Vector<const char> str);
+ Handle<String> LookupAsciiSymbol(Handle<SeqAsciiString>,
+ int from,
+ int length);
+ Handle<String> LookupTwoByteSymbol(Vector<const uc16> str);
+ Handle<String> LookupAsciiSymbol(const char* str) {
return LookupSymbol(CStrVector(str));
}
@@ -90,236 +100,249 @@ class Factory : public AllStatic {
// two byte.
//
// ASCII strings are pretenured when used as keys in the SourceCodeCache.
- static Handle<String> NewStringFromAscii(
+ Handle<String> NewStringFromAscii(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
// UTF8 strings are pretenured when used for regexp literal patterns and
// flags in the parser.
- static Handle<String> NewStringFromUtf8(
+ Handle<String> NewStringFromUtf8(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
- static Handle<String> NewStringFromTwoByte(
+ Handle<String> NewStringFromTwoByte(
Vector<const uc16> str,
PretenureFlag pretenure = NOT_TENURED);
// Allocates and partially initializes an ASCII or TwoByte String. The
// characters of the string are uninitialized. Currently used in regexp code
// only, where they are pretenured.
- static Handle<String> NewRawAsciiString(
+ Handle<SeqAsciiString> NewRawAsciiString(
int length,
PretenureFlag pretenure = NOT_TENURED);
- static Handle<String> NewRawTwoByteString(
+ Handle<SeqTwoByteString> NewRawTwoByteString(
int length,
PretenureFlag pretenure = NOT_TENURED);
// Create a new cons string object which consists of a pair of strings.
- static Handle<String> NewConsString(Handle<String> first,
- Handle<String> second);
+ Handle<String> NewConsString(Handle<String> first,
+ Handle<String> second);
// Create a new string object which holds a substring of a string.
- static Handle<String> NewSubString(Handle<String> str,
- int begin,
- int end);
+ Handle<String> NewSubString(Handle<String> str,
+ int begin,
+ int end);
+
+ // Create a new string object which holds a proper substring of a string.
+ Handle<String> NewProperSubString(Handle<String> str,
+ int begin,
+ int end);
// Creates a new external String object. There are two String encodings
// in the system: ASCII and two byte. Unlike other String types, it does
// not make sense to have a UTF-8 factory function for external strings,
// because we cannot change the underlying buffer.
- static Handle<String> NewExternalStringFromAscii(
+ Handle<String> NewExternalStringFromAscii(
ExternalAsciiString::Resource* resource);
- static Handle<String> NewExternalStringFromTwoByte(
+ Handle<String> NewExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource);
// Create a global (but otherwise uninitialized) context.
- static Handle<Context> NewGlobalContext();
+ Handle<Context> NewGlobalContext();
// Create a function context.
- static Handle<Context> NewFunctionContext(int length,
- Handle<JSFunction> closure);
+ Handle<Context> NewFunctionContext(int length,
+ Handle<JSFunction> function);
+
+ // Create a catch context.
+ Handle<Context> NewCatchContext(Handle<JSFunction> function,
+ Handle<Context> previous,
+ Handle<String> name,
+ Handle<Object> thrown_object);
// Create a 'with' context.
- static Handle<Context> NewWithContext(Handle<Context> previous,
- Handle<JSObject> extension,
- bool is_catch_context);
+ Handle<Context> NewWithContext(Handle<JSFunction> function,
+ Handle<Context> previous,
+ Handle<JSObject> extension);
// Return the Symbol matching the passed in string.
- static Handle<String> SymbolFromString(Handle<String> value);
+ Handle<String> SymbolFromString(Handle<String> value);
// Allocate a new struct. The struct is pretenured (allocated directly in
// the old generation).
- static Handle<Struct> NewStruct(InstanceType type);
+ Handle<Struct> NewStruct(InstanceType type);
- static Handle<AccessorInfo> NewAccessorInfo();
+ Handle<AccessorInfo> NewAccessorInfo();
- static Handle<Script> NewScript(Handle<String> source);
+ Handle<Script> NewScript(Handle<String> source);
- // Proxies are pretenured when allocated by the bootstrapper.
- static Handle<Proxy> NewProxy(Address addr,
- PretenureFlag pretenure = NOT_TENURED);
+ // Foreign objects are pretenured when allocated by the bootstrapper.
+ Handle<Foreign> NewForeign(Address addr,
+ PretenureFlag pretenure = NOT_TENURED);
- // Allocate a new proxy. The proxy is pretenured (allocated directly in
- // the old generation).
- static Handle<Proxy> NewProxy(const AccessorDescriptor* proxy);
+ // Allocate a new foreign object. The foreign is pretenured (allocated
+ // directly in the old generation).
+ Handle<Foreign> NewForeign(const AccessorDescriptor* foreign);
- static Handle<ByteArray> NewByteArray(int length,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<ByteArray> NewByteArray(int length,
+ PretenureFlag pretenure = NOT_TENURED);
- static Handle<PixelArray> NewPixelArray(
- int length,
- uint8_t* external_pointer,
- PretenureFlag pretenure = NOT_TENURED);
-
- static Handle<ExternalArray> NewExternalArray(
+ Handle<ExternalArray> NewExternalArray(
int length,
ExternalArrayType array_type,
void* external_pointer,
PretenureFlag pretenure = NOT_TENURED);
- static Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
+ Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
Handle<Object> value);
- static Handle<Map> NewMap(InstanceType type, int instance_size);
+ Handle<Map> NewMap(InstanceType type, int instance_size);
- static Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
+ Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
- static Handle<Map> CopyMapDropDescriptors(Handle<Map> map);
+ Handle<Map> CopyMapDropDescriptors(Handle<Map> map);
// Copy the map adding more inobject properties if possible without
// overflowing the instance size.
- static Handle<Map> CopyMap(Handle<Map> map, int extra_inobject_props);
+ Handle<Map> CopyMap(Handle<Map> map, int extra_inobject_props);
- static Handle<Map> CopyMapDropTransitions(Handle<Map> map);
+ Handle<Map> CopyMapDropTransitions(Handle<Map> map);
- static Handle<Map> GetFastElementsMap(Handle<Map> map);
+ Handle<Map> GetFastElementsMap(Handle<Map> map);
- static Handle<Map> GetSlowElementsMap(Handle<Map> map);
+ Handle<Map> GetSlowElementsMap(Handle<Map> map);
- static Handle<Map> GetPixelArrayElementsMap(Handle<Map> map);
+ Handle<Map> GetExternalArrayElementsMap(Handle<Map> map,
+ ExternalArrayType array_type,
+ bool safe_to_add_transition);
- static Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
+ Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
  // Numbers (e.g. literals) are pretenured by the parser.
- static Handle<Object> NewNumber(double value,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<Object> NewNumber(double value,
+ PretenureFlag pretenure = NOT_TENURED);
- static Handle<Object> NewNumberFromInt(int value);
- static Handle<Object> NewNumberFromUint(uint32_t value);
+ Handle<Object> NewNumberFromInt(int value);
+ Handle<Object> NewNumberFromUint(uint32_t value);
// These objects are used by the api to create env-independent data
// structures in the heap.
- static Handle<JSObject> NewNeanderObject();
+ Handle<JSObject> NewNeanderObject();
- static Handle<JSObject> NewArgumentsObject(Handle<Object> callee, int length);
+ Handle<JSObject> NewArgumentsObject(Handle<Object> callee, int length);
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
- static Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
+ PretenureFlag pretenure = NOT_TENURED);
// Global objects are pretenured.
- static Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
+ Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
- static Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
+ Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
// JS arrays are pretenured when allocated by the parser.
- static Handle<JSArray> NewJSArray(int init_length,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSArray> NewJSArray(int capacity,
+ PretenureFlag pretenure = NOT_TENURED);
- static Handle<JSArray> NewJSArrayWithElements(
+ Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArray> elements,
PretenureFlag pretenure = NOT_TENURED);
- static Handle<JSFunction> NewFunction(Handle<String> name,
- Handle<Object> prototype);
+ Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
+
+ Handle<JSFunction> NewFunction(Handle<String> name,
+ Handle<Object> prototype);
- static Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name);
+ Handle<JSFunction> NewFunctionWithoutPrototype(
+ Handle<String> name,
+ StrictModeFlag strict_mode);
- static Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
+ Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
- static Handle<JSFunction> BaseNewFunctionFromSharedFunctionInfo(
+ Handle<JSFunction> BaseNewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info,
Handle<Map> function_map,
PretenureFlag pretenure);
- static Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
+ Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info,
Handle<Context> context,
PretenureFlag pretenure = TENURED);
- static Handle<Code> NewCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference);
+ Handle<Code> NewCode(const CodeDesc& desc,
+ Code::Flags flags,
+ Handle<Object> self_reference,
+ bool immovable = false);
- static Handle<Code> CopyCode(Handle<Code> code);
+ Handle<Code> CopyCode(Handle<Code> code);
- static Handle<Code> CopyCode(Handle<Code> code, Vector<byte> reloc_info);
+ Handle<Code> CopyCode(Handle<Code> code, Vector<byte> reloc_info);
- static Handle<Object> ToObject(Handle<Object> object);
- static Handle<Object> ToObject(Handle<Object> object,
- Handle<Context> global_context);
+ Handle<Object> ToObject(Handle<Object> object);
+ Handle<Object> ToObject(Handle<Object> object,
+ Handle<Context> global_context);
// Interface for creating error objects.
- static Handle<Object> NewError(const char* maker, const char* type,
- Handle<JSArray> args);
- static Handle<Object> NewError(const char* maker, const char* type,
- Vector< Handle<Object> > args);
- static Handle<Object> NewError(const char* type,
- Vector< Handle<Object> > args);
- static Handle<Object> NewError(Handle<String> message);
- static Handle<Object> NewError(const char* constructor,
- Handle<String> message);
+ Handle<Object> NewError(const char* maker, const char* type,
+ Handle<JSArray> args);
+ Handle<Object> NewError(const char* maker, const char* type,
+ Vector< Handle<Object> > args);
+ Handle<Object> NewError(const char* type,
+ Vector< Handle<Object> > args);
+ Handle<Object> NewError(Handle<String> message);
+ Handle<Object> NewError(const char* constructor,
+ Handle<String> message);
- static Handle<Object> NewTypeError(const char* type,
- Vector< Handle<Object> > args);
- static Handle<Object> NewTypeError(Handle<String> message);
+ Handle<Object> NewTypeError(const char* type,
+ Vector< Handle<Object> > args);
+ Handle<Object> NewTypeError(Handle<String> message);
- static Handle<Object> NewRangeError(const char* type,
- Vector< Handle<Object> > args);
- static Handle<Object> NewRangeError(Handle<String> message);
+ Handle<Object> NewRangeError(const char* type,
+ Vector< Handle<Object> > args);
+ Handle<Object> NewRangeError(Handle<String> message);
- static Handle<Object> NewSyntaxError(const char* type, Handle<JSArray> args);
- static Handle<Object> NewSyntaxError(Handle<String> message);
+ Handle<Object> NewSyntaxError(const char* type, Handle<JSArray> args);
+ Handle<Object> NewSyntaxError(Handle<String> message);
- static Handle<Object> NewReferenceError(const char* type,
- Vector< Handle<Object> > args);
- static Handle<Object> NewReferenceError(Handle<String> message);
+ Handle<Object> NewReferenceError(const char* type,
+ Vector< Handle<Object> > args);
+ Handle<Object> NewReferenceError(Handle<String> message);
- static Handle<Object> NewEvalError(const char* type,
- Vector< Handle<Object> > args);
+ Handle<Object> NewEvalError(const char* type,
+ Vector< Handle<Object> > args);
- static Handle<JSFunction> NewFunction(Handle<String> name,
- InstanceType type,
- int instance_size,
- Handle<Code> code,
- bool force_initial_map);
+ Handle<JSFunction> NewFunction(Handle<String> name,
+ InstanceType type,
+ int instance_size,
+ Handle<Code> code,
+ bool force_initial_map);
- static Handle<JSFunction> NewFunction(Handle<Map> function_map,
+ Handle<JSFunction> NewFunction(Handle<Map> function_map,
Handle<SharedFunctionInfo> shared, Handle<Object> prototype);
- static Handle<JSFunction> NewFunctionWithPrototype(Handle<String> name,
- InstanceType type,
- int instance_size,
- Handle<JSObject> prototype,
- Handle<Code> code,
- bool force_initial_map);
+ Handle<JSFunction> NewFunctionWithPrototype(Handle<String> name,
+ InstanceType type,
+ int instance_size,
+ Handle<JSObject> prototype,
+ Handle<Code> code,
+ bool force_initial_map);
- static Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
- Handle<Code> code);
+ Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
+ Handle<Code> code);
- static Handle<DescriptorArray> CopyAppendProxyDescriptor(
+ Handle<DescriptorArray> CopyAppendForeignDescriptor(
Handle<DescriptorArray> array,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes);
- static Handle<String> NumberToString(Handle<Object> number);
+ Handle<String> NumberToString(Handle<Object> number);
enum ApiInstanceType {
JavaScriptObject,
@@ -327,47 +350,47 @@ class Factory : public AllStatic {
OuterGlobalObject
};
- static Handle<JSFunction> CreateApiFunction(
+ Handle<JSFunction> CreateApiFunction(
Handle<FunctionTemplateInfo> data,
ApiInstanceType type = JavaScriptObject);
- static Handle<JSFunction> InstallMembers(Handle<JSFunction> function);
+ Handle<JSFunction> InstallMembers(Handle<JSFunction> function);
// Installs interceptors on the instance. 'desc' is a function template,
// and instance is an object instance created by the function of this
// function template.
- static void ConfigureInstance(Handle<FunctionTemplateInfo> desc,
- Handle<JSObject> instance,
- bool* pending_exception);
+ void ConfigureInstance(Handle<FunctionTemplateInfo> desc,
+ Handle<JSObject> instance,
+ bool* pending_exception);
#define ROOT_ACCESSOR(type, name, camel_name) \
- static inline Handle<type> name() { \
+ inline Handle<type> name() { \
return Handle<type>(BitCast<type**>( \
- &Heap::roots_[Heap::k##camel_name##RootIndex])); \
+ &isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
-#define SYMBOL_ACCESSOR(name, str) \
- static inline Handle<String> name() { \
+#define SYMBOL_ACCESSOR(name, str) \
+ inline Handle<String> name() { \
return Handle<String>(BitCast<String**>( \
- &Heap::roots_[Heap::k##name##RootIndex])); \
+ &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
}
SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
- static Handle<String> hidden_symbol() {
- return Handle<String>(&Heap::hidden_symbol_);
+ Handle<String> hidden_symbol() {
+ return Handle<String>(&isolate()->heap()->hidden_symbol_);
}
- static Handle<SharedFunctionInfo> NewSharedFunctionInfo(
+ Handle<SharedFunctionInfo> NewSharedFunctionInfo(
Handle<String> name,
int number_of_literals,
Handle<Code> code,
Handle<SerializedScopeInfo> scope_info);
- static Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
+ Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
- static Handle<JSMessageObject> NewJSMessageObject(
+ Handle<JSMessageObject> NewJSMessageObject(
Handle<String> type,
Handle<JSArray> arguments,
int start_position,
@@ -376,54 +399,57 @@ class Factory : public AllStatic {
Handle<Object> stack_trace,
Handle<Object> stack_frames);
- static Handle<NumberDictionary> DictionaryAtNumberPut(
+ Handle<NumberDictionary> DictionaryAtNumberPut(
Handle<NumberDictionary>,
uint32_t key,
Handle<Object> value);
#ifdef ENABLE_DEBUGGER_SUPPORT
- static Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
+ Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
#endif
// Return a map using the map cache in the global context.
// The key is an ordered set of property names.
- static Handle<Map> ObjectLiteralMapFromCache(Handle<Context> context,
- Handle<FixedArray> keys);
+ Handle<Map> ObjectLiteralMapFromCache(Handle<Context> context,
+ Handle<FixedArray> keys);
// Creates a new FixedArray that holds the data associated with the
// atom regexp and stores it in the regexp.
- static void SetRegExpAtomData(Handle<JSRegExp> regexp,
- JSRegExp::Type type,
- Handle<String> source,
- JSRegExp::Flags flags,
- Handle<Object> match_pattern);
+ void SetRegExpAtomData(Handle<JSRegExp> regexp,
+ JSRegExp::Type type,
+ Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<Object> match_pattern);
// Creates a new FixedArray that holds the data associated with the
// irregexp regexp and stores it in the regexp.
- static void SetRegExpIrregexpData(Handle<JSRegExp> regexp,
- JSRegExp::Type type,
- Handle<String> source,
- JSRegExp::Flags flags,
- int capture_count);
+ void SetRegExpIrregexpData(Handle<JSRegExp> regexp,
+ JSRegExp::Type type,
+ Handle<String> source,
+ JSRegExp::Flags flags,
+ int capture_count);
private:
- static Handle<JSFunction> NewFunctionHelper(Handle<String> name,
- Handle<Object> prototype);
+ Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
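+  // (Presumably sound because Factory declares no fields of its own and
+  // Isolate's factory() accessor performs the inverse cast, so both
+  // pointers name the same address; an inference, not stated in this patch.)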
+
+ Handle<JSFunction> NewFunctionHelper(Handle<String> name,
+ Handle<Object> prototype);
- static Handle<JSFunction> NewFunctionWithoutPrototypeHelper(
- Handle<String> name);
+ Handle<JSFunction> NewFunctionWithoutPrototypeHelper(
+ Handle<String> name,
+ StrictModeFlag strict_mode);
- static Handle<DescriptorArray> CopyAppendCallbackDescriptors(
+ Handle<DescriptorArray> CopyAppendCallbackDescriptors(
Handle<DescriptorArray> array,
Handle<Object> descriptors);
// Create a new map cache.
- static Handle<MapCache> NewMapCache(int at_least_space_for);
+ Handle<MapCache> NewMapCache(int at_least_space_for);
// Update the map cache in the global context with (keys, map)
- static Handle<MapCache> AddToMapCache(Handle<Context> context,
- Handle<FixedArray> keys,
- Handle<Map> map);
+ Handle<MapCache> AddToMapCache(Handle<Context> context,
+ Handle<FixedArray> keys,
+ Handle<Map> map);
};
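The net effect on call sites across the tree, shown as an illustrative
before/after (FACTORY is assumed to be the isolate.h shorthand analogous to
HEAP):

    // Before: Factory was AllStatic.
    //   Handle<String> name = Factory::LookupAsciiSymbol("length");
    // After: reach the per-isolate instance, directly or via the macro.
    //   Handle<String> name = isolate->factory()->LookupAsciiSymbol("length");
    //   Handle<String> name = FACTORY->LookupAsciiSymbol("length");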
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index ea245a44d..e24271939 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -96,11 +96,17 @@ private:
//
#define FLAG FLAG_FULL
+// Flags for experimental language features.
+DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
+
+// Flags for experimental implementation features.
+DEFINE_bool(unbox_double_arrays, false, "automatically unbox arrays of doubles")
+
// Flags for Crankshaft.
-#ifdef V8_TARGET_ARCH_IA32
-DEFINE_bool(crankshaft, true, "use crankshaft")
+#ifdef V8_TARGET_ARCH_MIPS
+ DEFINE_bool(crankshaft, false, "use crankshaft")
#else
-DEFINE_bool(crankshaft, false, "use crankshaft")
+ DEFINE_bool(crankshaft, true, "use crankshaft")
#endif
DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
@@ -115,7 +121,7 @@ DEFINE_bool(use_inlining, true, "use function inlining")
DEFINE_bool(limit_inlining, true, "limit code size growth from inlining")
DEFINE_bool(eliminate_empty_blocks, true, "eliminate empty blocks")
DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
-DEFINE_bool(time_hydrogen, false, "timing for hydrogen")
+DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen")
DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
DEFINE_bool(trace_inlining, false, "trace inlining decisions")
DEFINE_bool(trace_alloc, false, "trace register allocator")
@@ -128,12 +134,9 @@ DEFINE_bool(stress_environments, false, "environment for every instruction")
DEFINE_int(deopt_every_n_times,
0,
"deoptimize every n times a deopt point is passed")
-DEFINE_bool(process_arguments_object, true, "try to deal with arguments object")
DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
-DEFINE_bool(aggressive_loop_invariant_motion, true,
- "aggressive motion of instructions out of loops")
DEFINE_bool(use_osr, true, "use on-stack replacement")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
@@ -144,11 +147,8 @@ DEFINE_bool(optimize_closures, true, "optimize closures")
DEFINE_bool(debug_code, false,
"generate extra code (assertions) for debugging")
DEFINE_bool(code_comments, false, "emit comments in code disassembly")
-DEFINE_bool(emit_branch_hints, false, "emit branch hints")
DEFINE_bool(peephole_optimization, true,
"perform peephole optimizations in assembly code")
-DEFINE_bool(print_peephole_optimization, false,
- "print peephole optimizations in assembly code")
DEFINE_bool(enable_sse2, true,
"enable use of SSE2 instructions if available")
DEFINE_bool(enable_sse3, true,
@@ -162,9 +162,12 @@ DEFINE_bool(enable_rdtsc, true,
DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
DEFINE_bool(enable_vfp3, true,
- "enable use of VFP3 instructions if available (ARM only)")
+ "enable use of VFP3 instructions if available - this implies "
+ "enabling ARMv7 instructions (ARM only)")
DEFINE_bool(enable_armv7, true,
"enable use of ARMv7 instructions if available (ARM only)")
+DEFINE_bool(enable_fpu, true,
+ "enable use of MIPS FPU instructions if available (MIPS only)")
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
@@ -184,7 +187,6 @@ DEFINE_bool(stack_trace_on_abort, true,
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(trace, false, "trace function calls")
-DEFINE_bool(defer_negation, true, "defer negation operation")
DEFINE_bool(mask_constants_with_cookie,
true,
"use random jit cookie to mask large constants")
@@ -197,7 +199,6 @@ DEFINE_bool(opt, true, "use adaptive optimizations")
DEFINE_bool(opt_eagerly, false, "be more eager when adaptively optimizing")
DEFINE_bool(always_opt, false, "always try to optimize functions")
DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt")
-DEFINE_bool(debug_info, true, "add debug information to compiled functions")
DEFINE_bool(deopt, true, "support deoptimization")
DEFINE_bool(trace_deopt, false, "trace deoptimization")
@@ -205,23 +206,17 @@ DEFINE_bool(trace_deopt, false, "trace deoptimization")
DEFINE_bool(strict, false, "strict error checking")
DEFINE_int(min_preparse_length, 1024,
"minimum length for automatic enable preparsing")
-DEFINE_bool(full_compiler, true, "enable dedicated backend for run-once code")
DEFINE_bool(always_full_compiler, false,
"try to use the dedicated run-once backend for all code")
DEFINE_bool(trace_bailout, false,
"print reasons for falling back to using the classic V8 backend")
-DEFINE_bool(safe_int32_compiler, true,
- "enable optimized side-effect-free int32 expressions.")
-DEFINE_bool(use_flow_graph, false, "perform flow-graph based optimizations")
// compilation-cache.cc
DEFINE_bool(compilation_cache, true, "enable compilation cache")
-// data-flow.cc
-DEFINE_bool(loop_peeling, false, "Peel off the first iteration of loops.")
+DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions")
// debug.cc
-DEFINE_bool(remote_debugging, false, "enable remote debugging")
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_bool(debugger_auto_break, true,
"automatically set the debug break flag when debugger commands are "
@@ -280,10 +275,9 @@ DEFINE_bool(native_code_counters, false,
DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
DEFINE_bool(never_compact, false,
"Never perform compaction on full GC - testing only")
-DEFINE_bool(cleanup_ics_at_gc, true,
- "Flush inline caches prior to mark compact collection.")
-DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
- "Flush code caches in maps during mark compact cycle.")
+DEFINE_bool(cleanup_code_caches_at_gc, true,
+ "Flush inline caches prior to mark compact collection and "
+ "flush code caches in maps during mark compact cycle.")
DEFINE_int(random_seed, 0,
"Default seed for initializing random generator "
"(0, the default, means to use system random).")
@@ -310,9 +304,6 @@ DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
DEFINE_bool(strict_mode, true, "allow strict mode directives")
-// rewriter.cc
-DEFINE_bool(optimize_ast, true, "optimize the ast")
-
// simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
DEFINE_bool(check_icache, false, "Check icache flushes in ARM simulator")
@@ -320,7 +311,7 @@ DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
DEFINE_int(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
-// top.cc
+// isolate.cc
DEFINE_bool(trace_exception, false,
"print stack trace when throwing exceptions")
DEFINE_bool(preallocate_message_memory, false,
@@ -331,7 +322,6 @@ DEFINE_bool(preemption, false,
"activate a 100ms timer that switches between V8 threads")
// Regexp
-DEFINE_bool(trace_regexps, false, "trace regexp execution")
DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
DEFINE_bool(regexp_entry_native, true, "use native code to enter regexp")
@@ -408,16 +398,11 @@ DEFINE_bool(print_builtin_ast, false, "print source AST for builtins")
DEFINE_bool(print_json_ast, false, "print source AST as JSON")
DEFINE_bool(print_builtin_json_ast, false,
"print source AST for builtins as JSON")
-DEFINE_bool(trace_calls, false, "trace calls")
-DEFINE_bool(trace_builtin_calls, false, "trace builtins calls")
DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
// compiler.cc
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
DEFINE_bool(print_scopes, false, "print scopes")
-DEFINE_bool(print_ir, false, "print the AST as seen by the backend")
-DEFINE_bool(print_graph_text, false,
- "print a text representation of the flow graph")
// contexts.cc
DEFINE_bool(trace_contexts, false, "trace contexts operations")
@@ -451,6 +436,8 @@ DEFINE_bool(collect_heap_spill_statistics, false,
"report heap spill statistics along with heap_stats "
"(requires heap_stats)")
+DEFINE_bool(trace_isolates, false, "trace isolate state changes")
+
// VM state
DEFINE_bool(log_state_changes, false, "Log state changes.")
@@ -487,7 +474,6 @@ DEFINE_bool(log_handles, false, "Log global handle events.")
DEFINE_bool(log_snapshot_positions, false,
"log positions of (de)serialized objects in the snapshot.")
DEFINE_bool(log_suspect, false, "Log suspect operations.")
-DEFINE_bool(log_producers, false, "Log stack traces of JS objects allocations.")
DEFINE_bool(prof, false,
"Log statistical profiling information (implies --log-code).")
DEFINE_bool(prof_auto, true,
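
// [Sketch, not part of the patch] flag-definitions.h is an X-macro file:
// the DEFINE_* entries above expand differently depending on how the
// including file defines FLAG (see "#define FLAG FLAG_FULL" near the top
// of the file). A generic, self-contained illustration of the technique;
// the macro names here are hypothetical, not V8's exact ones.
#include <cstdio>

#define SKETCH_FLAG_LIST(F)                                      \
  F(bool, crankshaft, true, "use crankshaft")                    \
  F(int, random_seed, 0, "seed for the random number generator")

// Expansion 1: emit the backing variables.
#define SKETCH_DEFINE(type, name, def, cmt) type FLAG_##name = def;
SKETCH_FLAG_LIST(SKETCH_DEFINE)
#undef SKETCH_DEFINE

// Expansion 2: emit help text from the very same list.
void SketchPrintFlagHelp() {
#define SKETCH_HELP(type, name, def, cmt) std::printf("--%s: %s\n", #name, cmt);
  SKETCH_FLAG_LIST(SKETCH_HELP)
#undef SKETCH_HELP
}
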
diff --git a/deps/v8/src/frame-element.h b/deps/v8/src/frame-element.h
deleted file mode 100644
index ae5d6a1bf..000000000
--- a/deps/v8/src/frame-element.h
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FRAME_ELEMENT_H_
-#define V8_FRAME_ELEMENT_H_
-
-#include "type-info.h"
-#include "macro-assembler.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frame elements
-//
-// The internal elements of the virtual frames. There are several kinds of
-// elements:
-// * Invalid: elements that are uninitialized or not actually part
-// of the virtual frame. They should not be read.
-// * Memory: an element that resides in the actual frame. Its address is
-// given by its position in the virtual frame.
-// * Register: an element that resides in a register.
-// * Constant: an element whose value is known at compile time.
-
-class FrameElement BASE_EMBEDDED {
- public:
- enum SyncFlag {
- NOT_SYNCED,
- SYNCED
- };
-
- inline TypeInfo type_info() {
- // Copied elements do not have type info. Instead
- // we have to inspect their backing element in the frame.
- ASSERT(!is_copy());
- return TypeInfo::FromInt(TypeInfoField::decode(value_));
- }
-
- inline void set_type_info(TypeInfo info) {
- // Copied elements do not have type info. Instead
- // we have to inspect their backing element in the frame.
- ASSERT(!is_copy());
- value_ = value_ & ~TypeInfoField::mask();
- value_ = value_ | TypeInfoField::encode(info.ToInt());
- }
-
- // The default constructor creates an invalid frame element.
- FrameElement() {
- value_ = TypeField::encode(INVALID)
- | CopiedField::encode(false)
- | SyncedField::encode(false)
- | TypeInfoField::encode(TypeInfo::Uninitialized().ToInt())
- | DataField::encode(0);
- }
-
- // Factory function to construct an invalid frame element.
- static FrameElement InvalidElement() {
- FrameElement result;
- return result;
- }
-
- // Factory function to construct an in-memory frame element.
- static FrameElement MemoryElement(TypeInfo info) {
- FrameElement result(MEMORY, no_reg, SYNCED, info);
- return result;
- }
-
- // Factory function to construct an in-register frame element.
- static FrameElement RegisterElement(Register reg,
- SyncFlag is_synced,
- TypeInfo info) {
- return FrameElement(REGISTER, reg, is_synced, info);
- }
-
- // Factory function to construct a frame element whose value is known at
- // compile time.
- static FrameElement ConstantElement(Handle<Object> value,
- SyncFlag is_synced) {
- TypeInfo info = TypeInfo::TypeFromValue(value);
- FrameElement result(value, is_synced, info);
- return result;
- }
-
- // Static indirection table for handles to constants. If a frame
- // element represents a constant, the data contains an index into
- // this table of handles to the actual constants.
- typedef ZoneList<Handle<Object> > ZoneObjectList;
-
- static ZoneObjectList* ConstantList();
-
- static bool ConstantPoolOverflowed() {
- return !DataField::is_valid(ConstantList()->length());
- }
-
- // Clear the constants indirection table.
- static void ClearConstantList() {
- ConstantList()->Clear();
- }
-
- bool is_synced() const { return SyncedField::decode(value_); }
-
- void set_sync() {
- ASSERT(type() != MEMORY);
- value_ = value_ | SyncedField::encode(true);
- }
-
- void clear_sync() {
- ASSERT(type() != MEMORY);
- value_ = value_ & ~SyncedField::mask();
- }
-
- bool is_valid() const { return type() != INVALID; }
- bool is_memory() const { return type() == MEMORY; }
- bool is_register() const { return type() == REGISTER; }
- bool is_constant() const { return type() == CONSTANT; }
- bool is_copy() const { return type() == COPY; }
-
- bool is_copied() const { return CopiedField::decode(value_); }
- void set_copied() { value_ = value_ | CopiedField::encode(true); }
- void clear_copied() { value_ = value_ & ~CopiedField::mask(); }
-
- // An untagged int32 FrameElement represents a signed int32
- // on the stack. These are only allowed in a side-effect-free
- // int32 calculation, and if a non-int32 input shows up or an overflow
- // occurs, we bail out and drop all the int32 values.
- void set_untagged_int32(bool value) {
- value_ &= ~UntaggedInt32Field::mask();
- value_ |= UntaggedInt32Field::encode(value);
- }
- bool is_untagged_int32() const { return UntaggedInt32Field::decode(value_); }
-
- Register reg() const {
- ASSERT(is_register());
- uint32_t reg = DataField::decode(value_);
- Register result;
- result.code_ = reg;
- return result;
- }
-
- Handle<Object> handle() const {
- ASSERT(is_constant());
- return ConstantList()->at(DataField::decode(value_));
- }
-
- int index() const {
- ASSERT(is_copy());
- return DataField::decode(value_);
- }
-
- bool Equals(FrameElement other) {
- uint32_t masked_difference = (value_ ^ other.value_) & ~CopiedField::mask();
- if (!masked_difference) {
- // The elements are equal if they agree exactly except on copied field.
- return true;
- } else {
- // If two constants have the same value, and agree otherwise, return true.
- return !(masked_difference & ~DataField::mask()) &&
- is_constant() &&
- handle().is_identical_to(other.handle());
- }
- }
-
- // Test if two FrameElements refer to the same memory or register location.
- bool SameLocation(FrameElement* other) {
- if (type() == other->type()) {
- if (value_ == other->value_) return true;
- if (is_constant() && handle().is_identical_to(other->handle())) {
- return true;
- }
- }
- return false;
- }
-
- // Given a pair of non-null frame element pointers, return one of them
- // as an entry frame candidate or null if they are incompatible.
- FrameElement* Combine(FrameElement* other) {
- // If either is invalid, the result is.
- if (!is_valid()) return this;
- if (!other->is_valid()) return other;
-
- if (!SameLocation(other)) return NULL;
- // If either is unsynced, the result is.
- FrameElement* result = is_synced() ? other : this;
- return result;
- }
-
- private:
- enum Type {
- INVALID,
- MEMORY,
- REGISTER,
- CONSTANT,
- COPY
- };
-
- // Used to construct memory and register elements.
- FrameElement(Type type,
- Register reg,
- SyncFlag is_synced,
- TypeInfo info) {
- value_ = TypeField::encode(type)
- | CopiedField::encode(false)
- | SyncedField::encode(is_synced != NOT_SYNCED)
- | TypeInfoField::encode(info.ToInt())
- | DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
- }
-
- // Used to construct constant elements.
- FrameElement(Handle<Object> value, SyncFlag is_synced, TypeInfo info) {
- value_ = TypeField::encode(CONSTANT)
- | CopiedField::encode(false)
- | SyncedField::encode(is_synced != NOT_SYNCED)
- | TypeInfoField::encode(info.ToInt())
- | DataField::encode(ConstantList()->length());
- ConstantList()->Add(value);
- }
-
- Type type() const { return TypeField::decode(value_); }
- void set_type(Type type) {
- value_ = value_ & ~TypeField::mask();
- value_ = value_ | TypeField::encode(type);
- }
-
- void set_index(int new_index) {
- ASSERT(is_copy());
- value_ = value_ & ~DataField::mask();
- value_ = value_ | DataField::encode(new_index);
- }
-
- void set_reg(Register new_reg) {
- ASSERT(is_register());
- value_ = value_ & ~DataField::mask();
- value_ = value_ | DataField::encode(new_reg.code_);
- }
-
- // Encode type, copied, synced and data in one 32 bit integer.
- uint32_t value_;
-
- // Declare BitFields with template parameters <type, start, size>.
- class TypeField: public BitField<Type, 0, 3> {};
- class CopiedField: public BitField<bool, 3, 1> {};
- class SyncedField: public BitField<bool, 4, 1> {};
- class UntaggedInt32Field: public BitField<bool, 5, 1> {};
- class TypeInfoField: public BitField<int, 6, 7> {};
- class DataField: public BitField<uint32_t, 13, 32 - 13> {};
-
- friend class VirtualFrame;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_FRAME_ELEMENT_H_
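
// [Sketch, not part of the patch] The FrameElement removed above packed
// everything into one 32-bit word through BitField<Type, shift, size>
// (see the TypeField/CopiedField/.../DataField declarations near its end:
// 3 type bits, three 1-bit flags, 7 type-info bits, 19 data bits). A
// minimal reimplementation of that encode/decode/mask scheme:
#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct SketchBitField {
  static uint32_t mask() { return ((1u << size) - 1) << shift; }
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & mask()) >> shift);
  }
};

typedef SketchBitField<int, 0, 3> SketchTypeField;              // bits 0..2
typedef SketchBitField<bool, 3, 1> SketchCopiedField;           // bit 3
typedef SketchBitField<uint32_t, 13, 32 - 13> SketchDataField;  // bits 13..31

int main() {
  uint32_t v = SketchTypeField::encode(2) | SketchCopiedField::encode(true) |
               SketchDataField::encode(42);
  assert(SketchTypeField::decode(v) == 2);
  assert(SketchCopiedField::decode(v));
  assert(SketchDataField::decode(v) == 42u);
  return 0;
}
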
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 9430cad0d..595180624 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -29,6 +29,8 @@
#define V8_FRAMES_INL_H_
#include "frames.h"
+#include "isolate.h"
+#include "v8memory.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/frames-ia32.h"
@@ -86,11 +88,21 @@ inline Address* StackHandler::pc_address() const {
}
+inline StackFrame::StackFrame(StackFrameIterator* iterator)
+ : iterator_(iterator), isolate_(iterator_->isolate()) {
+}
+
+
inline StackHandler* StackFrame::top_handler() const {
return iterator_->handler();
}
+inline Code* StackFrame::GetContainingCode(Isolate* isolate, Address pc) {
+ return isolate->pc_to_code_cache()->GetCacheEntry(pc)->code;
+}
+
+
inline Object* StandardFrame::GetExpression(int index) const {
return Memory::Object_at(GetExpressionAddress(index));
}
@@ -172,6 +184,13 @@ inline Object* JavaScriptFrame::function() const {
template<typename Iterator>
+inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
+ Isolate* isolate)
+ : iterator_(isolate) {
+ if (!done()) Advance();
+}
+
+template<typename Iterator>
inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
// TODO(1233797): The frame hierarchy needs to change. It's
// problematic that we can't use the safe-cast operator to cast to
@@ -185,11 +204,9 @@ inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
template<typename Iterator>
JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
- StackFrame::Id id) {
- while (!done()) {
- Advance();
- if (frame()->id() == id) return;
- }
+ Isolate* isolate, StackFrame::Id id)
+ : iterator_(isolate) {
+ AdvanceToId(id);
}
@@ -210,6 +227,15 @@ void JavaScriptFrameIteratorTemp<Iterator>::AdvanceToArgumentsFrame() {
template<typename Iterator>
+void JavaScriptFrameIteratorTemp<Iterator>::AdvanceToId(StackFrame::Id id) {
+ while (!done()) {
+ Advance();
+ if (frame()->id() == id) return;
+ }
+}
+
+
+template<typename Iterator>
void JavaScriptFrameIteratorTemp<Iterator>::Reset() {
iterator_.Reset();
if (!done()) Advance();
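
// [Usage sketch, not part of the patch] Walking the JavaScript frames of
// an explicit isolate with the new iterator constructors; this mirrors
// the done()/Advance()/frame() interface declared in frames.h below and
// would live inside v8::internal:
void SketchWalkJavaScriptFrames(Isolate* isolate) {
  for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
    JavaScriptFrame* frame = it.frame();
    if (frame->is_optimized()) {
      // Optimized frames may describe several inlined functions; see
      // OptimizedFrame::Summarize in frames.cc below.
    }
  }
}
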
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index e94fdd84e..d81d5afaa 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -35,16 +35,10 @@
#include "safepoint-table.h"
#include "scopeinfo.h"
#include "string-stream.h"
-#include "top.h"
namespace v8 {
namespace internal {
-PcToCodeCache::PcToCodeCacheEntry
- PcToCodeCache::cache_[PcToCodeCache::kPcToCodeCacheSize];
-
-int SafeStackFrameIterator::active_count_ = 0;
-
// Iterator that supports traversing the stack handlers of a
// particular frame. Needs to know the top of the handler chain.
class StackHandlerIterator BASE_EMBEDDED {
@@ -76,21 +70,34 @@ class StackHandlerIterator BASE_EMBEDDED {
#define INITIALIZE_SINGLETON(type, field) field##_(this),
StackFrameIterator::StackFrameIterator()
- : STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL), thread_(Top::GetCurrentThread()),
+ : isolate_(Isolate::Current()),
+ STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
+ frame_(NULL), handler_(NULL),
+ thread_(isolate_->thread_local_top()),
+ fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
+ Reset();
+}
+StackFrameIterator::StackFrameIterator(Isolate* isolate)
+ : isolate_(isolate),
+ STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
+ frame_(NULL), handler_(NULL),
+ thread_(isolate_->thread_local_top()),
fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
Reset();
}
-StackFrameIterator::StackFrameIterator(ThreadLocalTop* t)
- : STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
+StackFrameIterator::StackFrameIterator(Isolate* isolate, ThreadLocalTop* t)
+ : isolate_(isolate),
+ STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
frame_(NULL), handler_(NULL), thread_(t),
fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
Reset();
}
-StackFrameIterator::StackFrameIterator(bool use_top, Address fp, Address sp)
- : STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
+StackFrameIterator::StackFrameIterator(Isolate* isolate,
+ bool use_top, Address fp, Address sp)
+ : isolate_(isolate),
+ STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
frame_(NULL), handler_(NULL),
- thread_(use_top ? Top::GetCurrentThread() : NULL),
+ thread_(use_top ? isolate_->thread_local_top() : NULL),
fp_(use_top ? NULL : fp), sp_(sp),
advance_(use_top ? &StackFrameIterator::AdvanceWithHandler :
&StackFrameIterator::AdvanceWithoutHandler) {
@@ -138,15 +145,17 @@ void StackFrameIterator::Reset() {
StackFrame::State state;
StackFrame::Type type;
if (thread_ != NULL) {
- type = ExitFrame::GetStateForFramePointer(Top::c_entry_fp(thread_), &state);
- handler_ = StackHandler::FromAddress(Top::handler(thread_));
+ type = ExitFrame::GetStateForFramePointer(
+ Isolate::c_entry_fp(thread_), &state);
+ handler_ = StackHandler::FromAddress(
+ Isolate::handler(thread_));
} else {
ASSERT(fp_ != NULL);
state.fp = fp_;
state.sp = sp_;
state.pc_address =
reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_));
- type = StackFrame::ComputeType(&state);
+ type = StackFrame::ComputeType(isolate(), &state);
}
if (SingletonFor(type) == NULL) return;
frame_ = SingletonFor(type, &state);
@@ -187,6 +196,12 @@ StackTraceFrameIterator::StackTraceFrameIterator() {
}
+StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate)
+ : JavaScriptFrameIterator(isolate) {
+ if (!done() && !IsValidFrame()) Advance();
+}
+
+
void StackTraceFrameIterator::Advance() {
while (true) {
JavaScriptFrameIterator::Advance();
@@ -220,23 +235,44 @@ bool SafeStackFrameIterator::ExitFrameValidator::IsValidFP(Address fp) {
}
+SafeStackFrameIterator::ActiveCountMaintainer::ActiveCountMaintainer(
+ Isolate* isolate)
+ : isolate_(isolate) {
+ isolate_->set_safe_stack_iterator_counter(
+ isolate_->safe_stack_iterator_counter() + 1);
+}
+
+
+SafeStackFrameIterator::ActiveCountMaintainer::~ActiveCountMaintainer() {
+ isolate_->set_safe_stack_iterator_counter(
+ isolate_->safe_stack_iterator_counter() - 1);
+}
+
+
SafeStackFrameIterator::SafeStackFrameIterator(
+ Isolate* isolate,
Address fp, Address sp, Address low_bound, Address high_bound) :
- maintainer_(),
+ maintainer_(isolate),
stack_validator_(low_bound, high_bound),
- is_valid_top_(IsValidTop(low_bound, high_bound)),
+ is_valid_top_(IsValidTop(isolate, low_bound, high_bound)),
is_valid_fp_(IsWithinBounds(low_bound, high_bound, fp)),
is_working_iterator_(is_valid_top_ || is_valid_fp_),
iteration_done_(!is_working_iterator_),
- iterator_(is_valid_top_, is_valid_fp_ ? fp : NULL, sp) {
+ iterator_(isolate, is_valid_top_, is_valid_fp_ ? fp : NULL, sp) {
}
+bool SafeStackFrameIterator::is_active(Isolate* isolate) {
+ return isolate->safe_stack_iterator_counter() > 0;
+}
-bool SafeStackFrameIterator::IsValidTop(Address low_bound, Address high_bound) {
- Address fp = Top::c_entry_fp(Top::GetCurrentThread());
+
+bool SafeStackFrameIterator::IsValidTop(Isolate* isolate,
+ Address low_bound, Address high_bound) {
+ ThreadLocalTop* top = isolate->thread_local_top();
+ Address fp = Isolate::c_entry_fp(top);
ExitFrameValidator validator(low_bound, high_bound);
if (!validator.IsValidFP(fp)) return false;
- return Top::handler(Top::GetCurrentThread()) != NULL;
+ return Isolate::handler(top) != NULL;
}
@@ -312,8 +348,9 @@ void SafeStackFrameIterator::Reset() {
#ifdef ENABLE_LOGGING_AND_PROFILING
SafeStackTraceFrameIterator::SafeStackTraceFrameIterator(
+ Isolate* isolate,
Address fp, Address sp, Address low_bound, Address high_bound) :
- SafeJavaScriptFrameIterator(fp, sp, low_bound, high_bound) {
+ SafeJavaScriptFrameIterator(isolate, fp, sp, low_bound, high_bound) {
if (!done() && !frame()->is_java_script()) Advance();
}
@@ -328,10 +365,12 @@ void SafeStackTraceFrameIterator::Advance() {
#endif
-Code* StackFrame::GetSafepointData(Address pc,
+Code* StackFrame::GetSafepointData(Isolate* isolate,
+ Address pc,
SafepointEntry* safepoint_entry,
unsigned* stack_slots) {
- PcToCodeCache::PcToCodeCacheEntry* entry = PcToCodeCache::GetCacheEntry(pc);
+ PcToCodeCache::PcToCodeCacheEntry* entry =
+ isolate->pc_to_code_cache()->GetCacheEntry(pc);
SafepointEntry cached_safepoint_entry = entry->safepoint_entry;
if (!entry->safepoint_entry.is_valid()) {
entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
@@ -370,7 +409,7 @@ void StackFrame::IteratePc(ObjectVisitor* v,
}
-StackFrame::Type StackFrame::ComputeType(State* state) {
+StackFrame::Type StackFrame::ComputeType(Isolate* isolate, State* state) {
ASSERT(state->fp != NULL);
if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
return ARGUMENTS_ADAPTOR;
@@ -385,8 +424,8 @@ StackFrame::Type StackFrame::ComputeType(State* state) {
// frames as normal JavaScript frames to avoid having to look
// into the heap to determine the state. This is safe as long
// as nobody tries to GC...
- if (SafeStackFrameIterator::is_active()) return JAVA_SCRIPT;
- Code::Kind kind = GetContainingCode(*(state->pc_address))->kind();
+ if (SafeStackFrameIterator::is_active(isolate)) return JAVA_SCRIPT;
+ Code::Kind kind = GetContainingCode(isolate, *(state->pc_address))->kind();
ASSERT(kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION);
return (kind == Code::OPTIMIZED_FUNCTION) ? OPTIMIZED : JAVA_SCRIPT;
}
@@ -397,12 +436,12 @@ StackFrame::Type StackFrame::ComputeType(State* state) {
StackFrame::Type StackFrame::GetCallerState(State* state) const {
ComputeCallerState(state);
- return ComputeType(state);
+ return ComputeType(isolate(), state);
}
Code* EntryFrame::unchecked_code() const {
- return Heap::raw_unchecked_js_entry_code();
+ return HEAP->raw_unchecked_js_entry_code();
}
@@ -425,7 +464,7 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const {
Code* EntryConstructFrame::unchecked_code() const {
- return Heap::raw_unchecked_js_construct_entry_code();
+ return HEAP->raw_unchecked_js_construct_entry_code();
}
@@ -457,7 +496,7 @@ void ExitFrame::SetCallerFp(Address caller_fp) {
void ExitFrame::Iterate(ObjectVisitor* v) const {
// The arguments are traversed as part of the expression stack of
// the calling frame.
- IteratePc(v, pc_address(), code());
+ IteratePc(v, pc_address(), LookupCode());
v->VisitPointer(&code_slot());
}
@@ -531,13 +570,13 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
// Make sure that we're not doing "safe" stack frame iteration. We cannot
// possibly find pointers in optimized frames in that state.
- ASSERT(!SafeStackFrameIterator::is_active());
+ ASSERT(!SafeStackFrameIterator::is_active(isolate()));
// Compute the safepoint information.
unsigned stack_slots = 0;
SafepointEntry safepoint_entry;
Code* code = StackFrame::GetSafepointData(
- pc(), &safepoint_entry, &stack_slots);
+ isolate(), pc(), &safepoint_entry, &stack_slots);
unsigned slot_space = stack_slots * kPointerSize;
// Visit the outgoing parameters.
@@ -614,8 +653,8 @@ Code* JavaScriptFrame::unchecked_code() const {
int JavaScriptFrame::GetNumberOfIncomingArguments() const {
- ASSERT(!SafeStackFrameIterator::is_active() &&
- Heap::gc_state() == Heap::NOT_IN_GC);
+ ASSERT(!SafeStackFrameIterator::is_active(isolate()) &&
+ isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
JSFunction* function = JSFunction::cast(this->function());
return function->shared()->formal_parameter_count();
@@ -635,7 +674,7 @@ void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) {
void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
ASSERT(functions->length() == 0);
- Code* code_pointer = code();
+ Code* code_pointer = LookupCode();
int offset = static_cast<int>(pc() - code_pointer->address());
FrameSummary summary(receiver(),
JSFunction::cast(function()),
@@ -703,24 +742,30 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
// at the first position. Since we are always at a call when we need
// to construct a stack trace, the receiver is always in a stack slot.
opcode = static_cast<Translation::Opcode>(it.Next());
- ASSERT(opcode == Translation::STACK_SLOT);
- int input_slot_index = it.Next();
+ ASSERT(opcode == Translation::STACK_SLOT ||
+ opcode == Translation::LITERAL);
+ int index = it.Next();
// Get the correct receiver in the optimized frame.
Object* receiver = NULL;
- // Positive index means the value is spilled to the locals area. Negative
- // means it is stored in the incoming parameter area.
- if (input_slot_index >= 0) {
- receiver = GetExpression(input_slot_index);
+ if (opcode == Translation::LITERAL) {
+ receiver = data->LiteralArray()->get(index);
} else {
- // Index -1 overlaps with last parameter, -n with the first parameter,
- // (-n - 1) with the receiver with n being the number of parameters
- // of the outermost, optimized frame.
- int parameter_count = ComputeParametersCount();
- int parameter_index = input_slot_index + parameter_count;
- receiver = (parameter_index == -1)
- ? this->receiver()
- : this->GetParameter(parameter_index);
+ // Positive index means the value is spilled to the locals
+ // area. Negative means it is stored in the incoming parameter
+ // area.
+ if (index >= 0) {
+ receiver = GetExpression(index);
+ } else {
+ // Index -1 overlaps with last parameter, -n with the first parameter,
+ // (-n - 1) with the receiver with n being the number of parameters
+ // of the outermost, optimized frame.
+ int parameter_count = ComputeParametersCount();
+ int parameter_index = index + parameter_count;
+ receiver = (parameter_index == -1)
+ ? this->receiver()
+ : this->GetParameter(parameter_index);
+ }
}
Code* code = function->shared()->code();
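
// [Worked example, not part of the patch] The negative-index mapping in
// the parameter-area branch above, for a frame with n incoming
// parameters (parameter_index = index + n):
//
//   index = -1       -> parameter_index = n - 1  (last parameter)
//   index = -n       -> parameter_index = 0      (first parameter)
//   index = -(n + 1) -> parameter_index = -1     (the receiver)
//
// As a stand-alone helper, the whole mapping is one addition:
static inline int SketchParameterIndex(int translation_index,
                                       int parameter_count) {
  return translation_index + parameter_count;  // -1 denotes the receiver.
}
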
@@ -754,7 +799,7 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
// back to a slow search in this case to find the original optimized
// code object.
if (!code->contains(pc())) {
- code = PcToCodeCache::GcSafeFindCodeForPc(pc());
+ code = isolate()->pc_to_code_cache()->GcSafeFindCodeForPc(pc());
}
ASSERT(code != NULL);
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
@@ -813,7 +858,8 @@ Address InternalFrame::GetCallerStackPointer() const {
Code* ArgumentsAdaptorFrame::unchecked_code() const {
- return Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline);
+ return isolate()->builtins()->builtin(
+ Builtins::kArgumentsAdaptorTrampoline);
}
@@ -898,6 +944,10 @@ void JavaScriptFrame::Print(StringStream* accumulator,
accumulator->Add("\n");
return;
}
+ if (is_optimized()) {
+ accumulator->Add(" {\n// optimized frame\n}\n");
+ return;
+ }
accumulator->Add(" {\n");
// Compute the number of locals and expression stack elements.
@@ -1007,14 +1057,14 @@ void EntryFrame::Iterate(ObjectVisitor* v) const {
ASSERT(!it.done());
StackHandler* handler = it.handler();
ASSERT(handler->is_entry());
- handler->Iterate(v, code());
+ handler->Iterate(v, LookupCode());
#ifdef DEBUG
// Make sure that the entry frame does not contain more than one
// stack handler.
it.Advance();
ASSERT(it.done());
#endif
- IteratePc(v, pc_address(), code());
+ IteratePc(v, pc_address(), LookupCode());
}
@@ -1031,7 +1081,7 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
v->VisitPointers(base, reinterpret_cast<Object**>(address));
base = reinterpret_cast<Object**>(address + StackHandlerConstants::kSize);
// Traverse the pointers in the handler itself.
- handler->Iterate(v, code());
+ handler->Iterate(v, LookupCode());
}
v->VisitPointers(base, limit);
}
@@ -1039,7 +1089,7 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
IterateExpressions(v);
- IteratePc(v, pc_address(), code());
+ IteratePc(v, pc_address(), LookupCode());
}
@@ -1047,7 +1097,7 @@ void InternalFrame::Iterate(ObjectVisitor* v) const {
// Internal frames only have object pointers on the expression stack
// as they never have any arguments.
IterateExpressions(v);
- IteratePc(v, pc_address(), code());
+ IteratePc(v, pc_address(), LookupCode());
}
@@ -1077,14 +1127,15 @@ Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) {
Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
+ Heap* heap = isolate_->heap();
// Check if the pc points into a large object chunk.
- LargeObjectChunk* chunk = Heap::lo_space()->FindChunkContainingPc(pc);
+ LargeObjectChunk* chunk = heap->lo_space()->FindChunkContainingPc(pc);
if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
// Iterate through the 8K page until we reach the end or find an
// object starting after the pc.
Page* page = Page::FromAddress(pc);
- HeapObjectIterator iterator(page, Heap::GcSafeSizeOfOldObjectFunction());
+ HeapObjectIterator iterator(page, heap->GcSafeSizeOfOldObjectFunction());
HeapObject* previous = NULL;
while (true) {
HeapObject* next = iterator.next();
@@ -1097,14 +1148,14 @@ Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
- Counters::pc_to_code.Increment();
+ isolate_->counters()->pc_to_code()->Increment();
ASSERT(IsPowerOf2(kPcToCodeCacheSize));
uint32_t hash = ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)));
uint32_t index = hash & (kPcToCodeCacheSize - 1);
PcToCodeCacheEntry* entry = cache(index);
if (entry->pc == pc) {
- Counters::pc_to_code_cached.Increment();
+ isolate_->counters()->pc_to_code_cached()->Increment();
ASSERT(entry->code == GcSafeFindCodeForPc(pc));
} else {
// Because this code may be interrupted by a profiling signal that
@@ -1131,11 +1182,8 @@ int NumRegs(RegList reglist) {
}
-int JSCallerSavedCode(int n) {
- static int reg_code[kNumJSCallerSaved];
- static bool initialized = false;
- if (!initialized) {
- initialized = true;
+struct JSCallerSavedCodeData {
+ JSCallerSavedCodeData() {
int i = 0;
for (int r = 0; r < kNumRegs; r++)
if ((kJSCallerSaved & (1 << r)) != 0)
@@ -1143,8 +1191,16 @@ int JSCallerSavedCode(int n) {
ASSERT(i == kNumJSCallerSaved);
}
+ int reg_code[kNumJSCallerSaved];
+};
+
+
+static const JSCallerSavedCodeData kCallerSavedCodeData;
+
+
+int JSCallerSavedCode(int n) {
ASSERT(0 <= n && n < kNumJSCallerSaved);
- return reg_code[n];
+ return kCallerSavedCodeData.reg_code[n];
}
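
// [Sketch, not part of the patch] The JSCallerSavedCode rewrite above
// swaps a lazily built function-local table (guarded by a plain
// `initialized` bool, which is unsafe once several threads can race into
// the function) for a const object with static storage duration whose
// constructor fills the table up front. The pattern, with illustrative
// contents:
struct SketchTableData {
  SketchTableData() {
    for (int i = 0; i < 8; i++) table[i] = i * i;  // filled once, eagerly
  }
  int table[8];
};

static const SketchTableData kSketchTable;  // built during static init

int SketchLookup(int n) {
  return kSketchTable.table[n];  // no initialization check on the hot path
}
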
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 03e5e671b..aa91026fb 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -28,6 +28,8 @@
#ifndef V8_FRAMES_H_
#define V8_FRAMES_H_
+#include "allocation.h"
+#include "handles.h"
#include "safepoint-table.h"
namespace v8 {
@@ -44,11 +46,10 @@ int JSCallerSavedCode(int n);
// Forward declarations.
class StackFrameIterator;
-class Top;
class ThreadLocalTop;
+class Isolate;
-
-class PcToCodeCache : AllStatic {
+class PcToCodeCache {
public:
struct PcToCodeCacheEntry {
Address pc;
@@ -56,22 +57,28 @@ class PcToCodeCache : AllStatic {
SafepointEntry safepoint_entry;
};
- static PcToCodeCacheEntry* cache(int index) {
- return &cache_[index];
+ explicit PcToCodeCache(Isolate* isolate) : isolate_(isolate) {
+ Flush();
}
- static Code* GcSafeFindCodeForPc(Address pc);
- static Code* GcSafeCastToCode(HeapObject* object, Address pc);
+ Code* GcSafeFindCodeForPc(Address pc);
+ Code* GcSafeCastToCode(HeapObject* object, Address pc);
- static void FlushPcToCodeCache() {
+ void Flush() {
memset(&cache_[0], 0, sizeof(cache_));
}
- static PcToCodeCacheEntry* GetCacheEntry(Address pc);
+ PcToCodeCacheEntry* GetCacheEntry(Address pc);
private:
+ PcToCodeCacheEntry* cache(int index) { return &cache_[index]; }
+
+ Isolate* isolate_;
+
static const int kPcToCodeCacheSize = 1024;
- static PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
+ PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
+
+ DISALLOW_COPY_AND_ASSIGN(PcToCodeCache);
};
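
// [Sketch, not part of the patch] How a lookup in this direct-mapped
// cache works (compare PcToCodeCache::GetCacheEntry in frames.cc above):
// hash the pc, mask by the power-of-two table size, and trust the slot
// only when its stored pc matches. The hash below is a stand-in, not
// V8's ComputeIntegerHash.
#include <cstddef>
#include <cstdint>

static const int kSketchCacheSize = 1024;  // must remain a power of two

struct SketchEntry {
  uintptr_t pc;
  void* code;
};

static SketchEntry sketch_cache[kSketchCacheSize];

SketchEntry* SketchGetCacheEntry(uintptr_t pc) {
  uint32_t hash = static_cast<uint32_t>(pc ^ (pc >> 16));  // stand-in hash
  SketchEntry* entry = &sketch_cache[hash & (kSketchCacheSize - 1)];
  if (entry->pc != pc) {
    entry->pc = pc;      // miss: the caller recomputes the code object
    entry->code = NULL;  // (GcSafeFindCodeForPc in the real code)
  }
  return entry;
}
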
@@ -145,6 +152,12 @@ class StackFrame BASE_EMBEDDED {
NO_ID = 0
};
+ // Used to mark the outermost JS entry frame.
+ enum JsFrameMarker {
+ INNER_JSENTRY_FRAME = 0,
+ OUTERMOST_JSENTRY_FRAME = 1
+ };
+
struct State {
State() : sp(NULL), fp(NULL), pc_address(NULL) { }
Address sp;
@@ -152,10 +165,12 @@ class StackFrame BASE_EMBEDDED {
Address* pc_address;
};
- // Copy constructor; it breaks the connection to host iterator.
+ // Copy constructor; it breaks the connection to host iterator
+  // (as an iterator usually lives on the stack).
StackFrame(const StackFrame& original) {
this->state_ = original.state_;
this->iterator_ = NULL;
+ this->isolate_ = original.isolate_;
}
// Type testers.
@@ -199,17 +214,18 @@ class StackFrame BASE_EMBEDDED {
virtual Code* unchecked_code() const = 0;
// Get the code associated with this frame.
- Code* code() const { return GetContainingCode(pc()); }
+ Code* LookupCode() const {
+ return GetContainingCode(isolate(), pc());
+ }
// Get the code object that contains the given pc.
- static Code* GetContainingCode(Address pc) {
- return PcToCodeCache::GetCacheEntry(pc)->code;
- }
+ static inline Code* GetContainingCode(Isolate* isolate, Address pc);
// Get the code object containing the given pc and fill in the
// safepoint entry and the number of stack slots. The pc must be at
// a safepoint.
- static Code* GetSafepointData(Address pc,
+ static Code* GetSafepointData(Isolate* isolate,
+ Address pc,
SafepointEntry* safepoint_entry,
unsigned* stack_slots);
@@ -224,9 +240,11 @@ class StackFrame BASE_EMBEDDED {
int index) const { }
protected:
- explicit StackFrame(StackFrameIterator* iterator) : iterator_(iterator) { }
+ inline explicit StackFrame(StackFrameIterator* iterator);
virtual ~StackFrame() { }
+ Isolate* isolate() const { return isolate_; }
+
// Compute the stack pointer for the calling frame.
virtual Address GetCallerStackPointer() const = 0;
@@ -239,10 +257,11 @@ class StackFrame BASE_EMBEDDED {
inline StackHandler* top_handler() const;
// Compute the stack frame type for the given state.
- static Type ComputeType(State* state);
+ static Type ComputeType(Isolate* isolate, State* state);
private:
const StackFrameIterator* iterator_;
+ Isolate* isolate_;
State state_;
// Fill in the state of the calling frame.
@@ -251,6 +270,8 @@ class StackFrame BASE_EMBEDDED {
// Get the type and the state of the calling frame.
virtual Type GetCallerState(State* state) const;
+ static const intptr_t kIsolateTag = 1;
+
friend class StackFrameIterator;
friend class StackHandlerIterator;
friend class SafeStackFrameIterator;
@@ -424,7 +445,7 @@ class FrameSummary BASE_EMBEDDED {
Handle<Object> receiver() { return receiver_; }
Handle<JSFunction> function() { return function_; }
Handle<Code> code() { return code_; }
- Address pc() { return reinterpret_cast<Address>(*code_) + offset_; }
+ Address pc() { return code_->address() + offset_; }
int offset() { return offset_; }
bool is_constructor() { return is_constructor_; }
@@ -612,22 +633,28 @@ class ConstructFrame: public InternalFrame {
class StackFrameIterator BASE_EMBEDDED {
public:
- // An iterator that iterates over the current thread's stack.
+ // An iterator that iterates over the current thread's stack,
+  // and uses the current isolate.
StackFrameIterator();
+  // An iterator that iterates over the isolate's current thread's stack.
+ explicit StackFrameIterator(Isolate* isolate);
+
// An iterator that iterates over a given thread's stack.
- explicit StackFrameIterator(ThreadLocalTop* thread);
+ StackFrameIterator(Isolate* isolate, ThreadLocalTop* t);
// An iterator that can start from a given FP address.
  // If use_top, then work as usual; if fp isn't NULL, use it;
  // otherwise, do nothing.
- StackFrameIterator(bool use_top, Address fp, Address sp);
+ StackFrameIterator(Isolate* isolate, bool use_top, Address fp, Address sp);
StackFrame* frame() const {
ASSERT(!done());
return frame_;
}
+ Isolate* isolate() const { return isolate_; }
+
bool done() const { return frame_ == NULL; }
void Advance() { (this->*advance_)(); }
@@ -635,6 +662,7 @@ class StackFrameIterator BASE_EMBEDDED {
void Reset();
private:
+ Isolate* isolate_;
#define DECLARE_SINGLETON(ignore, type) type type##_;
STACK_FRAME_TYPE_LIST(DECLARE_SINGLETON)
#undef DECLARE_SINGLETON
@@ -670,13 +698,12 @@ class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
public:
JavaScriptFrameIteratorTemp() { if (!done()) Advance(); }
- explicit JavaScriptFrameIteratorTemp(ThreadLocalTop* thread) :
- iterator_(thread) {
- if (!done()) Advance();
- }
+ inline explicit JavaScriptFrameIteratorTemp(Isolate* isolate);
// Skip frames until the frame with the given id is reached.
- explicit JavaScriptFrameIteratorTemp(StackFrame::Id id);
+ explicit JavaScriptFrameIteratorTemp(StackFrame::Id id) { AdvanceToId(id); }
+
+ inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id);
JavaScriptFrameIteratorTemp(Address fp, Address sp,
Address low_bound, Address high_bound) :
@@ -684,6 +711,13 @@ class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
if (!done()) Advance();
}
+ JavaScriptFrameIteratorTemp(Isolate* isolate,
+ Address fp, Address sp,
+ Address low_bound, Address high_bound) :
+ iterator_(isolate, fp, sp, low_bound, high_bound) {
+ if (!done()) Advance();
+ }
+
inline JavaScriptFrame* frame() const;
bool done() const { return iterator_.done(); }
@@ -698,6 +732,8 @@ class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
void Reset();
private:
+ inline void AdvanceToId(StackFrame::Id id);
+
Iterator iterator_;
};
@@ -712,6 +748,7 @@ typedef JavaScriptFrameIteratorTemp<StackFrameIterator> JavaScriptFrameIterator;
class StackTraceFrameIterator: public JavaScriptFrameIterator {
public:
StackTraceFrameIterator();
+ explicit StackTraceFrameIterator(Isolate* isolate);
void Advance();
private:
@@ -721,7 +758,8 @@ class StackTraceFrameIterator: public JavaScriptFrameIterator {
class SafeStackFrameIterator BASE_EMBEDDED {
public:
- SafeStackFrameIterator(Address fp, Address sp,
+ SafeStackFrameIterator(Isolate* isolate,
+ Address fp, Address sp,
Address low_bound, Address high_bound);
StackFrame* frame() const {
@@ -734,7 +772,7 @@ class SafeStackFrameIterator BASE_EMBEDDED {
void Advance();
void Reset();
- static bool is_active() { return active_count_ > 0; }
+ static bool is_active(Isolate* isolate);
static bool IsWithinBounds(
Address low_bound, Address high_bound, Address addr) {
@@ -771,7 +809,8 @@ class SafeStackFrameIterator BASE_EMBEDDED {
bool CanIterateHandles(StackFrame* frame, StackHandler* handler);
bool IsValidFrame(StackFrame* frame) const;
bool IsValidCaller(StackFrame* frame);
- static bool IsValidTop(Address low_bound, Address high_bound);
+ static bool IsValidTop(Isolate* isolate,
+ Address low_bound, Address high_bound);
// This is a nasty hack to make sure the active count is incremented
// before the constructor for the embedded iterator is invoked. This
@@ -780,12 +819,13 @@ class SafeStackFrameIterator BASE_EMBEDDED {
// heap objects.
class ActiveCountMaintainer BASE_EMBEDDED {
public:
- ActiveCountMaintainer() { active_count_++; }
- ~ActiveCountMaintainer() { active_count_--; }
+ explicit ActiveCountMaintainer(Isolate* isolate);
+ ~ActiveCountMaintainer();
+ private:
+ Isolate* isolate_;
};
ActiveCountMaintainer maintainer_;
- static int active_count_;
StackAddressValidator stack_validator_;
const bool is_valid_top_;
const bool is_valid_fp_;
@@ -802,7 +842,8 @@ typedef JavaScriptFrameIteratorTemp<SafeStackFrameIterator>
class SafeStackTraceFrameIterator: public SafeJavaScriptFrameIterator {
public:
- explicit SafeStackTraceFrameIterator(Address fp, Address sp,
+ explicit SafeStackTraceFrameIterator(Isolate* isolate,
+ Address fp, Address sp,
Address low_bound, Address high_bound);
void Advance();
};
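
// [Sketch, not part of the patch] The ActiveCountMaintainer above is a
// scope guard: it bumps a counter in its constructor and drops it in its
// destructor, and because data members are initialized in declaration
// order, declaring the guard before the embedded iterator guarantees the
// count is already raised when the iterator's constructor starts looking
// at frames. Illustrative names:
class SketchGuard {
 public:
  explicit SketchGuard(int* counter) : counter_(counter) { ++*counter_; }
  ~SketchGuard() { --*counter_; }
 private:
  int* counter_;
};

class SketchSafeIterator {
 private:
  struct SketchInner {
    explicit SketchInner(const int* active_count) {
      // *active_count is already > 0 here, so code asking "is a safe
      // iterator active?" during construction sees the right answer.
    }
  };

 public:
  explicit SketchSafeIterator(int* active_count)
      : guard_(active_count),    // runs first: raises the count...
        inner_(active_count) {}  // ...before the inner iterator constructs

 private:
  SketchGuard guard_;  // must be declared before inner_
  SketchInner inner_;
};
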
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index b3dc95bdf..03abfbd85 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -27,7 +27,7 @@
#include "v8.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
@@ -90,14 +90,14 @@ void BreakableStatementChecker::VisitReturnStatement(ReturnStatement* stmt) {
}
-void BreakableStatementChecker::VisitWithEnterStatement(
- WithEnterStatement* stmt) {
+void BreakableStatementChecker::VisitEnterWithContextStatement(
+ EnterWithContextStatement* stmt) {
Visit(stmt->expression());
}
-void BreakableStatementChecker::VisitWithExitStatement(
- WithExitStatement* stmt) {
+void BreakableStatementChecker::VisitExitContextStatement(
+ ExitContextStatement* stmt) {
}
@@ -187,11 +187,6 @@ void BreakableStatementChecker::VisitArrayLiteral(ArrayLiteral* expr) {
}
-void BreakableStatementChecker::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
-}
-
-
void BreakableStatementChecker::VisitAssignment(Assignment* expr) {
// If assigning to a property (including a global property) the assignment is
// breakable.
@@ -213,12 +208,6 @@ void BreakableStatementChecker::VisitThrow(Throw* expr) {
}
-void BreakableStatementChecker::VisitIncrementOperation(
- IncrementOperation* expr) {
- UNREACHABLE();
-}
-
-
void BreakableStatementChecker::VisitProperty(Property* expr) {
// Property load is breakable.
is_breakable_ = true;
@@ -275,17 +264,18 @@ void BreakableStatementChecker::VisitThisFunction(ThisFunction* expr) {
#define __ ACCESS_MASM(masm())
bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
+ Isolate* isolate = info->isolate();
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
int len = String::cast(script->source())->length();
- Counters::total_full_codegen_source_size.Increment(len);
+ isolate->counters()->total_full_codegen_source_size()->Increment(len);
}
if (FLAG_trace_codegen) {
PrintF("Full Compiler - ");
}
CodeGenerator::MakeCodePrologue(info);
const int kInitialBufferSize = 4 * KB;
- MacroAssembler masm(NULL, kInitialBufferSize);
+ MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize);
#ifdef ENABLE_GDB_JIT_INTERFACE
masm.positions_recorder()->StartGDBJITLineInfoRecording();
#endif
@@ -293,7 +283,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
FullCodeGenerator cgen(&masm);
cgen.Generate(info);
if (cgen.HasStackOverflow()) {
- ASSERT(!Top::has_pending_exception());
+ ASSERT(!isolate->has_pending_exception());
return false;
}
unsigned table_offset = cgen.EmitStackCheckTable();
@@ -343,7 +333,8 @@ void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
if (!info_->HasDeoptimizationSupport()) return;
int length = bailout_entries_.length();
Handle<DeoptimizationOutputData> data =
- Factory::NewDeoptimizationOutputData(length, TENURED);
+ isolate()->factory()->
+ NewDeoptimizationOutputData(length, TENURED);
for (int i = 0; i < length; i++) {
data->SetAstId(i, Smi::FromInt(bailout_entries_[i].id));
data->SetPcAndState(i, Smi::FromInt(bailout_entries_[i].pc_and_state));
@@ -352,7 +343,7 @@ void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
}
-void FullCodeGenerator::PrepareForBailout(AstNode* node, State state) {
+void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
PrepareForBailoutForId(node->id(), state);
}
@@ -450,7 +441,7 @@ void FullCodeGenerator::TestContext::Plug(Register reg) const {
// For simplicity we always test the accumulator register.
__ Move(result_register(), reg);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -472,7 +463,7 @@ void FullCodeGenerator::TestContext::PlugTOS() const {
// For simplicity we always test the accumulator register.
__ pop(result_register());
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -522,6 +513,14 @@ void FullCodeGenerator::TestContext::PrepareTest(
}
+void FullCodeGenerator::DoTest(const TestContext* context) {
+ DoTest(context->condition(),
+ context->true_label(),
+ context->false_label(),
+ context->fall_through());
+}
+
+
void FullCodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
int length = declarations->length();
@@ -545,7 +544,8 @@ void FullCodeGenerator::VisitDeclarations(
// Compute array of global variable and function declarations.
// Do nothing in case of no declared global functions or variables.
if (globals > 0) {
- Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
+ Handle<FixedArray> array =
+ isolate()->factory()->NewFixedArray(2 * globals, TENURED);
for (int j = 0, i = 0; i < length; i++) {
Declaration* decl = declarations->at(i);
Variable* var = decl->proxy()->var();
@@ -580,88 +580,78 @@ void FullCodeGenerator::VisitDeclarations(
void FullCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) {
- CodeGenerator::RecordPositions(masm_, fun->start_position());
- }
+ CodeGenerator::RecordPositions(masm_, fun->start_position());
}
void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) {
- CodeGenerator::RecordPositions(masm_, fun->end_position() - 1);
- }
+ CodeGenerator::RecordPositions(masm_, fun->end_position() - 1);
}
void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
- if (FLAG_debug_info) {
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (!Debugger::IsDebuggerActive()) {
- CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
- } else {
- // Check if the statement will be breakable without adding a debug break
- // slot.
- BreakableStatementChecker checker;
- checker.Check(stmt);
- // Record the statement position right here if the statement is not
- // breakable. For breakable statements the actual recording of the
- // position will be postponed to the breakable code (typically an IC).
- bool position_recorded = CodeGenerator::RecordPositions(
- masm_, stmt->statement_pos(), !checker.is_breakable());
- // If the position recording did record a new position generate a debug
- // break slot to make the statement breakable.
- if (position_recorded) {
- Debug::GenerateSlot(masm_);
- }
+ if (!isolate()->debugger()->IsDebuggerActive()) {
+ CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+ } else {
+ // Check if the statement will be breakable without adding a debug break
+ // slot.
+ BreakableStatementChecker checker;
+ checker.Check(stmt);
+ // Record the statement position right here if the statement is not
+ // breakable. For breakable statements the actual recording of the
+ // position will be postponed to the breakable code (typically an IC).
+ bool position_recorded = CodeGenerator::RecordPositions(
+ masm_, stmt->statement_pos(), !checker.is_breakable());
+ // If the position recording did record a new position generate a debug
+ // break slot to make the statement breakable.
+ if (position_recorded) {
+ Debug::GenerateSlot(masm_);
}
+ }
#else
- CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+ CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
#endif
- }
}
void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) {
- if (FLAG_debug_info) {
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (!Debugger::IsDebuggerActive()) {
- CodeGenerator::RecordPositions(masm_, pos);
- } else {
- // Check if the expression will be breakable without adding a debug break
- // slot.
- BreakableStatementChecker checker;
- checker.Check(expr);
- // Record a statement position right here if the expression is not
- // breakable. For breakable expressions the actual recording of the
- // position will be postponed to the breakable code (typically an IC).
- // NOTE this will record a statement position for something which might
- // not be a statement. As stepping in the debugger will only stop at
- // statement positions this is used for e.g. the condition expression of
- // a do while loop.
- bool position_recorded = CodeGenerator::RecordPositions(
- masm_, pos, !checker.is_breakable());
- // If the position recording did record a new position generate a debug
- // break slot to make the statement breakable.
- if (position_recorded) {
- Debug::GenerateSlot(masm_);
- }
+ if (!isolate()->debugger()->IsDebuggerActive()) {
+ CodeGenerator::RecordPositions(masm_, pos);
+ } else {
+ // Check if the expression will be breakable without adding a debug break
+ // slot.
+ BreakableStatementChecker checker;
+ checker.Check(expr);
+ // Record a statement position right here if the expression is not
+ // breakable. For breakable expressions the actual recording of the
+ // position will be postponed to the breakable code (typically an IC).
+ // NOTE this will record a statement position for something which might
+ // not be a statement. As stepping in the debugger will only stop at
+ // statement positions this is used for e.g. the condition expression of
+ // a do while loop.
+ bool position_recorded = CodeGenerator::RecordPositions(
+ masm_, pos, !checker.is_breakable());
+ // If the position recording did record a new position generate a debug
+ // break slot to make the statement breakable.
+ if (position_recorded) {
+ Debug::GenerateSlot(masm_);
}
+ }
#else
- CodeGenerator::RecordPositions(masm_, pos);
+ CodeGenerator::RecordPositions(masm_, pos);
#endif
- }
}
void FullCodeGenerator::SetStatementPosition(int pos) {
- if (FLAG_debug_info) {
- CodeGenerator::RecordPositions(masm_, pos);
- }
+ CodeGenerator::RecordPositions(masm_, pos);
}
void FullCodeGenerator::SetSourcePosition(int pos) {
- if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
+ if (pos != RelocInfo::kNoPosition) {
masm_->positions_recorder()->RecordPosition(pos);
}
}
@@ -694,7 +684,7 @@ FullCodeGenerator::InlineFunctionGenerator
void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* node) {
ZoneList<Expression*>* args = node->arguments();
Handle<String> name = node->name();
- Runtime::Function* function = node->function();
+ const Runtime::Function* function = node->function();
ASSERT(function != NULL);
ASSERT(function->intrinsic_type == Runtime::INLINE);
InlineFunctionGenerator generator =
@@ -704,143 +694,116 @@ void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* node) {
void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- Comment cmnt(masm_, "[ BinaryOperation");
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
-
- OverwriteMode mode = NO_OVERWRITE;
- if (left->ResultOverwriteAllowed()) {
- mode = OVERWRITE_LEFT;
- } else if (right->ResultOverwriteAllowed()) {
- mode = OVERWRITE_RIGHT;
- }
-
- switch (op) {
+ switch (expr->op()) {
case Token::COMMA:
- VisitForEffect(left);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- context()->HandleExpression(right);
- break;
-
+ return VisitComma(expr);
case Token::OR:
case Token::AND:
- EmitLogicalOperation(expr);
- break;
-
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD:
- case Token::MUL:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- // Load both operands.
- VisitForStackValue(left);
- VisitForAccumulatorValue(right);
-
- SetSourcePosition(expr->position());
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr, op, mode, left, right);
- } else {
- EmitBinaryOp(op, mode);
- }
- break;
- }
-
+ return VisitLogicalExpression(expr);
default:
- UNREACHABLE();
+ return VisitArithmeticExpression(expr);
}
}
-void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
- Label eval_right, done;
-
- context()->EmitLogicalLeft(expr, &eval_right, &done);
-
- PrepareForBailoutForId(expr->RightId(), NO_REGISTERS);
- __ bind(&eval_right);
+void FullCodeGenerator::VisitComma(BinaryOperation* expr) {
+ Comment cmnt(masm_, "[ Comma");
+ VisitForEffect(expr->left());
if (context()->IsTest()) ForwardBailoutToChild(expr);
- context()->HandleExpression(expr->right());
-
- __ bind(&done);
+ VisitInCurrentContext(expr->right());
}
-void FullCodeGenerator::EffectContext::EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const {
- if (expr->op() == Token::OR) {
- codegen()->VisitForControl(expr->left(), done, eval_right, eval_right);
- } else {
- ASSERT(expr->op() == Token::AND);
- codegen()->VisitForControl(expr->left(), eval_right, done, eval_right);
- }
-}
+void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
+ bool is_logical_and = expr->op() == Token::AND;
+ Comment cmnt(masm_, is_logical_and ? "[ Logical AND" : "[ Logical OR");
+ Expression* left = expr->left();
+ Expression* right = expr->right();
+ int right_id = expr->RightId();
+ Label done;
+ if (context()->IsTest()) {
+ Label eval_right;
+ const TestContext* test = TestContext::cast(context());
+ if (is_logical_and) {
+ VisitForControl(left, &eval_right, test->false_label(), &eval_right);
+ } else {
+ VisitForControl(left, test->true_label(), &eval_right, &eval_right);
+ }
+ PrepareForBailoutForId(right_id, NO_REGISTERS);
+ __ bind(&eval_right);
+ ForwardBailoutToChild(expr);
+
+ } else if (context()->IsAccumulatorValue()) {
+ VisitForAccumulatorValue(left);
+ // We want the value in the accumulator for the test, and on the stack in
+ // case we need it.
+ __ push(result_register());
+ Label discard, restore;
+ PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ if (is_logical_and) {
+ DoTest(left, &discard, &restore, &restore);
+ } else {
+ DoTest(left, &restore, &discard, &restore);
+ }
+ __ bind(&restore);
+ __ pop(result_register());
+ __ jmp(&done);
+ __ bind(&discard);
+ __ Drop(1);
+ PrepareForBailoutForId(right_id, NO_REGISTERS);
+
+ } else if (context()->IsStackValue()) {
+ VisitForAccumulatorValue(left);
+ // We want the value in the accumulator for the test, and on the stack in
+ // case we need it.
+ __ push(result_register());
+ Label discard;
+ PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ if (is_logical_and) {
+ DoTest(left, &discard, &done, &discard);
+ } else {
+ DoTest(left, &done, &discard, &discard);
+ }
+ __ bind(&discard);
+ __ Drop(1);
+ PrepareForBailoutForId(right_id, NO_REGISTERS);
-void FullCodeGenerator::AccumulatorValueContext::EmitLogicalLeft(
- BinaryOperation* expr,
- Label* eval_right,
- Label* done) const {
- HandleExpression(expr->left());
- // We want the value in the accumulator for the test, and on the stack in case
- // we need it.
- __ push(result_register());
- Label discard, restore;
- if (expr->op() == Token::OR) {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(&restore, &discard, &restore);
} else {
- ASSERT(expr->op() == Token::AND);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(&discard, &restore, &restore);
+ ASSERT(context()->IsEffect());
+ Label eval_right;
+ if (is_logical_and) {
+ VisitForControl(left, &eval_right, &done, &eval_right);
+ } else {
+ VisitForControl(left, &done, &eval_right, &eval_right);
+ }
+ PrepareForBailoutForId(right_id, NO_REGISTERS);
+ __ bind(&eval_right);
}
- __ bind(&restore);
- __ pop(result_register());
- __ jmp(done);
- __ bind(&discard);
- __ Drop(1);
+
+ VisitInCurrentContext(right);
+ __ bind(&done);
}
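+
+// A sketch of the code shape this produces for `a && b` in an
+// accumulator-value context (illustrative only; register and bailout
+// details vary by platform):
+//
+//   VisitForAccumulatorValue(a);              // value of `a` in accumulator
+//   push(result_register());                  // keep `a`; it may be the result
+//   DoTest(a, &discard, &restore, &restore);
+//  restore:                                   // `a` was falsy: it is the result
+//   pop(result_register());
+//   jmp(&done);
+//  discard:                                   // `a` was truthy: result is `b`
+//   Drop(1);
+//   VisitInCurrentContext(b);
+//  done: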
-void FullCodeGenerator::StackValueContext::EmitLogicalLeft(
- BinaryOperation* expr,
- Label* eval_right,
- Label* done) const {
- codegen()->VisitForAccumulatorValue(expr->left());
- // We want the value in the accumulator for the test, and on the stack in case
- // we need it.
- __ push(result_register());
- Label discard;
- if (expr->op() == Token::OR) {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(done, &discard, &discard);
- } else {
- ASSERT(expr->op() == Token::AND);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(&discard, done, &discard);
- }
- __ bind(&discard);
- __ Drop(1);
-}
+void FullCodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
+ Token::Value op = expr->op();
+ Comment cmnt(masm_, "[ ArithmeticExpression");
+ Expression* left = expr->left();
+ Expression* right = expr->right();
+ OverwriteMode mode =
+ left->ResultOverwriteAllowed()
+ ? OVERWRITE_LEFT
+ : (right->ResultOverwriteAllowed() ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ VisitForStackValue(left);
+ VisitForAccumulatorValue(right);
-void FullCodeGenerator::TestContext::EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const {
- if (expr->op() == Token::OR) {
- codegen()->VisitForControl(expr->left(),
- true_label_, eval_right, eval_right);
+ SetSourcePosition(expr->position());
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr, op, mode, left, right);
} else {
- ASSERT(expr->op() == Token::AND);
- codegen()->VisitForControl(expr->left(),
- eval_right, false_label_, eval_right);
+ EmitBinaryOp(expr, op, mode);
}
}
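+
+// A note on OverwriteMode (informal): ResultOverwriteAllowed() holds for
+// operands that are themselves temporaries, e.g. the inner sum in
+// `a + b + c`, whose heap number may be reused in place by the outer
+// addition (OVERWRITE_LEFT). Named variables and literals must be kept
+// intact, so two non-temporary operands yield NO_OVERWRITE.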
@@ -853,46 +816,23 @@ void FullCodeGenerator::ForwardBailoutToChild(Expression* expr) {
}
-void FullCodeGenerator::EffectContext::HandleExpression(
- Expression* expr) const {
- codegen()->HandleInNonTestContext(expr, NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::HandleExpression(
- Expression* expr) const {
- codegen()->HandleInNonTestContext(expr, TOS_REG);
-}
-
-
-void FullCodeGenerator::StackValueContext::HandleExpression(
- Expression* expr) const {
- codegen()->HandleInNonTestContext(expr, NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::TestContext::HandleExpression(Expression* expr) const {
- codegen()->VisitInTestContext(expr);
-}
-
-
-void FullCodeGenerator::HandleInNonTestContext(Expression* expr, State state) {
- ASSERT(forward_bailout_pending_ == NULL);
- AstVisitor::Visit(expr);
- PrepareForBailout(expr, state);
- // Forwarding bailouts to children is a one shot operation. It
- // should have been processed at this point.
- ASSERT(forward_bailout_pending_ == NULL);
-}
-
-
-void FullCodeGenerator::VisitInTestContext(Expression* expr) {
- ForwardBailoutStack stack(expr, forward_bailout_pending_);
- ForwardBailoutStack* saved = forward_bailout_stack_;
- forward_bailout_pending_ = NULL;
- forward_bailout_stack_ = &stack;
- AstVisitor::Visit(expr);
- forward_bailout_stack_ = saved;
+void FullCodeGenerator::VisitInCurrentContext(Expression* expr) {
+ if (context()->IsTest()) {
+ ForwardBailoutStack stack(expr, forward_bailout_pending_);
+ ForwardBailoutStack* saved = forward_bailout_stack_;
+ forward_bailout_pending_ = NULL;
+ forward_bailout_stack_ = &stack;
+ Visit(expr);
+ forward_bailout_stack_ = saved;
+ } else {
+ ASSERT(forward_bailout_pending_ == NULL);
+ Visit(expr);
+ State state = context()->IsAccumulatorValue() ? TOS_REG : NO_REGISTERS;
+ PrepareForBailout(expr, state);
+ // Forwarding bailouts to children is a one-shot operation. It should have
+ // been processed at this point.
+ ASSERT(forward_bailout_pending_ == NULL);
+ }
}
@@ -945,7 +885,7 @@ void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
}
__ bind(&done);
- PrepareForBailoutForId(stmt->id(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->IfId(), NO_REGISTERS);
}
@@ -1010,26 +950,20 @@ void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
}
-void FullCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
- Comment cmnt(masm_, "[ WithEnterStatement");
+void FullCodeGenerator::VisitEnterWithContextStatement(
+ EnterWithContextStatement* stmt) {
+ Comment cmnt(masm_, "[ EnterWithContextStatement");
SetStatementPosition(stmt);
VisitForStackValue(stmt->expression());
- if (stmt->is_catch_block()) {
- __ CallRuntime(Runtime::kPushCatchContext, 1);
- } else {
- __ CallRuntime(Runtime::kPushContext, 1);
- }
- // Both runtime calls return the new context in both the context and the
- // result registers.
-
- // Update local stack frame context field.
+ PushFunctionArgumentForContextAllocation();
+ __ CallRuntime(Runtime::kPushWithContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
}
-void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
- Comment cmnt(masm_, "[ WithExitStatement");
+void FullCodeGenerator::VisitExitContextStatement(ExitContextStatement* stmt) {
+ Comment cmnt(masm_, "[ ExitContextStatement");
SetStatementPosition(stmt);
// Pop context.
@@ -1170,15 +1104,14 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
__ Call(&try_handler_setup);
// Try handler code, exception in result register.
- // Store exception in local .catch variable before executing catch block.
- {
- // The catch variable is *always* a variable proxy for a local variable.
- Variable* catch_var = stmt->catch_var()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(catch_var);
- Slot* variable_slot = catch_var->AsSlot();
- ASSERT_NOT_NULL(variable_slot);
- ASSERT_EQ(Slot::LOCAL, variable_slot->type());
- StoreToFrameField(SlotOffset(variable_slot), result_register());
+ // Extend the context before executing the catch block.
+ { Comment cmnt(masm_, "[ Extend catch context");
+ __ Push(stmt->name());
+ __ push(result_register());
+ PushFunctionArgumentForContextAllocation();
+ __ CallRuntime(Runtime::kPushCatchContext, 3);
+ StoreToFrameField(StandardFrameConstants::kContextOffset,
+ context_register());
}
Visit(stmt->catch_block());
@@ -1290,7 +1223,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
for_test->false_label(),
NULL);
} else {
- context()->HandleExpression(expr->then_expression());
+ VisitInCurrentContext(expr->then_expression());
__ jmp(&done);
}
@@ -1299,7 +1232,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
if (context()->IsTest()) ForwardBailoutToChild(expr);
SetExpressionPosition(expr->else_expression(),
expr->else_expression_position());
- context()->HandleExpression(expr->else_expression());
+ VisitInCurrentContext(expr->else_expression());
// If control flow falls through Visit, merge it with true case here.
if (!context()->IsTest()) {
__ bind(&done);
@@ -1334,18 +1267,6 @@ void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
}
-void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- // Call runtime routine to allocate the catch extension object and
- // assign the exception value to the catch variable.
- Comment cmnt(masm_, "[ CatchExtensionObject");
- VisitForStackValue(expr->key());
- VisitForStackValue(expr->value());
- // Create catch extension object.
- __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- context()->Plug(result_register());
-}
-
-
void FullCodeGenerator::VisitThrow(Throw* expr) {
Comment cmnt(masm_, "[ Throw");
VisitForStackValue(expr->exception());
@@ -1354,11 +1275,6 @@ void FullCodeGenerator::VisitThrow(Throw* expr) {
}
-void FullCodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
- UNREACHABLE();
-}
-
-
int FullCodeGenerator::TryFinally::Exit(int stack_depth) {
// The macros used here must preserve the result register.
__ Drop(stack_depth);
@@ -1376,6 +1292,26 @@ int FullCodeGenerator::TryCatch::Exit(int stack_depth) {
}
+bool FullCodeGenerator::TryLiteralCompare(CompareOperation* compare,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ Expression* expr;
+ Handle<String> check;
+ if (compare->IsLiteralCompareTypeof(&expr, &check)) {
+ EmitLiteralCompareTypeof(expr, check, if_true, if_false, fall_through);
+ return true;
+ }
+
+ if (compare->IsLiteralCompareUndefined(&expr)) {
+ EmitLiteralCompareUndefined(expr, if_true, if_false, fall_through);
+ return true;
+ }
+
+ return false;
+}
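+
+// The two fast paths above match common JavaScript patterns, e.g.
+// (illustrative):
+//
+//   typeof x == "function"   // handled by EmitLiteralCompareTypeof
+//   x === undefined          // handled by EmitLiteralCompareUndefined
+//
+// Everything else falls through to the generic comparison code.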
+
+
#undef __
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 5fb11b43d..7a421841b 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,6 +30,7 @@
#include "v8.h"
+#include "allocation.h"
#include "ast.h"
#include "code-stubs.h"
#include "codegen.h"
@@ -112,6 +113,7 @@ class FullCodeGenerator: public AstVisitor {
class TryFinally;
class Finally;
class ForIn;
+ class TestContext;
class NestedStatement BASE_EMBEDDED {
public:
@@ -150,9 +152,11 @@ class FullCodeGenerator: public AstVisitor {
return stack_depth;
}
NestedStatement* outer() { return previous_; }
- protected:
+
+ protected:
MacroAssembler* masm() { return codegen_->masm(); }
- private:
+
+ private:
FullCodeGenerator* codegen_;
NestedStatement* previous_;
DISALLOW_COPY_AND_ASSIGN(NestedStatement);
@@ -295,14 +299,27 @@ class FullCodeGenerator: public AstVisitor {
// Helper function to convert a pure value into a test context. The value
// is expected on the stack or the accumulator, depending on the platform.
// See the platform-specific implementation for details.
- void DoTest(Label* if_true, Label* if_false, Label* fall_through);
+ void DoTest(Expression* condition,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
+ void DoTest(const TestContext* context);
// Helper function to split control flow and avoid a branch to the
// fall-through label if it is set up.
+#ifdef V8_TARGET_ARCH_MIPS
+ void Split(Condition cc,
+ Register lhs,
+ const Operand& rhs,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
+#else  // All non-MIPS architectures.
void Split(Condition cc,
Label* if_true,
Label* if_false,
Label* fall_through);
+#endif // V8_TARGET_ARCH_MIPS
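+
+ // Informal note: MIPS has no condition-flags register, so its Split must
+ // be handed the compare operands (lhs, rhs) and emit a compare-and-branch;
+ // on the other architectures the preceding instruction has already set the
+ // flags and Split only needs the condition code.
+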
void Move(Slot* dst, Register source, Register scratch1, Register scratch2);
void Move(Register dst, Slot* source);
@@ -318,48 +335,55 @@ class FullCodeGenerator: public AstVisitor {
void VisitForEffect(Expression* expr) {
EffectContext context(this);
- HandleInNonTestContext(expr, NO_REGISTERS);
+ VisitInCurrentContext(expr);
}
void VisitForAccumulatorValue(Expression* expr) {
AccumulatorValueContext context(this);
- HandleInNonTestContext(expr, TOS_REG);
+ VisitInCurrentContext(expr);
}
void VisitForStackValue(Expression* expr) {
StackValueContext context(this);
- HandleInNonTestContext(expr, NO_REGISTERS);
+ VisitInCurrentContext(expr);
}
void VisitForControl(Expression* expr,
Label* if_true,
Label* if_false,
Label* fall_through) {
- TestContext context(this, if_true, if_false, fall_through);
- VisitInTestContext(expr);
- // Forwarding bailouts to children is a one shot operation. It
- // should have been processed at this point.
- ASSERT(forward_bailout_pending_ == NULL);
+ TestContext context(this, expr, if_true, if_false, fall_through);
+ VisitInCurrentContext(expr);
}
- void HandleInNonTestContext(Expression* expr, State state);
- void VisitInTestContext(Expression* expr);
-
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void DeclareGlobals(Handle<FixedArray> pairs);
// Try to perform a comparison as a fast inlined literal compare if
  // the operands allow it. Returns true if the compare operation
  // has been matched and all code has been generated; false otherwise.
- bool TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
+ bool TryLiteralCompare(CompareOperation* compare,
Label* if_true,
Label* if_false,
Label* fall_through);
+ // Platform-specific code for comparing the type of a value with
+ // a given literal string.
+ void EmitLiteralCompareTypeof(Expression* expr,
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
+
+ // Platform-specific code for strict equality comparison with
+ // the undefined value.
+ void EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
+
// Bailout support.
- void PrepareForBailout(AstNode* node, State state);
+ void PrepareForBailout(Expression* node, State state);
void PrepareForBailoutForId(int id, State state);
// Record a call's return site offset, used to rebuild the frame if the
@@ -395,9 +419,9 @@ class FullCodeGenerator: public AstVisitor {
void EmitReturnSequence();
// Platform-specific code sequences for calls
- void EmitCallWithStub(Call* expr);
+ void EmitCallWithStub(Call* expr, CallFunctionFlags flags);
void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
- void EmitKeyedCallWithIC(Call* expr, Expression* key, RelocInfo::Mode mode);
+ void EmitKeyedCallWithIC(Call* expr, Expression* key);
// Platform-specific code for inline runtime calls.
InlineFunctionGenerator FindInlineFunctionGenerator(Runtime::FunctionId id);
@@ -445,12 +469,13 @@ class FullCodeGenerator: public AstVisitor {
// Apply the compound assignment operator. Expects the left operand on top
// of the stack and the right one in the accumulator.
- void EmitBinaryOp(Token::Value op,
+ void EmitBinaryOp(BinaryOperation* expr,
+ Token::Value op,
OverwriteMode mode);
// Helper functions for generating inlined smi code for certain
// binary operations.
- void EmitInlineSmiBinaryOp(Expression* expr,
+ void EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode,
Expression* left,
@@ -501,9 +526,9 @@ class FullCodeGenerator: public AstVisitor {
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
- bool is_strict() { return function()->strict_mode(); }
+ bool is_strict_mode() { return function()->strict_mode(); }
StrictModeFlag strict_mode_flag() {
- return is_strict() ? kStrictMode : kNonStrictMode;
+ return is_strict_mode() ? kStrictMode : kNonStrictMode;
}
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return info_->scope(); }
@@ -512,12 +537,16 @@ class FullCodeGenerator: public AstVisitor {
static Register context_register();
// Helper for calling an IC stub.
- void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
+ void EmitCallIC(Handle<Code> ic,
+ RelocInfo::Mode mode,
+ unsigned ast_id);
// Calling an IC stub with a patch site. Passing NULL for patch_site
  // or a non-NULL patch_site that is not activated, indicates no inlined
  // smi code and emits a nop after the IC call.
- void EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site);
+ void EmitCallIC(Handle<Code> ic,
+ JumpPatchSite* patch_site,
+ unsigned ast_id);
// Set fields in the stack frame. Offsets are the frame pointer relative
// offsets defined in, e.g., StandardFrameConstants.
@@ -527,12 +556,21 @@ class FullCodeGenerator: public AstVisitor {
// in v8::internal::Context.
void LoadContextField(Register dst, int context_index);
+ // Push the function argument for the runtime functions PushWithContext
+ // and PushCatchContext.
+ void PushFunctionArgumentForContextAllocation();
+
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- // Handles the shortcutted logical binary operations in VisitBinaryOperation.
- void EmitLogicalOperation(BinaryOperation* expr);
+
+ void EmitUnaryOperation(UnaryOperation* expr, const char* comment);
+
+ void VisitComma(BinaryOperation* expr);
+ void VisitLogicalExpression(BinaryOperation* expr);
+ void VisitArithmeticExpression(BinaryOperation* expr);
+ void VisitInCurrentContext(Expression* expr);
void VisitForTypeofValue(Expression* expr);
@@ -553,6 +591,8 @@ class FullCodeGenerator: public AstVisitor {
codegen_->set_new_context(old_);
}
+ Isolate* isolate() const { return codegen_->isolate(); }
+
// Convert constant control flow (true or false) to the result expected for
// this expression context.
virtual void Plug(bool flag) const = 0;
@@ -578,11 +618,6 @@ class FullCodeGenerator: public AstVisitor {
// context.
virtual void DropAndPlug(int count, Register reg) const = 0;
- // For shortcutting operations || and &&.
- virtual void EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const = 0;
-
// Set up branch labels for a test expression. The three Label** parameters
// are output parameters.
virtual void PrepareTest(Label* materialize_true,
@@ -591,12 +626,14 @@ class FullCodeGenerator: public AstVisitor {
Label** if_false,
Label** fall_through) const = 0;
- virtual void HandleExpression(Expression* expr) const = 0;
-
  // Returns true if we are evaluating only for side effects (i.e., if the
  // result will be discarded).
virtual bool IsEffect() const { return false; }
+ // Returns true if we are evaluating for the value (in the accumulator or
+ // on the stack).
+ virtual bool IsAccumulatorValue() const { return false; }
+ virtual bool IsStackValue() const { return false; }
+
// Returns true if we are branching on the value rather than materializing
// it. Only used for asserts.
virtual bool IsTest() const { return false; }
@@ -624,15 +661,12 @@ class FullCodeGenerator: public AstVisitor {
virtual void Plug(Heap::RootListIndex) const;
virtual void PlugTOS() const;
virtual void DropAndPlug(int count, Register reg) const;
- virtual void EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const;
virtual void PrepareTest(Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false,
Label** fall_through) const;
- virtual void HandleExpression(Expression* expr) const;
+ virtual bool IsAccumulatorValue() const { return true; }
};
class StackValueContext : public ExpressionContext {
@@ -648,24 +682,23 @@ class FullCodeGenerator: public AstVisitor {
virtual void Plug(Heap::RootListIndex) const;
virtual void PlugTOS() const;
virtual void DropAndPlug(int count, Register reg) const;
- virtual void EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const;
virtual void PrepareTest(Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false,
Label** fall_through) const;
- virtual void HandleExpression(Expression* expr) const;
+ virtual bool IsStackValue() const { return true; }
};
class TestContext : public ExpressionContext {
public:
- explicit TestContext(FullCodeGenerator* codegen,
- Label* true_label,
- Label* false_label,
- Label* fall_through)
+ TestContext(FullCodeGenerator* codegen,
+ Expression* condition,
+ Label* true_label,
+ Label* false_label,
+ Label* fall_through)
: ExpressionContext(codegen),
+ condition_(condition),
true_label_(true_label),
false_label_(false_label),
fall_through_(fall_through) { }
@@ -675,6 +708,7 @@ class FullCodeGenerator: public AstVisitor {
return reinterpret_cast<const TestContext*>(context);
}
+ Expression* condition() const { return condition_; }
Label* true_label() const { return true_label_; }
Label* false_label() const { return false_label_; }
Label* fall_through() const { return fall_through_; }
@@ -687,18 +721,15 @@ class FullCodeGenerator: public AstVisitor {
virtual void Plug(Heap::RootListIndex) const;
virtual void PlugTOS() const;
virtual void DropAndPlug(int count, Register reg) const;
- virtual void EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const;
virtual void PrepareTest(Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false,
Label** fall_through) const;
- virtual void HandleExpression(Expression* expr) const;
virtual bool IsTest() const { return true; }
private:
+ Expression* condition_;
Label* true_label_;
Label* false_label_;
Label* fall_through_;
@@ -717,15 +748,11 @@ class FullCodeGenerator: public AstVisitor {
virtual void Plug(Heap::RootListIndex) const;
virtual void PlugTOS() const;
virtual void DropAndPlug(int count, Register reg) const;
- virtual void EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const;
virtual void PrepareTest(Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false,
Label** fall_through) const;
- virtual void HandleExpression(Expression* expr) const;
virtual bool IsEffect() const { return true; }
};
diff --git a/deps/v8/src/func-name-inferrer.cc b/deps/v8/src/func-name-inferrer.cc
index f12d026bd..239358dfa 100644
--- a/deps/v8/src/func-name-inferrer.cc
+++ b/deps/v8/src/func-name-inferrer.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,51 +29,67 @@
#include "ast.h"
#include "func-name-inferrer.h"
+#include "list-inl.h"
namespace v8 {
namespace internal {
+FuncNameInferrer::FuncNameInferrer(Isolate* isolate)
+ : isolate_(isolate),
+ entries_stack_(10),
+ names_stack_(5),
+ funcs_to_infer_(4) {
+}
+
void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
  // The enclosing name is the name of a constructor function. To check
// that it is really a constructor, we check that it is not empty
// and starts with a capital letter.
- if (name->length() > 0 && Runtime::IsUpperCaseChar(name->Get(0))) {
- names_stack_.Add(name);
+ if (name->length() > 0 && Runtime::IsUpperCaseChar(
+ isolate()->runtime_state(), name->Get(0))) {
+ names_stack_.Add(Name(name, kEnclosingConstructorName));
}
}
void FuncNameInferrer::PushLiteralName(Handle<String> name) {
- if (IsOpen() && !Heap::prototype_symbol()->Equals(*name)) {
- names_stack_.Add(name);
+ if (IsOpen() && !isolate()->heap()->prototype_symbol()->Equals(*name)) {
+ names_stack_.Add(Name(name, kLiteralName));
}
}
void FuncNameInferrer::PushVariableName(Handle<String> name) {
- if (IsOpen() && !Heap::result_symbol()->Equals(*name)) {
- names_stack_.Add(name);
+ if (IsOpen() && !isolate()->heap()->result_symbol()->Equals(*name)) {
+ names_stack_.Add(Name(name, kVariableName));
}
}
Handle<String> FuncNameInferrer::MakeNameFromStack() {
- if (names_stack_.is_empty()) {
- return Factory::empty_string();
- } else {
- return MakeNameFromStackHelper(1, names_stack_.at(0));
- }
+ return MakeNameFromStackHelper(0, isolate()->factory()->empty_string());
}
Handle<String> FuncNameInferrer::MakeNameFromStackHelper(int pos,
Handle<String> prev) {
- if (pos >= names_stack_.length()) {
- return prev;
+ if (pos >= names_stack_.length()) return prev;
+ if (pos < names_stack_.length() - 1 &&
+ names_stack_.at(pos).type == kVariableName &&
+ names_stack_.at(pos + 1).type == kVariableName) {
+ // Skip consecutive variable declarations.
+ return MakeNameFromStackHelper(pos + 1, prev);
} else {
- Handle<String> curr = Factory::NewConsString(dot_, names_stack_.at(pos));
- return MakeNameFromStackHelper(pos + 1, Factory::NewConsString(prev, curr));
+ if (prev->length() > 0) {
+ Factory* factory = isolate()->factory();
+ Handle<String> curr = factory->NewConsString(
+ factory->dot_symbol(), names_stack_.at(pos).name);
+ return MakeNameFromStackHelper(pos + 1,
+ factory->NewConsString(prev, curr));
+ } else {
+ return MakeNameFromStackHelper(pos + 1, names_stack_.at(pos).name);
+ }
}
}
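+
+// A worked example of the inference above (illustrative): with the names
+// stack holding [ "MyClass" (enclosing constructor), "obj" (variable),
+// "foo" (literal) ] the helper yields "MyClass.obj.foo", while consecutive
+// variable declarations such as [ "a" (variable), "b" (variable),
+// "f" (literal) ] collapse to "b.f".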
diff --git a/deps/v8/src/func-name-inferrer.h b/deps/v8/src/func-name-inferrer.h
index a35034ecb..bec3a5cf9 100644
--- a/deps/v8/src/func-name-inferrer.h
+++ b/deps/v8/src/func-name-inferrer.h
@@ -31,6 +31,8 @@
namespace v8 {
namespace internal {
+class Isolate;
+
// FuncNameInferrer is a stateful class that is used to perform name
// inference for anonymous functions during static analysis of source code.
// Inference is performed in cases when an anonymous function is assigned
@@ -43,12 +45,7 @@ namespace internal {
// a name.
class FuncNameInferrer : public ZoneObject {
public:
- FuncNameInferrer()
- : entries_stack_(10),
- names_stack_(5),
- funcs_to_infer_(4),
- dot_(Factory::NewStringFromAscii(CStrVector("."))) {
- }
+ explicit FuncNameInferrer(Isolate* isolate);
// Returns whether we have entered name collection state.
bool IsOpen() const { return !entries_stack_.is_empty(); }
@@ -81,13 +78,26 @@ class FuncNameInferrer : public ZoneObject {
}
}
- // Infers a function name and leaves names collection state.
+ // Leaves names collection state.
void Leave() {
ASSERT(IsOpen());
names_stack_.Rewind(entries_stack_.RemoveLast());
}
private:
+ enum NameType {
+ kEnclosingConstructorName,
+ kLiteralName,
+ kVariableName
+ };
+ struct Name {
+ Name(Handle<String> name, NameType type) : name(name), type(type) { }
+ Handle<String> name;
+ NameType type;
+ };
+
+ Isolate* isolate() { return isolate_; }
+
// Constructs a full name in dotted notation from gathered names.
Handle<String> MakeNameFromStack();
@@ -97,10 +107,10 @@ class FuncNameInferrer : public ZoneObject {
// Performs name inferring for added functions.
void InferFunctionsNames();
+ Isolate* isolate_;
ZoneList<int> entries_stack_;
- ZoneList<Handle<String> > names_stack_;
+ ZoneList<Name> names_stack_;
ZoneList<FunctionLiteral*> funcs_to_infer_;
- Handle<String> dot_;
DISALLOW_COPY_AND_ASSIGN(FuncNameInferrer);
};
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 5136deddb..b4992a7f5 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef ENABLE_GDB_JIT_INTERFACE
+#include "v8.h"
#include "gdb-jit.h"
#include "bootstrapper.h"
@@ -643,7 +644,6 @@ class ELFSymbolTable : public ELFSection {
class CodeDescription BASE_EMBEDDED {
public:
-
#ifdef V8_TARGET_ARCH_X64
enum StackState {
POST_RBP_PUSH,
@@ -1326,7 +1326,7 @@ static void UnregisterCodeEntry(JITCodeEntry* entry) {
static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
- ZoneScope zone_scope(DELETE_ON_EXIT);
+ ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
ELF elf;
Writer w(&elf);
@@ -1444,11 +1444,16 @@ static void AddUnwindInfo(CodeDescription *desc) {
}
+Mutex* GDBJITInterface::mutex_ = OS::CreateMutex();
+
+
void GDBJITInterface::AddCode(const char* name,
Code* code,
GDBJITInterface::CodeTag tag,
Script* script) {
if (!FLAG_gdbjit) return;
+
+ ScopedLock lock(mutex_);
AssertNoAllocation no_gc;
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
@@ -1517,6 +1522,7 @@ void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag, Code* code) {
void GDBJITInterface::RemoveCode(Code* code) {
if (!FLAG_gdbjit) return;
+ ScopedLock lock(mutex_);
HashMap::Entry* e = GetEntries()->Lookup(code,
HashForCodeObject(code),
false);
@@ -1536,6 +1542,7 @@ void GDBJITInterface::RemoveCode(Code* code) {
void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
GDBJITLineInfo* line_info) {
+ ScopedLock lock(mutex_);
ASSERT(!IsLineInfoTagged(line_info));
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
ASSERT(e->value == NULL);
diff --git a/deps/v8/src/gdb-jit.h b/deps/v8/src/gdb-jit.h
index d46fec63a..0c80fb65b 100644
--- a/deps/v8/src/gdb-jit.h
+++ b/deps/v8/src/gdb-jit.h
@@ -28,6 +28,8 @@
#ifndef V8_GDB_JIT_H_
#define V8_GDB_JIT_H_
+#include "allocation.h"
+
//
// Basic implementation of the GDB JIT Interface client.
// The GDB JIT Interface is supported in GDB 7.0 and above.
@@ -126,6 +128,9 @@ class GDBJITInterface: public AllStatic {
static void RemoveCode(Code* code);
static void RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info);
+
+ private:
+ static Mutex* mutex_;
};
#define GDBJIT(action) GDBJITInterface::action
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 18cdc5a3a..87066faea 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -35,79 +35,160 @@
namespace v8 {
namespace internal {
-class GlobalHandles::Node : public Malloced {
- public:
- void Initialize(Object* object) {
- // Set the initial value of the handle.
- object_ = object;
- state_ = NORMAL;
- parameter_or_next_free_.parameter = NULL;
- callback_ = NULL;
- }
+ObjectGroup::~ObjectGroup() {
+ if (info_ != NULL) info_->Dispose();
+}
- Node() {
- state_ = DESTROYED;
- }
- explicit Node(Object* object) {
- Initialize(object);
- // Initialize link structure.
- next_ = NULL;
+class GlobalHandles::Node {
+ public:
+ // State transition diagram:
+ // FREE -> NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, FREE }
+ enum State {
+ FREE,
+ NORMAL, // Normal global handle.
+ WEAK, // Flagged as weak but not yet finalized.
+ PENDING, // Has been recognized as only reachable by weak handles.
+ NEAR_DEATH // Callback has informed the handle is near death.
+ };
+
+ // Maps handle location (slot) to the containing node.
+ static Node* FromLocation(Object** location) {
+ ASSERT(OFFSET_OF(Node, object_) == 0);
+ return reinterpret_cast<Node*>(location);
}
- ~Node() {
- if (state_ != DESTROYED) Destroy();
+ Node() {}
+
#ifdef DEBUG
+ ~Node() {
+ // TODO(1428): if it's a weak handle we should have invoked its callback.
// Zap the values for eager trapping.
object_ = NULL;
- next_ = NULL;
+ class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
+ index_ = 0;
+ independent_ = false;
+ in_new_space_list_ = false;
parameter_or_next_free_.next_free = NULL;
+ callback_ = NULL;
+ }
#endif
+
+ void Initialize(int index, Node** first_free) {
+ index_ = static_cast<uint8_t>(index);
+ ASSERT(static_cast<int>(index_) == index);
+ state_ = FREE;
+ in_new_space_list_ = false;
+ parameter_or_next_free_.next_free = *first_free;
+ *first_free = this;
}
- void Destroy() {
- if (state_ == WEAK || IsNearDeath()) {
- GlobalHandles::number_of_weak_handles_--;
+ void Acquire(Object* object, GlobalHandles* global_handles) {
+ ASSERT(state_ == FREE);
+ object_ = object;
+ class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
+ independent_ = false;
+ state_ = NORMAL;
+ parameter_or_next_free_.parameter = NULL;
+ callback_ = NULL;
+ IncreaseBlockUses(global_handles);
+ }
+
+ void Release(GlobalHandles* global_handles) {
+ ASSERT(state_ != FREE);
+ if (IsWeakRetainer()) {
+ global_handles->number_of_weak_handles_--;
if (object_->IsJSGlobalObject()) {
- GlobalHandles::number_of_global_object_weak_handles_--;
+ global_handles->number_of_global_object_weak_handles_--;
}
}
- state_ = DESTROYED;
+ state_ = FREE;
+ parameter_or_next_free_.next_free = global_handles->first_free_;
+ global_handles->first_free_ = this;
+ DecreaseBlockUses(global_handles);
+ }
+
+ // Object slot accessors.
+ Object* object() const { return object_; }
+ Object** location() { return &object_; }
+ Handle<Object> handle() { return Handle<Object>(location()); }
+
+ // Wrapper class ID accessors.
+ bool has_wrapper_class_id() const {
+ return class_id_ != v8::HeapProfiler::kPersistentHandleNoClassId;
+ }
+ uint16_t wrapper_class_id() const { return class_id_; }
+ void set_wrapper_class_id(uint16_t class_id) {
+ class_id_ = class_id;
+ }
+
+ // State accessors.
+
+ State state() const { return state_; }
+
+ bool IsNearDeath() const {
+ // Check for PENDING to ensure correct answer when processing callbacks.
+ return state_ == PENDING || state_ == NEAR_DEATH;
}
- // Accessors for next_.
- Node* next() { return next_; }
- void set_next(Node* value) { next_ = value; }
- Node** next_addr() { return &next_; }
+ bool IsWeak() const { return state_ == WEAK; }
+
+ bool IsRetainer() const { return state_ != FREE; }
+
+ bool IsStrongRetainer() const { return state_ == NORMAL; }
+
+ bool IsWeakRetainer() const {
+ return state_ == WEAK || state_ == PENDING || state_ == NEAR_DEATH;
+ }
+
+ void MarkPending() {
+ ASSERT(state_ == WEAK);
+ state_ = PENDING;
+ }
+
+ // Independent flag accessors.
+ void MarkIndependent() {
+ ASSERT(state_ != FREE);
+ independent_ = true;
+ }
+ bool is_independent() const { return independent_; }
+
+ // In-new-space-list flag accessors.
+ void set_in_new_space_list(bool v) { in_new_space_list_ = v; }
+ bool is_in_new_space_list() const { return in_new_space_list_; }
+
+ // Callback accessor.
+ WeakReferenceCallback callback() { return callback_; }
+
+ // Callback parameter accessors.
+ void set_parameter(void* parameter) {
+ ASSERT(state_ != FREE);
+ parameter_or_next_free_.parameter = parameter;
+ }
+ void* parameter() const {
+ ASSERT(state_ != FREE);
+ return parameter_or_next_free_.parameter;
+ }
// Accessors for next free node in the free list.
Node* next_free() {
- ASSERT(state_ == DESTROYED);
+ ASSERT(state_ == FREE);
return parameter_or_next_free_.next_free;
}
void set_next_free(Node* value) {
- ASSERT(state_ == DESTROYED);
+ ASSERT(state_ == FREE);
parameter_or_next_free_.next_free = value;
}
- // Returns a link from the handle.
- static Node* FromLocation(Object** location) {
- ASSERT(OFFSET_OF(Node, object_) == 0);
- return reinterpret_cast<Node*>(location);
- }
-
- // Returns the handle.
- Handle<Object> handle() { return Handle<Object>(&object_); }
-
- // Make this handle weak.
- void MakeWeak(void* parameter, WeakReferenceCallback callback) {
- LOG(HandleEvent("GlobalHandle::MakeWeak", handle().location()));
- ASSERT(state_ != DESTROYED);
- if (state_ != WEAK && !IsNearDeath()) {
- GlobalHandles::number_of_weak_handles_++;
+ void MakeWeak(GlobalHandles* global_handles,
+ void* parameter,
+ WeakReferenceCallback callback) {
+ ASSERT(state_ != FREE);
+ if (!IsWeakRetainer()) {
+ global_handles->number_of_weak_handles_++;
if (object_->IsJSGlobalObject()) {
- GlobalHandles::number_of_global_object_weak_handles_++;
+ global_handles->number_of_global_object_weak_handles_++;
}
}
state_ = WEAK;
@@ -115,47 +196,24 @@ class GlobalHandles::Node : public Malloced {
callback_ = callback;
}
- void ClearWeakness() {
- LOG(HandleEvent("GlobalHandle::ClearWeakness", handle().location()));
- ASSERT(state_ != DESTROYED);
- if (state_ == WEAK || IsNearDeath()) {
- GlobalHandles::number_of_weak_handles_--;
+ void ClearWeakness(GlobalHandles* global_handles) {
+ ASSERT(state_ != FREE);
+ if (IsWeakRetainer()) {
+ global_handles->number_of_weak_handles_--;
if (object_->IsJSGlobalObject()) {
- GlobalHandles::number_of_global_object_weak_handles_--;
+ global_handles->number_of_global_object_weak_handles_--;
}
}
state_ = NORMAL;
set_parameter(NULL);
}
- bool IsNearDeath() {
- // Check for PENDING to ensure correct answer when processing callbacks.
- return state_ == PENDING || state_ == NEAR_DEATH;
- }
-
- bool IsWeak() {
- return state_ == WEAK;
- }
-
- // Returns the id for this weak handle.
- void set_parameter(void* parameter) {
- ASSERT(state_ != DESTROYED);
- parameter_or_next_free_.parameter = parameter;
- }
- void* parameter() {
- ASSERT(state_ != DESTROYED);
- return parameter_or_next_free_.parameter;
- }
-
- // Returns the callback for this weak handle.
- WeakReferenceCallback callback() { return callback_; }
-
- bool PostGarbageCollectionProcessing() {
+ bool PostGarbageCollectionProcessing(Isolate* isolate,
+ GlobalHandles* global_handles) {
if (state_ != Node::PENDING) return false;
- LOG(HandleEvent("GlobalHandle::Processing", handle().location()));
WeakReferenceCallback func = callback();
if (func == NULL) {
- Destroy();
+ Release(global_handles);
return false;
}
void* par = parameter();
@@ -164,13 +222,6 @@ class GlobalHandles::Node : public Malloced {
v8::Persistent<v8::Object> object = ToApi<v8::Object>(handle());
{
- // Forbid reuse of destroyed nodes as they might be already deallocated.
- // It's fine though to reuse nodes that were destroyed in weak callback
- // as those cannot be deallocated until we are back from the callback.
- set_first_free(NULL);
- if (first_deallocated()) {
- first_deallocated()->set_next(head());
- }
// Check that we are not passing a finalized external string to
// the callback.
ASSERT(!object_->IsExternalAsciiString() ||
@@ -178,7 +229,7 @@ class GlobalHandles::Node : public Malloced {
ASSERT(!object_->IsExternalTwoByteString() ||
ExternalTwoByteString::cast(object_)->resource() != NULL);
// Leaving V8.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
func(object, par);
}
    // Absence of explicit cleanup or revival of the weak handle
@@ -187,143 +238,210 @@ class GlobalHandles::Node : public Malloced {
return true;
}
- // Place the handle address first to avoid offset computation.
- Object* object_; // Storage for object pointer.
+ private:
+ inline NodeBlock* FindBlock();
+ inline void IncreaseBlockUses(GlobalHandles* global_handles);
+ inline void DecreaseBlockUses(GlobalHandles* global_handles);
- // Transition diagram:
- // NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, DESTROYED }
- enum State {
- NORMAL, // Normal global handle.
- WEAK, // Flagged as weak but not yet finalized.
- PENDING, // Has been recognized as only reachable by weak handles.
- NEAR_DEATH, // Callback has informed the handle is near death.
- DESTROYED
- };
- State state_;
+ // Storage for object pointer.
+ // Placed first to avoid offset computation.
+ Object* object_;
+
+ // Next word stores class_id, index, state, and independent.
+ // Note: the most aligned fields should go first.
+
+ // Wrapper class ID.
+ uint16_t class_id_;
+
+ // Index in the containing handle block.
+ uint8_t index_;
+
+ // Need one more bit for MSVC as it treats enums as signed.
+ State state_ : 4;
+
+ bool independent_ : 1;
+ bool in_new_space_list_ : 1;
- private:
// Handle specific callback.
WeakReferenceCallback callback_;
- // Provided data for callback. In DESTROYED state, this is used for
+
+ // Provided data for callback. In FREE state, this is used for
// the free list link.
union {
void* parameter;
Node* next_free;
} parameter_or_next_free_;
- // Linkage for the list.
- Node* next_;
+ DISALLOW_COPY_AND_ASSIGN(Node);
+};
+
+class GlobalHandles::NodeBlock {
public:
- TRACK_MEMORY("GlobalHandles::Node")
-};
+ static const int kSize = 256;
+ explicit NodeBlock(NodeBlock* next)
+ : next_(next), used_nodes_(0), next_used_(NULL), prev_used_(NULL) {}
-class GlobalHandles::Pool BASE_EMBEDDED {
- public:
- Pool() {
- current_ = new Chunk();
- current_->previous = NULL;
- next_ = current_->nodes;
- limit_ = current_->nodes + kNodesPerChunk;
+ void PutNodesOnFreeList(Node** first_free) {
+ for (int i = kSize - 1; i >= 0; --i) {
+ nodes_[i].Initialize(i, first_free);
}
+ }
- ~Pool() {
- if (current_ != NULL) {
- Release();
- }
+ Node* node_at(int index) {
+ ASSERT(0 <= index && index < kSize);
+ return &nodes_[index];
+ }
+
+ void IncreaseUses(GlobalHandles* global_handles) {
+ ASSERT(used_nodes_ < kSize);
+ if (used_nodes_++ == 0) {
+ NodeBlock* old_first = global_handles->first_used_block_;
+ global_handles->first_used_block_ = this;
+ next_used_ = old_first;
+ prev_used_ = NULL;
+ if (old_first == NULL) return;
+ old_first->prev_used_ = this;
}
+ }
- Node* Allocate() {
- if (next_ < limit_) {
- return next_++;
+ void DecreaseUses(GlobalHandles* global_handles) {
+ ASSERT(used_nodes_ > 0);
+ if (--used_nodes_ == 0) {
+ if (next_used_ != NULL) next_used_->prev_used_ = prev_used_;
+ if (prev_used_ != NULL) prev_used_->next_used_ = next_used_;
+ if (this == global_handles->first_used_block_) {
+ global_handles->first_used_block_ = next_used_;
}
- return SlowAllocate();
}
+ }
- void Release() {
- Chunk* current = current_;
- ASSERT(current != NULL); // At least a single block must be allocated
- do {
- Chunk* previous = current->previous;
- delete current;
- current = previous;
- } while (current != NULL);
- current_ = NULL;
- next_ = limit_ = NULL;
- }
+ // Next block in the list of all blocks.
+ NodeBlock* next() const { return next_; }
- private:
- static const int kNodesPerChunk = (1 << 12) - 1;
- struct Chunk : public Malloced {
- Chunk* previous;
- Node nodes[kNodesPerChunk];
- };
-
- Node* SlowAllocate() {
- Chunk* chunk = new Chunk();
- chunk->previous = current_;
- current_ = chunk;
-
- Node* new_nodes = current_->nodes;
- next_ = new_nodes + 1;
- limit_ = new_nodes + kNodesPerChunk;
- return new_nodes;
- }
+ // Next/previous block in the list of blocks with used nodes.
+ NodeBlock* next_used() const { return next_used_; }
+ NodeBlock* prev_used() const { return prev_used_; }
+
+ private:
+ Node nodes_[kSize];
+ NodeBlock* const next_;
+ int used_nodes_;
+ NodeBlock* next_used_;
+ NodeBlock* prev_used_;
+};
+
+
+GlobalHandles::NodeBlock* GlobalHandles::Node::FindBlock() {
+ intptr_t ptr = reinterpret_cast<intptr_t>(this);
+ ptr = ptr - index_ * sizeof(Node);
+ NodeBlock* block = reinterpret_cast<NodeBlock*>(ptr);
+ ASSERT(block->node_at(index_) == this);
+ return block;
+}
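+
+// FindBlock relies on nodes_ being the first field of NodeBlock: a node at
+// index i sits exactly i * sizeof(Node) bytes past the block's own address.
+// Illustrative arithmetic: with index_ == 3 and sizeof(Node) == 40, a node
+// at address 0x1078 maps back to the block at 0x1000.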
+
+
+void GlobalHandles::Node::IncreaseBlockUses(GlobalHandles* global_handles) {
+ FindBlock()->IncreaseUses(global_handles);
+}
+
+
+void GlobalHandles::Node::DecreaseBlockUses(GlobalHandles* global_handles) {
+ FindBlock()->DecreaseUses(global_handles);
+}
+
+
+class GlobalHandles::NodeIterator {
+ public:
+ explicit NodeIterator(GlobalHandles* global_handles)
+ : block_(global_handles->first_used_block_),
+ index_(0) {}
- Chunk* current_;
- Node* next_;
- Node* limit_;
+ bool done() const { return block_ == NULL; }
+
+ Node* node() const {
+ ASSERT(!done());
+ return block_->node_at(index_);
+ }
+
+ void Advance() {
+ ASSERT(!done());
+ if (++index_ < NodeBlock::kSize) return;
+ index_ = 0;
+ block_ = block_->next_used();
+ }
+
+ private:
+ NodeBlock* block_;
+ int index_;
+
+ DISALLOW_COPY_AND_ASSIGN(NodeIterator);
};
-static GlobalHandles::Pool pool_;
+GlobalHandles::GlobalHandles(Isolate* isolate)
+ : isolate_(isolate),
+ number_of_weak_handles_(0),
+ number_of_global_object_weak_handles_(0),
+ first_block_(NULL),
+ first_used_block_(NULL),
+ first_free_(NULL),
+ post_gc_processing_count_(0) {}
+
+
+GlobalHandles::~GlobalHandles() {
+ NodeBlock* block = first_block_;
+ while (block != NULL) {
+ NodeBlock* tmp = block->next();
+ delete block;
+ block = tmp;
+ }
+ first_block_ = NULL;
+}
Handle<Object> GlobalHandles::Create(Object* value) {
- Counters::global_handles.Increment();
- Node* result;
- if (first_free()) {
- // Take the first node in the free list.
- result = first_free();
- set_first_free(result->next_free());
- } else if (first_deallocated()) {
- // Next try deallocated list
- result = first_deallocated();
- set_first_deallocated(result->next_free());
- ASSERT(result->next() == head());
- set_head(result);
- } else {
- // Allocate a new node.
- result = pool_.Allocate();
- result->set_next(head());
- set_head(result);
+ isolate_->counters()->global_handles()->Increment();
+ if (first_free_ == NULL) {
+ first_block_ = new NodeBlock(first_block_);
+ first_block_->PutNodesOnFreeList(&first_free_);
+ }
+ ASSERT(first_free_ != NULL);
+ // Take the first node in the free list.
+ Node* result = first_free_;
+ first_free_ = result->next_free();
+ result->Acquire(value, this);
+ if (isolate_->heap()->InNewSpace(value) &&
+ !result->is_in_new_space_list()) {
+ new_space_nodes_.Add(result);
+ result->set_in_new_space_list(true);
}
- result->Initialize(value);
return result->handle();
}
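+
+// A sketch of the typical call sequence for this API (illustrative):
+//
+//   GlobalHandles* gh = isolate->global_handles();
+//   Handle<Object> h = gh->Create(*object);        // strong by default
+//   gh->MakeWeak(h.location(), param, callback);   // weak, with finalizer
+//   gh->Destroy(h.location());                     // node returns to free list
+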
void GlobalHandles::Destroy(Object** location) {
- Counters::global_handles.Decrement();
+ isolate_->counters()->global_handles()->Decrement();
if (location == NULL) return;
- Node* node = Node::FromLocation(location);
- node->Destroy();
- // Link the destroyed.
- node->set_next_free(first_free());
- set_first_free(node);
+ Node::FromLocation(location)->Release(this);
}
void GlobalHandles::MakeWeak(Object** location, void* parameter,
WeakReferenceCallback callback) {
ASSERT(callback != NULL);
- Node::FromLocation(location)->MakeWeak(parameter, callback);
+ Node::FromLocation(location)->MakeWeak(this, parameter, callback);
}
void GlobalHandles::ClearWeakness(Object** location) {
- Node::FromLocation(location)->ClearWeakness();
+ Node::FromLocation(location)->ClearWeakness(this);
+}
+
+
+void GlobalHandles::MarkIndependent(Object** location) {
+ Node::FromLocation(location)->MarkIndependent();
}
@@ -337,136 +455,174 @@ bool GlobalHandles::IsWeak(Object** location) {
}
+void GlobalHandles::SetWrapperClassId(Object** location, uint16_t class_id) {
+ Node::FromLocation(location)->set_wrapper_class_id(class_id);
+}
+
+
void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
- // Traversal of GC roots in the global handle list that are marked as
- // WEAK or PENDING.
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->state_ == Node::WEAK
- || current->state_ == Node::PENDING
- || current->state_ == Node::NEAR_DEATH) {
- v->VisitPointer(&current->object_);
- }
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->IsWeakRetainer()) v->VisitPointer(it.node()->location());
}
}
void GlobalHandles::IterateWeakRoots(WeakReferenceGuest f,
WeakReferenceCallback callback) {
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->IsWeak() && current->callback() == callback) {
- f(current->object_, current->parameter());
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->IsWeak() && it.node()->callback() == callback) {
+ f(it.node()->object(), it.node()->parameter());
}
}
}
void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->state_ == Node::WEAK) {
- if (f(&current->object_)) {
- current->state_ = Node::PENDING;
- LOG(HandleEvent("GlobalHandle::Pending", current->handle().location()));
- }
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->IsWeak() && f(it.node()->location())) {
+ it.node()->MarkPending();
+ }
+ }
+}
+
+
+void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ if (node->IsStrongRetainer() ||
+ (node->IsWeakRetainer() && !node->is_independent())) {
+ v->VisitPointer(node->location());
}
}
}
-int post_gc_processing_count = 0;
+void GlobalHandles::IdentifyNewSpaceWeakIndependentHandles(
+ WeakSlotCallbackWithHeap f) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ ASSERT(node->is_in_new_space_list());
+ if (node->is_independent() && node->IsWeak() &&
+ f(isolate_->heap(), node->location())) {
+ node->MarkPending();
+ }
+ }
+}
+
+
+void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ ASSERT(node->is_in_new_space_list());
+ if (node->is_independent() && node->IsWeakRetainer()) {
+ v->VisitPointer(node->location());
+ }
+ }
+}
+
-bool GlobalHandles::PostGarbageCollectionProcessing() {
+bool GlobalHandles::PostGarbageCollectionProcessing(
+ GarbageCollector collector) {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
- // At the same time deallocate all DESTROYED nodes.
- ASSERT(Heap::gc_state() == Heap::NOT_IN_GC);
- const int initial_post_gc_processing_count = ++post_gc_processing_count;
+ ASSERT(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
+ const int initial_post_gc_processing_count = ++post_gc_processing_count_;
bool next_gc_likely_to_collect_more = false;
- Node** p = &head_;
- while (*p != NULL) {
- if ((*p)->PostGarbageCollectionProcessing()) {
- if (initial_post_gc_processing_count != post_gc_processing_count) {
- // Weak callback triggered another GC and another round of
- // PostGarbageCollection processing. The current node might
- // have been deleted in that round, so we need to bail out (or
- // restart the processing).
- break;
+ if (collector == SCAVENGER) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ ASSERT(node->is_in_new_space_list());
+ // Skip dependent handles. Their weak callbacks might expect to be
+ // called between two global garbage collection callbacks which
+ // are not called for minor collections.
+ if (!node->is_independent()) continue;
+ if (node->PostGarbageCollectionProcessing(isolate_, this)) {
+ if (initial_post_gc_processing_count != post_gc_processing_count_) {
+ // Weak callback triggered another GC and another round of
+ // PostGarbageCollection processing. The current node might
+ // have been deleted in that round, so we need to bail out (or
+ // restart the processing).
+ return next_gc_likely_to_collect_more;
+ }
+ }
+ if (!node->IsRetainer()) {
+ next_gc_likely_to_collect_more = true;
}
}
- if ((*p)->state_ == Node::DESTROYED) {
- // Delete the link.
- Node* node = *p;
- *p = node->next(); // Update the link.
- if (first_deallocated()) {
- first_deallocated()->set_next(node);
+ } else {
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->PostGarbageCollectionProcessing(isolate_, this)) {
+ if (initial_post_gc_processing_count != post_gc_processing_count_) {
+ // See the comment above.
+ return next_gc_likely_to_collect_more;
+ }
+ }
+ if (!it.node()->IsRetainer()) {
+ next_gc_likely_to_collect_more = true;
}
- node->set_next_free(first_deallocated());
- set_first_deallocated(node);
- next_gc_likely_to_collect_more = true;
- } else {
- p = (*p)->next_addr();
}
}
- set_first_free(NULL);
- if (first_deallocated()) {
- first_deallocated()->set_next(head());
+ // Update the list of new space nodes.
+ int last = 0;
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ ASSERT(node->is_in_new_space_list());
+ if (node->IsRetainer() && isolate_->heap()->InNewSpace(node->object())) {
+ new_space_nodes_[last++] = node;
+ } else {
+ node->set_in_new_space_list(false);
+ }
}
-
+ new_space_nodes_.Rewind(last);
return next_gc_likely_to_collect_more;
}
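+
+// In short: after a scavenge only independent new-space handles have their
+// weak callbacks run (dependent handles wait for a full collection, as
+// explained above), while a full collection processes every pending handle.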
void GlobalHandles::IterateStrongRoots(ObjectVisitor* v) {
- // Traversal of global handles marked as NORMAL.
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->state_ == Node::NORMAL) {
- v->VisitPointer(&current->object_);
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->IsStrongRetainer()) {
+ v->VisitPointer(it.node()->location());
}
}
}
void GlobalHandles::IterateAllRoots(ObjectVisitor* v) {
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->state_ != Node::DESTROYED) {
- v->VisitPointer(&current->object_);
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->IsRetainer()) {
+ v->VisitPointer(it.node()->location());
}
}
}
-void GlobalHandles::TearDown() {
- // Reset all the lists.
- set_head(NULL);
- set_first_free(NULL);
- set_first_deallocated(NULL);
- pool_.Release();
+void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) {
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->has_wrapper_class_id() && it.node()->IsRetainer()) {
+ v->VisitEmbedderReference(it.node()->location(),
+ it.node()->wrapper_class_id());
+ }
+ }
}
-int GlobalHandles::number_of_weak_handles_ = 0;
-int GlobalHandles::number_of_global_object_weak_handles_ = 0;
-
-GlobalHandles::Node* GlobalHandles::head_ = NULL;
-GlobalHandles::Node* GlobalHandles::first_free_ = NULL;
-GlobalHandles::Node* GlobalHandles::first_deallocated_ = NULL;
-
void GlobalHandles::RecordStats(HeapStats* stats) {
*stats->global_handle_count = 0;
*stats->weak_global_handle_count = 0;
*stats->pending_global_handle_count = 0;
*stats->near_death_global_handle_count = 0;
- *stats->destroyed_global_handle_count = 0;
- for (Node* current = head_; current != NULL; current = current->next()) {
+ *stats->free_global_handle_count = 0;
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
*stats->global_handle_count += 1;
- if (current->state_ == Node::WEAK) {
+ if (it.node()->state() == Node::WEAK) {
*stats->weak_global_handle_count += 1;
- } else if (current->state_ == Node::PENDING) {
+ } else if (it.node()->state() == Node::PENDING) {
*stats->pending_global_handle_count += 1;
- } else if (current->state_ == Node::NEAR_DEATH) {
+ } else if (it.node()->state() == Node::NEAR_DEATH) {
*stats->near_death_global_handle_count += 1;
- } else if (current->state_ == Node::DESTROYED) {
- *stats->destroyed_global_handle_count += 1;
+ } else if (it.node()->state() == Node::FREE) {
+ *stats->free_global_handle_count += 1;
}
}
}
@@ -480,12 +636,12 @@ void GlobalHandles::PrintStats() {
int near_death = 0;
  int destroyed = 0;  // Counts nodes in the FREE (formerly DESTROYED) state.
- for (Node* current = head_; current != NULL; current = current->next()) {
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
total++;
- if (current->state_ == Node::WEAK) weak++;
- if (current->state_ == Node::PENDING) pending++;
- if (current->state_ == Node::NEAR_DEATH) near_death++;
- if (current->state_ == Node::DESTROYED) destroyed++;
+ if (it.node()->state() == Node::WEAK) weak++;
+ if (it.node()->state() == Node::PENDING) pending++;
+ if (it.node()->state() == Node::NEAR_DEATH) near_death++;
+ if (it.node()->state() == Node::FREE) destroyed++;
}
PrintF("Global Handle Statistics:\n");
@@ -493,42 +649,73 @@ void GlobalHandles::PrintStats() {
PrintF(" # weak = %d\n", weak);
PrintF(" # pending = %d\n", pending);
PrintF(" # near_death = %d\n", near_death);
- PrintF(" # destroyed = %d\n", destroyed);
+ PrintF(" # free = %d\n", destroyed);
PrintF(" # total = %d\n", total);
}
void GlobalHandles::Print() {
PrintF("Global handles:\n");
- for (Node* current = head_; current != NULL; current = current->next()) {
- PrintF(" handle %p to %p (weak=%d)\n",
- reinterpret_cast<void*>(current->handle().location()),
- reinterpret_cast<void*>(*current->handle()),
- current->state_ == Node::WEAK);
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ PrintF(" handle %p to %p%s\n",
+ reinterpret_cast<void*>(it.node()->location()),
+ reinterpret_cast<void*>(it.node()->object()),
+ it.node()->IsWeak() ? " (weak)" : "");
}
}
#endif
-List<ObjectGroup*>* GlobalHandles::ObjectGroups() {
- // Lazily initialize the list to avoid startup time static constructors.
- static List<ObjectGroup*> groups(4);
- return &groups;
+
+
+void GlobalHandles::AddObjectGroup(Object*** handles,
+ size_t length,
+ v8::RetainedObjectInfo* info) {
+#ifdef DEBUG
+ for (size_t i = 0; i < length; ++i) {
+ ASSERT(!Node::FromLocation(handles[i])->is_independent());
+ }
+#endif
+ if (length == 0) {
+ if (info != NULL) info->Dispose();
+ return;
+ }
+ object_groups_.Add(ObjectGroup::New(handles, length, info));
}
-void GlobalHandles::AddGroup(Object*** handles, size_t length) {
- ObjectGroup* new_entry = new ObjectGroup(length);
- for (size_t i = 0; i < length; ++i)
- new_entry->objects_.Add(handles[i]);
- ObjectGroups()->Add(new_entry);
+
+void GlobalHandles::AddImplicitReferences(HeapObject** parent,
+ Object*** children,
+ size_t length) {
+#ifdef DEBUG
+ ASSERT(!Node::FromLocation(BitCast<Object**>(parent))->is_independent());
+ for (size_t i = 0; i < length; ++i) {
+ ASSERT(!Node::FromLocation(children[i])->is_independent());
+ }
+#endif
+ if (length == 0) return;
+ implicit_ref_groups_.Add(ImplicitRefGroup::New(parent, children, length));
}
void GlobalHandles::RemoveObjectGroups() {
- List<ObjectGroup*>* object_groups = ObjectGroups();
- for (int i = 0; i< object_groups->length(); i++) {
- delete object_groups->at(i);
+ for (int i = 0; i < object_groups_.length(); i++) {
+ object_groups_.at(i)->Dispose();
+ }
+ object_groups_.Clear();
+}
+
+
+void GlobalHandles::RemoveImplicitRefGroups() {
+ for (int i = 0; i < implicit_ref_groups_.length(); i++) {
+ implicit_ref_groups_.at(i)->Dispose();
}
- object_groups->Clear();
+ implicit_ref_groups_.Clear();
}
+
+void GlobalHandles::TearDown() {
+ // TODO(1428): invoke weak callbacks.
+}
+
+
} } // namespace v8::internal
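
The group-registration functions above are normally reached from a GC prologue callback through the public embedder API, and have to be re-registered before every collection because RemoveObjectGroups()/RemoveImplicitRefGroups() clear the lists after each mark-compact. A minimal sketch of that usage, assuming the v8.h entry points of this era (V8::AddObjectGroup, V8::AddImplicitReferences, V8::AddGCPrologueCallback; treat the exact signatures as illustrative):

#include <v8.h>

using namespace v8;

static Persistent<Object> node_a;  // hypothetical DOM wrapper handles
static Persistent<Object> node_b;

static void RegisterGroups(GCType type, GCCallbackFlags flags) {
  // One retained unit: if either wrapper is reachable, both survive.
  Persistent<Value> group[] = { node_a, node_b };
  V8::AddObjectGroup(group, 2);
  // If node_a survives the collection, node_b is kept alive through it.
  Persistent<Value> children[] = { node_b };
  V8::AddImplicitReferences(node_a, children, 1);
}

// Registered once at startup:
//   V8::AddGCPrologueCallback(RegisterGroups);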
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 37b2b4452..153d4dac1 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,7 +28,9 @@
#ifndef V8_GLOBAL_HANDLES_H_
#define V8_GLOBAL_HANDLES_H_
-#include "list-inl.h"
+#include "../include/v8-profiler.h"
+
+#include "list.h"
namespace v8 {
namespace internal {
@@ -39,31 +41,84 @@ namespace internal {
// At GC the destroyed global handles are removed from the free list
// and deallocated.
-// Callback function on handling weak global handles.
-// typedef bool (*WeakSlotCallback)(Object** pointer);
-
// An object group is treated like a single JS object: if one object in
// the group is alive, all objects in the same group are considered alive.
// An object group is used to simulate object relationship in a DOM tree.
-class ObjectGroup : public Malloced {
+class ObjectGroup {
+ public:
+ static ObjectGroup* New(Object*** handles,
+ size_t length,
+ v8::RetainedObjectInfo* info) {
+ ASSERT(length > 0);
+ ObjectGroup* group = reinterpret_cast<ObjectGroup*>(
+ malloc(OFFSET_OF(ObjectGroup, objects_[length])));
+ group->length_ = length;
+ group->info_ = info;
+ CopyWords(group->objects_, handles, static_cast<int>(length));
+ return group;
+ }
+
+ void Dispose() {
+ if (info_ != NULL) info_->Dispose();
+ free(this);
+ }
+
+ size_t length_;
+ v8::RetainedObjectInfo* info_;
+ Object** objects_[1]; // Variable sized array.
+
+ private:
+ void* operator new(size_t size);
+ void operator delete(void* p);
+ ~ObjectGroup();
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectGroup);
+};
+
+
+// An implicit references group consists of two parts: a parent object and
+// a list of children objects. If the parent is alive, all the children
+// are alive too.
+class ImplicitRefGroup {
public:
- ObjectGroup() : objects_(4) {}
- explicit ObjectGroup(size_t capacity)
- : objects_(static_cast<int>(capacity)) { }
+ static ImplicitRefGroup* New(HeapObject** parent,
+ Object*** children,
+ size_t length) {
+ ASSERT(length > 0);
+ ImplicitRefGroup* group = reinterpret_cast<ImplicitRefGroup*>(
+ malloc(OFFSET_OF(ImplicitRefGroup, children_[length])));
+ group->parent_ = parent;
+ group->length_ = length;
+ CopyWords(group->children_, children, static_cast<int>(length));
+ return group;
+ }
+
+ void Dispose() {
+ free(this);
+ }
+
+ HeapObject** parent_;
+ size_t length_;
+ Object** children_[1]; // Variable sized array.
- List<Object**> objects_;
+ private:
+ void* operator new(size_t size);
+ void operator delete(void* p);
+ ~ImplicitRefGroup();
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ImplicitRefGroup);
};
typedef void (*WeakReferenceGuest)(Object* object, void* parameter);
-class GlobalHandles : public AllStatic {
+class GlobalHandles {
public:
+ ~GlobalHandles();
+
// Creates a new global handle that is alive until Destroy is called.
- static Handle<Object> Create(Object* value);
+ Handle<Object> Create(Object* value);
// Destroy a global handle.
- static void Destroy(Object** location);
+ void Destroy(Object** location);
// Make the global handle weak and set the callback parameter for the
// handle. When the garbage collector recognizes that only weak global
@@ -71,23 +126,28 @@ class GlobalHandles : public AllStatic {
// function is invoked (for each handle) with the handle and corresponding
// parameter as arguments. Note: cleared means set to Smi::FromInt(0). The
// reason is that Smi::FromInt(0) does not change during garbage collection.
- static void MakeWeak(Object** location,
- void* parameter,
- WeakReferenceCallback callback);
+ void MakeWeak(Object** location,
+ void* parameter,
+ WeakReferenceCallback callback);
+
+ static void SetWrapperClassId(Object** location, uint16_t class_id);
// Returns the current number of weak handles.
- static int NumberOfWeakHandles() { return number_of_weak_handles_; }
+ int NumberOfWeakHandles() { return number_of_weak_handles_; }
- static void RecordStats(HeapStats* stats);
+ void RecordStats(HeapStats* stats);
// Returns the current number of weak handles to global objects.
// These handles are also included in NumberOfWeakHandles().
- static int NumberOfGlobalObjectWeakHandles() {
+ int NumberOfGlobalObjectWeakHandles() {
return number_of_global_object_weak_handles_;
}
// Clear the weakness of a global handle.
- static void ClearWeakness(Object** location);
+ void ClearWeakness(Object** location);
+
+  // Mark the reference to this object independent.
+ void MarkIndependent(Object** location);
// Tells whether global handle is near death.
static bool IsNearDeath(Object** location);
@@ -97,82 +157,118 @@ class GlobalHandles : public AllStatic {
// Process pending weak handles.
// Returns true if next major GC is likely to collect more garbage.
- static bool PostGarbageCollectionProcessing();
+ bool PostGarbageCollectionProcessing(GarbageCollector collector);
// Iterates over all strong handles.
- static void IterateStrongRoots(ObjectVisitor* v);
+ void IterateStrongRoots(ObjectVisitor* v);
// Iterates over all handles.
- static void IterateAllRoots(ObjectVisitor* v);
+ void IterateAllRoots(ObjectVisitor* v);
+
+ // Iterates over all handles that have embedder-assigned class ID.
+ void IterateAllRootsWithClassIds(ObjectVisitor* v);
// Iterates over all weak roots in heap.
- static void IterateWeakRoots(ObjectVisitor* v);
+ void IterateWeakRoots(ObjectVisitor* v);
// Iterates over weak roots that are bound to a given callback.
- static void IterateWeakRoots(WeakReferenceGuest f,
- WeakReferenceCallback callback);
+ void IterateWeakRoots(WeakReferenceGuest f,
+ WeakReferenceCallback callback);
// Find all weak handles satisfying the callback predicate, mark
// them as pending.
- static void IdentifyWeakHandles(WeakSlotCallback f);
+ void IdentifyWeakHandles(WeakSlotCallback f);
+
+ // NOTE: Three ...NewSpace... functions below are used during
+ // scavenge collections and iterate over sets of handles that are
+ // guaranteed to contain all handles holding new space objects (but
+ // may also include old space objects).
+
+  // Iterates over strong and dependent handles. See the note above.
+ void IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v);
+
+ // Finds weak independent handles satisfying the callback predicate
+ // and marks them as pending. See the note above.
+ void IdentifyNewSpaceWeakIndependentHandles(WeakSlotCallbackWithHeap f);
+
+ // Iterates over weak independent handles. See the note above.
+ void IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v);
// Add an object group.
- // Should only used in GC callback function before a collection.
+ // Should be only used in GC callback function before a collection.
+ // All groups are destroyed after a mark-compact collection.
+ void AddObjectGroup(Object*** handles,
+ size_t length,
+ v8::RetainedObjectInfo* info);
+
+  // Add an implicit reference group.
+ // Should be only used in GC callback function before a collection.
// All groups are destroyed after a mark-compact collection.
- static void AddGroup(Object*** handles, size_t length);
+ void AddImplicitReferences(HeapObject** parent,
+ Object*** children,
+ size_t length);
// Returns the object groups.
- static List<ObjectGroup*>* ObjectGroups();
+ List<ObjectGroup*>* object_groups() { return &object_groups_; }
+
+  // Returns the implicit reference groups.
+ List<ImplicitRefGroup*>* implicit_ref_groups() {
+ return &implicit_ref_groups_;
+ }
// Remove the registered groups; this should only happen after GC.
- static void RemoveObjectGroups();
+ void RemoveObjectGroups();
+ void RemoveImplicitRefGroups();
// Tear down the global handle structure.
- static void TearDown();
+ void TearDown();
+
+ Isolate* isolate() { return isolate_; }
#ifdef DEBUG
- static void PrintStats();
- static void Print();
+ void PrintStats();
+ void Print();
#endif
- class Pool;
+
private:
- // Internal node structure, one for each global handle.
+ explicit GlobalHandles(Isolate* isolate);
+
+ // Internal node structures.
class Node;
+ class NodeBlock;
+ class NodeIterator;
+
+ Isolate* isolate_;
// Field always containing the number of weak and near-death handles.
- static int number_of_weak_handles_;
+ int number_of_weak_handles_;
// Field always containing the number of weak and near-death handles
// to global objects. These objects are also included in
// number_of_weak_handles_.
- static int number_of_global_object_weak_handles_;
-
- // Global handles are kept in a single linked list pointed to by head_.
- static Node* head_;
- static Node* head() { return head_; }
- static void set_head(Node* value) { head_ = value; }
-
- // Free list for DESTROYED global handles not yet deallocated.
- static Node* first_free_;
- static Node* first_free() { return first_free_; }
- static void set_first_free(Node* value) { first_free_ = value; }
-
- // List of deallocated nodes.
- // Deallocated nodes form a prefix of all the nodes and
- // |first_deallocated| points to last deallocated node before
- // |head|. Those deallocated nodes are additionally linked
- // by |next_free|:
- // 1st deallocated head
- // | |
- // V V
- // node node ... node node
- // .next -> .next -> .next ->
- // <- .next_free <- .next_free <- .next_free
- static Node* first_deallocated_;
- static Node* first_deallocated() { return first_deallocated_; }
- static void set_first_deallocated(Node* value) {
- first_deallocated_ = value;
- }
+ int number_of_global_object_weak_handles_;
+
+ // List of all allocated node blocks.
+ NodeBlock* first_block_;
+
+ // List of node blocks with used nodes.
+ NodeBlock* first_used_block_;
+
+ // Free list of nodes.
+ Node* first_free_;
+
+ // Contains all nodes holding new space objects. Note: when the list
+ // is accessed, some of the objects may have been promoted already.
+ List<Node*> new_space_nodes_;
+
+ int post_gc_processing_count_;
+
+ List<ObjectGroup*> object_groups_;
+ List<ImplicitRefGroup*> implicit_ref_groups_;
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(GlobalHandles);
};
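
Both group classes above rely on the classic trailing-array idiom: the struct declares a one-element array, New() allocates OFFSET_OF(type, array_[length]) bytes with malloc, and operator new/delete plus the private destructor are disabled so instances can only exist in that raw form. A self-contained sketch of the same pattern (names are illustrative, not V8's):

#include <cstddef>
#include <cstdlib>
#include <cstring>

struct VarGroup {
  size_t length;
  void* slots[1];  // Variable sized array; really 'length' entries.

  static VarGroup* New(void** data, size_t length) {
    // Header plus payload in one contiguous malloc'd block.
    VarGroup* g = static_cast<VarGroup*>(
        malloc(offsetof(VarGroup, slots) + length * sizeof(void*)));
    g->length = length;
    memcpy(g->slots, data, length * sizeof(void*));
    return g;
  }

  void Dispose() { free(this); }  // No destructor runs; plain free().
};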
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 9b24bf635..5ab9806ed 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -54,7 +54,7 @@ namespace internal {
#if CAN_USE_UNALIGNED_ACCESSES
#define V8_HOST_CAN_READ_UNALIGNED 1
#endif
-#elif defined(_MIPS_ARCH_MIPS32R2)
+#elif defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#else
@@ -72,7 +72,7 @@ namespace internal {
#define V8_TARGET_ARCH_IA32 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
-#elif defined(_MIPS_ARCH_MIPS32R2)
+#elif defined(__MIPSEL__)
#define V8_TARGET_ARCH_MIPS 1
#else
#error Target architecture was not detected as supported by v8
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index c0f2fda92..a5c81cec5 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -29,17 +29,33 @@
#ifndef V8_HANDLES_INL_H_
#define V8_HANDLES_INL_H_
+#include "api.h"
#include "apiutils.h"
#include "handles.h"
-#include "api.h"
+#include "isolate.h"
namespace v8 {
namespace internal {
+inline Isolate* GetIsolateForHandle(Object* obj) {
+ return Isolate::Current();
+}
+
+inline Isolate* GetIsolateForHandle(HeapObject* obj) {
+ return obj->GetIsolate();
+}
+
template<typename T>
Handle<T>::Handle(T* obj) {
ASSERT(!obj->IsFailure());
- location_ = HandleScope::CreateHandle(obj);
+ location_ = HandleScope::CreateHandle(obj, GetIsolateForHandle(obj));
+}
+
+
+template<typename T>
+Handle<T>::Handle(T* obj, Isolate* isolate) {
+ ASSERT(!obj->IsFailure());
+ location_ = HandleScope::CreateHandle(obj, isolate);
}
@@ -51,10 +67,91 @@ inline T* Handle<T>::operator*() const {
}
+HandleScope::HandleScope() {
+ Isolate* isolate = Isolate::Current();
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate->handle_scope_data();
+ isolate_ = isolate;
+ prev_next_ = current->next;
+ prev_limit_ = current->limit;
+ current->level++;
+}
+
+
+HandleScope::HandleScope(Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate->handle_scope_data();
+ isolate_ = isolate;
+ prev_next_ = current->next;
+ prev_limit_ = current->limit;
+ current->level++;
+}
+
+
+HandleScope::~HandleScope() {
+ CloseScope();
+}
+
+void HandleScope::CloseScope() {
+ ASSERT(isolate_ == Isolate::Current());
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate_->handle_scope_data();
+ current->next = prev_next_;
+ current->level--;
+ if (current->limit != prev_limit_) {
+ current->limit = prev_limit_;
+ DeleteExtensions(isolate_);
+ }
+#ifdef DEBUG
+ ZapRange(prev_next_, prev_limit_);
+#endif
+}
+
+
+template <typename T>
+Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
+ T* value = *handle_value;
+ // Throw away all handles in the current scope.
+ CloseScope();
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate_->handle_scope_data();
+ // Allocate one handle in the parent scope.
+ ASSERT(current->level > 0);
+ Handle<T> result(CreateHandle<T>(value, isolate_));
+ // Reinitialize the current scope (so that it's ready
+ // to be used or closed again).
+ prev_next_ = current->next;
+ prev_limit_ = current->limit;
+ current->level++;
+ return result;
+}
+
+
+template <typename T>
+T** HandleScope::CreateHandle(T* value, Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate->handle_scope_data();
+
+ internal::Object** cur = current->next;
+ if (cur == current->limit) cur = Extend();
+ // Update the current next field, set the value in the created
+ // handle, and return the result.
+ ASSERT(cur < current->limit);
+ current->next = cur + 1;
+
+ T** result = reinterpret_cast<T**>(cur);
+ *result = value;
+ return result;
+}
+
+
#ifdef DEBUG
inline NoHandleAllocation::NoHandleAllocation() {
v8::ImplementationUtilities::HandleScopeData* current =
- v8::ImplementationUtilities::CurrentHandleScope();
+ Isolate::Current()->handle_scope_data();
+
// Shrink the current handle scope to make it impossible to do
// handle allocations without an explicit handle scope.
current->limit = current->next;
@@ -67,10 +164,10 @@ inline NoHandleAllocation::NoHandleAllocation() {
inline NoHandleAllocation::~NoHandleAllocation() {
// Restore state in current handle scope to re-enable handle
// allocations.
- v8::ImplementationUtilities::HandleScopeData* current =
- v8::ImplementationUtilities::CurrentHandleScope();
- ASSERT_EQ(0, current->level);
- current->level = level_;
+ v8::ImplementationUtilities::HandleScopeData* data =
+ Isolate::Current()->handle_scope_data();
+ ASSERT_EQ(0, data->level);
+ data->level = level_;
}
#endif
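
All of the scope machinery above reduces to three fields of per-isolate bookkeeping: next (the first free slot), limit (the end of the current block), and level (the open-scope depth); closing a scope simply rewinds next. A toy model of that discipline, including the CloseAndEscape close-allocate-reopen dance (illustrative only; the real code grows the block via Extend() where this model just asserts):

#include <cassert>

struct ScopeData { void** next; void** limit; int level; };

class ToyScope {
 public:
  explicit ToyScope(ScopeData* d)
      : data_(d), prev_next_(d->next), prev_limit_(d->limit) {
    d->level++;
  }
  ~ToyScope() {
    // Drop every handle allocated since the scope opened.
    data_->next = prev_next_;
    data_->level--;
  }
  void** Create(void* value) {
    assert(data_->next < data_->limit);  // real code would Extend() here
    void** slot = data_->next++;
    *slot = value;
    return slot;
  }
  void** CloseAndEscape(void* value) {
    // Close this scope, then allocate one handle in the parent scope.
    data_->next = prev_next_;
    data_->level--;
    assert(data_->level > 0);
    void** slot = Create(value);
    // Reopen so the destructor's close stays balanced.
    prev_next_ = data_->next;
    prev_limit_ = data_->limit;
    data_->level++;
    return slot;
  }
 private:
  ScopeData* data_;
  void** prev_next_;
  void** prev_limit_;
};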
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 05c81bb3f..1891ef5ca 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -45,57 +45,62 @@ namespace v8 {
namespace internal {
-v8::ImplementationUtilities::HandleScopeData HandleScope::current_ =
- { NULL, NULL, 0 };
-
-
int HandleScope::NumberOfHandles() {
- int n = HandleScopeImplementer::instance()->blocks()->length();
+ Isolate* isolate = Isolate::Current();
+ HandleScopeImplementer* impl = isolate->handle_scope_implementer();
+ int n = impl->blocks()->length();
if (n == 0) return 0;
return ((n - 1) * kHandleBlockSize) + static_cast<int>(
- (current_.next - HandleScopeImplementer::instance()->blocks()->last()));
+ (isolate->handle_scope_data()->next - impl->blocks()->last()));
}
Object** HandleScope::Extend() {
- Object** result = current_.next;
+ Isolate* isolate = Isolate::Current();
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate->handle_scope_data();
- ASSERT(result == current_.limit);
+ Object** result = current->next;
+
+ ASSERT(result == current->limit);
// Make sure there's at least one scope on the stack and that the
// top of the scope stack isn't a barrier.
- if (current_.level == 0) {
+ if (current->level == 0) {
Utils::ReportApiFailure("v8::HandleScope::CreateHandle()",
"Cannot create a handle without a HandleScope");
return NULL;
}
- HandleScopeImplementer* impl = HandleScopeImplementer::instance();
+ HandleScopeImplementer* impl = isolate->handle_scope_implementer();
// If there's more room in the last block, we use that. This is used
// for fast creation of scopes after scope barriers.
if (!impl->blocks()->is_empty()) {
Object** limit = &impl->blocks()->last()[kHandleBlockSize];
- if (current_.limit != limit) {
- current_.limit = limit;
- ASSERT(limit - current_.next < kHandleBlockSize);
+ if (current->limit != limit) {
+ current->limit = limit;
+ ASSERT(limit - current->next < kHandleBlockSize);
}
}
// If we still haven't found a slot for the handle, we extend the
// current handle scope by allocating a new handle block.
- if (result == current_.limit) {
+ if (result == current->limit) {
// If there's a spare block, use it for growing the current scope.
result = impl->GetSpareOrNewBlock();
// Add the extension to the global list of blocks, but count the
// extension as part of the current scope.
impl->blocks()->Add(result);
- current_.limit = &result[kHandleBlockSize];
+ current->limit = &result[kHandleBlockSize];
}
return result;
}
-void HandleScope::DeleteExtensions() {
- HandleScopeImplementer::instance()->DeleteExtensions(current_.limit);
+void HandleScope::DeleteExtensions(Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate->handle_scope_data();
+ isolate->handle_scope_implementer()->DeleteExtensions(current->limit);
}
@@ -108,37 +113,44 @@ void HandleScope::ZapRange(Object** start, Object** end) {
Address HandleScope::current_level_address() {
- return reinterpret_cast<Address>(&current_.level);
+ return reinterpret_cast<Address>(
+ &Isolate::Current()->handle_scope_data()->level);
}
Address HandleScope::current_next_address() {
- return reinterpret_cast<Address>(&current_.next);
+ return reinterpret_cast<Address>(
+ &Isolate::Current()->handle_scope_data()->next);
}
Address HandleScope::current_limit_address() {
- return reinterpret_cast<Address>(&current_.limit);
+ return reinterpret_cast<Address>(
+ &Isolate::Current()->handle_scope_data()->limit);
}
Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray> content,
Handle<JSArray> array) {
- CALL_HEAP_FUNCTION(content->AddKeysFromJSArray(*array), FixedArray);
+ CALL_HEAP_FUNCTION(content->GetIsolate(),
+ content->AddKeysFromJSArray(*array), FixedArray);
}
Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
Handle<FixedArray> second) {
- CALL_HEAP_FUNCTION(first->UnionOfKeys(*second), FixedArray);
+ CALL_HEAP_FUNCTION(first->GetIsolate(),
+ first->UnionOfKeys(*second), FixedArray);
}
Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
Handle<JSFunction> constructor,
Handle<JSGlobalProxy> global) {
- CALL_HEAP_FUNCTION(Heap::ReinitializeJSGlobalProxy(*constructor, *global),
- JSGlobalProxy);
+ CALL_HEAP_FUNCTION(
+ constructor->GetIsolate(),
+ constructor->GetHeap()->ReinitializeJSGlobalProxy(*constructor, *global),
+ JSGlobalProxy);
}
@@ -153,7 +165,8 @@ void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
func->shared()->set_expected_nof_properties(nof);
if (func->has_initial_map()) {
Handle<Map> new_initial_map =
- Factory::CopyMapDropTransitions(Handle<Map>(func->initial_map()));
+ func->GetIsolate()->factory()->CopyMapDropTransitions(
+ Handle<Map>(func->initial_map()));
new_initial_map->set_unused_property_fields(nof);
func->set_initial_map(*new_initial_map);
}
@@ -161,7 +174,8 @@ void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value) {
- CALL_HEAP_FUNCTION_VOID(func->SetPrototype(*value));
+ CALL_HEAP_FUNCTION_VOID(func->GetIsolate(),
+ func->SetPrototype(*value));
}
@@ -193,58 +207,66 @@ void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
void NormalizeProperties(Handle<JSObject> object,
PropertyNormalizationMode mode,
int expected_additional_properties) {
- CALL_HEAP_FUNCTION_VOID(object->NormalizeProperties(
- mode,
- expected_additional_properties));
+ CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
+ object->NormalizeProperties(
+ mode,
+ expected_additional_properties));
}
void NormalizeElements(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION_VOID(object->NormalizeElements());
+ CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
+ object->NormalizeElements());
}
void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields) {
CALL_HEAP_FUNCTION_VOID(
+ object->GetIsolate(),
object->TransformToFastProperties(unused_property_fields));
}
-void NumberDictionarySet(Handle<NumberDictionary> dictionary,
- uint32_t index,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION_VOID(dictionary->Set(index, *value, details));
+Handle<NumberDictionary> NumberDictionarySet(
+ Handle<NumberDictionary> dictionary,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyDetails details) {
+ CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
+ dictionary->Set(index, *value, details),
+ NumberDictionary);
}
void FlattenString(Handle<String> string) {
- CALL_HEAP_FUNCTION_VOID(string->TryFlatten());
+ CALL_HEAP_FUNCTION_VOID(string->GetIsolate(), string->TryFlatten());
}
Handle<String> FlattenGetString(Handle<String> string) {
- CALL_HEAP_FUNCTION(string->TryFlatten(), String);
+ CALL_HEAP_FUNCTION(string->GetIsolate(), string->TryFlatten(), String);
}
Handle<Object> SetPrototype(Handle<JSFunction> function,
Handle<Object> prototype) {
ASSERT(function->should_have_prototype());
- CALL_HEAP_FUNCTION(Accessors::FunctionSetPrototype(*function,
+ CALL_HEAP_FUNCTION(function->GetIsolate(),
+ Accessors::FunctionSetPrototype(*function,
*prototype,
NULL),
Object);
}
-Handle<Object> SetProperty(Handle<JSObject> object,
+Handle<Object> SetProperty(Handle<JSReceiver> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict) {
- CALL_HEAP_FUNCTION(object->SetProperty(*key, *value, attributes, strict),
+ StrictModeFlag strict_mode) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->SetProperty(*key, *value, attributes, strict_mode),
Object);
}
@@ -253,9 +275,12 @@ Handle<Object> SetProperty(Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict) {
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(
- Runtime::SetObjectProperty(object, key, value, attributes, strict),
+ isolate,
+ Runtime::SetObjectProperty(
+ isolate, object, key, value, attributes, strict_mode),
Object);
}
@@ -264,8 +289,12 @@ Handle<Object> ForceSetProperty(Handle<JSObject> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attributes) {
+ Isolate* isolate = object->GetIsolate();
CALL_HEAP_FUNCTION(
- Runtime::ForceSetObjectProperty(object, key, value, attributes), Object);
+ isolate,
+ Runtime::ForceSetObjectProperty(
+ isolate, object, key, value, attributes),
+ Object);
}
@@ -273,14 +302,18 @@ Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyDetails details) {
- CALL_HEAP_FUNCTION(object->SetNormalizedProperty(*key, *value, details),
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->SetNormalizedProperty(*key, *value, details),
Object);
}
Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
Handle<Object> key) {
- CALL_HEAP_FUNCTION(Runtime::ForceDeleteObjectProperty(object, key), Object);
+ Isolate* isolate = object->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ Runtime::ForceDeleteObjectProperty(isolate, object, key),
+ Object);
}
@@ -289,8 +322,10 @@ Handle<Object> SetLocalPropertyIgnoreAttributes(
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION(object->
- SetLocalPropertyIgnoreAttributes(*key, *value, attributes), Object);
+ CALL_HEAP_FUNCTION(
+ object->GetIsolate(),
+ object->SetLocalPropertyIgnoreAttributes(*key, *value, attributes),
+ Object);
}
@@ -298,10 +333,11 @@ void SetLocalPropertyNoThrow(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes) {
- ASSERT(!Top::has_pending_exception());
+ Isolate* isolate = object->GetIsolate();
+ ASSERT(!isolate->has_pending_exception());
CHECK(!SetLocalPropertyIgnoreAttributes(
object, key, value, attributes).is_null());
- CHECK(!Top::has_pending_exception());
+ CHECK(!isolate->has_pending_exception());
}
@@ -309,31 +345,47 @@ Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict) {
- CALL_HEAP_FUNCTION(object->SetPropertyWithInterceptor(*key,
+ StrictModeFlag strict_mode) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->SetPropertyWithInterceptor(*key,
*value,
attributes,
- strict),
+ strict_mode),
Object);
}
-Handle<Object> GetProperty(Handle<JSObject> obj,
+Handle<Object> GetProperty(Handle<JSReceiver> obj,
const char* name) {
- Handle<String> str = Factory::LookupAsciiSymbol(name);
- CALL_HEAP_FUNCTION(obj->GetProperty(*str), Object);
+ Isolate* isolate = obj->GetIsolate();
+ Handle<String> str = isolate->factory()->LookupAsciiSymbol(name);
+ CALL_HEAP_FUNCTION(isolate, obj->GetProperty(*str), Object);
}
Handle<Object> GetProperty(Handle<Object> obj,
Handle<Object> key) {
- CALL_HEAP_FUNCTION(Runtime::GetObjectProperty(obj, key), Object);
+ Isolate* isolate = Isolate::Current();
+ CALL_HEAP_FUNCTION(isolate,
+ Runtime::GetObjectProperty(isolate, obj, key), Object);
+}
+
+
+Handle<Object> GetProperty(Handle<JSReceiver> obj,
+ Handle<String> name,
+ LookupResult* result) {
+ PropertyAttributes attributes;
+ Isolate* isolate = Isolate::Current();
+ CALL_HEAP_FUNCTION(isolate,
+ obj->GetProperty(*obj, result, *name, &attributes),
+ Object);
}
Handle<Object> GetElement(Handle<Object> obj,
uint32_t index) {
- CALL_HEAP_FUNCTION(Runtime::GetElement(obj, index), Object);
+ Isolate* isolate = Isolate::Current();
+ CALL_HEAP_FUNCTION(isolate, Runtime::GetElement(obj, index), Object);
}
@@ -341,7 +393,9 @@ Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<String> name,
PropertyAttributes* attributes) {
- CALL_HEAP_FUNCTION(holder->GetPropertyWithInterceptor(*receiver,
+ Isolate* isolate = receiver->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ holder->GetPropertyWithInterceptor(*receiver,
*name,
attributes),
Object);
@@ -356,15 +410,22 @@ Handle<Object> GetPrototype(Handle<Object> obj) {
Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
const bool skip_hidden_prototypes = false;
- CALL_HEAP_FUNCTION(obj->SetPrototype(*value, skip_hidden_prototypes), Object);
+ CALL_HEAP_FUNCTION(obj->GetIsolate(),
+ obj->SetPrototype(*value, skip_hidden_prototypes), Object);
+}
+
+
+Handle<Object> PreventExtensions(Handle<JSObject> object) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object);
}
Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
bool create_if_needed) {
+ Isolate* isolate = obj->GetIsolate();
Object* holder = obj->BypassGlobalProxy();
- if (holder->IsUndefined()) return Factory::undefined_value();
- obj = Handle<JSObject>(JSObject::cast(holder));
+ if (holder->IsUndefined()) return isolate->factory()->undefined_value();
+ obj = Handle<JSObject>(JSObject::cast(holder), isolate);
if (obj->HasFastProperties()) {
// If the object has fast properties, check whether the first slot
@@ -373,10 +434,11 @@ Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
// code zero) it will always occupy the first entry if present.
DescriptorArray* descriptors = obj->map()->instance_descriptors();
if ((descriptors->number_of_descriptors() > 0) &&
- (descriptors->GetKey(0) == Heap::hidden_symbol()) &&
+ (descriptors->GetKey(0) == isolate->heap()->hidden_symbol()) &&
descriptors->IsProperty(0)) {
ASSERT(descriptors->GetType(0) == FIELD);
- return Handle<Object>(obj->FastPropertyAt(descriptors->GetFieldIndex(0)));
+ return Handle<Object>(obj->FastPropertyAt(descriptors->GetFieldIndex(0)),
+ isolate);
}
}
@@ -387,32 +449,39 @@ Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
// Hidden properties object not found. Allocate a new hidden properties
// object if requested. Otherwise return the undefined value.
if (create_if_needed) {
- Handle<Object> hidden_obj = Factory::NewJSObject(Top::object_function());
- CALL_HEAP_FUNCTION(obj->SetHiddenPropertiesObject(*hidden_obj), Object);
+ Handle<Object> hidden_obj =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ CALL_HEAP_FUNCTION(isolate,
+ obj->SetHiddenPropertiesObject(*hidden_obj), Object);
} else {
- return Factory::undefined_value();
+ return isolate->factory()->undefined_value();
}
}
- return Handle<Object>(obj->GetHiddenPropertiesObject());
+ return Handle<Object>(obj->GetHiddenPropertiesObject(), isolate);
}
Handle<Object> DeleteElement(Handle<JSObject> obj,
uint32_t index) {
- CALL_HEAP_FUNCTION(obj->DeleteElement(index, JSObject::NORMAL_DELETION),
+ CALL_HEAP_FUNCTION(obj->GetIsolate(),
+ obj->DeleteElement(index, JSObject::NORMAL_DELETION),
Object);
}
Handle<Object> DeleteProperty(Handle<JSObject> obj,
Handle<String> prop) {
- CALL_HEAP_FUNCTION(obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
+ CALL_HEAP_FUNCTION(obj->GetIsolate(),
+ obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
Object);
}
Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index) {
- CALL_HEAP_FUNCTION(Heap::LookupSingleCharacterStringFromCode(index), Object);
+ Isolate* isolate = Isolate::Current();
+ CALL_HEAP_FUNCTION(
+ isolate,
+ isolate->heap()->LookupSingleCharacterStringFromCode(index), Object);
}
@@ -420,14 +489,16 @@ Handle<String> SubString(Handle<String> str,
int start,
int end,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(str->SubString(start, end, pretenure), String);
+ CALL_HEAP_FUNCTION(str->GetIsolate(),
+ str->SubString(start, end, pretenure), String);
}
Handle<Object> SetElement(Handle<JSObject> object,
uint32_t index,
- Handle<Object> value) {
- if (object->HasPixelElements() || object->HasExternalArrayElements()) {
+ Handle<Object> value,
+ StrictModeFlag strict_mode) {
+ if (object->HasExternalArrayElements()) {
if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
bool has_exception;
Handle<Object> number = Execution::ToNumber(value, &has_exception);
@@ -435,31 +506,37 @@ Handle<Object> SetElement(Handle<JSObject> object,
value = number;
}
}
- CALL_HEAP_FUNCTION(object->SetElement(index, *value), Object);
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->SetElement(index, *value, strict_mode, true),
+ Object);
}
Handle<Object> SetOwnElement(Handle<JSObject> object,
uint32_t index,
- Handle<Object> value) {
- ASSERT(!object->HasPixelElements());
+ Handle<Object> value,
+ StrictModeFlag strict_mode) {
ASSERT(!object->HasExternalArrayElements());
- CALL_HEAP_FUNCTION(object->SetElement(index, *value, false), Object);
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->SetElement(index, *value, strict_mode, false),
+ Object);
}
Handle<JSObject> Copy(Handle<JSObject> obj) {
- CALL_HEAP_FUNCTION(Heap::CopyJSObject(*obj), JSObject);
+ Isolate* isolate = obj->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ isolate->heap()->CopyJSObject(*obj), JSObject);
}
Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
- CALL_HEAP_FUNCTION(obj->DefineAccessor(*info), Object);
+ CALL_HEAP_FUNCTION(obj->GetIsolate(), obj->DefineAccessor(*info), Object);
}
// Wrappers for scripts are kept alive and cached in weak global
-// handles referred from proxy objects held by the scripts as long as
+// handles referred from foreign objects held by the scripts as long as
// they are used. When they are not used anymore, the garbage
// collector will call the weak callback on the global handle
// associated with the wrapper and get rid of both the wrapper and the
@@ -472,34 +549,36 @@ static void ClearWrapperCache(Persistent<v8::Value> handle, void*) {
#endif
Handle<Object> cache = Utils::OpenHandle(*handle);
JSValue* wrapper = JSValue::cast(*cache);
- Proxy* proxy = Script::cast(wrapper->value())->wrapper();
- ASSERT(proxy->proxy() == reinterpret_cast<Address>(cache.location()));
- proxy->set_proxy(0);
- GlobalHandles::Destroy(cache.location());
- Counters::script_wrappers.Decrement();
+ Foreign* foreign = Script::cast(wrapper->value())->wrapper();
+ ASSERT(foreign->address() == reinterpret_cast<Address>(cache.location()));
+ foreign->set_address(0);
+ Isolate* isolate = Isolate::Current();
+ isolate->global_handles()->Destroy(cache.location());
+ isolate->counters()->script_wrappers()->Decrement();
}
Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
- if (script->wrapper()->proxy() != NULL) {
+ if (script->wrapper()->address() != NULL) {
// Return the script wrapper directly from the cache.
return Handle<JSValue>(
- reinterpret_cast<JSValue**>(script->wrapper()->proxy()));
+ reinterpret_cast<JSValue**>(script->wrapper()->address()));
}
-
+ Isolate* isolate = Isolate::Current();
// Construct a new script wrapper.
- Counters::script_wrappers.Increment();
- Handle<JSFunction> constructor = Top::script_function();
+ isolate->counters()->script_wrappers()->Increment();
+ Handle<JSFunction> constructor = isolate->script_function();
Handle<JSValue> result =
- Handle<JSValue>::cast(Factory::NewJSObject(constructor));
+ Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor));
result->set_value(*script);
// Create a new weak global handle and use it to cache the wrapper
// for future use. The cache will automatically be cleared by the
// garbage collector when it is not used anymore.
- Handle<Object> handle = GlobalHandles::Create(*result);
- GlobalHandles::MakeWeak(handle.location(), NULL, &ClearWrapperCache);
- script->wrapper()->set_proxy(reinterpret_cast<Address>(handle.location()));
+ Handle<Object> handle = isolate->global_handles()->Create(*result);
+ isolate->global_handles()->MakeWeak(handle.location(), NULL,
+ &ClearWrapperCache);
+ script->wrapper()->set_address(reinterpret_cast<Address>(handle.location()));
return result;
}
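
The wrapper cache above is the standard weak-handle idiom: park the wrapper in a weak persistent handle and let the weak callback clear the cache slot once the GC finds no strong references left. A sketch of the same idiom against the public API of this era (Persistent::New/MakeWeak and the WeakReferenceCallback typedef; the single-slot cache here is illustrative, not Script's actual layout):

#include <v8.h>

using namespace v8;

static Persistent<Object> wrapper_cache;  // hypothetical single-slot cache

static void ClearCache(Persistent<Value> handle, void* /*parameter*/) {
  // Only weak handles keep the wrapper alive: drop the global cell and
  // empty the cache slot so the next lookup rebuilds the wrapper.
  handle.Dispose();
  wrapper_cache.Clear();
}

Handle<Object> GetWrapper(Handle<ObjectTemplate> templ) {
  if (!wrapper_cache.IsEmpty()) return wrapper_cache;  // cache hit
  Handle<Object> wrapper = templ->NewInstance();
  wrapper_cache = Persistent<Object>::New(wrapper);
  wrapper_cache.MakeWeak(NULL, &ClearCache);  // GC may reclaim it later
  return wrapper_cache;
}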
@@ -509,20 +588,22 @@ Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
void InitScriptLineEnds(Handle<Script> script) {
if (!script->line_ends()->IsUndefined()) return;
+ Isolate* isolate = script->GetIsolate();
+
if (!script->source()->IsString()) {
ASSERT(script->source()->IsUndefined());
- Handle<FixedArray> empty = Factory::NewFixedArray(0);
+ Handle<FixedArray> empty = isolate->factory()->NewFixedArray(0);
script->set_line_ends(*empty);
ASSERT(script->line_ends()->IsFixedArray());
return;
}
- Handle<String> src(String::cast(script->source()));
+ Handle<String> src(String::cast(script->source()), isolate);
Handle<FixedArray> array = CalculateLineEnds(src, true);
- if (*array != Heap::empty_fixed_array()) {
- array->set_map(Heap::fixed_cow_array_map());
+ if (*array != isolate->heap()->empty_fixed_array()) {
+ array->set_map(isolate->heap()->fixed_cow_array_map());
}
script->set_line_ends(*array);
@@ -531,11 +612,12 @@ void InitScriptLineEnds(Handle<Script> script) {
template <typename SourceChar>
-static void CalculateLineEnds(List<int>* line_ends,
+static void CalculateLineEnds(Isolate* isolate,
+ List<int>* line_ends,
Vector<const SourceChar> src,
bool with_last_line) {
const int src_len = src.length();
- StringSearch<char, SourceChar> search(CStrVector("\n"));
+ StringSearch<char, SourceChar> search(isolate, CStrVector("\n"));
// Find and record line ends.
int position = 0;
@@ -560,17 +642,24 @@ Handle<FixedArray> CalculateLineEnds(Handle<String> src,
// length of (unpacked) code.
int line_count_estimate = src->length() >> 4;
List<int> line_ends(line_count_estimate);
+ Isolate* isolate = src->GetIsolate();
{
AssertNoAllocation no_heap_allocation; // ensure vectors stay valid.
// Dispatch on type of strings.
if (src->IsAsciiRepresentation()) {
- CalculateLineEnds(&line_ends, src->ToAsciiVector(), with_last_line);
+ CalculateLineEnds(isolate,
+ &line_ends,
+ src->ToAsciiVector(),
+ with_last_line);
} else {
- CalculateLineEnds(&line_ends, src->ToUC16Vector(), with_last_line);
+ CalculateLineEnds(isolate,
+ &line_ends,
+ src->ToUC16Vector(),
+ with_last_line);
}
}
int line_count = line_ends.length();
- Handle<FixedArray> array = Factory::NewFixedArray(line_count);
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(line_count);
for (int i = 0; i < line_count; i++) {
array->set(i, Smi::FromInt(line_ends[i]));
}
@@ -636,17 +725,18 @@ void CustomArguments::IterateInstance(ObjectVisitor* v) {
// Compute the property keys from the interceptor.
v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object) {
+ Isolate* isolate = receiver->GetIsolate();
Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
- CustomArguments args(interceptor->data(), *receiver, *object);
+ CustomArguments args(isolate, interceptor->data(), *receiver, *object);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
v8::NamedPropertyEnumerator enum_fun =
v8::ToCData<v8::NamedPropertyEnumerator>(interceptor->enumerator());
- LOG(ApiObjectAccess("interceptor-named-enum", *object));
+ LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object));
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = enum_fun(info);
}
}
@@ -657,17 +747,18 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
// Compute the element keys from the interceptor.
v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object) {
+ Isolate* isolate = receiver->GetIsolate();
Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
- CustomArguments args(interceptor->data(), *receiver, *object);
+ CustomArguments args(isolate, interceptor->data(), *receiver, *object);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
v8::IndexedPropertyEnumerator enum_fun =
v8::ToCData<v8::IndexedPropertyEnumerator>(interceptor->enumerator());
- LOG(ApiObjectAccess("interceptor-indexed-enum", *object));
+ LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object));
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = enum_fun(info);
}
}
@@ -688,31 +779,33 @@ static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
KeyCollectionType type) {
USE(ContainsOnlyValidKeys);
- Handle<FixedArray> content = Factory::empty_fixed_array();
- Handle<JSObject> arguments_boilerplate =
- Handle<JSObject>(
- Top::context()->global_context()->arguments_boilerplate());
- Handle<JSFunction> arguments_function =
- Handle<JSFunction>(
- JSFunction::cast(arguments_boilerplate->map()->constructor()));
+ Isolate* isolate = object->GetIsolate();
+ Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
+ Handle<JSObject> arguments_boilerplate = Handle<JSObject>(
+ isolate->context()->global_context()->arguments_boilerplate(),
+ isolate);
+ Handle<JSFunction> arguments_function = Handle<JSFunction>(
+ JSFunction::cast(arguments_boilerplate->map()->constructor()),
+ isolate);
// Only collect keys if access is permitted.
for (Handle<Object> p = object;
- *p != Heap::null_value();
- p = Handle<Object>(p->GetPrototype())) {
- Handle<JSObject> current(JSObject::cast(*p));
+ *p != isolate->heap()->null_value();
+ p = Handle<Object>(p->GetPrototype(), isolate)) {
+ Handle<JSObject> current(JSObject::cast(*p), isolate);
// Check access rights if required.
if (current->IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(*current, Heap::undefined_value(),
- v8::ACCESS_KEYS)) {
- Top::ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
+ !isolate->MayNamedAccess(*current,
+ isolate->heap()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
break;
}
// Compute the element keys.
Handle<FixedArray> element_keys =
- Factory::NewFixedArray(current->NumberOfEnumElements());
+ isolate->factory()->NewFixedArray(current->NumberOfEnumElements());
current->GetEnumElementKeys(*element_keys);
content = UnionOfKeys(content, element_keys);
ASSERT(ContainsOnlyValidKeys(content));
@@ -766,28 +859,31 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
Handle<JSArray> GetKeysFor(Handle<JSObject> object) {
- Counters::for_in.Increment();
+ Isolate* isolate = object->GetIsolate();
+ isolate->counters()->for_in()->Increment();
Handle<FixedArray> elements = GetKeysInFixedArrayFor(object,
INCLUDE_PROTOS);
- return Factory::NewJSArrayWithElements(elements);
+ return isolate->factory()->NewJSArrayWithElements(elements);
}
Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
bool cache_result) {
int index = 0;
+ Isolate* isolate = object->GetIsolate();
if (object->HasFastProperties()) {
if (object->map()->instance_descriptors()->HasEnumCache()) {
- Counters::enum_cache_hits.Increment();
+ isolate->counters()->enum_cache_hits()->Increment();
DescriptorArray* desc = object->map()->instance_descriptors();
- return Handle<FixedArray>(FixedArray::cast(desc->GetEnumCache()));
+ return Handle<FixedArray>(FixedArray::cast(desc->GetEnumCache()),
+ isolate);
}
- Counters::enum_cache_misses.Increment();
+ isolate->counters()->enum_cache_misses()->Increment();
int num_enum = object->NumberOfEnumProperties();
- Handle<FixedArray> storage = Factory::NewFixedArray(num_enum);
- Handle<FixedArray> sort_array = Factory::NewFixedArray(num_enum);
+ Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
+ Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(object->map()->instance_descriptors());
+ Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
for (int i = 0; i < descs->number_of_descriptors(); i++) {
if (descs->IsProperty(i) && !descs->IsDontEnum(i)) {
(*storage)->set(index, descs->GetKey(i));
@@ -799,7 +895,8 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
(*storage)->SortPairs(*sort_array, sort_array->length());
if (cache_result) {
Handle<FixedArray> bridge_storage =
- Factory::NewFixedArray(DescriptorArray::kEnumCacheBridgeLength);
+ isolate->factory()->NewFixedArray(
+ DescriptorArray::kEnumCacheBridgeLength);
DescriptorArray* desc = object->map()->instance_descriptors();
desc->SetEnumCache(*bridge_storage, *storage);
}
@@ -807,8 +904,8 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
return storage;
} else {
int num_enum = object->NumberOfEnumProperties();
- Handle<FixedArray> storage = Factory::NewFixedArray(num_enum);
- Handle<FixedArray> sort_array = Factory::NewFixedArray(num_enum);
+ Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
+ Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
object->property_dictionary()->CopyEnumKeysTo(*storage, *sort_array);
return storage;
}
@@ -825,10 +922,12 @@ static bool CompileLazyHelper(CompilationInfo* info,
ClearExceptionFlag flag) {
// Compile the source information to a code object.
ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
- ASSERT(!Top::has_pending_exception());
+ ASSERT(!info->isolate()->has_pending_exception());
bool result = Compiler::CompileLazy(info);
- ASSERT(result != Top::has_pending_exception());
- if (!result && flag == CLEAR_EXCEPTION) Top::clear_pending_exception();
+ ASSERT(result != Isolate::Current()->has_pending_exception());
+ if (!result && flag == CLEAR_EXCEPTION) {
+ info->isolate()->clear_pending_exception();
+ }
return result;
}
@@ -877,34 +976,4 @@ bool CompileOptimized(Handle<JSFunction> function,
return CompileLazyHelper(&info, flag);
}
-
-OptimizedObjectForAddingMultipleProperties::
-OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
- int expected_additional_properties,
- bool condition) {
- object_ = object;
- if (condition && object_->HasFastProperties() && !object->IsJSGlobalProxy()) {
- // Normalize the properties of object to avoid n^2 behavior
- // when extending the object multiple properties. Indicate the number of
- // properties to be added.
- unused_property_fields_ = object->map()->unused_property_fields();
- NormalizeProperties(object_,
- KEEP_INOBJECT_PROPERTIES,
- expected_additional_properties);
- has_been_transformed_ = true;
-
- } else {
- has_been_transformed_ = false;
- }
-}
-
-
-OptimizedObjectForAddingMultipleProperties::
-~OptimizedObjectForAddingMultipleProperties() {
- // Reoptimize the object to allow fast property access.
- if (has_been_transformed_) {
- TransformToFastProperties(object_, unused_property_fields_);
- }
-}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 9d3588bf9..5c64cf501 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,7 @@
#ifndef V8_HANDLES_H_
#define V8_HANDLES_H_
+#include "allocation.h"
#include "apiutils.h"
namespace v8 {
@@ -44,6 +45,7 @@ class Handle {
public:
INLINE(explicit Handle(T** location)) { location_ = location; }
INLINE(explicit Handle(T* obj));
+ INLINE(Handle(T* obj, Isolate* isolate));
INLINE(Handle()) : location_(NULL) {}
@@ -82,7 +84,7 @@ class Handle {
}
static Handle<T> null() { return Handle<T>(); }
- bool is_null() { return location_ == NULL; }
+ bool is_null() const { return location_ == NULL; }
// Closes the given scope, but lets this handle escape. See
// implementation in api.h.
@@ -107,34 +109,20 @@ class Handle {
// for which the handle scope has been deleted is undefined.
class HandleScope {
public:
- HandleScope() : prev_next_(current_.next), prev_limit_(current_.limit) {
- current_.level++;
- }
+ inline HandleScope();
+ explicit inline HandleScope(Isolate* isolate);
- ~HandleScope() {
- CloseScope();
- }
+ inline ~HandleScope();
// Counts the number of allocated handles.
static int NumberOfHandles();
// Creates a new handle with the given value.
template <typename T>
- static inline T** CreateHandle(T* value) {
- internal::Object** cur = current_.next;
- if (cur == current_.limit) cur = Extend();
- // Update the current next field, set the value in the created
- // handle, and return the result.
- ASSERT(cur < current_.limit);
- current_.next = cur + 1;
-
- T** result = reinterpret_cast<T**>(cur);
- *result = value;
- return result;
- }
+ static inline T** CreateHandle(T* value, Isolate* isolate);
// Deallocates any extensions used by the current scope.
- static void DeleteExtensions();
+ static void DeleteExtensions(Isolate* isolate);
static Address current_next_address();
static Address current_limit_address();
@@ -145,20 +133,9 @@ class HandleScope {
// a Handle backed by the parent scope holding the
// value of the argument handle.
template <typename T>
- Handle<T> CloseAndEscape(Handle<T> handle_value) {
- T* value = *handle_value;
- // Throw away all handles in the current scope.
- CloseScope();
- // Allocate one handle in the parent scope.
- ASSERT(current_.level > 0);
- Handle<T> result(CreateHandle<T>(value));
- // Reinitialize the current scope (so that it's ready
- // to be used or closed again).
- prev_next_ = current_.next;
- prev_limit_ = current_.limit;
- current_.level++;
- return result;
- }
+ Handle<T> CloseAndEscape(Handle<T> handle_value);
+
+ Isolate* isolate() { return isolate_; }
private:
// Prevent heap allocation or illegal handle scopes.
@@ -167,21 +144,9 @@ class HandleScope {
void* operator new(size_t size);
void operator delete(void* size_t);
- inline void CloseScope() {
- current_.next = prev_next_;
- current_.level--;
- if (current_.limit != prev_limit_) {
- current_.limit = prev_limit_;
- DeleteExtensions();
- }
-#ifdef DEBUG
- ZapRange(prev_next_, prev_limit_);
-#endif
- }
+ inline void CloseScope();
- static v8::ImplementationUtilities::HandleScopeData current_;
- // Holds values on entry. The prev_next_ value is never NULL
- // on_entry, but is set to NULL when this scope is closed.
+ Isolate* isolate_;
Object** prev_next_;
Object** prev_limit_;
@@ -208,10 +173,11 @@ void NormalizeProperties(Handle<JSObject> object,
void NormalizeElements(Handle<JSObject> object);
void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields);
-void NumberDictionarySet(Handle<NumberDictionary> dictionary,
- uint32_t index,
- Handle<Object> value,
- PropertyDetails details);
+MUST_USE_RESULT Handle<NumberDictionary> NumberDictionarySet(
+ Handle<NumberDictionary> dictionary,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyDetails details);
// Flattens a string.
void FlattenString(Handle<String> str);
@@ -220,17 +186,17 @@ void FlattenString(Handle<String> str);
// string.
Handle<String> FlattenGetString(Handle<String> str);
-Handle<Object> SetProperty(Handle<JSObject> object,
+Handle<Object> SetProperty(Handle<JSReceiver> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict);
+ StrictModeFlag strict_mode);
Handle<Object> SetProperty(Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict);
+ StrictModeFlag strict_mode);
Handle<Object> ForceSetProperty(Handle<JSObject> object,
Handle<Object> key,
@@ -262,22 +228,29 @@ Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict);
+ StrictModeFlag strict_mode);
-Handle<Object> SetElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value);
+MUST_USE_RESULT Handle<Object> SetElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode);
Handle<Object> SetOwnElement(Handle<JSObject> object,
uint32_t index,
- Handle<Object> value);
+ Handle<Object> value,
+ StrictModeFlag strict_mode);
-Handle<Object> GetProperty(Handle<JSObject> obj,
+Handle<Object> GetProperty(Handle<JSReceiver> obj,
const char* name);
Handle<Object> GetProperty(Handle<Object> obj,
Handle<Object> key);
+Handle<Object> GetProperty(Handle<JSReceiver> obj,
+ Handle<String> name,
+ LookupResult* result);
+
+
Handle<Object> GetElement(Handle<Object> obj,
uint32_t index);
@@ -368,6 +341,7 @@ Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
Handle<Object> SetPrototype(Handle<JSFunction> function,
Handle<Object> prototype);
+Handle<Object> PreventExtensions(Handle<JSObject> object);
// Does lazy compilation of the given function. Returns true on success and
// false if the compilation resulted in a stack overflow.
@@ -400,25 +374,6 @@ class NoHandleAllocation BASE_EMBEDDED {
#endif
};
-
-// ----------------------------------------------------------------------------
-
-
-// Stack allocated wrapper call for optimizing adding multiple
-// properties to an object.
-class OptimizedObjectForAddingMultipleProperties BASE_EMBEDDED {
- public:
- OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
- int expected_property_count,
- bool condition = true);
- ~OptimizedObjectForAddingMultipleProperties();
- private:
- bool has_been_transformed_; // Tells whether the object has been transformed.
- int unused_property_fields_; // Captures the unused number of field.
- Handle<JSObject> object_; // The object being optimized.
-};
-
-
} } // namespace v8::internal
#endif // V8_HANDLES_H_
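
A note on the NumberDictionarySet signature change declared above: insertion can replace the dictionary with a grown copy, so the caller has to continue through the returned handle, and MUST_USE_RESULT turns forgetting that into a compiler warning. A generic, self-contained analogue of the contract (illustrative, not V8 code):

#include <cstddef>
#include <vector>

// Returns the (possibly new) container; the caller must rebind its
// pointer, mirroring MUST_USE_RESULT NumberDictionarySet.
static std::vector<int>* DictSet(std::vector<int>* d, size_t index, int value) {
  if (index >= d->size()) {
    std::vector<int>* grown = new std::vector<int>(*d);
    grown->resize(index + 1);
    delete d;  // the old object is gone; stale pointers would dangle
    d = grown;
  }
  (*d)[index] = value;
  return d;
}

// Usage: d = DictSet(d, 10, 42);  // never ignore the result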
diff --git a/deps/v8/src/hashmap.h b/deps/v8/src/hashmap.h
index 27989889c..5c13212eb 100644
--- a/deps/v8/src/hashmap.h
+++ b/deps/v8/src/hashmap.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,8 @@
#ifndef V8_HASHMAP_H_
#define V8_HASHMAP_H_
+#include "allocation.h"
+
namespace v8 {
namespace internal {
@@ -55,9 +57,9 @@ class HashMap {
// initial_capacity is the size of the initial hash map;
// it must be a power of 2 (and thus must not be 0).
- HashMap(MatchFun match,
- Allocator* allocator = &DefaultAllocator,
- uint32_t initial_capacity = 8);
+ explicit HashMap(MatchFun match,
+ Allocator* allocator = &DefaultAllocator,
+ uint32_t initial_capacity = 8);
~HashMap();
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 7b91e8715..3f5554e2c 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,12 +29,22 @@
#define V8_HEAP_INL_H_
#include "heap.h"
+#include "isolate.h"
+#include "list-inl.h"
#include "objects.h"
#include "v8-counters.h"
namespace v8 {
namespace internal {
+void PromotionQueue::insert(HeapObject* target, int size) {
+ *(--rear_) = reinterpret_cast<intptr_t>(target);
+ *(--rear_) = size;
+ // Assert no overflow into live objects.
+ ASSERT(reinterpret_cast<Address>(rear_) >= HEAP->new_space()->top());
+}
+
+
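
PromotionQueue::insert above writes each (target, size) pair by pre-decrementing rear_, so the queue grows downward through otherwise-unused new-space memory toward the allocation top, which is exactly what the ASSERT guards. A minimal downward-growing queue in the same shape (illustrative, fixed backing store):

#include <cassert>
#include <cstdint>

class DownwardQueue {
 public:
  DownwardQueue(intptr_t* floor, int capacity)
      : floor_(floor), rear_(floor + capacity) {}

  void insert(void* target, int size) {
    *(--rear_) = reinterpret_cast<intptr_t>(target);
    *(--rear_) = size;
    assert(rear_ >= floor_);  // no overflow into live data below the queue
  }

 private:
  intptr_t* floor_;  // lowest usable slot (new-space top in V8)
  intptr_t* rear_;   // next write position, moving downward
};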
int Heap::MaxObjectSizeInPagedSpace() {
return Page::kMaxHeapObjectSize;
}
@@ -146,8 +156,8 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
Heap::allocation_timeout_-- <= 0) {
return Failure::RetryAfterGC(space);
}
- Counters::objs_since_last_full.Increment();
- Counters::objs_since_last_young.Increment();
+ isolate_->counters()->objs_since_last_full()->Increment();
+ isolate_->counters()->objs_since_last_young()->Increment();
#endif
MaybeObject* result;
if (NEW_SPACE == space) {
@@ -214,8 +224,8 @@ void Heap::FinalizeExternalString(String* string) {
MaybeObject* Heap::AllocateRawMap() {
#ifdef DEBUG
- Counters::objs_since_last_full.Increment();
- Counters::objs_since_last_young.Increment();
+ isolate_->counters()->objs_since_last_full()->Increment();
+ isolate_->counters()->objs_since_last_young()->Increment();
#endif
MaybeObject* result = map_space_->AllocateRaw(Map::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
@@ -232,8 +242,8 @@ MaybeObject* Heap::AllocateRawMap() {
MaybeObject* Heap::AllocateRawCell() {
#ifdef DEBUG
- Counters::objs_since_last_full.Increment();
- Counters::objs_since_last_young.Increment();
+ isolate_->counters()->objs_since_last_full()->Increment();
+ isolate_->counters()->objs_since_last_young()->Increment();
#endif
MaybeObject* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
@@ -341,7 +351,7 @@ void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
remaining--) {
Memory::Object_at(dst) = Memory::Object_at(src);
- if (Heap::InNewSpace(Memory::Object_at(dst))) {
+ if (InNewSpace(Memory::Object_at(dst))) {
marks |= page->GetRegionMaskForAddress(dst);
}
@@ -387,8 +397,13 @@ void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
}
+void Heap::ScavengePointer(HeapObject** p) {
+ ScavengeObject(p, *p);
+}
+
+
void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
- ASSERT(InFromSpace(object));
+ ASSERT(HEAP->InFromSpace(object));
// We use the first word (where the map pointer usually is) of a heap
// object to record the forwarding pointer. A forwarding pointer can
@@ -461,10 +476,15 @@ void Heap::SetLastScriptId(Object* last_script_id) {
roots_[kLastScriptIdRootIndex] = last_script_id;
}
+Isolate* Heap::isolate() {
+ return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
+ reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
+}
+
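// How Heap::isolate() recovers its owner (a sketch): each Isolate embeds
// its Heap by value, so a Heap* sits at a fixed offset inside its Isolate.
// Pretending an Isolate lives at address 4 and asking for its heap() yields
// 4 + offset-of-heap without any dereference; subtracting that and adding 4
// back turns `this` into the enclosing Isolate*. With a hypothetical
// OFFSET_OF macro the same computation reads:
//
//   reinterpret_cast<Isolate*>(
//       reinterpret_cast<intptr_t>(this) - OFFSET_OF(Isolate, heap_));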
#ifdef DEBUG
#define GC_GREEDY_CHECK() \
- if (FLAG_gc_greedy) v8::internal::Heap::GarbageCollectionGreedyCheck()
+ if (FLAG_gc_greedy) HEAP->GarbageCollectionGreedyCheck()
#else
#define GC_GREEDY_CHECK() { }
#endif
@@ -477,7 +497,7 @@ void Heap::SetLastScriptId(Object* last_script_id) {
// Warning: Do not use the identifiers __object__, __maybe_object__ or
// __scope__ in a call to this macro.
-#define CALL_AND_RETRY(FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \
+#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)\
do { \
GC_GREEDY_CHECK(); \
MaybeObject* __maybe_object__ = FUNCTION_CALL; \
@@ -487,16 +507,16 @@ void Heap::SetLastScriptId(Object* last_script_id) {
v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0", true);\
} \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- Heap::CollectGarbage( \
- Failure::cast(__maybe_object__)->allocation_space()); \
+ ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
+ allocation_space()); \
__maybe_object__ = FUNCTION_CALL; \
if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
if (__maybe_object__->IsOutOfMemory()) { \
v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1", true);\
} \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- Counters::gc_last_resort_from_handles.Increment(); \
- Heap::CollectAllAvailableGarbage(); \
+ ISOLATE->counters()->gc_last_resort_from_handles()->Increment(); \
+ ISOLATE->heap()->CollectAllAvailableGarbage(); \
{ \
AlwaysAllocateScope __scope__; \
__maybe_object__ = FUNCTION_CALL; \
@@ -511,14 +531,15 @@ void Heap::SetLastScriptId(Object* last_script_id) {
} while (false)
-#define CALL_HEAP_FUNCTION(FUNCTION_CALL, TYPE) \
- CALL_AND_RETRY(FUNCTION_CALL, \
- return Handle<TYPE>(TYPE::cast(__object__)), \
+#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
+ CALL_AND_RETRY(ISOLATE, \
+ FUNCTION_CALL, \
+ return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
return Handle<TYPE>())
-#define CALL_HEAP_FUNCTION_VOID(FUNCTION_CALL) \
- CALL_AND_RETRY(FUNCTION_CALL, return, return)
+#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
+ CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, return, return)
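// Typical call site after threading the isolate through (a sketch; the
// factory method shown is illustrative):
//
//   Handle<FixedArray> Factory::NewFixedArray(int size) {
//     CALL_HEAP_FUNCTION(isolate(),
//                        isolate()->heap()->AllocateFixedArray(size),
//                        FixedArray);
//   }
//
// On allocation failure the macro collects garbage in that isolate's heap
// and retries, rather than touching a process-global Heap.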
#ifdef DEBUG
@@ -534,7 +555,7 @@ inline bool Heap::allow_allocation(bool new_state) {
void ExternalStringTable::AddString(String* string) {
ASSERT(string->IsExternalString());
- if (Heap::InNewSpace(string)) {
+ if (heap_->InNewSpace(string)) {
new_space_strings_.Add(string);
} else {
old_space_strings_.Add(string);
@@ -559,12 +580,12 @@ void ExternalStringTable::Iterate(ObjectVisitor* v) {
void ExternalStringTable::Verify() {
#ifdef DEBUG
for (int i = 0; i < new_space_strings_.length(); ++i) {
- ASSERT(Heap::InNewSpace(new_space_strings_[i]));
- ASSERT(new_space_strings_[i] != Heap::raw_unchecked_null_value());
+ ASSERT(heap_->InNewSpace(new_space_strings_[i]));
+ ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_null_value());
}
for (int i = 0; i < old_space_strings_.length(); ++i) {
- ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
- ASSERT(old_space_strings_[i] != Heap::raw_unchecked_null_value());
+ ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
+ ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_null_value());
}
#endif
}
@@ -572,7 +593,7 @@ void ExternalStringTable::Verify() {
void ExternalStringTable::AddOldString(String* string) {
ASSERT(string->IsExternalString());
- ASSERT(!Heap::InNewSpace(string));
+ ASSERT(!heap_->InNewSpace(string));
old_space_strings_.Add(string);
}
@@ -582,6 +603,100 @@ void ExternalStringTable::ShrinkNewStrings(int position) {
Verify();
}
+
+void Heap::ClearInstanceofCache() {
+ set_instanceof_cache_function(the_hole_value());
+}
+
+
+Object* Heap::ToBoolean(bool condition) {
+ return condition ? true_value() : false_value();
+}
+
+
+void Heap::CompletelyClearInstanceofCache() {
+ set_instanceof_cache_map(the_hole_value());
+ set_instanceof_cache_function(the_hole_value());
+}
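// The instanceof cache memoizes the last (function, map) pair tested by the
// instanceof stub (a sketch of the intent). Holing out the function slot
// alone already forces a miss on the next lookup; the "completely" variant
// also resets the cached map so neither half can match stale objects.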
+
+
+MaybeObject* TranscendentalCache::Get(Type type, double input) {
+ SubCache* cache = caches_[type];
+ if (cache == NULL) {
+ caches_[type] = cache = new SubCache(type);
+ }
+ return cache->Get(input);
+}
+
+
+Address TranscendentalCache::cache_array_address() {
+ return reinterpret_cast<Address>(caches_);
+}
+
+
+double TranscendentalCache::SubCache::Calculate(double input) {
+ switch (type_) {
+ case ACOS:
+ return acos(input);
+ case ASIN:
+ return asin(input);
+ case ATAN:
+ return atan(input);
+ case COS:
+ return cos(input);
+ case EXP:
+ return exp(input);
+ case LOG:
+ return log(input);
+ case SIN:
+ return sin(input);
+ case TAN:
+ return tan(input);
+ default:
+ return 0.0; // Never happens.
+ }
+}
+
+
+MaybeObject* TranscendentalCache::SubCache::Get(double input) {
+ Converter c;
+ c.dbl = input;
+ int hash = Hash(c);
+ Element e = elements_[hash];
+ if (e.in[0] == c.integers[0] &&
+ e.in[1] == c.integers[1]) {
+ ASSERT(e.output != NULL);
+ isolate_->counters()->transcendental_cache_hit()->Increment();
+ return e.output;
+ }
+ double answer = Calculate(input);
+ isolate_->counters()->transcendental_cache_miss()->Increment();
+ Object* heap_number;
+ { MaybeObject* maybe_heap_number =
+ isolate_->heap()->AllocateHeapNumber(answer);
+ if (!maybe_heap_number->ToObject(&heap_number)) return maybe_heap_number;
+ }
+ elements_[hash].in[0] = c.integers[0];
+ elements_[hash].in[1] = c.integers[1];
+ elements_[hash].output = heap_number;
+ return heap_number;
+}
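// The cache is direct-mapped (a sketch): Converter reinterprets the input
// double's 64 bits as two 32-bit halves, Hash() folds them into a slot
// index, and each slot stores the exact input bits next to the boxed
// result. A hit requires both halves to match, so colliding inputs simply
// evict one another -- there is no chaining, and a miss costs one libm call
// plus one heap-number allocation.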
+
+
+Heap* _inline_get_heap_() {
+ return HEAP;
+}
+
+
+void MarkCompactCollector::SetMark(HeapObject* obj) {
+ tracer_->increment_marked_count();
+#ifdef DEBUG
+ UpdateLiveObjectCount(obj);
+#endif
+ obj->SetMark();
+}
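// SetMark (a sketch of the mechanism): marking piggy-backs on the object's
// header -- obj->SetMark() flips the mark bit in place -- while the tracer
// keeps a running count of marked objects for the collector's statistics.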
+
+
} } // namespace v8::internal
#endif // V8_HEAP_INL_H_
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index 07b631fa7..fb1ea8a64 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -28,295 +28,13 @@
#include "v8.h"
#include "heap-profiler.h"
-#include "frames-inl.h"
-#include "global-handles.h"
#include "profile-generator.h"
-#include "string-stream.h"
namespace v8 {
namespace internal {
#ifdef ENABLE_LOGGING_AND_PROFILING
-namespace {
-
-// Clusterizer is a set of helper functions for converting
-// object references into clusters.
-class Clusterizer : public AllStatic {
- public:
- static JSObjectsCluster Clusterize(HeapObject* obj) {
- return Clusterize(obj, true);
- }
- static void InsertIntoTree(JSObjectsClusterTree* tree,
- HeapObject* obj, bool fine_grain);
- static void InsertReferenceIntoTree(JSObjectsClusterTree* tree,
- const JSObjectsCluster& cluster) {
- InsertIntoTree(tree, cluster, 0);
- }
-
- private:
- static JSObjectsCluster Clusterize(HeapObject* obj, bool fine_grain);
- static int CalculateNetworkSize(JSObject* obj);
- static int GetObjectSize(HeapObject* obj) {
- return obj->IsJSObject() ?
- CalculateNetworkSize(JSObject::cast(obj)) : obj->Size();
- }
- static void InsertIntoTree(JSObjectsClusterTree* tree,
- const JSObjectsCluster& cluster, int size);
-};
-
-
-JSObjectsCluster Clusterizer::Clusterize(HeapObject* obj, bool fine_grain) {
- if (obj->IsJSObject()) {
- JSObject* js_obj = JSObject::cast(obj);
- String* constructor = GetConstructorNameForHeapProfile(
- JSObject::cast(js_obj));
- // Differentiate Object and Array instances.
- if (fine_grain && (constructor == Heap::Object_symbol() ||
- constructor == Heap::Array_symbol())) {
- return JSObjectsCluster(constructor, obj);
- } else {
- return JSObjectsCluster(constructor);
- }
- } else if (obj->IsString()) {
- return JSObjectsCluster(Heap::String_symbol());
- } else if (obj->IsJSGlobalPropertyCell()) {
- return JSObjectsCluster(JSObjectsCluster::GLOBAL_PROPERTY);
- } else if (obj->IsCode() || obj->IsSharedFunctionInfo() || obj->IsScript()) {
- return JSObjectsCluster(JSObjectsCluster::CODE);
- }
- return JSObjectsCluster();
-}
-
-
-void Clusterizer::InsertIntoTree(JSObjectsClusterTree* tree,
- HeapObject* obj, bool fine_grain) {
- JSObjectsCluster cluster = Clusterize(obj, fine_grain);
- if (cluster.is_null()) return;
- InsertIntoTree(tree, cluster, GetObjectSize(obj));
-}
-
-
-void Clusterizer::InsertIntoTree(JSObjectsClusterTree* tree,
- const JSObjectsCluster& cluster, int size) {
- JSObjectsClusterTree::Locator loc;
- tree->Insert(cluster, &loc);
- NumberAndSizeInfo number_and_size = loc.value();
- number_and_size.increment_number(1);
- number_and_size.increment_bytes(size);
- loc.set_value(number_and_size);
-}
-
-
-int Clusterizer::CalculateNetworkSize(JSObject* obj) {
- int size = obj->Size();
- // If 'properties' and 'elements' are non-empty (thus, non-shared),
- // take their size into account.
- if (obj->properties() != Heap::empty_fixed_array()) {
- size += obj->properties()->Size();
- }
- if (obj->elements() != Heap::empty_fixed_array()) {
- size += obj->elements()->Size();
- }
- // For functions, also account for non-empty context and literals sizes.
- if (obj->IsJSFunction()) {
- JSFunction* f = JSFunction::cast(obj);
- if (f->unchecked_context()->IsContext()) {
- size += f->context()->Size();
- }
- if (f->literals()->length() != 0) {
- size += f->literals()->Size();
- }
- }
- return size;
-}
-
-
-// A helper class for recording back references.
-class ReferencesExtractor : public ObjectVisitor {
- public:
- ReferencesExtractor(const JSObjectsCluster& cluster,
- RetainerHeapProfile* profile)
- : cluster_(cluster),
- profile_(profile),
- inside_array_(false) {
- }
-
- void VisitPointer(Object** o) {
- if ((*o)->IsFixedArray() && !inside_array_) {
- // Traverse one level deep for data members that are fixed arrays.
- // This covers the case of 'elements' and 'properties' of JSObject,
- // and function contexts.
- inside_array_ = true;
- FixedArray::cast(*o)->Iterate(this);
- inside_array_ = false;
- } else if ((*o)->IsHeapObject()) {
- profile_->StoreReference(cluster_, HeapObject::cast(*o));
- }
- }
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) VisitPointer(p);
- }
-
- private:
- const JSObjectsCluster& cluster_;
- RetainerHeapProfile* profile_;
- bool inside_array_;
-};
-
-
-// A printer interface implementation for the Retainers profile.
-class RetainersPrinter : public RetainerHeapProfile::Printer {
- public:
- void PrintRetainers(const JSObjectsCluster& cluster,
- const StringStream& retainers) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- cluster.Print(&stream);
- LOG(HeapSampleJSRetainersEvent(
- *(stream.ToCString()), *(retainers.ToCString())));
- }
-};
-
-
-// Visitor for printing a cluster tree.
-class ClusterTreePrinter BASE_EMBEDDED {
- public:
- explicit ClusterTreePrinter(StringStream* stream) : stream_(stream) {}
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- Print(stream_, cluster, number_and_size);
- }
- static void Print(StringStream* stream,
- const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size);
-
- private:
- StringStream* stream_;
-};
-
-
-void ClusterTreePrinter::Print(StringStream* stream,
- const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- stream->Put(',');
- cluster.Print(stream);
- stream->Add(";%d", number_and_size.number());
-}
-
-
-// Visitor for printing a retainer tree.
-class SimpleRetainerTreePrinter BASE_EMBEDDED {
- public:
- explicit SimpleRetainerTreePrinter(RetainerHeapProfile::Printer* printer)
- : printer_(printer) {}
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
-
- private:
- RetainerHeapProfile::Printer* printer_;
-};
-
-
-void SimpleRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
- JSObjectsClusterTree* tree) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- ClusterTreePrinter retainers_printer(&stream);
- tree->ForEach(&retainers_printer);
- printer_->PrintRetainers(cluster, stream);
-}
-
-
-// Visitor for aggregating references count of equivalent clusters.
-class RetainersAggregator BASE_EMBEDDED {
- public:
- RetainersAggregator(ClustersCoarser* coarser, JSObjectsClusterTree* dest_tree)
- : coarser_(coarser), dest_tree_(dest_tree) {}
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size);
-
- private:
- ClustersCoarser* coarser_;
- JSObjectsClusterTree* dest_tree_;
-};
-
-
-void RetainersAggregator::Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- JSObjectsCluster eq = coarser_->GetCoarseEquivalent(cluster);
- if (eq.is_null()) eq = cluster;
- JSObjectsClusterTree::Locator loc;
- dest_tree_->Insert(eq, &loc);
- NumberAndSizeInfo aggregated_number = loc.value();
- aggregated_number.increment_number(number_and_size.number());
- loc.set_value(aggregated_number);
-}
-
-
-// Visitor for printing retainers tree. Aggregates equivalent retainer clusters.
-class AggregatingRetainerTreePrinter BASE_EMBEDDED {
- public:
- AggregatingRetainerTreePrinter(ClustersCoarser* coarser,
- RetainerHeapProfile::Printer* printer)
- : coarser_(coarser), printer_(printer) {}
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
-
- private:
- ClustersCoarser* coarser_;
- RetainerHeapProfile::Printer* printer_;
-};
-
-
-void AggregatingRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
- JSObjectsClusterTree* tree) {
- if (!coarser_->GetCoarseEquivalent(cluster).is_null()) return;
- JSObjectsClusterTree dest_tree_;
- RetainersAggregator retainers_aggregator(coarser_, &dest_tree_);
- tree->ForEach(&retainers_aggregator);
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- ClusterTreePrinter retainers_printer(&stream);
- dest_tree_.ForEach(&retainers_printer);
- printer_->PrintRetainers(cluster, stream);
-}
-
-} // namespace
-
-
-// A helper class for building a retainers tree, that aggregates
-// all equivalent clusters.
-class RetainerTreeAggregator {
- public:
- explicit RetainerTreeAggregator(ClustersCoarser* coarser)
- : coarser_(coarser) {}
- void Process(JSObjectsRetainerTree* input_tree) {
- input_tree->ForEach(this);
- }
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
- JSObjectsRetainerTree& output_tree() { return output_tree_; }
-
- private:
- ClustersCoarser* coarser_;
- JSObjectsRetainerTree output_tree_;
-};
-
-
-void RetainerTreeAggregator::Call(const JSObjectsCluster& cluster,
- JSObjectsClusterTree* tree) {
- JSObjectsCluster eq = coarser_->GetCoarseEquivalent(cluster);
- if (eq.is_null()) return;
- JSObjectsRetainerTree::Locator loc;
- if (output_tree_.Insert(eq, &loc)) {
- loc.set_value(new JSObjectsClusterTree());
- }
- RetainersAggregator retainers_aggregator(coarser_, loc.value());
- tree->ForEach(&retainers_aggregator);
-}
-
-
-HeapProfiler* HeapProfiler::singleton_ = NULL;
-
HeapProfiler::HeapProfiler()
: snapshots_(new HeapSnapshotsCollection()),
next_snapshot_uid_(1) {
@@ -327,12 +45,20 @@ HeapProfiler::~HeapProfiler() {
delete snapshots_;
}
+
+void HeapProfiler::ResetSnapshots() {
+ delete snapshots_;
+ snapshots_ = new HeapSnapshotsCollection();
+}
+
+
#endif // ENABLE_LOGGING_AND_PROFILING
void HeapProfiler::Setup() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (singleton_ == NULL) {
- singleton_ = new HeapProfiler();
+ Isolate* isolate = Isolate::Current();
+ if (isolate->heap_profiler() == NULL) {
+ isolate->set_heap_profiler(new HeapProfiler());
}
#endif
}
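// The recurring pattern in this file (a sketch): the process-wide
// HeapProfiler::singleton_ becomes state owned by each Isolate, reached via
// Isolate::Current(). Schematically:
//
//   // before                          // after
//   singleton_->TakeSnapshotImpl(...)  Isolate::Current()->heap_profiler()
//                                          ->TakeSnapshotImpl(...)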
@@ -340,8 +66,9 @@ void HeapProfiler::Setup() {
void HeapProfiler::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- delete singleton_;
- singleton_ = NULL;
+ Isolate* isolate = Isolate::Current();
+ delete isolate->heap_profiler();
+ isolate->set_heap_profiler(NULL);
#endif
}
@@ -351,16 +78,39 @@ void HeapProfiler::TearDown() {
HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name,
int type,
v8::ActivityControl* control) {
- ASSERT(singleton_ != NULL);
- return singleton_->TakeSnapshotImpl(name, type, control);
+ ASSERT(Isolate::Current()->heap_profiler() != NULL);
+ return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
+ type,
+ control);
}
HeapSnapshot* HeapProfiler::TakeSnapshot(String* name,
int type,
v8::ActivityControl* control) {
- ASSERT(singleton_ != NULL);
- return singleton_->TakeSnapshotImpl(name, type, control);
+ ASSERT(Isolate::Current()->heap_profiler() != NULL);
+ return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
+ type,
+ control);
+}
+
+
+void HeapProfiler::DefineWrapperClass(
+ uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback) {
+ ASSERT(class_id != v8::HeapProfiler::kPersistentHandleNoClassId);
+ if (wrapper_callbacks_.length() <= class_id) {
+ wrapper_callbacks_.AddBlock(
+ NULL, class_id - wrapper_callbacks_.length() + 1);
+ }
+ wrapper_callbacks_[class_id] = callback;
+}
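// wrapper_callbacks_ acts as a sparse array indexed by class_id (a sketch):
// AddBlock(NULL, n) pads the list with NULL entries until the index is in
// range, then the slot is overwritten. Registering the same class_id twice
// simply replaces the callback; unregistered ids later read back as NULL.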
+
+
+v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
+ uint16_t class_id, Object** wrapper) {
+ if (wrapper_callbacks_.length() <= class_id) return NULL;
+ return wrapper_callbacks_[class_id](
+ class_id, Utils::ToLocal(Handle<Object>(wrapper)));
}
@@ -373,19 +123,11 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
bool generation_completed = true;
switch (s_type) {
case HeapSnapshot::kFull: {
- Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
HeapSnapshotGenerator generator(result, control);
generation_completed = generator.GenerateSnapshot();
break;
}
- case HeapSnapshot::kAggregated: {
- Heap::CollectAllGarbage(true);
- AggregatedHeapSnapshot agg_snapshot;
- AggregatedHeapSnapshotGenerator generator(&agg_snapshot);
- generator.GenerateSnapshot();
- generator.FillHeapSnapshot(result);
- break;
- }
default:
UNREACHABLE();
}
@@ -401,727 +143,42 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name,
int type,
v8::ActivityControl* control) {
- return TakeSnapshotImpl(snapshots_->GetName(name), type, control);
+ return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control);
}
int HeapProfiler::GetSnapshotsCount() {
- ASSERT(singleton_ != NULL);
- return singleton_->snapshots_->snapshots()->length();
+ HeapProfiler* profiler = Isolate::Current()->heap_profiler();
+ ASSERT(profiler != NULL);
+ return profiler->snapshots_->snapshots()->length();
}
HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
- ASSERT(singleton_ != NULL);
- return singleton_->snapshots_->snapshots()->at(index);
+ HeapProfiler* profiler = Isolate::Current()->heap_profiler();
+ ASSERT(profiler != NULL);
+ return profiler->snapshots_->snapshots()->at(index);
}
HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
- ASSERT(singleton_ != NULL);
- return singleton_->snapshots_->GetSnapshot(uid);
-}
-
-
-void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
- ASSERT(singleton_ != NULL);
- singleton_->snapshots_->ObjectMoveEvent(from, to);
-}
-
-
-const JSObjectsClusterTreeConfig::Key JSObjectsClusterTreeConfig::kNoKey;
-const JSObjectsClusterTreeConfig::Value JSObjectsClusterTreeConfig::kNoValue;
-
-
-ConstructorHeapProfile::ConstructorHeapProfile()
- : zscope_(DELETE_ON_EXIT) {
-}
-
-
-void ConstructorHeapProfile::Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- cluster.Print(&stream);
- LOG(HeapSampleJSConstructorEvent(*(stream.ToCString()),
- number_and_size.number(),
- number_and_size.bytes()));
-}
-
-
-void ConstructorHeapProfile::CollectStats(HeapObject* obj) {
- Clusterizer::InsertIntoTree(&js_objects_info_tree_, obj, false);
-}
-
-
-void ConstructorHeapProfile::PrintStats() {
- js_objects_info_tree_.ForEach(this);
-}
-
-
-static const char* GetConstructorName(const char* name) {
- return name[0] != '\0' ? name : "(anonymous)";
-}
-
-
-const char* JSObjectsCluster::GetSpecialCaseName() const {
- if (constructor_ == FromSpecialCase(ROOTS)) {
- return "(roots)";
- } else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
- return "(global property)";
- } else if (constructor_ == FromSpecialCase(CODE)) {
- return "(code)";
- } else if (constructor_ == FromSpecialCase(SELF)) {
- return "(self)";
- }
- return NULL;
-}
-
-
-void JSObjectsCluster::Print(StringStream* accumulator) const {
- ASSERT(!is_null());
- const char* special_case_name = GetSpecialCaseName();
- if (special_case_name != NULL) {
- accumulator->Add(special_case_name);
- } else {
- SmartPointer<char> s_name(
- constructor_->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
- accumulator->Add("%s", GetConstructorName(*s_name));
- if (instance_ != NULL) {
- accumulator->Add(":%p", static_cast<void*>(instance_));
- }
- }
-}
-
-
-void JSObjectsCluster::DebugPrint(StringStream* accumulator) const {
- if (!is_null()) {
- Print(accumulator);
- } else {
- accumulator->Add("(null cluster)");
- }
-}
-
-
-inline ClustersCoarser::ClusterBackRefs::ClusterBackRefs(
- const JSObjectsCluster& cluster_)
- : cluster(cluster_), refs(kInitialBackrefsListCapacity) {
-}
-
-
-inline ClustersCoarser::ClusterBackRefs::ClusterBackRefs(
- const ClustersCoarser::ClusterBackRefs& src)
- : cluster(src.cluster), refs(src.refs.capacity()) {
- refs.AddAll(src.refs);
-}
-
-
-inline ClustersCoarser::ClusterBackRefs&
- ClustersCoarser::ClusterBackRefs::operator=(
- const ClustersCoarser::ClusterBackRefs& src) {
- if (this == &src) return *this;
- cluster = src.cluster;
- refs.Clear();
- refs.AddAll(src.refs);
- return *this;
-}
-
-
-inline int ClustersCoarser::ClusterBackRefs::Compare(
- const ClustersCoarser::ClusterBackRefs& a,
- const ClustersCoarser::ClusterBackRefs& b) {
- int cmp = JSObjectsCluster::CompareConstructors(a.cluster, b.cluster);
- if (cmp != 0) return cmp;
- if (a.refs.length() < b.refs.length()) return -1;
- if (a.refs.length() > b.refs.length()) return 1;
- for (int i = 0; i < a.refs.length(); ++i) {
- int cmp = JSObjectsCluster::Compare(a.refs[i], b.refs[i]);
- if (cmp != 0) return cmp;
- }
- return 0;
-}
-
-
-ClustersCoarser::ClustersCoarser()
- : zscope_(DELETE_ON_EXIT),
- sim_list_(ClustersCoarser::kInitialSimilarityListCapacity),
- current_pair_(NULL),
- current_set_(NULL),
- self_(NULL) {
-}
-
-
-void ClustersCoarser::Call(const JSObjectsCluster& cluster,
- JSObjectsClusterTree* tree) {
- if (!cluster.can_be_coarsed()) return;
- ClusterBackRefs pair(cluster);
- ASSERT(current_pair_ == NULL);
- current_pair_ = &pair;
- current_set_ = new JSObjectsRetainerTree();
- self_ = &cluster;
- tree->ForEach(this);
- sim_list_.Add(pair);
- current_pair_ = NULL;
- current_set_ = NULL;
- self_ = NULL;
-}
-
-
-void ClustersCoarser::Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- ASSERT(current_pair_ != NULL);
- ASSERT(current_set_ != NULL);
- ASSERT(self_ != NULL);
- JSObjectsRetainerTree::Locator loc;
- if (JSObjectsCluster::Compare(*self_, cluster) == 0) {
- current_pair_->refs.Add(JSObjectsCluster(JSObjectsCluster::SELF));
- return;
- }
- JSObjectsCluster eq = GetCoarseEquivalent(cluster);
- if (!eq.is_null()) {
- if (current_set_->Find(eq, &loc)) return;
- current_pair_->refs.Add(eq);
- current_set_->Insert(eq, &loc);
- } else {
- current_pair_->refs.Add(cluster);
- }
-}
-
-
-void ClustersCoarser::Process(JSObjectsRetainerTree* tree) {
- int last_eq_clusters = -1;
- for (int i = 0; i < kMaxPassesCount; ++i) {
- sim_list_.Clear();
- const int curr_eq_clusters = DoProcess(tree);
- // If no new cluster equivalents discovered, abort processing.
- if (last_eq_clusters == curr_eq_clusters) break;
- last_eq_clusters = curr_eq_clusters;
- }
-}
-
-
-int ClustersCoarser::DoProcess(JSObjectsRetainerTree* tree) {
- tree->ForEach(this);
- sim_list_.Iterate(ClusterBackRefs::SortRefsIterator);
- sim_list_.Sort(ClusterBackRefsCmp);
- return FillEqualityTree();
-}
-
-
-JSObjectsCluster ClustersCoarser::GetCoarseEquivalent(
- const JSObjectsCluster& cluster) {
- if (!cluster.can_be_coarsed()) return JSObjectsCluster();
- EqualityTree::Locator loc;
- return eq_tree_.Find(cluster, &loc) ? loc.value() : JSObjectsCluster();
-}
-
-
-bool ClustersCoarser::HasAnEquivalent(const JSObjectsCluster& cluster) {
- // Return true for coarsible clusters that have a non-identical equivalent.
- if (!cluster.can_be_coarsed()) return false;
- JSObjectsCluster eq = GetCoarseEquivalent(cluster);
- return !eq.is_null() && JSObjectsCluster::Compare(cluster, eq) != 0;
-}
-
-
-int ClustersCoarser::FillEqualityTree() {
- int eq_clusters_count = 0;
- int eq_to = 0;
- bool first_added = false;
- for (int i = 1; i < sim_list_.length(); ++i) {
- if (ClusterBackRefs::Compare(sim_list_[i], sim_list_[eq_to]) == 0) {
- EqualityTree::Locator loc;
- if (!first_added) {
- // Add self-equivalence, if we have more than one item in this
- // equivalence class.
- eq_tree_.Insert(sim_list_[eq_to].cluster, &loc);
- loc.set_value(sim_list_[eq_to].cluster);
- first_added = true;
- }
- eq_tree_.Insert(sim_list_[i].cluster, &loc);
- loc.set_value(sim_list_[eq_to].cluster);
- ++eq_clusters_count;
- } else {
- eq_to = i;
- first_added = false;
- }
- }
- return eq_clusters_count;
-}
-
-
-const JSObjectsCluster ClustersCoarser::ClusterEqualityConfig::kNoKey;
-const JSObjectsCluster ClustersCoarser::ClusterEqualityConfig::kNoValue;
-const JSObjectsRetainerTreeConfig::Key JSObjectsRetainerTreeConfig::kNoKey;
-const JSObjectsRetainerTreeConfig::Value JSObjectsRetainerTreeConfig::kNoValue =
- NULL;
-
-
-RetainerHeapProfile::RetainerHeapProfile()
- : zscope_(DELETE_ON_EXIT),
- aggregator_(NULL) {
- JSObjectsCluster roots(JSObjectsCluster::ROOTS);
- ReferencesExtractor extractor(roots, this);
- Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
-}
-
-
-RetainerHeapProfile::~RetainerHeapProfile() {
- delete aggregator_;
-}
-
-
-void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster,
- HeapObject* ref) {
- JSObjectsCluster ref_cluster = Clusterizer::Clusterize(ref);
- if (ref_cluster.is_null()) return;
- JSObjectsRetainerTree::Locator ref_loc;
- if (retainers_tree_.Insert(ref_cluster, &ref_loc)) {
- ref_loc.set_value(new JSObjectsClusterTree());
- }
- JSObjectsClusterTree* referenced_by = ref_loc.value();
- Clusterizer::InsertReferenceIntoTree(referenced_by, cluster);
-}
-
-
-void RetainerHeapProfile::CollectStats(HeapObject* obj) {
- const JSObjectsCluster cluster = Clusterizer::Clusterize(obj);
- if (cluster.is_null()) return;
- ReferencesExtractor extractor(cluster, this);
- obj->Iterate(&extractor);
-}
-
-
-void RetainerHeapProfile::CoarseAndAggregate() {
- coarser_.Process(&retainers_tree_);
- ASSERT(aggregator_ == NULL);
- aggregator_ = new RetainerTreeAggregator(&coarser_);
- aggregator_->Process(&retainers_tree_);
-}
-
-
-void RetainerHeapProfile::DebugPrintStats(
- RetainerHeapProfile::Printer* printer) {
- // Print clusters that have no equivalents, aggregating their retainers.
- AggregatingRetainerTreePrinter agg_printer(&coarser_, printer);
- retainers_tree_.ForEach(&agg_printer);
- // Print clusters that have equivalents.
- SimpleRetainerTreePrinter s_printer(printer);
- aggregator_->output_tree().ForEach(&s_printer);
-}
-
-
-void RetainerHeapProfile::PrintStats() {
- RetainersPrinter printer;
- DebugPrintStats(&printer);
-}
-
-
-//
-// HeapProfiler class implementation.
-//
-static void StackWeakReferenceCallback(Persistent<Value> object,
- void* trace) {
- DeleteArray(static_cast<Address*>(trace));
- object.Dispose();
-}
-
-
-static void PrintProducerStackTrace(Object* obj, void* trace) {
- if (!obj->IsJSObject()) return;
- String* constructor = GetConstructorNameForHeapProfile(JSObject::cast(obj));
- SmartPointer<char> s_name(
- constructor->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
- LOG(HeapSampleJSProducerEvent(GetConstructorName(*s_name),
- reinterpret_cast<Address*>(trace)));
-}
-
-
-void HeapProfiler::WriteSample() {
- LOG(HeapSampleBeginEvent("Heap", "allocated"));
- LOG(HeapSampleStats(
- "Heap", "allocated", Heap::CommittedMemory(), Heap::SizeOfObjects()));
-
- AggregatedHeapSnapshot snapshot;
- AggregatedHeapSnapshotGenerator generator(&snapshot);
- generator.GenerateSnapshot();
-
- HistogramInfo* info = snapshot.info();
- for (int i = FIRST_NONSTRING_TYPE;
- i <= AggregatedHeapSnapshotGenerator::kAllStringsType;
- ++i) {
- if (info[i].bytes() > 0) {
- LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
- info[i].bytes()));
- }
- }
-
- snapshot.js_cons_profile()->PrintStats();
- snapshot.js_retainer_profile()->PrintStats();
-
- GlobalHandles::IterateWeakRoots(PrintProducerStackTrace,
- StackWeakReferenceCallback);
-
- LOG(HeapSampleEndEvent("Heap", "allocated"));
-}
-
-
-AggregatedHeapSnapshot::AggregatedHeapSnapshot()
- : info_(NewArray<HistogramInfo>(
- AggregatedHeapSnapshotGenerator::kAllStringsType + 1)) {
-#define DEF_TYPE_NAME(name) info_[name].set_name(#name);
- INSTANCE_TYPE_LIST(DEF_TYPE_NAME);
-#undef DEF_TYPE_NAME
- info_[AggregatedHeapSnapshotGenerator::kAllStringsType].set_name(
- "STRING_TYPE");
-}
-
-
-AggregatedHeapSnapshot::~AggregatedHeapSnapshot() {
- DeleteArray(info_);
-}
-
-
-AggregatedHeapSnapshotGenerator::AggregatedHeapSnapshotGenerator(
- AggregatedHeapSnapshot* agg_snapshot)
- : agg_snapshot_(agg_snapshot) {
-}
-
-
-void AggregatedHeapSnapshotGenerator::CalculateStringsStats() {
- HistogramInfo* info = agg_snapshot_->info();
- HistogramInfo& strings = info[kAllStringsType];
- // Lump all the string types together.
-#define INCREMENT_SIZE(type, size, name, camel_name) \
- strings.increment_number(info[type].number()); \
- strings.increment_bytes(info[type].bytes());
- STRING_TYPE_LIST(INCREMENT_SIZE);
-#undef INCREMENT_SIZE
-}
-
-
-void AggregatedHeapSnapshotGenerator::CollectStats(HeapObject* obj) {
- InstanceType type = obj->map()->instance_type();
- ASSERT(0 <= type && type <= LAST_TYPE);
- agg_snapshot_->info()[type].increment_number(1);
- agg_snapshot_->info()[type].increment_bytes(obj->Size());
+ HeapProfiler* profiler = Isolate::Current()->heap_profiler();
+ ASSERT(profiler != NULL);
+ return profiler->snapshots_->GetSnapshot(uid);
}
-void AggregatedHeapSnapshotGenerator::GenerateSnapshot() {
- HeapIterator iterator(HeapIterator::kFilterUnreachable);
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- CollectStats(obj);
- agg_snapshot_->js_cons_profile()->CollectStats(obj);
- agg_snapshot_->js_retainer_profile()->CollectStats(obj);
- }
- CalculateStringsStats();
- agg_snapshot_->js_retainer_profile()->CoarseAndAggregate();
+void HeapProfiler::DeleteAllSnapshots() {
+ HeapProfiler* profiler = Isolate::Current()->heap_profiler();
+ ASSERT(profiler != NULL);
+ profiler->ResetSnapshots();
}
-class CountingConstructorHeapProfileIterator {
- public:
- CountingConstructorHeapProfileIterator()
- : entities_count_(0), children_count_(0) {
- }
-
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- ++entities_count_;
- children_count_ += number_and_size.number();
- }
-
- int entities_count() { return entities_count_; }
- int children_count() { return children_count_; }
-
- private:
- int entities_count_;
- int children_count_;
-};
-
-
-static HeapEntry* AddEntryFromAggregatedSnapshot(HeapSnapshot* snapshot,
- int* root_child_index,
- HeapEntry::Type type,
- const char* name,
- int count,
- int size,
- int children_count,
- int retainers_count) {
- HeapEntry* entry = snapshot->AddEntry(
- type, name, count, size, children_count, retainers_count);
- ASSERT(entry != NULL);
- snapshot->root()->SetUnidirElementReference(*root_child_index,
- *root_child_index + 1,
- entry);
- *root_child_index = *root_child_index + 1;
- return entry;
-}
-
-
-class AllocatingConstructorHeapProfileIterator {
- public:
- AllocatingConstructorHeapProfileIterator(HeapSnapshot* snapshot,
- int* root_child_index)
- : snapshot_(snapshot),
- root_child_index_(root_child_index) {
- }
-
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- const char* name = cluster.GetSpecialCaseName();
- if (name == NULL) {
- name = snapshot_->collection()->GetFunctionName(cluster.constructor());
- }
- AddEntryFromAggregatedSnapshot(snapshot_,
- root_child_index_,
- HeapEntry::kObject,
- name,
- number_and_size.number(),
- number_and_size.bytes(),
- 0,
- 0);
- }
-
- private:
- HeapSnapshot* snapshot_;
- int* root_child_index_;
-};
-
-
-static HeapObject* ClusterAsHeapObject(const JSObjectsCluster& cluster) {
- return cluster.can_be_coarsed() ?
- reinterpret_cast<HeapObject*>(cluster.instance()) : cluster.constructor();
-}
-
-
-static JSObjectsCluster HeapObjectAsCluster(HeapObject* object) {
- if (object->IsString()) {
- return JSObjectsCluster(String::cast(object));
- } else {
- JSObject* js_obj = JSObject::cast(object);
- String* constructor = GetConstructorNameForHeapProfile(
- JSObject::cast(js_obj));
- return JSObjectsCluster(constructor, object);
- }
-}
-
-
-class CountingRetainersIterator {
- public:
- CountingRetainersIterator(const JSObjectsCluster& child_cluster,
- HeapEntriesAllocator* allocator,
- HeapEntriesMap* map)
- : child_(ClusterAsHeapObject(child_cluster)),
- allocator_(allocator),
- map_(map) {
- if (map_->Map(child_) == NULL)
- map_->Pair(child_, allocator_, HeapEntriesMap::kHeapEntryPlaceholder);
- }
-
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- if (map_->Map(ClusterAsHeapObject(cluster)) == NULL)
- map_->Pair(ClusterAsHeapObject(cluster),
- allocator_,
- HeapEntriesMap::kHeapEntryPlaceholder);
- map_->CountReference(ClusterAsHeapObject(cluster), child_);
- }
-
- private:
- HeapObject* child_;
- HeapEntriesAllocator* allocator_;
- HeapEntriesMap* map_;
-};
-
-
-class AllocatingRetainersIterator {
- public:
- AllocatingRetainersIterator(const JSObjectsCluster& child_cluster,
- HeapEntriesAllocator*,
- HeapEntriesMap* map)
- : child_(ClusterAsHeapObject(child_cluster)), map_(map) {
- child_entry_ = map_->Map(child_);
- ASSERT(child_entry_ != NULL);
- }
-
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- int child_index, retainer_index;
- map_->CountReference(ClusterAsHeapObject(cluster),
- child_,
- &child_index,
- &retainer_index);
- map_->Map(ClusterAsHeapObject(cluster))->SetIndexedReference(
- HeapGraphEdge::kElement,
- child_index,
- number_and_size.number(),
- child_entry_,
- retainer_index);
- }
-
- private:
- HeapObject* child_;
- HeapEntriesMap* map_;
- HeapEntry* child_entry_;
-};
-
-
-template<class RetainersIterator>
-class AggregatingRetainerTreeIterator {
- public:
- explicit AggregatingRetainerTreeIterator(ClustersCoarser* coarser,
- HeapEntriesAllocator* allocator,
- HeapEntriesMap* map)
- : coarser_(coarser), allocator_(allocator), map_(map) {
- }
-
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree) {
- if (coarser_ != NULL &&
- !coarser_->GetCoarseEquivalent(cluster).is_null()) return;
- JSObjectsClusterTree* tree_to_iterate = tree;
- ZoneScope zs(DELETE_ON_EXIT);
- JSObjectsClusterTree dest_tree_;
- if (coarser_ != NULL) {
- RetainersAggregator retainers_aggregator(coarser_, &dest_tree_);
- tree->ForEach(&retainers_aggregator);
- tree_to_iterate = &dest_tree_;
- }
- RetainersIterator iterator(cluster, allocator_, map_);
- tree_to_iterate->ForEach(&iterator);
- }
-
- private:
- ClustersCoarser* coarser_;
- HeapEntriesAllocator* allocator_;
- HeapEntriesMap* map_;
-};
-
-
-class AggregatedRetainerTreeAllocator : public HeapEntriesAllocator {
- public:
- AggregatedRetainerTreeAllocator(HeapSnapshot* snapshot,
- int* root_child_index)
- : snapshot_(snapshot), root_child_index_(root_child_index) {
- }
- ~AggregatedRetainerTreeAllocator() { }
-
- HeapEntry* AllocateEntry(
- HeapThing ptr, int children_count, int retainers_count) {
- HeapObject* obj = reinterpret_cast<HeapObject*>(ptr);
- JSObjectsCluster cluster = HeapObjectAsCluster(obj);
- const char* name = cluster.GetSpecialCaseName();
- if (name == NULL) {
- name = snapshot_->collection()->GetFunctionName(cluster.constructor());
- }
- return AddEntryFromAggregatedSnapshot(
- snapshot_, root_child_index_, HeapEntry::kObject, name,
- 0, 0, children_count, retainers_count);
- }
-
- private:
- HeapSnapshot* snapshot_;
- int* root_child_index_;
-};
-
-
-template<class Iterator>
-void AggregatedHeapSnapshotGenerator::IterateRetainers(
- HeapEntriesAllocator* allocator, HeapEntriesMap* entries_map) {
- RetainerHeapProfile* p = agg_snapshot_->js_retainer_profile();
- AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_1(
- p->coarser(), allocator, entries_map);
- p->retainers_tree()->ForEach(&agg_ret_iter_1);
- AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_2(
- NULL, allocator, entries_map);
- p->aggregator()->output_tree().ForEach(&agg_ret_iter_2);
-}
-
-
-void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) {
- // Count the number of entities.
- int histogram_entities_count = 0;
- int histogram_children_count = 0;
- int histogram_retainers_count = 0;
- for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) {
- if (agg_snapshot_->info()[i].bytes() > 0) {
- ++histogram_entities_count;
- }
- }
- CountingConstructorHeapProfileIterator counting_cons_iter;
- agg_snapshot_->js_cons_profile()->ForEach(&counting_cons_iter);
- histogram_entities_count += counting_cons_iter.entities_count();
- HeapEntriesMap entries_map;
- int root_child_index = 0;
- AggregatedRetainerTreeAllocator allocator(snapshot, &root_child_index);
- IterateRetainers<CountingRetainersIterator>(&allocator, &entries_map);
- histogram_entities_count += entries_map.entries_count();
- histogram_children_count += entries_map.total_children_count();
- histogram_retainers_count += entries_map.total_retainers_count();
-
- // Root entry references all other entries.
- histogram_children_count += histogram_entities_count;
- int root_children_count = histogram_entities_count;
- ++histogram_entities_count;
-
- // Allocate and fill entries in the snapshot, allocate references.
- snapshot->AllocateEntries(histogram_entities_count,
- histogram_children_count,
- histogram_retainers_count);
- snapshot->AddRootEntry(root_children_count);
- for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) {
- if (agg_snapshot_->info()[i].bytes() > 0) {
- AddEntryFromAggregatedSnapshot(snapshot,
- &root_child_index,
- HeapEntry::kHidden,
- agg_snapshot_->info()[i].name(),
- agg_snapshot_->info()[i].number(),
- agg_snapshot_->info()[i].bytes(),
- 0,
- 0);
- }
- }
- AllocatingConstructorHeapProfileIterator alloc_cons_iter(
- snapshot, &root_child_index);
- agg_snapshot_->js_cons_profile()->ForEach(&alloc_cons_iter);
- entries_map.AllocateEntries();
-
- // Fill up references.
- IterateRetainers<AllocatingRetainersIterator>(&allocator, &entries_map);
-
- snapshot->SetDominatorsToSelf();
-}
-
-
-bool ProducerHeapProfile::can_log_ = false;
-
-void ProducerHeapProfile::Setup() {
- can_log_ = true;
-}
-
-void ProducerHeapProfile::DoRecordJSObjectAllocation(Object* obj) {
- ASSERT(FLAG_log_producers);
- if (!can_log_) return;
- int framesCount = 0;
- for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
- ++framesCount;
- }
- if (framesCount == 0) return;
- ++framesCount; // Reserve place for the terminator item.
- Vector<Address> stack(NewArray<Address>(framesCount), framesCount);
- int i = 0;
- for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
- stack[i++] = it.frame()->pc();
- }
- stack[i] = NULL;
- Handle<Object> handle = GlobalHandles::Create(obj);
- GlobalHandles::MakeWeak(handle.location(),
- static_cast<void*>(stack.start()),
- StackWeakReferenceCallback);
+void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
+ snapshots_->ObjectMoveEvent(from, to);
}
-
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h
index 20ba457c5..c32f4c425 100644
--- a/deps/v8/src/heap-profiler.h
+++ b/deps/v8/src/heap-profiler.h
@@ -28,7 +28,7 @@
#ifndef V8_HEAP_PROFILER_H_
#define V8_HEAP_PROFILER_H_
-#include "zone-inl.h"
+#include "isolate.h"
namespace v8 {
namespace internal {
@@ -38,14 +38,15 @@ namespace internal {
class HeapSnapshot;
class HeapSnapshotsCollection;
-#define HEAP_PROFILE(Call) \
- do { \
- if (v8::internal::HeapProfiler::is_profiling()) { \
- v8::internal::HeapProfiler::Call; \
- } \
+#define HEAP_PROFILE(heap, call) \
+ do { \
+ v8::internal::HeapProfiler* profiler = heap->isolate()->heap_profiler(); \
+ if (profiler != NULL && profiler->is_profiling()) { \
+ profiler->call; \
+ } \
} while (false)
#else
-#define HEAP_PROFILE(Call) ((void) 0)
+#define HEAP_PROFILE(heap, call) ((void) 0)
#endif // ENABLE_LOGGING_AND_PROFILING
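// Call sites now name the heap explicitly (a sketch of a typical use):
//
//   HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
//
// which expands to a NULL-checked, is_profiling()-guarded call on the
// profiler owned by heap->isolate(), instead of a static class call.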
// The HeapProfiler writes data to the log files, which can be postprocessed
@@ -65,16 +66,18 @@ class HeapProfiler {
static int GetSnapshotsCount();
static HeapSnapshot* GetSnapshot(int index);
static HeapSnapshot* FindSnapshot(unsigned uid);
+ static void DeleteAllSnapshots();
- static void ObjectMoveEvent(Address from, Address to);
+ void ObjectMoveEvent(Address from, Address to);
- static INLINE(bool is_profiling()) {
- return singleton_ != NULL && singleton_->snapshots_->is_tracking_objects();
- }
+ void DefineWrapperClass(
+ uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback);
- // Obsolete interface.
- // Write a single heap sample to the log file.
- static void WriteSample();
+ v8::RetainedObjectInfo* ExecuteWrapperClassCallback(uint16_t class_id,
+ Object** wrapper);
+ INLINE(bool is_profiling()) {
+ return snapshots_->is_tracking_objects();
+ }
private:
HeapProfiler();
@@ -85,297 +88,15 @@ class HeapProfiler {
HeapSnapshot* TakeSnapshotImpl(String* name,
int type,
v8::ActivityControl* control);
+ void ResetSnapshots();
HeapSnapshotsCollection* snapshots_;
unsigned next_snapshot_uid_;
+ List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
- static HeapProfiler* singleton_;
#endif // ENABLE_LOGGING_AND_PROFILING
};
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-// JSObjectsCluster describes a group of JS objects that are
-// considered equivalent in terms of a particular profile.
-class JSObjectsCluster BASE_EMBEDDED {
- public:
- // These special cases are used in retainer profile.
- enum SpecialCase {
- ROOTS = 1,
- GLOBAL_PROPERTY = 2,
- CODE = 3,
- SELF = 100 // This case is used in ClustersCoarser only.
- };
-
- JSObjectsCluster() : constructor_(NULL), instance_(NULL) {}
- explicit JSObjectsCluster(String* constructor)
- : constructor_(constructor), instance_(NULL) {}
- explicit JSObjectsCluster(SpecialCase special)
- : constructor_(FromSpecialCase(special)), instance_(NULL) {}
- JSObjectsCluster(String* constructor, Object* instance)
- : constructor_(constructor), instance_(instance) {}
-
- static int CompareConstructors(const JSObjectsCluster& a,
- const JSObjectsCluster& b) {
- // Strings are unique, so it is sufficient to compare their pointers.
- return a.constructor_ == b.constructor_ ? 0
- : (a.constructor_ < b.constructor_ ? -1 : 1);
- }
- static int Compare(const JSObjectsCluster& a, const JSObjectsCluster& b) {
- // Strings are unique, so it is sufficient to compare their pointers.
- const int cons_cmp = CompareConstructors(a, b);
- return cons_cmp == 0 ?
- (a.instance_ == b.instance_ ? 0 : (a.instance_ < b.instance_ ? -1 : 1))
- : cons_cmp;
- }
- static int Compare(const JSObjectsCluster* a, const JSObjectsCluster* b) {
- return Compare(*a, *b);
- }
-
- bool is_null() const { return constructor_ == NULL; }
- bool can_be_coarsed() const { return instance_ != NULL; }
- String* constructor() const { return constructor_; }
- Object* instance() const { return instance_; }
-
- const char* GetSpecialCaseName() const;
- void Print(StringStream* accumulator) const;
- // Allows null clusters to be printed.
- void DebugPrint(StringStream* accumulator) const;
-
- private:
- static String* FromSpecialCase(SpecialCase special) {
- // We use symbols that are illegal JS identifiers to identify special cases.
- // Their actual value is irrelevant for us.
- switch (special) {
- case ROOTS: return Heap::result_symbol();
- case GLOBAL_PROPERTY: return Heap::code_symbol();
- case CODE: return Heap::arguments_shadow_symbol();
- case SELF: return Heap::catch_var_symbol();
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-
- String* constructor_;
- Object* instance_;
-};
-
-
-struct JSObjectsClusterTreeConfig {
- typedef JSObjectsCluster Key;
- typedef NumberAndSizeInfo Value;
- static const Key kNoKey;
- static const Value kNoValue;
- static int Compare(const Key& a, const Key& b) {
- return Key::Compare(a, b);
- }
-};
-typedef ZoneSplayTree<JSObjectsClusterTreeConfig> JSObjectsClusterTree;
-
-
-// ConstructorHeapProfile is responsible for gathering and logging
-// "constructor profile" of JS objects allocated on heap.
-// It is run during garbage collection cycle, thus it doesn't need
-// to use handles.
-class ConstructorHeapProfile BASE_EMBEDDED {
- public:
- ConstructorHeapProfile();
- virtual ~ConstructorHeapProfile() {}
- void CollectStats(HeapObject* obj);
- void PrintStats();
-
- template<class Callback>
- void ForEach(Callback* callback) { js_objects_info_tree_.ForEach(callback); }
- // Used by ZoneSplayTree::ForEach. Made virtual to allow overriding in tests.
- virtual void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size);
-
- private:
- ZoneScope zscope_;
- JSObjectsClusterTree js_objects_info_tree_;
-};
-
-
-// JSObjectsRetainerTree is used to represent retainer graphs using
-// adjacency list form:
-//
-// Cluster -> (Cluster -> NumberAndSizeInfo)
-//
-// Subordinate splay trees are stored by pointer. They are zone-allocated,
-// so it isn't needed to manage their lifetime.
-//
-struct JSObjectsRetainerTreeConfig {
- typedef JSObjectsCluster Key;
- typedef JSObjectsClusterTree* Value;
- static const Key kNoKey;
- static const Value kNoValue;
- static int Compare(const Key& a, const Key& b) {
- return Key::Compare(a, b);
- }
-};
-typedef ZoneSplayTree<JSObjectsRetainerTreeConfig> JSObjectsRetainerTree;
-
-
-class ClustersCoarser BASE_EMBEDDED {
- public:
- ClustersCoarser();
-
- // Processes a given retainer graph.
- void Process(JSObjectsRetainerTree* tree);
-
- // Returns an equivalent cluster (can be the cluster itself).
- // If the given cluster doesn't have an equivalent, returns null cluster.
- JSObjectsCluster GetCoarseEquivalent(const JSObjectsCluster& cluster);
- // Returns whether a cluster can be substituted with an equivalent and
- // thus be skipped in some cases.
- bool HasAnEquivalent(const JSObjectsCluster& cluster);
-
- // Used by JSObjectsRetainerTree::ForEach.
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size);
-
- private:
- // Stores a list of back references for a cluster.
- struct ClusterBackRefs {
- explicit ClusterBackRefs(const JSObjectsCluster& cluster_);
- ClusterBackRefs(const ClusterBackRefs& src);
- ClusterBackRefs& operator=(const ClusterBackRefs& src);
-
- static int Compare(const ClusterBackRefs& a, const ClusterBackRefs& b);
- void SortRefs() { refs.Sort(JSObjectsCluster::Compare); }
- static void SortRefsIterator(ClusterBackRefs* ref) { ref->SortRefs(); }
-
- JSObjectsCluster cluster;
- ZoneList<JSObjectsCluster> refs;
- };
- typedef ZoneList<ClusterBackRefs> SimilarityList;
-
- // A tree for storing a list of equivalents for a cluster.
- struct ClusterEqualityConfig {
- typedef JSObjectsCluster Key;
- typedef JSObjectsCluster Value;
- static const Key kNoKey;
- static const Value kNoValue;
- static int Compare(const Key& a, const Key& b) {
- return Key::Compare(a, b);
- }
- };
- typedef ZoneSplayTree<ClusterEqualityConfig> EqualityTree;
-
- static int ClusterBackRefsCmp(const ClusterBackRefs* a,
- const ClusterBackRefs* b) {
- return ClusterBackRefs::Compare(*a, *b);
- }
- int DoProcess(JSObjectsRetainerTree* tree);
- int FillEqualityTree();
-
- static const int kInitialBackrefsListCapacity = 2;
- static const int kInitialSimilarityListCapacity = 2000;
- // Number of passes for finding equivalents. Limits the length of paths
- // that can be considered equivalent.
- static const int kMaxPassesCount = 10;
-
- ZoneScope zscope_;
- SimilarityList sim_list_;
- EqualityTree eq_tree_;
- ClusterBackRefs* current_pair_;
- JSObjectsRetainerTree* current_set_;
- const JSObjectsCluster* self_;
-};
-
-
-// RetainerHeapProfile is responsible for gathering and logging
-// "retainer profile" of JS objects allocated on heap.
-// It is run during garbage collection cycle, thus it doesn't need
-// to use handles.
-class RetainerTreeAggregator;
-
-class RetainerHeapProfile BASE_EMBEDDED {
- public:
- class Printer {
- public:
- virtual ~Printer() {}
- virtual void PrintRetainers(const JSObjectsCluster& cluster,
- const StringStream& retainers) = 0;
- };
-
- RetainerHeapProfile();
- ~RetainerHeapProfile();
-
- RetainerTreeAggregator* aggregator() { return aggregator_; }
- ClustersCoarser* coarser() { return &coarser_; }
- JSObjectsRetainerTree* retainers_tree() { return &retainers_tree_; }
-
- void CollectStats(HeapObject* obj);
- void CoarseAndAggregate();
- void PrintStats();
- void DebugPrintStats(Printer* printer);
- void StoreReference(const JSObjectsCluster& cluster, HeapObject* ref);
-
- private:
- ZoneScope zscope_;
- JSObjectsRetainerTree retainers_tree_;
- ClustersCoarser coarser_;
- RetainerTreeAggregator* aggregator_;
-};
-
-
-class AggregatedHeapSnapshot {
- public:
- AggregatedHeapSnapshot();
- ~AggregatedHeapSnapshot();
-
- HistogramInfo* info() { return info_; }
- ConstructorHeapProfile* js_cons_profile() { return &js_cons_profile_; }
- RetainerHeapProfile* js_retainer_profile() { return &js_retainer_profile_; }
-
- private:
- HistogramInfo* info_;
- ConstructorHeapProfile js_cons_profile_;
- RetainerHeapProfile js_retainer_profile_;
-};
-
-
-class HeapEntriesMap;
-class HeapEntriesAllocator;
-class HeapSnapshot;
-
-class AggregatedHeapSnapshotGenerator {
- public:
- explicit AggregatedHeapSnapshotGenerator(AggregatedHeapSnapshot* snapshot);
- void GenerateSnapshot();
- void FillHeapSnapshot(HeapSnapshot* snapshot);
-
- static const int kAllStringsType = LAST_TYPE + 1;
-
- private:
- void CalculateStringsStats();
- void CollectStats(HeapObject* obj);
- template<class Iterator>
- void IterateRetainers(
- HeapEntriesAllocator* allocator, HeapEntriesMap* entries_map);
-
- AggregatedHeapSnapshot* agg_snapshot_;
-};
-
-
-class ProducerHeapProfile : public AllStatic {
- public:
- static void Setup();
- static void RecordJSObjectAllocation(Object* obj) {
- if (FLAG_log_producers) DoRecordJSObjectAllocation(obj);
- }
-
- private:
- static void DoRecordJSObjectAllocation(Object* obj);
- static bool can_log_;
-};
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
} } // namespace v8::internal
#endif // V8_HEAP_PROFILER_H_
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 34ab9aafc..6bb0206cf 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,11 +30,11 @@
#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "compilation-cache.h"
#include "debug.h"
-#include "heap-profiler.h"
#include "global-handles.h"
+#include "heap-profiler.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
@@ -49,119 +49,125 @@
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
-
+#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
+#include "regexp-macro-assembler.h"
+#include "mips/regexp-macro-assembler-mips.h"
+#endif
namespace v8 {
namespace internal {
-String* Heap::hidden_symbol_;
-Object* Heap::roots_[Heap::kRootListLength];
-Object* Heap::global_contexts_list_;
-
-
-NewSpace Heap::new_space_;
-OldSpace* Heap::old_pointer_space_ = NULL;
-OldSpace* Heap::old_data_space_ = NULL;
-OldSpace* Heap::code_space_ = NULL;
-MapSpace* Heap::map_space_ = NULL;
-CellSpace* Heap::cell_space_ = NULL;
-LargeObjectSpace* Heap::lo_space_ = NULL;
-
static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;
-intptr_t Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
-intptr_t Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
-int Heap::old_gen_exhausted_ = false;
+static Mutex* gc_initializer_mutex = OS::CreateMutex();
-int Heap::amount_of_external_allocated_memory_ = 0;
-int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
+Heap::Heap()
+ : isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
-static const int default_max_semispace_size_ = 2*MB;
-intptr_t Heap::max_old_generation_size_ = 192*MB;
-int Heap::initial_semispace_size_ = 128*KB;
-intptr_t Heap::code_range_size_ = 0;
-intptr_t Heap::max_executable_size_ = max_old_generation_size_;
+ reserved_semispace_size_(2*MB),
+ max_semispace_size_(2*MB),
+ initial_semispace_size_(128*KB),
+ max_old_generation_size_(192*MB),
+ max_executable_size_(max_old_generation_size_),
+ code_range_size_(0),
#elif defined(V8_TARGET_ARCH_X64)
-static const int default_max_semispace_size_ = 16*MB;
-intptr_t Heap::max_old_generation_size_ = 1*GB;
-int Heap::initial_semispace_size_ = 1*MB;
-intptr_t Heap::code_range_size_ = 512*MB;
-intptr_t Heap::max_executable_size_ = 256*MB;
-#else
-static const int default_max_semispace_size_ = 8*MB;
-intptr_t Heap::max_old_generation_size_ = 512*MB;
-int Heap::initial_semispace_size_ = 512*KB;
-intptr_t Heap::code_range_size_ = 0;
-intptr_t Heap::max_executable_size_ = 128*MB;
-#endif
-
-// Allow build-time customization of the max semispace size. Building
-// V8 with snapshots and a non-default max semispace size is much
-// easier if you can define it as part of the build environment.
-#if defined(V8_MAX_SEMISPACE_SIZE)
-int Heap::max_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
+ reserved_semispace_size_(16*MB),
+ max_semispace_size_(16*MB),
+ initial_semispace_size_(1*MB),
+ max_old_generation_size_(1*GB),
+ max_executable_size_(256*MB),
+ code_range_size_(512*MB),
#else
-int Heap::max_semispace_size_ = default_max_semispace_size_;
+ reserved_semispace_size_(8*MB),
+ max_semispace_size_(8*MB),
+ initial_semispace_size_(512*KB),
+ max_old_generation_size_(512*MB),
+ max_executable_size_(128*MB),
+ code_range_size_(0),
#endif
-
-// The snapshot semispace size will be the default semispace size if
-// snapshotting is used and will be the requested semispace size as
-// set up by ConfigureHeap otherwise.
-int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;
-
-List<Heap::GCPrologueCallbackPair> Heap::gc_prologue_callbacks_;
-List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;
-
-GCCallback Heap::global_gc_prologue_callback_ = NULL;
-GCCallback Heap::global_gc_epilogue_callback_ = NULL;
-HeapObjectCallback Heap::gc_safe_size_of_old_object_ = NULL;
-
// Variables set based on semispace_size_ and old_generation_size_ in
-// ConfigureHeap.
-
+// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
-int Heap::survived_since_last_expansion_ = 0;
-intptr_t Heap::external_allocation_limit_ = 0;
-
-Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
-
-int Heap::mc_count_ = 0;
-int Heap::ms_count_ = 0;
-unsigned int Heap::gc_count_ = 0;
-
-GCTracer* Heap::tracer_ = NULL;
-
-int Heap::unflattened_strings_length_ = 0;
-
-int Heap::always_allocate_scope_depth_ = 0;
-int Heap::linear_allocation_scope_depth_ = 0;
-int Heap::contexts_disposed_ = 0;
+ survived_since_last_expansion_(0),
+ always_allocate_scope_depth_(0),
+ linear_allocation_scope_depth_(0),
+ contexts_disposed_(0),
+ new_space_(this),
+ old_pointer_space_(NULL),
+ old_data_space_(NULL),
+ code_space_(NULL),
+ map_space_(NULL),
+ cell_space_(NULL),
+ lo_space_(NULL),
+ gc_state_(NOT_IN_GC),
+ gc_post_processing_depth_(0),
+ mc_count_(0),
+ ms_count_(0),
+ gc_count_(0),
+ unflattened_strings_length_(0),
+#ifdef DEBUG
+ allocation_allowed_(true),
+ allocation_timeout_(0),
+ disallow_allocation_failure_(false),
+ debug_utils_(NULL),
+#endif // DEBUG
+ old_gen_promotion_limit_(kMinimumPromotionLimit),
+ old_gen_allocation_limit_(kMinimumAllocationLimit),
+ external_allocation_limit_(0),
+ amount_of_external_allocated_memory_(0),
+ amount_of_external_allocated_memory_at_last_global_gc_(0),
+ old_gen_exhausted_(false),
+ hidden_symbol_(NULL),
+ global_gc_prologue_callback_(NULL),
+ global_gc_epilogue_callback_(NULL),
+ gc_safe_size_of_old_object_(NULL),
+ total_regexp_code_generated_(0),
+ tracer_(NULL),
+ young_survivors_after_last_gc_(0),
+ high_survival_rate_period_length_(0),
+ survival_rate_(0),
+ previous_survival_rate_trend_(Heap::STABLE),
+ survival_rate_trend_(Heap::STABLE),
+ max_gc_pause_(0),
+ max_alive_after_gc_(0),
+ min_in_mutator_(kMaxInt),
+ alive_after_last_gc_(0),
+ last_gc_end_timestamp_(0.0),
+ page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
+ number_idle_notifications_(0),
+ last_idle_notification_gc_count_(0),
+ last_idle_notification_gc_count_init_(false),
+ configured_(false),
+ is_safe_to_read_maps_(true) {
+ // Allow build-time customization of the max semispace size. Building
+ // V8 with snapshots and a non-default max semispace size is much
+ // easier if you can define it as part of the build environment.
+#if defined(V8_MAX_SEMISPACE_SIZE)
+ max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
+#endif
-int Heap::young_survivors_after_last_gc_ = 0;
-int Heap::high_survival_rate_period_length_ = 0;
-double Heap::survival_rate_ = 0;
-Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
-Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;
+ intptr_t max_virtual = OS::MaxVirtualMemory();
-#ifdef DEBUG
-bool Heap::allocation_allowed_ = true;
+ if (max_virtual > 0) {
+ if (code_range_size_ > 0) {
+ // Reserve no more than 1/8 of the memory for the code range.
+ code_range_size_ = Min(code_range_size_, max_virtual >> 3);
+ }
+ }
-int Heap::allocation_timeout_ = 0;
-bool Heap::disallow_allocation_failure_ = false;
-#endif // DEBUG
+ memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
+ global_contexts_list_ = NULL;
+ mark_compact_collector_.heap_ = this;
+ external_string_table_.heap_ = this;
+}
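
A quick worked example of the clamp applied at the end of the constructor above; this is a minimal sketch using the x64 defaults from the initializer list and an assumed 4*GB result from OS::MaxVirtualMemory() (the 64-bit build and the 4*GB figure are both illustrative assumptions, not values this patch fixes):

  // Sketch: the 1/8 code-range clamp with assumed inputs.
  intptr_t max_virtual = intptr_t(4) * GB;         // assumed OS limit
  intptr_t code_range = 512 * MB;                  // x64 default above
  code_range = Min(code_range, max_virtual >> 3);  // 4*GB / 8 == 512*MB

With less than 4*GB of virtual memory the clamp shrinks the code range; with 4*GB or more, the x64 default survives unchanged.
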
-intptr_t GCTracer::alive_after_last_gc_ = 0;
-double GCTracer::last_gc_end_timestamp_ = 0.0;
-int GCTracer::max_gc_pause_ = 0;
-intptr_t GCTracer::max_alive_after_gc_ = 0;
-int GCTracer::min_in_mutator_ = kMaxInt;
intptr_t Heap::Capacity() {
if (!HasBeenSetup()) return 0;
@@ -190,7 +196,7 @@ intptr_t Heap::CommittedMemory() {
intptr_t Heap::CommittedMemoryExecutable() {
if (!HasBeenSetup()) return 0;
- return MemoryAllocator::SizeExecutable();
+ return isolate()->memory_allocator()->SizeExecutable();
}
@@ -217,8 +223,8 @@ bool Heap::HasBeenSetup() {
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
- ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
- ASSERT(!MarkCompactCollector::are_map_pointers_encoded());
+ ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects.
+ ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
MapWord map_word = object->map_word();
map_word.ClearMark();
map_word.ClearOverflow();
@@ -227,8 +233,8 @@ int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
- ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
- ASSERT(MarkCompactCollector::are_map_pointers_encoded());
+ ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects.
+ ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
uint32_t marker = Memory::uint32_at(object->address());
if (marker == MarkCompactCollector::kSingleFreeEncoding) {
return kIntSize;
@@ -236,7 +242,7 @@ int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
return Memory::int_at(object->address() + kIntSize);
} else {
MapWord map_word = object->map_word();
- Address map_address = map_word.DecodeMapAddress(Heap::map_space());
+ Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
return object->SizeFromMap(map);
}
@@ -246,19 +252,20 @@ int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
// Is global GC requested?
if (space != NEW_SPACE || FLAG_gc_global) {
- Counters::gc_compactor_caused_by_request.Increment();
+ isolate_->counters()->gc_compactor_caused_by_request()->Increment();
return MARK_COMPACTOR;
}
// Is enough data promoted to justify a global GC?
if (OldGenerationPromotionLimitReached()) {
- Counters::gc_compactor_caused_by_promoted_data.Increment();
+ isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
return MARK_COMPACTOR;
}
// Have allocation in OLD and LO failed?
if (old_gen_exhausted_) {
- Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
+ isolate_->counters()->
+ gc_compactor_caused_by_oldspace_exhaustion()->Increment();
return MARK_COMPACTOR;
}
@@ -271,8 +278,9 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
// and does not count available bytes already in the old space or code
// space. Undercounting is safe---we may get an unrequested full GC when
// a scavenge would have succeeded.
- if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
- Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
+ if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
+ isolate_->counters()->
+ gc_compactor_caused_by_oldspace_exhaustion()->Increment();
return MARK_COMPACTOR;
}
@@ -317,8 +325,8 @@ void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
", available: %8" V8_PTR_PREFIX "d\n",
- MemoryAllocator::Size(),
- MemoryAllocator::Available());
+ isolate_->memory_allocator()->Size(),
+ isolate_->memory_allocator()->Available());
PrintF("New space, used: %8" V8_PTR_PREFIX "d"
", available: %8" V8_PTR_PREFIX "d\n",
Heap::new_space_.Size(),
@@ -383,7 +391,7 @@ void Heap::ReportStatisticsAfterGC() {
void Heap::GarbageCollectionPrologue() {
- TranscendentalCache::Clear();
+ isolate_->transcendental_cache()->Clear();
ClearJSFunctionResultCaches();
gc_count_++;
unflattened_strings_length_ = 0;
@@ -424,21 +432,24 @@ void Heap::GarbageCollectionEpilogue() {
Verify();
}
- if (FLAG_print_global_handles) GlobalHandles::Print();
+ if (FLAG_print_global_handles) isolate_->global_handles()->Print();
if (FLAG_print_handles) PrintHandles();
if (FLAG_gc_verbose) Print();
if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif
- Counters::alive_after_last_gc.Set(static_cast<int>(SizeOfObjects()));
+ isolate_->counters()->alive_after_last_gc()->Set(
+ static_cast<int>(SizeOfObjects()));
- Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
- Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
+ isolate_->counters()->symbol_table_capacity()->Set(
+ symbol_table()->Capacity());
+ isolate_->counters()->number_of_symbols()->Set(
+ symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug::AfterGarbageCollection();
+ isolate_->debug()->AfterGarbageCollection();
#endif
}
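
The epilogue shows the pattern this patch applies throughout: counters that used to be process-wide statics (Counters::foo.Increment()) are now reached through the owning isolate (isolate_->counters()->foo()->Increment()). A minimal sketch of the per-isolate counter shape those call sites assume; the member layout here is an assumption, as the real accessors are macro-generated:

  // Sketch: per-isolate counters instead of process-global statics.
  class StatsCounter {
   public:
    StatsCounter() : count_(0) {}
    void Increment() { count_++; }
    void Set(int value) { count_ = value; }
   private:
    int count_;
  };

  class Counters {
   public:
    StatsCounter* alive_after_last_gc() { return &alive_after_last_gc_; }
   private:
    StatsCounter alive_after_last_gc_;
  };

Each Isolate owns one Counters instance, so two isolates no longer race on the same counter word.
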
@@ -447,9 +458,9 @@ void Heap::CollectAllGarbage(bool force_compaction) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
- MarkCompactCollector::SetForceCompaction(force_compaction);
+ mark_compact_collector_.SetForceCompaction(force_compaction);
CollectGarbage(OLD_POINTER_SPACE);
- MarkCompactCollector::SetForceCompaction(false);
+ mark_compact_collector_.SetForceCompaction(false);
}
@@ -457,7 +468,7 @@ void Heap::CollectAllAvailableGarbage() {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
- MarkCompactCollector::SetForceCompaction(true);
+ mark_compact_collector()->SetForceCompaction(true);
// Major GC would invoke weak handle callbacks on weakly reachable
// handles, but won't collect weakly reachable objects until next
@@ -473,13 +484,13 @@ void Heap::CollectAllAvailableGarbage() {
break;
}
}
- MarkCompactCollector::SetForceCompaction(false);
+ mark_compact_collector()->SetForceCompaction(false);
}
bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
// The VM is in the GC state until exiting this function.
- VMState state(GC);
+ VMState state(isolate_, GC);
#ifdef DEBUG
// Reset the allocation timeout to the GC interval, but make sure to
@@ -492,7 +503,7 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
bool next_gc_likely_to_collect_more = false;
- { GCTracer tracer;
+ { GCTracer tracer(this);
GarbageCollectionPrologue();
// The GC count was incremented in the prologue. Tell the tracer about
// it.
@@ -502,8 +513,8 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
tracer.set_collector(collector);
HistogramTimer* rate = (collector == SCAVENGER)
- ? &Counters::gc_scavenger
- : &Counters::gc_compactor;
+ ? isolate_->counters()->gc_scavenger()
+ : isolate_->counters()->gc_compactor();
rate->Start();
next_gc_likely_to_collect_more =
PerformGarbageCollection(collector, &tracer);
@@ -512,17 +523,12 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
GarbageCollectionEpilogue();
}
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (FLAG_log_gc) HeapProfiler::WriteSample();
-#endif
-
return next_gc_likely_to_collect_more;
}
void Heap::PerformScavenge() {
- GCTracer tracer;
+ GCTracer tracer(this);
PerformGarbageCollection(SCAVENGER, &tracer);
}
@@ -531,7 +537,6 @@ void Heap::PerformScavenge() {
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
public:
- SymbolTableVerifier() { }
void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
@@ -548,7 +553,7 @@ class SymbolTableVerifier : public ObjectVisitor {
static void VerifySymbolTable() {
#ifdef DEBUG
SymbolTableVerifier verifier;
- Heap::symbol_table()->IterateElements(&verifier);
+ HEAP->symbol_table()->IterateElements(&verifier);
#endif // DEBUG
}
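
The HEAP macro that replaces the static Heap:: qualifiers in these asserts resolves to the heap of the isolate bound to the current thread. A sketch of the presumed definition; the authoritative one lives in isolate.h, which this diff does not show:

  // Presumed shape of the HEAP macro used above.
  #define HEAP (v8::internal::Isolate::Current()->heap())
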
@@ -633,7 +638,7 @@ void Heap::EnsureFromSpaceIsCommitted() {
void Heap::ClearJSFunctionResultCaches() {
- if (Bootstrapper::IsActive()) return;
+ if (isolate_->bootstrapper()->IsActive()) return;
Object* context = global_contexts_list_;
while (!context->IsUndefined()) {
@@ -651,8 +656,9 @@ void Heap::ClearJSFunctionResultCaches() {
}
+
void Heap::ClearNormalizedMapCaches() {
- if (Bootstrapper::IsActive()) return;
+ if (isolate_->bootstrapper()->IsActive()) return;
Object* context = global_contexts_list_;
while (!context->IsUndefined()) {
@@ -709,7 +715,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
bool next_gc_likely_to_collect_more = false;
if (collector != SCAVENGER) {
- PROFILE(CodeMovingGCEvent());
+ PROFILE(isolate_, CodeMovingGCEvent());
}
VerifySymbolTable();
@@ -768,14 +774,15 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
UpdateSurvivalRateTrend(start_new_space_size);
}
- Counters::objs_since_last_young.Set(0);
+ isolate_->counters()->objs_since_last_young()->Set(0);
- if (collector == MARK_COMPACTOR) {
- DisableAssertNoAllocation allow_allocation;
+ gc_post_processing_depth_++;
+ { DisableAssertNoAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
next_gc_likely_to_collect_more =
- GlobalHandles::PostGarbageCollectionProcessing();
+ isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
}
+ gc_post_processing_depth_--;
// Update relocatables.
Relocatable::PostGarbageCollectionProcessing();
@@ -808,11 +815,11 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
void Heap::MarkCompact(GCTracer* tracer) {
gc_state_ = MARK_COMPACT;
- LOG(ResourceEvent("markcompact", "begin"));
+ LOG(isolate_, ResourceEvent("markcompact", "begin"));
- MarkCompactCollector::Prepare(tracer);
+ mark_compact_collector_.Prepare(tracer);
- bool is_compacting = MarkCompactCollector::IsCompacting();
+ bool is_compacting = mark_compact_collector_.IsCompacting();
if (is_compacting) {
mc_count_++;
@@ -823,15 +830,17 @@ void Heap::MarkCompact(GCTracer* tracer) {
MarkCompactPrologue(is_compacting);
- MarkCompactCollector::CollectGarbage();
+ is_safe_to_read_maps_ = false;
+ mark_compact_collector_.CollectGarbage();
+ is_safe_to_read_maps_ = true;
- LOG(ResourceEvent("markcompact", "end"));
+ LOG(isolate_, ResourceEvent("markcompact", "end"));
gc_state_ = NOT_IN_GC;
Shrink();
- Counters::objs_since_last_full.Set(0);
+ isolate_->counters()->objs_since_last_full()->Set(0);
contexts_disposed_ = 0;
}
@@ -840,15 +849,18 @@ void Heap::MarkCompact(GCTracer* tracer) {
void Heap::MarkCompactPrologue(bool is_compacting) {
// At any old GC clear the keyed lookup cache to enable collection of unused
// maps.
- KeyedLookupCache::Clear();
- ContextSlotCache::Clear();
- DescriptorLookupCache::Clear();
+ isolate_->keyed_lookup_cache()->Clear();
+ isolate_->context_slot_cache()->Clear();
+ isolate_->descriptor_lookup_cache()->Clear();
- CompilationCache::MarkCompactPrologue();
+ isolate_->compilation_cache()->MarkCompactPrologue();
CompletelyClearInstanceofCache();
if (is_compacting) FlushNumberStringCache();
+ if (FLAG_cleanup_code_caches_at_gc) {
+ polymorphic_code_cache()->set_cache(undefined_value());
+ }
ClearNormalizedMapCaches();
}
@@ -868,6 +880,7 @@ Object* Heap::FindCodeObject(Address a) {
// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
public:
+ explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
void VisitPointer(Object** p) { ScavengePointer(p); }
@@ -879,48 +892,15 @@ class ScavengeVisitor: public ObjectVisitor {
private:
void ScavengePointer(Object** p) {
Object* object = *p;
- if (!Heap::InNewSpace(object)) return;
+ if (!heap_->InNewSpace(object)) return;
Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
reinterpret_cast<HeapObject*>(object));
}
-};
-
-
-// A queue of objects promoted during scavenge. Each object is accompanied
-// by its size to avoid dereferencing a map pointer for scanning.
-class PromotionQueue {
- public:
- void Initialize(Address start_address) {
- front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
- }
- bool is_empty() { return front_ <= rear_; }
-
- void insert(HeapObject* target, int size) {
- *(--rear_) = reinterpret_cast<intptr_t>(target);
- *(--rear_) = size;
- // Assert no overflow into live objects.
- ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
- }
-
- void remove(HeapObject** target, int* size) {
- *target = reinterpret_cast<HeapObject*>(*(--front_));
- *size = static_cast<int>(*(--front_));
- // Assert no underflow.
- ASSERT(front_ >= rear_);
- }
-
- private:
- // The front of the queue is higher in memory than the rear.
- intptr_t* front_;
- intptr_t* rear_;
+ Heap* heap_;
};
-// Shared state read by the scavenge collector and set by ScavengeObject.
-static PromotionQueue promotion_queue;
-
-
#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
@@ -929,7 +909,7 @@ class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
void VisitPointers(Object** start, Object**end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
- ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
+ ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
}
}
}
@@ -940,12 +920,12 @@ static void VerifyNonPointerSpacePointers() {
// Verify that there are no pointers to new space in spaces where we
// do not expect them.
VerifyNonPointerSpacePointersVisitor v;
- HeapObjectIterator code_it(Heap::code_space());
+ HeapObjectIterator code_it(HEAP->code_space());
for (HeapObject* object = code_it.next();
object != NULL; object = code_it.next())
object->Iterate(&v);
- HeapObjectIterator data_it(Heap::old_data_space());
+ HeapObjectIterator data_it(HEAP->old_data_space());
for (HeapObject* object = data_it.next();
object != NULL; object = data_it.next())
object->Iterate(&v);
@@ -964,6 +944,12 @@ void Heap::CheckNewSpaceExpansionCriteria() {
}
+static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
+ return heap->InNewSpace(*p) &&
+ !HeapObject::cast(*p)->map_word().IsForwardingAddress();
+}
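
IsUnscavengedHeapObject relies on the scavenger's forwarding convention: once an object has been copied, its map word is overwritten with the forwarding address, so an intact map word means the object has not been scavenged yet. A minimal sketch of the matching check-and-follow idiom, built from MapWord calls this file already uses (the helper itself is illustrative, not part of the patch):

  // Sketch: resolve a possibly-evacuated new-space object to its copy.
  static HeapObject* FollowForwarding(HeapObject* object) {
    MapWord first_word = object->map_word();
    if (first_word.IsForwardingAddress()) {
      return first_word.ToForwardingAddress();  // already copied
    }
    return object;  // not scavenged yet; map word still holds the map
  }
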
+
+
void Heap::Scavenge() {
#ifdef DEBUG
if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
@@ -971,7 +957,9 @@ void Heap::Scavenge() {
gc_state_ = SCAVENGE;
- Page::FlipMeaningOfInvalidatedWatermarkFlag();
+ SwitchScavengingVisitorsTableIfProfilingWasEnabled();
+
+ Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
#ifdef DEBUG
VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
VerifyPageWatermarkValidity(map_space_, ALL_VALID);
@@ -986,10 +974,10 @@ void Heap::Scavenge() {
map_space_->FlushTopPageWatermark();
// Implements Cheney's copying algorithm
- LOG(ResourceEvent("scavenge", "begin"));
+ LOG(isolate_, ResourceEvent("scavenge", "begin"));
// Clear descriptor cache.
- DescriptorLookupCache::Clear();
+ isolate_->descriptor_lookup_cache()->Clear();
// Used for updating survived_since_last_expansion_ at function end.
intptr_t survived_watermark = PromotedSpaceSize();
@@ -1019,16 +1007,17 @@ void Heap::Scavenge() {
// frees up its size in bytes from the top of the new space, and
// objects are at least one pointer in size.
Address new_space_front = new_space_.ToSpaceLow();
- promotion_queue.Initialize(new_space_.ToSpaceHigh());
+ promotion_queue_.Initialize(new_space_.ToSpaceHigh());
- ScavengeVisitor scavenge_visitor;
+ is_safe_to_read_maps_ = false;
+ ScavengeVisitor scavenge_visitor(this);
// Copy roots.
IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
IterateDirtyRegions(old_pointer_space_,
- &IteratePointersInDirtyRegion,
+ &Heap::IteratePointersInDirtyRegion,
&ScavengePointer,
WATERMARK_CAN_BE_INVALID);
@@ -1055,15 +1044,23 @@ void Heap::Scavenge() {
scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
+ &IsUnscavengedHeapObject);
+ isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
+ &scavenge_visitor);
+ new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
LiveObjectList::UpdateReferencesForScavengeGC();
- RuntimeProfiler::UpdateSamplesAfterScavenge();
+ isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
ASSERT(new_space_front == new_space_.top());
+ is_safe_to_read_maps_ = true;
+
// Set age mark.
new_space_.set_age_mark(new_space_.top());
@@ -1071,18 +1068,19 @@ void Heap::Scavenge() {
IncrementYoungSurvivorsCounter(static_cast<int>(
(PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
- LOG(ResourceEvent("scavenge", "end"));
+ LOG(isolate_, ResourceEvent("scavenge", "end"));
gc_state_ = NOT_IN_GC;
}
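
Scavenge is, as the comment above says, Cheney's copying algorithm: a scan pointer chases the allocation pointer through to-space until the wavefront of copied objects is exhausted. A compact generic sketch; to_space_start, copy_roots, and copy_pointees are hypothetical stand-ins for the root iteration and the DoScavenge loop, and V8 additionally interleaves the promotion queue and the dirty-region scanning shown above:

  static void copy_roots(Address* top);                      // hypothetical
  static void copy_pointees(HeapObject* obj, Address* top);  // hypothetical

  static void CheneySketch(Address to_space_start) {
    Address scan = to_space_start;  // next object to visit
    Address top = to_space_start;   // to-space allocation pointer
    copy_roots(&top);               // evacuate roots, bumping top
    while (scan < top) {
      HeapObject* object = HeapObject::FromAddress(scan);
      copy_pointees(object, &top);  // evacuate children, bumping top
      scan += object->Size();       // visit objects in copy order
    }
  }
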
-String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
+String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
+ Object** p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
if (!first_word.IsForwardingAddress()) {
// Unreachable external string can be finalized.
- FinalizeExternalString(String::cast(*p));
+ heap->FinalizeExternalString(String::cast(*p));
return NULL;
}
@@ -1093,48 +1091,49 @@ String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func) {
- ExternalStringTable::Verify();
+ external_string_table_.Verify();
- if (ExternalStringTable::new_space_strings_.is_empty()) return;
+ if (external_string_table_.new_space_strings_.is_empty()) return;
- Object** start = &ExternalStringTable::new_space_strings_[0];
- Object** end = start + ExternalStringTable::new_space_strings_.length();
+ Object** start = &external_string_table_.new_space_strings_[0];
+ Object** end = start + external_string_table_.new_space_strings_.length();
Object** last = start;
for (Object** p = start; p < end; ++p) {
- ASSERT(Heap::InFromSpace(*p));
- String* target = updater_func(p);
+ ASSERT(InFromSpace(*p));
+ String* target = updater_func(this, p);
if (target == NULL) continue;
ASSERT(target->IsExternalString());
- if (Heap::InNewSpace(target)) {
+ if (InNewSpace(target)) {
// String is still in new space. Update the table entry.
*last = target;
++last;
} else {
// String got promoted. Move it to the old string list.
- ExternalStringTable::AddOldString(target);
+ external_string_table_.AddOldString(target);
}
}
ASSERT(last <= end);
- ExternalStringTable::ShrinkNewStrings(static_cast<int>(last - start));
+ external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}
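
The loop above is a standard in-place filter: a read pointer p sweeps the list while a write pointer last compacts the survivors toward the front, and the list is then shrunk to last - start. The same shape on a plain array, as a sketch; Keep is a hypothetical predicate standing in for updater_func:

  static bool Keep(Object* entry);  // hypothetical predicate

  static int FilterInPlace(Object** entries, int length) {
    Object** last = entries;
    for (Object** p = entries; p < entries + length; ++p) {
      if (Keep(*p)) {
        *last++ = *p;  // keep the survivor, compacting forward
      }
    }
    return static_cast<int>(last - entries);  // the new length
  }
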
-static Object* ProcessFunctionWeakReferences(Object* function,
+static Object* ProcessFunctionWeakReferences(Heap* heap,
+ Object* function,
WeakObjectRetainer* retainer) {
- Object* head = Heap::undefined_value();
+ Object* head = heap->undefined_value();
JSFunction* tail = NULL;
Object* candidate = function;
- while (!candidate->IsUndefined()) {
+ while (candidate != heap->undefined_value()) {
// Check whether to keep the candidate in the list.
JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
Object* retain = retainer->RetainAs(candidate);
if (retain != NULL) {
- if (head->IsUndefined()) {
+ if (head == heap->undefined_value()) {
// First element in the list.
head = candidate_function;
} else {
@@ -1151,7 +1150,7 @@ static Object* ProcessFunctionWeakReferences(Object* function,
// Terminate the list if there is one or more elements.
if (tail != NULL) {
- tail->set_next_function_link(Heap::undefined_value());
+ tail->set_next_function_link(heap->undefined_value());
}
return head;
@@ -1162,18 +1161,19 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
Object* head = undefined_value();
Context* tail = NULL;
Object* candidate = global_contexts_list_;
- while (!candidate->IsUndefined()) {
+ while (candidate != undefined_value()) {
// Check whether to keep the candidate in the list.
Context* candidate_context = reinterpret_cast<Context*>(candidate);
Object* retain = retainer->RetainAs(candidate);
if (retain != NULL) {
- if (head->IsUndefined()) {
+ if (head == undefined_value()) {
// First element in the list.
head = candidate_context;
} else {
// Subsequent elements in the list.
ASSERT(tail != NULL);
- tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
+ tail->set_unchecked(this,
+ Context::NEXT_CONTEXT_LINK,
candidate_context,
UPDATE_WRITE_BARRIER);
}
@@ -1183,9 +1183,11 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
// Process the weak list of optimized functions for the context.
Object* function_list_head =
ProcessFunctionWeakReferences(
+ this,
candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
retainer);
- candidate_context->set_unchecked(Context::OPTIMIZED_FUNCTIONS_LIST,
+ candidate_context->set_unchecked(this,
+ Context::OPTIMIZED_FUNCTIONS_LIST,
function_list_head,
UPDATE_WRITE_BARRIER);
}
@@ -1195,21 +1197,22 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
// Terminate the list if there is one or more elements.
if (tail != NULL) {
- tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
+ tail->set_unchecked(this,
+ Context::NEXT_CONTEXT_LINK,
Heap::undefined_value(),
UPDATE_WRITE_BARRIER);
}
// Update the head of the list of contexts.
- Heap::global_contexts_list_ = head;
+ global_contexts_list_ = head;
}
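
Both weak lists above (the global contexts list and each context's optimized-function list) are pruned with the same head/tail splice: ask the retainer whether each candidate survives, link the survivors together, and terminate the result. The pattern on an ordinary singly linked list, as a sketch; Node and Retain are hypothetical stand-ins for the heap objects and WeakObjectRetainer::RetainAs:

  struct Node { Node* next; };

  static Node* PruneList(Node* list, bool (*Retain)(Node*)) {
    Node* head = NULL;
    Node* tail = NULL;
    for (Node* candidate = list; candidate != NULL;) {
      Node* next = candidate->next;  // read before relinking
      if (Retain(candidate)) {
        if (head == NULL) {
          head = candidate;          // first survivor heads the list
        } else {
          tail->next = candidate;    // splice after the previous survivor
        }
        tail = candidate;
      }
      candidate = next;
    }
    if (tail != NULL) tail->next = NULL;  // terminate the list
    return head;
  }
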
class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
public:
- static inline void VisitPointer(Object** p) {
+ static inline void VisitPointer(Heap* heap, Object** p) {
Object* object = *p;
- if (!Heap::InNewSpace(object)) return;
+ if (!heap->InNewSpace(object)) return;
Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
reinterpret_cast<HeapObject*>(object));
}
@@ -1230,10 +1233,10 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
}
// Promote and process all the to-be-promoted objects.
- while (!promotion_queue.is_empty()) {
+ while (!promotion_queue_.is_empty()) {
HeapObject* target;
int size;
- promotion_queue.remove(&target, &size);
+ promotion_queue_.remove(&target, &size);
    // The promoted object might already have been partially visited
    // during dirty-region iteration, so we search specifically
@@ -1253,6 +1256,32 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
}
+enum LoggingAndProfiling {
+ LOGGING_AND_PROFILING_ENABLED,
+ LOGGING_AND_PROFILING_DISABLED
+};
+
+
+typedef void (*ScavengingCallback)(Map* map,
+ HeapObject** slot,
+ HeapObject* object);
+
+
+static Atomic32 scavenging_visitors_table_mode_;
+static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+
+
+INLINE(static void DoScavengeObject(Map* map,
+ HeapObject** slot,
+ HeapObject* obj));
+
+
+void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
+ scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
+}
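
DoScavengeObject makes every evacuation one table load plus an indirect call: the visitor id stored on the object's map indexes a table of specialized callbacks. A minimal sketch of that dispatch shape; the array bound and the Register signature are simplifications, and the real ids come from StaticVisitorBase:

  static const int kNumVisitorIds = 32;  // assumed bound, illustrative

  class DispatchTableSketch {
   public:
    void Register(int visitor_id, ScavengingCallback callback) {
      callbacks_[visitor_id] = callback;
    }
    ScavengingCallback GetVisitor(Map* map) {
      return callbacks_[map->visitor_id()];
    }
   private:
    ScavengingCallback callbacks_[kNumVisitorIds];
  };
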
+
+
+template<LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
public:
static void Initialize() {
@@ -1261,23 +1290,23 @@ class ScavengingVisitor : public StaticVisitorBase {
table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
table_.Register(kVisitByteArray, &EvacuateByteArray);
table_.Register(kVisitFixedArray, &EvacuateFixedArray);
+ table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
+
table_.Register(kVisitGlobalContext,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
- VisitSpecialized<Context::kSize>);
-
- typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject;
+ template VisitSpecialized<Context::kSize>);
table_.Register(kVisitConsString,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
- VisitSpecialized<ConsString::kSize>);
+ template VisitSpecialized<ConsString::kSize>);
table_.Register(kVisitSharedFunctionInfo,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
- VisitSpecialized<SharedFunctionInfo::kSize>);
+ template VisitSpecialized<SharedFunctionInfo::kSize>);
table_.Register(kVisitJSFunction,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
- VisitSpecialized<JSFunction::kSize>);
+ template VisitSpecialized<JSFunction::kSize>);
table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
kVisitDataObject,
@@ -1292,18 +1321,16 @@ class ScavengingVisitor : public StaticVisitorBase {
kVisitStructGeneric>();
}
-
- static inline void Scavenge(Map* map, HeapObject** slot, HeapObject* obj) {
- table_.GetVisitor(map)(map, slot, obj);
+ static VisitorDispatchTable<ScavengingCallback>* GetTable() {
+ return &table_;
}
-
private:
enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
enum SizeRestriction { SMALL, UNKNOWN_SIZE };
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- static void RecordCopiedObject(HeapObject* obj) {
+ static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
bool should_record = false;
#ifdef DEBUG
should_record = FLAG_heap_stats;
@@ -1312,10 +1339,10 @@ class ScavengingVisitor : public StaticVisitorBase {
should_record = should_record || FLAG_log_gc;
#endif
if (should_record) {
- if (Heap::new_space()->Contains(obj)) {
- Heap::new_space()->RecordAllocation(obj);
+ if (heap->new_space()->Contains(obj)) {
+ heap->new_space()->RecordAllocation(obj);
} else {
- Heap::new_space()->RecordPromotion(obj);
+ heap->new_space()->RecordPromotion(obj);
}
}
}
@@ -1324,27 +1351,34 @@ class ScavengingVisitor : public StaticVisitorBase {
  // Helper function used by EvacuateObject to copy a source object to an
// allocated target object and update the forwarding pointer in the source
// object. Returns the target object.
- INLINE(static HeapObject* MigrateObject(HeapObject* source,
+ INLINE(static HeapObject* MigrateObject(Heap* heap,
+ HeapObject* source,
HeapObject* target,
int size)) {
// Copy the content of source to target.
- Heap::CopyBlock(target->address(), source->address(), size);
+ heap->CopyBlock(target->address(), source->address(), size);
// Set the forwarding address.
source->set_map_word(MapWord::FromForwardingAddress(target));
+ if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- // Update NewSpace stats if necessary.
- RecordCopiedObject(target);
+ // Update NewSpace stats if necessary.
+ RecordCopiedObject(heap, target);
#endif
- HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
+ HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
#if defined(ENABLE_LOGGING_AND_PROFILING)
- if (Logger::is_logging() || CpuProfiler::is_profiling()) {
- if (target->IsSharedFunctionInfo()) {
- PROFILE(SFIMoveEvent(source->address(), target->address()));
+ Isolate* isolate = heap->isolate();
+ if (isolate->logger()->is_logging() ||
+ CpuProfiler::is_profiling(isolate)) {
+ if (target->IsSharedFunctionInfo()) {
+ PROFILE(isolate, SharedFunctionInfoMoveEvent(
+ source->address(), target->address()));
+ }
}
- }
#endif
+ }
+
return target;
}
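
Note that logging_and_profiling_mode is a template parameter, so the branch wrapped around the logging block above is decided at compile time: the DISABLED instantiation of MigrateObject contains no logging code at all, which is what makes it usable as the default fast-path table. In sketch form, with kLogging as an illustrative stand-in for the real parameter:

  template <bool kLogging>
  static void MigrateSketch(/* source, target, size */) {
    // ... copy the block and install the forwarding pointer ...
    if (kLogging) {
      // record the move; this branch is folded away entirely in the
      // kLogging == false instantiation
    }
  }
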
@@ -1358,36 +1392,37 @@ class ScavengingVisitor : public StaticVisitorBase {
(object_size <= Page::kMaxHeapObjectSize));
ASSERT(object->Size() == object_size);
- if (Heap::ShouldBePromoted(object->address(), object_size)) {
+ Heap* heap = map->heap();
+ if (heap->ShouldBePromoted(object->address(), object_size)) {
MaybeObject* maybe_result;
if ((size_restriction != SMALL) &&
(object_size > Page::kMaxHeapObjectSize)) {
- maybe_result = Heap::lo_space()->AllocateRawFixedArray(object_size);
+ maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
} else {
if (object_contents == DATA_OBJECT) {
- maybe_result = Heap::old_data_space()->AllocateRaw(object_size);
+ maybe_result = heap->old_data_space()->AllocateRaw(object_size);
} else {
- maybe_result = Heap::old_pointer_space()->AllocateRaw(object_size);
+ maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
}
}
Object* result = NULL; // Initialization to please compiler.
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
- *slot = MigrateObject(object, target, object_size);
+      *slot = MigrateObject(heap, object, target, object_size);
if (object_contents == POINTER_OBJECT) {
- promotion_queue.insert(target, object_size);
+ heap->promotion_queue()->insert(target, object_size);
}
- Heap::tracer()->increment_promoted_objects_size(object_size);
+ heap->tracer()->increment_promoted_objects_size(object_size);
return;
}
}
Object* result =
- Heap::new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
- *slot = MigrateObject(object, HeapObject::cast(result), object_size);
+ heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
+ *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
return;
}
@@ -1403,6 +1438,18 @@ class ScavengingVisitor : public StaticVisitorBase {
}
+ static inline void EvacuateFixedDoubleArray(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
+ int object_size = FixedDoubleArray::SizeFor(length);
+ EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map,
+ slot,
+ object,
+ object_size);
+ }
+
+
static inline void EvacuateByteArray(Map* map,
HeapObject** slot,
HeapObject* object) {
@@ -1438,13 +1485,14 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
ASSERT(IsShortcutCandidate(map->instance_type()));
- if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) {
+ if (ConsString::cast(object)->unchecked_second() ==
+ map->heap()->empty_string()) {
HeapObject* first =
HeapObject::cast(ConsString::cast(object)->unchecked_first());
*slot = first;
- if (!Heap::InNewSpace(first)) {
+ if (!map->heap()->InNewSpace(first)) {
object->set_map_word(MapWord::FromForwardingAddress(first));
return;
}
@@ -1458,7 +1506,7 @@ class ScavengingVisitor : public StaticVisitorBase {
return;
}
- Scavenge(first->map(), slot, first);
+ DoScavengeObject(first->map(), slot, first);
object->set_map_word(MapWord::FromForwardingAddress(*slot));
return;
}
@@ -1485,26 +1533,61 @@ class ScavengingVisitor : public StaticVisitorBase {
}
};
- typedef void (*Callback)(Map* map, HeapObject** slot, HeapObject* object);
-
- static VisitorDispatchTable<Callback> table_;
+ static VisitorDispatchTable<ScavengingCallback> table_;
};
-VisitorDispatchTable<ScavengingVisitor::Callback> ScavengingVisitor::table_;
+template<LoggingAndProfiling logging_and_profiling_mode>
+VisitorDispatchTable<ScavengingCallback>
+ ScavengingVisitor<logging_and_profiling_mode>::table_;
+
+
+static void InitializeScavengingVisitorsTables() {
+ ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
+ ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
+ scavenging_visitors_table_.CopyFrom(
+ ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
+ scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
+}
+
+
+void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
+ // Table was already updated by some isolate.
+ return;
+ }
+
+  if (isolate()->logger()->is_logging() ||
+ CpuProfiler::is_profiling(isolate()) ||
+ (isolate()->heap_profiler() != NULL &&
+ isolate()->heap_profiler()->is_profiling())) {
+    // If one of the isolates is doing a scavenge at this moment, it
+    // might see this table in an inconsistent state, with some of the
+    // callbacks pointing to
+    // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others to
+    // ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
+    // However, this does not lead to any bugs, as such an isolate does
+    // not have profiling enabled, and any isolate with profiling
+    // enabled is guaranteed to see the table in a consistent state.
+ scavenging_visitors_table_.CopyFrom(
+ ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
+
+ // We use Release_Store to prevent reordering of this write before writes
+ // to the table.
+ Release_Store(&scavenging_visitors_table_mode_,
+ LOGGING_AND_PROFILING_ENABLED);
+ }
+#endif
+}
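
The Release_Store at the end is the publish half of a copy-then-publish handoff: every plain write that fills in the table must become visible before the mode flag flips, so any isolate that observes LOGGING_AND_PROFILING_ENABLED also sees the fully updated table. The same pattern in miniature with the atomicops primitives; payload and published are illustrative names, and the reader here pairs the store with Acquire_Load, whereas the code above justifies its weaker plain read in its comment:

  static int payload = 0;
  static Atomic32 published = 0;

  static void Publish(int value) {
    payload = value;               // fill in the data first
    Release_Store(&published, 1);  // then flip the flag; earlier writes
                                   // cannot be reordered past this store
  }

  static bool TryRead(int* out) {
    if (Acquire_Load(&published) == 0) return false;
    *out = payload;                // guaranteed to see Publish's write
    return true;
  }
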
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
- ASSERT(InFromSpace(object));
+ ASSERT(HEAP->InFromSpace(object));
MapWord first_word = object->map_word();
ASSERT(!first_word.IsForwardingAddress());
Map* map = first_word.ToMap();
- ScavengingVisitor::Scavenge(map, p, object);
-}
-
-
-void Heap::ScavengePointer(HeapObject** p) {
- ScavengeObject(p, *p);
+ DoScavengeObject(map, p, object);
}
@@ -1519,9 +1602,8 @@ MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
- reinterpret_cast<Map*>(result)->
- set_visitor_id(
- StaticVisitorBase::GetVisitorId(instance_type, instance_size));
+ reinterpret_cast<Map*>(result)->set_visitor_id(
+ StaticVisitorBase::GetVisitorId(instance_type, instance_size));
reinterpret_cast<Map*>(result)->set_inobject_properties(0);
reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
@@ -1547,11 +1629,13 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
map->set_instance_size(instance_size);
map->set_inobject_properties(0);
map->set_pre_allocated_property_fields(0);
- map->set_instance_descriptors(empty_descriptor_array());
+ map->init_instance_descriptors();
map->set_code_cache(empty_fixed_array());
+ map->set_prototype_transitions(empty_fixed_array());
map->set_unused_property_fields(0);
map->set_bit_field(0);
- map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
+ map->set_bit_field2(1 << Map::kIsExtensible);
+ map->set_elements_kind(JSObject::FAST_ELEMENTS);
// If the map object is aligned fill the padding area with Smi 0 objects.
if (Map::kPadStart < Map::kSize) {
@@ -1575,6 +1659,11 @@ MaybeObject* Heap::AllocateCodeCache() {
}
+MaybeObject* Heap::AllocatePolymorphicCodeCache() {
+ return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
+}
+
+
const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
{type, size, k##camel_name##MapRootIndex},
@@ -1630,6 +1719,7 @@ bool Heap::CreateInitialMaps() {
if (!maybe_obj->ToObject(&obj)) return false;
}
set_null_value(obj);
+ Oddball::cast(obj)->set_kind(Oddball::kNull);
// Allocate the empty descriptor array.
{ MaybeObject* maybe_obj = AllocateEmptyFixedArray();
@@ -1638,14 +1728,17 @@ bool Heap::CreateInitialMaps() {
set_empty_descriptor_array(DescriptorArray::cast(obj));
// Fix the instance_descriptors for the existing maps.
- meta_map()->set_instance_descriptors(empty_descriptor_array());
+ meta_map()->init_instance_descriptors();
meta_map()->set_code_cache(empty_fixed_array());
+ meta_map()->set_prototype_transitions(empty_fixed_array());
- fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
+ fixed_array_map()->init_instance_descriptors();
fixed_array_map()->set_code_cache(empty_fixed_array());
+ fixed_array_map()->set_prototype_transitions(empty_fixed_array());
- oddball_map()->set_instance_descriptors(empty_descriptor_array());
+ oddball_map()->init_instance_descriptors();
oddball_map()->set_code_cache(empty_fixed_array());
+ oddball_map()->set_prototype_transitions(empty_fixed_array());
// Fix prototype object for existing maps.
meta_map()->set_prototype(null_value());
@@ -1669,10 +1762,10 @@ bool Heap::CreateInitialMaps() {
}
set_heap_number_map(Map::cast(obj));
- { MaybeObject* maybe_obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
+ { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_proxy_map(Map::cast(obj));
+ set_foreign_map(Map::cast(obj));
for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
const StringTypeTable& entry = string_type_table[i];
@@ -1696,6 +1789,12 @@ bool Heap::CreateInitialMaps() {
Map::cast(obj)->set_is_undetectable();
{ MaybeObject* maybe_obj =
+ AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_fixed_double_array_map(Map::cast(obj));
+
+ { MaybeObject* maybe_obj =
AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -1707,10 +1806,10 @@ bool Heap::CreateInitialMaps() {
set_empty_byte_array(ByteArray::cast(obj));
{ MaybeObject* maybe_obj =
- AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
+ AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_pixel_array_map(Map::cast(obj));
+ set_external_pixel_array_map(Map::cast(obj));
{ MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
ExternalArray::kAlignedSize);
@@ -1754,6 +1853,18 @@ bool Heap::CreateInitialMaps() {
}
set_external_float_array_map(Map::cast(obj));
+ { MaybeObject* maybe_obj =
+ AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_non_strict_arguments_elements_map(Map::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
+ ExternalArray::kAlignedSize);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_external_double_array_map(Map::cast(obj));
+
{ MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -1793,7 +1904,7 @@ bool Heap::CreateInitialMaps() {
AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_context_map(Map::cast(obj));
+ set_function_context_map(Map::cast(obj));
{ MaybeObject* maybe_obj =
AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
@@ -1805,6 +1916,12 @@ bool Heap::CreateInitialMaps() {
AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
+ set_with_context_map(Map::cast(obj));
+
+ { MaybeObject* maybe_obj =
+ AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
Map* global_context_map = Map::cast(obj);
global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
set_global_context_map(global_context_map);
@@ -1821,7 +1938,7 @@ bool Heap::CreateInitialMaps() {
}
set_message_object_map(Map::cast(obj));
- ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
+ ASSERT(!InNewSpace(empty_fixed_array()));
return true;
}
@@ -1874,12 +1991,13 @@ MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
MaybeObject* Heap::CreateOddball(const char* to_string,
- Object* to_number) {
+ Object* to_number,
+ byte kind) {
Object* result;
{ MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- return Oddball::cast(result)->Initialize(to_string, to_number);
+ return Oddball::cast(result)->Initialize(to_string, to_number, kind);
}
@@ -1891,7 +2009,7 @@ bool Heap::CreateApiObjects() {
}
set_neander_map(Map::cast(obj));
- { MaybeObject* maybe_obj = Heap::AllocateJSObjectFromMap(neander_map());
+ { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
if (!maybe_obj->ToObject(&obj)) return false;
}
Object* elements;
@@ -1906,20 +2024,6 @@ bool Heap::CreateApiObjects() {
}
-void Heap::CreateCEntryStub() {
- CEntryStub stub(1);
- set_c_entry_code(*stub.GetCode());
-}
-
-
-#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
-void Heap::CreateRegExpCEntryStub() {
- RegExpCEntryStub stub;
- set_re_c_entry_code(*stub.GetCode());
-}
-#endif
-
-
void Heap::CreateJSEntryStub() {
JSEntryStub stub;
set_js_entry_code(*stub.GetCode());
@@ -1932,14 +2036,6 @@ void Heap::CreateJSConstructEntryStub() {
}
-#if V8_TARGET_ARCH_ARM
-void Heap::CreateDirectCEntryStub() {
- DirectCEntryStub stub;
- set_direct_c_entry_code(*stub.GetCode());
-}
-#endif
-
-
void Heap::CreateFixedStubs() {
// Here we create roots for fixed stubs. They are needed at GC
// for cooking and uncooking (check out frames.cc).
@@ -1947,22 +2043,15 @@ void Heap::CreateFixedStubs() {
// stub cache for these stubs.
HandleScope scope;
  // gcc-4.4 has a problem generating correct code for the following snippet:
- // { CEntryStub stub;
- // c_entry_code_ = *stub.GetCode();
+ // { JSEntryStub stub;
+ // js_entry_code_ = *stub.GetCode();
// }
- // { DebuggerStatementStub stub;
- // debugger_statement_code_ = *stub.GetCode();
+ // { JSConstructEntryStub stub;
+ // js_construct_entry_code_ = *stub.GetCode();
// }
  // To work around the problem, make separate functions without inlining.
- Heap::CreateCEntryStub();
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
-#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
- Heap::CreateRegExpCEntryStub();
-#endif
-#if V8_TARGET_ARCH_ARM
- Heap::CreateDirectCEntryStub();
-#endif
}
@@ -1985,6 +2074,7 @@ bool Heap::CreateInitialObjects() {
if (!maybe_obj->ToObject(&obj)) return false;
}
set_undefined_value(obj);
+ Oddball::cast(obj)->set_kind(Oddball::kUndefined);
ASSERT(!InNewSpace(undefined_value()));
// Allocate initial symbol table.
@@ -2004,39 +2094,50 @@ bool Heap::CreateInitialObjects() {
// Allocate the null_value
{ MaybeObject* maybe_obj =
- Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
+ Oddball::cast(null_value())->Initialize("null",
+ Smi::FromInt(0),
+ Oddball::kNull);
if (!maybe_obj->ToObject(&obj)) return false;
}
- { MaybeObject* maybe_obj = CreateOddball("true", Smi::FromInt(1));
+ { MaybeObject* maybe_obj = CreateOddball("true",
+ Smi::FromInt(1),
+ Oddball::kTrue);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_true_value(obj);
- { MaybeObject* maybe_obj = CreateOddball("false", Smi::FromInt(0));
+ { MaybeObject* maybe_obj = CreateOddball("false",
+ Smi::FromInt(0),
+ Oddball::kFalse);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_false_value(obj);
- { MaybeObject* maybe_obj = CreateOddball("hole", Smi::FromInt(-1));
+ { MaybeObject* maybe_obj = CreateOddball("hole",
+ Smi::FromInt(-1),
+ Oddball::kTheHole);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_the_hole_value(obj);
{ MaybeObject* maybe_obj = CreateOddball("arguments_marker",
- Smi::FromInt(-4));
+ Smi::FromInt(-4),
+ Oddball::kArgumentMarker);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_arguments_marker(obj);
- { MaybeObject* maybe_obj =
- CreateOddball("no_interceptor_result_sentinel", Smi::FromInt(-2));
+ { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
+ Smi::FromInt(-2),
+ Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_no_interceptor_result_sentinel(obj);
- { MaybeObject* maybe_obj =
- CreateOddball("termination_exception", Smi::FromInt(-3));
+ { MaybeObject* maybe_obj = CreateOddball("termination_exception",
+ Smi::FromInt(-3),
+ Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_termination_exception(obj);
@@ -2067,12 +2168,12 @@ bool Heap::CreateInitialObjects() {
}
hidden_symbol_ = String::cast(obj);
- // Allocate the proxy for __proto__.
+ // Allocate the foreign for __proto__.
{ MaybeObject* maybe_obj =
- AllocateProxy((Address) &Accessors::ObjectPrototype);
+ AllocateForeign((Address) &Accessors::ObjectPrototype);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_prototype_accessors(Proxy::cast(obj));
+ set_prototype_accessors(Foreign::cast(obj));
// Allocate the code_stubs dictionary. The initial size is set to avoid
// expanding the dictionary during bootstrapping.
@@ -2088,6 +2189,11 @@ bool Heap::CreateInitialObjects() {
}
set_non_monomorphic_cache(NumberDictionary::cast(obj));
+ { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
+
set_instanceof_cache_function(Smi::FromInt(0));
set_instanceof_cache_map(Smi::FromInt(0));
set_instanceof_cache_answer(Smi::FromInt(0));
@@ -2098,7 +2204,8 @@ bool Heap::CreateInitialObjects() {
{ MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
if (!maybe_obj->ToObject(&obj)) return false;
}
- { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(obj);
+ { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
+ obj);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_intrinsic_function_names(StringDictionary::cast(obj));
@@ -2118,20 +2225,20 @@ bool Heap::CreateInitialObjects() {
}
set_natives_source_cache(FixedArray::cast(obj));
- // Handling of script id generation is in Factory::NewScript.
+ // Handling of script id generation is in FACTORY->NewScript.
set_last_script_id(undefined_value());
// Initialize keyed lookup cache.
- KeyedLookupCache::Clear();
+ isolate_->keyed_lookup_cache()->Clear();
// Initialize context slot cache.
- ContextSlotCache::Clear();
+ isolate_->context_slot_cache()->Clear();
// Initialize descriptor cache.
- DescriptorLookupCache::Clear();
+ isolate_->descriptor_lookup_cache()->Clear();
// Initialize compilation cache.
- CompilationCache::Clear();
+ isolate_->compilation_cache()->Clear();
return true;
}
@@ -2155,7 +2262,7 @@ void Heap::FlushNumberStringCache() {
// Flush the number to string cache.
int len = number_string_cache()->length();
for (int i = 0; i < len; i++) {
- number_string_cache()->set_undefined(i);
+ number_string_cache()->set_undefined(this, i);
}
}
@@ -2207,7 +2314,7 @@ void Heap::SetNumberStringCache(Object* number, String* string) {
MaybeObject* Heap::NumberToString(Object* number,
bool check_number_string_cache) {
- Counters::number_to_string_runtime.Increment();
+ isolate_->counters()->number_to_string_runtime()->Increment();
if (check_number_string_cache) {
Object* cached = GetNumberStringCache(number);
if (cached != undefined_value()) {
@@ -2257,6 +2364,10 @@ Heap::RootListIndex Heap::RootIndexForExternalArrayType(
return kExternalUnsignedIntArrayMapRootIndex;
case kExternalFloatArray:
return kExternalFloatArrayMapRootIndex;
+ case kExternalDoubleArray:
+ return kExternalDoubleArrayMapRootIndex;
+ case kExternalPixelArray:
+ return kExternalPixelArrayMapRootIndex;
default:
UNREACHABLE();
return kUndefinedValueRootIndex;
@@ -2285,16 +2396,16 @@ MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
}
-MaybeObject* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
- // Statically ensure that it is safe to allocate proxies in paged spaces.
- STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
+MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
+ // Statically ensure that it is safe to allocate foreigns in paged spaces.
+ STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
Object* result;
- { MaybeObject* maybe_result = Allocate(proxy_map(), space);
+ { MaybeObject* maybe_result = Allocate(foreign_map(), space);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Proxy::cast(result)->set_proxy(proxy);
+ Foreign::cast(result)->set_address(address);
return result;
}
@@ -2308,10 +2419,11 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
share->set_name(name);
- Code* illegal = Builtins::builtin(Builtins::Illegal);
+ Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
share->set_code(illegal);
share->set_scope_info(SerializedScopeInfo::Empty());
- Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Code* construct_stub = isolate_->builtins()->builtin(
+ Builtins::kJSConstructStubGeneric);
share->set_construct_stub(construct_stub);
share->set_expected_nof_properties(0);
share->set_length(0);
@@ -2331,6 +2443,7 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_num_literals(0);
share->set_end_position(0);
share->set_function_token_position(0);
+ share->set_native(false);
return result;
}
@@ -2369,20 +2482,21 @@ static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
+ Heap* heap,
uint32_t c1,
uint32_t c2) {
String* symbol;
// Numeric strings have a different hash algorithm not known by
// LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
- Heap::symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
+ heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
return symbol;
// Now we know the length is 2, we might as well make use of that fact
// when building the new string.
} else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateRawAsciiString(2);
+ { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
char* dest = SeqAsciiString::cast(result)->GetChars();
@@ -2391,7 +2505,7 @@ MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
return result;
} else {
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateRawTwoByteString(2);
+ { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
uc16* dest = SeqTwoByteString::cast(result)->GetChars();
@@ -2421,7 +2535,7 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
if (length == 2) {
unsigned c1 = first->Get(0);
unsigned c2 = second->Get(0);
- return MakeOrFindTwoCharacterString(c1, c2);
+ return MakeOrFindTwoCharacterString(this, c1, c2);
}
bool first_is_ascii = first->IsAsciiRepresentation();
@@ -2431,7 +2545,7 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
// Make sure that an out of memory exception is thrown if the length
// of the new cons string is too large.
if (length > String::kMaxLength || length < 0) {
- Top::context()->mark_out_of_memory();
+ isolate()->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
@@ -2443,7 +2557,7 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
is_ascii_data_in_two_byte_string =
first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
if (is_ascii_data_in_two_byte_string) {
- Counters::string_add_runtime_ext_to_ascii.Increment();
+ isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
}
}
@@ -2484,6 +2598,7 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
char* dest = SeqAsciiString::cast(result)->GetChars();
String::WriteToFlat(first, dest, 0, first_length);
String::WriteToFlat(second, dest + first_length, 0, second_length);
+ isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
return result;
}
@@ -2519,21 +2634,21 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
MaybeObject* Heap::AllocateSubString(String* buffer,
- int start,
- int end,
- PretenureFlag pretenure) {
+ int start,
+ int end,
+ PretenureFlag pretenure) {
int length = end - start;
-
- if (length == 1) {
- return Heap::LookupSingleCharacterStringFromCode(
- buffer->Get(start));
+ if (length == 0) {
+ return empty_string();
+ } else if (length == 1) {
+ return LookupSingleCharacterStringFromCode(buffer->Get(start));
} else if (length == 2) {
// Optimization for 2-byte strings often used as keys in a decompression
// dictionary. Check whether we already have the string in the symbol
    // table to prevent creation of many unnecessary strings.
unsigned c1 = buffer->Get(start);
unsigned c2 = buffer->Get(start + 1);
- return MakeOrFindTwoCharacterString(c1, c2);
+ return MakeOrFindTwoCharacterString(this, c1, c2);
}
// Make an attempt to flatten the buffer to reduce access time.
@@ -2565,7 +2680,7 @@ MaybeObject* Heap::AllocateExternalStringFromAscii(
ExternalAsciiString::Resource* resource) {
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
- Top::context()->mark_out_of_memory();
+ isolate()->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
@@ -2588,7 +2703,7 @@ MaybeObject* Heap::AllocateExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource) {
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
- Top::context()->mark_out_of_memory();
+ isolate()->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
@@ -2598,7 +2713,7 @@ MaybeObject* Heap::AllocateExternalStringFromTwoByte(
bool is_ascii = length <= kAsciiCheckLengthLimit &&
String::IsAscii(resource->data(), static_cast<int>(length));
Map* map = is_ascii ?
- Heap::external_string_with_ascii_data_map() : Heap::external_string_map();
+ external_string_with_ascii_data_map() : external_string_map();
Object* result;
{ MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -2615,8 +2730,8 @@ MaybeObject* Heap::AllocateExternalStringFromTwoByte(
MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
if (code <= String::kMaxAsciiCharCode) {
- Object* value = Heap::single_character_string_cache()->get(code);
- if (value != Heap::undefined_value()) return value;
+ Object* value = single_character_string_cache()->get(code);
+ if (value != undefined_value()) return value;
char buffer[1];
buffer[0] = static_cast<char>(code);
@@ -2624,12 +2739,12 @@ MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
if (!maybe_result->ToObject(&result)) return maybe_result;
- Heap::single_character_string_cache()->set(code, result);
+ single_character_string_cache()->set(code, result);
return result;
}
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateRawTwoByteString(1);
+ { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
String* answer = String::cast(result);
@@ -2691,24 +2806,6 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
}
-MaybeObject* Heap::AllocatePixelArray(int length,
- uint8_t* external_pointer,
- PretenureFlag pretenure) {
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
- reinterpret_cast<PixelArray*>(result)->set_length(length);
- reinterpret_cast<PixelArray*>(result)->set_external_pointer(external_pointer);
-
- return result;
-}
-
-
MaybeObject* Heap::AllocateExternalArray(int length,
ExternalArrayType array_type,
void* external_pointer,
@@ -2733,7 +2830,8 @@ MaybeObject* Heap::AllocateExternalArray(int length,
MaybeObject* Heap::CreateCode(const CodeDesc& desc,
Code::Flags flags,
- Handle<Object> self_reference) {
+ Handle<Object> self_reference,
+ bool immovable) {
// Allocate ByteArray before the Code object, so that we do not risk
// leaving uninitialized Code object (and breaking the heap).
Object* reloc_info;
@@ -2741,12 +2839,14 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
}
- // Compute size
+ // Compute size.
int body_size = RoundUp(desc.instr_size, kObjectAlignment);
int obj_size = Code::SizeFor(body_size);
ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
MaybeObject* maybe_result;
- if (obj_size > MaxObjectSizeInPagedSpace()) {
+ // Large code objects and code objects which should stay at a fixed address
+ // are allocated in large object space.
+ if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
maybe_result = lo_space_->AllocateRawCode(obj_size);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
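// Hedged note on the new immovable flag: pages in large object space are
// never moved by the compacting collector, so a caller that needs a code
// object at a stable address can, under this patch, request something like
//
//   CreateCode(desc, flags, self_reference, true);  // immovable
//
// at the cost of always taking the large-object allocation path.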
@@ -2758,7 +2858,8 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
// Initialize the object
HeapObject::cast(result)->set_map(code_map());
Code* code = Code::cast(result);
- ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
+ ASSERT(!isolate_->code_range()->exists() ||
+ isolate_->code_range()->contains(code->address()));
code->set_instruction_size(desc.instr_size);
code->set_relocation_info(ByteArray::cast(reloc_info));
code->set_flags(flags);
@@ -2766,6 +2867,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->set_check_type(RECEIVER_MAP_CHECK);
}
code->set_deoptimization_data(empty_fixed_array());
+ code->set_next_code_flushing_candidate(undefined_value());
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@@ -2804,7 +2906,8 @@ MaybeObject* Heap::CopyCode(Code* code) {
CopyBlock(new_addr, old_addr, obj_size);
// Relocate the copy.
Code* new_code = Code::cast(result);
- ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
+ ASSERT(!isolate_->code_range()->exists() ||
+ isolate_->code_range()->contains(code->address()));
new_code->Relocate(new_addr - old_addr);
return new_code;
}
@@ -2853,7 +2956,8 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
// Relocate the copy.
- ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
+ ASSERT(!isolate_->code_range()->exists() ||
+ isolate_->code_range()->contains(code->address()));
new_code->Relocate(new_addr - old_addr);
#ifdef DEBUG
@@ -2876,9 +2980,6 @@ MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
HeapObject::cast(result)->set_map(map);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- ProducerHeapProfile::RecordJSObjectAllocation(result);
-#endif
return result;
}
@@ -2939,22 +3040,34 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
// To get fast allocation and map sharing for arguments objects we
// allocate them based on an arguments boilerplate.
+ JSObject* boilerplate;
+ int arguments_object_size;
+ bool strict_mode_callee = callee->IsJSFunction() &&
+ JSFunction::cast(callee)->shared()->strict_mode();
+ if (strict_mode_callee) {
+ boilerplate =
+ isolate()->context()->global_context()->
+ strict_mode_arguments_boilerplate();
+ arguments_object_size = kArgumentsObjectSizeStrict;
+ } else {
+ boilerplate =
+ isolate()->context()->global_context()->arguments_boilerplate();
+ arguments_object_size = kArgumentsObjectSize;
+ }
+
// This calls Copy directly rather than using Heap::AllocateRaw so we
// duplicate the check here.
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
- JSObject* boilerplate =
- Top::context()->global_context()->arguments_boilerplate();
-
// Check that the size of the boilerplate matches our
// expectations. The ArgumentsAccessStub::GenerateNewObject relies
// on the size being a known constant.
- ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());
+ ASSERT(arguments_object_size == boilerplate->map()->instance_size());
// Do the allocation.
Object* result;
{ MaybeObject* maybe_result =
- AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
+ AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -2963,14 +3076,17 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
// barrier here.
CopyBlock(HeapObject::cast(result)->address(),
boilerplate->address(),
- kArgumentsObjectSize);
+ JSObject::kHeaderSize);
- // Set the two properties.
- JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
- callee);
- JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
+ // Set the length property.
+ JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
Smi::FromInt(length),
SKIP_WRITE_BARRIER);
+ // Set the callee property for non-strict mode arguments object only.
+ if (!strict_mode_callee) {
+ JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
+ callee);
+ }
// Check the state of the object
ASSERT(JSObject::cast(result)->HasFastProperties());
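// The observable JavaScript difference behind the two boilerplates, as a
// small illustration (not taken from the diff):
//
//   function sloppy() { return arguments.callee; }  // returns sloppy
//   function strict() {
//     "use strict";
//     return arguments.callee;                      // throws TypeError
//   }
//
// which is why the callee slot is written only for non-strict callees and
// the strict-mode boilerplate uses a different, smaller map.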
@@ -3002,8 +3118,7 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
int instance_size = fun->shared()->CalculateInstanceSize();
int in_object_properties = fun->shared()->CalculateInObjectProperties();
Object* map_obj;
- { MaybeObject* maybe_map_obj =
- Heap::AllocateMap(JS_OBJECT_TYPE, instance_size);
+ { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
}
@@ -3162,6 +3277,26 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
}
+MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
+ // Allocate map.
+ // TODO(rossberg): Once we optimize proxies, think about a scheme to share
+ // maps. Will probably depend on the identity of the handler object, too.
+ Map* map;
+ MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
+ if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
+ map->set_prototype(prototype);
+ map->set_pre_allocated_property_fields(1);
+ map->set_inobject_properties(1);
+
+ // Allocate the proxy object.
+ Object* result;
+ MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ JSProxy::cast(result)->set_handler(handler);
+ return result;
+}
+
+
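// Hedged note: JS_PROXY_TYPE is new in this upgrade and presumably backs
// the then-experimental harmony proxy support, along the lines of
//
//   var p = Proxy.create(handler);  // handler lands in set_handler() above
//
// The exact surface API is an assumption here; it is not part of this diff.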
MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
ASSERT(constructor->has_initial_map());
Map* map = constructor->initial_map();
@@ -3194,12 +3329,12 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
// Fill these accessors into the dictionary.
DescriptorArray* descs = map->instance_descriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PropertyDetails details = descs->GetDetails(i);
+ PropertyDetails details(descs->GetDetails(i));
ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
PropertyDetails d =
PropertyDetails(details.attributes(), CALLBACKS, details.index());
Object* value = descs->GetCallbacksObject(i);
- { MaybeObject* maybe_value = Heap::AllocateJSGlobalPropertyCell(value);
+ { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
if (!maybe_value->ToObject(&value)) return maybe_value;
}
@@ -3225,7 +3360,7 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
// Setup the global object as a normalized object.
global->set_map(new_map);
- global->map()->set_instance_descriptors(Heap::empty_descriptor_array());
+ global->map()->clear_instance_descriptors();
global->set_properties(dictionary);
// Make sure result is a global object with properties in dictionary.
@@ -3264,7 +3399,7 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
{ MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
if (!maybe_clone->ToObject(&clone)) return maybe_clone;
}
- ASSERT(Heap::InNewSpace(clone));
+ ASSERT(InNewSpace(clone));
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
CopyBlock(HeapObject::cast(clone)->address(),
@@ -3293,9 +3428,6 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
}
// Return the new clone.
-#ifdef ENABLE_LOGGING_AND_PROFILING
- ProducerHeapProfile::RecordJSObjectAllocation(clone);
-#endif
return clone;
}
@@ -3349,8 +3481,8 @@ MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
const uc32 kMaxSupportedChar = 0xFFFF;
// Count the number of characters in the UTF-8 string and check if
// it is an ASCII string.
- Access<ScannerConstants::Utf8Decoder>
- decoder(ScannerConstants::utf8_decoder());
+ Access<UnicodeCache::Utf8Decoder>
+ decoder(isolate_->unicode_cache()->utf8_decoder());
decoder->Reset(string.start(), string.length());
int chars = 0;
while (decoder->has_more()) {
@@ -3403,12 +3535,24 @@ Map* Heap::SymbolMapForString(String* string) {
// Find the corresponding symbol map for strings.
Map* map = string->map();
- if (map == ascii_string_map()) return ascii_symbol_map();
- if (map == string_map()) return symbol_map();
- if (map == cons_string_map()) return cons_symbol_map();
- if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
- if (map == external_string_map()) return external_symbol_map();
- if (map == external_ascii_string_map()) return external_ascii_symbol_map();
+ if (map == ascii_string_map()) {
+ return ascii_symbol_map();
+ }
+ if (map == string_map()) {
+ return symbol_map();
+ }
+ if (map == cons_string_map()) {
+ return cons_symbol_map();
+ }
+ if (map == cons_ascii_string_map()) {
+ return cons_ascii_symbol_map();
+ }
+ if (map == external_string_map()) {
+ return external_symbol_map();
+ }
+ if (map == external_ascii_string_map()) {
+ return external_ascii_symbol_map();
+ }
if (map == external_string_with_ascii_data_map()) {
return external_symbol_with_ascii_data_map();
}
@@ -3582,7 +3726,7 @@ MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
{ MaybeObject* maybe_obj = AllocateRawFixedArray(len);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- if (Heap::InNewSpace(obj)) {
+ if (InNewSpace(obj)) {
HeapObject* dst = HeapObject::cast(obj);
dst->set_map(map);
CopyBlock(dst->address() + kPointerSize,
@@ -3614,7 +3758,7 @@ MaybeObject* Heap::AllocateFixedArray(int length) {
array->set_map(fixed_array_map());
array->set_length(length);
// Initialize body.
- ASSERT(!Heap::InNewSpace(undefined_value()));
+ ASSERT(!InNewSpace(undefined_value()));
MemsetPointer(array->data_start(), undefined_value(), length);
return result;
}
@@ -3645,20 +3789,21 @@ MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
+ Heap* heap,
int length,
PretenureFlag pretenure,
Object* filler) {
ASSERT(length >= 0);
- ASSERT(Heap::empty_fixed_array()->IsFixedArray());
- if (length == 0) return Heap::empty_fixed_array();
+ ASSERT(heap->empty_fixed_array()->IsFixedArray());
+ if (length == 0) return heap->empty_fixed_array();
- ASSERT(!Heap::InNewSpace(filler));
+ ASSERT(!heap->InNewSpace(filler));
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateRawFixedArray(length, pretenure);
+ { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map(Heap::fixed_array_map());
+ HeapObject::cast(result)->set_map(heap->fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
MemsetPointer(array->data_start(), filler, length);
@@ -3667,13 +3812,19 @@ MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
+ return AllocateFixedArrayWithFiller(this,
+ length,
+ pretenure,
+ undefined_value());
}
MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
+ return AllocateFixedArrayWithFiller(this,
+ length,
+ pretenure,
+ the_hole_value());
}
@@ -3691,9 +3842,65 @@ MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
}
+MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
+ int size = FixedDoubleArray::SizeFor(0);
+ Object* result;
+ { MaybeObject* maybe_result =
+ AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ // Initialize the object.
+ reinterpret_cast<FixedDoubleArray*>(result)->set_map(
+ fixed_double_array_map());
+ reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
+ return result;
+}
+
+
+MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
+ int length,
+ PretenureFlag pretenure) {
+ if (length == 0) return empty_fixed_double_array();
+
+ Object* obj;
+ { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+
+ reinterpret_cast<FixedDoubleArray*>(obj)->set_map(fixed_double_array_map());
+ FixedDoubleArray::cast(obj)->set_length(length);
+ return obj;
+}
+
+
+MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
+ PretenureFlag pretenure) {
+ if (length < 0 || length > FixedDoubleArray::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
+
+ AllocationSpace space =
+ (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ int size = FixedDoubleArray::SizeFor(length);
+ if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
+ // Too big for new space.
+ space = LO_SPACE;
+ } else if (space == OLD_DATA_SPACE &&
+ size > MaxObjectSizeInPagedSpace()) {
+ // Too big for old data space.
+ space = LO_SPACE;
+ }
+
+ AllocationSpace retry_space =
+ (size <= MaxObjectSizeInPagedSpace()) ? OLD_DATA_SPACE : LO_SPACE;
+
+ return AllocateRaw(size, space, retry_space);
+}
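// A minimal standalone sketch of the space-selection policy above (the
// constant names are placeholders, not the real V8 limits):
//
//   enum Space { kNew, kOldData, kLarge };
//   Space Pick(bool tenured, int size, int max_new, int max_paged) {
//     Space space = tenured ? kOldData : kNew;
//     if (space == kNew && size > max_new) return kLarge;
//     if (space == kOldData && size > max_paged) return kLarge;
//     return space;
//   }
//
// The retry space follows the same size test: anything that fits a page
// retries in OLD_DATA_SPACE, everything larger goes straight to LO_SPACE.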
+
+
MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateFixedArray(length, pretenure);
+ { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
@@ -3705,7 +3912,7 @@ MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
MaybeObject* Heap::AllocateGlobalContext() {
Object* result;
{ MaybeObject* maybe_result =
- Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
+ AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
@@ -3719,49 +3926,62 @@ MaybeObject* Heap::AllocateGlobalContext() {
MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateFixedArray(length);
+ { MaybeObject* maybe_result = AllocateFixedArray(length);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map(context_map());
+ context->set_map(function_context_map());
context->set_closure(function);
- context->set_fcontext(context);
- context->set_previous(NULL);
+ context->set_previous(function->context());
context->set_extension(NULL);
context->set_global(function->context()->global());
- ASSERT(!context->IsGlobalContext());
- ASSERT(context->is_function_context());
- ASSERT(result->IsContext());
- return result;
+ return context;
}
-MaybeObject* Heap::AllocateWithContext(Context* previous,
- JSObject* extension,
- bool is_catch_context) {
+MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
+ Context* previous,
+ String* name,
+ Object* thrown_object) {
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
Object* result;
{ MaybeObject* maybe_result =
- Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
+ AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ Context* context = reinterpret_cast<Context*>(result);
+ context->set_map(catch_context_map());
+ context->set_closure(function);
+ context->set_previous(previous);
+ context->set_extension(name);
+ context->set_global(previous->global());
+ context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
+ return context;
+}
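// Illustrative source-level mapping (an assumption, not shown in the diff):
//
//   try { throw obj; } catch (e) { /* body */ }
//
// allocates one of these contexts with the string "e" in the extension
// slot and obj in the extra slot, which is why the array is
// MIN_CONTEXT_SLOTS + 1 long and the STATIC_ASSERT pins the index.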
+
+
+MaybeObject* Heap::AllocateWithContext(JSFunction* function,
+ Context* previous,
+ JSObject* extension) {
+ Object* result;
+ { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map(is_catch_context ? catch_context_map() : context_map());
- context->set_closure(previous->closure());
- context->set_fcontext(previous->fcontext());
+ context->set_map(with_context_map());
+ context->set_closure(function);
context->set_previous(previous);
context->set_extension(extension);
context->set_global(previous->global());
- ASSERT(!context->IsGlobalContext());
- ASSERT(!context->is_function_context());
- ASSERT(result->IsContext());
- return result;
+ return context;
}
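// Likewise for with-scopes (illustrative):
//
//   var o = { x: 1 };
//   with (o) { x; }  // the lookup of x consults the extension object o
//                    // before walking to the previous context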
MaybeObject* Heap::AllocateStruct(InstanceType type) {
Map* map;
switch (type) {
-#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
+#define MAKE_CASE(NAME, Name, name) \
+ case NAME##_TYPE: map = name##_map(); break;
STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
default:
@@ -3772,7 +3992,7 @@ STRUCT_LIST(MAKE_CASE)
AllocationSpace space =
(size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
Object* result;
- { MaybeObject* maybe_result = Heap::Allocate(map, space);
+ { MaybeObject* maybe_result = Allocate(map, space);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Struct::cast(result)->InitializeBody(size);
@@ -3786,8 +4006,11 @@ bool Heap::IdleNotification() {
static const int kIdlesBeforeMarkCompact = 8;
static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
static const unsigned int kGCsBetweenCleanup = 4;
- static int number_idle_notifications = 0;
- static unsigned int last_gc_count = gc_count_;
+
+ if (!last_idle_notification_gc_count_init_) {
+ last_idle_notification_gc_count_ = gc_count_;
+ last_idle_notification_gc_count_init_ = true;
+ }
bool uncommit = true;
bool finished = false;
@@ -3796,56 +4019,56 @@ bool Heap::IdleNotification() {
// GCs have taken place. This allows another round of cleanup based
// on idle notifications if enough work has been carried out to
// provoke a number of garbage collections.
- if (gc_count_ - last_gc_count < kGCsBetweenCleanup) {
- number_idle_notifications =
- Min(number_idle_notifications + 1, kMaxIdleCount);
+ if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
+ number_idle_notifications_ =
+ Min(number_idle_notifications_ + 1, kMaxIdleCount);
} else {
- number_idle_notifications = 0;
- last_gc_count = gc_count_;
+ number_idle_notifications_ = 0;
+ last_idle_notification_gc_count_ = gc_count_;
}
- if (number_idle_notifications == kIdlesBeforeScavenge) {
+ if (number_idle_notifications_ == kIdlesBeforeScavenge) {
if (contexts_disposed_ > 0) {
- HistogramTimerScope scope(&Counters::gc_context);
+ HistogramTimerScope scope(isolate_->counters()->gc_context());
CollectAllGarbage(false);
} else {
CollectGarbage(NEW_SPACE);
}
new_space_.Shrink();
- last_gc_count = gc_count_;
- } else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
+ last_idle_notification_gc_count_ = gc_count_;
+ } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
// Before doing the mark-sweep collections we clear the
// compilation cache to avoid hanging on to source code and
// generated code for cached functions.
- CompilationCache::Clear();
+ isolate_->compilation_cache()->Clear();
CollectAllGarbage(false);
new_space_.Shrink();
- last_gc_count = gc_count_;
+ last_idle_notification_gc_count_ = gc_count_;
- } else if (number_idle_notifications == kIdlesBeforeMarkCompact) {
+ } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
CollectAllGarbage(true);
new_space_.Shrink();
- last_gc_count = gc_count_;
+ last_idle_notification_gc_count_ = gc_count_;
+ number_idle_notifications_ = 0;
finished = true;
-
} else if (contexts_disposed_ > 0) {
if (FLAG_expose_gc) {
contexts_disposed_ = 0;
} else {
- HistogramTimerScope scope(&Counters::gc_context);
+ HistogramTimerScope scope(isolate_->counters()->gc_context());
CollectAllGarbage(false);
- last_gc_count = gc_count_;
+ last_idle_notification_gc_count_ = gc_count_;
}
// If this is the first idle notification, we reset the
// notification count to avoid letting idle notifications for
// context disposal garbage collections start a potentially too
// aggressive idle GC cycle.
- if (number_idle_notifications <= 1) {
- number_idle_notifications = 0;
+ if (number_idle_notifications_ <= 1) {
+ number_idle_notifications_ = 0;
uncommit = false;
}
- } else if (number_idle_notifications > kIdlesBeforeMarkCompact) {
+ } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
// If we have received more than kIdlesBeforeMarkCompact idle
// notifications we do not perform any cleanup because we don't
// expect to gain much by doing so.
@@ -3855,7 +4078,7 @@ bool Heap::IdleNotification() {
// Make sure that we have no pending context disposals and
// conditionally uncommit from space.
ASSERT(contexts_disposed_ == 0);
- if (uncommit) Heap::UncommitFromSpace();
+ if (uncommit) UncommitFromSpace();
return finished;
}
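// Summary of the escalation policy above (kIdlesBeforeScavenge and
// kIdlesBeforeMarkSweep are declared just before this hunk and not shown):
//
//   count == kIdlesBeforeScavenge     -> scavenge, or a full GC if
//                                        contexts were disposed
//   count == kIdlesBeforeMarkSweep    -> clear the compilation cache,
//                                        then full mark-sweep
//   count == kIdlesBeforeMarkCompact  -> full mark-compact, report done
//
// Each step shrinks new space and records the GC count, so a burst of
// ordinary collections between notifications resets the idle cycle.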
@@ -3864,7 +4087,7 @@ bool Heap::IdleNotification() {
void Heap::Print() {
if (!HasBeenSetup()) return;
- Top::PrintStack();
+ isolate()->PrintStack();
AllSpaces spaces;
for (Space* space = spaces.next(); space != NULL; space = spaces.next())
space->Print();
@@ -3897,11 +4120,11 @@ void Heap::ReportHeapStatistics(const char* title) {
PrintF("\n");
PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
- GlobalHandles::PrintStats();
+ isolate_->global_handles()->PrintStats();
PrintF("\n");
PrintF("Heap statistics : ");
- MemoryAllocator::ReportStatistics();
+ isolate_->memory_allocator()->ReportStatistics();
PrintF("To space : ");
new_space_.ReportStatistics();
PrintF("Old pointer space : ");
@@ -3984,7 +4207,7 @@ static void VerifyPointersUnderWatermark(
Address start = page->ObjectAreaStart();
Address end = page->AllocationWatermark();
- Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
+ HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
start,
end,
visit_dirty_region,
@@ -4005,7 +4228,7 @@ static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
// When we are not in GC the Heap::InNewSpace() predicate
// checks that pointers which satisfy the predicate point into
// the active semispace.
- Heap::InNewSpace(*slot);
+ HEAP->InNewSpace(*slot);
slot_address += kPointerSize;
}
}
@@ -4074,6 +4297,26 @@ MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
}
+MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
+ int from,
+ int length) {
+ Object* symbol = NULL;
+ Object* new_table;
+ { MaybeObject* maybe_new_table =
+ symbol_table()->LookupSubStringAsciiSymbol(string,
+ from,
+ length,
+ &symbol);
+ if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
+ }
+ // Can't use set_symbol_table because SymbolTable::cast knows that
+ // SymbolTable is a singleton and checks for identity.
+ roots_[kSymbolTableRootIndex] = new_table;
+ ASSERT(symbol != NULL);
+ return symbol;
+}
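// Hedged usage sketch: the new overload lets a caller intern a slice of an
// existing sequential ASCII string directly, conceptually
//
//   heap->LookupAsciiSymbol(source, token_start, token_length);
//
// without first materializing the substring (source, token_start and
// token_length are illustrative names).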
+
+
MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
Object* symbol = NULL;
Object* new_table;
@@ -4126,7 +4369,8 @@ void Heap::ZapFromSpace() {
#endif // DEBUG
-bool Heap::IteratePointersInDirtyRegion(Address start,
+bool Heap::IteratePointersInDirtyRegion(Heap* heap,
+ Address start,
Address end,
ObjectSlotCallback copy_object_func) {
Address slot_address = start;
@@ -4134,10 +4378,10 @@ bool Heap::IteratePointersInDirtyRegion(Address start,
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
- if (Heap::InNewSpace(*slot)) {
+ if (heap->InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
copy_object_func(reinterpret_cast<HeapObject**>(slot));
- if (Heap::InNewSpace(*slot)) {
+ if (heap->InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
pointers_to_new_space_found = true;
}
@@ -4171,14 +4415,16 @@ static bool IteratePointersInDirtyMaps(Address start,
Address map_address = start;
bool pointers_to_new_space_found = false;
+ Heap* heap = HEAP;
while (map_address < end) {
- ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
+ ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
ASSERT(Memory::Object_at(map_address)->IsMap());
Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
- if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
+ if (Heap::IteratePointersInDirtyRegion(heap,
+ pointer_fields_start,
pointer_fields_end,
copy_object_func)) {
pointers_to_new_space_found = true;
@@ -4192,6 +4438,7 @@ static bool IteratePointersInDirtyMaps(Address start,
bool Heap::IteratePointersInDirtyMapsRegion(
+ Heap* heap,
Address start,
Address end,
ObjectSlotCallback copy_object_func) {
@@ -4211,7 +4458,8 @@ bool Heap::IteratePointersInDirtyMapsRegion(
Min(prev_map + Map::kPointerFieldsEndOffset, end);
contains_pointers_to_new_space =
- IteratePointersInDirtyRegion(pointer_fields_start,
+ IteratePointersInDirtyRegion(heap,
+ pointer_fields_start,
pointer_fields_end,
copy_object_func)
|| contains_pointers_to_new_space;
@@ -4233,7 +4481,8 @@ bool Heap::IteratePointersInDirtyMapsRegion(
Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
contains_pointers_to_new_space =
- IteratePointersInDirtyRegion(pointer_fields_start,
+ IteratePointersInDirtyRegion(heap,
+ pointer_fields_start,
pointer_fields_end,
copy_object_func)
|| contains_pointers_to_new_space;
@@ -4253,10 +4502,10 @@ void Heap::IterateAndMarkPointersToFromSpace(Address start,
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
- if (Heap::InFromSpace(*slot)) {
+ if (InFromSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
callback(reinterpret_cast<HeapObject**>(slot));
- if (Heap::InNewSpace(*slot)) {
+ if (InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
marks |= page->GetRegionMaskForAddress(slot_address);
}
@@ -4295,7 +4544,7 @@ uint32_t Heap::IterateDirtyRegions(
Address region_end = Min(second_region, area_end);
if (marks & mask) {
- if (visit_dirty_region(region_start, region_end, copy_object_func)) {
+ if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
newmarks |= mask;
}
}
@@ -4307,7 +4556,10 @@ uint32_t Heap::IterateDirtyRegions(
while (region_end <= area_end) {
if (marks & mask) {
- if (visit_dirty_region(region_start, region_end, copy_object_func)) {
+ if (visit_dirty_region(this,
+ region_start,
+ region_end,
+ copy_object_func)) {
newmarks |= mask;
}
}
@@ -4323,7 +4575,7 @@ uint32_t Heap::IterateDirtyRegions(
// with region end. Check whether region covering last part of area is
// dirty.
if (marks & mask) {
- if (visit_dirty_region(region_start, area_end, copy_object_func)) {
+ if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
newmarks |= mask;
}
}
@@ -4387,9 +4639,10 @@ void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
v->Synchronize("symbol_table");
- if (mode != VISIT_ALL_IN_SCAVENGE) {
+ if (mode != VISIT_ALL_IN_SCAVENGE &&
+ mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
// Scavenge collections have special processing for this.
- ExternalStringTable::Iterate(v);
+ external_string_table_.Iterate(v);
}
v->Synchronize("external_string_table");
}
@@ -4402,42 +4655,50 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
v->Synchronize("symbol");
- Bootstrapper::Iterate(v);
+ isolate_->bootstrapper()->Iterate(v);
v->Synchronize("bootstrapper");
- Top::Iterate(v);
+ isolate_->Iterate(v);
v->Synchronize("top");
Relocatable::Iterate(v);
v->Synchronize("relocatable");
#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug::Iterate(v);
+ isolate_->debug()->Iterate(v);
#endif
v->Synchronize("debug");
- CompilationCache::Iterate(v);
+ isolate_->compilation_cache()->Iterate(v);
v->Synchronize("compilationcache");
// Iterate over local handles in handle scopes.
- HandleScopeImplementer::Iterate(v);
+ isolate_->handle_scope_implementer()->Iterate(v);
v->Synchronize("handlescope");
// Iterate over the builtin code objects and code stubs in the
// heap. Note that it is not necessary to iterate over code objects
// on scavenge collections.
- if (mode != VISIT_ALL_IN_SCAVENGE) {
- Builtins::IterateBuiltins(v);
+ if (mode != VISIT_ALL_IN_SCAVENGE &&
+ mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
+ isolate_->builtins()->IterateBuiltins(v);
}
v->Synchronize("builtins");
// Iterate over global handles.
- if (mode == VISIT_ONLY_STRONG) {
- GlobalHandles::IterateStrongRoots(v);
- } else {
- GlobalHandles::IterateAllRoots(v);
+ switch (mode) {
+ case VISIT_ONLY_STRONG:
+ isolate_->global_handles()->IterateStrongRoots(v);
+ break;
+ case VISIT_ALL_IN_SCAVENGE:
+ isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
+ break;
+ case VISIT_ALL_IN_SWEEP_NEWSPACE:
+ case VISIT_ALL:
+ isolate_->global_handles()->IterateAllRoots(v);
+ break;
}
v->Synchronize("globalhandles");
// Iterate over pointers being held by inactive threads.
- ThreadManager::Iterate(v);
+ isolate_->thread_manager()->Iterate(v);
v->Synchronize("threadmanager");
// Iterate over the pointers the Serialization/Deserialization code is
@@ -4456,10 +4717,6 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
}
-// Flag is set when the heap has been configured. The heap can be repeatedly
-// configured through the API until it is setup.
-static bool heap_configured = false;
-
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
@@ -4506,7 +4763,7 @@ bool Heap::ConfigureHeap(int max_semispace_size,
// The old generation is paged.
max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
- heap_configured = true;
+ configured_ = true;
return true;
}
@@ -4534,11 +4791,13 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->cell_space_size = cell_space_->Size();
*stats->cell_space_capacity = cell_space_->Capacity();
*stats->lo_space_size = lo_space_->Size();
- GlobalHandles::RecordStats(stats);
- *stats->memory_allocator_size = MemoryAllocator::Size();
+ isolate_->global_handles()->RecordStats(stats);
+ *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
*stats->memory_allocator_capacity =
- MemoryAllocator::Size() + MemoryAllocator::Available();
+ isolate()->memory_allocator()->Size() +
+ isolate()->memory_allocator()->Available();
*stats->os_error = OS::GetLastError();
+ isolate()->memory_allocator()->Available();
if (take_snapshot) {
HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
for (HeapObject* obj = iterator.next();
@@ -4570,8 +4829,177 @@ int Heap::PromotedExternalMemorySize() {
- amount_of_external_allocated_memory_at_last_global_gc_;
}
+#ifdef DEBUG
+
+// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
+static const int kMarkTag = 2;
+
+
+class HeapDebugUtils {
+ public:
+ explicit HeapDebugUtils(Heap* heap)
+ : search_for_any_global_(false),
+ search_target_(NULL),
+ found_target_(false),
+ object_stack_(20),
+ heap_(heap) {
+ }
+
+ class MarkObjectVisitor : public ObjectVisitor {
+ public:
+ explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
+
+ void VisitPointers(Object** start, Object** end) {
+      // Mark all HeapObject pointers in [start, end)
+ for (Object** p = start; p < end; p++) {
+ if ((*p)->IsHeapObject())
+ utils_->MarkObjectRecursively(p);
+ }
+ }
+
+ HeapDebugUtils* utils_;
+ };
+
+ void MarkObjectRecursively(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+
+ HeapObject* obj = HeapObject::cast(*p);
+
+ Object* map = obj->map();
+
+ if (!map->IsHeapObject()) return; // visited before
+
+ if (found_target_) return; // stop if target found
+ object_stack_.Add(obj);
+ if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
+ (!search_for_any_global_ && (obj == search_target_))) {
+ found_target_ = true;
+ return;
+ }
+
+ // not visited yet
+ Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
+
+ Address map_addr = map_p->address();
+
+ obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
+
+ MarkObjectRecursively(&map);
+
+ MarkObjectVisitor mark_visitor(this);
+
+ obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
+ &mark_visitor);
+
+ if (!found_target_) // don't pop if found the target
+ object_stack_.RemoveLast();
+ }
+
+
+ class UnmarkObjectVisitor : public ObjectVisitor {
+ public:
+ explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
+
+ void VisitPointers(Object** start, Object** end) {
+      // Unmark all HeapObject pointers in [start, end)
+ for (Object** p = start; p < end; p++) {
+ if ((*p)->IsHeapObject())
+ utils_->UnmarkObjectRecursively(p);
+ }
+ }
+
+ HeapDebugUtils* utils_;
+ };
+
+
+ void UnmarkObjectRecursively(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+
+ HeapObject* obj = HeapObject::cast(*p);
+
+ Object* map = obj->map();
+
+ if (map->IsHeapObject()) return; // unmarked already
+
+ Address map_addr = reinterpret_cast<Address>(map);
+
+ map_addr -= kMarkTag;
+
+ ASSERT_TAG_ALIGNED(map_addr);
+
+ HeapObject* map_p = HeapObject::FromAddress(map_addr);
+
+ obj->set_map(reinterpret_cast<Map*>(map_p));
+
+ UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
+
+ UnmarkObjectVisitor unmark_visitor(this);
+
+ obj->IterateBody(Map::cast(map_p)->instance_type(),
+ obj->SizeFromMap(Map::cast(map_p)),
+ &unmark_visitor);
+ }
+
+
+ void MarkRootObjectRecursively(Object** root) {
+ if (search_for_any_global_) {
+ ASSERT(search_target_ == NULL);
+ } else {
+ ASSERT(search_target_->IsHeapObject());
+ }
+ found_target_ = false;
+ object_stack_.Clear();
+
+ MarkObjectRecursively(root);
+ UnmarkObjectRecursively(root);
+
+ if (found_target_) {
+ PrintF("=====================================\n");
+ PrintF("==== Path to object ====\n");
+ PrintF("=====================================\n\n");
+
+ ASSERT(!object_stack_.is_empty());
+ for (int i = 0; i < object_stack_.length(); i++) {
+ if (i > 0) PrintF("\n |\n |\n V\n\n");
+ Object* obj = object_stack_[i];
+ obj->Print();
+ }
+ PrintF("=====================================\n");
+ }
+ }
+
+ // Helper class for visiting HeapObjects recursively.
+ class MarkRootVisitor: public ObjectVisitor {
+ public:
+ explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
+
+ void VisitPointers(Object** start, Object** end) {
+ // Visit all HeapObject pointers in [start, end)
+ for (Object** p = start; p < end; p++) {
+ if ((*p)->IsHeapObject())
+ utils_->MarkRootObjectRecursively(p);
+ }
+ }
+
+ HeapDebugUtils* utils_;
+ };
+
+ bool search_for_any_global_;
+ Object* search_target_;
+ bool found_target_;
+ List<Object*> object_stack_;
+ Heap* heap_;
+
+ friend class Heap;
+};
+
+#endif
bool Heap::Setup(bool create_heap_objects) {
+#ifdef DEBUG
+ debug_utils_ = new HeapDebugUtils(this);
+#endif
+
// Initialize heap spaces and initial maps and objects. Whenever something
// goes wrong, just return false. The caller should check the results and
// call Heap::TearDown() to release allocated memory.
@@ -4580,13 +5008,19 @@ bool Heap::Setup(bool create_heap_objects) {
// Configuration is based on the flags new-space-size (really the semispace
// size) and old-space-size if set or the initial values of semispace_size_
// and old_generation_size_ otherwise.
- if (!heap_configured) {
+ if (!configured_) {
if (!ConfigureHeapDefault()) return false;
}
- ScavengingVisitor::Initialize();
- NewSpaceScavenger::Initialize();
- MarkCompactCollector::Initialize();
+ gc_initializer_mutex->Lock();
+ static bool initialized_gc = false;
+ if (!initialized_gc) {
+ initialized_gc = true;
+ InitializeScavengingVisitorsTables();
+ NewSpaceScavenger::Initialize();
+ MarkCompactCollector::Initialize();
+ }
+ gc_initializer_mutex->Unlock();
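// The guarded block above is a process-wide "run once": with multiple
// isolates, several heaps may pass through Setup(), but the shared
// scavenging visitor tables and the mark-compact tables must be
// initialized a single time. A minimal sketch of the same pattern:
//
//   mutex->Lock();
//   static bool done = false;
//   if (!done) { done = true; InitializeSharedTables(); }
//   mutex->Unlock();
//
// (mutex and InitializeSharedTables are illustrative names.)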
MarkMapPointersAsEncoded(false);
@@ -4594,9 +5028,11 @@ bool Heap::Setup(bool create_heap_objects) {
// space. The chunk is double the size of the requested reserved
// new space size to ensure that we can find a pair of semispaces that
// are contiguous and aligned to their size.
- if (!MemoryAllocator::Setup(MaxReserved(), MaxExecutableSize())) return false;
+ if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
+ return false;
void* chunk =
- MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_);
+ isolate_->memory_allocator()->ReserveInitialChunk(
+ 4 * reserved_semispace_size_);
if (chunk == NULL) return false;
// Align the pair of semispaces to their size, which must be a power
@@ -4609,13 +5045,19 @@ bool Heap::Setup(bool create_heap_objects) {
// Initialize old pointer space.
old_pointer_space_ =
- new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
+ new OldSpace(this,
+ max_old_generation_size_,
+ OLD_POINTER_SPACE,
+ NOT_EXECUTABLE);
if (old_pointer_space_ == NULL) return false;
if (!old_pointer_space_->Setup(NULL, 0)) return false;
// Initialize old data space.
old_data_space_ =
- new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
+ new OldSpace(this,
+ max_old_generation_size_,
+ OLD_DATA_SPACE,
+ NOT_EXECUTABLE);
if (old_data_space_ == NULL) return false;
if (!old_data_space_->Setup(NULL, 0)) return false;
@@ -4624,18 +5066,18 @@ bool Heap::Setup(bool create_heap_objects) {
// On 64-bit platform(s), we put all code objects in a 2 GB range of
// virtual address space, so that they can call each other with near calls.
if (code_range_size_ > 0) {
- if (!CodeRange::Setup(code_range_size_)) {
+ if (!isolate_->code_range()->Setup(code_range_size_)) {
return false;
}
}
code_space_ =
- new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE);
+ new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
if (code_space_ == NULL) return false;
if (!code_space_->Setup(NULL, 0)) return false;
// Initialize map space.
- map_space_ = new MapSpace(FLAG_use_big_map_space
+ map_space_ = new MapSpace(this, FLAG_use_big_map_space
? max_old_generation_size_
: MapSpace::kMaxMapPageIndex * Page::kPageSize,
FLAG_max_map_space_pages,
@@ -4644,14 +5086,14 @@ bool Heap::Setup(bool create_heap_objects) {
if (!map_space_->Setup(NULL, 0)) return false;
// Initialize global property cell space.
- cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE);
+ cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
if (cell_space_ == NULL) return false;
if (!cell_space_->Setup(NULL, 0)) return false;
// The large object code space may contain code or data. We set the memory
// to be non-executable here for safety, but this means we need to enable it
// explicitly when allocating large code objects.
- lo_space_ = new LargeObjectSpace(LO_SPACE);
+ lo_space_ = new LargeObjectSpace(this, LO_SPACE);
if (lo_space_ == NULL) return false;
if (!lo_space_->Setup()) return false;
@@ -4666,19 +5108,16 @@ bool Heap::Setup(bool create_heap_objects) {
global_contexts_list_ = undefined_value();
}
- LOG(IntPtrTEvent("heap-capacity", Capacity()));
- LOG(IntPtrTEvent("heap-available", Available()));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // This should be called only after initial objects have been created.
- ProducerHeapProfile::Setup();
-#endif
+ LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
+ LOG(isolate_, IntPtrTEvent("heap-available", Available()));
return true;
}
void Heap::SetStackLimits() {
+ ASSERT(isolate_ != NULL);
+ ASSERT(isolate_ == isolate());
// On 64 bit machines, pointers are generally out of range of Smis. We write
// something that looks like an out of range Smi to the GC.
@@ -4686,10 +5125,10 @@ void Heap::SetStackLimits() {
// These are actually addresses, but the tag makes the GC ignore it.
roots_[kStackLimitRootIndex] =
reinterpret_cast<Object*>(
- (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag);
+ (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
roots_[kRealStackLimitRootIndex] =
reinterpret_cast<Object*>(
- (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag);
+ (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
@@ -4699,16 +5138,16 @@ void Heap::TearDown() {
PrintF("gc_count=%d ", gc_count_);
PrintF("mark_sweep_count=%d ", ms_count_);
PrintF("mark_compact_count=%d ", mc_count_);
- PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause());
- PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator());
+ PrintF("max_gc_pause=%d ", get_max_gc_pause());
+ PrintF("min_in_mutator=%d ", get_min_in_mutator());
PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
- GCTracer::get_max_alive_after_gc());
+ get_max_alive_after_gc());
PrintF("\n\n");
}
- GlobalHandles::TearDown();
+ isolate_->global_handles()->TearDown();
- ExternalStringTable::TearDown();
+ external_string_table_.TearDown();
new_space_.TearDown();
@@ -4748,7 +5187,12 @@ void Heap::TearDown() {
lo_space_ = NULL;
}
- MemoryAllocator::TearDown();
+ isolate_->memory_allocator()->TearDown();
+
+#ifdef DEBUG
+ delete debug_utils_;
+ debug_utils_ = NULL;
+#endif
}
@@ -4837,7 +5281,7 @@ class PrintHandleVisitor: public ObjectVisitor {
void Heap::PrintHandles() {
PrintF("Handles:\n");
PrintHandleVisitor v;
- HandleScopeImplementer::Iterate(&v);
+ isolate_->handle_scope_implementer()->Iterate(&v);
}
#endif
@@ -4846,19 +5290,19 @@ void Heap::PrintHandles() {
Space* AllSpaces::next() {
switch (counter_++) {
case NEW_SPACE:
- return Heap::new_space();
+ return HEAP->new_space();
case OLD_POINTER_SPACE:
- return Heap::old_pointer_space();
+ return HEAP->old_pointer_space();
case OLD_DATA_SPACE:
- return Heap::old_data_space();
+ return HEAP->old_data_space();
case CODE_SPACE:
- return Heap::code_space();
+ return HEAP->code_space();
case MAP_SPACE:
- return Heap::map_space();
+ return HEAP->map_space();
case CELL_SPACE:
- return Heap::cell_space();
+ return HEAP->cell_space();
case LO_SPACE:
- return Heap::lo_space();
+ return HEAP->lo_space();
default:
return NULL;
}
@@ -4868,15 +5312,15 @@ Space* AllSpaces::next() {
PagedSpace* PagedSpaces::next() {
switch (counter_++) {
case OLD_POINTER_SPACE:
- return Heap::old_pointer_space();
+ return HEAP->old_pointer_space();
case OLD_DATA_SPACE:
- return Heap::old_data_space();
+ return HEAP->old_data_space();
case CODE_SPACE:
- return Heap::code_space();
+ return HEAP->code_space();
case MAP_SPACE:
- return Heap::map_space();
+ return HEAP->map_space();
case CELL_SPACE:
- return Heap::cell_space();
+ return HEAP->cell_space();
default:
return NULL;
}
@@ -4887,11 +5331,11 @@ PagedSpace* PagedSpaces::next() {
OldSpace* OldSpaces::next() {
switch (counter_++) {
case OLD_POINTER_SPACE:
- return Heap::old_pointer_space();
+ return HEAP->old_pointer_space();
case OLD_DATA_SPACE:
- return Heap::old_data_space();
+ return HEAP->old_data_space();
case CODE_SPACE:
- return Heap::code_space();
+ return HEAP->code_space();
default:
return NULL;
}
@@ -4946,25 +5390,25 @@ ObjectIterator* SpaceIterator::CreateIterator() {
switch (current_space_) {
case NEW_SPACE:
- iterator_ = new SemiSpaceIterator(Heap::new_space(), size_func_);
+ iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
break;
case OLD_POINTER_SPACE:
- iterator_ = new HeapObjectIterator(Heap::old_pointer_space(), size_func_);
+ iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
break;
case OLD_DATA_SPACE:
- iterator_ = new HeapObjectIterator(Heap::old_data_space(), size_func_);
+ iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
break;
case CODE_SPACE:
- iterator_ = new HeapObjectIterator(Heap::code_space(), size_func_);
+ iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
break;
case MAP_SPACE:
- iterator_ = new HeapObjectIterator(Heap::map_space(), size_func_);
+ iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
break;
case CELL_SPACE:
- iterator_ = new HeapObjectIterator(Heap::cell_space(), size_func_);
+ iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
break;
case LO_SPACE:
- iterator_ = new LargeObjectIterator(Heap::lo_space(), size_func_);
+ iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
break;
}
@@ -4998,16 +5442,17 @@ class FreeListNodesFilter : public HeapObjectsFilter {
private:
void MarkFreeListNodes() {
- Heap::old_pointer_space()->MarkFreeListNodes();
- Heap::old_data_space()->MarkFreeListNodes();
- MarkCodeSpaceFreeListNodes();
- Heap::map_space()->MarkFreeListNodes();
- Heap::cell_space()->MarkFreeListNodes();
+ Heap* heap = HEAP;
+ heap->old_pointer_space()->MarkFreeListNodes();
+ heap->old_data_space()->MarkFreeListNodes();
+ MarkCodeSpaceFreeListNodes(heap);
+ heap->map_space()->MarkFreeListNodes();
+ heap->cell_space()->MarkFreeListNodes();
}
- void MarkCodeSpaceFreeListNodes() {
+ void MarkCodeSpaceFreeListNodes(Heap* heap) {
// For code space, using FreeListNode::IsFreeListNode is OK.
- HeapObjectIterator iter(Heap::code_space());
+ HeapObjectIterator iter(heap->code_space());
for (HeapObject* obj = iter.next_object();
obj != NULL;
obj = iter.next_object()) {
@@ -5069,7 +5514,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
obj->SetMark();
}
UnmarkingVisitor visitor;
- Heap::IterateRoots(&visitor, VISIT_ALL);
+ HEAP->IterateRoots(&visitor, VISIT_ALL);
while (visitor.can_process())
visitor.ProcessNext();
}
@@ -5372,7 +5817,7 @@ static intptr_t CountTotalHolesSize() {
}
-GCTracer::GCTracer()
+GCTracer::GCTracer(Heap* heap)
: start_time_(0.0),
start_size_(0),
gc_count_(0),
@@ -5381,14 +5826,16 @@ GCTracer::GCTracer()
marked_count_(0),
allocated_since_last_gc_(0),
spent_in_mutator_(0),
- promoted_objects_size_(0) {
+ promoted_objects_size_(0),
+ heap_(heap) {
// These two fields reflect the state of the previous full collection.
// Set them before they are changed by the collector.
- previous_has_compacted_ = MarkCompactCollector::HasCompacted();
- previous_marked_count_ = MarkCompactCollector::previous_marked_count();
+ previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
+ previous_marked_count_ =
+ heap_->mark_compact_collector_.previous_marked_count();
if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
start_time_ = OS::TimeCurrentMillis();
- start_size_ = Heap::SizeOfObjects();
+ start_size_ = heap_->SizeOfObjects();
for (int i = 0; i < Scope::kNumberOfScopes; i++) {
scopes_[i] = 0;
@@ -5396,10 +5843,11 @@ GCTracer::GCTracer()
in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
- allocated_since_last_gc_ = Heap::SizeOfObjects() - alive_after_last_gc_;
+ allocated_since_last_gc_ =
+ heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
- if (last_gc_end_timestamp_ > 0) {
- spent_in_mutator_ = Max(start_time_ - last_gc_end_timestamp_, 0.0);
+ if (heap_->last_gc_end_timestamp_ > 0) {
+ spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
}
}
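// Usage sketch under the new signature (assumed caller, not shown here):
//
//   { GCTracer tracer(heap);
//     // ... perform one collection ...
//   }  // ~GCTracer prints the trace line and folds the pause time
//      // into heap-owned statistics
//
// i.e. the tracer is now bound to one heap instead of reading and writing
// process-wide statics.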
@@ -5408,20 +5856,21 @@ GCTracer::~GCTracer() {
// Printf ONE line iff flag is set.
if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
- bool first_gc = (last_gc_end_timestamp_ == 0);
+ bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
- alive_after_last_gc_ = Heap::SizeOfObjects();
- last_gc_end_timestamp_ = OS::TimeCurrentMillis();
+ heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
+ heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
- int time = static_cast<int>(last_gc_end_timestamp_ - start_time_);
+ int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
// Update cumulative GC statistics if required.
if (FLAG_print_cumulative_gc_stat) {
- max_gc_pause_ = Max(max_gc_pause_, time);
- max_alive_after_gc_ = Max(max_alive_after_gc_, alive_after_last_gc_);
+ heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
+ heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
+ heap_->alive_after_last_gc_);
if (!first_gc) {
- min_in_mutator_ = Min(min_in_mutator_,
- static_cast<int>(spent_in_mutator_));
+ heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
+ static_cast<int>(spent_in_mutator_));
}
}
@@ -5446,7 +5895,8 @@ GCTracer::~GCTracer() {
PrintF("s");
break;
case MARK_COMPACTOR:
- PrintF(MarkCompactCollector::HasCompacted() ? "mc" : "ms");
+ PrintF("%s",
+ heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
break;
default:
UNREACHABLE();
@@ -5460,7 +5910,7 @@ GCTracer::~GCTracer() {
PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
- PrintF("total_size_after=%" V8_PTR_PREFIX "d ", Heap::SizeOfObjects());
+ PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
in_free_list_or_wasted_before_gc_);
PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
@@ -5472,7 +5922,7 @@ GCTracer::~GCTracer() {
}
#if defined(ENABLE_LOGGING_AND_PROFILING)
- Heap::PrintShortHeapStatistics();
+ heap_->PrintShortHeapStatistics();
#endif
}
@@ -5482,8 +5932,8 @@ const char* GCTracer::CollectorString() {
case SCAVENGER:
return "Scavenge";
case MARK_COMPACTOR:
- return MarkCompactCollector::HasCompacted() ? "Mark-compact"
- : "Mark-sweep";
+ return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
+ : "Mark-sweep";
}
return "Unknown GC";
}
@@ -5503,13 +5953,13 @@ int KeyedLookupCache::Lookup(Map* map, String* name) {
if ((key.map == map) && key.name->Equals(name)) {
return field_offsets_[index];
}
- return -1;
+ return kNotFound;
}
void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
String* symbol;
- if (Heap::LookupSymbolIfExists(name, &symbol)) {
+ if (HEAP->LookupSymbolIfExists(name, &symbol)) {
int index = Hash(map, symbol);
Key& key = keys_[index];
key.map = map;
@@ -5524,35 +5974,24 @@ void KeyedLookupCache::Clear() {
}
-KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength];
-
-
-int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength];
-
-
void DescriptorLookupCache::Clear() {
for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
}
-DescriptorLookupCache::Key
-DescriptorLookupCache::keys_[DescriptorLookupCache::kLength];
-
-int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];
-
-
#ifdef DEBUG
void Heap::GarbageCollectionGreedyCheck() {
ASSERT(FLAG_gc_greedy);
- if (Bootstrapper::IsActive()) return;
+ if (isolate_->bootstrapper()->IsActive()) return;
if (disallow_allocation_failure()) return;
CollectGarbage(NEW_SPACE);
}
#endif
-TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t)
- : type_(t) {
+TranscendentalCache::SubCache::SubCache(Type t)
+ : type_(t),
+ isolate_(Isolate::Current()) {
uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
uint32_t in1 = 0xffffffffu; // generated by the FPU.
for (int i = 0; i < kCacheSize; i++) {
@@ -5563,9 +6002,6 @@ TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t)
}
-TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches];
-
-
void TranscendentalCache::Clear() {
for (int i = 0; i < kNumberOfCaches; i++) {
if (caches_[i] != NULL) {
@@ -5579,8 +6015,8 @@ void TranscendentalCache::Clear() {
void ExternalStringTable::CleanUp() {
int last = 0;
for (int i = 0; i < new_space_strings_.length(); ++i) {
- if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
- if (Heap::InNewSpace(new_space_strings_[i])) {
+ if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
+ if (heap_->InNewSpace(new_space_strings_[i])) {
new_space_strings_[last++] = new_space_strings_[i];
} else {
old_space_strings_.Add(new_space_strings_[i]);
@@ -5589,8 +6025,8 @@ void ExternalStringTable::CleanUp() {
new_space_strings_.Rewind(last);
last = 0;
for (int i = 0; i < old_space_strings_.length(); ++i) {
- if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
- ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
+ if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
+ ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
old_space_strings_[last++] = old_space_strings_[i];
}
old_space_strings_.Rewind(last);
@@ -5604,7 +6040,4 @@ void ExternalStringTable::TearDown() {
}
-List<Object*> ExternalStringTable::new_space_strings_;
-List<Object*> ExternalStringTable::old_space_strings_;
-
} } // namespace v8::internal
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index b43b59e30..b10af6dc8 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,8 +30,10 @@
#include <math.h>
+#include "allocation.h"
#include "globals.h"
#include "list.h"
+#include "mark-compact.h"
#include "spaces.h"
#include "splay-tree-inl.h"
#include "v8-counters.h"
@@ -39,9 +41,14 @@
namespace v8 {
namespace internal {
+// TODO(isolates): remove HEAP here
+#define HEAP (_inline_get_heap_())
+class Heap;
+inline Heap* _inline_get_heap_();
+
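// Hedged note: _inline_get_heap_() is assumed (from elsewhere in this
// patch) to resolve to the current isolate's heap, so a legacy call site
//
//   HEAP->new_space();  // expands to _inline_get_heap_()->new_space()
//
// keeps compiling while static call sites are migrated isolate by isolate.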
// Defines all the roots in Heap.
-#define UNCONDITIONAL_STRONG_ROOT_LIST(V) \
+#define STRONG_ROOT_LIST(V) \
/* Put the byte array map early. We need it to be in place by the time */ \
/* the deserializer hits the next page, since it wants to put a byte */ \
/* array in the unused space at the end of the page. */ \
@@ -49,7 +56,6 @@ namespace internal {
V(Map, one_pointer_filler_map, OnePointerFillerMap) \
V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
/* Cluster the most popular ones in a few cache lines here at the top. */ \
- V(Smi, stack_limit, StackLimit) \
V(Object, undefined_value, UndefinedValue) \
V(Object, the_hole_value, TheHoleValue) \
V(Object, null_value, NullValue) \
@@ -60,29 +66,39 @@ namespace internal {
V(Map, global_context_map, GlobalContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
+ V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
V(Map, meta_map, MetaMap) \
- V(Object, termination_exception, TerminationException) \
V(Map, hash_table_map, HashTableMap) \
+ V(Smi, stack_limit, StackLimit) \
+ V(FixedArray, number_string_cache, NumberStringCache) \
+ V(Object, instanceof_cache_function, InstanceofCacheFunction) \
+ V(Object, instanceof_cache_map, InstanceofCacheMap) \
+ V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
+ V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
+ V(Object, termination_exception, TerminationException) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
+ V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray) \
+ V(String, empty_string, EmptyString) \
+ V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Map, string_map, StringMap) \
V(Map, ascii_string_map, AsciiStringMap) \
V(Map, symbol_map, SymbolMap) \
+ V(Map, cons_string_map, ConsStringMap) \
+ V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
V(Map, ascii_symbol_map, AsciiSymbolMap) \
V(Map, cons_symbol_map, ConsSymbolMap) \
V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \
V(Map, external_symbol_map, ExternalSymbolMap) \
V(Map, external_symbol_with_ascii_data_map, ExternalSymbolWithAsciiDataMap) \
V(Map, external_ascii_symbol_map, ExternalAsciiSymbolMap) \
- V(Map, cons_string_map, ConsStringMap) \
- V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
V(Map, external_string_map, ExternalStringMap) \
V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \
V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
V(Map, undetectable_string_map, UndetectableStringMap) \
V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
- V(Map, pixel_array_map, PixelArrayMap) \
+ V(Map, external_pixel_array_map, ExternalPixelArrayMap) \
V(Map, external_byte_array_map, ExternalByteArrayMap) \
V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap) \
V(Map, external_short_array_map, ExternalShortArrayMap) \
@@ -90,50 +106,33 @@ namespace internal {
V(Map, external_int_array_map, ExternalIntArrayMap) \
V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap) \
V(Map, external_float_array_map, ExternalFloatArrayMap) \
- V(Map, context_map, ContextMap) \
+ V(Map, external_double_array_map, ExternalDoubleArrayMap) \
+ V(Map, non_strict_arguments_elements_map, NonStrictArgumentsElementsMap) \
+ V(Map, function_context_map, FunctionContextMap) \
V(Map, catch_context_map, CatchContextMap) \
+ V(Map, with_context_map, WithContextMap) \
V(Map, code_map, CodeMap) \
V(Map, oddball_map, OddballMap) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, message_object_map, JSMessageObjectMap) \
- V(Map, proxy_map, ProxyMap) \
+ V(Map, foreign_map, ForeignMap) \
V(Object, nan_value, NanValue) \
V(Object, minus_zero_value, MinusZeroValue) \
- V(Object, instanceof_cache_function, InstanceofCacheFunction) \
- V(Object, instanceof_cache_map, InstanceofCacheMap) \
- V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
- V(String, empty_string, EmptyString) \
- V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Map, neander_map, NeanderMap) \
V(JSObject, message_listeners, MessageListeners) \
- V(Proxy, prototype_accessors, PrototypeAccessors) \
+ V(Foreign, prototype_accessors, PrototypeAccessors) \
V(NumberDictionary, code_stubs, CodeStubs) \
V(NumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
+ V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
- V(Code, c_entry_code, CEntryCode) \
- V(FixedArray, number_string_cache, NumberStringCache) \
- V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
V(Object, last_script_id, LastScriptId) \
V(Script, empty_script, EmptyScript) \
V(Smi, real_stack_limit, RealStackLimit) \
V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
-#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
-#define STRONG_ROOT_LIST(V) \
- UNCONDITIONAL_STRONG_ROOT_LIST(V) \
- V(Code, re_c_entry_code, RegExpCEntryCode) \
- V(Code, direct_c_entry_code, DirectCEntryCode)
-#elif V8_TARGET_ARCH_ARM
-#define STRONG_ROOT_LIST(V) \
- UNCONDITIONAL_STRONG_ROOT_LIST(V) \
- V(Code, direct_c_entry_code, DirectCEntryCode)
-#else
-#define STRONG_ROOT_LIST(V) UNCONDITIONAL_STRONG_ROOT_LIST(V)
-#endif
-
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
V(SymbolTable, symbol_table, SymbolTable)
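[Editor's note: STRONG_ROOT_LIST and ROOT_LIST are X-macros: each client passes its own V to stamp out one declaration per root. A sketch of the typical consumption pattern, mirroring the RootListIndex enum that appears further down in this header:]

  #define SKETCH_ROOT_INDEX(type, name, camel_name) k##camel_name##RootIndex,
  enum SketchRootListIndex {
    STRONG_ROOT_LIST(SKETCH_ROOT_INDEX)  // one enumerator per strong root
    kSketchRootListLength
  };
  #undef SKETCH_ROOT_INDEX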
@@ -145,7 +144,6 @@ namespace internal {
V(StringImpl_symbol, "StringImpl") \
V(arguments_symbol, "arguments") \
V(Arguments_symbol, "Arguments") \
- V(arguments_shadow_symbol, ".arguments") \
V(call_symbol, "call") \
V(apply_symbol, "apply") \
V(caller_symbol, "caller") \
@@ -161,6 +159,7 @@ namespace internal {
V(function_symbol, "function") \
V(length_symbol, "length") \
V(name_symbol, "name") \
+ V(native_symbol, "native") \
V(number_symbol, "number") \
V(Number_symbol, "Number") \
V(nan_symbol, "NaN") \
@@ -184,10 +183,14 @@ namespace internal {
V(value_of_symbol, "valueOf") \
V(InitializeVarGlobal_symbol, "InitializeVarGlobal") \
V(InitializeConstGlobal_symbol, "InitializeConstGlobal") \
- V(KeyedLoadSpecialized_symbol, "KeyedLoadSpecialized") \
- V(KeyedStoreSpecialized_symbol, "KeyedStoreSpecialized") \
- V(KeyedLoadPixelArray_symbol, "KeyedLoadPixelArray") \
- V(KeyedStorePixelArray_symbol, "KeyedStorePixelArray") \
+ V(KeyedLoadElementMonomorphic_symbol, \
+ "KeyedLoadElementMonomorphic") \
+ V(KeyedLoadElementPolymorphic_symbol, \
+ "KeyedLoadElementPolymorphic") \
+ V(KeyedStoreElementMonomorphic_symbol, \
+ "KeyedStoreElementMonomorphic") \
+ V(KeyedStoreElementPolymorphic_symbol, \
+ "KeyedStoreElementPolymorphic") \
V(stack_overflow_symbol, "kStackOverflowBoilerplate") \
V(illegal_access_symbol, "illegal access") \
V(out_of_memory_symbol, "out-of-memory") \
@@ -216,19 +219,21 @@ namespace internal {
V(identity_hash_symbol, "v8::IdentityHash") \
V(closure_symbol, "(closure)") \
V(use_strict, "use strict") \
- V(KeyedLoadExternalArray_symbol, "KeyedLoadExternalArray") \
- V(KeyedStoreExternalArray_symbol, "KeyedStoreExternalArray")
-
+ V(dot_symbol, ".") \
+ V(anonymous_function_symbol, "(anonymous function)")
// Forward declarations.
class GCTracer;
class HeapStats;
+class Isolate;
class WeakObjectRetainer;
-typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);
+typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
+ Object** pointer);
-typedef bool (*DirtyRegionCallback)(Address start,
+typedef bool (*DirtyRegionCallback)(Heap* heap,
+ Address start,
Address end,
ObjectSlotCallback copy_object_func);
@@ -236,103 +241,178 @@ typedef bool (*DirtyRegionCallback)(Address start,
// The all-static Heap captures the interface to the global object heap.
// All JavaScript contexts in this process share the same object heap.

-class Heap : public AllStatic {
+#ifdef DEBUG
+class HeapDebugUtils;
+#endif
+
+
+// A queue of objects promoted during scavenge. Each object is accompanied
+// by its size to avoid dereferencing a map pointer for scanning.
+class PromotionQueue {
+ public:
+ PromotionQueue() : front_(NULL), rear_(NULL) { }
+
+ void Initialize(Address start_address) {
+ front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
+ }
+
+ bool is_empty() { return front_ <= rear_; }
+
+ inline void insert(HeapObject* target, int size);
+
+ void remove(HeapObject** target, int* size) {
+ *target = reinterpret_cast<HeapObject*>(*(--front_));
+ *size = static_cast<int>(*(--front_));
+ // Assert no underflow.
+ ASSERT(front_ >= rear_);
+ }
+
+ private:
+ // The front of the queue is higher in memory than the rear.
+ intptr_t* front_;
+ intptr_t* rear_;
+
+ DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
+};
+
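[Editor's note: the matching insert() lives in heap-inl.h and is not part of this hunk. Given how remove() pops and how is_empty() compares the cursors, it presumably writes the pair in mirrored order, with both cursors growing downward from start_address. A sketch under that assumption:]

  // Assumed inline counterpart (not shown in this diff):
  void PromotionQueue::insert(HeapObject* target, int size) {
    *(--rear_) = reinterpret_cast<intptr_t>(target);  // popped first by remove()
    *(--rear_) = size;                                // popped second
  }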
+
+// The external string table is the place where all external strings are
+// registered. We need to keep track of such strings to properly
+// finalize them.
+class ExternalStringTable {
+ public:
+ // Registers an external string.
+ inline void AddString(String* string);
+
+ inline void Iterate(ObjectVisitor* v);
+
+ // Restores the internal invariant and gets rid of collected strings.
+ // Must be called after each Iterate() that modified the strings.
+ void CleanUp();
+
+ // Destroys all allocated memory.
+ void TearDown();
+
+ private:
+ ExternalStringTable() { }
+
+ friend class Heap;
+
+ inline void Verify();
+
+ inline void AddOldString(String* string);
+
+ // Notifies the table that only a prefix of the new list is valid.
+ inline void ShrinkNewStrings(int position);
+
+ // To speed up scavenge collections, new space strings are kept
+ // separate from old space strings.
+ List<Object*> new_space_strings_;
+ List<Object*> old_space_strings_;
+
+ Heap* heap_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
+};
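[Editor's note: the two-list split exists so the scavenger, which only moves new-space objects, can restrict its scanning to new_space_strings_; old-space entries are only touched by full collections. A minimal model of that bookkeeping -- the real AddString/AddOldString are inline elsewhere, and the function name here is illustrative:]

  // Sketch only; Heap, String, Object, List are the v8::internal types above.
  void SketchAddString(Heap* heap, List<Object*>* new_list,
                       List<Object*>* old_list, String* string) {
    if (heap->InNewSpace(string)) {
      new_list->Add(string);   // scanned on every scavenge
    } else {
      old_list->Add(string);   // only visited by full GCs
    }
  }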
+
+
+class Heap {
public:
// Configure heap size before setup. Return false if the heap has been
// setup already.
- static bool ConfigureHeap(int max_semispace_size,
- int max_old_gen_size,
- int max_executable_size);
- static bool ConfigureHeapDefault();
+ bool ConfigureHeap(int max_semispace_size,
+ int max_old_gen_size,
+ int max_executable_size);
+ bool ConfigureHeapDefault();
// Initializes the global object heap. If create_heap_objects is true,
// also creates the basic non-mutable objects.
// Returns whether it succeeded.
- static bool Setup(bool create_heap_objects);
+ bool Setup(bool create_heap_objects);
// Destroys all memory allocated by the heap.
- static void TearDown();
+ void TearDown();
// Set the stack limit in the roots_ array. Some architectures generate
// code that looks here, because it is faster than loading from the static
// jslimit_/real_jslimit_ variable in the StackGuard.
- static void SetStackLimits();
+ void SetStackLimits();
// Returns whether Setup has been called.
- static bool HasBeenSetup();
+ bool HasBeenSetup();
// Returns the maximum amount of memory reserved for the heap. For
// the young generation, we reserve 4 times the amount needed for a
// semi space. The young generation consists of two semi spaces and
// we reserve twice the amount needed for those in order to ensure
// that new space can be aligned to its size.
- static intptr_t MaxReserved() {
+ intptr_t MaxReserved() {
return 4 * reserved_semispace_size_ + max_old_generation_size_;
}
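[Editor's note: worked through, the factor of four comes from new space being two semispaces, reserved at twice their committed need so new space can be aligned to its size. With illustrative values of an 8 MB reserved semispace and a 512 MB old-generation cap, MaxReserved() = 4 * 8 MB + 512 MB = 544 MB.]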
- static int MaxSemiSpaceSize() { return max_semispace_size_; }
- static int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
- static int InitialSemiSpaceSize() { return initial_semispace_size_; }
- static intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
- static intptr_t MaxExecutableSize() { return max_executable_size_; }
+ int MaxSemiSpaceSize() { return max_semispace_size_; }
+ int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
+ int InitialSemiSpaceSize() { return initial_semispace_size_; }
+ intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
+ intptr_t MaxExecutableSize() { return max_executable_size_; }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
- static intptr_t Capacity();
+ intptr_t Capacity();
// Returns the amount of memory currently committed for the heap.
- static intptr_t CommittedMemory();
+ intptr_t CommittedMemory();
// Returns the amount of executable memory currently committed for the heap.
- static intptr_t CommittedMemoryExecutable();
+ intptr_t CommittedMemoryExecutable();
// Returns the available bytes in space w/o growing.
// Heap doesn't guarantee that it can allocate an object that requires
// all available bytes. Check MaxHeapObjectSize() instead.
- static intptr_t Available();
+ intptr_t Available();
// Returns the maximum object size in paged space.
- static inline int MaxObjectSizeInPagedSpace();
+ inline int MaxObjectSizeInPagedSpace();
// Returns the size of all objects residing in the heap.
- static intptr_t SizeOfObjects();
+ intptr_t SizeOfObjects();
// Return the starting address and a mask for the new space. And-masking an
// address with the mask will result in the start address of the new space
// for all addresses in either semispace.
- static Address NewSpaceStart() { return new_space_.start(); }
- static uintptr_t NewSpaceMask() { return new_space_.mask(); }
- static Address NewSpaceTop() { return new_space_.top(); }
-
- static NewSpace* new_space() { return &new_space_; }
- static OldSpace* old_pointer_space() { return old_pointer_space_; }
- static OldSpace* old_data_space() { return old_data_space_; }
- static OldSpace* code_space() { return code_space_; }
- static MapSpace* map_space() { return map_space_; }
- static CellSpace* cell_space() { return cell_space_; }
- static LargeObjectSpace* lo_space() { return lo_space_; }
-
- static bool always_allocate() { return always_allocate_scope_depth_ != 0; }
- static Address always_allocate_scope_depth_address() {
+ Address NewSpaceStart() { return new_space_.start(); }
+ uintptr_t NewSpaceMask() { return new_space_.mask(); }
+ Address NewSpaceTop() { return new_space_.top(); }
+
+ NewSpace* new_space() { return &new_space_; }
+ OldSpace* old_pointer_space() { return old_pointer_space_; }
+ OldSpace* old_data_space() { return old_data_space_; }
+ OldSpace* code_space() { return code_space_; }
+ MapSpace* map_space() { return map_space_; }
+ CellSpace* cell_space() { return cell_space_; }
+ LargeObjectSpace* lo_space() { return lo_space_; }
+
+ bool always_allocate() { return always_allocate_scope_depth_ != 0; }
+ Address always_allocate_scope_depth_address() {
return reinterpret_cast<Address>(&always_allocate_scope_depth_);
}
- static bool linear_allocation() {
+ bool linear_allocation() {
return linear_allocation_scope_depth_ != 0;
}
- static Address* NewSpaceAllocationTopAddress() {
+ Address* NewSpaceAllocationTopAddress() {
return new_space_.allocation_top_address();
}
- static Address* NewSpaceAllocationLimitAddress() {
+ Address* NewSpaceAllocationLimitAddress() {
return new_space_.allocation_limit_address();
}
// Uncommit unused semi space.
- static bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+ bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the heap by marking all spaces read-only/writable.
- static void Protect();
- static void Unprotect();
+ void Protect();
+ void Unprotect();
#endif
// Allocates and initializes a new JavaScript object based on a
@@ -340,71 +420,75 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateJSObject(
+ MUST_USE_RESULT MaybeObject* AllocateJSObject(
JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
// Allocates and initializes a new global object based on a constructor.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateGlobalObject(
- JSFunction* constructor);
+ MUST_USE_RESULT MaybeObject* AllocateGlobalObject(JSFunction* constructor);
// Returns a deep copy of the JavaScript object.
// Properties and elements are copied too.
// Returns failure if allocation failed.
- MUST_USE_RESULT static MaybeObject* CopyJSObject(JSObject* source);
+ MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source);
// Allocates the function prototype.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateFunctionPrototype(
- JSFunction* function);
+ MUST_USE_RESULT MaybeObject* AllocateFunctionPrototype(JSFunction* function);
+
+ // Allocates a Harmony Proxy.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ MUST_USE_RESULT MaybeObject* AllocateJSProxy(Object* handler,
+ Object* prototype);
// Reinitialize a JSGlobalProxy based on a constructor. The object
// must have the same size as objects allocated using the
// constructor. The object is reinitialized and behaves as an
// object that has been freshly allocated using the constructor.
- MUST_USE_RESULT static MaybeObject* ReinitializeJSGlobalProxy(
- JSFunction* constructor,
- JSGlobalProxy* global);
+ MUST_USE_RESULT MaybeObject* ReinitializeJSGlobalProxy(
+ JSFunction* constructor, JSGlobalProxy* global);
// Allocates and initializes a new JavaScript object based on a map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateJSObjectFromMap(
+ MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMap(
Map* map, PretenureFlag pretenure = NOT_TENURED);
// Allocates a heap object based on the map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* Allocate(Map* map, AllocationSpace space);
+ MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space);
// Allocates a JS Map in the heap.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateMap(InstanceType instance_type,
- int instance_size);
+ MUST_USE_RESULT MaybeObject* AllocateMap(InstanceType instance_type,
+ int instance_size);
// Allocates a partial map for bootstrapping.
- MUST_USE_RESULT static MaybeObject* AllocatePartialMap(
- InstanceType instance_type,
- int instance_size);
+ MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
+ int instance_size);
// Allocate a map for the specified function
- MUST_USE_RESULT static MaybeObject* AllocateInitialMap(JSFunction* fun);
+ MUST_USE_RESULT MaybeObject* AllocateInitialMap(JSFunction* fun);
// Allocates an empty code cache.
- MUST_USE_RESULT static MaybeObject* AllocateCodeCache();
+ MUST_USE_RESULT MaybeObject* AllocateCodeCache();
+
+ // Allocates an empty PolymorphicCodeCache.
+ MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
// Clear the Instanceof cache (used when a prototype changes).
- static void ClearInstanceofCache() {
- set_instanceof_cache_function(the_hole_value());
- }
+ inline void ClearInstanceofCache();
// Allocates and fully initializes a String. There are two String
// encodings: ASCII and two byte. One should choose between the three string
@@ -424,16 +508,16 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateStringFromAscii(
+ MUST_USE_RESULT MaybeObject* AllocateStringFromAscii(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT static inline MaybeObject* AllocateStringFromUtf8(
+ MUST_USE_RESULT inline MaybeObject* AllocateStringFromUtf8(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT static MaybeObject* AllocateStringFromUtf8Slow(
+ MUST_USE_RESULT MaybeObject* AllocateStringFromUtf8Slow(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT static MaybeObject* AllocateStringFromTwoByte(
+ MUST_USE_RESULT MaybeObject* AllocateStringFromTwoByte(
Vector<const uc16> str,
PretenureFlag pretenure = NOT_TENURED);
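[Editor's note: these MUST_USE_RESULT allocators follow the same call-site convention used throughout V8 at this revision: unpack the MaybeObject* and propagate a retry failure to the caller, which can then GC and retry. A sketch, with a hypothetical wrapper name:]

  MaybeObject* SketchMakeString(Heap* heap, Vector<const char> str) {
    Object* result;
    { MaybeObject* maybe = heap->AllocateStringFromAscii(str);
      if (!maybe->ToObject(&result)) return maybe;  // RetryAfterGC bubbles up
    }
    return result;  // Object* is-a MaybeObject*, so this signals success
  }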
@@ -441,27 +525,25 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT static inline MaybeObject* AllocateSymbol(
- Vector<const char> str,
- int chars,
- uint32_t hash_field);
+ MUST_USE_RESULT inline MaybeObject* AllocateSymbol(Vector<const char> str,
+ int chars,
+ uint32_t hash_field);
- MUST_USE_RESULT static inline MaybeObject* AllocateAsciiSymbol(
+ MUST_USE_RESULT inline MaybeObject* AllocateAsciiSymbol(
Vector<const char> str,
uint32_t hash_field);
- MUST_USE_RESULT static inline MaybeObject* AllocateTwoByteSymbol(
+ MUST_USE_RESULT inline MaybeObject* AllocateTwoByteSymbol(
Vector<const uc16> str,
uint32_t hash_field);
- MUST_USE_RESULT static MaybeObject* AllocateInternalSymbol(
+ MUST_USE_RESULT MaybeObject* AllocateInternalSymbol(
unibrow::CharacterStream* buffer, int chars, uint32_t hash_field);
- MUST_USE_RESULT static MaybeObject* AllocateExternalSymbol(
+ MUST_USE_RESULT MaybeObject* AllocateExternalSymbol(
Vector<const char> str,
int chars);
-
// Allocates and partially initializes a String. There are two String
// encodings: ASCII and two byte. These functions allocate a string of the
// given length and set its map and length fields. The characters of the
@@ -469,10 +551,10 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateRawAsciiString(
+ MUST_USE_RESULT MaybeObject* AllocateRawAsciiString(
int length,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT static MaybeObject* AllocateRawTwoByteString(
+ MUST_USE_RESULT MaybeObject* AllocateRawTwoByteString(
int length,
PretenureFlag pretenure = NOT_TENURED);
@@ -480,35 +562,27 @@ class Heap : public AllStatic {
// A cache is used for ascii codes.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* LookupSingleCharacterStringFromCode(
+ MUST_USE_RESULT MaybeObject* LookupSingleCharacterStringFromCode(
uint16_t code);
// Allocate a byte array of the specified length
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateByteArray(int length,
- PretenureFlag pretenure);
+ MUST_USE_RESULT MaybeObject* AllocateByteArray(int length,
+ PretenureFlag pretenure);
// Allocate a non-tenured byte array of the specified length
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateByteArray(int length);
-
- // Allocate a pixel array of the specified length
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocatePixelArray(int length,
- uint8_t* external_pointer,
- PretenureFlag pretenure);
+ MUST_USE_RESULT MaybeObject* AllocateByteArray(int length);
// Allocates an external array of the specified length and type.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateExternalArray(
+ MUST_USE_RESULT MaybeObject* AllocateExternalArray(
int length,
ExternalArrayType array_type,
void* external_pointer,
@@ -518,132 +592,146 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateJSGlobalPropertyCell(
- Object* value);
+ MUST_USE_RESULT MaybeObject* AllocateJSGlobalPropertyCell(Object* value);
// Allocates a fixed array initialized with undefined values
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateFixedArray(
- int length,
- PretenureFlag pretenure);
+ MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length,
+ PretenureFlag pretenure);
// Allocates a fixed array initialized with undefined values
- MUST_USE_RESULT static MaybeObject* AllocateFixedArray(int length);
+ MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length);
// Allocates an uninitialized fixed array. It must be filled by the caller.
//
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateUninitializedFixedArray(
- int length);
+ MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedArray(int length);
// Make a copy of src and return it. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT static inline MaybeObject* CopyFixedArray(FixedArray* src);
+ MUST_USE_RESULT inline MaybeObject* CopyFixedArray(FixedArray* src);
// Make a copy of src, set the map, and return the copy. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT static MaybeObject* CopyFixedArrayWithMap(FixedArray* src,
- Map* map);
+ MUST_USE_RESULT MaybeObject* CopyFixedArrayWithMap(FixedArray* src, Map* map);
// Allocates a fixed array initialized with the hole values.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithHoles(
+ MUST_USE_RESULT MaybeObject* AllocateFixedArrayWithHoles(
+ int length,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ MUST_USE_RESULT MaybeObject* AllocateRawFixedDoubleArray(
+ int length,
+ PretenureFlag pretenure);
+
+ // Allocates a fixed double array with uninitialized values. Returns
+ // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+ // Please note this does not perform a garbage collection.
+ MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedDoubleArray(
int length,
PretenureFlag pretenure = NOT_TENURED);
// AllocateHashTable is identical to AllocateFixedArray except
// that the resulting object has hash_table_map as map.
- MUST_USE_RESULT static MaybeObject* AllocateHashTable(
+ MUST_USE_RESULT MaybeObject* AllocateHashTable(
int length, PretenureFlag pretenure = NOT_TENURED);
// Allocate a global (but otherwise uninitialized) context.
- MUST_USE_RESULT static MaybeObject* AllocateGlobalContext();
+ MUST_USE_RESULT MaybeObject* AllocateGlobalContext();
// Allocate a function context.
- MUST_USE_RESULT static MaybeObject* AllocateFunctionContext(
- int length,
- JSFunction* closure);
-
+ MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length,
+ JSFunction* function);
+
+ // Allocate a catch context.
+ MUST_USE_RESULT MaybeObject* AllocateCatchContext(JSFunction* function,
+ Context* previous,
+ String* name,
+ Object* thrown_object);
// Allocate a 'with' context.
- MUST_USE_RESULT static MaybeObject* AllocateWithContext(
- Context* previous,
- JSObject* extension,
- bool is_catch_context);
+ MUST_USE_RESULT MaybeObject* AllocateWithContext(JSFunction* function,
+ Context* previous,
+ JSObject* extension);
// Allocates a new utility object in the old generation.
- MUST_USE_RESULT static MaybeObject* AllocateStruct(InstanceType type);
+ MUST_USE_RESULT MaybeObject* AllocateStruct(InstanceType type);
// Allocates a function initialized with a shared part.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateFunction(
+ MUST_USE_RESULT MaybeObject* AllocateFunction(
Map* function_map,
SharedFunctionInfo* shared,
Object* prototype,
PretenureFlag pretenure = TENURED);
- // Indicies for direct access into argument objects.
+ // Arguments object size.
static const int kArgumentsObjectSize =
JSObject::kHeaderSize + 2 * kPointerSize;
- static const int arguments_callee_index = 0;
- static const int arguments_length_index = 1;
+ // Strict mode arguments have no callee, so the object is smaller.
+ static const int kArgumentsObjectSizeStrict =
+ JSObject::kHeaderSize + 1 * kPointerSize;
+ // Indices for direct access into argument objects.
+ static const int kArgumentsLengthIndex = 0;
+ // callee is only valid in non-strict mode.
+ static const int kArgumentsCalleeIndex = 1;
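[Editor's note: the constants above encode the in-object layout of the two arguments-object shapes:]

  //   non-strict: [ JSObject header | length | callee ]  kArgumentsObjectSize
  //   strict:     [ JSObject header | length ]           kArgumentsObjectSizeStrict
  // so length always sits at index 0, and only non-strict objects carry the
  // callee slot at index 1.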
// Allocates an arguments object - optionally with an elements array.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateArgumentsObject(Object* callee,
- int length);
+ MUST_USE_RESULT MaybeObject* AllocateArgumentsObject(
+ Object* callee, int length);
// Same as NewNumberFromDouble, but may return a preallocated/immutable
// number object (e.g., minus_zero_value_, nan_value_)
- MUST_USE_RESULT static MaybeObject* NumberFromDouble(
+ MUST_USE_RESULT MaybeObject* NumberFromDouble(
double value, PretenureFlag pretenure = NOT_TENURED);
// Allocates a HeapNumber from value.
- MUST_USE_RESULT static MaybeObject* AllocateHeapNumber(
+ MUST_USE_RESULT MaybeObject* AllocateHeapNumber(
double value,
PretenureFlag pretenure);
- // pretenure = NOT_TENURED.
- MUST_USE_RESULT static MaybeObject* AllocateHeapNumber(double value);
+ // pretenure = NOT_TENURED
+ MUST_USE_RESULT MaybeObject* AllocateHeapNumber(double value);
// Converts an int into either a Smi or a HeapNumber object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static inline MaybeObject* NumberFromInt32(int32_t value);
+ MUST_USE_RESULT inline MaybeObject* NumberFromInt32(int32_t value);
// Converts an int into either a Smi or a HeapNumber object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static inline MaybeObject* NumberFromUint32(uint32_t value);
+ MUST_USE_RESULT inline MaybeObject* NumberFromUint32(uint32_t value);
- // Allocates a new proxy object.
+ // Allocates a new foreign object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateProxy(
- Address proxy,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT MaybeObject* AllocateForeign(
+ Address address, PretenureFlag pretenure = NOT_TENURED);
// Allocates a new SharedFunctionInfo object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateSharedFunctionInfo(Object* name);
+ MUST_USE_RESULT MaybeObject* AllocateSharedFunctionInfo(Object* name);
// Allocates a new JSMessageObject object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note that this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateJSMessageObject(
+ MUST_USE_RESULT MaybeObject* AllocateJSMessageObject(
String* type,
JSArray* arguments,
int start_position,
@@ -656,8 +744,8 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateConsString(String* first,
- String* second);
+ MUST_USE_RESULT MaybeObject* AllocateConsString(String* first,
+ String* second);
// Allocates a new sub string object which is a substring of an underlying
// string buffer stretching from the index start (inclusive) to the index
@@ -665,7 +753,7 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateSubString(
+ MUST_USE_RESULT MaybeObject* AllocateSubString(
String* buffer,
int start,
int end,
@@ -676,28 +764,27 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* AllocateExternalStringFromAscii(
+ MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii(
ExternalAsciiString::Resource* resource);
- MUST_USE_RESULT static MaybeObject* AllocateExternalStringFromTwoByte(
+ MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource);
// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
- static inline void FinalizeExternalString(String* string);
+ inline void FinalizeExternalString(String* string);
// Allocates an uninitialized object. The memory is non-executable if the
// hardware and OS allow.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT static inline MaybeObject* AllocateRaw(
- int size_in_bytes,
- AllocationSpace space,
- AllocationSpace retry_space);
+ MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes,
+ AllocationSpace space,
+ AllocationSpace retry_space);
// Initialize a filler object to keep the ability to iterate over the heap
// when shortening objects.
- static void CreateFillerObjectAt(Address addr, int size);
+ void CreateFillerObjectAt(Address addr, int size);
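[Editor's note: a filler is a dummy heap object stamped over freed space so heap iteration still sees a valid object stream; the one_pointer_filler_map / two_pointer_filler_map roots in the list above exist for exactly this. A hypothetical right-trim as an illustrative use of the API:]

  //   int delta = old_size - new_size;
  //   heap->CreateFillerObjectAt(object->address() + new_size, delta);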
// Makes a new native code object
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -705,36 +792,40 @@ class Heap : public AllStatic {
// self_reference. This allows generated code to reference its own Code
// object by containing this pointer.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* CreateCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference);
+ MUST_USE_RESULT MaybeObject* CreateCode(const CodeDesc& desc,
+ Code::Flags flags,
+ Handle<Object> self_reference,
+ bool immovable = false);
- MUST_USE_RESULT static MaybeObject* CopyCode(Code* code);
+ MUST_USE_RESULT MaybeObject* CopyCode(Code* code);
// Copy the code and scope info part of the code object, but insert
// the provided data as the relocation information.
- MUST_USE_RESULT static MaybeObject* CopyCode(Code* code,
- Vector<byte> reloc_info);
+ MUST_USE_RESULT MaybeObject* CopyCode(Code* code, Vector<byte> reloc_info);
// Finds the symbol for string in the symbol table.
// If not found, a new symbol is added to the table and returned.
// Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* LookupSymbol(Vector<const char> str);
- MUST_USE_RESULT static MaybeObject* LookupAsciiSymbol(Vector<const char> str);
- MUST_USE_RESULT static MaybeObject* LookupTwoByteSymbol(
+ MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str);
+ MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str);
+ MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(
Vector<const uc16> str);
- MUST_USE_RESULT static MaybeObject* LookupAsciiSymbol(const char* str) {
+ MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(const char* str) {
return LookupSymbol(CStrVector(str));
}
- MUST_USE_RESULT static MaybeObject* LookupSymbol(String* str);
- static bool LookupSymbolIfExists(String* str, String** symbol);
- static bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
+ MUST_USE_RESULT MaybeObject* LookupSymbol(String* str);
+ MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Handle<SeqAsciiString> string,
+ int from,
+ int length);
+
+ bool LookupSymbolIfExists(String* str, String** symbol);
+ bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
// Compute the matching symbol map for a string if possible.
// NULL is returned if string is in new space or not flattened.
- static Map* SymbolMapForString(String* str);
+ Map* SymbolMapForString(String* str);
// Tries to flatten a string before compare operation.
//
@@ -743,60 +834,60 @@ class Heap : public AllStatic {
// string might stay non-flat even when a failure is not returned.
//
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT static inline MaybeObject* PrepareForCompare(String* str);
+ MUST_USE_RESULT inline MaybeObject* PrepareForCompare(String* str);
// Converts the given boolean condition to JavaScript boolean value.
- static Object* ToBoolean(bool condition) {
- return condition ? true_value() : false_value();
- }
+ inline Object* ToBoolean(bool condition);
// Code that should be run before and after each GC. Includes some
// reporting/verification activities when compiled with DEBUG set.
- static void GarbageCollectionPrologue();
- static void GarbageCollectionEpilogue();
+ void GarbageCollectionPrologue();
+ void GarbageCollectionEpilogue();
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
- static bool CollectGarbage(AllocationSpace space, GarbageCollector collector);
+ bool CollectGarbage(AllocationSpace space, GarbageCollector collector);
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
- inline static bool CollectGarbage(AllocationSpace space);
+ inline bool CollectGarbage(AllocationSpace space);
// Performs a full garbage collection. Force compaction if the
// parameter is true.
- static void CollectAllGarbage(bool force_compaction);
+ void CollectAllGarbage(bool force_compaction);
// Last hope GC, should try to squeeze as much as possible.
- static void CollectAllAvailableGarbage();
+ void CollectAllAvailableGarbage();
// Notify the heap that a context has been disposed.
- static int NotifyContextDisposed() { return ++contexts_disposed_; }
+ int NotifyContextDisposed() { return ++contexts_disposed_; }
// Utility to invoke the scavenger. This is needed in test code to
// ensure correct callback for weak global handles.
- static void PerformScavenge();
+ void PerformScavenge();
+
+ PromotionQueue* promotion_queue() { return &promotion_queue_; }
#ifdef DEBUG
// Utility used with flag gc-greedy.
- static void GarbageCollectionGreedyCheck();
+ void GarbageCollectionGreedyCheck();
#endif
- static void AddGCPrologueCallback(
+ void AddGCPrologueCallback(
GCEpilogueCallback callback, GCType gc_type_filter);
- static void RemoveGCPrologueCallback(GCEpilogueCallback callback);
+ void RemoveGCPrologueCallback(GCEpilogueCallback callback);
- static void AddGCEpilogueCallback(
+ void AddGCEpilogueCallback(
GCEpilogueCallback callback, GCType gc_type_filter);
- static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
+ void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
- static void SetGlobalGCPrologueCallback(GCCallback callback) {
+ void SetGlobalGCPrologueCallback(GCCallback callback) {
ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL));
global_gc_prologue_callback_ = callback;
}
- static void SetGlobalGCEpilogueCallback(GCCallback callback) {
+ void SetGlobalGCEpilogueCallback(GCCallback callback) {
ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
global_gc_epilogue_callback_ = callback;
}
@@ -804,10 +895,10 @@ class Heap : public AllStatic {
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
#define ROOT_ACCESSOR(type, name, camel_name) \
- static inline type* name() { \
+ type* name() { \
return type::cast(roots_[k##camel_name##RootIndex]); \
} \
- static inline type* raw_unchecked_##name() { \
+ type* raw_unchecked_##name() { \
return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
}
ROOT_LIST(ROOT_ACCESSOR)
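[Editor's note: for one concrete root, V(Map, meta_map, MetaMap), the accessor pair above expands to (sketch of the generated code):]

  //   Map* meta_map() {
  //     return Map::cast(roots_[kMetaMapRootIndex]);
  //   }
  //   Map* raw_unchecked_meta_map() {
  //     return reinterpret_cast<Map*>(roots_[kMetaMapRootIndex]);
  //   }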
@@ -815,13 +906,13 @@ class Heap : public AllStatic {
// Utility type maps
#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
- static inline Map* name##_map() { \
+ Map* name##_map() { \
return Map::cast(roots_[k##Name##MapRootIndex]); \
}
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
-#define SYMBOL_ACCESSOR(name, str) static inline String* name() { \
+#define SYMBOL_ACCESSOR(name, str) String* name() { \
return String::cast(roots_[k##name##RootIndex]); \
}
SYMBOL_LIST(SYMBOL_ACCESSOR)
@@ -829,19 +920,19 @@ class Heap : public AllStatic {
// The hidden_symbol is special because it is the empty string, but does
// not match the empty string.
- static String* hidden_symbol() { return hidden_symbol_; }
+ String* hidden_symbol() { return hidden_symbol_; }
- static void set_global_contexts_list(Object* object) {
+ void set_global_contexts_list(Object* object) {
global_contexts_list_ = object;
}
- static Object* global_contexts_list() { return global_contexts_list_; }
+ Object* global_contexts_list() { return global_contexts_list_; }
// Iterates over all roots in the heap.
- static void IterateRoots(ObjectVisitor* v, VisitMode mode);
+ void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
- static void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
+ void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all the other roots in the heap.
- static void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
+ void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
enum ExpectedPageWatermarkState {
WATERMARK_SHOULD_BE_VALID,
@@ -855,7 +946,7 @@ class Heap : public AllStatic {
// can_preallocate_during_iteration should be set to true.
// All pages will be marked as having invalid watermark upon
// iteration completion.
- static void IterateDirtyRegions(
+ void IterateDirtyRegions(
PagedSpace* space,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback callback,
@@ -865,22 +956,23 @@ class Heap : public AllStatic {
// Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
// memory interval from start to top. For each dirty region call a
// visit_dirty_region callback. Return updated bitvector of dirty marks.
- static uint32_t IterateDirtyRegions(uint32_t marks,
- Address start,
- Address end,
- DirtyRegionCallback visit_dirty_region,
- ObjectSlotCallback callback);
+ uint32_t IterateDirtyRegions(uint32_t marks,
+ Address start,
+ Address end,
+ DirtyRegionCallback visit_dirty_region,
+ ObjectSlotCallback callback);
// Iterate pointers to the from-semispace of new space found in the memory interval
// from start to end.
// Update dirty marks for page containing start address.
- static void IterateAndMarkPointersToFromSpace(Address start,
- Address end,
- ObjectSlotCallback callback);
+ void IterateAndMarkPointersToFromSpace(Address start,
+ Address end,
+ ObjectSlotCallback callback);
// Iterate pointers to new space found in memory interval from start to end.
// Return true if pointers to new space were found.
- static bool IteratePointersInDirtyRegion(Address start,
+ static bool IteratePointersInDirtyRegion(Heap* heap,
+ Address start,
Address end,
ObjectSlotCallback callback);
@@ -888,127 +980,129 @@ class Heap : public AllStatic {
// Iterate pointers to new space found in memory interval from start to end.
// This interval is considered to belong to the map space.
// Return true if pointers to new space were found.
- static bool IteratePointersInDirtyMapsRegion(Address start,
+ static bool IteratePointersInDirtyMapsRegion(Heap* heap,
+ Address start,
Address end,
ObjectSlotCallback callback);
// Returns whether the object resides in new space.
- static inline bool InNewSpace(Object* object);
- static inline bool InFromSpace(Object* object);
- static inline bool InToSpace(Object* object);
+ inline bool InNewSpace(Object* object);
+ inline bool InFromSpace(Object* object);
+ inline bool InToSpace(Object* object);
// Checks whether an address/object is in the heap (including auxiliary
// area and unused area).
- static bool Contains(Address addr);
- static bool Contains(HeapObject* value);
+ bool Contains(Address addr);
+ bool Contains(HeapObject* value);
// Checks whether an address/object is in a space.
// Currently used by tests, serialization and heap verification only.
- static bool InSpace(Address addr, AllocationSpace space);
- static bool InSpace(HeapObject* value, AllocationSpace space);
+ bool InSpace(Address addr, AllocationSpace space);
+ bool InSpace(HeapObject* value, AllocationSpace space);
// Finds out which space an object should get promoted to based on its type.
- static inline OldSpace* TargetSpace(HeapObject* object);
- static inline AllocationSpace TargetSpaceId(InstanceType type);
+ inline OldSpace* TargetSpace(HeapObject* object);
+ inline AllocationSpace TargetSpaceId(InstanceType type);
// Sets the stub_cache_ (only used when expanding the dictionary).
- static void public_set_code_stubs(NumberDictionary* value) {
+ void public_set_code_stubs(NumberDictionary* value) {
roots_[kCodeStubsRootIndex] = value;
}
// Support for computing object sizes for old objects during GCs. Returns
// a function that is guaranteed to be safe for computing object sizes in
// the current GC phase.
- static HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
+ HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
return gc_safe_size_of_old_object_;
}
// Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
- static void public_set_non_monomorphic_cache(NumberDictionary* value) {
+ void public_set_non_monomorphic_cache(NumberDictionary* value) {
roots_[kNonMonomorphicCacheRootIndex] = value;
}
- static void public_set_empty_script(Script* script) {
+ void public_set_empty_script(Script* script) {
roots_[kEmptyScriptRootIndex] = script;
}
// Update the next script id.
- static inline void SetLastScriptId(Object* last_script_id);
+ inline void SetLastScriptId(Object* last_script_id);
// Generated code can embed this address to get access to the roots.
- static Object** roots_address() { return roots_; }
+ Object** roots_address() { return roots_; }
// Get address of global contexts list for serialization support.
- static Object** global_contexts_list_address() {
+ Object** global_contexts_list_address() {
return &global_contexts_list_;
}
#ifdef DEBUG
- static void Print();
- static void PrintHandles();
+ void Print();
+ void PrintHandles();
// Verify the heap is in its normal state before or after a GC.
- static void Verify();
+ void Verify();
// Report heap statistics.
- static void ReportHeapStatistics(const char* title);
- static void ReportCodeStatistics(const char* title);
+ void ReportHeapStatistics(const char* title);
+ void ReportCodeStatistics(const char* title);
// Fill in bogus values in from space
- static void ZapFromSpace();
+ void ZapFromSpace();
#endif
#if defined(ENABLE_LOGGING_AND_PROFILING)
// Print short heap statistics.
- static void PrintShortHeapStatistics();
+ void PrintShortHeapStatistics();
#endif
// Makes a new symbol object
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT static MaybeObject* CreateSymbol(const char* str,
- int length,
- int hash);
- MUST_USE_RESULT static MaybeObject* CreateSymbol(String* str);
+ MUST_USE_RESULT MaybeObject* CreateSymbol(
+ const char* str, int length, int hash);
+ MUST_USE_RESULT MaybeObject* CreateSymbol(String* str);
// Write barrier support for address[offset] = o.
- static inline void RecordWrite(Address address, int offset);
+ inline void RecordWrite(Address address, int offset);
// Write barrier support for address[start : start + len[ = o.
- static inline void RecordWrites(Address address, int start, int len);
+ inline void RecordWrites(Address address, int start, int len);
// Given an address occupied by a live code object, return that object.
- static Object* FindCodeObject(Address a);
+ Object* FindCodeObject(Address a);
// Invoke Shrink on shrinkable spaces.
- static void Shrink();
+ void Shrink();
enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
- static inline HeapState gc_state() { return gc_state_; }
+ inline HeapState gc_state() { return gc_state_; }
+
+ inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
#ifdef DEBUG
- static bool IsAllocationAllowed() { return allocation_allowed_; }
- static inline bool allow_allocation(bool enable);
+ bool IsAllocationAllowed() { return allocation_allowed_; }
+ inline bool allow_allocation(bool enable);
- static bool disallow_allocation_failure() {
+ bool disallow_allocation_failure() {
return disallow_allocation_failure_;
}
- static void TracePathToObject(Object* target);
- static void TracePathToGlobal();
+ void TracePathToObject(Object* target);
+ void TracePathToGlobal();
#endif
// Callback function passed to Heap::Iterate etc. Copies an object if
// necessary; the object might be promoted to an old space. The caller must
// ensure the precondition that the object is (a) a heap object and (b) in
// the heap's from space.
- static void ScavengePointer(HeapObject** p);
+ static inline void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
// Commits from space if it is uncommitted.
- static void EnsureFromSpaceIsCommitted();
+ void EnsureFromSpaceIsCommitted();
// Support for partial snapshots. After calling this we can allocate a
// certain number of bytes using only linear allocation (with a
@@ -1016,7 +1110,7 @@ class Heap : public AllStatic {
// or causing a GC. It returns true if space was reserved or false if a GC is
// needed. For paged spaces the space requested must include the space wasted
// at the end of each page when allocating linearly.
- static void ReserveSpace(
+ void ReserveSpace(
int new_space_size,
int pointer_space_size,
int data_space_size,
@@ -1029,45 +1123,44 @@ class Heap : public AllStatic {
// Support for the API.
//
- static bool CreateApiObjects();
+ bool CreateApiObjects();
// Attempt to find the number in a small cache. If we find it, return
// the string representation of the number. Otherwise return undefined.
- static Object* GetNumberStringCache(Object* number);
+ Object* GetNumberStringCache(Object* number);
// Update the cache with a new number-string pair.
- static void SetNumberStringCache(Object* number, String* str);
+ void SetNumberStringCache(Object* number, String* str);
// Adjusts the amount of registered external memory.
// Returns the adjusted value.
- static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
+ inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
// Allocate uninitialized fixed array.
- MUST_USE_RESULT static MaybeObject* AllocateRawFixedArray(int length);
- MUST_USE_RESULT static MaybeObject* AllocateRawFixedArray(
- int length,
- PretenureFlag pretenure);
+ MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length);
+ MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
+ PretenureFlag pretenure);
// True if we have reached the allocation limit in the old generation that
// should force the next GC (caused normally) to be a full one.
- static bool OldGenerationPromotionLimitReached() {
+ bool OldGenerationPromotionLimitReached() {
return (PromotedSpaceSize() + PromotedExternalMemorySize())
> old_gen_promotion_limit_;
}
- static intptr_t OldGenerationSpaceAvailable() {
+ intptr_t OldGenerationSpaceAvailable() {
return old_gen_allocation_limit_ -
(PromotedSpaceSize() + PromotedExternalMemorySize());
}
// True if we have reached the allocation limit in the old generation that
// should artificially cause a GC right now.
- static bool OldGenerationAllocationLimitReached() {
+ bool OldGenerationAllocationLimitReached() {
return OldGenerationSpaceAvailable() < 0;
}
// Can be called when the embedding application is idle.
- static bool IdleNotification();
+ bool IdleNotification();
// Declare all the root indices.
enum RootListIndex {
@@ -1089,76 +1182,114 @@ class Heap : public AllStatic {
kRootListLength
};
- MUST_USE_RESULT static MaybeObject* NumberToString(
- Object* number,
- bool check_number_string_cache = true);
+ MUST_USE_RESULT MaybeObject* NumberToString(
+ Object* number, bool check_number_string_cache = true);
- static Map* MapForExternalArrayType(ExternalArrayType array_type);
- static RootListIndex RootIndexForExternalArrayType(
+ Map* MapForExternalArrayType(ExternalArrayType array_type);
+ RootListIndex RootIndexForExternalArrayType(
ExternalArrayType array_type);
- static void RecordStats(HeapStats* stats, bool take_snapshot = false);
+ void RecordStats(HeapStats* stats, bool take_snapshot = false);
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
- static inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
- Address src,
- int byte_size);
+ inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+ Address src,
+ int byte_size);
// Optimized version of memmove for blocks with pointer size aligned sizes and
// pointer size aligned addresses.
static inline void MoveBlock(Address dst, Address src, int byte_size);
- static inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
- Address src,
- int byte_size);
+ inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+ Address src,
+ int byte_size);
// Check the new space expansion criteria and expand semispaces if they were hit.
- static void CheckNewSpaceExpansionCriteria();
+ void CheckNewSpaceExpansionCriteria();
- static inline void IncrementYoungSurvivorsCounter(int survived) {
+ inline void IncrementYoungSurvivorsCounter(int survived) {
young_survivors_after_last_gc_ = survived;
survived_since_last_expansion_ += survived;
}
- static void UpdateNewSpaceReferencesInExternalStringTable(
+ void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
- static void ProcessWeakReferences(WeakObjectRetainer* retainer);
+ void ProcessWeakReferences(WeakObjectRetainer* retainer);
// Helper function that governs the promotion policy from new space to
// old. If the object's old address lies below the new space's age
// mark or if we've already filled the bottom 1/16th of the to space,
// we try to promote this object.
- static inline bool ShouldBePromoted(Address old_address, int object_size);
+ inline bool ShouldBePromoted(Address old_address, int object_size);
+
+ int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
+
+ void ClearJSFunctionResultCaches();
+
+ void ClearNormalizedMapCaches();
+
+ GCTracer* tracer() { return tracer_; }
+
+ double total_regexp_code_generated() { return total_regexp_code_generated_; }
+ void IncreaseTotalRegexpCodeGenerated(int size) {
+ total_regexp_code_generated_ += size;
+ }
+
+ // Returns maximum GC pause.
+ int get_max_gc_pause() { return max_gc_pause_; }
+
+ // Returns maximum size of objects alive after GC.
+ intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
+
+ // Returns minimal interval between two subsequent collections.
+ int get_min_in_mutator() { return min_in_mutator_; }
+
+ MarkCompactCollector* mark_compact_collector() {
+ return &mark_compact_collector_;
+ }
- static int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
+ ExternalStringTable* external_string_table() {
+ return &external_string_table_;
+ }
- static void ClearJSFunctionResultCaches();
+ inline Isolate* isolate();
+ bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
- static void ClearNormalizedMapCaches();
+ void CallGlobalGCPrologueCallback() {
+ if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
+ }
- static GCTracer* tracer() { return tracer_; }
+ void CallGlobalGCEpilogueCallback() {
+ if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
+ }
private:
- static int reserved_semispace_size_;
- static int max_semispace_size_;
- static int initial_semispace_size_;
- static intptr_t max_old_generation_size_;
- static intptr_t max_executable_size_;
- static intptr_t code_range_size_;
+ Heap();
+
+ // This can be calculated directly from a pointer to the heap; however, it is
+ // more expedient to get at the isolate directly from within Heap methods.
+ Isolate* isolate_;
+
+ int reserved_semispace_size_;
+ int max_semispace_size_;
+ int initial_semispace_size_;
+ intptr_t max_old_generation_size_;
+ intptr_t max_executable_size_;
+ intptr_t code_range_size_;
// For keeping track of how much data has survived
// scavenge since last new space expansion.
- static int survived_since_last_expansion_;
+ int survived_since_last_expansion_;
- static int always_allocate_scope_depth_;
- static int linear_allocation_scope_depth_;
+ int always_allocate_scope_depth_;
+ int linear_allocation_scope_depth_;
// For keeping track of context disposals.
- static int contexts_disposed_;
+ int contexts_disposed_;
#if defined(V8_TARGET_ARCH_X64)
static const int kMaxObjectSizeInNewSpace = 1024*KB;
@@ -1166,76 +1297,79 @@ class Heap : public AllStatic {
static const int kMaxObjectSizeInNewSpace = 512*KB;
#endif
- static NewSpace new_space_;
- static OldSpace* old_pointer_space_;
- static OldSpace* old_data_space_;
- static OldSpace* code_space_;
- static MapSpace* map_space_;
- static CellSpace* cell_space_;
- static LargeObjectSpace* lo_space_;
- static HeapState gc_state_;
+ NewSpace new_space_;
+ OldSpace* old_pointer_space_;
+ OldSpace* old_data_space_;
+ OldSpace* code_space_;
+ MapSpace* map_space_;
+ CellSpace* cell_space_;
+ LargeObjectSpace* lo_space_;
+ HeapState gc_state_;
+ int gc_post_processing_depth_;
// Returns the size of objects residing in non-new spaces.
- static intptr_t PromotedSpaceSize();
+ intptr_t PromotedSpaceSize();
// Returns the amount of external memory registered since the last global GC.
- static int PromotedExternalMemorySize();
+ int PromotedExternalMemorySize();
- static int mc_count_; // how many mark-compact collections happened
- static int ms_count_; // how many mark-sweep collections happened
- static unsigned int gc_count_; // how many gc happened
+ int mc_count_; // how many mark-compact collections happened
+ int ms_count_; // how many mark-sweep collections happened
+ unsigned int gc_count_; // how many GCs have happened
// Total length of the strings we failed to flatten since the last GC.
- static int unflattened_strings_length_;
+ int unflattened_strings_length_;
#define ROOT_ACCESSOR(type, name, camel_name) \
- static inline void set_##name(type* value) { \
+ inline void set_##name(type* value) { \
roots_[k##camel_name##RootIndex] = value; \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
#ifdef DEBUG
- static bool allocation_allowed_;
+ bool allocation_allowed_;
// If the --gc-interval flag is set to a positive value, this
// variable holds the number of allocations remaining until the
// next failure and garbage collection.
- static int allocation_timeout_;
+ int allocation_timeout_;
// Do we expect to be able to handle allocation failure at this
// time?
- static bool disallow_allocation_failure_;
+ bool disallow_allocation_failure_;
+
+ HeapDebugUtils* debug_utils_;
#endif // DEBUG
// Limit that triggers a global GC on the next (normally caused) GC. This
// is checked when we have already decided to do a GC to help determine
// which collector to invoke.
- static intptr_t old_gen_promotion_limit_;
+ intptr_t old_gen_promotion_limit_;
// Limit that triggers a global GC as soon as is reasonable. This is
// checked before expanding a paged space in the old generation and on
// every allocation in large object space.
- static intptr_t old_gen_allocation_limit_;
+ intptr_t old_gen_allocation_limit_;
// Limit on the amount of externally allocated memory allowed
// between global GCs. If reached a global GC is forced.
- static intptr_t external_allocation_limit_;
+ intptr_t external_allocation_limit_;
// The amount of external memory registered through the API and kept
// alive by global handles.
- static int amount_of_external_allocated_memory_;
+ int amount_of_external_allocated_memory_;
// Caches the amount of external memory registered at the last global GC.
- static int amount_of_external_allocated_memory_at_last_global_gc_;
+ int amount_of_external_allocated_memory_at_last_global_gc_;
// Indicates that an allocation has failed in the old generation since the
// last GC.
- static int old_gen_exhausted_;
+ int old_gen_exhausted_;
- static Object* roots_[kRootListLength];
+ Object* roots_[kRootListLength];
- static Object* global_contexts_list_;
+ Object* global_contexts_list_;
struct StringTypeTable {
InstanceType type;
@@ -1260,7 +1394,7 @@ class Heap : public AllStatic {
// The special hidden symbol which is an empty string, but does not match
// any string when looked up in properties.
- static String* hidden_symbol_;
+ String* hidden_symbol_;
// GC callback function, called before and after mark-compact GC.
// Allocations in the callback function are disallowed.
@@ -1274,7 +1408,7 @@ class Heap : public AllStatic {
GCPrologueCallback callback;
GCType gc_type;
};
- static List<GCPrologueCallbackPair> gc_prologue_callbacks_;
+ List<GCPrologueCallbackPair> gc_prologue_callbacks_;
struct GCEpilogueCallbackPair {
GCEpilogueCallbackPair(GCEpilogueCallback callback, GCType gc_type)
@@ -1286,91 +1420,96 @@ class Heap : public AllStatic {
GCEpilogueCallback callback;
GCType gc_type;
};
- static List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
+ List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
- static GCCallback global_gc_prologue_callback_;
- static GCCallback global_gc_epilogue_callback_;
+ GCCallback global_gc_prologue_callback_;
+ GCCallback global_gc_epilogue_callback_;
// Support for computing object sizes during GC.
- static HeapObjectCallback gc_safe_size_of_old_object_;
+ HeapObjectCallback gc_safe_size_of_old_object_;
static int GcSafeSizeOfOldObject(HeapObject* object);
static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
// Update the GC state. Called from the mark-compact collector.
- static void MarkMapPointersAsEncoded(bool encoded) {
+ void MarkMapPointersAsEncoded(bool encoded) {
gc_safe_size_of_old_object_ = encoded
? &GcSafeSizeOfOldObjectWithEncodedMap
: &GcSafeSizeOfOldObject;
}
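// An assumed call pattern, for illustration only: the mark-compact
// collector brackets the compaction phase with
//
//   heap->MarkMapPointersAsEncoded(true);   // sizes read via encoded maps
//   // ... relocate objects while map words are temporarily encoded ...
//   heap->MarkMapPointersAsEncoded(false);  // plain map pointers again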
// Checks whether a global GC is necessary
- static GarbageCollector SelectGarbageCollector(AllocationSpace space);
+ GarbageCollector SelectGarbageCollector(AllocationSpace space);
// Performs garbage collection
// Returns whether there is a chance another major GC could
// collect more garbage.
- static bool PerformGarbageCollection(GarbageCollector collector,
- GCTracer* tracer);
+ bool PerformGarbageCollection(GarbageCollector collector,
+ GCTracer* tracer);
+
+ static const intptr_t kMinimumPromotionLimit = 2 * MB;
+ static const intptr_t kMinimumAllocationLimit = 8 * MB;
+
+ inline void UpdateOldSpaceLimits();
// Allocate an uninitialized object in map space. The behavior is identical
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) it can reduce code size
// (since both AllocateRaw and AllocateRawMap are inlined).
- MUST_USE_RESULT static inline MaybeObject* AllocateRawMap();
+ MUST_USE_RESULT inline MaybeObject* AllocateRawMap();
// Allocate an uninitialized object in the global property cell space.
- MUST_USE_RESULT static inline MaybeObject* AllocateRawCell();
+ MUST_USE_RESULT inline MaybeObject* AllocateRawCell();
// Initializes a JSObject based on its map.
- static void InitializeJSObjectFromMap(JSObject* obj,
- FixedArray* properties,
- Map* map);
+ void InitializeJSObjectFromMap(JSObject* obj,
+ FixedArray* properties,
+ Map* map);
- static bool CreateInitialMaps();
- static bool CreateInitialObjects();
+ bool CreateInitialMaps();
+ bool CreateInitialObjects();
// These two Create*EntryStub functions are here and forced not to be inlined
// because of a gcc-4.4 bug that assigns wrong vtable entries.
- NO_INLINE(static void CreateCEntryStub());
- NO_INLINE(static void CreateJSEntryStub());
- NO_INLINE(static void CreateJSConstructEntryStub());
- NO_INLINE(static void CreateRegExpCEntryStub());
- NO_INLINE(static void CreateDirectCEntryStub());
+ NO_INLINE(void CreateJSEntryStub());
+ NO_INLINE(void CreateJSConstructEntryStub());
- static void CreateFixedStubs();
+ void CreateFixedStubs();
- MUST_USE_RESULT static MaybeObject* CreateOddball(const char* to_string,
- Object* to_number);
+ MaybeObject* CreateOddball(const char* to_string,
+ Object* to_number,
+ byte kind);
// Allocate empty fixed array.
- MUST_USE_RESULT static MaybeObject* AllocateEmptyFixedArray();
+ MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
+
+ // Allocate empty fixed double array.
+ MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
+
+ void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
// Performs a minor collection in new generation.
- static void Scavenge();
+ void Scavenge();
static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
+ Heap* heap,
Object** pointer);
- static Address DoScavenge(ObjectVisitor* scavenge_visitor,
- Address new_space_front);
+ Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
// Performs a major collection in the whole heap.
- static void MarkCompact(GCTracer* tracer);
+ void MarkCompact(GCTracer* tracer);
// Code to be run before and after mark-compact.
- static void MarkCompactPrologue(bool is_compacting);
+ void MarkCompactPrologue(bool is_compacting);
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
- static void CompletelyClearInstanceofCache() {
- set_instanceof_cache_map(the_hole_value());
- set_instanceof_cache_function(the_hole_value());
- }
+ inline void CompletelyClearInstanceofCache();
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Record statistics before and after garbage collection.
- static void ReportStatisticsBeforeGC();
- static void ReportStatisticsAfterGC();
+ void ReportStatisticsBeforeGC();
+ void ReportStatisticsAfterGC();
#endif
// Slow part of scavenge object.
@@ -1382,39 +1521,42 @@ class Heap : public AllStatic {
// other parts of the VM could use it. Specifically, a function that creates
// instances of type JS_FUNCTION_TYPE benefits from the use of this function.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT static inline MaybeObject* InitializeFunction(
+ MUST_USE_RESULT inline MaybeObject* InitializeFunction(
JSFunction* function,
SharedFunctionInfo* shared,
Object* prototype);
- static GCTracer* tracer_;
+ // Total size of RegExp code ever generated, in bytes
+ double total_regexp_code_generated_;
+
+ GCTracer* tracer_;
// Initializes the number to string cache based on the max semispace size.
- MUST_USE_RESULT static MaybeObject* InitializeNumberStringCache();
+ MUST_USE_RESULT MaybeObject* InitializeNumberStringCache();
// Flush the number to string cache.
- static void FlushNumberStringCache();
+ void FlushNumberStringCache();
- static void UpdateSurvivalRateTrend(int start_new_space_size);
+ void UpdateSurvivalRateTrend(int start_new_space_size);
enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
static const int kYoungSurvivalRateThreshold = 90;
static const int kYoungSurvivalRateAllowedDeviation = 15;
- static int young_survivors_after_last_gc_;
- static int high_survival_rate_period_length_;
- static double survival_rate_;
- static SurvivalRateTrend previous_survival_rate_trend_;
- static SurvivalRateTrend survival_rate_trend_;
+ int young_survivors_after_last_gc_;
+ int high_survival_rate_period_length_;
+ double survival_rate_;
+ SurvivalRateTrend previous_survival_rate_trend_;
+ SurvivalRateTrend survival_rate_trend_;
- static void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) {
+ void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) {
ASSERT(survival_rate_trend != FLUCTUATING);
previous_survival_rate_trend_ = survival_rate_trend_;
survival_rate_trend_ = survival_rate_trend;
}
- static SurvivalRateTrend survival_rate_trend() {
+ SurvivalRateTrend survival_rate_trend() {
if (survival_rate_trend_ == STABLE) {
return STABLE;
} else if (previous_survival_rate_trend_ == STABLE) {
@@ -1426,7 +1568,7 @@ class Heap : public AllStatic {
}
}
- static bool IsStableOrIncreasingSurvivalTrend() {
+ bool IsStableOrIncreasingSurvivalTrend() {
switch (survival_rate_trend()) {
case STABLE:
case INCREASING:
@@ -1436,22 +1578,64 @@ class Heap : public AllStatic {
}
}
- static bool IsIncreasingSurvivalTrend() {
+ bool IsIncreasingSurvivalTrend() {
return survival_rate_trend() == INCREASING;
}
- static bool IsHighSurvivalRate() {
+ bool IsHighSurvivalRate() {
return high_survival_rate_period_length_ > 0;
}
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
+ // Maximum GC pause.
+ int max_gc_pause_;
+
+ // Maximum size of objects alive after GC.
+ intptr_t max_alive_after_gc_;
+
+ // Minimum interval between two consecutive collections.
+ int min_in_mutator_;
+
+ // Size of objects alive after last GC.
+ intptr_t alive_after_last_gc_;
+
+ double last_gc_end_timestamp_;
+
+ MarkCompactCollector mark_compact_collector_;
+
+ // This field contains the meaning of the WATERMARK_INVALIDATED flag.
+ // Instead of clearing this flag from all pages we just flip
+ // its meaning at the beginning of a scavenge.
+ intptr_t page_watermark_invalidated_mark_;
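// Sketch of the flip, with the bit-manipulation details assumed: a page's
// watermark counts as valid only while its WATERMARK_INVALIDATED bit agrees
// with this heap-wide mark, so
//
//   page_watermark_invalidated_mark_ ^= 1 << Page::WATERMARK_INVALIDATED;
//
// at the start of a scavenge invalidates every page's watermark at once,
// without touching the pages themselves.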
+
+ int number_idle_notifications_;
+ unsigned int last_idle_notification_gc_count_;
+ bool last_idle_notification_gc_count_init_;
+
+ // Shared state read by the scavenge collector and set by ScavengeObject.
+ PromotionQueue promotion_queue_;
+
+ // Flag is set when the heap has been configured. The heap can be repeatedly
+ // configured through the API until it is set up.
+ bool configured_;
+
+ ExternalStringTable external_string_table_;
+
+ bool is_safe_to_read_maps_;
+
friend class Factory;
+ friend class GCTracer;
friend class DisallowAllocationFailure;
friend class AlwaysAllocateScope;
friend class LinearAllocationScope;
+ friend class Page;
+ friend class Isolate;
friend class MarkCompactCollector;
+ friend class MapCompact;
+
+ DISALLOW_COPY_AND_ASSIGN(Heap);
};
@@ -1478,7 +1662,7 @@ class HeapStats {
int* weak_global_handle_count; // 15
int* pending_global_handle_count; // 16
int* near_death_global_handle_count; // 17
- int* destroyed_global_handle_count; // 18
+ int* free_global_handle_count; // 18
intptr_t* memory_allocator_size; // 19
intptr_t* memory_allocator_capacity; // 20
int* objects_per_type; // 21
@@ -1495,13 +1679,13 @@ class AlwaysAllocateScope {
// non-handle code to call handle code. The code still works but
// performance will degrade, so we want to catch this situation
// in debug mode.
- ASSERT(Heap::always_allocate_scope_depth_ == 0);
- Heap::always_allocate_scope_depth_++;
+ ASSERT(HEAP->always_allocate_scope_depth_ == 0);
+ HEAP->always_allocate_scope_depth_++;
}
~AlwaysAllocateScope() {
- Heap::always_allocate_scope_depth_--;
- ASSERT(Heap::always_allocate_scope_depth_ == 0);
+ HEAP->always_allocate_scope_depth_--;
+ ASSERT(HEAP->always_allocate_scope_depth_ == 0);
}
};
@@ -1509,12 +1693,12 @@ class AlwaysAllocateScope {
class LinearAllocationScope {
public:
LinearAllocationScope() {
- Heap::linear_allocation_scope_depth_++;
+ HEAP->linear_allocation_scope_depth_++;
}
~LinearAllocationScope() {
- Heap::linear_allocation_scope_depth_--;
- ASSERT(Heap::linear_allocation_scope_depth_ >= 0);
+ HEAP->linear_allocation_scope_depth_--;
+ ASSERT(HEAP->linear_allocation_scope_depth_ >= 0);
}
};
@@ -1531,7 +1715,7 @@ class VerifyPointersVisitor: public ObjectVisitor {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- ASSERT(Heap::Contains(object));
+ ASSERT(HEAP->Contains(object));
ASSERT(object->map()->IsMap());
}
}
@@ -1549,10 +1733,10 @@ class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- ASSERT(Heap::Contains(object));
+ ASSERT(HEAP->Contains(object));
ASSERT(object->map()->IsMap());
- if (Heap::InNewSpace(object)) {
- ASSERT(Heap::InToSpace(object));
+ if (HEAP->InNewSpace(object)) {
+ ASSERT(HEAP->InToSpace(object));
Address addr = reinterpret_cast<Address>(current);
ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
}
@@ -1666,28 +1850,37 @@ class HeapIterator BASE_EMBEDDED {
class KeyedLookupCache {
public:
// Lookup field offset for (map, name). If absent, kNotFound is returned.
- static int Lookup(Map* map, String* name);
+ int Lookup(Map* map, String* name);
// Update an element in the cache.
- static void Update(Map* map, String* name, int field_offset);
+ void Update(Map* map, String* name, int field_offset);
// Clear the cache.
- static void Clear();
+ void Clear();
static const int kLength = 64;
static const int kCapacityMask = kLength - 1;
static const int kMapHashShift = 2;
+ static const int kNotFound = -1;
private:
+ KeyedLookupCache() {
+ for (int i = 0; i < kLength; ++i) {
+ keys_[i].map = NULL;
+ keys_[i].name = NULL;
+ field_offsets_[i] = kNotFound;
+ }
+ }
+
static inline int Hash(Map* map, String* name);
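// A plausible shape for Hash, shown for illustration only (the real body is
// in heap.cc; only String::Hash() and the constants above are involved):
//
//   int KeyedLookupCache::Hash(Map* map, String* name) {
//     uintptr_t map_bits =
//         static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map))
//             >> kMapHashShift;
//     return static_cast<int>((map_bits ^ name->Hash()) & kCapacityMask);
//   }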
// Get the address of the keys and field_offsets arrays. Used in
// generated code to perform cache lookups.
- static Address keys_address() {
+ Address keys_address() {
return reinterpret_cast<Address>(&keys_);
}
- static Address field_offsets_address() {
+ Address field_offsets_address() {
return reinterpret_cast<Address>(&field_offsets_);
}
@@ -1695,10 +1888,13 @@ class KeyedLookupCache {
Map* map;
String* name;
};
- static Key keys_[kLength];
- static int field_offsets_[kLength];
+
+ Key keys_[kLength];
+ int field_offsets_[kLength];
friend class ExternalReference;
+ friend class Isolate;
+ DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
};
@@ -1710,7 +1906,7 @@ class DescriptorLookupCache {
public:
// Lookup descriptor index for (map, name).
// If absent, kAbsent is returned.
- static int Lookup(DescriptorArray* array, String* name) {
+ int Lookup(DescriptorArray* array, String* name) {
if (!StringShape(name).IsSymbol()) return kAbsent;
int index = Hash(array, name);
Key& key = keys_[index];
@@ -1719,7 +1915,7 @@ class DescriptorLookupCache {
}
// Update an element in the cache.
- static void Update(DescriptorArray* array, String* name, int result) {
+ void Update(DescriptorArray* array, String* name, int result) {
ASSERT(result != kAbsent);
if (StringShape(name).IsSymbol()) {
int index = Hash(array, name);
@@ -1731,10 +1927,19 @@ class DescriptorLookupCache {
}
// Clear the cache.
- static void Clear();
+ void Clear();
static const int kAbsent = -2;
+
private:
+ DescriptorLookupCache() {
+ for (int i = 0; i < kLength; ++i) {
+ keys_[i].array = NULL;
+ keys_[i].name = NULL;
+ results_[i] = kAbsent;
+ }
+ }
+
static int Hash(DescriptorArray* array, String* name) {
// Uses only lower 32 bits if pointers are larger.
uint32_t array_hash =
@@ -1750,55 +1955,11 @@ class DescriptorLookupCache {
String* name;
};
- static Key keys_[kLength];
- static int results_[kLength];
-};
-
-
-// ----------------------------------------------------------------------------
-// Marking stack for tracing live objects.
-
-class MarkingStack {
- public:
- void Initialize(Address low, Address high) {
- top_ = low_ = reinterpret_cast<HeapObject**>(low);
- high_ = reinterpret_cast<HeapObject**>(high);
- overflowed_ = false;
- }
-
- bool is_full() { return top_ >= high_; }
-
- bool is_empty() { return top_ <= low_; }
+ Key keys_[kLength];
+ int results_[kLength];
- bool overflowed() { return overflowed_; }
-
- void clear_overflowed() { overflowed_ = false; }
-
- // Push the (marked) object on the marking stack if there is room,
- // otherwise mark the object as overflowed and wait for a rescan of the
- // heap.
- void Push(HeapObject* object) {
- CHECK(object->IsHeapObject());
- if (is_full()) {
- object->SetOverflow();
- overflowed_ = true;
- } else {
- *(top_++) = object;
- }
- }
-
- HeapObject* Pop() {
- ASSERT(!is_empty());
- HeapObject* object = *(--top_);
- CHECK(object->IsHeapObject());
- return object;
- }
-
- private:
- HeapObject** low_;
- HeapObject** top_;
- HeapObject** high_;
- bool overflowed_;
+ friend class Isolate;
+ DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
};
@@ -1815,11 +1976,11 @@ class MarkingStack {
class DisallowAllocationFailure {
public:
DisallowAllocationFailure() {
- old_state_ = Heap::disallow_allocation_failure_;
- Heap::disallow_allocation_failure_ = true;
+ old_state_ = HEAP->disallow_allocation_failure_;
+ HEAP->disallow_allocation_failure_ = true;
}
~DisallowAllocationFailure() {
- Heap::disallow_allocation_failure_ = old_state_;
+ HEAP->disallow_allocation_failure_ = old_state_;
}
private:
bool old_state_;
@@ -1828,11 +1989,11 @@ class DisallowAllocationFailure {
class AssertNoAllocation {
public:
AssertNoAllocation() {
- old_state_ = Heap::allow_allocation(false);
+ old_state_ = HEAP->allow_allocation(false);
}
~AssertNoAllocation() {
- Heap::allow_allocation(old_state_);
+ HEAP->allow_allocation(old_state_);
}
private:
@@ -1842,11 +2003,11 @@ class AssertNoAllocation {
class DisableAssertNoAllocation {
public:
DisableAssertNoAllocation() {
- old_state_ = Heap::allow_allocation(true);
+ old_state_ = HEAP->allow_allocation(true);
}
~DisableAssertNoAllocation() {
- Heap::allow_allocation(old_state_);
+ HEAP->allow_allocation(old_state_);
}
private:
@@ -1903,7 +2064,7 @@ class GCTracer BASE_EMBEDDED {
double start_time_;
};
- GCTracer();
+ explicit GCTracer(Heap* heap);
~GCTracer();
// Sets the collector.
@@ -1929,22 +2090,13 @@ class GCTracer BASE_EMBEDDED {
promoted_objects_size_ += object_size;
}
- // Returns maximum GC pause.
- static int get_max_gc_pause() { return max_gc_pause_; }
-
- // Returns maximum size of objects alive after GC.
- static intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
-
- // Returns minimal interval between two subsequent collections.
- static int get_min_in_mutator() { return min_in_mutator_; }
-
private:
// Returns a string matching the collector.
const char* CollectorString();
// Returns size of object in heap (in MB).
double SizeOfHeapObjects() {
- return (static_cast<double>(Heap::SizeOfObjects())) / MB;
+ return (static_cast<double>(HEAP->SizeOfObjects())) / MB;
}
double start_time_; // Timestamp set in the constructor.
@@ -1993,19 +2145,7 @@ class GCTracer BASE_EMBEDDED {
// Size of objects promoted during the current collection.
intptr_t promoted_objects_size_;
- // Maximum GC pause.
- static int max_gc_pause_;
-
- // Maximum size of objects alive after GC.
- static intptr_t max_alive_after_gc_;
-
- // Minimal interval between two subsequent collections.
- static int min_in_mutator_;
-
- // Size of objects alive after last GC.
- static intptr_t alive_after_last_gc_;
-
- static double last_gc_end_timestamp_;
+ Heap* heap_;
};
@@ -2015,131 +2155,71 @@ class TranscendentalCache {
static const int kTranscendentalTypeBits = 3;
STATIC_ASSERT((1 << kTranscendentalTypeBits) >= kNumberOfCaches);
- explicit TranscendentalCache(Type t);
-
// Returns a heap number with f(input), where f is a math function specified
// by the 'type' argument.
- MUST_USE_RESULT static inline MaybeObject* Get(Type type, double input) {
- TranscendentalCache* cache = caches_[type];
- if (cache == NULL) {
- caches_[type] = cache = new TranscendentalCache(type);
- }
- return cache->Get(input);
- }
+ MUST_USE_RESULT inline MaybeObject* Get(Type type, double input);
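// Illustrative use, with the Isolate accessor name assumed:
//
//   MaybeObject* sin_half =
//       isolate->transcendental_cache()->Get(TranscendentalCache::SIN, 0.5);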
// The cache contains raw Object pointers. This method disposes of
// them before a garbage collection.
- static void Clear();
+ void Clear();
private:
- MUST_USE_RESULT inline MaybeObject* Get(double input) {
- Converter c;
- c.dbl = input;
- int hash = Hash(c);
- Element e = elements_[hash];
- if (e.in[0] == c.integers[0] &&
- e.in[1] == c.integers[1]) {
- ASSERT(e.output != NULL);
- Counters::transcendental_cache_hit.Increment();
- return e.output;
- }
- double answer = Calculate(input);
- Counters::transcendental_cache_miss.Increment();
- Object* heap_number;
- { MaybeObject* maybe_heap_number = Heap::AllocateHeapNumber(answer);
- if (!maybe_heap_number->ToObject(&heap_number)) return maybe_heap_number;
- }
- elements_[hash].in[0] = c.integers[0];
- elements_[hash].in[1] = c.integers[1];
- elements_[hash].output = heap_number;
- return heap_number;
- }
-
- inline double Calculate(double input) {
- switch (type_) {
- case ACOS:
- return acos(input);
- case ASIN:
- return asin(input);
- case ATAN:
- return atan(input);
- case COS:
- return cos(input);
- case EXP:
- return exp(input);
- case LOG:
- return log(input);
- case SIN:
- return sin(input);
- case TAN:
- return tan(input);
- default:
- return 0.0; // Never happens.
- }
- }
- static const int kCacheSize = 512;
- struct Element {
- uint32_t in[2];
- Object* output;
- };
- union Converter {
- double dbl;
- uint32_t integers[2];
- };
- inline static int Hash(const Converter& c) {
- uint32_t hash = (c.integers[0] ^ c.integers[1]);
- hash ^= static_cast<int32_t>(hash) >> 16;
- hash ^= static_cast<int32_t>(hash) >> 8;
- return (hash & (kCacheSize - 1));
- }
+ class SubCache {
+ static const int kCacheSize = 512;
- static Address cache_array_address() {
- // Used to create an external reference.
- return reinterpret_cast<Address>(caches_);
- }
+ explicit SubCache(Type t);
- // Allow access to the caches_ array as an ExternalReference.
- friend class ExternalReference;
- // Inline implementation of the cache.
- friend class TranscendentalCacheStub;
+ MUST_USE_RESULT inline MaybeObject* Get(double input);
- static TranscendentalCache* caches_[kNumberOfCaches];
- Element elements_[kCacheSize];
- Type type_;
-};
+ inline double Calculate(double input);
+ struct Element {
+ uint32_t in[2];
+ Object* output;
+ };
-// External strings table is a place where all external strings are
-// registered. We need to keep track of such strings to properly
-// finalize them.
-class ExternalStringTable : public AllStatic {
- public:
- // Registers an external string.
- inline static void AddString(String* string);
+ union Converter {
+ double dbl;
+ uint32_t integers[2];
+ };
- inline static void Iterate(ObjectVisitor* v);
+ inline static int Hash(const Converter& c) {
+ uint32_t hash = (c.integers[0] ^ c.integers[1]);
+ hash ^= static_cast<int32_t>(hash) >> 16;
+ hash ^= static_cast<int32_t>(hash) >> 8;
+ return (hash & (kCacheSize - 1));
+ }
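// Example: Converter reinterprets the 64-bit double as two uint32 halves;
// for input 1.0 (bits 0x3FF0000000000000) the halves are 0x3FF00000 and
// 0x00000000, so the hash starts from their xor, 0x3FF00000, before the
// two folding shifts above.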
- // Restores internal invariant and gets rid of collected strings.
- // Must be called after each Iterate() that modified the strings.
- static void CleanUp();
+ Element elements_[kCacheSize];
+ Type type_;
+ Isolate* isolate_;
- // Destroys all allocated memory.
- static void TearDown();
+ // Allow access to the caches_ array as an ExternalReference.
+ friend class ExternalReference;
+ // Inline implementation of the cache.
+ friend class TranscendentalCacheStub;
+ // Allows the owning cache to evaluate values.
+ friend class TranscendentalCache;
- private:
- friend class Heap;
+ DISALLOW_COPY_AND_ASSIGN(SubCache);
+ };
- inline static void Verify();
+ TranscendentalCache() {
+ for (int i = 0; i < kNumberOfCaches; ++i) caches_[i] = NULL;
+ }
- inline static void AddOldString(String* string);
+ // Used to create an external reference.
+ inline Address cache_array_address();
- // Notifies the table that only a prefix of the new list is valid.
- inline static void ShrinkNewStrings(int position);
+ // Instantiation
+ friend class Isolate;
+ // Inline implementation of the caching.
+ friend class TranscendentalCacheStub;
+ // Allow access to the caches_ array as an ExternalReference.
+ friend class ExternalReference;
- // To speed up scavenge collections new space string are kept
- // separate from old space strings.
- static List<Object*> new_space_strings_;
- static List<Object*> old_space_strings_;
+ SubCache* caches_[kNumberOfCaches];
+ DISALLOW_COPY_AND_ASSIGN(TranscendentalCache);
};
@@ -2216,4 +2296,6 @@ class PathTracer : public ObjectVisitor {
} } // namespace v8::internal
+#undef HEAP
+
#endif // V8_HEAP_H_
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index c5a7146eb..771770ee8 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -36,6 +36,8 @@
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/lithium-mips.h"
#else
#error Unsupported target architecture.
#endif
@@ -58,12 +60,20 @@ const char* Representation::Mnemonic() const {
case kDouble: return "d";
case kInteger32: return "i";
case kExternal: return "x";
- case kNumRepresentations:
+ default:
UNREACHABLE();
return NULL;
}
- UNREACHABLE();
- return NULL;
+}
+
+
+void HValue::AssumeRepresentation(Representation r) {
+ if (CheckFlag(kFlexibleRepresentation)) {
+ ChangeRepresentation(r);
+ // The representation of the value is dictated by type feedback and
+ // will not be changed later.
+ ClearFlag(kFlexibleRepresentation);
+ }
}
@@ -120,6 +130,44 @@ void Range::AddConstant(int32_t value) {
}
+void Range::Intersect(Range* other) {
+ upper_ = Min(upper_, other->upper_);
+ lower_ = Max(lower_, other->lower_);
+ bool b = CanBeMinusZero() && other->CanBeMinusZero();
+ set_can_be_minus_zero(b);
+}
+
+
+void Range::Union(Range* other) {
+ upper_ = Max(upper_, other->upper_);
+ lower_ = Min(lower_, other->lower_);
+ bool b = CanBeMinusZero() || other->CanBeMinusZero();
+ set_can_be_minus_zero(b);
+}
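// Example: intersecting [0, 10] with [5, 20] gives [5, 10], while their
// union gives [0, 20]; -0 survives an intersection only if both inputs
// allow it, but survives a union if either one does.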
+
+
+void Range::Sar(int32_t value) {
+ int32_t bits = value & 0x1F;
+ lower_ = lower_ >> bits;
+ upper_ = upper_ >> bits;
+ set_can_be_minus_zero(false);
+}
+
+
+void Range::Shl(int32_t value) {
+ int32_t bits = value & 0x1F;
+ int old_lower = lower_;
+ int old_upper = upper_;
+ lower_ = lower_ << bits;
+ upper_ = upper_ << bits;
+ if (old_lower != lower_ >> bits || old_upper != upper_ >> bits) {
+ upper_ = kMaxInt;
+ lower_ = kMinInt;
+ }
+ set_can_be_minus_zero(false);
+}
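// The shift-back comparison above detects lost bits. Example: with
// bits == 1, lower_ == 0x40000000 shifts to 0x80000000, and
// 0x80000000 >> 1 is 0xC0000000 != 0x40000000, so the range is widened
// to [kMinInt, kMaxInt].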
+
+
bool Range::AddAndCheckOverflow(Range* other) {
bool may_overflow = false;
lower_ = AddWithoutOverflow(lower_, other->lower(), &may_overflow);
@@ -224,31 +272,60 @@ HType HType::TypeFromValue(Handle<Object> value) {
}
-int HValue::LookupOperandIndex(int occurrence_index, HValue* op) {
- for (int i = 0; i < OperandCount(); ++i) {
- if (OperandAt(i) == op) {
- if (occurrence_index == 0) return i;
- --occurrence_index;
- }
+bool HValue::IsDefinedAfter(HBasicBlock* other) const {
+ return block()->block_id() > other->block_id();
+}
+
+
+HUseIterator::HUseIterator(HUseListNode* head) : next_(head) {
+ Advance();
+}
+
+
+void HUseIterator::Advance() {
+ current_ = next_;
+ if (current_ != NULL) {
+ next_ = current_->tail();
+ value_ = current_->value();
+ index_ = current_->index();
}
- return -1;
}
-bool HValue::IsDefinedAfter(HBasicBlock* other) const {
- return block()->block_id() > other->block_id();
+int HValue::UseCount() const {
+ int count = 0;
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) ++count;
+ return count;
}
-bool HValue::UsesMultipleTimes(HValue* op) {
- bool seen = false;
- for (int i = 0; i < OperandCount(); ++i) {
- if (OperandAt(i) == op) {
- if (seen) return true;
- seen = true;
+HUseListNode* HValue::RemoveUse(HValue* value, int index) {
+ HUseListNode* previous = NULL;
+ HUseListNode* current = use_list_;
+ while (current != NULL) {
+ if (current->value() == value && current->index() == index) {
+ if (previous == NULL) {
+ use_list_ = current->tail();
+ } else {
+ previous->set_tail(current->tail());
+ }
+ break;
}
+
+ previous = current;
+ current = current->tail();
}
- return false;
+
+#ifdef DEBUG
+ // Do not reuse use list nodes in debug mode, zap them.
+ if (current != NULL) {
+ HUseListNode* temp =
+ new HUseListNode(current->value(), current->index(), NULL);
+ current->Zap();
+ current = temp;
+ }
+#endif
+ return current;
}
@@ -277,32 +354,42 @@ intptr_t HValue::Hashcode() {
}
+const char* HValue::Mnemonic() const {
+ switch (opcode()) {
+#define MAKE_CASE(type) case k##type: return #type;
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(MAKE_CASE)
+#undef MAKE_CASE
+ case kPhi: return "Phi";
+ default: return "";
+ }
+}
+
+
void HValue::SetOperandAt(int index, HValue* value) {
- ASSERT(value == NULL || !value->representation().IsNone());
RegisterUse(index, value);
InternalSetOperandAt(index, value);
}
-void HValue::ReplaceAndDelete(HValue* other) {
- ReplaceValue(other);
- Delete();
+void HValue::DeleteAndReplaceWith(HValue* other) {
+ // We replace all uses first, so the ASSERT below can verify none remain.
+ if (other != NULL) ReplaceAllUsesWith(other);
+ ASSERT(HasNoUses());
+ ClearOperands();
+ DeleteFromGraph();
}
-void HValue::ReplaceValue(HValue* other) {
- ZoneList<HValue*> start_uses(2);
- for (int i = 0; i < uses_.length(); ++i) {
- HValue* use = uses_.at(i);
- if (!use->block()->IsStartBlock()) {
- InternalReplaceAtUse(use, other);
- other->uses_.Add(use);
- } else {
- start_uses.Add(use);
- }
+void HValue::ReplaceAllUsesWith(HValue* other) {
+ while (use_list_ != NULL) {
+ HUseListNode* list_node = use_list_;
+ HValue* value = list_node->value();
+ ASSERT(!value->block()->IsStartBlock());
+ value->InternalSetOperandAt(list_node->index(), other);
+ use_list_ = list_node->tail();
+ list_node->set_tail(other->use_list_);
+ other->use_list_ = list_node;
}
- uses_.Clear();
- uses_.AddAll(start_uses);
}
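// Note the splice: each use node is unlinked from this value's list and
// pushed onto the head of other's list, so the transfer allocates nothing
// and runs in time linear in the number of uses.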
@@ -313,55 +400,48 @@ void HValue::ClearOperands() {
}
-void HValue::Delete() {
- ASSERT(HasNoUses());
- ClearOperands();
- DeleteFromGraph();
-}
-
-
-void HValue::ReplaceAtUse(HValue* use, HValue* other) {
- for (int i = 0; i < use->OperandCount(); ++i) {
- if (use->OperandAt(i) == this) {
- use->SetOperandAt(i, other);
- }
+void HValue::SetBlock(HBasicBlock* block) {
+ ASSERT(block_ == NULL || block == NULL);
+ block_ = block;
+ if (id_ == kNoNumber && block != NULL) {
+ id_ = block->graph()->GetNextValueID(this);
}
}
-void HValue::ReplaceFirstAtUse(HValue* use, HValue* other, Representation r) {
- for (int i = 0; i < use->OperandCount(); ++i) {
- if (use->RequiredInputRepresentation(i).Equals(r) &&
- use->OperandAt(i) == this) {
- use->SetOperandAt(i, other);
- return;
- }
- }
+void HValue::PrintTypeTo(StringStream* stream) {
+ if (!representation().IsTagged() || type().Equals(HType::Tagged())) return;
+ stream->Add(" type[%s]", type().ToString());
}
-void HValue::InternalReplaceAtUse(HValue* use, HValue* other) {
- for (int i = 0; i < use->OperandCount(); ++i) {
- if (use->OperandAt(i) == this) {
- // Call internal method that does not update use lists. The caller is
- // responsible for doing so.
- use->InternalSetOperandAt(i, other);
- }
- }
+void HValue::PrintRangeTo(StringStream* stream) {
+ if (range() == NULL || range()->IsMostGeneric()) return;
+ stream->Add(" range[%d,%d,m0=%d]",
+ range()->lower(),
+ range()->upper(),
+ static_cast<int>(range()->CanBeMinusZero()));
}
-void HValue::SetBlock(HBasicBlock* block) {
- ASSERT(block_ == NULL || block == NULL);
- block_ = block;
- if (id_ == kNoNumber && block != NULL) {
- id_ = block->graph()->GetNextValueID(this);
+void HValue::PrintChangesTo(StringStream* stream) {
+ int changes_flags = (flags() & HValue::ChangesFlagsMask());
+ if (changes_flags == 0) return;
+ stream->Add(" changes[");
+ if (changes_flags == AllSideEffects()) {
+ stream->Add("*");
+ } else {
+ bool add_comma = false;
+#define PRINT_DO(type) \
+ if (changes_flags & (1 << kChanges##type)) { \
+ if (add_comma) stream->Add(","); \
+ add_comma = true; \
+ stream->Add(#type); \
+ }
+ GVN_FLAG_LIST(PRINT_DO);
+#undef PRINT_DO
}
-}
-
-
-void HValue::PrintTypeTo(HType type, StringStream* stream) {
- stream->Add(type.ToShortString());
+ stream->Add("]");
}
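// Example output: " changes[*]" for an instruction with all side effects,
// or e.g. " changes[Maps,GlobalVars]" for a subset drawn from GVN_FLAG_LIST.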
@@ -381,12 +461,20 @@ bool HValue::UpdateInferredType() {
void HValue::RegisterUse(int index, HValue* new_value) {
HValue* old_value = OperandAt(index);
if (old_value == new_value) return;
+
+ HUseListNode* removed = NULL;
if (old_value != NULL) {
- ASSERT(old_value->uses_.Contains(this));
- old_value->uses_.RemoveElement(this);
+ removed = old_value->RemoveUse(this, index);
}
+
if (new_value != NULL) {
- new_value->uses_.Add(this);
+ if (removed == NULL) {
+ new_value->use_list_ =
+ new HUseListNode(this, index, new_value->use_list_);
+ } else {
+ removed->set_tail(new_value->use_list_);
+ new_value->use_list_ = removed;
+ }
}
}
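// When an operand is rewritten, the node removed from the old value's use
// list is recycled onto the new value's list, so steady-state operand
// updates allocate no new HUseListNodes.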
@@ -415,26 +503,18 @@ void HValue::ComputeInitialRange() {
void HInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s", Mnemonic());
- if (HasSideEffects()) stream->Add("*");
- stream->Add(" ");
+ PrintMnemonicTo(stream);
PrintDataTo(stream);
+ PrintRangeTo(stream);
+ PrintChangesTo(stream);
+ PrintTypeTo(stream);
+}
- if (range() != NULL) {
- stream->Add(" range[%d,%d,m0=%d]",
- range()->lower(),
- range()->upper(),
- static_cast<int>(range()->CanBeMinusZero()));
- }
- int changes_flags = (flags() & HValue::ChangesFlagsMask());
- if (changes_flags != 0) {
- stream->Add(" changes[0x%x]", changes_flags);
- }
-
- if (representation().IsTagged() && !type().Equals(HType::Tagged())) {
- stream->Add(" type[%s]", type().ToString());
- }
+void HInstruction::PrintMnemonicTo(StringStream* stream) {
+ stream->Add("%s", Mnemonic());
+ if (HasSideEffects()) stream->Add("*");
+ stream->Add(" ");
}
@@ -519,6 +599,8 @@ void HInstruction::Verify() {
ASSERT(cur == other_operand);
}
} else {
+ // If the following assert fires, you may have forgotten an
+ // AddInstruction.
ASSERT(other_block->Dominates(cur_block));
}
}
@@ -604,15 +686,13 @@ void HAccessArgumentsAt::PrintDataTo(StringStream* stream) {
void HControlInstruction::PrintDataTo(StringStream* stream) {
- if (FirstSuccessor() != NULL) {
- int first_id = FirstSuccessor()->block_id();
- if (SecondSuccessor() == NULL) {
- stream->Add(" B%d", first_id);
- } else {
- int second_id = SecondSuccessor()->block_id();
- stream->Add(" goto (B%d, B%d)", first_id, second_id);
- }
+ stream->Add(" goto (");
+ bool first_block = true;
+ for (HSuccessorIterator it(this); !it.Done(); it.Advance()) {
+ stream->Add(first_block ? "B%d" : ", B%d", it.Current()->block_id());
+ first_block = false;
}
+ stream->Add(")");
}
@@ -622,6 +702,11 @@ void HUnaryControlInstruction::PrintDataTo(StringStream* stream) {
}
+void HReturn::PrintDataTo(StringStream* stream) {
+ value()->PrintNameTo(stream);
+}
+
+
void HCompareMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" (%p)", *map());
@@ -665,7 +750,7 @@ void HUnaryOperation::PrintDataTo(StringStream* stream) {
void HHasInstanceType::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
switch (from_) {
- case FIRST_JS_OBJECT_TYPE:
+ case FIRST_JS_RECEIVER_TYPE:
if (to_ == LAST_TYPE) stream->Add(" spec_object");
break;
case JS_REGEXP_TYPE:
@@ -692,17 +777,44 @@ void HTypeofIs::PrintDataTo(StringStream* stream) {
void HChange::PrintDataTo(StringStream* stream) {
HUnaryOperation::PrintDataTo(stream);
- stream->Add(" %s to %s", from_.Mnemonic(), to_.Mnemonic());
+ stream->Add(" %s to %s", from_.Mnemonic(), to().Mnemonic());
if (CanTruncateToInt32()) stream->Add(" truncating-int32");
if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
}
-HCheckInstanceType* HCheckInstanceType::NewIsJSObjectOrJSFunction(
- HValue* value) {
- STATIC_ASSERT((LAST_JS_OBJECT_TYPE + 1) == JS_FUNCTION_TYPE);
- return new HCheckInstanceType(value, FIRST_JS_OBJECT_TYPE, JS_FUNCTION_TYPE);
+void HCheckInstanceType::GetCheckInterval(InstanceType* first,
+ InstanceType* last) {
+ ASSERT(is_interval_check());
+ switch (check_) {
+ case IS_SPEC_OBJECT:
+ *first = FIRST_SPEC_OBJECT_TYPE;
+ *last = LAST_SPEC_OBJECT_TYPE;
+ return;
+ case IS_JS_ARRAY:
+ *first = *last = JS_ARRAY_TYPE;
+ return;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
+ ASSERT(!is_interval_check());
+ switch (check_) {
+ case IS_STRING:
+ *mask = kIsNotStringMask;
+ *tag = kStringTag;
+ return;
+ case IS_SYMBOL:
+ *mask = kIsSymbolMask;
+ *tag = kSymbolTag;
+ return;
+ default:
+ UNREACHABLE();
+ }
}
@@ -744,6 +856,8 @@ Range* HValue::InferRange() {
} else if (representation().IsNone()) {
return NULL;
} else {
+ // Untagged integer32 cannot be -0 and we don't compute ranges for
+ // untagged doubles.
return new Range();
}
}
@@ -755,7 +869,7 @@ Range* HConstant::InferRange() {
result->set_can_be_minus_zero(false);
return result;
}
- return HInstruction::InferRange();
+ return HValue::InferRange();
}
@@ -789,7 +903,7 @@ Range* HAdd::InferRange() {
res->set_can_be_minus_zero(m0);
return res;
} else {
- return HArithmeticBinaryOperation::InferRange();
+ return HValue::InferRange();
}
}
@@ -805,7 +919,7 @@ Range* HSub::InferRange() {
res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero());
return res;
} else {
- return HArithmeticBinaryOperation::InferRange();
+ return HValue::InferRange();
}
}
@@ -823,7 +937,7 @@ Range* HMul::InferRange() {
res->set_can_be_minus_zero(m0);
return res;
} else {
- return HArithmeticBinaryOperation::InferRange();
+ return HValue::InferRange();
}
}
@@ -848,7 +962,7 @@ Range* HDiv::InferRange() {
}
return result;
} else {
- return HArithmeticBinaryOperation::InferRange();
+ return HValue::InferRange();
}
}
@@ -865,7 +979,7 @@ Range* HMod::InferRange() {
}
return result;
} else {
- return HArithmeticBinaryOperation::InferRange();
+ return HValue::InferRange();
}
}
@@ -879,7 +993,7 @@ void HPhi::PrintTo(StringStream* stream) {
stream->Add(" ");
}
stream->Add(" uses%d_%di_%dd_%dt]",
- uses()->length(),
+ UseCount(),
int32_non_phi_uses() + int32_indirect_uses(),
double_non_phi_uses() + double_indirect_uses(),
tagged_non_phi_uses() + tagged_indirect_uses());
@@ -896,6 +1010,14 @@ void HPhi::AddInput(HValue* value) {
}
+bool HPhi::HasRealUses() {
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->IsPhi()) return true;
+ }
+ return false;
+}
+
+
HValue* HPhi::GetRedundantReplacement() {
HValue* candidate = NULL;
int count = OperandCount();
@@ -923,12 +1045,11 @@ void HPhi::DeleteFromGraph() {
void HPhi::InitRealUses(int phi_id) {
// Initialize real uses.
phi_id_ = phi_id;
- for (int j = 0; j < uses()->length(); j++) {
- HValue* use = uses()->at(j);
- if (!use->IsPhi()) {
- int index = use->LookupOperandIndex(0, this);
- Representation req_rep = use->RequiredInputRepresentation(index);
- non_phi_uses_[req_rep.kind()]++;
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ HValue* value = it.value();
+ if (!value->IsPhi()) {
+ Representation rep = value->RequiredInputRepresentation(it.index());
+ ++non_phi_uses_[rep.kind()];
}
}
}
@@ -965,6 +1086,16 @@ void HSimulate::PrintDataTo(StringStream* stream) {
}
+void HDeoptimize::PrintDataTo(StringStream* stream) {
+ if (OperandCount() == 0) return;
+ OperandAt(0)->PrintNameTo(stream);
+ for (int i = 1; i < OperandCount(); ++i) {
+ stream->Add(" ");
+ OperandAt(i)->PrintNameTo(stream);
+ }
+}
+
+
void HEnterInlined::PrintDataTo(StringStream* stream) {
SmartPointer<char> name = function()->debug_name()->ToCString();
stream->Add("%s, id=%d", *name, function()->id());
@@ -973,10 +1104,9 @@ void HEnterInlined::PrintDataTo(StringStream* stream) {
HConstant::HConstant(Handle<Object> handle, Representation r)
: handle_(handle),
- constant_type_(HType::TypeFromValue(handle)),
has_int32_value_(false),
- int32_value_(0),
has_double_value_(false),
+ int32_value_(0),
double_value_(0) {
set_representation(r);
SetFlag(kUseGVN);
@@ -1001,18 +1131,35 @@ HConstant* HConstant::CopyToRepresentation(Representation r) const {
HConstant* HConstant::CopyToTruncatedInt32() const {
if (!has_double_value_) return NULL;
int32_t truncated = NumberToInt32(*handle_);
- return new HConstant(Factory::NewNumberFromInt(truncated),
+ return new HConstant(FACTORY->NewNumberFromInt(truncated),
Representation::Integer32());
}
+bool HConstant::ToBoolean() const {
+ // Converts the constant to a boolean according to the ToBoolean
+ // conversion of ECMAScript section 9.2.
+ if (HasInteger32Value()) return Integer32Value() != 0;
+ if (HasDoubleValue()) {
+ double v = DoubleValue();
+ return v != 0 && !isnan(v);
+ }
+ if (handle()->IsTrue()) return true;
+ if (handle()->IsFalse()) return false;
+ if (handle()->IsUndefined()) return false;
+ if (handle()->IsNull()) return false;
+ if (handle()->IsString() &&
+ String::cast(*handle())->length() == 0) return false;
+ return true;
+}
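// Per ECMAScript 9.2: 0, NaN, undefined, null, and the empty string all
// convert to false; every other number, string, and heap object
// (including a Boolean wrapper around false) converts to true.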
+
void HConstant::PrintDataTo(StringStream* stream) {
handle()->ShortPrint(stream);
}
bool HArrayLiteral::IsCopyOnWrite() const {
- return constant_elements()->map() == Heap::fixed_cow_array_map();
+ return constant_elements()->map() == HEAP->fixed_cow_array_map();
}
@@ -1026,34 +1173,30 @@ void HBinaryOperation::PrintDataTo(StringStream* stream) {
Range* HBitAnd::InferRange() {
- Range* a = left()->range();
- Range* b = right()->range();
- int32_t a_mask = 0xffffffff;
- int32_t b_mask = 0xffffffff;
- if (a != NULL) a_mask = a->Mask();
- if (b != NULL) b_mask = b->Mask();
- int32_t result_mask = a_mask & b_mask;
- if (result_mask >= 0) {
- return new Range(0, result_mask);
- } else {
- return HBinaryOperation::InferRange();
- }
+ int32_t left_mask = (left()->range() != NULL)
+ ? left()->range()->Mask()
+ : 0xffffffff;
+ int32_t right_mask = (right()->range() != NULL)
+ ? right()->range()->Mask()
+ : 0xffffffff;
+ int32_t result_mask = left_mask & right_mask;
+ return (result_mask >= 0)
+ ? new Range(0, result_mask)
+ : HValue::InferRange();
}
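// Example: for x & 0xff the right operand's range is [0xff, 0xff], whose
// mask is 0xff; 0xffffffff & 0xff == 0xff >= 0, so the result range is
// [0, 0xff] no matter what is known about x.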
Range* HBitOr::InferRange() {
- Range* a = left()->range();
- Range* b = right()->range();
- int32_t a_mask = 0xffffffff;
- int32_t b_mask = 0xffffffff;
- if (a != NULL) a_mask = a->Mask();
- if (b != NULL) b_mask = b->Mask();
- int32_t result_mask = a_mask | b_mask;
- if (result_mask >= 0) {
- return new Range(0, result_mask);
- } else {
- return HBinaryOperation::InferRange();
- }
+ int32_t left_mask = (left()->range() != NULL)
+ ? left()->range()->Mask()
+ : 0xffffffff;
+ int32_t right_mask = (right()->range() != NULL)
+ ? right()->range()->Mask()
+ : 0xffffffff;
+ int32_t result_mask = left_mask | right_mask;
+ return (result_mask >= 0)
+ ? new Range(0, result_mask)
+ : HValue::InferRange();
}
@@ -1061,20 +1204,14 @@ Range* HSar::InferRange() {
if (right()->IsConstant()) {
HConstant* c = HConstant::cast(right());
if (c->HasInteger32Value()) {
- int32_t val = c->Integer32Value();
- Range* result = NULL;
- Range* left_range = left()->range();
- if (left_range == NULL) {
- result = new Range();
- } else {
- result = left_range->Copy();
- }
- result->Sar(val);
+ Range* result = (left()->range() != NULL)
+ ? left()->range()->Copy()
+ : new Range();
+ result->Sar(c->Integer32Value());
return result;
}
}
-
- return HBinaryOperation::InferRange();
+ return HValue::InferRange();
}
@@ -1082,20 +1219,14 @@ Range* HShl::InferRange() {
if (right()->IsConstant()) {
HConstant* c = HConstant::cast(right());
if (c->HasInteger32Value()) {
- int32_t val = c->Integer32Value();
- Range* result = NULL;
- Range* left_range = left()->range();
- if (left_range == NULL) {
- result = new Range();
- } else {
- result = left_range->Copy();
- }
- result->Shl(val);
+ Range* result = (left()->range() != NULL)
+ ? left()->range()->Copy()
+ : new Range();
+ result->Shl(c->Integer32Value());
return result;
}
}
-
- return HBinaryOperation::InferRange();
+ return HValue::InferRange();
}
@@ -1112,6 +1243,10 @@ void HCompare::SetInputRepresentation(Representation r) {
if (r.IsTagged()) {
SetAllSideEffects();
ClearFlag(kUseGVN);
+ } else if (r.IsDouble()) {
+ SetFlag(kDeoptimizeOnUndefined);
+ ClearAllSideEffects();
+ SetFlag(kUseGVN);
} else {
ClearAllSideEffects();
SetFlag(kUseGVN);
@@ -1130,6 +1265,70 @@ void HLoadNamedField::PrintDataTo(StringStream* stream) {
}
+HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* object,
+ ZoneMapList* types,
+ Handle<String> name)
+ : HUnaryOperation(object),
+ types_(Min(types->length(), kMaxLoadPolymorphism)),
+ name_(name),
+ need_generic_(false) {
+ set_representation(Representation::Tagged());
+ SetFlag(kDependsOnMaps);
+ for (int i = 0;
+ i < types->length() && types_.length() < kMaxLoadPolymorphism;
+ ++i) {
+ Handle<Map> map = types->at(i);
+ LookupResult lookup;
+ map->LookupInDescriptors(NULL, *name, &lookup);
+ if (lookup.IsProperty()) {
+ switch (lookup.type()) {
+ case FIELD: {
+ int index = lookup.GetLocalFieldIndexFromMap(*map);
+ if (index < 0) {
+ SetFlag(kDependsOnInobjectFields);
+ } else {
+ SetFlag(kDependsOnBackingStoreFields);
+ }
+ types_.Add(types->at(i));
+ break;
+ }
+ case CONSTANT_FUNCTION:
+ types_.Add(types->at(i));
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (types_.length() == types->length() && FLAG_deoptimize_uncommon_cases) {
+ SetFlag(kUseGVN);
+ } else {
+ SetAllSideEffects();
+ need_generic_ = true;
+ }
+}
+
+
+bool HLoadNamedFieldPolymorphic::DataEquals(HValue* value) {
+ HLoadNamedFieldPolymorphic* other = HLoadNamedFieldPolymorphic::cast(value);
+ if (types_.length() != other->types()->length()) return false;
+ if (!name_.is_identical_to(other->name())) return false;
+ if (need_generic_ != other->need_generic_) return false;
+ for (int i = 0; i < types_.length(); i++) {
+ bool found = false;
+ for (int j = 0; j < types_.length(); j++) {
+ if (types_.at(j).is_identical_to(other->types()->at(i))) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) return false;
+ }
+ return true;
+}
+
+
void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
@@ -1138,6 +1337,15 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
}
+bool HLoadKeyedFastElement::RequiresHoleCheck() const {
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ if (!use->IsChange()) return true;
+ }
+ return false;
+}
+
+
void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
@@ -1146,8 +1354,45 @@ void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
}
-void HLoadPixelArrayElement::PrintDataTo(StringStream* stream) {
+void HLoadKeyedSpecializedArrayElement::PrintDataTo(
+ StringStream* stream) {
external_pointer()->PrintNameTo(stream);
+ stream->Add(".");
+ switch (elements_kind()) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ stream->Add("byte");
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ stream->Add("u_byte");
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ stream->Add("short");
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ stream->Add("u_short");
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ stream->Add("int");
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ stream->Add("u_int");
+ break;
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ stream->Add("float");
+ break;
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ stream->Add("double");
+ break;
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ stream->Add("pixel");
+ break;
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("]");
@@ -1195,8 +1440,45 @@ void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
-void HStorePixelArrayElement::PrintDataTo(StringStream* stream) {
+void HStoreKeyedSpecializedArrayElement::PrintDataTo(
+ StringStream* stream) {
external_pointer()->PrintNameTo(stream);
+ stream->Add(".");
+ switch (elements_kind()) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ stream->Add("byte");
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ stream->Add("u_byte");
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ stream->Add("short");
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ stream->Add("u_short");
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ stream->Add("int");
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ stream->Add("u_int");
+ break;
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ stream->Add("float");
+ break;
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ stream->Add("double");
+ break;
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ stream->Add("pixel");
+ break;
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("] = ");
@@ -1204,18 +1486,29 @@ void HStorePixelArrayElement::PrintDataTo(StringStream* stream) {
}
-void HLoadGlobal::PrintDataTo(StringStream* stream) {
+void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
stream->Add("[%p]", *cell());
if (check_hole_value()) stream->Add(" (deleteable/read-only)");
}
-void HStoreGlobal::PrintDataTo(StringStream* stream) {
+void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
+ stream->Add("%o ", *name());
+}
+
+
+void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
stream->Add("[%p] = ", *cell());
value()->PrintNameTo(stream);
}
+void HStoreGlobalGeneric::PrintDataTo(StringStream* stream) {
+ stream->Add("%o = ", *name());
+ value()->PrintNameTo(stream);
+}
+
+
void HLoadContextSlot::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add("[%d]", slot_index());
@@ -1269,7 +1562,7 @@ HType HPhi::CalculateInferredType() {
HType HConstant::CalculateInferredType() {
- return constant_type_;
+ return HType::TypeFromValue(handle_);
}
@@ -1278,7 +1571,7 @@ HType HCompare::CalculateInferredType() {
}
-HType HCompareJSObjectEq::CalculateInferredType() {
+HType HCompareObjectEq::CalculateInferredType() {
return HType::Boolean();
}
@@ -1288,6 +1581,21 @@ HType HUnaryPredicate::CalculateInferredType() {
}
+HType HInstanceOf::CalculateInferredType() {
+ return HType::Boolean();
+}
+
+
+HType HDeleteProperty::CalculateInferredType() {
+ return HType::Boolean();
+}
+
+
+HType HInstanceOfKnownGlobal::CalculateInferredType() {
+ return HType::Boolean();
+}
+
+
HType HBitwiseBinaryOperation::CalculateInferredType() {
return HType::TaggedNumber();
}
@@ -1373,6 +1681,13 @@ HValue* HChange::EnsureAndPropagateNotMinusZero(BitVector* visited) {
}
+HValue* HForceRepresentation::EnsureAndPropagateNotMinusZero(
+ BitVector* visited) {
+ visited->Add(id());
+ return value();
+}
+
+
HValue* HMod::EnsureAndPropagateNotMinusZero(BitVector* visited) {
visited->Add(id());
if (range() == NULL || range()->CanBeMinusZero()) {
@@ -1423,6 +1738,13 @@ HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
}
+void HIn::PrintDataTo(StringStream* stream) {
+ key()->PrintNameTo(stream);
+ stream->Add(" ");
+ object()->PrintNameTo(stream);
+}
+
+
// Node-specific verification code is only included in debug mode.
#ifdef DEBUG
@@ -1446,7 +1768,6 @@ void HSimulate::Verify() {
void HBoundsCheck::Verify() {
HInstruction::Verify();
- ASSERT(HasNoUses());
}
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 1bce34beb..a0cab6aed 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -29,8 +29,13 @@
#define V8_HYDROGEN_INSTRUCTIONS_H_
#include "v8.h"
+
+#include "allocation.h"
#include "code-stubs.h"
+#include "data-flow.h"
+#include "small-pointer-list.h"
#include "string-stream.h"
+#include "utils.h"
#include "zone.h"
namespace v8 {
@@ -46,18 +51,10 @@ class LInstruction;
class LChunkBuilder;
-#define HYDROGEN_ALL_INSTRUCTION_LIST(V) \
- V(ArithmeticBinaryOperation) \
- V(BinaryCall) \
- V(BinaryOperation) \
+#define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V) \
V(BitwiseBinaryOperation) \
V(ControlInstruction) \
V(Instruction) \
- V(Phi) \
- V(UnaryCall) \
- V(UnaryControlInstruction) \
- V(UnaryOperation) \
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(V)
#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V) \
@@ -91,49 +88,58 @@ class LChunkBuilder;
V(CheckNonSmi) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(ClampToUint8) \
+ V(ClassOfTest) \
V(Compare) \
- V(CompareJSObjectEq) \
+ V(CompareObjectEq) \
V(CompareMap) \
+ V(CompareConstantEq) \
V(Constant) \
V(Context) \
V(DeleteProperty) \
V(Deoptimize) \
V(Div) \
+ V(ElementsKind) \
V(EnterInlined) \
+ V(ExternalArrayLength) \
V(FixedArrayLength) \
+ V(ForceRepresentation) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
+ V(HasCachedArrayIndex) \
+ V(HasInstanceType) \
+ V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
+ V(InvokeFunction) \
+ V(IsConstructCall) \
V(IsNull) \
V(IsObject) \
V(IsSmi) \
- V(IsConstructCall) \
- V(HasInstanceType) \
- V(HasCachedArrayIndex) \
+ V(IsUndetectable) \
V(JSArrayLength) \
- V(ClassOfTest) \
V(LeaveInlined) \
V(LoadContextSlot) \
V(LoadElements) \
+ V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
- V(LoadGlobal) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
+ V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
+ V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
- V(LoadPixelArrayElement) \
- V(LoadPixelArrayExternalPointer) \
V(Mod) \
V(Mul) \
V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
- V(PixelArrayLength) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
@@ -142,23 +148,31 @@ class LChunkBuilder;
V(Shl) \
V(Shr) \
V(Simulate) \
+ V(SoftDeoptimize) \
V(StackCheck) \
V(StoreContextSlot) \
- V(StoreGlobal) \
+ V(StoreGlobalCell) \
+ V(StoreGlobalGeneric) \
V(StoreKeyedFastElement) \
- V(StorePixelArrayElement) \
V(StoreKeyedGeneric) \
+ V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
+ V(StringAdd) \
V(StringCharCodeAt) \
+ V(StringCharFromCode) \
V(StringLength) \
V(Sub) \
V(Test) \
+ V(ThisFunction) \
V(Throw) \
+ V(ToFastProperties) \
+ V(ToInt32) \
V(Typeof) \
V(TypeofIs) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
+ V(UseConst) \
V(ValueOf)
#define GVN_FLAG_LIST(V) \
@@ -166,105 +180,74 @@ class LChunkBuilder;
V(InobjectFields) \
V(BackingStoreFields) \
V(ArrayElements) \
- V(PixelArrayElements) \
+ V(SpecializedArrayElements) \
V(GlobalVars) \
V(Maps) \
V(ArrayLengths) \
V(ContextSlots) \
V(OsrEntries)
-#define DECLARE_INSTRUCTION(type) \
+#define DECLARE_ABSTRACT_INSTRUCTION(type) \
virtual bool Is##type() const { return true; } \
static H##type* cast(HValue* value) { \
ASSERT(value->Is##type()); \
return reinterpret_cast<H##type*>(value); \
- } \
- Opcode opcode() const { return HValue::k##type; }
+ }
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+#define DECLARE_CONCRETE_INSTRUCTION(type) \
virtual LInstruction* CompileToLithium(LChunkBuilder* builder); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- DECLARE_INSTRUCTION(type)
+ static H##type* cast(HValue* value) { \
+ ASSERT(value->Is##type()); \
+ return reinterpret_cast<H##type*>(value); \
+ } \
+ virtual Opcode opcode() const { return HValue::k##type; }
class Range: public ZoneObject {
public:
- Range() : lower_(kMinInt),
- upper_(kMaxInt),
- next_(NULL),
- can_be_minus_zero_(false) { }
+ Range()
+ : lower_(kMinInt),
+ upper_(kMaxInt),
+ next_(NULL),
+ can_be_minus_zero_(false) { }
Range(int32_t lower, int32_t upper)
- : lower_(lower), upper_(upper), next_(NULL), can_be_minus_zero_(false) { }
+ : lower_(lower),
+ upper_(upper),
+ next_(NULL),
+ can_be_minus_zero_(false) { }
- bool IsInSmiRange() const {
- return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
- }
- void KeepOrder();
- void Verify() const;
int32_t upper() const { return upper_; }
int32_t lower() const { return lower_; }
Range* next() const { return next_; }
Range* CopyClearLower() const { return new Range(kMinInt, upper_); }
Range* CopyClearUpper() const { return new Range(lower_, kMaxInt); }
- void ClearLower() { lower_ = kMinInt; }
- void ClearUpper() { upper_ = kMaxInt; }
Range* Copy() const { return new Range(lower_, upper_); }
- bool IsMostGeneric() const { return lower_ == kMinInt && upper_ == kMaxInt; }
int32_t Mask() const;
void set_can_be_minus_zero(bool b) { can_be_minus_zero_ = b; }
bool CanBeMinusZero() const { return CanBeZero() && can_be_minus_zero_; }
bool CanBeZero() const { return upper_ >= 0 && lower_ <= 0; }
bool CanBeNegative() const { return lower_ < 0; }
- bool Includes(int value) const {
- return lower_ <= value && upper_ >= value;
- }
-
- void Sar(int32_t value) {
- int32_t bits = value & 0x1F;
- lower_ = lower_ >> bits;
- upper_ = upper_ >> bits;
- set_can_be_minus_zero(false);
- }
-
- void Shl(int32_t value) {
- int32_t bits = value & 0x1F;
- int old_lower = lower_;
- int old_upper = upper_;
- lower_ = lower_ << bits;
- upper_ = upper_ << bits;
- if (old_lower != lower_ >> bits || old_upper != upper_ >> bits) {
- upper_ = kMaxInt;
- lower_ = kMinInt;
- }
- set_can_be_minus_zero(false);
+ bool Includes(int value) const { return lower_ <= value && upper_ >= value; }
+ bool IsMostGeneric() const { return lower_ == kMinInt && upper_ == kMaxInt; }
+ bool IsInSmiRange() const {
+ return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
}
-
- // Adds a constant to the lower and upper bound of the range.
- void AddConstant(int32_t value);
+ void KeepOrder();
+ void Verify() const;
void StackUpon(Range* other) {
Intersect(other);
next_ = other;
}
- void Intersect(Range* other) {
- upper_ = Min(upper_, other->upper_);
- lower_ = Max(lower_, other->lower_);
- bool b = CanBeMinusZero() && other->CanBeMinusZero();
- set_can_be_minus_zero(b);
- }
-
- void Union(Range* other) {
- upper_ = Max(upper_, other->upper_);
- lower_ = Min(lower_, other->lower_);
- bool b = CanBeMinusZero() || other->CanBeMinusZero();
- set_can_be_minus_zero(b);
- }
+ void Intersect(Range* other);
+ void Union(Range* other);
- // Compute a new result range and return true, if the operation
- // can overflow.
+ void AddConstant(int32_t value);
+ void Sar(int32_t value);
+ void Shl(int32_t value);
bool AddAndCheckOverflow(Range* other);
bool SubAndCheckOverflow(Range* other);
bool MulAndCheckOverflow(Range* other);
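
For readers tracking the Range cleanup: Intersect() and Union() keep their old semantics but move out of line into hydrogen-instructions.cc. A sketch of the definitions implied by the removed inline bodies above (illustrative restatement, not part of the patch):

    void Range::Intersect(Range* other) {
      upper_ = Min(upper_, other->upper_);
      lower_ = Max(lower_, other->lower_);
      // The intersection can be -0 only if both inputs can be.
      set_can_be_minus_zero(CanBeMinusZero() && other->CanBeMinusZero());
    }

    void Range::Union(Range* other) {
      upper_ = Max(upper_, other->upper_);
      lower_ = Min(lower_, other->lower_);
      // The union can be -0 if either input can be.
      set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero());
    }
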
@@ -300,7 +283,7 @@ class Representation {
return kind_ == other.kind_;
}
- Kind kind() const { return kind_; }
+ Kind kind() const { return static_cast<Kind>(kind_); }
bool IsNone() const { return kind_ == kNone; }
bool IsTagged() const { return kind_ == kTagged; }
bool IsInteger32() const { return kind_ == kInteger32; }
@@ -314,7 +297,10 @@ class Representation {
private:
explicit Representation(Kind k) : kind_(k) { }
- Kind kind_;
+ // Make sure kind fits in int8.
+ STATIC_ASSERT(kNumRepresentations <= (1 << kBitsPerByte));
+
+ int8_t kind_;
};
@@ -417,13 +403,72 @@ class HType {
kBoolean = 0x85, // 0000 0000 1000 0101
kNonPrimitive = 0x101, // 0000 0001 0000 0001
kJSObject = 0x301, // 0000 0011 0000 0001
- kJSArray = 0x701, // 0000 0111 1000 0001
+ kJSArray = 0x701, // 0000 0111 0000 0001
kUninitialized = 0x1fff // 0001 1111 1111 1111
};
+ // Make sure type fits in int16.
+ STATIC_ASSERT(kUninitialized < (1 << (2 * kBitsPerByte)));
+
explicit HType(Type t) : type_(t) { }
- Type type_;
+ int16_t type_;
+};
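
The two STATIC_ASSERT guards above follow the same pattern: narrow an enum-backed field, then prove at compile time that every enumerator still fits. A standalone sketch of the idiom (illustrative names, not from the tree):

    // Build fails if the enum outgrows the storage chosen for it.
    enum Kind { kA, kB, kC, kNumKinds };
    STATIC_ASSERT(kNumKinds <= (1 << kBitsPerByte));  // Fits in int8_t.
    class Holder {
      int8_t kind_;  // Read back via static_cast<Kind>(kind_).
    };
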
+
+
+class HUseListNode: public ZoneObject {
+ public:
+ HUseListNode(HValue* value, int index, HUseListNode* tail)
+ : tail_(tail), value_(value), index_(index) {
+ }
+
+ HUseListNode* tail() const { return tail_; }
+ HValue* value() const { return value_; }
+ int index() const { return index_; }
+
+ void set_tail(HUseListNode* list) { tail_ = list; }
+
+#ifdef DEBUG
+ void Zap() {
+ tail_ = reinterpret_cast<HUseListNode*>(1);
+ value_ = NULL;
+ index_ = -1;
+ }
+#endif
+
+ private:
+ HUseListNode* tail_;
+ HValue* value_;
+ int index_;
+};
+
+
+// We reuse use list nodes behind the scenes as uses are added and deleted.
+// This class is the safe way to iterate uses while deleting them.
+class HUseIterator BASE_EMBEDDED {
+ public:
+ bool Done() { return current_ == NULL; }
+ void Advance();
+
+ HValue* value() {
+ ASSERT(!Done());
+ return value_;
+ }
+
+ int index() {
+ ASSERT(!Done());
+ return index_;
+ }
+
+ private:
+ explicit HUseIterator(HUseListNode* head);
+
+ HUseListNode* current_;
+ HUseListNode* next_;
+ HValue* value_;
+ int index_;
+
+ friend class HValue;
};
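
The intrusive use list is consumed through HUseIterator; the iterator caches value() and index() before advancing, so the current use may be unlinked mid-walk. A minimal sketch of the intended loop (hypothetical helper, mirroring the uses() accessor added below):

    // Count the uses of `value` that want an Integer32 input.
    int CountInteger32Uses(HValue* value) {
      int count = 0;
      for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
        if (it.value()->representation().IsInteger32()) ++count;
      }
      return count;
    }
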
@@ -440,10 +485,15 @@ class HValue: public ZoneObject {
GVN_FLAG_LIST(DECLARE_DO)
#undef DECLARE_DO
kFlexibleRepresentation,
+ // Participate in Global Value Numbering, i.e. elimination of
+ // unnecessary recomputations. If an instruction sets this flag, it must
+ // implement DataEquals(), which will be used to determine if other
+ // occurrences of the instruction are indeed the same.
kUseGVN,
kCanOverflow,
kBailoutOnMinusZero,
kCanBeDivByZero,
+ kDeoptimizeOnUndefined,
kIsArguments,
kTruncatingToInt32,
kLastFlag = kTruncatingToInt32
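
The expanded kUseGVN comment states a contract that every one-line DataEquals() in this patch fulfills. Spelled out on a hypothetical instruction (not in the tree):

    class HMyPureOp: public HUnaryOperation {
     public:
      explicit HMyPureOp(HValue* value) : HUnaryOperation(value) {
        set_representation(Representation::Integer32());
        SetFlag(kUseGVN);  // Opting in obliges us to define DataEquals().
      }
      virtual Representation RequiredInputRepresentation(int index) const {
        return Representation::Integer32();
      }
     protected:
      // Same opcode, equal operands => same value; GVN may merge the two.
      virtual bool DataEquals(HValue* other) { return true; }
    };

Operand equality is compared by the value-numbering pass itself, so DataEquals() only has to compare instruction-specific payload; that is why the common case is simply `return true`.
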
@@ -474,16 +524,30 @@ class HValue: public ZoneObject {
enum Opcode {
// Declare a unique enum value for each hydrogen instruction.
- #define DECLARE_DO(type) k##type,
- HYDROGEN_ALL_INSTRUCTION_LIST(DECLARE_DO)
- #undef DECLARE_DO
- kMaxInstructionClass
+ #define DECLARE_OPCODE(type) k##type,
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+ kPhi
+ #undef DECLARE_OPCODE
};
+ virtual Opcode opcode() const = 0;
+
+  // Declare a non-virtual predicate for each concrete HInstruction or HValue.
+ #define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+ #undef DECLARE_PREDICATE
+ bool IsPhi() const { return opcode() == kPhi; }
+
+  // Declare virtual predicates for abstract HInstruction or HValue classes.
+ #define DECLARE_PREDICATE(type) \
+ virtual bool Is##type() const { return false; }
+ HYDROGEN_ABSTRACT_INSTRUCTION_LIST(DECLARE_PREDICATE)
+ #undef DECLARE_PREDICATE
HValue() : block_(NULL),
id_(kNoNumber),
- uses_(2),
type_(HType::Tagged()),
+ use_list_(NULL),
range_(NULL),
flags_(0) {}
virtual ~HValue() {}
@@ -494,22 +558,24 @@ class HValue: public ZoneObject {
int id() const { return id_; }
void set_id(int id) { id_ = id; }
- const ZoneList<HValue*>* uses() const { return &uses_; }
+ HUseIterator uses() const { return HUseIterator(use_list_); }
- virtual bool EmitAtUses() const { return false; }
+ virtual bool EmitAtUses() { return false; }
Representation representation() const { return representation_; }
void ChangeRepresentation(Representation r) {
// Representation was already set and is allowed to be changed.
- ASSERT(!representation_.IsNone());
ASSERT(!r.IsNone());
ASSERT(CheckFlag(kFlexibleRepresentation));
RepresentationChanged(r);
representation_ = r;
}
+ void AssumeRepresentation(Representation r);
+
+ virtual bool IsConvertibleToInteger() const { return true; }
HType type() const { return type_; }
void set_type(HType type) {
- ASSERT(uses_.length() == 0);
+ ASSERT(HasNoUses());
type_ = type;
}
@@ -535,16 +601,14 @@ class HValue: public ZoneObject {
virtual HValue* OperandAt(int index) = 0;
void SetOperandAt(int index, HValue* value);
- int LookupOperandIndex(int occurrence_index, HValue* op);
- bool UsesMultipleTimes(HValue* op);
-
- void ReplaceAndDelete(HValue* other);
- void ReplaceValue(HValue* other);
- void ReplaceAtUse(HValue* use, HValue* other);
- void ReplaceFirstAtUse(HValue* use, HValue* other, Representation r);
- bool HasNoUses() const { return uses_.is_empty(); }
+ void DeleteAndReplaceWith(HValue* other);
+ void ReplaceAllUsesWith(HValue* other);
+ bool HasNoUses() const { return use_list_ == NULL; }
+ bool HasMultipleUses() const {
+ return use_list_ != NULL && use_list_->tail() != NULL;
+ }
+ int UseCount() const;
void ClearOperands();
- void Delete();
int flags() const { return flags_; }
void SetFlag(Flag f) { flags_ |= (1 << f); }
@@ -574,21 +638,17 @@ class HValue: public ZoneObject {
// then return it. Return NULL to have the instruction deleted.
virtual HValue* Canonicalize() { return this; }
- // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
- HYDROGEN_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
bool Equals(HValue* other);
virtual intptr_t Hashcode();
// Printing support.
virtual void PrintTo(StringStream* stream) = 0;
void PrintNameTo(StringStream* stream);
- static void PrintTypeTo(HType type, StringStream* stream);
+ void PrintTypeTo(StringStream* stream);
+ void PrintRangeTo(StringStream* stream);
+ void PrintChangesTo(StringStream* stream);
- virtual const char* Mnemonic() const = 0;
- virtual Opcode opcode() const = 0;
+ const char* Mnemonic() const;
// Updated the inferred type of this instruction and returns true if
// it has changed.
@@ -628,7 +688,10 @@ class HValue: public ZoneObject {
return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
}
- void InternalReplaceAtUse(HValue* use, HValue* other);
+ // Remove the matching use from the use list if present. Returns the
+ // removed list node or NULL.
+ HUseListNode* RemoveUse(HValue* value, int index);
+
void RegisterUse(int index, HValue* new_value);
HBasicBlock* block_;
@@ -638,8 +701,8 @@ class HValue: public ZoneObject {
int id_;
Representation representation_;
- ZoneList<HValue*> uses_;
HType type_;
+ HUseListNode* use_list_;
Range* range_;
int flags_;
@@ -670,13 +733,9 @@ class HInstruction: public HValue {
virtual void Verify();
#endif
- // Returns whether this is some kind of deoptimizing check
- // instruction.
- virtual bool IsCheckInstruction() const { return false; }
-
virtual bool IsCall() { return false; }
- DECLARE_INSTRUCTION(Instruction)
+ DECLARE_ABSTRACT_INSTRUCTION(Instruction)
protected:
HInstruction()
@@ -694,6 +753,8 @@ class HInstruction: public HValue {
SetBlock(block);
}
+ void PrintMnemonicTo(StringStream* stream);
+
HInstruction* next_;
HInstruction* previous_;
int position_;
@@ -702,98 +763,98 @@ class HInstruction: public HValue {
};
-class HControlInstruction: public HInstruction {
+template<int V>
+class HTemplateInstruction : public HInstruction {
public:
- HControlInstruction(HBasicBlock* first, HBasicBlock* second)
- : first_successor_(first), second_successor_(second) {
- }
-
- HBasicBlock* FirstSuccessor() const { return first_successor_; }
- HBasicBlock* SecondSuccessor() const { return second_successor_; }
-
- virtual void PrintDataTo(StringStream* stream);
+ int OperandCount() { return V; }
+ HValue* OperandAt(int i) { return inputs_[i]; }
- DECLARE_INSTRUCTION(ControlInstruction)
+ protected:
+ void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
private:
- HBasicBlock* first_successor_;
- HBasicBlock* second_successor_;
+ EmbeddedContainer<HValue*, V> inputs_;
};
-template<int NumElements>
-class HOperandContainer {
+class HControlInstruction: public HInstruction {
public:
- HOperandContainer() : elems_() { }
+ virtual HBasicBlock* SuccessorAt(int i) = 0;
+ virtual int SuccessorCount() = 0;
- int length() { return NumElements; }
- HValue*& operator[](int i) {
- ASSERT(i < length());
- return elems_[i];
+ virtual void PrintDataTo(StringStream* stream);
+
+ HBasicBlock* FirstSuccessor() {
+ return SuccessorCount() > 0 ? SuccessorAt(0) : NULL;
+ }
+ HBasicBlock* SecondSuccessor() {
+ return SuccessorCount() > 1 ? SuccessorAt(1) : NULL;
}
- private:
- HValue* elems_[NumElements];
+ DECLARE_ABSTRACT_INSTRUCTION(ControlInstruction)
};
-template<>
-class HOperandContainer<0> {
+class HSuccessorIterator BASE_EMBEDDED {
public:
- int length() { return 0; }
- HValue*& operator[](int i) {
- UNREACHABLE();
- static HValue* t = 0;
- return t;
- }
+ explicit HSuccessorIterator(HControlInstruction* instr)
+ : instr_(instr), current_(0) { }
+
+ bool Done() { return current_ >= instr_->SuccessorCount(); }
+ HBasicBlock* Current() { return instr_->SuccessorAt(current_); }
+ void Advance() { current_++; }
+
+ private:
+ HControlInstruction* instr_;
+ int current_;
};
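
HSuccessorIterator replaces the fixed first/second successor pair with arity-independent traversal. The expected pattern (sketch, hypothetical helper):

    // Visit every successor block of a control instruction.
    void MarkSuccessorsLive(HControlInstruction* instr) {
      for (HSuccessorIterator it(instr); !it.Done(); it.Advance()) {
        HBasicBlock* successor = it.Current();
        // ... mark `successor` live, push it on a worklist, etc.
      }
    }
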
-template<int V>
-class HTemplateInstruction : public HInstruction {
+template<int S, int V>
+class HTemplateControlInstruction: public HControlInstruction {
public:
+ int SuccessorCount() { return S; }
+ HBasicBlock* SuccessorAt(int i) { return successors_[i]; }
+
int OperandCount() { return V; }
HValue* OperandAt(int i) { return inputs_[i]; }
protected:
+ void SetSuccessorAt(int i, HBasicBlock* block) { successors_[i] = block; }
void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
private:
- HOperandContainer<V> inputs_;
+ EmbeddedContainer<HBasicBlock*, S> successors_;
+ EmbeddedContainer<HValue*, V> inputs_;
};
-template<int V>
-class HTemplateControlInstruction : public HControlInstruction {
+class HBlockEntry: public HTemplateInstruction<0> {
public:
- HTemplateControlInstruction<V>(HBasicBlock* first, HBasicBlock* second)
- : HControlInstruction(first, second) { }
- int OperandCount() { return V; }
- HValue* OperandAt(int i) { return inputs_[i]; }
-
- protected:
- void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
- private:
- HOperandContainer<V> inputs_;
+ DECLARE_CONCRETE_INSTRUCTION(BlockEntry)
};
-class HBlockEntry: public HTemplateInstruction<0> {
+// We insert a soft deoptimize when we hit code with unknown type feedback,
+// so that we get a chance to re-optimize with useful type feedback.
+// Unlike HDeoptimize, HSoftDeoptimize does not end a basic block.
+class HSoftDeoptimize: public HTemplateInstruction<0> {
public:
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(BlockEntry, "block_entry")
+ DECLARE_CONCRETE_INSTRUCTION(SoftDeoptimize)
};
class HDeoptimize: public HControlInstruction {
public:
- explicit HDeoptimize(int environment_length)
- : HControlInstruction(NULL, NULL),
- values_(environment_length) { }
+ explicit HDeoptimize(int environment_length) : values_(environment_length) { }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
@@ -801,13 +862,25 @@ class HDeoptimize: public HControlInstruction {
virtual int OperandCount() { return values_.length(); }
virtual HValue* OperandAt(int index) { return values_[index]; }
+ virtual void PrintDataTo(StringStream* stream);
+
+ virtual int SuccessorCount() { return 0; }
+ virtual HBasicBlock* SuccessorAt(int i) {
+ UNREACHABLE();
+ return NULL;
+ }
void AddEnvironmentValue(HValue* value) {
values_.Add(NULL);
SetOperandAt(values_.length() - 1, value);
}
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
+
+ enum UseEnvironment {
+ kNoUses,
+ kUseAll
+ };
protected:
virtual void InternalSetOperandAt(int index, HValue* value) {
@@ -819,42 +892,33 @@ class HDeoptimize: public HControlInstruction {
};
-class HGoto: public HTemplateControlInstruction<0> {
+class HGoto: public HTemplateControlInstruction<1, 0> {
public:
- explicit HGoto(HBasicBlock* target)
- : HTemplateControlInstruction<0>(target, NULL),
- include_stack_check_(false) { }
-
- void set_include_stack_check(bool include_stack_check) {
- include_stack_check_ = include_stack_check;
- }
- bool include_stack_check() const { return include_stack_check_; }
+ explicit HGoto(HBasicBlock* target) {
+ SetSuccessorAt(0, target);
+ }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-
- private:
- bool include_stack_check_;
+ DECLARE_CONCRETE_INSTRUCTION(Goto)
};
-class HUnaryControlInstruction: public HTemplateControlInstruction<1> {
+class HUnaryControlInstruction: public HTemplateControlInstruction<2, 1> {
public:
- explicit HUnaryControlInstruction(HValue* value,
- HBasicBlock* true_target,
- HBasicBlock* false_target)
- : HTemplateControlInstruction<1>(true_target, false_target) {
+ HUnaryControlInstruction(HValue* value,
+ HBasicBlock* true_target,
+ HBasicBlock* false_target) {
SetOperandAt(0, value);
+ SetSuccessorAt(0, true_target);
+ SetSuccessorAt(1, false_target);
}
virtual void PrintDataTo(StringStream* stream);
HValue* value() { return OperandAt(0); }
-
- DECLARE_INSTRUCTION(UnaryControlInstruction)
};
@@ -869,7 +933,7 @@ class HTest: public HUnaryControlInstruction {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(Test, "test")
+ DECLARE_CONCRETE_INSTRUCTION(Test)
};
@@ -894,36 +958,38 @@ class HCompareMap: public HUnaryControlInstruction {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(CompareMap, "compare_map")
+ DECLARE_CONCRETE_INSTRUCTION(CompareMap)
private:
Handle<Map> map_;
};
-class HReturn: public HUnaryControlInstruction {
+class HReturn: public HTemplateControlInstruction<0, 1> {
public:
- explicit HReturn(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) {
+ explicit HReturn(HValue* value) {
+ SetOperandAt(0, value);
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+ virtual void PrintDataTo(StringStream* stream);
+
+ HValue* value() { return OperandAt(0); }
+
+ DECLARE_CONCRETE_INSTRUCTION(Return)
};
-class HAbnormalExit: public HTemplateControlInstruction<0> {
+class HAbnormalExit: public HTemplateControlInstruction<0, 0> {
public:
- HAbnormalExit() : HTemplateControlInstruction<0>(NULL, NULL) { }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(AbnormalExit, "abnormal_exit")
+ DECLARE_CONCRETE_INSTRUCTION(AbnormalExit)
};
@@ -933,10 +999,16 @@ class HUnaryOperation: public HTemplateInstruction<1> {
SetOperandAt(0, value);
}
+ static HUnaryOperation* cast(HValue* value) {
+ return reinterpret_cast<HUnaryOperation*>(value);
+ }
+
+ virtual bool CanTruncateToInt32() const {
+ return CheckFlag(kTruncatingToInt32);
+ }
+
HValue* value() { return OperandAt(0); }
virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_INSTRUCTION(UnaryOperation)
};
@@ -950,7 +1022,38 @@ class HThrow: public HUnaryOperation {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+ DECLARE_CONCRETE_INSTRUCTION(Throw)
+};
+
+
+class HUseConst: public HUnaryOperation {
+ public:
+ explicit HUseConst(HValue* old_value) : HUnaryOperation(old_value) { }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(UseConst)
+};
+
+
+class HForceRepresentation: public HTemplateInstruction<1> {
+ public:
+ HForceRepresentation(HValue* value, Representation required_representation) {
+ SetOperandAt(0, value);
+ set_representation(required_representation);
+ }
+
+ HValue* value() { return OperandAt(0); }
+
+ virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return representation(); // Same as the output representation.
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation)
};
@@ -958,13 +1061,17 @@ class HChange: public HUnaryOperation {
public:
HChange(HValue* value,
Representation from,
- Representation to)
- : HUnaryOperation(value), from_(from), to_(to) {
+ Representation to,
+ bool is_truncating,
+ bool deoptimize_on_undefined)
+ : HUnaryOperation(value),
+ from_(from),
+ deoptimize_on_undefined_(deoptimize_on_undefined) {
ASSERT(!from.IsNone() && !to.IsNone());
ASSERT(!from.Equals(to));
set_representation(to);
SetFlag(kUseGVN);
-
+ if (is_truncating) SetFlag(kTruncatingToInt32);
if (from.IsInteger32() && to.IsTagged() && value->range() != NULL &&
value->range()->IsInSmiRange()) {
set_type(HType::Smi());
@@ -974,22 +1081,15 @@ class HChange: public HUnaryOperation {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
Representation from() const { return from_; }
- Representation to() const { return to_; }
+ Representation to() const { return representation(); }
+ bool deoptimize_on_undefined() const { return deoptimize_on_undefined_; }
virtual Representation RequiredInputRepresentation(int index) const {
return from_;
}
- bool CanTruncateToInt32() const {
- for (int i = 0; i < uses()->length(); ++i) {
- if (!uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) return false;
- }
- return true;
- }
-
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(Change,
- CanTruncateToInt32() ? "truncate" : "change")
+ DECLARE_CONCRETE_INSTRUCTION(Change)
protected:
virtual bool DataEquals(HValue* other) {
@@ -997,21 +1097,91 @@ class HChange: public HUnaryOperation {
HChange* change = HChange::cast(other);
return value() == change->value()
&& to().Equals(change->to())
- && CanTruncateToInt32() == change->CanTruncateToInt32();
+ && deoptimize_on_undefined() == change->deoptimize_on_undefined();
}
private:
Representation from_;
- Representation to_;
+ bool deoptimize_on_undefined_;
+};
+
+
+class HClampToUint8: public HUnaryOperation {
+ public:
+ explicit HClampToUint8(HValue* value)
+ : HUnaryOperation(value),
+ input_rep_(Representation::None()) {
+ SetFlag(kFlexibleRepresentation);
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return input_rep_;
+ }
+
+ virtual Representation InferredRepresentation() {
+ // TODO(danno): Inference on input types should happen separately from
+ // return representation.
+ Representation new_rep = value()->representation();
+ if (input_rep_.IsNone()) {
+ if (!new_rep.IsNone()) {
+ input_rep_ = new_rep;
+ return Representation::Integer32();
+ } else {
+ return Representation::None();
+ }
+ } else {
+ return Representation::Integer32();
+ }
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampToUint8)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ Representation input_rep_;
+};
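
HClampToUint8 denotes the clamp that external pixel stores require. A scalar sketch of the integer case (the double path additionally rounds, which this sketch deliberately omits):

    #include <stdint.h>

    // Saturate a 32-bit integer into the byte range [0, 255].
    static inline uint8_t ClampInt32ToUint8(int32_t value) {
      if (value < 0) return 0;
      if (value > 255) return 255;
      return static_cast<uint8_t>(value);
    }
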
+
+
+class HToInt32: public HUnaryOperation {
+ public:
+ explicit HToInt32(HValue* value)
+ : HUnaryOperation(value) {
+ set_representation(Representation::Integer32());
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
+ virtual bool CanTruncateToInt32() const {
+ return true;
+ }
+
+ virtual HValue* Canonicalize() {
+ if (value()->representation().IsInteger32()) {
+ return value();
+ } else {
+ return this;
+ }
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToInt32)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
};
class HSimulate: public HInstruction {
public:
- HSimulate(int ast_id, int pop_count, int environment_length)
+ HSimulate(int ast_id, int pop_count)
: ast_id_(ast_id),
pop_count_(pop_count),
- environment_length_(environment_length),
values_(2),
assigned_indexes_(2) {}
virtual ~HSimulate() {}
@@ -1025,7 +1195,6 @@ class HSimulate: public HInstruction {
ast_id_ = id;
}
- int environment_length() const { return environment_length_; }
int pop_count() const { return pop_count_; }
const ZoneList<HValue*>* values() const { return &values_; }
int GetAssignedIndexAt(int index) const {
@@ -1048,7 +1217,7 @@ class HSimulate: public HInstruction {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(Simulate, "simulate")
+ DECLARE_CONCRETE_INSTRUCTION(Simulate)
#ifdef DEBUG
virtual void Verify();
@@ -1071,7 +1240,6 @@ class HSimulate: public HInstruction {
}
int ast_id_;
int pop_count_;
- int environment_length_;
ZoneList<HValue*> values_;
ZoneList<int> assigned_indexes_;
};
@@ -1079,36 +1247,61 @@ class HSimulate: public HInstruction {
class HStackCheck: public HTemplateInstruction<0> {
public:
- HStackCheck() { }
+ enum Type {
+ kFunctionEntry,
+ kBackwardsBranch
+ };
+
+ explicit HStackCheck(Type type) : type_(type) { }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack_check")
+ void Eliminate() {
+ // The stack check eliminator might try to eliminate the same stack
+ // check instruction multiple times.
+ if (IsLinked()) {
+ DeleteFromGraph();
+ }
+ }
+
+ bool is_function_entry() { return type_ == kFunctionEntry; }
+ bool is_backwards_branch() { return type_ == kBackwardsBranch; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StackCheck)
+
+ private:
+ Type type_;
};
class HEnterInlined: public HTemplateInstruction<0> {
public:
- HEnterInlined(Handle<JSFunction> closure, FunctionLiteral* function)
- : closure_(closure), function_(function) {
+ HEnterInlined(Handle<JSFunction> closure,
+ FunctionLiteral* function,
+ CallKind call_kind)
+ : closure_(closure),
+ function_(function),
+ call_kind_(call_kind) {
}
virtual void PrintDataTo(StringStream* stream);
Handle<JSFunction> closure() const { return closure_; }
FunctionLiteral* function() const { return function_; }
+ CallKind call_kind() const { return call_kind_; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(EnterInlined, "enter_inlined")
+ DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
private:
Handle<JSFunction> closure_;
FunctionLiteral* function_;
+ CallKind call_kind_;
};
@@ -1120,7 +1313,7 @@ class HLeaveInlined: public HTemplateInstruction<0> {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(LeaveInlined, "leave_inlined")
+ DECLARE_CONCRETE_INSTRUCTION(LeaveInlined)
};
@@ -1136,7 +1329,25 @@ class HPushArgument: public HUnaryOperation {
HValue* argument() { return OperandAt(0); }
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push_argument")
+ DECLARE_CONCRETE_INSTRUCTION(PushArgument)
+};
+
+
+class HThisFunction: public HTemplateInstruction<0> {
+ public:
+ HThisFunction() {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -1151,7 +1362,7 @@ class HContext: public HTemplateInstruction<0> {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(Context, "context");
+ DECLARE_CONCRETE_INSTRUCTION(Context)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -1165,7 +1376,7 @@ class HOuterContext: public HUnaryOperation {
SetFlag(kUseGVN);
}
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer_context");
+  DECLARE_CONCRETE_INSTRUCTION(OuterContext)
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
@@ -1183,7 +1394,7 @@ class HGlobalObject: public HUnaryOperation {
SetFlag(kUseGVN);
}
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global_object")
+ DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
@@ -1202,7 +1413,7 @@ class HGlobalReceiver: public HUnaryOperation {
SetFlag(kUseGVN);
}
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global_receiver")
+ DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver)
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
@@ -1247,8 +1458,6 @@ class HUnaryCall: public HCall<1> {
virtual void PrintDataTo(StringStream* stream);
HValue* value() { return OperandAt(0); }
-
- DECLARE_INSTRUCTION(UnaryCall)
};
@@ -1268,8 +1477,23 @@ class HBinaryCall: public HCall<2> {
HValue* first() { return OperandAt(0); }
HValue* second() { return OperandAt(1); }
+};
+
+
+class HInvokeFunction: public HBinaryCall {
+ public:
+ HInvokeFunction(HValue* context, HValue* function, int argument_count)
+ : HBinaryCall(context, function, argument_count) {
+ }
- DECLARE_INSTRUCTION(BinaryCall)
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ HValue* context() { return first(); }
+ HValue* function() { return second(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
};
@@ -1281,7 +1505,8 @@ class HCallConstantFunction: public HCall<0> {
Handle<JSFunction> function() const { return function_; }
bool IsApplyFunction() const {
- return function_->code() == Builtins::builtin(Builtins::FunctionApply);
+ return function_->code() ==
+ Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply);
}
virtual void PrintDataTo(StringStream* stream);
@@ -1290,7 +1515,7 @@ class HCallConstantFunction: public HCall<0> {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call_constant_function")
+ DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction)
private:
Handle<JSFunction> function_;
@@ -1310,7 +1535,7 @@ class HCallKeyed: public HBinaryCall {
HValue* context() { return first(); }
HValue* key() { return second(); }
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call_keyed")
+ DECLARE_CONCRETE_INSTRUCTION(CallKeyed)
};
@@ -1325,7 +1550,7 @@ class HCallNamed: public HUnaryCall {
HValue* context() { return value(); }
Handle<String> name() const { return name_; }
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call_named")
+ DECLARE_CONCRETE_INSTRUCTION(CallNamed)
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
@@ -1348,7 +1573,7 @@ class HCallFunction: public HUnaryCall {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call_function")
+ DECLARE_CONCRETE_INSTRUCTION(CallFunction)
};
@@ -1367,7 +1592,7 @@ class HCallGlobal: public HUnaryCall {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call_global")
+ DECLARE_CONCRETE_INSTRUCTION(CallGlobal)
private:
Handle<String> name_;
@@ -1387,7 +1612,7 @@ class HCallKnownGlobal: public HCall<0> {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call_known_global")
+ DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal)
private:
Handle<JSFunction> target_;
@@ -1407,29 +1632,29 @@ class HCallNew: public HBinaryCall {
HValue* context() { return first(); }
HValue* constructor() { return second(); }
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call_new")
+ DECLARE_CONCRETE_INSTRUCTION(CallNew)
};
class HCallRuntime: public HCall<0> {
public:
HCallRuntime(Handle<String> name,
- Runtime::Function* c_function,
+ const Runtime::Function* c_function,
int argument_count)
: HCall<0>(argument_count), c_function_(c_function), name_(name) { }
virtual void PrintDataTo(StringStream* stream);
- Runtime::Function* function() const { return c_function_; }
+ const Runtime::Function* function() const { return c_function_; }
Handle<String> name() const { return name_; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call_runtime")
+ DECLARE_CONCRETE_INSTRUCTION(CallRuntime)
private:
- Runtime::Function* c_function_;
+ const Runtime::Function* c_function_;
Handle<String> name_;
};
@@ -1450,7 +1675,7 @@ class HJSArrayLength: public HUnaryOperation {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js_array_length")
+ DECLARE_CONCRETE_INSTRUCTION(JSArrayLength)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -1469,16 +1694,16 @@ class HFixedArrayLength: public HUnaryOperation {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed_array_length")
+ DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength)
protected:
virtual bool DataEquals(HValue* other) { return true; }
};
-class HPixelArrayLength: public HUnaryOperation {
+class HExternalArrayLength: public HUnaryOperation {
public:
- explicit HPixelArrayLength(HValue* value) : HUnaryOperation(value) {
+ explicit HExternalArrayLength(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Integer32());
// The result of this instruction is idempotent as long as its inputs don't
     // change. The length of an external array cannot change once set, so it's not
@@ -1490,7 +1715,26 @@ class HPixelArrayLength: public HUnaryOperation {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel_array_length")
+ DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
+class HElementsKind: public HUnaryOperation {
+ public:
+ explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
+ set_representation(Representation::Integer32());
+ SetFlag(kUseGVN);
+ SetFlag(kDependsOnMaps);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ElementsKind)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -1510,7 +1754,7 @@ class HBitNot: public HUnaryOperation {
}
virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(BitNot, "bit_not")
+ DECLARE_CONCRETE_INSTRUCTION(BitNot)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -1582,7 +1826,7 @@ class HUnaryMathOperation: public HUnaryOperation {
BuiltinFunctionId op() const { return op_; }
const char* OpName() const;
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary_math_operation")
+ DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation)
protected:
virtual bool DataEquals(HValue* other) {
@@ -1607,20 +1851,20 @@ class HLoadElements: public HUnaryOperation {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+ DECLARE_CONCRETE_INSTRUCTION(LoadElements)
protected:
virtual bool DataEquals(HValue* other) { return true; }
};
-class HLoadPixelArrayExternalPointer: public HUnaryOperation {
+class HLoadExternalArrayPointer: public HUnaryOperation {
public:
- explicit HLoadPixelArrayExternalPointer(HValue* value)
+ explicit HLoadExternalArrayPointer(HValue* value)
: HUnaryOperation(value) {
set_representation(Representation::External());
// The result of this instruction is idempotent as long as its inputs don't
- // change. The external array of a pixel array elements object cannot
+ // change. The external array of a specialized array elements object cannot
// change once set, so it's no necessary to introduce any additional
// dependencies on top of the inputs.
SetFlag(kUseGVN);
@@ -1630,8 +1874,7 @@ class HLoadPixelArrayExternalPointer: public HUnaryOperation {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
- "load-pixel-array-external-pointer")
+ DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -1647,8 +1890,6 @@ class HCheckMap: public HUnaryOperation {
SetFlag(kDependsOnMaps);
}
- virtual bool IsCheckInstruction() const { return true; }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1661,7 +1902,7 @@ class HCheckMap: public HUnaryOperation {
Handle<Map> map() const { return map_; }
- DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check_map")
+ DECLARE_CONCRETE_INSTRUCTION(CheckMap)
protected:
virtual bool DataEquals(HValue* other) {
@@ -1682,8 +1923,6 @@ class HCheckFunction: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsCheckInstruction() const { return true; }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1696,7 +1935,7 @@ class HCheckFunction: public HUnaryOperation {
Handle<JSFunction> target() const { return target_; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check_function")
+ DECLARE_CONCRETE_INSTRUCTION(CheckFunction)
protected:
virtual bool DataEquals(HValue* other) {
@@ -1711,22 +1950,18 @@ class HCheckFunction: public HUnaryOperation {
class HCheckInstanceType: public HUnaryOperation {
public:
- // Check that the instance type is in the range [first, last] where
- // both first and last are included.
- HCheckInstanceType(HValue* value, InstanceType first, InstanceType last)
- : HUnaryOperation(value), first_(first), last_(last) {
- ASSERT(first <= last);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- if ((FIRST_STRING_TYPE < first && last <= LAST_STRING_TYPE) ||
- (FIRST_STRING_TYPE <= first && last < LAST_STRING_TYPE)) {
- // A particular string instance type can change because of GC or
- // externalization, but the value still remains a string.
- SetFlag(kDependsOnMaps);
- }
+ static HCheckInstanceType* NewIsSpecObject(HValue* value) {
+ return new HCheckInstanceType(value, IS_SPEC_OBJECT);
+ }
+ static HCheckInstanceType* NewIsJSArray(HValue* value) {
+ return new HCheckInstanceType(value, IS_JS_ARRAY);
+ }
+ static HCheckInstanceType* NewIsString(HValue* value) {
+ return new HCheckInstanceType(value, IS_STRING);
+ }
+ static HCheckInstanceType* NewIsSymbol(HValue* value) {
+ return new HCheckInstanceType(value, IS_SYMBOL);
}
-
- virtual bool IsCheckInstruction() const { return true; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
@@ -1736,12 +1971,20 @@ class HCheckInstanceType: public HUnaryOperation {
virtual void Verify();
#endif
- static HCheckInstanceType* NewIsJSObjectOrJSFunction(HValue* value);
+ virtual HValue* Canonicalize() {
+ if (!value()->type().IsUninitialized() &&
+ value()->type().IsString() &&
+ check_ == IS_STRING) {
+ return NULL;
+ }
+ return this;
+ }
- InstanceType first() const { return first_; }
- InstanceType last() const { return last_; }
+ bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
+ void GetCheckInterval(InstanceType* first, InstanceType* last);
+ void GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag);
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check_instance_type")
+ DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType)
protected:
   // TODO(ager): It could be nice to allow the omission of instance
   // type checks that are subsumed by an earlier check with a larger range.
// with a larger range.
virtual bool DataEquals(HValue* other) {
HCheckInstanceType* b = HCheckInstanceType::cast(other);
- return (first_ == b->first()) && (last_ == b->last());
+ return check_ == b->check_;
}
private:
- InstanceType first_;
- InstanceType last_;
+ enum Check {
+ IS_SPEC_OBJECT,
+ IS_JS_ARRAY,
+ IS_STRING,
+ IS_SYMBOL,
+ LAST_INTERVAL_CHECK = IS_JS_ARRAY
+ };
+
+ HCheckInstanceType(HValue* value, Check check)
+ : HUnaryOperation(value), check_(check) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ const Check check_;
};
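
The Check enum above splits interval checks (instance type within [first, last]) from mask/tag checks (a bit pattern in the instance type byte). A sketch of how a consumer uses the mask/tag form, assuming the usual instance-type encoding (the helper is hypothetical; kIsNotStringMask and kStringTag come from objects.h):

    // True if the instance type byte matches the mask/tag pair, e.g. for
    // IS_STRING every string representation passes in one AND + compare.
    static bool PassesMaskCheck(uint8_t instance_type,
                                uint8_t mask,
                                uint8_t tag) {
      return (instance_type & mask) == tag;
    }
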
@@ -1765,8 +2021,6 @@ class HCheckNonSmi: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsCheckInstruction() const { return true; }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1777,7 +2031,19 @@ class HCheckNonSmi: public HUnaryOperation {
virtual void Verify();
#endif
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check_non_smi")
+ virtual HValue* Canonicalize() {
+ HType value_type = value()->type();
+ if (!value_type.IsUninitialized() &&
+ (value_type.IsHeapNumber() ||
+ value_type.IsString() ||
+ value_type.IsBoolean() ||
+ value_type.IsNonPrimitive())) {
+ return NULL;
+ }
+ return this;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -1792,8 +2058,6 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
SetFlag(kDependsOnMaps);
}
- virtual bool IsCheckInstruction() const { return true; }
-
#ifdef DEBUG
virtual void Verify();
#endif
@@ -1801,14 +2065,14 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
Handle<JSObject> prototype() const { return prototype_; }
Handle<JSObject> holder() const { return holder_; }
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check_prototype_maps")
+ DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps)
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
virtual intptr_t Hashcode() {
- ASSERT(!Heap::IsAllocationAllowed());
+ ASSERT(!HEAP->IsAllocationAllowed());
intptr_t hash = reinterpret_cast<intptr_t>(*prototype());
hash = 17 * hash + reinterpret_cast<intptr_t>(*holder());
return hash;
@@ -1834,8 +2098,6 @@ class HCheckSmi: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsCheckInstruction() const { return true; }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1845,7 +2107,7 @@ class HCheckSmi: public HUnaryOperation {
virtual void Verify();
#endif
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check_smi")
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -1857,7 +2119,9 @@ class HPhi: public HValue {
explicit HPhi(int merged_index)
: inputs_(2),
merged_index_(merged_index),
- phi_id_(-1) {
+ phi_id_(-1),
+ is_live_(false),
+ is_convertible_to_integer_(true) {
for (int i = 0; i < Representation::kNumRepresentations; i++) {
non_phi_uses_[i] = 0;
indirect_uses_[i] = 0;
@@ -1891,21 +2155,18 @@ class HPhi: public HValue {
virtual HValue* OperandAt(int index) { return inputs_[index]; }
HValue* GetRedundantReplacement();
void AddInput(HValue* value);
+ bool HasRealUses();
bool IsReceiver() { return merged_index_ == 0; }
int merged_index() const { return merged_index_; }
- virtual const char* Mnemonic() const { return "phi"; }
-
virtual void PrintTo(StringStream* stream);
#ifdef DEBUG
virtual void Verify();
#endif
- DECLARE_INSTRUCTION(Phi)
-
void InitRealUses(int id);
void AddNonPhiUsesFrom(HPhi* other);
void AddIndirectUsesTo(int* use_count);
@@ -1929,6 +2190,22 @@ class HPhi: public HValue {
return indirect_uses_[Representation::kDouble];
}
int phi_id() { return phi_id_; }
+ bool is_live() { return is_live_; }
+ void set_is_live(bool b) { is_live_ = b; }
+
+ static HPhi* cast(HValue* value) {
+ ASSERT(value->IsPhi());
+ return reinterpret_cast<HPhi*>(value);
+ }
+ virtual Opcode opcode() const { return HValue::kPhi; }
+
+ virtual bool IsConvertibleToInteger() const {
+ return is_convertible_to_integer_;
+ }
+
+ void set_is_convertible_to_integer(bool b) {
+ is_convertible_to_integer_ = b;
+ }
protected:
virtual void DeleteFromGraph();
@@ -1943,6 +2220,8 @@ class HPhi: public HValue {
int non_phi_uses_[Representation::kNumRepresentations];
int indirect_uses_[Representation::kNumRepresentations];
int phi_id_;
+ bool is_live_;
+ bool is_convertible_to_integer_;
};
@@ -1957,7 +2236,7 @@ class HArgumentsObject: public HTemplateInstruction<0> {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject, "arguments-object")
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject)
};
@@ -1967,13 +2246,21 @@ class HConstant: public HTemplateInstruction<0> {
Handle<Object> handle() const { return handle_; }
- bool InOldSpace() const { return !Heap::InNewSpace(*handle_); }
+ bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
- virtual bool EmitAtUses() const { return !representation().IsDouble(); }
+ virtual bool IsConvertibleToInteger() const {
+ if (handle_->IsSmi()) return true;
+ if (handle_->IsHeapNumber() &&
+ (HeapNumber::cast(*handle_)->value() ==
+ static_cast<double>(NumberToInt32(*handle_)))) return true;
+ return false;
+ }
+
+ virtual bool EmitAtUses() { return !representation().IsDouble(); }
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
bool IsInteger() const { return handle_->IsSmi(); }
@@ -1991,8 +2278,10 @@ class HConstant: public HTemplateInstruction<0> {
}
bool HasStringValue() const { return handle_->IsString(); }
+ bool ToBoolean() const;
+
virtual intptr_t Hashcode() {
- ASSERT(!Heap::allow_allocation(false));
+ ASSERT(!HEAP->allow_allocation(false));
return reinterpret_cast<intptr_t>(*handle());
}
@@ -2000,7 +2289,7 @@ class HConstant: public HTemplateInstruction<0> {
virtual void Verify() { }
#endif
- DECLARE_CONCRETE_INSTRUCTION(Constant, "constant")
+ DECLARE_CONCRETE_INSTRUCTION(Constant)
protected:
virtual Range* InferRange();
@@ -2012,14 +2301,13 @@ class HConstant: public HTemplateInstruction<0> {
private:
Handle<Object> handle_;
- HType constant_type_;
// The following two values represent the int32 and the double value of the
// given constant if there is a lossless conversion between the constant
// and the specific representation.
- bool has_int32_value_;
+ bool has_int32_value_ : 1;
+ bool has_double_value_ : 1;
int32_t int32_value_;
- bool has_double_value_;
double double_value_;
};
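
IsConvertibleToInteger() above accepts a heap number only when its double value round-trips through int32. A standalone restatement (hypothetical helper; the real code defers to NumberToInt32, which implements ECMA-262 ToInt32):

    #include <stdint.h>

    // Lossless iff truncation to int32 and back reproduces the double.
    // The range test also rejects NaN and keeps the cast well defined.
    static bool IsLosslessInt32(double d) {
      if (!(d >= -2147483648.0 && d <= 2147483647.0)) return false;
      int32_t i = static_cast<int32_t>(d);
      return static_cast<double>(i) == d;
    }
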
@@ -2049,8 +2337,6 @@ class HBinaryOperation: public HTemplateInstruction<2> {
virtual bool IsCommutative() const { return false; }
virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_INSTRUCTION(BinaryOperation)
};
@@ -2080,7 +2366,7 @@ class HApplyArguments: public HTemplateInstruction<4> {
HValue* length() { return OperandAt(2); }
HValue* elements() { return OperandAt(3); }
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply_arguments")
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments)
};
@@ -2093,7 +2379,7 @@ class HArgumentsElements: public HTemplateInstruction<0> {
SetFlag(kUseGVN);
}
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments_elements")
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
@@ -2115,7 +2401,7 @@ class HArgumentsLength: public HUnaryOperation {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments_length")
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2145,7 +2431,7 @@ class HAccessArgumentsAt: public HTemplateInstruction<3> {
HValue* length() { return OperandAt(1); }
HValue* index() { return OperandAt(2); }
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access_arguments_at")
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt)
virtual bool DataEquals(HValue* other) { return true; }
};
@@ -2155,11 +2441,10 @@ class HBoundsCheck: public HBinaryOperation {
public:
HBoundsCheck(HValue* index, HValue* length)
: HBinaryOperation(index, length) {
+ set_representation(Representation::Integer32());
SetFlag(kUseGVN);
}
- virtual bool IsCheckInstruction() const { return true; }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Integer32();
}
@@ -2171,7 +2456,7 @@ class HBoundsCheck: public HBinaryOperation {
HValue* index() { return left(); }
HValue* length() { return right(); }
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds_check")
+ DECLARE_CONCRETE_INSTRUCTION(BoundsCheck)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2202,7 +2487,7 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
virtual HType CalculateInferredType();
- DECLARE_INSTRUCTION(BitwiseBinaryOperation)
+ DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
};
@@ -2232,8 +2517,6 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
}
return HValue::InferredRepresentation();
}
-
- DECLARE_INSTRUCTION(ArithmeticBinaryOperation)
};
@@ -2248,8 +2531,8 @@ class HCompare: public HBinaryOperation {
void SetInputRepresentation(Representation r);
- virtual bool EmitAtUses() const {
- return !HasSideEffects() && (uses()->length() <= 1);
+ virtual bool EmitAtUses() {
+ return !HasSideEffects() && !HasMultipleUses();
}
virtual Representation RequiredInputRepresentation(int index) const {
@@ -2267,7 +2550,7 @@ class HCompare: public HBinaryOperation {
return HValue::Hashcode() * 7 + token_;
}
- DECLARE_CONCRETE_INSTRUCTION(Compare, "compare")
+ DECLARE_CONCRETE_INSTRUCTION(Compare)
protected:
virtual bool DataEquals(HValue* other) {
@@ -2281,17 +2564,17 @@ class HCompare: public HBinaryOperation {
};
-class HCompareJSObjectEq: public HBinaryOperation {
+class HCompareObjectEq: public HBinaryOperation {
public:
- HCompareJSObjectEq(HValue* left, HValue* right)
+ HCompareObjectEq(HValue* left, HValue* right)
: HBinaryOperation(left, right) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kDependsOnMaps);
}
- virtual bool EmitAtUses() const {
- return !HasSideEffects() && (uses()->length() <= 1);
+ virtual bool EmitAtUses() {
+ return !HasSideEffects() && !HasMultipleUses();
}
virtual Representation RequiredInputRepresentation(int index) const {
@@ -2299,13 +2582,50 @@ class HCompareJSObjectEq: public HBinaryOperation {
}
virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(CompareJSObjectEq, "compare-js-object-eq")
+ DECLARE_CONCRETE_INSTRUCTION(CompareObjectEq)
protected:
virtual bool DataEquals(HValue* other) { return true; }
};
+class HCompareConstantEq: public HUnaryOperation {
+ public:
+ HCompareConstantEq(HValue* left, int right, Token::Value op)
+ : HUnaryOperation(left), op_(op), right_(right) {
+ ASSERT(op == Token::EQ_STRICT);
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ Token::Value op() const { return op_; }
+ int right() const { return right_; }
+
+ virtual bool EmitAtUses() {
+ return !HasSideEffects() && !HasMultipleUses();
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Integer32();
+ }
+
+ virtual HType CalculateInferredType() { return HType::Boolean(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareConstantEq)
+
+ protected:
+ virtual bool DataEquals(HValue* other) {
+ HCompareConstantEq* other_instr = HCompareConstantEq::cast(other);
+ return (op_ == other_instr->op_ &&
+ right_ == other_instr->right_);
+ }
+
+ private:
+ const Token::Value op_;
+ const int right_;
+};
+
+
class HUnaryPredicate: public HUnaryOperation {
public:
explicit HUnaryPredicate(HValue* value) : HUnaryOperation(value) {
@@ -2313,8 +2633,8 @@ class HUnaryPredicate: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool EmitAtUses() const {
- return !HasSideEffects() && (uses()->length() <= 1);
+ virtual bool EmitAtUses() {
+ return !HasSideEffects() && !HasMultipleUses();
}
virtual Representation RequiredInputRepresentation(int index) const {
@@ -2331,7 +2651,7 @@ class HIsNull: public HUnaryPredicate {
bool is_strict() const { return is_strict_; }
- DECLARE_CONCRETE_INSTRUCTION(IsNull, "is_null")
+ DECLARE_CONCRETE_INSTRUCTION(IsNull)
protected:
virtual bool DataEquals(HValue* other) {
@@ -2348,7 +2668,7 @@ class HIsObject: public HUnaryPredicate {
public:
explicit HIsObject(HValue* value) : HUnaryPredicate(value) { }
- DECLARE_CONCRETE_INSTRUCTION(IsObject, "is_object")
+ DECLARE_CONCRETE_INSTRUCTION(IsObject)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2359,7 +2679,18 @@ class HIsSmi: public HUnaryPredicate {
public:
explicit HIsSmi(HValue* value) : HUnaryPredicate(value) { }
- DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is_smi")
+ DECLARE_CONCRETE_INSTRUCTION(IsSmi)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
+class HIsUndetectable: public HUnaryPredicate {
+ public:
+ explicit HIsUndetectable(HValue* value) : HUnaryPredicate(value) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectable)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2373,15 +2704,15 @@ class HIsConstructCall: public HTemplateInstruction<0> {
SetFlag(kUseGVN);
}
- virtual bool EmitAtUses() const {
- return !HasSideEffects() && (uses()->length() <= 1);
+ virtual bool EmitAtUses() {
+ return !HasSideEffects() && !HasMultipleUses();
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is_construct_call")
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCall)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2400,9 +2731,13 @@ class HHasInstanceType: public HUnaryPredicate {
InstanceType from() { return from_; }
InstanceType to() { return to_; }
+ virtual bool EmitAtUses() {
+ return !HasSideEffects() && !HasMultipleUses();
+ }
+
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has_instance_type")
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceType)
protected:
virtual bool DataEquals(HValue* other) {
@@ -2420,18 +2755,25 @@ class HHasCachedArrayIndex: public HUnaryPredicate {
public:
explicit HHasCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has_cached_array_index")
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex)
protected:
virtual bool DataEquals(HValue* other) { return true; }
};
-class HGetCachedArrayIndex: public HUnaryPredicate {
+class HGetCachedArrayIndex: public HUnaryOperation {
public:
- explicit HGetCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
+ explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get_cached_array_index")
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2443,7 +2785,7 @@ class HClassOfTest: public HUnaryPredicate {
HClassOfTest(HValue* value, Handle<String> class_name)
: HUnaryPredicate(value), class_name_(class_name) { }
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class_of_test")
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTest)
virtual void PrintDataTo(StringStream* stream);
@@ -2468,7 +2810,7 @@ class HTypeofIs: public HUnaryPredicate {
Handle<String> type_literal() { return type_literal_; }
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof_is")
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIs)
protected:
virtual bool DataEquals(HValue* other) {
@@ -2495,17 +2837,15 @@ class HInstanceOf: public HTemplateInstruction<3> {
HValue* left() { return OperandAt(1); }
HValue* right() { return OperandAt(2); }
- virtual bool EmitAtUses() const {
- return !HasSideEffects() && (uses()->length() <= 1);
- }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
+ virtual HType CalculateInferredType();
+
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance_of")
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOf)
};
@@ -2523,8 +2863,9 @@ class HInstanceOfKnownGlobal: public HUnaryOperation {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance_of_known_global")
+ virtual HType CalculateInferredType();
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal)
private:
Handle<JSFunction> function_;
@@ -2543,7 +2884,7 @@ class HPower: public HBinaryOperation {
return (index == 1) ? Representation::None() : Representation::Double();
}
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_CONCRETE_INSTRUCTION(Power)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2566,7 +2907,7 @@ class HAdd: public HArithmeticBinaryOperation {
virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(Add, "add")
+ DECLARE_CONCRETE_INSTRUCTION(Add)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2583,7 +2924,7 @@ class HSub: public HArithmeticBinaryOperation {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- DECLARE_CONCRETE_INSTRUCTION(Sub, "sub")
+ DECLARE_CONCRETE_INSTRUCTION(Sub)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2605,7 +2946,7 @@ class HMul: public HArithmeticBinaryOperation {
return !representation().IsTagged();
}
- DECLARE_CONCRETE_INSTRUCTION(Mul, "mul")
+ DECLARE_CONCRETE_INSTRUCTION(Mul)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2620,9 +2961,19 @@ class HMod: public HArithmeticBinaryOperation {
SetFlag(kCanBeDivByZero);
}
+ bool HasPowerOf2Divisor() {
+ if (right()->IsConstant() &&
+ HConstant::cast(right())->HasInteger32Value()) {
+ int32_t value = HConstant::cast(right())->Integer32Value();
+ return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
+ }
+
+ return false;
+ }
+
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- DECLARE_CONCRETE_INSTRUCTION(Mod, "mod")
+ DECLARE_CONCRETE_INSTRUCTION(Mod)
protected:
virtual bool DataEquals(HValue* other) { return true; }
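
HasPowerOf2Divisor above lets the backend strength-reduce x % m to bit masking whenever m is plus or minus a power of two. A self-contained sketch of that reduction, assuming the usual bit-trick definition of IsPowerOf2 (V8 keeps its own helper); the minimum int32 dividend stays excluded because its magnitude overflows, which is the edge case optimized code must still guard:

#include <cassert>
#include <cstdint>
#include <cstdlib>

// x is a power of two iff it is positive with exactly one bit set.
static bool IsPowerOf2(int32_t x) { return x > 0 && (x & (x - 1)) == 0; }

// Truncating modulus by +/-2^k without a division. Like JS, the result
// takes the sign of the dividend, so negative dividends are handled by
// masking the magnitude and negating.
static int32_t ModPowerOf2(int32_t x, int32_t m) {
  assert(x != INT32_MIN && m != INT32_MIN);  // magnitudes must not overflow
  assert(m != 0 && (IsPowerOf2(m) || IsPowerOf2(-m)));
  int32_t mask = std::abs(m) - 1;            // 2^k - 1
  return x >= 0 ? (x & mask) : -(-x & mask);
}

int main() {
  assert(ModPowerOf2(13, 8) == 13 % 8);      // 5
  assert(ModPowerOf2(-13, 8) == -13 % 8);    // -5, sign of dividend
  assert(ModPowerOf2(13, -8) == 13 % -8);    // 5, divisor sign ignored
  return 0;
}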
@@ -2640,7 +2991,7 @@ class HDiv: public HArithmeticBinaryOperation {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- DECLARE_CONCRETE_INSTRUCTION(Div, "div")
+ DECLARE_CONCRETE_INSTRUCTION(Div)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2657,7 +3008,7 @@ class HBitAnd: public HBitwiseBinaryOperation {
virtual bool IsCommutative() const { return true; }
virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(BitAnd, "bit_and")
+ DECLARE_CONCRETE_INSTRUCTION(BitAnd)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2674,7 +3025,7 @@ class HBitXor: public HBitwiseBinaryOperation {
virtual bool IsCommutative() const { return true; }
virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(BitXor, "bit_xor")
+ DECLARE_CONCRETE_INSTRUCTION(BitXor)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2689,7 +3040,7 @@ class HBitOr: public HBitwiseBinaryOperation {
virtual bool IsCommutative() const { return true; }
virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(BitOr, "bit_or")
+ DECLARE_CONCRETE_INSTRUCTION(BitOr)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2706,7 +3057,7 @@ class HShl: public HBitwiseBinaryOperation {
virtual Range* InferRange();
virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(Shl, "shl")
+ DECLARE_CONCRETE_INSTRUCTION(Shl)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2720,7 +3071,7 @@ class HShr: public HBitwiseBinaryOperation {
virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(Shr, "shr")
+ DECLARE_CONCRETE_INSTRUCTION(Shr)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2735,7 +3086,7 @@ class HSar: public HBitwiseBinaryOperation {
virtual Range* InferRange();
virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(Sar, "sar")
+ DECLARE_CONCRETE_INSTRUCTION(Sar)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -2754,7 +3105,7 @@ class HOsrEntry: public HTemplateInstruction<0> {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr_entry")
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry)
private:
int ast_id_;
@@ -2775,7 +3126,7 @@ class HParameter: public HTemplateInstruction<0> {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+ DECLARE_CONCRETE_INSTRUCTION(Parameter)
private:
unsigned index_;
@@ -2807,7 +3158,7 @@ class HCallStub: public HUnaryCall {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call_stub")
+ DECLARE_CONCRETE_INSTRUCTION(CallStub)
private:
CodeStub::Major major_key_;
@@ -2823,13 +3174,13 @@ class HUnknownOSRValue: public HTemplateInstruction<0> {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown_osr_value")
+ DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue)
};
-class HLoadGlobal: public HTemplateInstruction<0> {
+class HLoadGlobalCell: public HTemplateInstruction<0> {
public:
- HLoadGlobal(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
+ HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
: cell_(cell), check_hole_value_(check_hole_value) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
@@ -2842,7 +3193,7 @@ class HLoadGlobal: public HTemplateInstruction<0> {
virtual void PrintDataTo(StringStream* stream);
virtual intptr_t Hashcode() {
- ASSERT(!Heap::allow_allocation(false));
+ ASSERT(!HEAP->allow_allocation(false));
return reinterpret_cast<intptr_t>(*cell_);
}
@@ -2850,11 +3201,11 @@ class HLoadGlobal: public HTemplateInstruction<0> {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load_global")
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell)
protected:
virtual bool DataEquals(HValue* other) {
- HLoadGlobal* b = HLoadGlobal::cast(other);
+ HLoadGlobalCell* b = HLoadGlobalCell::cast(other);
return cell_.is_identical_to(b->cell());
}
@@ -2864,11 +3215,43 @@ class HLoadGlobal: public HTemplateInstruction<0> {
};
-class HStoreGlobal: public HUnaryOperation {
+class HLoadGlobalGeneric: public HBinaryOperation {
+ public:
+ HLoadGlobalGeneric(HValue* context,
+ HValue* global_object,
+ Handle<Object> name,
+ bool for_typeof)
+ : HBinaryOperation(context, global_object),
+ name_(name),
+ for_typeof_(for_typeof) {
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
+
+ HValue* context() { return OperandAt(0); }
+ HValue* global_object() { return OperandAt(1); }
+ Handle<Object> name() const { return name_; }
+ bool for_typeof() const { return for_typeof_; }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric)
+
+ private:
+ Handle<Object> name_;
+ bool for_typeof_;
+};
+
+
+class HStoreGlobalCell: public HUnaryOperation {
public:
- HStoreGlobal(HValue* value,
- Handle<JSGlobalPropertyCell> cell,
- bool check_hole_value)
+ HStoreGlobalCell(HValue* value,
+ Handle<JSGlobalPropertyCell> cell,
+ bool check_hole_value)
: HUnaryOperation(value),
cell_(cell),
check_hole_value_(check_hole_value) {
@@ -2883,7 +3266,7 @@ class HStoreGlobal: public HUnaryOperation {
}
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store_global")
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell)
private:
Handle<JSGlobalPropertyCell> cell_;
@@ -2891,6 +3274,42 @@ class HStoreGlobal: public HUnaryOperation {
};
+class HStoreGlobalGeneric: public HTemplateInstruction<3> {
+ public:
+ HStoreGlobalGeneric(HValue* context,
+ HValue* global_object,
+ Handle<Object> name,
+ HValue* value,
+ bool strict_mode)
+ : name_(name),
+ strict_mode_(strict_mode) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, global_object);
+ SetOperandAt(2, value);
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
+
+ HValue* context() { return OperandAt(0); }
+ HValue* global_object() { return OperandAt(1); }
+ Handle<Object> name() const { return name_; }
+ HValue* value() { return OperandAt(2); }
+ bool strict_mode() { return strict_mode_; }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric)
+
+ private:
+ Handle<Object> name_;
+ bool strict_mode_;
+};
+
+
class HLoadContextSlot: public HUnaryOperation {
public:
  HLoadContextSlot(HValue* context, int slot_index)
@@ -2908,7 +3327,7 @@ class HLoadContextSlot: public HUnaryOperation {
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load_context_slot")
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot)
protected:
virtual bool DataEquals(HValue* other) {
@@ -2948,7 +3367,7 @@ class HStoreContextSlot: public HBinaryOperation {
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store_context_slot")
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot)
private:
int slot_index_;
@@ -2980,7 +3399,7 @@ class HLoadNamedField: public HUnaryOperation {
}
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load_named_field")
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField)
protected:
virtual bool DataEquals(HValue* other) {
@@ -2994,6 +3413,36 @@ class HLoadNamedField: public HUnaryOperation {
};
+class HLoadNamedFieldPolymorphic: public HUnaryOperation {
+ public:
+ HLoadNamedFieldPolymorphic(HValue* object,
+ ZoneMapList* types,
+ Handle<String> name);
+
+ HValue* object() { return OperandAt(0); }
+ ZoneMapList* types() { return &types_; }
+ Handle<String> name() { return name_; }
+ bool need_generic() { return need_generic_; }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic)
+
+ static const int kMaxLoadPolymorphism = 4;
+
+ protected:
+ virtual bool DataEquals(HValue* value);
+
+ private:
+ ZoneMapList types_;
+ Handle<String> name_;
+ bool need_generic_;
+};
+
+
+
class HLoadNamedGeneric: public HBinaryOperation {
public:
HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
@@ -3010,7 +3459,7 @@ class HLoadNamedGeneric: public HBinaryOperation {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load_named_generic")
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
private:
Handle<Object> name_;
@@ -3032,7 +3481,7 @@ class HLoadFunctionPrototype: public HUnaryOperation {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load_function_prototype")
+ DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -3058,21 +3507,30 @@ class HLoadKeyedFastElement: public HBinaryOperation {
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement,
- "load_keyed_fast_element")
+ bool RequiresHoleCheck() const;
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement)
protected:
virtual bool DataEquals(HValue* other) { return true; }
};
-class HLoadPixelArrayElement: public HBinaryOperation {
+class HLoadKeyedSpecializedArrayElement: public HBinaryOperation {
public:
- HLoadPixelArrayElement(HValue* external_elements, HValue* key)
- : HBinaryOperation(external_elements, key) {
- set_representation(Representation::Integer32());
- SetFlag(kDependsOnPixelArrayElements);
- // Native code could change the pixel array.
+ HLoadKeyedSpecializedArrayElement(HValue* external_elements,
+ HValue* key,
+ JSObject::ElementsKind elements_kind)
+ : HBinaryOperation(external_elements, key),
+ elements_kind_(elements_kind) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ set_representation(Representation::Double());
+ } else {
+ set_representation(Representation::Integer32());
+ }
+ SetFlag(kDependsOnSpecializedArrayElements);
+ // Native code could change the specialized array.
SetFlag(kDependsOnCalls);
SetFlag(kUseGVN);
}
@@ -3088,18 +3546,26 @@ class HLoadPixelArrayElement: public HBinaryOperation {
HValue* external_pointer() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
+ JSObject::ElementsKind elements_kind() const { return elements_kind_; }
- DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
- "load_pixel_array_element")
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) {
+ if (!other->IsLoadKeyedSpecializedArrayElement()) return false;
+ HLoadKeyedSpecializedArrayElement* cast_other =
+ HLoadKeyedSpecializedArrayElement::cast(other);
+ return elements_kind_ == cast_other->elements_kind();
+ }
+
+ private:
+ JSObject::ElementsKind elements_kind_;
};
class HLoadKeyedGeneric: public HTemplateInstruction<3> {
public:
- HLoadKeyedGeneric(HContext* context, HValue* obj, HValue* key) {
+ HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key) {
set_representation(Representation::Tagged());
SetOperandAt(0, obj);
SetOperandAt(1, key);
@@ -3117,7 +3583,7 @@ class HLoadKeyedGeneric: public HTemplateInstruction<3> {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic")
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
};
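
The renaming from pixel arrays to specialized arrays in the hunk above is not cosmetic: the load's representation now follows the external array's element type, where pixel-array loads were always Integer32. A sketch of that dispatch with an illustrative enum (assumed subset; the real JSObject::ElementsKind has more members):

#include <cstdio>

// Illustrative subset of the elements kinds named in the hunk above.
enum ElementsKind {
  EXTERNAL_BYTE_ELEMENTS,
  EXTERNAL_INT_ELEMENTS,
  EXTERNAL_FLOAT_ELEMENTS,
  EXTERNAL_DOUBLE_ELEMENTS,
  EXTERNAL_PIXEL_ELEMENTS
};

// Float and double external arrays produce untagged doubles; every
// other specialized kind fits in an untagged 32-bit integer.
static const char* LoadRepresentationFor(ElementsKind kind) {
  if (kind == EXTERNAL_FLOAT_ELEMENTS || kind == EXTERNAL_DOUBLE_ELEMENTS) {
    return "Double";
  }
  return "Integer32";
}

int main() {
  std::printf("%s\n", LoadRepresentationFor(EXTERNAL_PIXEL_ELEMENTS));  // Integer32
  std::printf("%s\n", LoadRepresentationFor(EXTERNAL_FLOAT_ELEMENTS));  // Double
  return 0;
}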
@@ -3139,7 +3605,7 @@ class HStoreNamedField: public HBinaryOperation {
}
}
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store_named_field")
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
@@ -3172,8 +3638,10 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
HStoreNamedGeneric(HValue* context,
HValue* object,
Handle<String> name,
- HValue* value)
- : name_(name) {
+ HValue* value,
+ bool strict_mode)
+ : name_(name),
+ strict_mode_(strict_mode) {
SetOperandAt(0, object);
SetOperandAt(1, value);
SetOperandAt(2, context);
@@ -3184,6 +3652,7 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
HValue* value() { return OperandAt(1); }
HValue* context() { return OperandAt(2); }
Handle<String> name() { return name_; }
+ bool strict_mode() { return strict_mode_; }
virtual void PrintDataTo(StringStream* stream);
@@ -3191,10 +3660,11 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store_named_generic")
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric)
private:
Handle<String> name_;
+ bool strict_mode_;
};
@@ -3223,15 +3693,18 @@ class HStoreKeyedFastElement: public HTemplateInstruction<3> {
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store_keyed_fast_element")
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement)
};
-class HStorePixelArrayElement: public HTemplateInstruction<3> {
+class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> {
public:
- HStorePixelArrayElement(HValue* external_elements, HValue* key, HValue* val) {
- SetFlag(kChangesPixelArrayElements);
+ HStoreKeyedSpecializedArrayElement(HValue* external_elements,
+ HValue* key,
+ HValue* val,
+ JSObject::ElementsKind elements_kind)
+ : elements_kind_(elements_kind) {
+ SetFlag(kChangesSpecializedArrayElements);
SetOperandAt(0, external_elements);
SetOperandAt(1, key);
SetOperandAt(2, val);
@@ -3243,16 +3716,26 @@ class HStorePixelArrayElement: public HTemplateInstruction<3> {
if (index == 0) {
return Representation::External();
} else {
- return Representation::Integer32();
+ bool float_or_double_elements =
+ elements_kind() == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind() == JSObject::EXTERNAL_DOUBLE_ELEMENTS;
+ if (index == 2 && float_or_double_elements) {
+ return Representation::Double();
+ } else {
+ return Representation::Integer32();
+ }
}
}
HValue* external_pointer() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
+ JSObject::ElementsKind elements_kind() const { return elements_kind_; }
- DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement,
- "store_pixel_array_element")
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement)
+
+ private:
+ JSObject::ElementsKind elements_kind_;
};
@@ -3261,7 +3744,9 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
HStoreKeyedGeneric(HValue* context,
HValue* object,
HValue* key,
- HValue* value) {
+ HValue* value,
+ bool strict_mode)
+ : strict_mode_(strict_mode) {
SetOperandAt(0, object);
SetOperandAt(1, key);
SetOperandAt(2, value);
@@ -3273,6 +3758,7 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
HValue* context() { return OperandAt(3); }
+ bool strict_mode() { return strict_mode_; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
@@ -3280,7 +3766,33 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic")
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
+
+ private:
+ bool strict_mode_;
+};
+
+
+class HStringAdd: public HBinaryOperation {
+ public:
+ HStringAdd(HValue* left, HValue* right) : HBinaryOperation(left, right) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetFlag(kDependsOnMaps);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ virtual HType CalculateInferredType() {
+ return HType::String();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
};
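
HStringAdd is flagged kUseGVN with a DataEquals that is unconditionally true, so its identity for value numbering is just its opcode plus operands: two string adds of the same inputs can be commoned as long as nothing tracked by kDependsOnMaps intervenes. A toy value-numbering table keyed that way, sketched under those assumptions:

#include <cstdio>
#include <map>
#include <tuple>

// For instructions whose DataEquals is unconditionally true, the value
// identity is nothing more than (opcode, operand ids).
enum Op { kStringAdd };
typedef std::tuple<Op, int, int> ValueKey;

int main() {
  std::map<ValueKey, int> available;  // key -> id of first occurrence
  struct { Op op; int left, right, id; } instrs[] = {
    {kStringAdd, 1, 2, 3},
    {kStringAdd, 1, 2, 4}   // same operands, no intervening map change
  };
  for (int i = 0; i < 2; ++i) {
    ValueKey key(instrs[i].op, instrs[i].left, instrs[i].right);
    std::map<ValueKey, int>::iterator it = available.find(key);
    if (it != available.end()) {
      std::printf("v%d is redundant, reuse v%d\n", instrs[i].id, it->second);
    } else {
      available[key] = instrs[i].id;
    }
  }
  return 0;
}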
@@ -3302,7 +3814,7 @@ class HStringCharCodeAt: public HBinaryOperation {
HValue* string() { return OperandAt(0); }
HValue* index() { return OperandAt(1); }
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string_char_code_at")
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -3313,6 +3825,23 @@ class HStringCharCodeAt: public HBinaryOperation {
};
+class HStringCharFromCode: public HUnaryOperation {
+ public:
+ explicit HStringCharFromCode(HValue* char_code) : HUnaryOperation(char_code) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Integer32();
+ }
+
+ virtual bool DataEquals(HValue* other) { return true; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode)
+};
+
+
class HStringLength: public HUnaryOperation {
public:
explicit HStringLength(HValue* string) : HUnaryOperation(string) {
@@ -3330,7 +3859,7 @@ class HStringLength: public HUnaryOperation {
return HType::Smi();
}
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string_length")
+ DECLARE_CONCRETE_INSTRUCTION(StringLength)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -3377,7 +3906,7 @@ class HArrayLiteral: public HMaterializedLiteral<0> {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array_literal")
+ DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral)
private:
int length_;
@@ -3391,10 +3920,12 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
Handle<FixedArray> constant_properties,
bool fast_elements,
int literal_index,
- int depth)
+ int depth,
+ bool has_function)
: HMaterializedLiteral<1>(literal_index, depth),
constant_properties_(constant_properties),
- fast_elements_(fast_elements) {
+ fast_elements_(fast_elements),
+ has_function_(has_function) {
SetOperandAt(0, context);
}
@@ -3403,16 +3934,18 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
return constant_properties_;
}
bool fast_elements() const { return fast_elements_; }
+ bool has_function() const { return has_function_; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object_literal")
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral)
private:
Handle<FixedArray> constant_properties_;
bool fast_elements_;
+ bool has_function_;
};
@@ -3432,7 +3965,7 @@ class HRegExpLiteral: public HMaterializedLiteral<0> {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp_literal")
+ DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
private:
Handle<String> pattern_;
@@ -3451,7 +3984,7 @@ class HFunctionLiteral: public HTemplateInstruction<0> {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function_literal")
+ DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral)
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
bool pretenure() const { return pretenure_; }
@@ -3472,7 +4005,25 @@ class HTypeof: public HUnaryOperation {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+ DECLARE_CONCRETE_INSTRUCTION(Typeof)
+};
+
+
+class HToFastProperties: public HUnaryOperation {
+ public:
+ explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
+ // This instruction is not marked as having side effects, but
+ // changes the map of the input operand. Use it only when creating
+ // object literals.
+ ASSERT(value->IsObjectLiteral());
+ set_representation(Representation::Tagged());
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToFastProperties)
};
@@ -3486,7 +4037,7 @@ class HValueOf: public HUnaryOperation {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value_of")
+ DECLARE_CONCRETE_INSTRUCTION(ValueOf)
};
@@ -3502,12 +4053,40 @@ class HDeleteProperty: public HBinaryOperation {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete_property")
+ virtual HType CalculateInferredType();
+
+ DECLARE_CONCRETE_INSTRUCTION(DeleteProperty)
HValue* object() { return left(); }
HValue* key() { return right(); }
};
+
+class HIn: public HTemplateInstruction<2> {
+ public:
+ HIn(HValue* key, HValue* object) {
+ SetOperandAt(0, key);
+ SetOperandAt(1, object);
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
+
+ HValue* key() { return OperandAt(0); }
+ HValue* object() { return OperandAt(1); }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ virtual HType CalculateInferredType() {
+ return HType::Boolean();
+ }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_CONCRETE_INSTRUCTION(In)
+};
+
#undef DECLARE_INSTRUCTION
#undef DECLARE_CONCRETE_INSTRUCTION
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index b37d3356c..38676624c 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -25,10 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
#include "hydrogen.h"
#include "codegen.h"
-#include "data-flow.h"
#include "full-codegen.h"
#include "hashmap.h"
#include "lithium-allocator.h"
@@ -42,6 +42,8 @@
#include "x64/lithium-codegen-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-codegen-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/lithium-codegen-mips.h"
#else
#error Unsupported target architecture.
#endif
@@ -66,13 +68,13 @@ HBasicBlock::HBasicBlock(HGraph* graph)
last_instruction_index_(-1),
deleted_phis_(4),
parent_loop_header_(NULL),
- is_inline_return_target_(false) {
-}
+ is_inline_return_target_(false),
+ is_deoptimizing_(false) { }
void HBasicBlock::AttachLoopInformation() {
ASSERT(!IsLoopHeader());
- loop_information_ = new HLoopInformation(this);
+ loop_information_ = new(zone()) HLoopInformation(this);
}
@@ -92,7 +94,7 @@ void HBasicBlock::AddPhi(HPhi* phi) {
void HBasicBlock::RemovePhi(HPhi* phi) {
ASSERT(phi->block() == this);
ASSERT(phis_.Contains(phi));
- ASSERT(phi->HasNoUses());
+ ASSERT(phi->HasNoUses() || !phi->is_live());
phi->ClearOperands();
phis_.RemoveElement(phi);
phi->SetBlock(NULL);
@@ -104,7 +106,7 @@ void HBasicBlock::AddInstruction(HInstruction* instr) {
ASSERT(!instr->IsLinked());
ASSERT(!IsFinished());
if (first_ == NULL) {
- HBlockEntry* entry = new HBlockEntry();
+ HBlockEntry* entry = new(zone()) HBlockEntry();
entry->InitializeAsFirst(this);
first_ = last_ = entry;
}
@@ -113,12 +115,13 @@ void HBasicBlock::AddInstruction(HInstruction* instr) {
}
-HDeoptimize* HBasicBlock::CreateDeoptimize() {
+HDeoptimize* HBasicBlock::CreateDeoptimize(
+ HDeoptimize::UseEnvironment has_uses) {
ASSERT(HasEnvironment());
- HEnvironment* environment = last_environment();
-
- HDeoptimize* instr = new HDeoptimize(environment->length());
+ if (has_uses == HDeoptimize::kNoUses) return new(zone()) HDeoptimize(0);
+ HEnvironment* environment = last_environment();
+ HDeoptimize* instr = new(zone()) HDeoptimize(environment->length());
for (int i = 0; i < environment->length(); i++) {
HValue* val = environment->values()->at(i);
instr->AddEnvironmentValue(val);
@@ -128,17 +131,16 @@ HDeoptimize* HBasicBlock::CreateDeoptimize() {
}
-HSimulate* HBasicBlock::CreateSimulate(int id) {
+HSimulate* HBasicBlock::CreateSimulate(int ast_id) {
ASSERT(HasEnvironment());
HEnvironment* environment = last_environment();
- ASSERT(id == AstNode::kNoNumber ||
- environment->closure()->shared()->VerifyBailoutId(id));
+ ASSERT(ast_id == AstNode::kNoNumber ||
+ environment->closure()->shared()->VerifyBailoutId(ast_id));
int push_count = environment->push_count();
int pop_count = environment->pop_count();
- int length = environment->length();
- HSimulate* instr = new HSimulate(id, pop_count, length);
+ HSimulate* instr = new(zone()) HSimulate(ast_id, pop_count);
for (int i = push_count - 1; i >= 0; --i) {
instr->AddPushedValue(environment->ExpressionStackAt(i));
}
@@ -155,19 +157,31 @@ void HBasicBlock::Finish(HControlInstruction* end) {
ASSERT(!IsFinished());
AddInstruction(end);
end_ = end;
- if (end->FirstSuccessor() != NULL) {
- end->FirstSuccessor()->RegisterPredecessor(this);
- if (end->SecondSuccessor() != NULL) {
- end->SecondSuccessor()->RegisterPredecessor(this);
- }
+ for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
+ it.Current()->RegisterPredecessor(this);
+ }
+}
+
+
+void HBasicBlock::Goto(HBasicBlock* block) {
+ if (block->IsInlineReturnTarget()) {
+ AddInstruction(new(zone()) HLeaveInlined);
+ last_environment_ = last_environment()->outer();
}
+ AddSimulate(AstNode::kNoNumber);
+ HGoto* instr = new(zone()) HGoto(block);
+ Finish(instr);
}
-void HBasicBlock::Goto(HBasicBlock* block, bool include_stack_check) {
+void HBasicBlock::AddLeaveInlined(HValue* return_value, HBasicBlock* target) {
+ ASSERT(target->IsInlineReturnTarget());
+ ASSERT(return_value != NULL);
+ AddInstruction(new(zone()) HLeaveInlined);
+ last_environment_ = last_environment()->outer();
+ last_environment()->Push(return_value);
AddSimulate(AstNode::kNoNumber);
- HGoto* instr = new HGoto(block);
- instr->set_include_stack_check(include_stack_check);
+ HGoto* instr = new(zone()) HGoto(target);
Finish(instr);
}
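
The new Goto and AddLeaveInlined share one piece of bookkeeping: on leaving an inlined function the builder emits HLeaveInlined, pops back to the caller's environment, and, for a value return, pushes the result on the caller's expression stack. A toy model of that environment discipline (simplified names, not V8's HEnvironment API):

#include <cassert>
#include <vector>

// Minimal stand-in for HEnvironment: an expression stack plus a link to
// the enclosing (caller) environment.
struct Environment {
  std::vector<int> stack;
  Environment* outer;
  explicit Environment(Environment* o) : outer(o) {}
};

// What AddLeaveInlined does in spirit: discard the callee environment
// and surface the return value to the caller.
static Environment* LeaveInlined(Environment* callee, int return_value) {
  Environment* caller = callee->outer;
  caller->stack.push_back(return_value);
  return caller;
}

int main() {
  Environment caller(NULL);
  Environment callee(&caller);
  Environment* current = LeaveInlined(&callee, 42);
  assert(current == &caller);
  assert(caller.stack.back() == 42);
  return 0;
}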
@@ -179,7 +193,7 @@ void HBasicBlock::SetInitialEnvironment(HEnvironment* env) {
}
-void HBasicBlock::SetJoinId(int id) {
+void HBasicBlock::SetJoinId(int ast_id) {
int length = predecessors_.length();
ASSERT(length > 0);
for (int i = 0; i < length; i++) {
@@ -189,8 +203,8 @@ void HBasicBlock::SetJoinId(int id) {
// We only need to verify the ID once.
ASSERT(i != 0 ||
predecessor->last_environment()->closure()->shared()
- ->VerifyBailoutId(id));
- simulate->set_ast_id(id);
+ ->VerifyBailoutId(ast_id));
+ simulate->set_ast_id(ast_id);
}
}
@@ -224,7 +238,7 @@ void HBasicBlock::PostProcessLoopHeader(IterationStatement* stmt) {
void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
- if (!predecessors_.is_empty()) {
+ if (HasPredecessor()) {
// Only loop header blocks can have a predecessor added after
// instructions have been added to the block (they have phis for all
  // values in the environment; these phis may be eliminated later).
@@ -301,6 +315,13 @@ void HBasicBlock::Verify() {
// Check that every block is finished.
ASSERT(IsFinished());
ASSERT(block_id() >= 0);
+
+ // Check that the incoming edges are in edge split form.
+ if (predecessors_.length() > 1) {
+ for (int i = 0; i < predecessors_.length(); ++i) {
+ ASSERT(predecessors_[i]->end()->SecondSuccessor() == NULL);
+ }
+ }
}
#endif
@@ -376,8 +397,9 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
void Analyze() {
while (!stack_.is_empty()) {
HControlInstruction* end = stack_.RemoveLast()->end();
- PushBlock(end->FirstSuccessor());
- PushBlock(end->SecondSuccessor());
+ for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
+ PushBlock(it.Current());
+ }
}
}
@@ -468,8 +490,8 @@ void HGraph::Verify() const {
HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
Object* value) {
if (!pointer->is_set()) {
- HConstant* constant = new HConstant(Handle<Object>(value),
- Representation::Tagged());
+ HConstant* constant = new(zone()) HConstant(Handle<Object>(value),
+ Representation::Tagged());
constant->InsertAfter(GetConstantUndefined());
pointer->set(constant);
}
@@ -488,15 +510,37 @@ HConstant* HGraph::GetConstantMinus1() {
HConstant* HGraph::GetConstantTrue() {
- return GetConstant(&constant_true_, Heap::true_value());
+ return GetConstant(&constant_true_, isolate()->heap()->true_value());
}
HConstant* HGraph::GetConstantFalse() {
- return GetConstant(&constant_false_, Heap::false_value());
+ return GetConstant(&constant_false_, isolate()->heap()->false_value());
}
+HConstant* HGraph::GetConstantHole() {
+ return GetConstant(&constant_hole_, isolate()->heap()->the_hole_value());
+}
+
+
+HGraphBuilder::HGraphBuilder(CompilationInfo* info,
+ TypeFeedbackOracle* oracle)
+ : function_state_(NULL),
+ initial_function_state_(this, info, oracle),
+ ast_context_(NULL),
+ break_scope_(NULL),
+ graph_(NULL),
+ current_block_(NULL),
+ inlined_count_(0),
+ zone_(info->isolate()->zone()),
+ inline_bailout_(false) {
+ // This is not initialized in the initializer list because the
+ // constructor for the initial state relies on function_state_ == NULL
+ // to know it's the initial state.
+  function_state_ = &initial_function_state_;
+}
+
HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
HBasicBlock* second,
int join_id) {
@@ -518,48 +562,27 @@ HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement,
HBasicBlock* exit_block,
HBasicBlock* continue_block) {
if (continue_block != NULL) {
+ if (exit_block != NULL) exit_block->Goto(continue_block);
continue_block->SetJoinId(statement->ContinueId());
+ return continue_block;
}
- return CreateJoin(exit_block, continue_block, statement->ContinueId());
-}
-
-
-HBasicBlock* HGraphBuilder::CreateEndless(IterationStatement* statement,
- HBasicBlock* body_entry,
- HBasicBlock* body_exit,
- HBasicBlock* break_block) {
- if (body_exit != NULL) body_exit->Goto(body_entry, true);
- if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
- body_entry->PostProcessLoopHeader(statement);
- return break_block;
-}
-
-
-HBasicBlock* HGraphBuilder::CreateDoWhile(IterationStatement* statement,
- HBasicBlock* body_entry,
- HBasicBlock* go_back,
- HBasicBlock* exit_block,
- HBasicBlock* break_block) {
- if (go_back != NULL) go_back->Goto(body_entry, true);
- if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
- HBasicBlock* new_exit =
- CreateJoin(exit_block, break_block, statement->ExitId());
- body_entry->PostProcessLoopHeader(statement);
- return new_exit;
+ return exit_block;
}
-HBasicBlock* HGraphBuilder::CreateWhile(IterationStatement* statement,
- HBasicBlock* loop_entry,
- HBasicBlock* cond_false,
- HBasicBlock* body_exit,
- HBasicBlock* break_block) {
- if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
- HBasicBlock* new_exit =
- CreateJoin(cond_false, break_block, statement->ExitId());
- if (body_exit != NULL) body_exit->Goto(loop_entry, true);
+HBasicBlock* HGraphBuilder::CreateLoop(IterationStatement* statement,
+ HBasicBlock* loop_entry,
+ HBasicBlock* body_exit,
+ HBasicBlock* loop_successor,
+ HBasicBlock* break_block) {
+ if (body_exit != NULL) body_exit->Goto(loop_entry);
loop_entry->PostProcessLoopHeader(statement);
- return new_exit;
+ if (break_block != NULL) {
+ if (loop_successor != NULL) loop_successor->Goto(break_block);
+ break_block->SetJoinId(statement->ExitId());
+ return break_block;
+ }
+ return loop_successor;
}
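
CreateLoop is the unification that lets the three shape-specific helpers deleted above (CreateEndless, CreateDoWhile, CreateWhile) go away: every loop reduces to the same four blocks, with absent ones passed as NULL. The edge wiring, restated as a runnable toy over bare successor lists:

#include <cstdio>
#include <vector>

// Toy block that only records its successors; enough to show the edges
// CreateLoop wires for any loop shape.
struct Block {
  const char* name;
  std::vector<Block*> succ;
  explicit Block(const char* n) : name(n) {}
  void Goto(Block* b) { succ.push_back(b); }
};

// Unified wiring: back edge from body_exit, break edge via the loop
// successor. Any optional block may be NULL, which is how the endless,
// do-while, and while special cases collapse into one helper.
static Block* CreateLoop(Block* loop_entry, Block* body_exit,
                         Block* loop_successor, Block* break_block) {
  if (body_exit != NULL) body_exit->Goto(loop_entry);  // back edge
  if (break_block != NULL) {
    if (loop_successor != NULL) loop_successor->Goto(break_block);
    return break_block;                                // loop exit
  }
  return loop_successor;
}

int main() {
  Block entry("loop_entry"), body_exit("body_exit");
  Block successor("loop_successor"), break_block("break_block");
  Block* out = CreateLoop(&entry, &body_exit, &successor, &break_block);
  std::printf("loop exits to %s\n", out->name);  // "break_block"
  return 0;
}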
@@ -570,23 +593,21 @@ void HBasicBlock::FinishExit(HControlInstruction* instruction) {
HGraph::HGraph(CompilationInfo* info)
- : HSubgraph(this),
+ : isolate_(info->isolate()),
next_block_id_(0),
- info_(info),
+ entry_block_(NULL),
blocks_(8),
values_(16),
phi_list_(NULL) {
- start_environment_ = new HEnvironment(NULL, info->scope(), info->closure());
- start_environment_->set_ast_id(info->function()->id());
+ start_environment_ =
+ new(zone()) HEnvironment(NULL, info->scope(), info->closure());
+ start_environment_->set_ast_id(AstNode::kFunctionEntryId);
+ entry_block_ = CreateBasicBlock();
+ entry_block_->SetInitialEnvironment(start_environment_);
}
-bool HGraph::AllowCodeMotion() const {
- return info()->shared_info()->opt_count() + 1 < Compiler::kDefaultMaxOptCount;
-}
-
-
-Handle<Code> HGraph::Compile() {
+Handle<Code> HGraph::Compile(CompilationInfo* info) {
int values = GetMaximumValueID();
if (values > LAllocator::max_initial_value_ids()) {
if (FLAG_trace_bailout) PrintF("Function is too big\n");
@@ -594,7 +615,7 @@ Handle<Code> HGraph::Compile() {
}
LAllocator allocator(values, this);
- LChunkBuilder builder(this, &allocator);
+ LChunkBuilder builder(info, this, &allocator);
LChunk* chunk = builder.Build();
if (chunk == NULL) return Handle<Code>::null();
@@ -604,8 +625,8 @@ Handle<Code> HGraph::Compile() {
if (!FLAG_use_lithium) return Handle<Code>::null();
- MacroAssembler assembler(NULL, 0);
- LCodeGen generator(chunk, &assembler, info());
+ MacroAssembler assembler(info->isolate(), NULL, 0);
+ LCodeGen generator(chunk, &assembler, info);
if (FLAG_eliminate_empty_blocks) {
chunk->MarkEmptyBlocks();
@@ -615,13 +636,13 @@ Handle<Code> HGraph::Compile() {
if (FLAG_trace_codegen) {
PrintF("Crankshaft Compiler - ");
}
- CodeGenerator::MakeCodePrologue(info());
+ CodeGenerator::MakeCodePrologue(info);
Code::Flags flags =
Code::ComputeFlags(Code::OPTIMIZED_FUNCTION, NOT_IN_LOOP);
Handle<Code> code =
- CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
+ CodeGenerator::MakeCodeEpilogue(&assembler, flags, info);
generator.FinishCode(code);
- CodeGenerator::PrintCode(code, info());
+ CodeGenerator::PrintCode(code, info);
return code;
}
return Handle<Code>::null();
@@ -629,27 +650,21 @@ Handle<Code> HGraph::Compile() {
HBasicBlock* HGraph::CreateBasicBlock() {
- HBasicBlock* result = new HBasicBlock(this);
+ HBasicBlock* result = new(zone()) HBasicBlock(this);
blocks_.Add(result);
return result;
}
void HGraph::Canonicalize() {
+ if (!FLAG_use_canonicalizing) return;
HPhase phase("Canonicalize", this);
- if (FLAG_use_canonicalizing) {
- for (int i = 0; i < blocks()->length(); ++i) {
- HBasicBlock* b = blocks()->at(i);
- for (HInstruction* insn = b->first(); insn != NULL; insn = insn->next()) {
- HValue* value = insn->Canonicalize();
- if (value != insn) {
- if (value != NULL) {
- insn->ReplaceAndDelete(value);
- } else {
- insn->Delete();
- }
- }
- }
+ for (int i = 0; i < blocks()->length(); ++i) {
+ HInstruction* instr = blocks()->at(i)->first();
+ while (instr != NULL) {
+ HValue* value = instr->Canonicalize();
+ if (value != instr) instr->DeleteAndReplaceWith(value);
+ instr = instr->next();
}
}
}
@@ -663,7 +678,7 @@ void HGraph::OrderBlocks() {
HBasicBlock* start = blocks_[0];
Postorder(start, &visited, &reverse_result, NULL);
- blocks_.Clear();
+ blocks_.Rewind(0);
int index = 0;
for (int i = reverse_result.length() - 1; i >= 0; --i) {
HBasicBlock* b = reverse_result[i];
@@ -679,8 +694,9 @@ void HGraph::PostorderLoopBlocks(HLoopInformation* loop,
HBasicBlock* loop_header) {
for (int i = 0; i < loop->blocks()->length(); ++i) {
HBasicBlock* b = loop->blocks()->at(i);
- Postorder(b->end()->SecondSuccessor(), visited, order, loop_header);
- Postorder(b->end()->FirstSuccessor(), visited, order, loop_header);
+ for (HSuccessorIterator it(b->end()); !it.Done(); it.Advance()) {
+ Postorder(it.Current(), visited, order, loop_header);
+ }
if (b->IsLoopHeader() && b != loop->loop_header()) {
PostorderLoopBlocks(b->loop_information(), visited, order, loop_header);
}
@@ -697,11 +713,13 @@ void HGraph::Postorder(HBasicBlock* block,
visited->Add(block->block_id());
if (block->IsLoopHeader()) {
PostorderLoopBlocks(block->loop_information(), visited, order, loop_header);
- Postorder(block->end()->SecondSuccessor(), visited, order, block);
- Postorder(block->end()->FirstSuccessor(), visited, order, block);
+ for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
+ Postorder(it.Current(), visited, order, block);
+ }
} else {
- Postorder(block->end()->SecondSuccessor(), visited, order, loop_header);
- Postorder(block->end()->FirstSuccessor(), visited, order, loop_header);
+ for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
+ Postorder(it.Current(), visited, order, loop_header);
+ }
}
ASSERT(block->end()->FirstSuccessor() == NULL ||
order->Contains(block->end()->FirstSuccessor()) ||
@@ -724,16 +742,27 @@ void HGraph::AssignDominators() {
}
}
}
+
+ // Propagate flag marking blocks containing unconditional deoptimize.
+ MarkAsDeoptimizingRecursively(entry_block());
}
+// Mark all blocks that are dominated by an unconditional deoptimize.
+void HGraph::MarkAsDeoptimizingRecursively(HBasicBlock* block) {
+ for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
+ HBasicBlock* dominated = block->dominated_blocks()->at(i);
+ if (block->IsDeoptimizing()) dominated->MarkAsDeoptimizing();
+ MarkAsDeoptimizingRecursively(dominated);
+ }
+}
+
void HGraph::EliminateRedundantPhis() {
- HPhase phase("Phi elimination", this);
- ZoneList<HValue*> uses_to_replace(2);
+ HPhase phase("Redundant phi elimination", this);
- // Worklist of phis that can potentially be eliminated. Initialized
- // with all phi nodes. When elimination of a phi node modifies
- // another phi node the modified phi node is added to the worklist.
+ // Worklist of phis that can potentially be eliminated. Initialized with
+ // all phi nodes. When elimination of a phi node modifies another phi node
+ // the modified phi node is added to the worklist.
ZoneList<HPhi*> worklist(blocks_.length());
for (int i = 0; i < blocks_.length(); ++i) {
worklist.AddAll(*blocks_[i]->phis());
@@ -747,31 +776,58 @@ void HGraph::EliminateRedundantPhis() {
if (block == NULL) continue;
// Get replacement value if phi is redundant.
- HValue* value = phi->GetRedundantReplacement();
-
- if (value != NULL) {
- // Iterate through uses finding the ones that should be
- // replaced.
- const ZoneList<HValue*>* uses = phi->uses();
- for (int i = 0; i < uses->length(); ++i) {
- HValue* use = uses->at(i);
- if (!use->block()->IsStartBlock()) {
- uses_to_replace.Add(use);
- }
+ HValue* replacement = phi->GetRedundantReplacement();
+
+ if (replacement != NULL) {
+ // Iterate through the uses and replace them all.
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ HValue* value = it.value();
+ value->SetOperandAt(it.index(), replacement);
+ if (value->IsPhi()) worklist.Add(HPhi::cast(value));
}
- // Replace the uses and add phis modified to the work list.
- for (int i = 0; i < uses_to_replace.length(); ++i) {
- HValue* use = uses_to_replace[i];
- phi->ReplaceAtUse(use, value);
- if (use->IsPhi()) worklist.Add(HPhi::cast(use));
- }
- uses_to_replace.Rewind(0);
block->RemovePhi(phi);
- } else if (FLAG_eliminate_dead_phis && phi->HasNoUses() &&
- !phi->IsReceiver()) {
+ }
+ }
+}
+
+
+void HGraph::EliminateUnreachablePhis() {
+ HPhase phase("Unreachable phi elimination", this);
+
+ // Initialize worklist.
+ ZoneList<HPhi*> phi_list(blocks_.length());
+ ZoneList<HPhi*> worklist(blocks_.length());
+ for (int i = 0; i < blocks_.length(); ++i) {
+ for (int j = 0; j < blocks_[i]->phis()->length(); j++) {
+ HPhi* phi = blocks_[i]->phis()->at(j);
+ phi_list.Add(phi);
// We can't eliminate phis in the receiver position in the environment
// because in case of throwing an error we need this value to
// construct a stack trace.
+ if (phi->HasRealUses() || phi->IsReceiver()) {
+ phi->set_is_live(true);
+ worklist.Add(phi);
+ }
+ }
+ }
+
+ // Iteratively mark live phis.
+ while (!worklist.is_empty()) {
+ HPhi* phi = worklist.RemoveLast();
+ for (int i = 0; i < phi->OperandCount(); i++) {
+ HValue* operand = phi->OperandAt(i);
+ if (operand->IsPhi() && !HPhi::cast(operand)->is_live()) {
+ HPhi::cast(operand)->set_is_live(true);
+ worklist.Add(HPhi::cast(operand));
+ }
+ }
+ }
+
+ // Remove unreachable phis.
+ for (int i = 0; i < phi_list.length(); i++) {
+ HPhi* phi = phi_list[i];
+ if (!phi->is_live()) {
+ HBasicBlock* block = phi->block();
block->RemovePhi(phi);
block->RecordDeletedPhi(phi->merged_index());
}
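
EliminateUnreachablePhis is a standard mark-and-sweep over the phi graph: seed the worklist with phis that have real (non-phi) uses or sit in the receiver slot, propagate liveness backwards through phi operands to a fixed point, then remove whatever stayed unmarked. The same fixed-point computation on a bare adjacency list; note how the dead self-referential phi never gets marked:

#include <cstdio>
#include <vector>

int main() {
  // phi i uses the phis listed in operands[i].
  const int kPhiCount = 4;
  std::vector<int> operands[kPhiCount] = {
    {1},   // phi0 <- phi1
    {2},   // phi1 <- phi2
    {},    // phi2
    {3}    // phi3 <- phi3 (self-loop, no real uses: dead)
  };
  bool has_real_uses[kPhiCount] = {true, false, false, false};

  bool live[kPhiCount] = {false, false, false, false};
  std::vector<int> worklist;
  for (int i = 0; i < kPhiCount; ++i) {
    if (has_real_uses[i]) { live[i] = true; worklist.push_back(i); }
  }
  // Anything feeding a live phi is live too.
  while (!worklist.empty()) {
    int phi = worklist.back();
    worklist.pop_back();
    for (size_t j = 0; j < operands[phi].size(); ++j) {
      int op = operands[phi][j];
      if (!live[op]) { live[op] = true; worklist.push_back(op); }
    }
  }
  for (int i = 0; i < kPhiCount; ++i) {
    std::printf("phi%d %s\n", i, live[i] ? "live" : "removed");
  }
  return 0;
}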
@@ -780,14 +836,18 @@ void HGraph::EliminateRedundantPhis() {
bool HGraph::CollectPhis() {
- const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
- phi_list_ = new ZoneList<HPhi*>(blocks->length());
- for (int i = 0; i < blocks->length(); ++i) {
- for (int j = 0; j < blocks->at(i)->phis()->length(); j++) {
- HPhi* phi = blocks->at(i)->phis()->at(j);
+ int block_count = blocks_.length();
+ phi_list_ = new ZoneList<HPhi*>(block_count);
+ for (int i = 0; i < block_count; ++i) {
+ for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
+ HPhi* phi = blocks_[i]->phis()->at(j);
phi_list_->Add(phi);
// We don't support phi uses of arguments for now.
if (phi->CheckFlag(HValue::kIsArguments)) return false;
+ // Check for the hole value (from an uninitialized const).
+ for (int k = 0; k < phi->OperandCount(); k++) {
+ if (phi->OperandAt(k) == GetConstantHole()) return false;
+ }
}
}
return true;
@@ -805,8 +865,8 @@ void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
HValue* current = worklist->RemoveLast();
in_worklist.Remove(current->id());
if (current->UpdateInferredType()) {
- for (int j = 0; j < current->uses()->length(); j++) {
- HValue* use = current->uses()->at(j);
+ for (HUseIterator it(current->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
if (!in_worklist.Contains(use->id())) {
in_worklist.Add(use->id());
worklist->Add(use);
@@ -850,7 +910,7 @@ void HRangeAnalysis::TraceRange(const char* msg, ...) {
void HRangeAnalysis::Analyze() {
HPhase phase("Range analysis", graph_);
- Analyze(graph_->blocks()->at(0));
+ Analyze(graph_->entry_block());
}
@@ -893,13 +953,15 @@ void HRangeAnalysis::InferControlFlowRange(HTest* test, HBasicBlock* dest) {
ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
if (test->value()->IsCompare()) {
HCompare* compare = HCompare::cast(test->value());
- Token::Value op = compare->token();
- if (test->SecondSuccessor() == dest) {
- op = Token::NegateCompareOp(op);
+ if (compare->GetInputRepresentation().IsInteger32()) {
+ Token::Value op = compare->token();
+ if (test->SecondSuccessor() == dest) {
+ op = Token::NegateCompareOp(op);
+ }
+ Token::Value inverted_op = Token::InvertCompareOp(op);
+ InferControlFlowRange(op, compare->left(), compare->right());
+ InferControlFlowRange(inverted_op, compare->right(), compare->left());
}
- Token::Value inverted_op = Token::InvertCompareOp(op);
- InferControlFlowRange(op, compare->left(), compare->right());
- InferControlFlowRange(inverted_op, compare->right(), compare->left());
}
}
@@ -909,8 +971,8 @@ void HRangeAnalysis::InferControlFlowRange(HTest* test, HBasicBlock* dest) {
void HRangeAnalysis::InferControlFlowRange(Token::Value op,
HValue* value,
HValue* other) {
- Range* range = other->range();
- if (range == NULL) range = new Range();
+ Range temp_range;
+ Range* range = other->range() != NULL ? other->range() : &temp_range;
Range* new_range = NULL;
TraceRange("Control flow range infer %d %s %d\n",
@@ -997,13 +1059,13 @@ void TraceGVN(const char* msg, ...) {
}
-HValueMap::HValueMap(const HValueMap* other)
+HValueMap::HValueMap(Zone* zone, const HValueMap* other)
: array_size_(other->array_size_),
lists_size_(other->lists_size_),
count_(other->count_),
present_flags_(other->present_flags_),
- array_(Zone::NewArray<HValueMapListElement>(other->array_size_)),
- lists_(Zone::NewArray<HValueMapListElement>(other->lists_size_)),
+ array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
+ lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
free_list_head_(other->free_list_head_) {
memcpy(array_, other->array_, array_size_ * sizeof(HValueMapListElement));
memcpy(lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
@@ -1082,7 +1144,7 @@ void HValueMap::Resize(int new_size) {
}
HValueMapListElement* new_array =
- Zone::NewArray<HValueMapListElement>(new_size);
+ ZONE->NewArray<HValueMapListElement>(new_size);
memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
HValueMapListElement* old_array = array_;
@@ -1120,7 +1182,7 @@ void HValueMap::ResizeLists(int new_size) {
ASSERT(new_size > lists_size_);
HValueMapListElement* new_lists =
- Zone::NewArray<HValueMapListElement>(new_size);
+ ZONE->NewArray<HValueMapListElement>(new_size);
memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
HValueMapListElement* old_lists = lists_;
@@ -1171,8 +1233,6 @@ class HStackCheckEliminator BASE_EMBEDDED {
void Process();
private:
- void RemoveStackCheck(HBasicBlock* block);
-
HGraph* graph_;
};
@@ -1187,16 +1247,20 @@ void HStackCheckEliminator::Process() {
if (block->IsLoopHeader()) {
HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
HBasicBlock* dominator = back_edge;
- bool back_edge_dominated_by_call = false;
- while (dominator != block && !back_edge_dominated_by_call) {
+ while (true) {
HInstruction* instr = dominator->first();
- while (instr != NULL && !back_edge_dominated_by_call) {
+ while (instr != NULL) {
if (instr->IsCall()) {
- RemoveStackCheck(back_edge);
- back_edge_dominated_by_call = true;
+ block->loop_information()->stack_check()->Eliminate();
+ break;
}
instr = instr->next();
}
+
+ // Done when the loop header is processed.
+ if (dominator == block) break;
+
+ // Move up the dominator tree.
dominator = dominator->dominator();
}
}
@@ -1204,50 +1268,92 @@ void HStackCheckEliminator::Process() {
}
-void HStackCheckEliminator::RemoveStackCheck(HBasicBlock* block) {
- HInstruction* instr = block->first();
- while (instr != NULL) {
- if (instr->IsGoto()) {
- HGoto::cast(instr)->set_include_stack_check(false);
- return;
- }
- instr = instr->next();
+// Simple sparse set with O(1) add, contains, and clear.
+class SparseSet {
+ public:
+ SparseSet(Zone* zone, int capacity)
+ : capacity_(capacity),
+ length_(0),
+ dense_(zone->NewArray<int>(capacity)),
+ sparse_(zone->NewArray<int>(capacity)) {
+#ifndef NVALGRIND
+ // Initialize the sparse array to make valgrind happy.
+ memset(sparse_, 0, sizeof(sparse_[0]) * capacity);
+#endif
+ }
+
+ bool Contains(int n) const {
+ ASSERT(0 <= n && n < capacity_);
+ int d = sparse_[n];
+ return 0 <= d && d < length_ && dense_[d] == n;
+ }
+
+ bool Add(int n) {
+ if (Contains(n)) return false;
+ dense_[length_] = n;
+ sparse_[n] = length_;
+ ++length_;
+ return true;
}
-}
+
+ void Clear() { length_ = 0; }
+
+ private:
+ int capacity_;
+ int length_;
+ int* dense_;
+ int* sparse_;
+
+ DISALLOW_COPY_AND_ASSIGN(SparseSet);
+};
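
SparseSet is the classic sparse-set trick: dense_ lists the members in insertion order, sparse_ maps a member back to its dense slot, and membership requires the two arrays to agree, so Clear is a single length reset and stale or uninitialized sparse_ entries are harmless (the memset exists only to silence valgrind). A self-contained mirror using std::vector in place of zone arrays:

#include <cassert>
#include <vector>

// Mirrors the logic of the SparseSet above; std::vector zero-fills, so
// even the valgrind workaround is unnecessary here.
class SparseSet {
 public:
  explicit SparseSet(int capacity)
      : length_(0), dense_(capacity), sparse_(capacity) {}
  bool Contains(int n) const {
    int d = sparse_[n];
    return 0 <= d && d < length_ && dense_[d] == n;
  }
  bool Add(int n) {
    if (Contains(n)) return false;
    dense_[length_] = n;
    sparse_[n] = length_;
    ++length_;
    return true;
  }
  void Clear() { length_ = 0; }  // O(1): stale entries fail Contains
 private:
  int length_;
  std::vector<int> dense_, sparse_;
};

int main() {
  SparseSet set(16);
  assert(set.Add(3) && set.Contains(3));
  set.Clear();               // no per-element work
  assert(!set.Contains(3));  // stale sparse_[3] no longer matches
  assert(set.Add(3));        // reusable immediately
  return 0;
}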
class HGlobalValueNumberer BASE_EMBEDDED {
public:
- explicit HGlobalValueNumberer(HGraph* graph)
+ explicit HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
: graph_(graph),
- block_side_effects_(graph_->blocks()->length()),
- loop_side_effects_(graph_->blocks()->length()) {
- ASSERT(Heap::allow_allocation(false));
+ info_(info),
+ block_side_effects_(graph->blocks()->length()),
+ loop_side_effects_(graph->blocks()->length()),
+ visited_on_paths_(graph->zone(), graph->blocks()->length()) {
+ ASSERT(info->isolate()->heap()->allow_allocation(false));
block_side_effects_.AddBlock(0, graph_->blocks()->length());
loop_side_effects_.AddBlock(0, graph_->blocks()->length());
}
~HGlobalValueNumberer() {
- ASSERT(!Heap::allow_allocation(true));
+ ASSERT(!info_->isolate()->heap()->allow_allocation(true));
}
void Analyze();
private:
+ int CollectSideEffectsOnPathsToDominatedBlock(HBasicBlock* dominator,
+ HBasicBlock* dominated);
void AnalyzeBlock(HBasicBlock* block, HValueMap* map);
void ComputeBlockSideEffects();
void LoopInvariantCodeMotion();
void ProcessLoopBlock(HBasicBlock* block,
HBasicBlock* before_loop,
int loop_kills);
+ bool AllowCodeMotion();
bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
+ HGraph* graph() { return graph_; }
+ CompilationInfo* info() { return info_; }
+ Zone* zone() { return graph_->zone(); }
+
HGraph* graph_;
+ CompilationInfo* info_;
// A map of block IDs to their side effects.
ZoneList<int> block_side_effects_;
// A map of loop header block IDs to their loop's side effects.
ZoneList<int> loop_side_effects_;
+
+ // Used when collecting side effects on paths from dominator to
+ // dominated.
+ SparseSet visited_on_paths_;
};
@@ -1256,8 +1362,8 @@ void HGlobalValueNumberer::Analyze() {
if (FLAG_loop_invariant_code_motion) {
LoopInvariantCodeMotion();
}
- HValueMap* map = new HValueMap();
- AnalyzeBlock(graph_->blocks()->at(0), map);
+ HValueMap* map = new(zone()) HValueMap();
+ AnalyzeBlock(graph_->entry_block(), map);
}
@@ -1342,44 +1448,40 @@ void HGlobalValueNumberer::ProcessLoopBlock(HBasicBlock* block,
}
+bool HGlobalValueNumberer::AllowCodeMotion() {
+ return info()->shared_info()->opt_count() + 1 < Compiler::kDefaultMaxOptCount;
+}
+
+
bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
HBasicBlock* loop_header) {
- // If we've disabled code motion, don't move any instructions.
- if (!graph_->AllowCodeMotion()) return false;
+ // If we've disabled code motion or we're in a block that unconditionally
+ // deoptimizes, don't move any instructions.
+ return AllowCodeMotion() && !instr->block()->IsDeoptimizing();
+}
- // If --aggressive-loop-invariant-motion, move everything except change
- // instructions.
- if (FLAG_aggressive_loop_invariant_motion && !instr->IsChange()) {
- return true;
- }
- // Otherwise only move instructions that postdominate the loop header
- // (i.e. are always executed inside the loop). This is to avoid
- // unnecessary deoptimizations assuming the loop is executed at least
- // once. TODO(fschneider): Better type feedback should give us
- // information about code that was never executed.
- HBasicBlock* block = instr->block();
- bool result = true;
- if (block != loop_header) {
- for (int i = 1; i < loop_header->predecessors()->length(); ++i) {
- bool found = false;
- HBasicBlock* pred = loop_header->predecessors()->at(i);
- while (pred != loop_header) {
- if (pred == block) found = true;
- pred = pred->dominator();
- }
- if (!found) {
- result = false;
- break;
- }
+int HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
+ HBasicBlock* dominator, HBasicBlock* dominated) {
+ int side_effects = 0;
+ for (int i = 0; i < dominated->predecessors()->length(); ++i) {
+ HBasicBlock* block = dominated->predecessors()->at(i);
+ if (dominator->block_id() < block->block_id() &&
+ block->block_id() < dominated->block_id() &&
+ visited_on_paths_.Add(block->block_id())) {
+ side_effects |= block_side_effects_[block->block_id()];
+ side_effects |= CollectSideEffectsOnPathsToDominatedBlock(
+ dominator, block);
}
}
- return result;
+ return side_effects;
}
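
CollectSideEffectsOnPathsToDominatedBlock replaces the old loop that ORed the effects of every block id between dominator and dominated with a targeted backward walk over predecessors. It relies on V8's block numbering, under which any block on such a path has an id strictly between the two endpoints (the deleted code's "we rely on the block ordering"), and on the SparseSet above to visit each block once. The same walk over an explicit predecessor list:

#include <cstdio>
#include <set>
#include <vector>

struct Block {
  int id;
  int side_effects;  // bitmask of effects occurring in this block
  std::vector<Block*> predecessors;
};

// Union the side effects of every block strictly between dominator and
// dominated (by id) that is reachable backwards from dominated. The
// id-range test prunes the walk; the visited set keeps it linear.
static int CollectOnPaths(std::set<int>* visited,
                          const Block* dominator, const Block* dominated) {
  int side_effects = 0;
  for (size_t i = 0; i < dominated->predecessors.size(); ++i) {
    Block* block = dominated->predecessors[i];
    if (dominator->id < block->id && block->id < dominated->id &&
        visited->insert(block->id).second) {
      side_effects |= block->side_effects;
      side_effects |= CollectOnPaths(visited, dominator, block);
    }
  }
  return side_effects;
}

int main() {
  Block b0 = {0, 0, {}};
  Block b1 = {1, 1 << 2, {&b0}};  // on the path, contributes bit 2
  Block b2 = {2, 1 << 5, {&b1}};  // on the path, contributes bit 5
  Block b3 = {3, 0, {&b2, &b0}};  // dominated by b0 via two paths
  std::set<int> visited;
  std::printf("0x%x\n", CollectOnPaths(&visited, &b0, &b3));  // 0x24
  return 0;
}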
void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) {
- TraceGVN("Analyzing block B%d\n", block->block_id());
+ TraceGVN("Analyzing block B%d%s\n",
+ block->block_id(),
+ block->IsLoopHeader() ? " (loop header)" : "");
// If this is a loop header kill everything killed by the loop.
if (block->IsLoopHeader()) {
@@ -1405,8 +1507,7 @@ void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) {
instr->Mnemonic(),
other->id(),
other->Mnemonic());
- instr->ReplaceValue(other);
- instr->Delete();
+ instr->DeleteAndReplaceWith(other);
} else {
map->Add(instr);
}
@@ -1419,25 +1520,20 @@ void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) {
for (int i = 0; i < length; ++i) {
HBasicBlock* dominated = block->dominated_blocks()->at(i);
// No need to copy the map for the last child in the dominator tree.
- HValueMap* successor_map = (i == length - 1) ? map : map->Copy();
-
- // If the dominated block is not a successor to this block we have to
- // kill everything killed on any path between this block and the
- // dominated block. Note we rely on the block ordering.
- bool is_successor = false;
- int predecessor_count = dominated->predecessors()->length();
- for (int j = 0; !is_successor && j < predecessor_count; ++j) {
- is_successor = (dominated->predecessors()->at(j) == block);
- }
-
- if (!is_successor) {
- int side_effects = 0;
- for (int j = block->block_id() + 1; j < dominated->block_id(); ++j) {
- side_effects |= block_side_effects_[j];
- }
- successor_map->Kill(side_effects);
+ HValueMap* successor_map = (i == length - 1) ? map : map->Copy(zone());
+
+ // Kill everything killed on any path between this block and the
+ // dominated block.
+ // We don't have to traverse these paths if the value map is
+ // already empty.
+ // If the range of block ids (block_id, dominated_id) is empty
+ // there are no such paths.
+ if (!successor_map->IsEmpty() &&
+ block->block_id() + 1 < dominated->block_id()) {
+ visited_on_paths_.Clear();
+ successor_map->Kill(CollectSideEffectsOnPathsToDominatedBlock(block,
+ dominated));
}
-
AnalyzeBlock(dominated, successor_map);
}
}
@@ -1457,6 +1553,8 @@ class HInferRepresentation BASE_EMBEDDED {
void AddDependantsToWorklist(HValue* current);
void InferBasedOnUses(HValue* current);
+ Zone* zone() { return graph_->zone(); }
+
HGraph* graph_;
ZoneList<HValue*> worklist_;
BitVector in_worklist_;
@@ -1488,12 +1586,12 @@ void HInferRepresentation::InferBasedOnInputs(HValue* current) {
}
-void HInferRepresentation::AddDependantsToWorklist(HValue* current) {
- for (int i = 0; i < current->uses()->length(); ++i) {
- AddToWorklist(current->uses()->at(i));
+void HInferRepresentation::AddDependantsToWorklist(HValue* value) {
+ for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+ AddToWorklist(it.value());
}
- for (int i = 0; i < current->OperandCount(); ++i) {
- AddToWorklist(current->OperandAt(i));
+ for (int i = 0; i < value->OperandCount(); ++i) {
+ AddToWorklist(value->OperandAt(i));
}
}
@@ -1502,37 +1600,30 @@ void HInferRepresentation::AddDependantsToWorklist(HValue* current) {
// given as the parameter has a benefit in terms of less necessary type
// conversions. If there is a benefit, then the representation of the value is
// specialized.
-void HInferRepresentation::InferBasedOnUses(HValue* current) {
- Representation r = current->representation();
- if (r.IsSpecialization() || current->HasNoUses()) return;
- ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
- Representation new_rep = TryChange(current);
+void HInferRepresentation::InferBasedOnUses(HValue* value) {
+ Representation r = value->representation();
+ if (r.IsSpecialization() || value->HasNoUses()) return;
+ ASSERT(value->CheckFlag(HValue::kFlexibleRepresentation));
+ Representation new_rep = TryChange(value);
if (!new_rep.IsNone()) {
- if (!current->representation().Equals(new_rep)) {
- current->ChangeRepresentation(new_rep);
- AddDependantsToWorklist(current);
+ if (!value->representation().Equals(new_rep)) {
+ value->ChangeRepresentation(new_rep);
+ AddDependantsToWorklist(value);
}
}
}
-Representation HInferRepresentation::TryChange(HValue* current) {
+Representation HInferRepresentation::TryChange(HValue* value) {
// Array of use counts for each representation.
- int use_count[Representation::kNumRepresentations];
- for (int i = 0; i < Representation::kNumRepresentations; i++) {
- use_count[i] = 0;
- }
+ int use_count[Representation::kNumRepresentations] = { 0 };
- for (int i = 0; i < current->uses()->length(); ++i) {
- HValue* use = current->uses()->at(i);
- int index = use->LookupOperandIndex(0, current);
- Representation req_rep = use->RequiredInputRepresentation(index);
- if (req_rep.IsNone()) continue;
- if (use->IsPhi()) {
- HPhi* phi = HPhi::cast(use);
- phi->AddIndirectUsesTo(&use_count[0]);
- }
- use_count[req_rep.kind()]++;
+ for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ Representation rep = use->RequiredInputRepresentation(it.index());
+ if (rep.IsNone()) continue;
+ if (use->IsPhi()) HPhi::cast(use)->AddIndirectUsesTo(&use_count[0]);
+ ++use_count[rep.kind()];
}
int tagged_count = use_count[Representation::kTagged];
int double_count = use_count[Representation::kDouble];
@@ -1540,19 +1631,17 @@ Representation HInferRepresentation::TryChange(HValue* current) {
int non_tagged_count = double_count + int32_count;
// If a non-loop phi has tagged uses, don't convert it to untagged.
- if (current->IsPhi() && !current->block()->IsLoopHeader()) {
+ if (value->IsPhi() && !value->block()->IsLoopHeader()) {
if (tagged_count > 0) return Representation::None();
}
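  // For example, a convertible value with two Integer32 uses and one
  // Double use (non_tagged_count 3, tagged_count 0) is specialized to
  // Integer32; the Double use then gets an explicit representation change
  // inserted by InsertRepresentationChangesForValue below.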
if (non_tagged_count >= tagged_count) {
- // More untagged than tagged.
- if (double_count > 0) {
- // There is at least one usage that is a double => guess that the
- // correct representation is double.
- return Representation::Double();
- } else if (int32_count > 0) {
- return Representation::Integer32();
+ if (int32_count > 0) {
+ if (!value->IsPhi() || value->IsConvertibleToInteger()) {
+ return Representation::Integer32();
+ }
}
+ if (double_count > 0) return Representation::Double();
}
return Representation::None();
}
@@ -1561,41 +1650,40 @@ Representation HInferRepresentation::TryChange(HValue* current) {
void HInferRepresentation::Analyze() {
HPhase phase("Infer representations", graph_);
- // (1) Initialize bit vectors and count real uses. Each phi
- // gets a bit-vector of length <number of phis>.
+ // (1) Initialize bit vectors and count real uses. Each phi gets a
+ // bit-vector of length <number of phis>.
const ZoneList<HPhi*>* phi_list = graph_->phi_list();
- int num_phis = phi_list->length();
- ScopedVector<BitVector*> connected_phis(num_phis);
- for (int i = 0; i < num_phis; i++) {
+ int phi_count = phi_list->length();
+ ZoneList<BitVector*> connected_phis(phi_count);
+ for (int i = 0; i < phi_count; ++i) {
phi_list->at(i)->InitRealUses(i);
- connected_phis[i] = new BitVector(num_phis);
- connected_phis[i]->Add(i);
+ BitVector* connected_set = new(zone()) BitVector(phi_count);
+ connected_set->Add(i);
+ connected_phis.Add(connected_set);
}
- // (2) Do a fixed point iteration to find the set of connected phis.
- // A phi is connected to another phi if its value is used either
- // directly or indirectly through a transitive closure of the def-use
- // relation.
+ // (2) Do a fixed point iteration to find the set of connected phis. A
+ // phi is connected to another phi if its value is used either directly or
+ // indirectly through a transitive closure of the def-use relation.
bool change = true;
while (change) {
change = false;
- for (int i = 0; i < num_phis; i++) {
+ for (int i = 0; i < phi_count; ++i) {
HPhi* phi = phi_list->at(i);
- for (int j = 0; j < phi->uses()->length(); j++) {
- HValue* use = phi->uses()->at(j);
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
if (use->IsPhi()) {
- int phi_use = HPhi::cast(use)->phi_id();
- if (connected_phis[i]->UnionIsChanged(*connected_phis[phi_use])) {
- change = true;
- }
+ int id = HPhi::cast(use)->phi_id();
+ change = change ||
+ connected_phis[i]->UnionIsChanged(*connected_phis[id]);
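+          // If change is already true, the union is skipped on this pass;
+          // the enclosing while loop runs again and performs it then, so
+          // the fixed point is still reached.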
}
}
}
}
- // (3) Sum up the non-phi use counts of all connected phis.
- // Don't include the non-phi uses of the phi itself.
- for (int i = 0; i < num_phis; i++) {
+ // (3) Sum up the non-phi use counts of all connected phis. Don't include
+ // the non-phi uses of the phi itself.
+ for (int i = 0; i < phi_count; ++i) {
HPhi* phi = phi_list->at(i);
for (BitVector::Iterator it(connected_phis.at(i));
!it.Done();
@@ -1608,6 +1696,25 @@ void HInferRepresentation::Analyze() {
}
}
+ // (4) Compute phis that definitely can't be converted to integer
+ // without deoptimization and mark them to avoid unnecessary deoptimization.
+ change = true;
+ while (change) {
+ change = false;
+ for (int i = 0; i < phi_count; ++i) {
+ HPhi* phi = phi_list->at(i);
+ for (int j = 0; j < phi->OperandCount(); ++j) {
+ if (phi->IsConvertibleToInteger() &&
+ !phi->OperandAt(j)->IsConvertibleToInteger()) {
+ phi->set_is_convertible_to_integer(false);
+ change = true;
+ break;
+ }
+ }
+ }
+ }
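+  // From here on, is_convertible_to_integer is false for any phi that
+  // transitively depends on a non-convertible operand, so TryChange will
+  // not specialize such a phi to Integer32.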
+
for (int i = 0; i < graph_->blocks()->length(); ++i) {
HBasicBlock* block = graph_->blocks()->at(i);
const ZoneList<HPhi*>* phis = block->phis();
@@ -1705,18 +1812,16 @@ void HGraph::PropagateMinusZeroChecks(HValue* value, BitVector* visited) {
void HGraph::InsertRepresentationChangeForUse(HValue* value,
- HValue* use,
- Representation to,
- bool is_truncating) {
+ HValue* use_value,
+ int use_index,
+ Representation to) {
// Insert the representation change right before its use. For phi-uses we
// insert at the end of the corresponding predecessor.
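  // (A phi takes one value per incoming edge, so the conversion for a phi
  // operand belongs on that edge, i.e. at the end of the predecessor block
  // that supplies the operand.)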
HInstruction* next = NULL;
- if (use->IsPhi()) {
- int index = 0;
- while (use->OperandAt(index) != value) ++index;
- next = use->block()->predecessors()->at(index)->end();
+ if (use_value->IsPhi()) {
+ next = use_value->block()->predecessors()->at(use_index)->end();
} else {
- next = HInstruction::cast(use);
+ next = HInstruction::cast(use_value);
}
// For constants we try to make the representation change at compile
@@ -1724,6 +1829,9 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value,
// information we treat constants like normal instructions and insert the
// change instructions for them.
HInstruction* new_value = NULL;
+ bool is_truncating = use_value->CheckFlag(HValue::kTruncatingToInt32);
+ bool deoptimize_on_undefined =
+ use_value->CheckFlag(HValue::kDeoptimizeOnUndefined);
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
// Try to create a new copy of the constant with the new representation.
@@ -1733,88 +1841,36 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value,
}
if (new_value == NULL) {
- new_value = new HChange(value, value->representation(), to);
+ new_value = new(zone()) HChange(value, value->representation(), to,
+ is_truncating, deoptimize_on_undefined);
}
new_value->InsertBefore(next);
- value->ReplaceFirstAtUse(use, new_value, to);
+ use_value->SetOperandAt(use_index, new_value);
}
-int CompareConversionUses(HValue* a,
- HValue* b,
- Representation a_rep,
- Representation b_rep) {
- if (a_rep.kind() > b_rep.kind()) {
- // Make sure specializations are separated in the result array.
- return 1;
- }
- // Put truncating conversions before non-truncating conversions.
- bool a_truncate = a->CheckFlag(HValue::kTruncatingToInt32);
- bool b_truncate = b->CheckFlag(HValue::kTruncatingToInt32);
- if (a_truncate != b_truncate) {
- return a_truncate ? -1 : 1;
- }
- // Sort by increasing block ID.
- return a->block()->block_id() - b->block()->block_id();
-}
-
-
-void HGraph::InsertRepresentationChanges(HValue* current) {
- Representation r = current->representation();
+void HGraph::InsertRepresentationChangesForValue(HValue* value) {
+ Representation r = value->representation();
if (r.IsNone()) return;
- if (current->uses()->length() == 0) return;
-
- // Collect the representation changes in a sorted list. This allows
- // us to avoid duplicate changes without searching the list.
- ZoneList<HValue*> to_convert(2);
- ZoneList<Representation> to_convert_reps(2);
- for (int i = 0; i < current->uses()->length(); ++i) {
- HValue* use = current->uses()->at(i);
- // The occurrences index means the index within the operand array of "use"
- // at which "current" is used. While iterating through the use array we
- // also have to iterate over the different occurrence indices.
- int occurrence_index = 0;
- if (use->UsesMultipleTimes(current)) {
- occurrence_index = current->uses()->CountOccurrences(use, 0, i - 1);
- if (FLAG_trace_representation) {
- PrintF("Instruction %d is used multiple times at %d; occurrence=%d\n",
- current->id(),
- use->id(),
- occurrence_index);
- }
- }
- int operand_index = use->LookupOperandIndex(occurrence_index, current);
- Representation req = use->RequiredInputRepresentation(operand_index);
+ if (value->HasNoUses()) return;
+
+ for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+ HValue* use_value = it.value();
+ int use_index = it.index();
+ Representation req = use_value->RequiredInputRepresentation(use_index);
if (req.IsNone() || req.Equals(r)) continue;
- int index = 0;
- while (to_convert.length() > index &&
- CompareConversionUses(to_convert[index],
- use,
- to_convert_reps[index],
- req) < 0) {
- ++index;
- }
- if (FLAG_trace_representation) {
- PrintF("Inserting a representation change to %s of %d for use at %d\n",
- req.Mnemonic(),
- current->id(),
- use->id());
- }
- to_convert.InsertAt(index, use);
- to_convert_reps.InsertAt(index, req);
+ InsertRepresentationChangeForUse(value, use_value, use_index, req);
}
-
- for (int i = 0; i < to_convert.length(); ++i) {
- HValue* use = to_convert[i];
- Representation r_to = to_convert_reps[i];
- bool is_truncating = use->CheckFlag(HValue::kTruncatingToInt32);
- InsertRepresentationChangeForUse(current, use, r_to, is_truncating);
+ if (value->HasNoUses()) {
+ ASSERT(value->IsConstant());
+ value->DeleteAndReplaceWith(NULL);
}
- if (current->uses()->is_empty()) {
- ASSERT(current->IsConstant());
- current->Delete();
+  // The only purpose of an HForceRepresentation is to represent the value
+ // after the (possible) HChange instruction. We make it disappear.
+ if (value->IsForceRepresentation()) {
+ value->DeleteAndReplaceWith(HForceRepresentation::cast(value)->value());
}
}
@@ -1840,8 +1896,8 @@ void HGraph::InsertRepresentationChanges() {
for (int i = 0; i < phi_list()->length(); i++) {
HPhi* phi = phi_list()->at(i);
if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
- for (int j = 0; j < phi->uses()->length(); j++) {
- HValue* use = phi->uses()->at(j);
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
if (!use->CheckFlag(HValue::kTruncatingToInt32)) {
phi->ClearFlag(HValue::kTruncatingToInt32);
change = true;
@@ -1853,21 +1909,55 @@ void HGraph::InsertRepresentationChanges() {
for (int i = 0; i < blocks_.length(); ++i) {
// Process phi instructions first.
- for (int j = 0; j < blocks_[i]->phis()->length(); j++) {
- HPhi* phi = blocks_[i]->phis()->at(j);
- InsertRepresentationChanges(phi);
+ const ZoneList<HPhi*>* phis = blocks_[i]->phis();
+ for (int j = 0; j < phis->length(); j++) {
+ InsertRepresentationChangesForValue(phis->at(j));
}
// Process normal instructions.
HInstruction* current = blocks_[i]->first();
while (current != NULL) {
- InsertRepresentationChanges(current);
+ InsertRepresentationChangesForValue(current);
current = current->next();
}
}
}
+void HGraph::RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi) {
+ if (phi->CheckFlag(HValue::kDeoptimizeOnUndefined)) return;
+ phi->SetFlag(HValue::kDeoptimizeOnUndefined);
+ for (int i = 0; i < phi->OperandCount(); ++i) {
+ HValue* input = phi->OperandAt(i);
+ if (input->IsPhi()) {
+ RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi::cast(input));
+ }
+ }
+}
+
+
+void HGraph::MarkDeoptimizeOnUndefined() {
+ HPhase phase("MarkDeoptimizeOnUndefined", this);
+ // Compute DeoptimizeOnUndefined flag for phis.
+ // Any phi that can reach a use with DeoptimizeOnUndefined set must
+ // have DeoptimizeOnUndefined set. Currently only HCompare, with
+ // double input representation, has this flag set.
+ // The flag is used by HChange tagged->double, which must deoptimize
+ // if one of its uses has this flag set.
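+  // Without the flag, tagged->double would convert undefined to NaN, and a
+  // double compare could then compute a wrong result: e.g. undefined ==
+  // undefined is true, but NaN == NaN is false.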
+ for (int i = 0; i < phi_list()->length(); i++) {
+ HPhi* phi = phi_list()->at(i);
+ if (phi->representation().IsDouble()) {
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ if (it.value()->CheckFlag(HValue::kDeoptimizeOnUndefined)) {
+ RecursivelyMarkPhiDeoptimizeOnUndefined(phi);
+ break;
+ }
+ }
+ }
+ }
+}
+
+
void HGraph::ComputeMinusZeroChecks() {
BitVector visited(GetMaximumValueID());
for (int i = 0; i < blocks_.length(); ++i) {
@@ -1892,10 +1982,55 @@ void HGraph::ComputeMinusZeroChecks() {
}
+// Implementation of utility class to encapsulate the translation state for
+// a (possibly inlined) function.
+FunctionState::FunctionState(HGraphBuilder* owner,
+ CompilationInfo* info,
+ TypeFeedbackOracle* oracle)
+ : owner_(owner),
+ compilation_info_(info),
+ oracle_(oracle),
+ call_context_(NULL),
+ function_return_(NULL),
+ test_context_(NULL),
+ outer_(owner->function_state()) {
+ if (outer_ != NULL) {
+ // State for an inline function.
+ if (owner->ast_context()->IsTest()) {
+ HBasicBlock* if_true = owner->graph()->CreateBasicBlock();
+ HBasicBlock* if_false = owner->graph()->CreateBasicBlock();
+ if_true->MarkAsInlineReturnTarget();
+ if_false->MarkAsInlineReturnTarget();
+ Expression* cond = TestContext::cast(owner->ast_context())->condition();
+ // The AstContext constructor pushed on the context stack. This newed
+ // instance is the reason that AstContext can't be BASE_EMBEDDED.
+ test_context_ = new TestContext(owner, cond, if_true, if_false);
+ } else {
+ function_return_ = owner->graph()->CreateBasicBlock();
+ function_return()->MarkAsInlineReturnTarget();
+ }
+ // Set this after possibly allocating a new TestContext above.
+ call_context_ = owner->ast_context();
+ }
+
+ // Push on the state stack.
+ owner->set_function_state(this);
+}
+
+
+FunctionState::~FunctionState() {
+ delete test_context_;
+ owner_->set_function_state(outer_);
+}
+
+
// Implementation of utility classes to represent an expression's context in
// the AST.
AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
- : owner_(owner), kind_(kind), outer_(owner->ast_context()) {
+ : owner_(owner),
+ kind_(kind),
+ outer_(owner->ast_context()),
+ for_typeof_(false) {
owner->set_ast_context(this); // Push.
#ifdef DEBUG
original_length_ = owner->environment()->length();
@@ -1930,6 +2065,9 @@ void EffectContext::ReturnValue(HValue* value) {
void ValueContext::ReturnValue(HValue* value) {
// The value is tracked in the bailout environment, and communicated
// through the environment as the result of the expression.
+ if (!arguments_allowed() && value->CheckFlag(HValue::kIsArguments)) {
+ owner()->Bailout("bad value context for arguments value");
+ }
owner()->Push(value);
}
@@ -1946,6 +2084,9 @@ void EffectContext::ReturnInstruction(HInstruction* instr, int ast_id) {
void ValueContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+ if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
+ owner()->Bailout("bad value context for arguments object value");
+ }
owner()->AddInstruction(instr);
owner()->Push(instr);
if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
@@ -1972,98 +2113,39 @@ void TestContext::BuildBranch(HValue* value) {
// property by always adding an empty block on the outgoing edges of this
// branch.
HGraphBuilder* builder = owner();
+ if (value->CheckFlag(HValue::kIsArguments)) {
+ builder->Bailout("arguments object value in a test context");
+ }
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
- HTest* test = new HTest(value, empty_true, empty_false);
+ HTest* test = new(zone()) HTest(value, empty_true, empty_false);
builder->current_block()->Finish(test);
- HValue* const no_return_value = NULL;
- HBasicBlock* true_target = if_true();
- if (true_target->IsInlineReturnTarget()) {
- empty_true->AddLeaveInlined(no_return_value, true_target);
- } else {
- empty_true->Goto(true_target);
- }
-
- HBasicBlock* false_target = if_false();
- if (false_target->IsInlineReturnTarget()) {
- empty_false->AddLeaveInlined(no_return_value, false_target);
- } else {
- empty_false->Goto(false_target);
- }
+ empty_true->Goto(if_true());
+ empty_false->Goto(if_false());
builder->set_current_block(NULL);
}
// HGraphBuilder infrastructure for bailing out and checking bailouts.
-#define BAILOUT(reason) \
- do { \
- Bailout(reason); \
- return; \
- } while (false)
-
-
-#define CHECK_BAILOUT \
- do { \
- if (HasStackOverflow()) return; \
- } while (false)
-
-
-#define VISIT_FOR_EFFECT(expr) \
+#define CHECK_BAILOUT(call) \
do { \
- VisitForEffect(expr); \
+ call; \
if (HasStackOverflow()) return; \
} while (false)
-#define VISIT_FOR_VALUE(expr) \
- do { \
- VisitForValue(expr); \
- if (HasStackOverflow()) return; \
- } while (false)
-
-
-#define VISIT_FOR_CONTROL(expr, true_block, false_block) \
+#define CHECK_ALIVE(call) \
do { \
- VisitForControl(expr, true_block, false_block); \
- if (HasStackOverflow()) return; \
- } while (false)
-
-
-// 'thing' could be an expression, statement, or list of statements.
-#define ADD_TO_SUBGRAPH(graph, thing) \
- do { \
- AddToSubgraph(graph, thing); \
- if (HasStackOverflow()) return; \
+ call; \
+ if (HasStackOverflow() || current_block() == NULL) return; \
} while (false)
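+
+
+// CHECK_BAILOUT aborts the current visitor only on stack overflow, while
+// CHECK_ALIVE also returns when the current block has been closed (set to
+// NULL), e.g. after translating a return, break, continue or deoptimize.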
-class HGraphBuilder::SubgraphScope BASE_EMBEDDED {
- public:
- SubgraphScope(HGraphBuilder* builder, HSubgraph* new_subgraph)
- : builder_(builder) {
- old_subgraph_ = builder_->current_subgraph_;
- subgraph_ = new_subgraph;
- builder_->current_subgraph_ = subgraph_;
- }
-
- ~SubgraphScope() {
- builder_->current_subgraph_ = old_subgraph_;
- }
-
- HSubgraph* subgraph() const { return subgraph_; }
-
- private:
- HGraphBuilder* builder_;
- HSubgraph* old_subgraph_;
- HSubgraph* subgraph_;
-};
-
-
void HGraphBuilder::Bailout(const char* reason) {
if (FLAG_trace_bailout) {
- SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
- PrintF("Bailout in HGraphBuilder: @\"%s\": %s\n", *debug_name, reason);
+ SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+ PrintF("Bailout in HGraphBuilder: @\"%s\": %s\n", *name, reason);
}
SetStackOverflow();
}
@@ -2075,123 +2157,155 @@ void HGraphBuilder::VisitForEffect(Expression* expr) {
}
-void HGraphBuilder::VisitForValue(Expression* expr) {
- ValueContext for_value(this);
+void HGraphBuilder::VisitForValue(Expression* expr, ArgumentsAllowedFlag flag) {
+ ValueContext for_value(this, flag);
+ Visit(expr);
+}
+
+
+void HGraphBuilder::VisitForTypeOf(Expression* expr) {
+ ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
+ for_value.set_for_typeof(true);
Visit(expr);
}
+
void HGraphBuilder::VisitForControl(Expression* expr,
HBasicBlock* true_block,
HBasicBlock* false_block) {
- TestContext for_test(this, true_block, false_block);
+ TestContext for_test(this, expr, true_block, false_block);
Visit(expr);
}
-void HGraphBuilder::VisitArgument(Expression* expr) {
- VISIT_FOR_VALUE(expr);
- Push(AddInstruction(new HPushArgument(Pop())));
+HValue* HGraphBuilder::VisitArgument(Expression* expr) {
+ VisitForValue(expr);
+ if (HasStackOverflow() || current_block() == NULL) return NULL;
+ HValue* value = Pop();
+ Push(AddInstruction(new(zone()) HPushArgument(value)));
+ return value;
}
void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) {
for (int i = 0; i < arguments->length(); i++) {
- VisitArgument(arguments->at(i));
- if (HasStackOverflow() || current_block() == NULL) return;
+ CHECK_ALIVE(VisitArgument(arguments->at(i)));
}
}
void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) {
for (int i = 0; i < exprs->length(); ++i) {
- VISIT_FOR_VALUE(exprs->at(i));
+ CHECK_ALIVE(VisitForValue(exprs->at(i)));
}
}
-HGraph* HGraphBuilder::CreateGraph(CompilationInfo* info) {
- ASSERT(current_subgraph_ == NULL);
- graph_ = new HGraph(info);
+HGraph* HGraphBuilder::CreateGraph() {
+ graph_ = new(zone()) HGraph(info());
+ if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info());
{
HPhase phase("Block building");
- graph_->Initialize(CreateBasicBlock(graph_->start_environment()));
- current_subgraph_ = graph_;
+ current_block_ = graph()->entry_block();
- Scope* scope = info->scope();
+ Scope* scope = info()->scope();
+ if (scope->HasIllegalRedeclaration()) {
+ Bailout("function with illegal redeclaration");
+ return NULL;
+ }
SetupScope(scope);
VisitDeclarations(scope->declarations());
-
- AddInstruction(new HStackCheck());
-
- ZoneList<Statement*>* stmts = info->function()->body();
- HSubgraph* body = CreateGotoSubgraph(environment());
- current_block()->Goto(body->entry_block());
- AddToSubgraph(body, stmts);
+ AddInstruction(new(zone()) HStackCheck(HStackCheck::kFunctionEntry));
+
+ // Add an edge to the body entry. This is warty: the graph's start
+ // environment will be used by the Lithium translation as the initial
+ // environment on graph entry, but it has now been mutated by the
+ // Hydrogen translation of the instructions in the start block. This
+ // environment uses values which have not been defined yet. These
+ // Hydrogen instructions will then be replayed by the Lithium
+ // translation, so they cannot have an environment effect. The edge to
+ // the body's entry block (along with some special logic for the start
+ // block in HInstruction::InsertAfter) seals the start block from
+ // getting unwanted instructions inserted.
+ //
+ // TODO(kmillikin): Fix this. Stop mutating the initial environment.
+ // Make the Hydrogen instructions in the initial block into Hydrogen
+ // values (but not instructions), present in the initial environment and
+ // not replayed by the Lithium translation.
+ HEnvironment* initial_env = environment()->CopyWithoutHistory();
+ HBasicBlock* body_entry = CreateBasicBlock(initial_env);
+ current_block()->Goto(body_entry);
+ body_entry->SetJoinId(AstNode::kFunctionEntryId);
+ set_current_block(body_entry);
+ VisitStatements(info()->function()->body());
if (HasStackOverflow()) return NULL;
- body->entry_block()->SetJoinId(info->function()->id());
- set_current_block(body->exit_block());
- if (graph()->exit_block() != NULL) {
- HReturn* instr = new HReturn(graph()->GetConstantUndefined());
- graph()->exit_block()->FinishExit(instr);
- graph()->set_exit_block(NULL);
+ if (current_block() != NULL) {
+ HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined());
+ current_block()->FinishExit(instr);
+ set_current_block(NULL);
}
}
- graph_->OrderBlocks();
- graph_->AssignDominators();
- graph_->EliminateRedundantPhis();
- if (!graph_->CollectPhis()) {
- Bailout("Phi-use of arguments object");
+ graph()->OrderBlocks();
+ graph()->AssignDominators();
+ graph()->EliminateRedundantPhis();
+ if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
+ if (!graph()->CollectPhis()) {
+ Bailout("Unsupported phi-use");
return NULL;
}
- HInferRepresentation rep(graph_);
+ HInferRepresentation rep(graph());
rep.Analyze();
if (FLAG_use_range) {
- HRangeAnalysis rangeAnalysis(graph_);
+ HRangeAnalysis rangeAnalysis(graph());
rangeAnalysis.Analyze();
}
- graph_->InitializeInferredTypes();
- graph_->Canonicalize();
- graph_->InsertRepresentationChanges();
- graph_->ComputeMinusZeroChecks();
+ graph()->InitializeInferredTypes();
+ graph()->Canonicalize();
+ graph()->MarkDeoptimizeOnUndefined();
+ graph()->InsertRepresentationChanges();
+ graph()->ComputeMinusZeroChecks();
// Eliminate redundant stack checks on backwards branches.
- HStackCheckEliminator sce(graph_);
+ HStackCheckEliminator sce(graph());
sce.Process();
// Perform common subexpression elimination and loop-invariant code motion.
if (FLAG_use_gvn) {
- HPhase phase("Global value numbering", graph_);
- HGlobalValueNumberer gvn(graph_);
+ HPhase phase("Global value numbering", graph());
+ HGlobalValueNumberer gvn(graph(), info());
gvn.Analyze();
}
- return graph_;
-}
-
+ // Replace the results of check instructions with the original value, if the
+ // result is used. This is safe now, since we don't do code motion after this
+ // point. It enables better register allocation since the value produced by
+ // check instructions is really a copy of the original value.
+ graph()->ReplaceCheckedValues();
-void HGraphBuilder::AddToSubgraph(HSubgraph* graph, Statement* stmt) {
- SubgraphScope scope(this, graph);
- Visit(stmt);
+ return graph();
}
-void HGraphBuilder::AddToSubgraph(HSubgraph* graph, Expression* expr) {
- SubgraphScope scope(this, graph);
- VisitForValue(expr);
-}
-
-
-void HGraphBuilder::AddToSubgraph(HSubgraph* graph,
- ZoneList<Statement*>* stmts) {
- SubgraphScope scope(this, graph);
- VisitStatements(stmts);
+void HGraph::ReplaceCheckedValues() {
+ HPhase phase("Replace checked values", this);
+ for (int i = 0; i < blocks()->length(); ++i) {
+ HInstruction* instr = blocks()->at(i)->first();
+ while (instr != NULL) {
+ if (instr->IsBoundsCheck()) {
+ // Replace all uses of the checked value with the original input.
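+        // E.g. uses of the HBoundsCheck emitted for a keyed access revert
+        // to using the raw index value; the check instruction itself stays
+        // in place.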
+ ASSERT(instr->UseCount() > 0);
+ instr->ReplaceAllUsesWith(HBoundsCheck::cast(instr)->index());
+ }
+ instr = instr->next();
+ }
+ }
}
@@ -2202,9 +2316,9 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
}
-void HGraphBuilder::AddSimulate(int id) {
+void HGraphBuilder::AddSimulate(int ast_id) {
ASSERT(current_block() != NULL);
- current_block()->AddSimulate(id);
+ current_block()->AddSimulate(ast_id);
}
@@ -2229,54 +2343,55 @@ HInstruction* HGraphBuilder::PreProcessCall(HCall<V>* call) {
}
while (!arguments.is_empty()) {
- AddInstruction(new HPushArgument(arguments.RemoveLast()));
+ AddInstruction(new(zone()) HPushArgument(arguments.RemoveLast()));
}
return call;
}
void HGraphBuilder::SetupScope(Scope* scope) {
- // We don't yet handle the function name for named function expressions.
- if (scope->function() != NULL) BAILOUT("named function expression");
-
- HConstant* undefined_constant =
- new HConstant(Factory::undefined_value(), Representation::Tagged());
+ HConstant* undefined_constant = new(zone()) HConstant(
+ isolate()->factory()->undefined_value(), Representation::Tagged());
AddInstruction(undefined_constant);
graph_->set_undefined_constant(undefined_constant);
// Set the initial values of parameters including "this". "This" has
// parameter index 0.
- int count = scope->num_parameters() + 1;
- for (int i = 0; i < count; ++i) {
- HInstruction* parameter = AddInstruction(new HParameter(i));
+ ASSERT_EQ(scope->num_parameters() + 1, environment()->parameter_count());
+
+ for (int i = 0; i < environment()->parameter_count(); ++i) {
+ HInstruction* parameter = AddInstruction(new(zone()) HParameter(i));
environment()->Bind(i, parameter);
}
- // Set the initial values of stack-allocated locals.
- for (int i = count; i < environment()->length(); ++i) {
+ // First special is HContext.
+ HInstruction* context = AddInstruction(new(zone()) HContext);
+ environment()->BindContext(context);
+
+ // Initialize specials and locals to undefined.
+ for (int i = environment()->parameter_count() + 1;
+ i < environment()->length();
+ ++i) {
environment()->Bind(i, undefined_constant);
}
// Handle the arguments and arguments shadow variables specially (they do
// not have declarations).
if (scope->arguments() != NULL) {
- if (!scope->arguments()->IsStackAllocated() ||
- !scope->arguments_shadow()->IsStackAllocated()) {
- BAILOUT("context-allocated arguments");
+ if (!scope->arguments()->IsStackAllocated()) {
+ return Bailout("context-allocated arguments");
}
- HArgumentsObject* object = new HArgumentsObject;
+ HArgumentsObject* object = new(zone()) HArgumentsObject;
AddInstruction(object);
graph()->SetArgumentsObject(object);
environment()->Bind(scope->arguments(), object);
- environment()->Bind(scope->arguments_shadow(), object);
}
}
void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
- Visit(statements->at(i));
- if (HasStackOverflow() || current_block() == NULL) break;
+ CHECK_ALIVE(Visit(statements->at(i)));
}
}
@@ -2288,42 +2403,7 @@ HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
}
-HSubgraph* HGraphBuilder::CreateInlinedSubgraph(HEnvironment* outer,
- Handle<JSFunction> target,
- FunctionLiteral* function) {
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner =
- outer->CopyForInlining(target, function, true, undefined);
- HSubgraph* subgraph = new HSubgraph(graph());
- subgraph->Initialize(CreateBasicBlock(inner));
- return subgraph;
-}
-
-
-HSubgraph* HGraphBuilder::CreateGotoSubgraph(HEnvironment* env) {
- HSubgraph* subgraph = new HSubgraph(graph());
- HEnvironment* new_env = env->CopyWithoutHistory();
- subgraph->Initialize(CreateBasicBlock(new_env));
- return subgraph;
-}
-
-
-HSubgraph* HGraphBuilder::CreateEmptySubgraph() {
- HSubgraph* subgraph = new HSubgraph(graph());
- subgraph->Initialize(graph()->CreateBasicBlock());
- return subgraph;
-}
-
-
-HSubgraph* HGraphBuilder::CreateBranchSubgraph(HEnvironment* env) {
- HSubgraph* subgraph = new HSubgraph(graph());
- HEnvironment* new_env = env->Copy();
- subgraph->Initialize(CreateBasicBlock(new_env));
- return subgraph;
-}
-
-
-HBasicBlock* HGraphBuilder::CreateLoopHeader() {
+HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
HBasicBlock* header = graph()->CreateBasicBlock();
HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
header->SetInitialEnvironment(entry_env);
@@ -2333,35 +2413,41 @@ HBasicBlock* HGraphBuilder::CreateLoopHeader() {
void HGraphBuilder::VisitBlock(Block* stmt) {
- if (stmt->labels() != NULL) {
- HSubgraph* block_graph = CreateGotoSubgraph(environment());
- current_block()->Goto(block_graph->entry_block());
- block_graph->entry_block()->SetJoinId(stmt->EntryId());
- BreakAndContinueInfo break_info(stmt);
- { BreakAndContinueScope push(&break_info, this);
- ADD_TO_SUBGRAPH(block_graph, stmt->statements());
- }
- HBasicBlock* break_block = break_info.break_block();
- if (break_block != NULL) break_block->SetJoinId(stmt->EntryId());
- set_current_block(CreateJoin(block_graph->exit_block(),
- break_block,
- stmt->ExitId()));
- } else {
- VisitStatements(stmt->statements());
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ BreakAndContinueInfo break_info(stmt);
+ { BreakAndContinueScope push(&break_info, this);
+ CHECK_BAILOUT(VisitStatements(stmt->statements()));
+ }
+ HBasicBlock* break_block = break_info.break_block();
+ if (break_block != NULL) {
+ if (current_block() != NULL) current_block()->Goto(break_block);
+ break_block->SetJoinId(stmt->ExitId());
+ set_current_block(break_block);
}
}
void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
VisitForEffect(stmt->expression());
}
void HGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
}
void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
if (stmt->condition()->ToBooleanIsTrue()) {
AddSimulate(stmt->ThenId());
Visit(stmt->then_statement());
@@ -2369,21 +2455,30 @@ void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
AddSimulate(stmt->ElseId());
Visit(stmt->else_statement());
} else {
- HSubgraph* then_graph = CreateEmptySubgraph();
- HSubgraph* else_graph = CreateEmptySubgraph();
- VISIT_FOR_CONTROL(stmt->condition(),
- then_graph->entry_block(),
- else_graph->entry_block());
+ HBasicBlock* cond_true = graph()->CreateBasicBlock();
+ HBasicBlock* cond_false = graph()->CreateBasicBlock();
+ CHECK_BAILOUT(VisitForControl(stmt->condition(), cond_true, cond_false));
- then_graph->entry_block()->SetJoinId(stmt->ThenId());
- ADD_TO_SUBGRAPH(then_graph, stmt->then_statement());
+ if (cond_true->HasPredecessor()) {
+ cond_true->SetJoinId(stmt->ThenId());
+ set_current_block(cond_true);
+ CHECK_BAILOUT(Visit(stmt->then_statement()));
+ cond_true = current_block();
+ } else {
+ cond_true = NULL;
+ }
- else_graph->entry_block()->SetJoinId(stmt->ElseId());
- ADD_TO_SUBGRAPH(else_graph, stmt->else_statement());
+ if (cond_false->HasPredecessor()) {
+ cond_false->SetJoinId(stmt->ElseId());
+ set_current_block(cond_false);
+ CHECK_BAILOUT(Visit(stmt->else_statement()));
+ cond_false = current_block();
+ } else {
+ cond_false = NULL;
+ }
- set_current_block(CreateJoin(then_graph->exit_block(),
- else_graph->exit_block(),
- stmt->id()));
+ HBasicBlock* join = CreateJoin(cond_true, cond_false, stmt->IfId());
+ set_current_block(join);
}
}
@@ -2420,6 +2515,9 @@ HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get(
void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
HBasicBlock* continue_block = break_scope()->Get(stmt->target(), CONTINUE);
current_block()->Goto(continue_block);
set_current_block(NULL);
@@ -2427,6 +2525,9 @@ void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
HBasicBlock* break_block = break_scope()->Get(stmt->target(), BREAK);
current_block()->Goto(break_block);
set_current_block(NULL);
@@ -2434,12 +2535,15 @@ void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
AstContext* context = call_context();
if (context == NULL) {
// Not an inlined return, so an actual one.
- VISIT_FOR_VALUE(stmt->expression());
+ CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* result = environment()->Pop();
- current_block()->FinishExit(new HReturn(result));
+ current_block()->FinishExit(new(zone()) HReturn(result));
set_current_block(NULL);
} else {
// Return from an inlined function, visit the subexpression in the
@@ -2449,213 +2553,172 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
VisitForControl(stmt->expression(),
test->if_true(),
test->if_false());
+ } else if (context->IsEffect()) {
+ CHECK_ALIVE(VisitForEffect(stmt->expression()));
+ current_block()->Goto(function_return());
} else {
- HValue* return_value = NULL;
- if (context->IsEffect()) {
- VISIT_FOR_EFFECT(stmt->expression());
- return_value = graph()->GetConstantUndefined();
- } else {
- ASSERT(context->IsValue());
- VISIT_FOR_VALUE(stmt->expression());
- return_value = environment()->Pop();
- }
- current_block()->AddLeaveInlined(return_value,
- function_return_);
- set_current_block(NULL);
+ ASSERT(context->IsValue());
+ CHECK_ALIVE(VisitForValue(stmt->expression()));
+ HValue* return_value = environment()->Pop();
+ current_block()->AddLeaveInlined(return_value, function_return());
}
+ set_current_block(NULL);
}
}
-void HGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
- BAILOUT("WithEnterStatement");
-}
-
-
-void HGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
- BAILOUT("WithExitStatement");
+void HGraphBuilder::VisitEnterWithContextStatement(
+ EnterWithContextStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ return Bailout("EnterWithContextStatement");
}
-HCompare* HGraphBuilder::BuildSwitchCompare(HSubgraph* subgraph,
- HValue* switch_value,
- CaseClause* clause) {
- AddToSubgraph(subgraph, clause->label());
- if (HasStackOverflow()) return NULL;
- HValue* clause_value = subgraph->exit_block()->last_environment()->Pop();
- HCompare* compare = new HCompare(switch_value,
- clause_value,
- Token::EQ_STRICT);
- compare->SetInputRepresentation(Representation::Integer32());
- subgraph->exit_block()->AddInstruction(compare);
- return compare;
+void HGraphBuilder::VisitExitContextStatement(ExitContextStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ return Bailout("ExitContextStatement");
}
void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
- VISIT_FOR_VALUE(stmt->tag());
- // TODO(3168478): simulate added for tag should be enough.
- AddSimulate(stmt->EntryId());
- HValue* switch_value = Pop();
-
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ // We only optimize switch statements with smi-literal smi comparisons,
+ // with a bounded number of clauses.
+ const int kCaseClauseLimit = 128;
ZoneList<CaseClause*>* clauses = stmt->cases();
- int num_clauses = clauses->length();
- if (num_clauses == 0) return;
- if (num_clauses > 128) BAILOUT("SwitchStatement: too many clauses");
-
- int num_smi_clauses = num_clauses;
- for (int i = 0; i < num_clauses; i++) {
- CaseClause* clause = clauses->at(i);
- if (clause->is_default()) continue;
- clause->RecordTypeFeedback(oracle());
- if (!clause->IsSmiCompare()) {
- if (i == 0) BAILOUT("SwitchStatement: no smi compares");
- // We will deoptimize if the first non-smi compare is reached.
- num_smi_clauses = i;
- break;
- }
- if (!clause->label()->IsSmiLiteral()) {
- BAILOUT("SwitchStatement: non-literal switch label");
- }
+ int clause_count = clauses->length();
+ if (clause_count > kCaseClauseLimit) {
+ return Bailout("SwitchStatement: too many clauses");
}
- // The single exit block of the whole switch statement.
- HBasicBlock* single_exit_block = graph_->CreateBasicBlock();
-
- // Build a series of empty subgraphs for the comparisons.
- // The default clause does not have a comparison subgraph.
- ZoneList<HSubgraph*> compare_graphs(num_smi_clauses);
- for (int i = 0; i < num_smi_clauses; i++) {
- if (clauses->at(i)->is_default()) {
- compare_graphs.Add(NULL);
- } else {
- compare_graphs.Add(CreateEmptySubgraph());
- }
- }
+ CHECK_ALIVE(VisitForValue(stmt->tag()));
+ AddSimulate(stmt->EntryId());
+ HValue* tag_value = Pop();
+ HBasicBlock* first_test_block = current_block();
- HSubgraph* prev_graph = current_subgraph_;
- HCompare* prev_compare_inst = NULL;
- for (int i = 0; i < num_smi_clauses; i++) {
+ // 1. Build all the tests, with dangling true branches. Unconditionally
+ // deoptimize if we encounter a non-smi comparison.
+ for (int i = 0; i < clause_count; ++i) {
CaseClause* clause = clauses->at(i);
if (clause->is_default()) continue;
-
- // Finish the previous graph by connecting it to the current.
- HSubgraph* subgraph = compare_graphs.at(i);
- if (prev_compare_inst == NULL) {
- ASSERT(prev_graph == current_subgraph_);
- prev_graph->exit_block()->Finish(new HGoto(subgraph->entry_block()));
- } else {
- HBasicBlock* empty = graph()->CreateBasicBlock();
- prev_graph->exit_block()->Finish(new HTest(prev_compare_inst,
- empty,
- subgraph->entry_block()));
+ if (!clause->label()->IsSmiLiteral()) {
+ return Bailout("SwitchStatement: non-literal switch label");
}
- // Build instructions for current subgraph.
- ASSERT(clause->IsSmiCompare());
- prev_compare_inst = BuildSwitchCompare(subgraph, switch_value, clause);
- if (HasStackOverflow()) return;
-
- prev_graph = subgraph;
- }
-
- // Finish last comparison if there was at least one comparison.
- // last_false_block is the (empty) false-block of the last comparison. If
- // there are no comparisons at all (a single default clause), it is just
- // the last block of the current subgraph.
- HBasicBlock* last_false_block = current_block();
- if (prev_graph != current_subgraph_) {
- last_false_block = graph()->CreateBasicBlock();
- HBasicBlock* empty = graph()->CreateBasicBlock();
- prev_graph->exit_block()->Finish(new HTest(prev_compare_inst,
- empty,
- last_false_block));
- }
-
- // If we have a non-smi compare clause, we deoptimize after trying
- // all the previous compares.
- if (num_smi_clauses < num_clauses) {
- last_false_block->FinishExitWithDeoptimization();
- }
+ // Unconditionally deoptimize on the first non-smi compare.
+ clause->RecordTypeFeedback(oracle());
+ if (!clause->IsSmiCompare()) {
+        // Finish with deoptimize and add uses of environment values to
+ // account for invisible uses.
+ current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
+ set_current_block(NULL);
+ break;
+ }
- // Build statement blocks, connect them to their comparison block and
- // to the previous statement block, if there is a fall-through.
- HSubgraph* previous_subgraph = NULL;
- for (int i = 0; i < num_clauses; i++) {
- CaseClause* clause = clauses->at(i);
- // Subgraph for the statements of the clause is only created when
- // it's reachable either from the corresponding compare or as a
- // fall-through from previous statements.
- HSubgraph* subgraph = NULL;
+ // Otherwise generate a compare and branch.
+ CHECK_ALIVE(VisitForValue(clause->label()));
+ HValue* label_value = Pop();
+ HCompare* compare =
+ new(zone()) HCompare(tag_value, label_value, Token::EQ_STRICT);
+ compare->SetInputRepresentation(Representation::Integer32());
+ ASSERT(!compare->HasSideEffects());
+ AddInstruction(compare);
+ HBasicBlock* body_block = graph()->CreateBasicBlock();
+ HBasicBlock* next_test_block = graph()->CreateBasicBlock();
+ HTest* branch = new(zone()) HTest(compare, body_block, next_test_block);
+ current_block()->Finish(branch);
+ set_current_block(next_test_block);
+ }
+
+ // Save the current block to use for the default or to join with the
+ // exit. This block is NULL if we deoptimized.
+ HBasicBlock* last_block = current_block();
+
+ // 2. Loop over the clauses and the linked list of tests in lockstep,
+ // translating the clause bodies.
+ HBasicBlock* curr_test_block = first_test_block;
+ HBasicBlock* fall_through_block = NULL;
+ BreakAndContinueInfo break_info(stmt);
+ { BreakAndContinueScope push(&break_info, this);
+ for (int i = 0; i < clause_count; ++i) {
+ CaseClause* clause = clauses->at(i);
- if (i < num_smi_clauses) {
+      // Identify the block that normal (non-fall-through) control flow
+ // goes to.
+ HBasicBlock* normal_block = NULL;
if (clause->is_default()) {
- if (!last_false_block->IsFinished()) {
- // Default clause: Connect it to the last false block.
- subgraph = CreateEmptySubgraph();
- last_false_block->Finish(new HGoto(subgraph->entry_block()));
+ if (last_block != NULL) {
+ normal_block = last_block;
+ last_block = NULL; // Cleared to indicate we've handled it.
}
- } else {
- ASSERT(clause->IsSmiCompare());
- // Connect with the corresponding comparison.
- subgraph = CreateEmptySubgraph();
- HBasicBlock* empty =
- compare_graphs.at(i)->exit_block()->end()->FirstSuccessor();
- empty->Finish(new HGoto(subgraph->entry_block()));
+ } else if (!curr_test_block->end()->IsDeoptimize()) {
+ normal_block = curr_test_block->end()->FirstSuccessor();
+ curr_test_block = curr_test_block->end()->SecondSuccessor();
}
- }
- // Check for fall-through from previous statement block.
- if (previous_subgraph != NULL && previous_subgraph->exit_block() != NULL) {
- if (subgraph == NULL) subgraph = CreateEmptySubgraph();
- previous_subgraph->exit_block()->
- Finish(new HGoto(subgraph->entry_block()));
- }
-
- if (subgraph != NULL) {
- BreakAndContinueInfo break_info(stmt);
- { BreakAndContinueScope push(&break_info, this);
- ADD_TO_SUBGRAPH(subgraph, clause->statements());
- }
- if (break_info.break_block() != NULL) {
- break_info.break_block()->SetJoinId(stmt->ExitId());
- break_info.break_block()->Finish(new HGoto(single_exit_block));
+ // Identify a block to emit the body into.
+ if (normal_block == NULL) {
+ if (fall_through_block == NULL) {
+ // (a) Unreachable.
+ if (clause->is_default()) {
+ continue; // Might still be reachable clause bodies.
+ } else {
+ break;
+ }
+ } else {
+ // (b) Reachable only as fall through.
+ set_current_block(fall_through_block);
+ }
+ } else if (fall_through_block == NULL) {
+ // (c) Reachable only normally.
+ set_current_block(normal_block);
+ } else {
+ // (d) Reachable both ways.
+ HBasicBlock* join = CreateJoin(fall_through_block,
+ normal_block,
+ clause->EntryId());
+ set_current_block(join);
}
- }
-
- previous_subgraph = subgraph;
- }
- // If the last statement block has a fall-through, connect it to the
- // single exit block.
- if (previous_subgraph != NULL && previous_subgraph->exit_block() != NULL) {
- previous_subgraph->exit_block()->Finish(new HGoto(single_exit_block));
- }
-
- // If there is no default clause finish the last comparison's false target.
- if (!last_false_block->IsFinished()) {
- last_false_block->Finish(new HGoto(single_exit_block));
+ CHECK_BAILOUT(VisitStatements(clause->statements()));
+ fall_through_block = current_block();
+ }
}
- if (single_exit_block->HasPredecessor()) {
- set_current_block(single_exit_block);
+ // Create an up-to-3-way join. Use the break block if it exists since
+ // it's already a join block.
+ HBasicBlock* break_block = break_info.break_block();
+ if (break_block == NULL) {
+ set_current_block(CreateJoin(fall_through_block,
+ last_block,
+ stmt->ExitId()));
} else {
- set_current_block(NULL);
+ if (fall_through_block != NULL) fall_through_block->Goto(break_block);
+ if (last_block != NULL) last_block->Goto(break_block);
+ break_block->SetJoinId(stmt->ExitId());
+ set_current_block(break_block);
}
}
-bool HGraph::HasOsrEntryAt(IterationStatement* statement) {
+
+bool HGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
return statement->OsrEntryId() == info()->osr_ast_id();
}
void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
- if (!graph()->HasOsrEntryAt(statement)) return;
+ if (!HasOsrEntryAt(statement)) return;
HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
HBasicBlock* osr_entry = graph()->CreateBasicBlock();
HValue* true_value = graph()->GetConstantTrue();
- HTest* test = new HTest(true_value, non_osr_entry, osr_entry);
+ HTest* test = new(zone()) HTest(true_value, non_osr_entry, osr_entry);
current_block()->Finish(test);
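  // The test is on constant true, so normal control flow always takes the
  // non-OSR branch; osr_entry is only ever entered directly via on-stack
  // replacement.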
HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
@@ -2665,312 +2728,411 @@ void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
int osr_entry_id = statement->OsrEntryId();
// We want the correct environment at the OsrEntry instruction. Build
// it explicitly. The expression stack should be empty.
- int count = environment()->length();
- ASSERT(count ==
- (environment()->parameter_count() + environment()->local_count()));
- for (int i = 0; i < count; ++i) {
- HUnknownOSRValue* unknown = new HUnknownOSRValue;
- AddInstruction(unknown);
- environment()->Bind(i, unknown);
+ ASSERT(environment()->ExpressionStackIsEmpty());
+ for (int i = 0; i < environment()->length(); ++i) {
+ HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
+ AddInstruction(osr_value);
+ environment()->Bind(i, osr_value);
}
AddSimulate(osr_entry_id);
- AddInstruction(new HOsrEntry(osr_entry_id));
+ AddInstruction(new(zone()) HOsrEntry(osr_entry_id));
+ HContext* context = new(zone()) HContext;
+ AddInstruction(context);
+ environment()->BindContext(context);
current_block()->Goto(loop_predecessor);
loop_predecessor->SetJoinId(statement->EntryId());
set_current_block(loop_predecessor);
}
+void HGraphBuilder::VisitLoopBody(Statement* body,
+ HBasicBlock* loop_entry,
+ BreakAndContinueInfo* break_info) {
+ BreakAndContinueScope push(break_info, this);
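+  // Add a stack check on the back edge so that long-running loops can be
+  // interrupted; the loop header keeps a reference to it, e.g. for the
+  // stack check eliminator.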
+ HStackCheck* stack_check =
+ new(zone()) HStackCheck(HStackCheck::kBackwardsBranch);
+ AddInstruction(stack_check);
+ ASSERT(loop_entry->IsLoopHeader());
+ loop_entry->loop_information()->set_stack_check(stack_check);
+ CHECK_BAILOUT(Visit(body));
+}
+
+
void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
-  ASSERT(current_block() != NULL);
PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeader();
- current_block()->Goto(loop_entry, false);
+ HBasicBlock* loop_entry = CreateLoopHeaderBlock();
+ current_block()->Goto(loop_entry);
set_current_block(loop_entry);
BreakAndContinueInfo break_info(stmt);
- { BreakAndContinueScope push(&break_info, this);
- Visit(stmt->body());
- CHECK_BAILOUT;
- }
+ CHECK_BAILOUT(VisitLoopBody(stmt->body(), loop_entry, &break_info));
HBasicBlock* body_exit =
JoinContinue(stmt, current_block(), break_info.continue_block());
- HBasicBlock* loop_exit = NULL;
- if (body_exit == NULL || stmt->cond()->ToBooleanIsTrue()) {
- loop_exit = CreateEndless(stmt,
- loop_entry,
- body_exit,
- break_info.break_block());
- } else {
+ HBasicBlock* loop_successor = NULL;
+ if (body_exit != NULL && !stmt->cond()->ToBooleanIsTrue()) {
set_current_block(body_exit);
- HBasicBlock* cond_true = graph()->CreateBasicBlock();
- HBasicBlock* cond_false = graph()->CreateBasicBlock();
- VISIT_FOR_CONTROL(stmt->cond(), cond_true, cond_false);
- cond_true->SetJoinId(stmt->BackEdgeId());
- cond_false->SetJoinId(stmt->ExitId());
- loop_exit = CreateDoWhile(stmt,
- loop_entry,
- cond_true,
- cond_false,
- break_info.break_block());
+ // The block for a true condition, the actual predecessor block of the
+ // back edge.
+ body_exit = graph()->CreateBasicBlock();
+ loop_successor = graph()->CreateBasicBlock();
+ CHECK_BAILOUT(VisitForControl(stmt->cond(), body_exit, loop_successor));
+ if (body_exit->HasPredecessor()) {
+ body_exit->SetJoinId(stmt->BackEdgeId());
+ } else {
+ body_exit = NULL;
+ }
+ if (loop_successor->HasPredecessor()) {
+ loop_successor->SetJoinId(stmt->ExitId());
+ } else {
+ loop_successor = NULL;
+ }
}
+ HBasicBlock* loop_exit = CreateLoop(stmt,
+ loop_entry,
+ body_exit,
+ loop_successor,
+ break_info.break_block());
set_current_block(loop_exit);
}
void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
-  ASSERT(current_block() != NULL);
PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeader();
- current_block()->Goto(loop_entry, false);
+ HBasicBlock* loop_entry = CreateLoopHeaderBlock();
+ current_block()->Goto(loop_entry);
set_current_block(loop_entry);
// If the condition is constant true, do not generate a branch.
- HBasicBlock* cond_false = NULL;
+ HBasicBlock* loop_successor = NULL;
if (!stmt->cond()->ToBooleanIsTrue()) {
- HBasicBlock* cond_true = graph()->CreateBasicBlock();
- cond_false = graph()->CreateBasicBlock();
- VISIT_FOR_CONTROL(stmt->cond(), cond_true, cond_false);
- cond_true->SetJoinId(stmt->BodyId());
- cond_false->SetJoinId(stmt->ExitId());
- set_current_block(cond_true);
+ HBasicBlock* body_entry = graph()->CreateBasicBlock();
+ loop_successor = graph()->CreateBasicBlock();
+ CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
+ if (body_entry->HasPredecessor()) {
+ body_entry->SetJoinId(stmt->BodyId());
+ set_current_block(body_entry);
+ }
+ if (loop_successor->HasPredecessor()) {
+ loop_successor->SetJoinId(stmt->ExitId());
+ } else {
+ loop_successor = NULL;
+ }
}
BreakAndContinueInfo break_info(stmt);
- { BreakAndContinueScope push(&break_info, this);
- Visit(stmt->body());
- CHECK_BAILOUT;
+ if (current_block() != NULL) {
+ BreakAndContinueScope push(&break_info, this);
+ CHECK_BAILOUT(VisitLoopBody(stmt->body(), loop_entry, &break_info));
}
HBasicBlock* body_exit =
JoinContinue(stmt, current_block(), break_info.continue_block());
- HBasicBlock* loop_exit = NULL;
- if (stmt->cond()->ToBooleanIsTrue()) {
- // TODO(fschneider): Implement peeling for endless loops as well.
- loop_exit = CreateEndless(stmt,
- loop_entry,
- body_exit,
- break_info.break_block());
- } else {
- loop_exit = CreateWhile(stmt,
- loop_entry,
- cond_false,
- body_exit,
- break_info.break_block());
- }
+ HBasicBlock* loop_exit = CreateLoop(stmt,
+ loop_entry,
+ body_exit,
+ loop_successor,
+ break_info.break_block());
set_current_block(loop_exit);
}
void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
- // Only visit the init statement in the peeled part of the loop.
- if (stmt->init() != NULL && peeled_statement_ != stmt) {
- Visit(stmt->init());
- CHECK_BAILOUT;
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ if (stmt->init() != NULL) {
+ CHECK_ALIVE(Visit(stmt->init()));
}
ASSERT(current_block() != NULL);
PreProcessOsrEntry(stmt);
- HBasicBlock* loop_entry = CreateLoopHeader();
- current_block()->Goto(loop_entry, false);
+ HBasicBlock* loop_entry = CreateLoopHeaderBlock();
+ current_block()->Goto(loop_entry);
set_current_block(loop_entry);
- HBasicBlock* cond_false = NULL;
+ HBasicBlock* loop_successor = NULL;
if (stmt->cond() != NULL) {
- HBasicBlock* cond_true = graph()->CreateBasicBlock();
- cond_false = graph()->CreateBasicBlock();
- VISIT_FOR_CONTROL(stmt->cond(), cond_true, cond_false);
- cond_true->SetJoinId(stmt->BodyId());
- cond_false->SetJoinId(stmt->ExitId());
- set_current_block(cond_true);
+ HBasicBlock* body_entry = graph()->CreateBasicBlock();
+ loop_successor = graph()->CreateBasicBlock();
+ CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
+ if (body_entry->HasPredecessor()) {
+ body_entry->SetJoinId(stmt->BodyId());
+ set_current_block(body_entry);
+ }
+ if (loop_successor->HasPredecessor()) {
+ loop_successor->SetJoinId(stmt->ExitId());
+ } else {
+ loop_successor = NULL;
+ }
}
BreakAndContinueInfo break_info(stmt);
- { BreakAndContinueScope push(&break_info, this);
- Visit(stmt->body());
- CHECK_BAILOUT;
+ if (current_block() != NULL) {
+ BreakAndContinueScope push(&break_info, this);
+ CHECK_BAILOUT(VisitLoopBody(stmt->body(), loop_entry, &break_info));
}
HBasicBlock* body_exit =
JoinContinue(stmt, current_block(), break_info.continue_block());
if (stmt->next() != NULL && body_exit != NULL) {
set_current_block(body_exit);
- Visit(stmt->next());
- CHECK_BAILOUT;
+ CHECK_BAILOUT(Visit(stmt->next()));
body_exit = current_block();
}
- HBasicBlock* loop_exit = NULL;
- if (stmt->cond() == NULL) {
- loop_exit = CreateEndless(stmt,
- loop_entry,
- body_exit,
- break_info.break_block());
- } else {
- loop_exit = CreateWhile(stmt,
- loop_entry,
- cond_false,
- body_exit,
- break_info.break_block());
- }
+ HBasicBlock* loop_exit = CreateLoop(stmt,
+ loop_entry,
+ body_exit,
+ loop_successor,
+ break_info.break_block());
set_current_block(loop_exit);
}
void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
- BAILOUT("ForInStatement");
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ return Bailout("ForInStatement");
}
void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
- BAILOUT("TryCatchStatement");
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ return Bailout("TryCatchStatement");
}
void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- BAILOUT("TryFinallyStatement");
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ return Bailout("TryFinallyStatement");
}
void HGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
- BAILOUT("DebuggerStatement");
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ return Bailout("DebuggerStatement");
+}
+
+
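+// Scans the relocation info of the given unoptimized code for an embedded
+// SharedFunctionInfo whose start position matches the function literal, so
+// that an already-compiled function does not have to be rebuilt.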
+static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
+ Code* unoptimized_code, FunctionLiteral* expr) {
+ int start_position = expr->start_position();
+ RelocIterator it(unoptimized_code);
+ for (;!it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
+ Object* obj = rinfo->target_object();
+ if (obj->IsSharedFunctionInfo()) {
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+ if (shared->start_position() == start_position) {
+ return Handle<SharedFunctionInfo>(shared);
+ }
+ }
+ }
+
+ return Handle<SharedFunctionInfo>();
}
void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
Handle<SharedFunctionInfo> shared_info =
- Compiler::BuildFunctionInfo(expr, graph_->info()->script());
- CHECK_BAILOUT;
+ SearchSharedFunctionInfo(info()->shared_info()->code(),
+ expr);
+ if (shared_info.is_null()) {
+ shared_info = Compiler::BuildFunctionInfo(expr, info()->script());
+ }
+ // We also have a stack overflow if the recursive compilation did.
+ if (HasStackOverflow()) return;
HFunctionLiteral* instr =
- new HFunctionLiteral(shared_info, expr->pretenure());
+ new(zone()) HFunctionLiteral(shared_info, expr->pretenure());
ast_context()->ReturnInstruction(instr, expr->id());
}
void HGraphBuilder::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
- BAILOUT("SharedFunctionInfoLiteral");
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ return Bailout("SharedFunctionInfoLiteral");
}
void HGraphBuilder::VisitConditional(Conditional* expr) {
- HSubgraph* then_graph = CreateEmptySubgraph();
- HSubgraph* else_graph = CreateEmptySubgraph();
- VISIT_FOR_CONTROL(expr->condition(),
- then_graph->entry_block(),
- else_graph->entry_block());
-
- then_graph->entry_block()->SetJoinId(expr->ThenId());
- ADD_TO_SUBGRAPH(then_graph, expr->then_expression());
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ HBasicBlock* cond_true = graph()->CreateBasicBlock();
+ HBasicBlock* cond_false = graph()->CreateBasicBlock();
+ CHECK_BAILOUT(VisitForControl(expr->condition(), cond_true, cond_false));
+
+ // Visit the true and false subexpressions in the same AST context as the
+ // whole expression.
+ if (cond_true->HasPredecessor()) {
+ cond_true->SetJoinId(expr->ThenId());
+ set_current_block(cond_true);
+ CHECK_BAILOUT(Visit(expr->then_expression()));
+ cond_true = current_block();
+ } else {
+ cond_true = NULL;
+ }
- else_graph->entry_block()->SetJoinId(expr->ElseId());
- ADD_TO_SUBGRAPH(else_graph, expr->else_expression());
+ if (cond_false->HasPredecessor()) {
+ cond_false->SetJoinId(expr->ElseId());
+ set_current_block(cond_false);
+ CHECK_BAILOUT(Visit(expr->else_expression()));
+ cond_false = current_block();
+ } else {
+ cond_false = NULL;
+ }
- set_current_block(CreateJoin(then_graph->exit_block(),
- else_graph->exit_block(),
- expr->id()));
- ast_context()->ReturnValue(Pop());
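+  // In a test context the branches have already jumped to the test's
+  // target blocks, so there is no join block and no value to return.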
+ if (!ast_context()->IsTest()) {
+ HBasicBlock* join = CreateJoin(cond_true, cond_false, expr->id());
+ set_current_block(join);
+ if (join != NULL && !ast_context()->IsEffect()) {
+ ast_context()->ReturnValue(Pop());
+ }
+ }
}
-void HGraphBuilder::LookupGlobalPropertyCell(Variable* var,
- LookupResult* lookup,
- bool is_store) {
- if (var->is_this()) {
- BAILOUT("global this reference");
- }
- if (!graph()->info()->has_global_object()) {
- BAILOUT("no global object to optimize VariableProxy");
+HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
+ Variable* var, LookupResult* lookup, bool is_store) {
+ if (var->is_this() || !info()->has_global_object()) {
+ return kUseGeneric;
}
- Handle<GlobalObject> global(graph()->info()->global_object());
+ Handle<GlobalObject> global(info()->global_object());
global->Lookup(*var->name(), lookup);
- if (!lookup->IsProperty()) {
- BAILOUT("global variable cell not yet introduced");
- }
- if (lookup->type() != NORMAL) {
- BAILOUT("global variable has accessors");
- }
- if (is_store && lookup->IsReadOnly()) {
- BAILOUT("read-only global variable");
- }
- if (lookup->holder() != *global) {
- BAILOUT("global property on prototype of global object");
+ if (!lookup->IsProperty() ||
+ lookup->type() != NORMAL ||
+ (is_store && lookup->IsReadOnly()) ||
+ lookup->holder() != *global) {
+ return kUseGeneric;
}
+
+ return kUseCell;
}
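+// Walk the context chain from the current context to the context that
+// holds the given variable, emitting one HOuterContext per level.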
HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
ASSERT(var->IsContextSlot());
- HInstruction* context = new HContext;
- AddInstruction(context);
- int length = graph()->info()->scope()->ContextChainLength(var->scope());
+ HValue* context = environment()->LookupContext();
+ int length = info()->scope()->ContextChainLength(var->scope());
while (length-- > 0) {
- context = new HOuterContext(context);
- AddInstruction(context);
+ HInstruction* context_instruction = new(zone()) HOuterContext(context);
+ AddInstruction(context_instruction);
+ context = context_instruction;
}
return context;
}
void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
Variable* variable = expr->AsVariable();
if (variable == NULL) {
- BAILOUT("reference to rewritten variable");
+ return Bailout("reference to rewritten variable");
} else if (variable->IsStackAllocated()) {
- if (environment()->Lookup(variable)->CheckFlag(HValue::kIsArguments)) {
- BAILOUT("unsupported context for arguments object");
+ HValue* value = environment()->Lookup(variable);
+ if (variable->mode() == Variable::CONST &&
+ value == graph()->GetConstantHole()) {
+ return Bailout("reference to uninitialized const variable");
}
- ast_context()->ReturnValue(environment()->Lookup(variable));
+ ast_context()->ReturnValue(value);
} else if (variable->IsContextSlot()) {
if (variable->mode() == Variable::CONST) {
- BAILOUT("reference to const context slot");
+ return Bailout("reference to const context slot");
}
HValue* context = BuildContextChainWalk(variable);
int index = variable->AsSlot()->index();
- HLoadContextSlot* instr = new HLoadContextSlot(context, index);
+ HLoadContextSlot* instr = new(zone()) HLoadContextSlot(context, index);
ast_context()->ReturnInstruction(instr, expr->id());
} else if (variable->is_global()) {
LookupResult lookup;
- LookupGlobalPropertyCell(variable, &lookup, false);
- CHECK_BAILOUT;
+ GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, false);
- Handle<GlobalObject> global(graph()->info()->global_object());
- // TODO(3039103): Handle global property load through an IC call when access
- // checks are enabled.
- if (global->IsAccessCheckNeeded()) {
- BAILOUT("global object requires access check");
+ if (type == kUseCell &&
+ info()->global_object()->IsAccessCheckNeeded()) {
+ type = kUseGeneric;
+ }
+
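+  // A plain data property of the global object can be loaded straight from
+  // its property cell.  The hole check is only omitted when the property is
+  // guaranteed to stay present (DontDelete and not read-only).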
+ if (type == kUseCell) {
+ Handle<GlobalObject> global(info()->global_object());
+ Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
+ bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
+ HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole);
+ ast_context()->ReturnInstruction(instr, expr->id());
+ } else {
+ HValue* context = environment()->LookupContext();
+ HGlobalObject* global_object = new(zone()) HGlobalObject(context);
+ AddInstruction(global_object);
+ HLoadGlobalGeneric* instr =
+ new(zone()) HLoadGlobalGeneric(context,
+ global_object,
+ variable->name(),
+ ast_context()->is_for_typeof());
+ instr->set_position(expr->position());
+ ASSERT(instr->HasSideEffects());
+ ast_context()->ReturnInstruction(instr, expr->id());
}
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
- bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
- HLoadGlobal* instr = new HLoadGlobal(cell, check_hole);
- ast_context()->ReturnInstruction(instr, expr->id());
} else {
- BAILOUT("reference to a variable which requires dynamic lookup");
+ return Bailout("reference to a variable which requires dynamic lookup");
}
}
void HGraphBuilder::VisitLiteral(Literal* expr) {
- HConstant* instr = new HConstant(expr->handle(), Representation::Tagged());
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ HConstant* instr =
+ new(zone()) HConstant(expr->handle(), Representation::Tagged());
ast_context()->ReturnInstruction(instr, expr->id());
}
void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
- HRegExpLiteral* instr = new HRegExpLiteral(expr->pattern(),
- expr->flags(),
- expr->literal_index());
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ HRegExpLiteral* instr = new(zone()) HRegExpLiteral(expr->pattern(),
+ expr->flags(),
+ expr->literal_index());
ast_context()->ReturnInstruction(instr, expr->id());
}
void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
- HContext* context = new HContext;
- AddInstruction(context);
- HObjectLiteral* literal = (new HObjectLiteral(context,
- expr->constant_properties(),
- expr->fast_elements(),
- expr->literal_index(),
- expr->depth()));
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ HValue* context = environment()->LookupContext();
+ HObjectLiteral* literal =
+ new(zone()) HObjectLiteral(context,
+ expr->constant_properties(),
+ expr->fast_elements(),
+ expr->literal_index(),
+ expr->depth(),
+ expr->has_function());
// The object is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
PushAndAdd(literal);
@@ -2991,15 +3153,20 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsSymbol()) {
if (property->emit_store()) {
- VISIT_FOR_VALUE(value);
+ CHECK_ALIVE(VisitForValue(value));
HValue* value = Pop();
Handle<String> name = Handle<String>::cast(key->handle());
HStoreNamedGeneric* store =
- new HStoreNamedGeneric(context, literal, name, value);
+ new(zone()) HStoreNamedGeneric(
+ context,
+ literal,
+ name,
+ value,
+ function_strict_mode());
AddInstruction(store);
AddSimulate(key->id());
} else {
- VISIT_FOR_EFFECT(value);
+ CHECK_ALIVE(VisitForEffect(value));
}
break;
}
@@ -3007,22 +3174,37 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::PROTOTYPE:
case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
- BAILOUT("Object literal with complex property");
+ return Bailout("Object literal with complex property");
default: UNREACHABLE();
}
}
- ast_context()->ReturnValue(Pop());
+
+ if (expr->has_function()) {
+ // Return the result of the transformation to fast properties
+ // instead of the original since this operation changes the map
+ // of the object. This makes sure that the original object won't
+ // be used by other optimized code before it is transformed
+ // (e.g. because of code motion).
+ HToFastProperties* result = new(zone()) HToFastProperties(Pop());
+ AddInstruction(result);
+ ast_context()->ReturnValue(result);
+ } else {
+ ast_context()->ReturnValue(Pop());
+ }
}
void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
- HArrayLiteral* literal = new HArrayLiteral(expr->constant_elements(),
- length,
- expr->literal_index(),
- expr->depth());
+ HArrayLiteral* literal = new(zone()) HArrayLiteral(expr->constant_elements(),
+ length,
+ expr->literal_index(),
+ expr->depth());
// The array is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
PushAndAdd(literal);
@@ -3035,79 +3217,26 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
- VISIT_FOR_VALUE(subexpr);
+ CHECK_ALIVE(VisitForValue(subexpr));
HValue* value = Pop();
- if (!Smi::IsValid(i)) BAILOUT("Non-smi key in array literal");
+ if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
// Load the elements array before the first store.
if (elements == NULL) {
- elements = new HLoadElements(literal);
+ elements = new(zone()) HLoadElements(literal);
AddInstruction(elements);
}
- HValue* key = AddInstruction(new HConstant(Handle<Object>(Smi::FromInt(i)),
- Representation::Integer32()));
- AddInstruction(new HStoreKeyedFastElement(elements, key, value));
+ HValue* key = AddInstruction(
+ new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
+ Representation::Integer32()));
+ AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
AddSimulate(expr->GetIdForElement(i));
}
ast_context()->ReturnValue(Pop());
}
-void HGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- BAILOUT("CatchExtensionObject");
-}
-
-
-HBasicBlock* HGraphBuilder::BuildTypeSwitch(HValue* receiver,
- ZoneMapList* maps,
- ZoneList<HSubgraph*>* body_graphs,
- HSubgraph* default_graph,
- int join_id) {
- ASSERT(maps->length() == body_graphs->length());
- HBasicBlock* join_block = graph()->CreateBasicBlock();
- AddInstruction(new HCheckNonSmi(receiver));
-
- for (int i = 0; i < maps->length(); ++i) {
- // Build the branches, connect all the target subgraphs to the join
- // block. Use the default as a target of the last branch.
- HSubgraph* if_true = body_graphs->at(i);
- HSubgraph* if_false = (i == maps->length() - 1)
- ? default_graph
- : CreateBranchSubgraph(environment());
- HCompareMap* compare =
- new HCompareMap(receiver,
- maps->at(i),
- if_true->entry_block(),
- if_false->entry_block());
- current_block()->Finish(compare);
-
- if (if_true->exit_block() != NULL) {
- // In an effect context the value of the type switch is not needed.
- // There is no need to merge it at the join block only to discard it.
- if (ast_context()->IsEffect()) {
- if_true->exit_block()->last_environment()->Drop(1);
- }
- if_true->exit_block()->Goto(join_block);
- }
-
- set_current_block(if_false->exit_block());
- }
-
- // Connect the default if necessary.
- if (current_block() != NULL) {
- if (ast_context()->IsEffect()) {
- environment()->Drop(1);
- }
- current_block()->Goto(join_block);
- }
-
- if (join_block->predecessors()->is_empty()) return NULL;
- join_block->SetJoinId(join_id);
- return join_block;
-}
-
-
// Sets the lookup result and returns true if the store can be inlined.
static bool ComputeStoredField(Handle<Map> type,
Handle<String> name,
@@ -3140,8 +3269,8 @@ HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
LookupResult* lookup,
bool smi_and_map_check) {
if (smi_and_map_check) {
- AddInstruction(new HCheckNonSmi(object));
- AddInstruction(new HCheckMap(object, type));
+ AddInstruction(new(zone()) HCheckNonSmi(object));
+ AddInstruction(new(zone()) HCheckMap(object, type));
}
int index = ComputeStoredFieldIndex(type, name, lookup);
@@ -3155,7 +3284,7 @@ HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
offset += FixedArray::kHeaderSize;
}
HStoreNamedField* instr =
- new HStoreNamedField(object, name, value, is_in_object, offset);
+ new(zone()) HStoreNamedField(object, name, value, is_in_object, offset);
if (lookup->type() == MAP_TRANSITION) {
Handle<Map> transition(lookup->GetTransitionMapFromMap(*type));
instr->set_transition(transition);
@@ -3170,9 +3299,13 @@ HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
Handle<String> name,
HValue* value) {
- HContext* context = new HContext;
- AddInstruction(context);
- return new HStoreNamedGeneric(context, object, name, value);
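+  // The current function's strict-mode flag is passed along so the store
+  // IC follows the right assignment semantics.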
+ HValue* context = environment()->LookupContext();
+ return new(zone()) HStoreNamedGeneric(
+ context,
+ object,
+ name,
+ value,
+ function_strict_mode());
}
@@ -3203,67 +3336,74 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
HValue* value,
ZoneMapList* types,
Handle<String> name) {
- int number_of_types = Min(types->length(), kMaxStorePolymorphism);
- ZoneMapList maps(number_of_types);
- ZoneList<HSubgraph*> subgraphs(number_of_types);
- bool needs_generic = (types->length() > kMaxStorePolymorphism);
-
- // Build subgraphs for each of the specific maps.
- //
- // TODO(ager): We should recognize when the prototype chains for
- // different maps are identical. In that case we can avoid
- // repeatedly generating the same prototype map checks.
- for (int i = 0; i < number_of_types; ++i) {
+ // TODO(ager): We should recognize when the prototype chains for different
+ // maps are identical. In that case we can avoid repeatedly generating the
+ // same prototype map checks.
+ int count = 0;
+ HBasicBlock* join = NULL;
+ for (int i = 0; i < types->length() && count < kMaxStorePolymorphism; ++i) {
Handle<Map> map = types->at(i);
LookupResult lookup;
if (ComputeStoredField(map, name, &lookup)) {
- HSubgraph* subgraph = CreateBranchSubgraph(environment());
- SubgraphScope scope(this, subgraph);
+ if (count == 0) {
+ AddInstruction(new(zone()) HCheckNonSmi(object)); // Only needed once.
+ join = graph()->CreateBasicBlock();
+ }
+ ++count;
+ HBasicBlock* if_true = graph()->CreateBasicBlock();
+ HBasicBlock* if_false = graph()->CreateBasicBlock();
+ HCompareMap* compare =
+ new(zone()) HCompareMap(object, map, if_true, if_false);
+ current_block()->Finish(compare);
+
+ set_current_block(if_true);
HInstruction* instr =
BuildStoreNamedField(object, name, value, map, &lookup, false);
- Push(value);
instr->set_position(expr->position());
+ // Goto will add the HSimulate for the store.
AddInstruction(instr);
- maps.Add(map);
- subgraphs.Add(subgraph);
- } else {
- needs_generic = true;
+ if (!ast_context()->IsEffect()) Push(value);
+ current_block()->Goto(join);
+
+ set_current_block(if_false);
}
}
- // If none of the properties were named fields we generate a
- // generic store.
- if (maps.length() == 0) {
+ // Finish up. Unconditionally deoptimize if we've handled all the maps we
+ // know about and do not want to handle ones we've never seen. Otherwise
+ // use a generic IC.
+ if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
+ current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
+ } else {
HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
- Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
- } else {
- // Build subgraph for generic store through IC.
- HSubgraph* default_graph = CreateBranchSubgraph(environment());
- { SubgraphScope scope(this, default_graph);
- if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
- default_graph->exit_block()->FinishExitWithDeoptimization();
- default_graph->set_exit_block(NULL);
- } else {
- HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
- Push(value);
- instr->set_position(expr->position());
- AddInstruction(instr);
- }
- }
- HBasicBlock* new_exit_block =
- BuildTypeSwitch(object, &maps, &subgraphs, default_graph, expr->id());
- set_current_block(new_exit_block);
-    // In an effect context, we did not materialize the value in the
-    // predecessor environments so there's no need to handle it here.
- if (current_block() != NULL && !ast_context()->IsEffect()) {
- ast_context()->ReturnValue(Pop());
+ if (join != NULL) {
+ if (!ast_context()->IsEffect()) Push(value);
+ current_block()->Goto(join);
+ } else {
+ // The HSimulate for the store should not see the stored value in
+ // effect contexts (it is not materialized at expr->id() in the
+ // unoptimized code).
+ if (instr->HasSideEffects()) {
+ if (ast_context()->IsEffect()) {
+ AddSimulate(expr->id());
+ } else {
+ Push(value);
+ AddSimulate(expr->id());
+ Drop(1);
+ }
+ }
+ ast_context()->ReturnValue(value);
+ return;
}
}
+
+ ASSERT(join != NULL);
+ join->SetJoinId(expr->id());
+ set_current_block(join);
+ if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
}
@@ -3271,14 +3411,14 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
expr->RecordTypeFeedback(oracle());
- VISIT_FOR_VALUE(prop->obj());
+ CHECK_ALIVE(VisitForValue(prop->obj()));
HValue* value = NULL;
HInstruction* instr = NULL;
if (prop->key()->IsPropertyName()) {
// Named store.
- VISIT_FOR_VALUE(expr->value());
+ CHECK_ALIVE(VisitForValue(expr->value()));
value = Pop();
HValue* object = Pop();
@@ -3302,28 +3442,22 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
} else {
// Keyed store.
- VISIT_FOR_VALUE(prop->key());
- VISIT_FOR_VALUE(expr->value());
+ CHECK_ALIVE(VisitForValue(prop->key()));
+ CHECK_ALIVE(VisitForValue(expr->value()));
value = Pop();
HValue* key = Pop();
HValue* object = Pop();
-
- if (expr->IsMonomorphic()) {
- Handle<Map> receiver_type(expr->GetMonomorphicReceiverType());
- // An object has either fast elements or pixel array elements, but never
- // both. Pixel array maps that are assigned to pixel array elements are
- // always created with the fast elements flag cleared.
- if (receiver_type->has_pixel_array_elements()) {
- instr = BuildStoreKeyedPixelArrayElement(object, key, value, expr);
- } else if (receiver_type->has_fast_elements()) {
- instr = BuildStoreKeyedFastElement(object, key, value, expr);
- }
- }
- if (instr == NULL) {
- instr = BuildStoreKeyedGeneric(object, key, value);
- }
+ bool has_side_effects = false;
+ HandleKeyedElementAccess(object, key, value, expr, expr->AssignmentId(),
+ expr->position(),
+ true, // is_store
+ &has_side_effects);
+ Push(value);
+ ASSERT(has_side_effects); // Stores always have side effects.
+ AddSimulate(expr->AssignmentId());
+ ast_context()->ReturnValue(Pop());
+ return;
}
-
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
@@ -3340,16 +3474,30 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
int position,
int ast_id) {
LookupResult lookup;
- LookupGlobalPropertyCell(var, &lookup, true);
- CHECK_BAILOUT;
-
- bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
- Handle<GlobalObject> global(graph()->info()->global_object());
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
- HInstruction* instr = new HStoreGlobal(value, cell, check_hole);
- instr->set_position(position);
- AddInstruction(instr);
- if (instr->HasSideEffects()) AddSimulate(ast_id);
+ GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
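+  // Stores to plain data properties of the global object write the
+  // property cell directly; everything else goes through a generic store
+  // IC on the global object.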
+ if (type == kUseCell) {
+ bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
+ Handle<GlobalObject> global(info()->global_object());
+ Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
+ HInstruction* instr = new(zone()) HStoreGlobalCell(value, cell, check_hole);
+ instr->set_position(position);
+ AddInstruction(instr);
+ if (instr->HasSideEffects()) AddSimulate(ast_id);
+ } else {
+ HValue* context = environment()->LookupContext();
+ HGlobalObject* global_object = new(zone()) HGlobalObject(context);
+ AddInstruction(global_object);
+ HStoreGlobalGeneric* instr =
+ new(zone()) HStoreGlobalGeneric(context,
+ global_object,
+ var->name(),
+ value,
+ function_strict_mode());
+ instr->set_position(position);
+ AddInstruction(instr);
+ ASSERT(instr->HasSideEffects());
+ if (instr->HasSideEffects()) AddSimulate(ast_id);
+ }
}
@@ -3365,7 +3513,11 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
BinaryOperation* operation = expr->binary_operation();
if (var != NULL) {
- VISIT_FOR_VALUE(operation);
+ if (var->mode() == Variable::CONST) {
+ return Bailout("unsupported const compound assignment");
+ }
+
+ CHECK_ALIVE(VisitForValue(operation));
if (var->is_global()) {
HandleGlobalVariableAssignment(var,
@@ -3375,13 +3527,28 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
} else if (var->IsStackAllocated()) {
Bind(var, Top());
} else if (var->IsContextSlot()) {
+ // Bail out if we try to mutate a parameter value in a function using
+ // the arguments object. We do not (yet) correctly handle the
+ // arguments property of the function.
+ if (info()->scope()->arguments() != NULL) {
+        // Parameters are rewritten to context slots.  We have no direct way
+        // to detect that the variable is a parameter.
+ int count = info()->scope()->num_parameters();
+ for (int i = 0; i < count; ++i) {
+ if (var == info()->scope()->parameter(i)) {
+ Bailout("assignment to parameter, function uses arguments object");
+ }
+ }
+ }
+
HValue* context = BuildContextChainWalk(var);
int index = var->AsSlot()->index();
- HStoreContextSlot* instr = new HStoreContextSlot(context, index, Top());
+ HStoreContextSlot* instr =
+ new(zone()) HStoreContextSlot(context, index, Top());
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
} else {
- BAILOUT("compound assignment to lookup slot");
+ return Bailout("compound assignment to lookup slot");
}
ast_context()->ReturnValue(Pop());
@@ -3390,7 +3557,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
if (prop->key()->IsPropertyName()) {
// Named property.
- VISIT_FOR_VALUE(prop->obj());
+ CHECK_ALIVE(VisitForValue(prop->obj()));
HValue* obj = Top();
HInstruction* load = NULL;
@@ -3404,7 +3571,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
PushAndAdd(load);
if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
- VISIT_FOR_VALUE(expr->value());
+ CHECK_ALIVE(VisitForValue(expr->value()));
HValue* right = Pop();
HValue* left = Pop();
@@ -3422,20 +3589,21 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
} else {
// Keyed property.
- VISIT_FOR_VALUE(prop->obj());
- VISIT_FOR_VALUE(prop->key());
+ CHECK_ALIVE(VisitForValue(prop->obj()));
+ CHECK_ALIVE(VisitForValue(prop->key()));
HValue* obj = environment()->ExpressionStackAt(1);
HValue* key = environment()->ExpressionStackAt(0);
- bool is_fast_elements = prop->IsMonomorphic() &&
- prop->GetMonomorphicReceiverType()->has_fast_elements();
- HInstruction* load = is_fast_elements
- ? BuildLoadKeyedFastElement(obj, key, prop)
- : BuildLoadKeyedGeneric(obj, key);
- PushAndAdd(load);
- if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
+ bool has_side_effects = false;
+ HValue* load = HandleKeyedElementAccess(
+ obj, key, NULL, prop, expr->CompoundLoadId(), RelocInfo::kNoPosition,
+ false, // is_store
+ &has_side_effects);
+ Push(load);
+ if (has_side_effects) AddSimulate(expr->CompoundLoadId());
+
- VISIT_FOR_VALUE(expr->value());
+ CHECK_ALIVE(VisitForValue(expr->value()));
HValue* right = Pop();
HValue* left = Pop();
@@ -3443,24 +3611,30 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
PushAndAdd(instr);
if (instr->HasSideEffects()) AddSimulate(operation->id());
- HInstruction* store = is_fast_elements
- ? BuildStoreKeyedFastElement(obj, key, instr, prop)
- : BuildStoreKeyedGeneric(obj, key, instr);
- AddInstruction(store);
+ expr->RecordTypeFeedback(oracle());
+ HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
+ RelocInfo::kNoPosition,
+ true, // is_store
+ &has_side_effects);
+
// Drop the simulated receiver, key, and value. Return the value.
Drop(3);
Push(instr);
- if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ ASSERT(has_side_effects); // Stores always have side effects.
+ AddSimulate(expr->AssignmentId());
ast_context()->ReturnValue(Pop());
}
} else {
- BAILOUT("invalid lhs in compound assignment");
+ return Bailout("invalid lhs in compound assignment");
}
}
void HGraphBuilder::VisitAssignment(Assignment* expr) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
VariableProxy* proxy = expr->target()->AsVariableProxy();
Variable* var = proxy->AsVariable();
Property* prop = expr->target()->AsProperty();
@@ -3472,37 +3646,58 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
}
if (var != NULL) {
- if (proxy->IsArguments()) BAILOUT("assignment to arguments");
+ if (var->mode() == Variable::CONST) {
+ if (expr->op() != Token::INIT_CONST) {
+ return Bailout("non-initializer assignment to const");
+ }
+ if (!var->IsStackAllocated()) {
+ return Bailout("assignment to const context slot");
+ }
+ // We insert a use of the old value to detect unsupported uses of const
+ // variables (e.g. initialization inside a loop).
+ HValue* old_value = environment()->Lookup(var);
+ AddInstruction(new HUseConst(old_value));
+ }
+
+ if (proxy->IsArguments()) return Bailout("assignment to arguments");
// Handle the assignment.
if (var->IsStackAllocated()) {
- HValue* value = NULL;
- // Handle stack-allocated variables on the right-hand side directly.
// We do not allow the arguments object to occur in a context where it
// may escape, but assignments to stack-allocated locals are
- // permitted. Handling such assignments here bypasses the check for
- // the arguments object in VisitVariableProxy.
- Variable* rhs_var = expr->value()->AsVariableProxy()->AsVariable();
- if (rhs_var != NULL && rhs_var->IsStackAllocated()) {
- value = environment()->Lookup(rhs_var);
- } else {
- VISIT_FOR_VALUE(expr->value());
- value = Pop();
- }
+ // permitted.
+ CHECK_ALIVE(VisitForValue(expr->value(), ARGUMENTS_ALLOWED));
+ HValue* value = Pop();
Bind(var, value);
ast_context()->ReturnValue(value);
- } else if (var->IsContextSlot() && var->mode() != Variable::CONST) {
- VISIT_FOR_VALUE(expr->value());
+ } else if (var->IsContextSlot()) {
+ ASSERT(var->mode() != Variable::CONST);
+ // Bail out if we try to mutate a parameter value in a function using
+ // the arguments object. We do not (yet) correctly handle the
+ // arguments property of the function.
+ if (info()->scope()->arguments() != NULL) {
+        // Parameters are rewritten to context slots.  We have no direct way
+        // to detect that the variable is a parameter.
+ int count = info()->scope()->num_parameters();
+ for (int i = 0; i < count; ++i) {
+ if (var == info()->scope()->parameter(i)) {
+ Bailout("assignment to parameter, function uses arguments object");
+ }
+ }
+ }
+
+ CHECK_ALIVE(VisitForValue(expr->value()));
HValue* context = BuildContextChainWalk(var);
int index = var->AsSlot()->index();
- HStoreContextSlot* instr = new HStoreContextSlot(context, index, Top());
+ HStoreContextSlot* instr =
+ new(zone()) HStoreContextSlot(context, index, Top());
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
ast_context()->ReturnValue(Pop());
} else if (var->is_global()) {
- VISIT_FOR_VALUE(expr->value());
+ CHECK_ALIVE(VisitForValue(expr->value()));
HandleGlobalVariableAssignment(var,
Top(),
expr->position(),
@@ -3510,107 +3705,45 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
ast_context()->ReturnValue(Pop());
} else {
- BAILOUT("assignment to LOOKUP or const CONTEXT variable");
+ return Bailout("assignment to LOOKUP or const CONTEXT variable");
}
} else if (prop != NULL) {
HandlePropertyAssignment(expr);
} else {
- BAILOUT("invalid left-hand side in assignment");
+ return Bailout("invalid left-hand side in assignment");
}
}
void HGraphBuilder::VisitThrow(Throw* expr) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
// We don't optimize functions with invalid left-hand sides in
// assignments, count operations, or for-in. Consequently throw can
// currently only occur in an effect context.
ASSERT(ast_context()->IsEffect());
- VISIT_FOR_VALUE(expr->exception());
+ CHECK_ALIVE(VisitForValue(expr->exception()));
HValue* value = environment()->Pop();
- HThrow* instr = new HThrow(value);
+ HThrow* instr = new(zone()) HThrow(value);
instr->set_position(expr->position());
AddInstruction(instr);
AddSimulate(expr->id());
- current_block()->FinishExit(new HAbnormalExit);
+ current_block()->FinishExit(new(zone()) HAbnormalExit);
set_current_block(NULL);
}
-void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
- HValue* object,
- ZoneMapList* types,
- Handle<String> name) {
- int number_of_types = Min(types->length(), kMaxLoadPolymorphism);
- ZoneMapList maps(number_of_types);
- ZoneList<HSubgraph*> subgraphs(number_of_types);
- bool needs_generic = (types->length() > kMaxLoadPolymorphism);
-
- // Build subgraphs for each of the specific maps.
- //
- // TODO(ager): We should recognize when the prototype chains for
- // different maps are identical. In that case we can avoid
- // repeatedly generating the same prototype map checks.
- for (int i = 0; i < number_of_types; ++i) {
- Handle<Map> map = types->at(i);
- LookupResult lookup;
- map->LookupInDescriptors(NULL, *name, &lookup);
- if (lookup.IsProperty() && lookup.type() == FIELD) {
- HSubgraph* subgraph = CreateBranchSubgraph(environment());
- SubgraphScope scope(this, subgraph);
- HLoadNamedField* instr =
- BuildLoadNamedField(object, expr, map, &lookup, false);
- instr->set_position(expr->position());
- instr->ClearFlag(HValue::kUseGVN); // Don't do GVN on polymorphic loads.
- PushAndAdd(instr);
- maps.Add(map);
- subgraphs.Add(subgraph);
- } else {
- needs_generic = true;
- }
- }
-
- // If none of the properties were named fields we generate a
- // generic load.
- if (maps.length() == 0) {
- HInstruction* instr = BuildLoadNamedGeneric(object, expr);
- instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
- } else {
- // Build subgraph for generic load through IC.
- HSubgraph* default_graph = CreateBranchSubgraph(environment());
- { SubgraphScope scope(this, default_graph);
- if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
- default_graph->exit_block()->FinishExitWithDeoptimization();
- default_graph->set_exit_block(NULL);
- } else {
- HInstruction* instr = BuildLoadNamedGeneric(object, expr);
- instr->set_position(expr->position());
- PushAndAdd(instr);
- }
- }
-
- HBasicBlock* new_exit_block =
- BuildTypeSwitch(object, &maps, &subgraphs, default_graph, expr->id());
- set_current_block(new_exit_block);
-    // In an effect context, we did not materialize the value in the
-    // predecessor environments so there's no need to handle it here.
- if (current_block() != NULL && !ast_context()->IsEffect()) {
- ast_context()->ReturnValue(Pop());
- }
- }
-}
-
-
HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
Property* expr,
Handle<Map> type,
LookupResult* lookup,
bool smi_and_map_check) {
if (smi_and_map_check) {
- AddInstruction(new HCheckNonSmi(object));
- AddInstruction(new HCheckMap(object, type));
+ AddInstruction(new(zone()) HCheckNonSmi(object));
+ AddInstruction(new(zone()) HCheckMap(object, type));
}
int index = lookup->GetLocalFieldIndexFromMap(*type);
@@ -3618,11 +3751,11 @@ HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
int offset = (index * kPointerSize) + type->instance_size();
- return new HLoadNamedField(object, true, offset);
+ return new(zone()) HLoadNamedField(object, true, offset);
} else {
// Non-negative property indices are in the properties array.
int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
- return new HLoadNamedField(object, false, offset);
+ return new(zone()) HLoadNamedField(object, false, offset);
}
}
@@ -3631,9 +3764,8 @@ HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* obj,
Property* expr) {
ASSERT(expr->key()->IsPropertyName());
Handle<Object> name = expr->key()->AsLiteral()->handle();
- HContext* context = new HContext;
- AddInstruction(context);
- return new HLoadNamedGeneric(context, obj, name);
+ HValue* context = environment()->LookupContext();
+ return new(zone()) HLoadNamedGeneric(context, obj, name);
}
@@ -3650,10 +3782,10 @@ HInstruction* HGraphBuilder::BuildLoadNamed(HValue* obj,
&lookup,
true);
} else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
- AddInstruction(new HCheckNonSmi(obj));
- AddInstruction(new HCheckMap(obj, map));
+ AddInstruction(new(zone()) HCheckNonSmi(obj));
+ AddInstruction(new(zone()) HCheckMap(obj, map));
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
- return new HConstant(function, Representation::Tagged());
+ return new(zone()) HConstant(function, Representation::Tagged());
} else {
return BuildLoadNamedGeneric(obj, expr);
}
@@ -3662,113 +3794,282 @@ HInstruction* HGraphBuilder::BuildLoadNamed(HValue* obj,
HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
HValue* key) {
- HContext* context = new HContext;
- AddInstruction(context);
- return new HLoadKeyedGeneric(context, object, key);
+ HValue* context = environment()->LookupContext();
+ return new(zone()) HLoadKeyedGeneric(context, object, key);
+}
+
+
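+// Build a specialized load or store for an external (typed) array.  Stored
+// values are first converted to the array's element representation:
+// clamped to uint8 for pixel arrays, converted to int32 for the integer
+// kinds, and left unchanged for the float and double kinds.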
+HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
+ HValue* external_elements,
+ HValue* checked_key,
+ HValue* val,
+ JSObject::ElementsKind elements_kind,
+ bool is_store) {
+ if (is_store) {
+ ASSERT(val != NULL);
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS: {
+ HClampToUint8* clamp = new(zone()) HClampToUint8(val);
+ AddInstruction(clamp);
+ val = clamp;
+ break;
+ }
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+ HToInt32* floor_val = new(zone()) HToInt32(val);
+ AddInstruction(floor_val);
+ val = floor_val;
+ break;
+ }
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ break;
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ return new(zone()) HStoreKeyedSpecializedArrayElement(
+ external_elements, checked_key, val, elements_kind);
+ } else {
+ return new(zone()) HLoadKeyedSpecializedArrayElement(
+ external_elements, checked_key, elements_kind);
+ }
}
-HInstruction* HGraphBuilder::BuildLoadKeyedFastElement(HValue* object,
- HValue* key,
- Property* expr) {
- ASSERT(!expr->key()->IsPropertyName() && expr->IsMonomorphic());
- AddInstruction(new HCheckNonSmi(object));
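+// Build an element access for a receiver with a single known map.  Fast
+// and external array element kinds are handled inline behind a map check;
+// any other kind falls back to the generic keyed IC.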
+HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
+ HValue* key,
+ HValue* val,
+ Expression* expr,
+ bool is_store) {
+ ASSERT(expr->IsMonomorphic());
Handle<Map> map = expr->GetMonomorphicReceiverType();
- ASSERT(map->has_fast_elements());
- AddInstruction(new HCheckMap(object, map));
- bool is_array = (map->instance_type() == JS_ARRAY_TYPE);
- HLoadElements* elements = new HLoadElements(object);
+ if (!map->has_fast_elements() && !map->has_external_array_elements()) {
+ return is_store ? BuildStoreKeyedGeneric(object, key, val)
+ : BuildLoadKeyedGeneric(object, key);
+ }
+ AddInstruction(new(zone()) HCheckNonSmi(object));
+ AddInstruction(new(zone()) HCheckMap(object, map));
+ HInstruction* elements = new(zone()) HLoadElements(object);
HInstruction* length = NULL;
- if (is_array) {
- length = AddInstruction(new HJSArrayLength(object));
- AddInstruction(new HBoundsCheck(key, length));
+ HInstruction* checked_key = NULL;
+ if (map->has_external_array_elements()) {
+ AddInstruction(elements);
+ length = AddInstruction(new(zone()) HExternalArrayLength(elements));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ HLoadExternalArrayPointer* external_elements =
+ new(zone()) HLoadExternalArrayPointer(elements);
+ AddInstruction(external_elements);
+ return BuildExternalArrayElementAccess(external_elements, checked_key,
+ val, map->elements_kind(), is_store);
+ }
+ ASSERT(map->has_fast_elements());
+ if (map->instance_type() == JS_ARRAY_TYPE) {
+ length = AddInstruction(new(zone()) HJSArrayLength(object));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
AddInstruction(elements);
} else {
AddInstruction(elements);
- length = AddInstruction(new HFixedArrayLength(elements));
- AddInstruction(new HBoundsCheck(key, length));
+ length = AddInstruction(new(zone()) HFixedArrayLength(elements));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ }
+ if (is_store) {
+ return new(zone()) HStoreKeyedFastElement(elements, checked_key, val);
+ } else {
+ return new(zone()) HLoadKeyedFastElement(elements, checked_key);
}
- return new HLoadKeyedFastElement(elements, key);
}
-HInstruction* HGraphBuilder::BuildLoadKeyedPixelArrayElement(HValue* object,
- HValue* key,
- Property* expr) {
- ASSERT(!expr->key()->IsPropertyName() && expr->IsMonomorphic());
- AddInstruction(new HCheckNonSmi(object));
- Handle<Map> map = expr->GetMonomorphicReceiverType();
- ASSERT(!map->has_fast_elements());
- ASSERT(map->has_pixel_array_elements());
- AddInstruction(new HCheckMap(object, map));
- HLoadElements* elements = new HLoadElements(object);
- AddInstruction(elements);
- HInstruction* length = new HPixelArrayLength(elements);
- AddInstruction(length);
- AddInstruction(new HBoundsCheck(key, length));
- HLoadPixelArrayExternalPointer* external_elements =
- new HLoadPixelArrayExternalPointer(elements);
- AddInstruction(external_elements);
- HLoadPixelArrayElement* pixel_array_value =
- new HLoadPixelArrayElement(external_elements, key);
- return pixel_array_value;
-}
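+// Build an element access for a receiver with several known maps.  The
+// generated code dispatches at run time on the receiver's elements kind,
+// with one branch per kind seen in the type feedback, and deoptimizes
+// when none of the kinds match.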
+HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
+ HValue* key,
+ HValue* val,
+ Expression* prop,
+ int ast_id,
+ int position,
+ bool is_store,
+ bool* has_side_effects) {
+ *has_side_effects = false;
+ AddInstruction(new(zone()) HCheckNonSmi(object));
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(object));
+ ZoneMapList* maps = prop->GetReceiverTypes();
+ bool todo_external_array = false;
+ static const int kNumElementTypes = JSObject::kElementsKindCount;
+ bool type_todo[kNumElementTypes];
+ for (int i = 0; i < kNumElementTypes; ++i) {
+ type_todo[i] = false;
+ }
-HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
- HValue* key,
- HValue* value) {
- HContext* context = new HContext;
- AddInstruction(context);
- return new HStoreKeyedGeneric(context, object, key, value);
+ for (int i = 0; i < maps->length(); ++i) {
+ ASSERT(maps->at(i)->IsMap());
+ type_todo[maps->at(i)->elements_kind()] = true;
+ if (maps->at(i)->elements_kind()
+ >= JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
+ todo_external_array = true;
+ }
+ }
+  // We can't handle dictionary elements here (we need to deopt instead).
+ type_todo[JSObject::DICTIONARY_ELEMENTS] = false;
+ // Support for FAST_DOUBLE_ELEMENTS isn't implemented yet, so we deopt.
+ type_todo[JSObject::FAST_DOUBLE_ELEMENTS] = false;
+
+ HBasicBlock* join = graph()->CreateBasicBlock();
+
+ HInstruction* elements_kind_instr =
+ AddInstruction(new(zone()) HElementsKind(object));
+ HInstruction* elements = NULL;
+ HLoadExternalArrayPointer* external_elements = NULL;
+ HInstruction* checked_key = NULL;
+
+ // FAST_ELEMENTS is assumed to be the first case.
+ STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
+
+ for (JSObject::ElementsKind elements_kind = JSObject::FAST_ELEMENTS;
+ elements_kind <= JSObject::LAST_ELEMENTS_KIND;
+ elements_kind = JSObject::ElementsKind(elements_kind + 1)) {
+ // After having handled FAST_ELEMENTS in the first run of the loop, we
+ // need to add some code that's executed for all other cases.
+ if (elements_kind == 1 && todo_external_array) {
+ elements = AddInstruction(new(zone()) HLoadElements(object));
+ // We need to forcibly prevent some ElementsKind-dependent instructions
+ // from being hoisted out of any loops they might occur in, because
+ // the current loop-invariant-code-motion algorithm isn't clever enough
+ // to deal with them properly.
+ // There's some performance to be gained by developing a smarter
+ // solution for this.
+ elements->ClearFlag(HValue::kUseGVN);
+ HInstruction* length =
+ AddInstruction(new(zone()) HExternalArrayLength(elements));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ external_elements = new(zone()) HLoadExternalArrayPointer(elements);
+ AddInstruction(external_elements);
+ }
+ if (type_todo[elements_kind]) {
+ HBasicBlock* if_true = graph()->CreateBasicBlock();
+ HBasicBlock* if_false = graph()->CreateBasicBlock();
+ HCompareConstantEq* compare = new(zone()) HCompareConstantEq(
+ elements_kind_instr,
+ elements_kind,
+ Token::EQ_STRICT);
+ AddInstruction(compare);
+ HTest* branch = new(zone()) HTest(compare, if_true, if_false);
+ current_block()->Finish(branch);
+
+ set_current_block(if_true);
+ HInstruction* access;
+ if (elements_kind == JSObject::FAST_ELEMENTS) {
+ HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
+ HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
+ HInstruction* typecheck =
+ AddInstruction(new(zone()) HHasInstanceType(object, JS_ARRAY_TYPE));
+ HTest* test = new(zone()) HTest(typecheck, if_jsarray, if_fastobject);
+ current_block()->Finish(test);
+
+ set_current_block(if_jsarray);
+ HInstruction* length = new(zone()) HJSArrayLength(object);
+ AddInstruction(length);
+ length->ClearFlag(HValue::kUseGVN);
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ elements = AddInstruction(new(zone()) HLoadElements(object));
+ elements->ClearFlag(HValue::kUseGVN);
+ if (is_store) {
+ access = AddInstruction(
+ new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
+ } else {
+ access = AddInstruction(
+ new(zone()) HLoadKeyedFastElement(elements, checked_key));
+ Push(access);
+ }
+ *has_side_effects |= access->HasSideEffects();
+ if (position != -1) {
+ access->set_position(position);
+ }
+ if_jsarray->Goto(join);
+
+ set_current_block(if_fastobject);
+ elements = AddInstruction(new(zone()) HLoadElements(object));
+ elements->ClearFlag(HValue::kUseGVN);
+ length = AddInstruction(new(zone()) HFixedArrayLength(elements));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ if (is_store) {
+ access = AddInstruction(
+ new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
+ } else {
+ access = AddInstruction(
+ new(zone()) HLoadKeyedFastElement(elements, checked_key));
+ }
+ } else { // External array elements.
+ access = AddInstruction(BuildExternalArrayElementAccess(
+ external_elements, checked_key, val, elements_kind, is_store));
+ }
+ *has_side_effects |= access->HasSideEffects();
+ access->set_position(position);
+ if (!is_store) {
+ Push(access);
+ }
+ current_block()->Goto(join);
+ set_current_block(if_false);
+ }
+ }
+
+ // Deopt if none of the cases matched.
+ current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
+ join->SetJoinId(ast_id);
+ set_current_block(join);
+ return is_store ? NULL : Pop();
}
-HInstruction* HGraphBuilder::BuildStoreKeyedFastElement(HValue* object,
- HValue* key,
- HValue* val,
- Expression* expr) {
- ASSERT(expr->IsMonomorphic());
- AddInstruction(new HCheckNonSmi(object));
- Handle<Map> map = expr->GetMonomorphicReceiverType();
- ASSERT(map->has_fast_elements());
- AddInstruction(new HCheckMap(object, map));
- HInstruction* elements = AddInstruction(new HLoadElements(object));
- AddInstruction(new HCheckMap(elements, Factory::fixed_array_map()));
- bool is_array = (map->instance_type() == JS_ARRAY_TYPE);
- HInstruction* length = NULL;
- if (is_array) {
- length = AddInstruction(new HJSArrayLength(object));
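+// Dispatch a keyed load or store to the monomorphic, polymorphic, or
+// generic code path based on the recorded type feedback, and report
+// through *has_side_effects whether the caller needs to add an HSimulate.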
+HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
+ HValue* key,
+ HValue* val,
+ Expression* expr,
+ int ast_id,
+ int position,
+ bool is_store,
+ bool* has_side_effects) {
+ ASSERT(!expr->IsPropertyName());
+ HInstruction* instr = NULL;
+ if (expr->IsMonomorphic()) {
+ instr = BuildMonomorphicElementAccess(obj, key, val, expr, is_store);
+ } else if (expr->GetReceiverTypes() != NULL &&
+ !expr->GetReceiverTypes()->is_empty()) {
+ return HandlePolymorphicElementAccess(
+ obj, key, val, expr, ast_id, position, is_store, has_side_effects);
} else {
- length = AddInstruction(new HFixedArrayLength(elements));
+ if (is_store) {
+ instr = BuildStoreKeyedGeneric(obj, key, val);
+ } else {
+ instr = BuildLoadKeyedGeneric(obj, key);
+ }
}
- AddInstruction(new HBoundsCheck(key, length));
- return new HStoreKeyedFastElement(elements, key, val);
+ instr->set_position(position);
+ AddInstruction(instr);
+ *has_side_effects = instr->HasSideEffects();
+ return instr;
}
-HInstruction* HGraphBuilder::BuildStoreKeyedPixelArrayElement(
- HValue* object,
- HValue* key,
- HValue* val,
- Expression* expr) {
- ASSERT(expr->IsMonomorphic());
- AddInstruction(new HCheckNonSmi(object));
- Handle<Map> map = expr->GetMonomorphicReceiverType();
- ASSERT(!map->has_fast_elements());
- ASSERT(map->has_pixel_array_elements());
- AddInstruction(new HCheckMap(object, map));
- HLoadElements* elements = new HLoadElements(object);
- AddInstruction(elements);
- HInstruction* length = AddInstruction(new HPixelArrayLength(elements));
- AddInstruction(new HBoundsCheck(key, length));
- HLoadPixelArrayExternalPointer* external_elements =
- new HLoadPixelArrayExternalPointer(elements);
- AddInstruction(external_elements);
- return new HStorePixelArrayElement(external_elements, key, val);
+HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
+ HValue* key,
+ HValue* value) {
+ HValue* context = environment()->LookupContext();
+ return new(zone()) HStoreKeyedGeneric(
+ context,
+ object,
+ key,
+ value,
+ function_strict_mode());
}
-
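+// Try to compile a property access on the arguments object as a direct
+// read of the current frame's argument area.  Returns true when the
+// access was handled here (including the bailout cases), false when the
+// caller should emit an ordinary property access.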
bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
VariableProxy* proxy = expr->obj()->AsVariableProxy();
if (proxy == NULL) return false;
@@ -3777,22 +4078,31 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
return false;
}
+ // Our implementation of arguments (based on this stack frame or an
+ // adapter below it) does not work for inlined functions.
+ if (function_state()->outer() != NULL) {
+ Bailout("arguments access in inlined function");
+ return true;
+ }
+
HInstruction* result = NULL;
if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
if (!name->IsEqualTo(CStrVector("length"))) return false;
- HInstruction* elements = AddInstruction(new HArgumentsElements);
- result = new HArgumentsLength(elements);
+ HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+ result = new(zone()) HArgumentsLength(elements);
} else {
Push(graph()->GetArgumentsObject());
VisitForValue(expr->key());
- if (HasStackOverflow()) return false;
+ if (HasStackOverflow() || current_block() == NULL) return true;
HValue* key = Pop();
Drop(1); // Arguments object.
- HInstruction* elements = AddInstruction(new HArgumentsElements);
- HInstruction* length = AddInstruction(new HArgumentsLength(elements));
- AddInstruction(new HBoundsCheck(key, length));
- result = new HAccessArgumentsAt(elements, length, key);
+ HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+ HInstruction* length = AddInstruction(
+ new(zone()) HArgumentsLength(elements));
+ HInstruction* checked_key =
+ AddInstruction(new(zone()) HBoundsCheck(key, length));
+ result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
}
ast_context()->ReturnInstruction(result, expr->id());
return true;
@@ -3800,32 +4110,39 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
void HGraphBuilder::VisitProperty(Property* expr) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
expr->RecordTypeFeedback(oracle());
if (TryArgumentsAccess(expr)) return;
- CHECK_BAILOUT;
- VISIT_FOR_VALUE(expr->obj());
+ CHECK_ALIVE(VisitForValue(expr->obj()));
HInstruction* instr = NULL;
if (expr->IsArrayLength()) {
HValue* array = Pop();
- AddInstruction(new HCheckNonSmi(array));
- AddInstruction(new HCheckInstanceType(array, JS_ARRAY_TYPE, JS_ARRAY_TYPE));
- instr = new HJSArrayLength(array);
+ AddInstruction(new(zone()) HCheckNonSmi(array));
+ AddInstruction(HCheckInstanceType::NewIsJSArray(array));
+ instr = new(zone()) HJSArrayLength(array);
} else if (expr->IsStringLength()) {
HValue* string = Pop();
- AddInstruction(new HCheckNonSmi(string));
- AddInstruction(new HCheckInstanceType(string,
- FIRST_STRING_TYPE,
- LAST_STRING_TYPE));
- instr = new HStringLength(string);
+ AddInstruction(new(zone()) HCheckNonSmi(string));
+ AddInstruction(HCheckInstanceType::NewIsString(string));
+ instr = new(zone()) HStringLength(string);
+ } else if (expr->IsStringAccess()) {
+ CHECK_ALIVE(VisitForValue(expr->key()));
+ HValue* index = Pop();
+ HValue* string = Pop();
+ HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
+ AddInstruction(char_code);
+ instr = new(zone()) HStringCharFromCode(char_code);
} else if (expr->IsFunctionPrototype()) {
HValue* function = Pop();
- AddInstruction(new HCheckNonSmi(function));
- instr = new HLoadFunctionPrototype(function);
+ AddInstruction(new(zone()) HCheckNonSmi(function));
+ instr = new(zone()) HLoadFunctionPrototype(function);
} else if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
@@ -3835,33 +4152,34 @@ void HGraphBuilder::VisitProperty(Property* expr) {
if (expr->IsMonomorphic()) {
instr = BuildLoadNamed(obj, expr, types->first(), name);
} else if (types != NULL && types->length() > 1) {
- HandlePolymorphicLoadNamedField(expr, obj, types, name);
- return;
-
+ AddInstruction(new(zone()) HCheckNonSmi(obj));
+ instr = new(zone()) HLoadNamedFieldPolymorphic(obj, types, name);
} else {
instr = BuildLoadNamedGeneric(obj, expr);
}
} else {
- VISIT_FOR_VALUE(expr->key());
+ CHECK_ALIVE(VisitForValue(expr->key()));
HValue* key = Pop();
HValue* obj = Pop();
- if (expr->IsMonomorphic()) {
- Handle<Map> receiver_type(expr->GetMonomorphicReceiverType());
- // An object has either fast elements or pixel array elements, but never
- // both. Pixel array maps that are assigned to pixel array elements are
- // always created with the fast elements flag cleared.
- if (receiver_type->has_pixel_array_elements()) {
- instr = BuildLoadKeyedPixelArrayElement(obj, key, expr);
- } else if (receiver_type->has_fast_elements()) {
- instr = BuildLoadKeyedFastElement(obj, key, expr);
+ bool has_side_effects = false;
+ HValue* load = HandleKeyedElementAccess(
+ obj, key, NULL, expr, expr->id(), expr->position(),
+ false, // is_store
+ &has_side_effects);
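+    // The HSimulate at expr->id() should see the loaded value on the
+    // expression stack only in contexts where the unoptimized code
+    // materializes it.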
+ if (has_side_effects) {
+ if (ast_context()->IsEffect()) {
+ AddSimulate(expr->id());
+ } else {
+ Push(load);
+ AddSimulate(expr->id());
+ Drop(1);
}
}
- if (instr == NULL) {
- instr = BuildLoadKeyedGeneric(obj, key);
- }
+ ast_context()->ReturnValue(load);
+ return;
}
instr->set_position(expr->position());
ast_context()->ReturnInstruction(instr, expr->id());
@@ -3876,11 +4194,11 @@ void HGraphBuilder::AddCheckConstantFunction(Call* expr,
// are overwritten. Therefore it is enough to check the map of the holder and
// its prototypes.
if (smi_and_map_check) {
- AddInstruction(new HCheckNonSmi(receiver));
- AddInstruction(new HCheckMap(receiver, receiver_map));
+ AddInstruction(new(zone()) HCheckNonSmi(receiver));
+ AddInstruction(new(zone()) HCheckMap(receiver, receiver_map));
}
if (!expr->holder().is_null()) {
- AddInstruction(new HCheckPrototypeMaps(
+ AddInstruction(new(zone()) HCheckPrototypeMaps(
Handle<JSObject>(JSObject::cast(receiver_map->prototype())),
expr->holder()));
}
@@ -3891,90 +4209,98 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
HValue* receiver,
ZoneMapList* types,
Handle<String> name) {
- int argument_count = expr->arguments()->length() + 1; // Plus receiver.
- int number_of_types = Min(types->length(), kMaxCallPolymorphism);
- ZoneMapList maps(number_of_types);
- ZoneList<HSubgraph*> subgraphs(number_of_types);
- bool needs_generic = (types->length() > kMaxCallPolymorphism);
-
- // Build subgraphs for each of the specific maps.
- //
// TODO(ager): We should recognize when the prototype chains for different
// maps are identical. In that case we can avoid repeatedly generating the
// same prototype map checks.
- for (int i = 0; i < number_of_types; ++i) {
+ int argument_count = expr->arguments()->length() + 1; // Includes receiver.
+ int count = 0;
+ HBasicBlock* join = NULL;
+ for (int i = 0; i < types->length() && count < kMaxCallPolymorphism; ++i) {
Handle<Map> map = types->at(i);
if (expr->ComputeTarget(map, name)) {
- HSubgraph* subgraph = CreateBranchSubgraph(environment());
- SubgraphScope scope(this, subgraph);
+ if (count == 0) {
+ // Only needed once.
+ AddInstruction(new(zone()) HCheckNonSmi(receiver));
+ join = graph()->CreateBasicBlock();
+ }
+ ++count;
+ HBasicBlock* if_true = graph()->CreateBasicBlock();
+ HBasicBlock* if_false = graph()->CreateBasicBlock();
+ HCompareMap* compare =
+ new(zone()) HCompareMap(receiver, map, if_true, if_false);
+ current_block()->Finish(compare);
+
+ set_current_block(if_true);
AddCheckConstantFunction(expr, receiver, map, false);
if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
PrintF("Trying to inline the polymorphic call to %s\n",
*name->ToCString());
}
- if (!FLAG_polymorphic_inlining || !TryInline(expr)) {
- // Check for bailout, as trying to inline might fail due to bailout
- // during hydrogen processing.
- CHECK_BAILOUT;
+ if (FLAG_polymorphic_inlining && TryInline(expr)) {
+ // Trying to inline will signal that we should bailout from the
+ // entire compilation by setting stack overflow on the visitor.
+ if (HasStackOverflow()) return;
+ } else {
HCallConstantFunction* call =
- new HCallConstantFunction(expr->target(), argument_count);
+ new(zone()) HCallConstantFunction(expr->target(), argument_count);
call->set_position(expr->position());
PreProcessCall(call);
- PushAndAdd(call);
+ AddInstruction(call);
+ if (!ast_context()->IsEffect()) Push(call);
}
- maps.Add(map);
- subgraphs.Add(subgraph);
- } else {
- needs_generic = true;
+
+ if (current_block() != NULL) current_block()->Goto(join);
+ set_current_block(if_false);
}
}
- // If we couldn't compute the target for any of the maps just perform an
- // IC call.
- if (maps.length() == 0) {
- HContext* context = new HContext;
- AddInstruction(context);
- HCallNamed* call = new HCallNamed(context, name, argument_count);
+ // Finish up. Unconditionally deoptimize if we've handled all the maps we
+ // know about and do not want to handle ones we've never seen. Otherwise
+ // use a generic IC.
+ if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
+ current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
+ } else {
+ HValue* context = environment()->LookupContext();
+ HCallNamed* call = new(zone()) HCallNamed(context, name, argument_count);
call->set_position(expr->position());
PreProcessCall(call);
- ast_context()->ReturnInstruction(call, expr->id());
- } else {
- // Build subgraph for generic call through IC.
- HSubgraph* default_graph = CreateBranchSubgraph(environment());
- { SubgraphScope scope(this, default_graph);
- if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
- default_graph->exit_block()->FinishExitWithDeoptimization();
- default_graph->set_exit_block(NULL);
- } else {
- HContext* context = new HContext;
- AddInstruction(context);
- HCallNamed* call = new HCallNamed(context, name, argument_count);
- call->set_position(expr->position());
- PreProcessCall(call);
- PushAndAdd(call);
- }
- }
- HBasicBlock* new_exit_block =
- BuildTypeSwitch(receiver, &maps, &subgraphs, default_graph, expr->id());
- set_current_block(new_exit_block);
- // In an effect context, we did not materialize the value in the
- // predecessor environments, so there's no need to handle it here.
- if (new_exit_block != NULL && !ast_context()->IsEffect()) {
- ast_context()->ReturnValue(Pop());
+ if (join != NULL) {
+ AddInstruction(call);
+ if (!ast_context()->IsEffect()) Push(call);
+ current_block()->Goto(join);
+ } else {
+ ast_context()->ReturnInstruction(call, expr->id());
+ return;
}
}
+
+ // If the join block has predecessors, it becomes the exit block and we
+ // continue building instructions there. Otherwise control flow has
+ // ended on every path and the current block is cleared.
+ ASSERT(join != NULL);
+ if (join->HasPredecessor()) {
+ set_current_block(join);
+ join->SetJoinId(expr->id());
+ if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ } else {
+ set_current_block(NULL);
+ }
}
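
The hunk above replaces the old subgraph-based type switch with an explicit chain of map comparisons ending in a join block. Roughly, the emitted control flow behaves like the following standalone sketch; everything below (types, names, the limit value) is an illustrative stand-in mirroring the identifiers in the hunk, not V8 API:

    // Hypothetical analogue of the dispatch HandlePolymorphicCallNamed
    // now emits: compare the receiver's map against each map seen by
    // type feedback, call the known target on a match, and either
    // deoptimize or fall back to a generic IC call otherwise.
    #include <cstdio>

    struct Map { int id; };
    struct Object { const Map* map; };

    static const int kMaxCallPolymorphism = 4;  // stand-in limit

    void Deoptimize() { std::puts("deopt: unseen map"); }
    void GenericCall() { std::puts("generic IC call"); }
    void DirectCall(int map_id) { std::printf("direct call, map %d\n", map_id); }

    // 'all_maps_known' plays the role of (count == types->length()).
    void PolymorphicCall(const Object* receiver, const Map* const* known,
                         int n, bool all_maps_known) {
      for (int i = 0; i < n && i < kMaxCallPolymorphism; ++i) {
        if (receiver->map == known[i]) {  // HCompareMap
          DirectCall(known[i]->id);       // HCallConstantFunction or inlined
          return;                         // Goto(join)
        }
      }
      if (all_maps_known) {
        Deoptimize();   // FinishExitWithDeoptimization
      } else {
        GenericCall();  // HCallNamed through the IC
      }
    }

    int main() {
      Map a{1}, b{2}, c{3};
      const Map* known[] = {&a, &b};
      Object x{&c};
      PolymorphicCall(&x, known, 2, /*all_maps_known=*/false);
    }

The deoptimizing branch corresponds to FLAG_deoptimize_uncommon_cases: once every map recorded by type feedback is handled directly, any other map is assumed not to occur.
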
-void HGraphBuilder::TraceInline(Handle<JSFunction> target, bool result) {
- SmartPointer<char> callee = target->shared()->DebugName()->ToCString();
- SmartPointer<char> caller =
- graph()->info()->function()->debug_name()->ToCString();
- if (result) {
- PrintF("Inlined %s called from %s.\n", *callee, *caller);
- } else {
- PrintF("Do not inline %s called from %s.\n", *callee, *caller);
+void HGraphBuilder::TraceInline(Handle<JSFunction> target,
+ Handle<JSFunction> caller,
+ const char* reason) {
+ if (FLAG_trace_inlining) {
+ SmartPointer<char> target_name = target->shared()->DebugName()->ToCString();
+ SmartPointer<char> caller_name = caller->shared()->DebugName()->ToCString();
+ if (reason == NULL) {
+ PrintF("Inlined %s called from %s.\n", *target_name, *caller_name);
+ } else {
+ PrintF("Did not inline %s called from %s (%s).\n",
+ *target_name, *caller_name, reason);
+ }
}
}
@@ -3982,157 +4308,186 @@ void HGraphBuilder::TraceInline(Handle<JSFunction> target, bool result) {
bool HGraphBuilder::TryInline(Call* expr) {
if (!FLAG_use_inlining) return false;
+ // The function call we are inlining is a method call if the call
+ // is a property call.
+ CallKind call_kind = (expr->expression()->AsProperty() == NULL)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+
// Precondition: call is monomorphic and we have found a target with the
// appropriate arity.
+ Handle<JSFunction> caller = info()->closure();
Handle<JSFunction> target = expr->target();
+ Handle<SharedFunctionInfo> target_shared(target->shared());
// Do a quick check on source code length to avoid parsing large
// inlining candidates.
if (FLAG_limit_inlining && target->shared()->SourceSize() > kMaxSourceSize) {
- if (FLAG_trace_inlining) TraceInline(target, false);
+ TraceInline(target, caller, "target text too big");
return false;
}
// Target must be inlineable.
- if (!target->IsInlineable()) return false;
+ if (!target->IsInlineable()) {
+ TraceInline(target, caller, "target not inlineable");
+ return false;
+ }
// No context change required.
- CompilationInfo* outer_info = graph()->info();
+ CompilationInfo* outer_info = info();
if (target->context() != outer_info->closure()->context() ||
outer_info->scope()->contains_with() ||
outer_info->scope()->num_heap_slots() > 0) {
+ TraceInline(target, caller, "target requires context change");
return false;
}
- // Don't inline deeper than two calls.
+ // Don't inline deeper than kMaxInliningLevels calls.
HEnvironment* env = environment();
- if (env->outer() != NULL && env->outer()->outer() != NULL) return false;
+ int current_level = 1;
+ while (env->outer() != NULL) {
+ if (current_level == Compiler::kMaxInliningLevels) {
+ TraceInline(target, caller, "inline depth limit reached");
+ return false;
+ }
+ current_level++;
+ env = env->outer();
+ }
// Don't inline recursive functions.
- if (target->shared() == outer_info->closure()->shared()) return false;
+ if (*target_shared == outer_info->closure()->shared()) {
+ TraceInline(target, caller, "target is recursive");
+ return false;
+ }
// We don't want to add more than a certain number of nodes from inlining.
if (FLAG_limit_inlining && inlined_count_ > kMaxInlinedNodes) {
- if (FLAG_trace_inlining) TraceInline(target, false);
+ TraceInline(target, caller, "cumulative AST node limit reached");
return false;
}
int count_before = AstNode::Count();
// Parse and allocate variables.
- CompilationInfo inner_info(target);
- if (!ParserApi::Parse(&inner_info) ||
- !Scope::Analyze(&inner_info)) {
- if (Top::has_pending_exception()) {
+ CompilationInfo target_info(target);
+ if (!ParserApi::Parse(&target_info) ||
+ !Scope::Analyze(&target_info)) {
+ if (target_info.isolate()->has_pending_exception()) {
+ // Parse or scope error, never optimize this function.
SetStackOverflow();
- // Stop trying to optimize and inline this function.
- target->shared()->set_optimization_disabled(true);
+ target_shared->DisableOptimization(*target);
}
+ TraceInline(target, caller, "parse failure");
return false;
}
- if (inner_info.scope()->num_heap_slots() > 0) return false;
- FunctionLiteral* function = inner_info.function();
- // Count the number of AST nodes added by inlining this call.
- int nodes_added = AstNode::Count() - count_before;
- if (FLAG_limit_inlining && nodes_added > kMaxInlinedSize) {
- if (FLAG_trace_inlining) TraceInline(target, false);
+ if (target_info.scope()->num_heap_slots() > 0) {
+ TraceInline(target, caller, "target has context-allocated variables");
return false;
}
+ FunctionLiteral* function = target_info.function();
- // Check if we can handle all declarations in the inlined functions.
- VisitDeclarations(inner_info.scope()->declarations());
- if (HasStackOverflow()) {
- ClearStackOverflow();
+ // Count the number of AST nodes added by inlining this call.
+ int nodes_added = AstNode::Count() - count_before;
+ if (FLAG_limit_inlining && nodes_added > kMaxInlinedSize) {
+ TraceInline(target, caller, "target AST is too large");
return false;
}
// Don't inline functions that use the arguments object or that
// have a mismatching number of parameters.
- Handle<SharedFunctionInfo> shared(target->shared());
int arity = expr->arguments()->length();
if (function->scope()->arguments() != NULL ||
- arity != shared->formal_parameter_count()) {
+ arity != target_shared->formal_parameter_count()) {
+ TraceInline(target, caller, "target requires special argument handling");
return false;
}
+ // All declarations must be inlineable.
+ ZoneList<Declaration*>* decls = target_info.scope()->declarations();
+ int decl_count = decls->length();
+ for (int i = 0; i < decl_count; ++i) {
+ if (!decls->at(i)->IsInlineable()) {
+ TraceInline(target, caller, "target has non-trivial declaration");
+ return false;
+ }
+ }
// All statements in the body must be inlineable.
for (int i = 0, count = function->body()->length(); i < count; ++i) {
- if (!function->body()->at(i)->IsInlineable()) return false;
+ if (!function->body()->at(i)->IsInlineable()) {
+ TraceInline(target, caller, "target contains unsupported syntax");
+ return false;
+ }
}
// Generate the deoptimization data for the unoptimized version of
// the target function if we don't already have it.
- if (!shared->has_deoptimization_support()) {
+ if (!target_shared->has_deoptimization_support()) {
// Note that we compile here using the same AST that we will use for
// generating the optimized inline code.
- inner_info.EnableDeoptimizationSupport();
- if (!FullCodeGenerator::MakeCode(&inner_info)) return false;
- shared->EnableDeoptimizationSupport(*inner_info.code());
- Compiler::RecordFunctionCompilation(
- Logger::FUNCTION_TAG, &inner_info, shared);
+ target_info.EnableDeoptimizationSupport();
+ if (!FullCodeGenerator::MakeCode(&target_info)) {
+ TraceInline(target, caller, "could not generate deoptimization info");
+ return false;
+ }
+ target_shared->EnableDeoptimizationSupport(*target_info.code());
+ Compiler::RecordFunctionCompilation(Logger::FUNCTION_TAG,
+ &target_info,
+ target_shared);
}
+ // ----------------------------------------------------------------
+ // After this point, we've made a decision to inline this function (so
+ // TryInline should always return true).
+
// Save the pending call context and type feedback oracle. Set up new ones
// for the inlined function.
- ASSERT(shared->has_deoptimization_support());
- AstContext* saved_call_context = call_context();
- HBasicBlock* saved_function_return = function_return();
- TypeFeedbackOracle* saved_oracle = oracle();
- // On-stack replacement cannot target inlined functions. Since we don't
- // use a separate CompilationInfo structure for the inlined function, we
- // save and restore the AST ID in the original compilation info.
- int saved_osr_ast_id = graph()->info()->osr_ast_id();
-
- TestContext* test_context = NULL;
- if (ast_context()->IsTest()) {
- // Inlined body is treated as if it occurs in an 'inlined' call context
- // with true and false blocks that will forward to the real ones.
- HBasicBlock* if_true = graph()->CreateBasicBlock();
- HBasicBlock* if_false = graph()->CreateBasicBlock();
- if_true->MarkAsInlineReturnTarget();
- if_false->MarkAsInlineReturnTarget();
- // AstContext constructor pushes on the context stack.
- test_context = new TestContext(this, if_true, if_false);
- function_return_ = NULL;
- } else {
- // Inlined body is treated as if it occurs in the original call context.
- function_return_ = graph()->CreateBasicBlock();
- function_return_->MarkAsInlineReturnTarget();
- }
- call_context_ = ast_context();
- TypeFeedbackOracle new_oracle(
- Handle<Code>(shared->code()),
+ ASSERT(target_shared->has_deoptimization_support());
+ TypeFeedbackOracle target_oracle(
+ Handle<Code>(target_shared->code()),
Handle<Context>(target->context()->global_context()));
- oracle_ = &new_oracle;
- graph()->info()->SetOsrAstId(AstNode::kNoNumber);
+ FunctionState target_state(this, &target_info, &target_oracle);
- HSubgraph* body = CreateInlinedSubgraph(env, target, function);
- body->exit_block()->AddInstruction(new HEnterInlined(target, function));
- AddToSubgraph(body, function->body());
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HEnvironment* inner_env =
+ environment()->CopyForInlining(target,
+ function,
+ undefined,
+ call_kind);
+ HBasicBlock* body_entry = CreateBasicBlock(inner_env);
+ current_block()->Goto(body_entry);
+ body_entry->SetJoinId(expr->ReturnId());
+ set_current_block(body_entry);
+ AddInstruction(new(zone()) HEnterInlined(target,
+ function,
+ call_kind));
+ VisitDeclarations(target_info.scope()->declarations());
+ VisitStatements(function->body());
if (HasStackOverflow()) {
// Bail out if the inline function did, as we cannot residualize a call
// instead.
- delete test_context;
- call_context_ = saved_call_context;
- function_return_ = saved_function_return;
- oracle_ = saved_oracle;
- graph()->info()->SetOsrAstId(saved_osr_ast_id);
- return false;
+ TraceInline(target, caller, "inline graph construction failed");
+ target_shared->DisableOptimization(*target);
+ inline_bailout_ = true;
+ return true;
}
// Update inlined nodes count.
inlined_count_ += nodes_added;
- if (FLAG_trace_inlining) TraceInline(target, true);
+ TraceInline(target, caller, NULL);
- if (body->exit_block() != NULL) {
+ if (current_block() != NULL) {
// Add a return of undefined if control can fall off the body. In a
// test context, undefined is false.
- HValue* return_value = graph()->GetConstantUndefined();
- if (test_context == NULL) {
- ASSERT(function_return_ != NULL);
- body->exit_block()->AddLeaveInlined(return_value, function_return_);
+ if (inlined_test_context() == NULL) {
+ ASSERT(function_return() != NULL);
+ ASSERT(call_context()->IsEffect() || call_context()->IsValue());
+ if (call_context()->IsEffect()) {
+ current_block()->Goto(function_return());
+ } else {
+ current_block()->AddLeaveInlined(undefined, function_return());
+ }
} else {
// The graph builder assumes control can reach both branches of a
// test, so we materialize the undefined value and test it rather than
@@ -4141,76 +4496,47 @@ bool HGraphBuilder::TryInline(Call* expr) {
// TODO(3168478): refactor to avoid this.
HBasicBlock* empty_true = graph()->CreateBasicBlock();
HBasicBlock* empty_false = graph()->CreateBasicBlock();
- HTest* test = new HTest(return_value, empty_true, empty_false);
- body->exit_block()->Finish(test);
+ HTest* test = new(zone()) HTest(undefined, empty_true, empty_false);
+ current_block()->Finish(test);
- HValue* const no_return_value = NULL;
- empty_true->AddLeaveInlined(no_return_value, test_context->if_true());
- empty_false->AddLeaveInlined(no_return_value, test_context->if_false());
+ empty_true->Goto(inlined_test_context()->if_true());
+ empty_false->Goto(inlined_test_context()->if_false());
}
- body->set_exit_block(NULL);
}
- // Record the environment at the inlined function call.
- AddSimulate(expr->ReturnId());
-
- // Jump to the function entry (without re-recording the environment).
- current_block()->Finish(new HGoto(body->entry_block()));
-
// Fix up the function exits.
- if (test_context != NULL) {
- HBasicBlock* if_true = test_context->if_true();
- HBasicBlock* if_false = test_context->if_false();
- if_true->SetJoinId(expr->id());
- if_false->SetJoinId(expr->id());
- ASSERT(ast_context() == test_context);
- delete test_context; // Destructor pops from expression context stack.
+ if (inlined_test_context() != NULL) {
+ HBasicBlock* if_true = inlined_test_context()->if_true();
+ HBasicBlock* if_false = inlined_test_context()->if_false();
+
+ // Pop the return test context from the expression context stack.
+ ASSERT(ast_context() == inlined_test_context());
+ ClearInlinedTestContext();
// Forward to the real test context.
- HValue* const no_return_value = NULL;
- HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
- if (true_target->IsInlineReturnTarget()) {
- if_true->AddLeaveInlined(no_return_value, true_target);
- } else {
+ if (if_true->HasPredecessor()) {
+ if_true->SetJoinId(expr->id());
+ HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
if_true->Goto(true_target);
}
-
- HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
- if (false_target->IsInlineReturnTarget()) {
- if_false->AddLeaveInlined(no_return_value, false_target);
- } else {
+ if (if_false->HasPredecessor()) {
+ if_false->SetJoinId(expr->id());
+ HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
if_false->Goto(false_target);
}
-
- // TODO(kmillikin): Come up with a better way to handle this. It is too
- // subtle. NULL here indicates that the enclosing context has no control
- // flow to handle.
set_current_block(NULL);
+ } else if (function_return()->HasPredecessor()) {
+ function_return()->SetJoinId(expr->id());
+ set_current_block(function_return());
} else {
- function_return_->SetJoinId(expr->id());
- set_current_block(function_return_);
+ set_current_block(NULL);
}
- call_context_ = saved_call_context;
- function_return_ = saved_function_return;
- oracle_ = saved_oracle;
- graph()->info()->SetOsrAstId(saved_osr_ast_id);
-
return true;
}
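
The depth check introduced above replaces the old hard-coded two-level limit with a walk up the environment chain against Compiler::kMaxInliningLevels. A minimal sketch of the same walk, with Env and the level constant as stand-ins for HEnvironment and the real (unstated here) limit:

    #include <cstdio>

    struct Env { Env* outer; };

    static const int kMaxInliningLevels = 8;  // illustrative value only

    // Mirrors the loop in TryInline: count environments up the chain
    // and refuse once the limit would be exceeded.
    bool WithinInlineDepth(Env* env) {
      int level = 1;
      while (env->outer != nullptr) {
        if (level == kMaxInliningLevels) return false;
        ++level;
        env = env->outer;
      }
      return true;
    }

    int main() {
      Env outermost{nullptr};
      Env inner{&outermost};
      std::printf("%d\n", WithinInlineDepth(&inner));  // prints 1
    }
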
-void HBasicBlock::AddLeaveInlined(HValue* return_value, HBasicBlock* target) {
- ASSERT(target->IsInlineReturnTarget());
- AddInstruction(new HLeaveInlined);
- HEnvironment* outer = last_environment()->outer();
- if (return_value != NULL) outer->Push(return_value);
- UpdateEnvironment(outer);
- Goto(target);
-}
-
-
bool HGraphBuilder::TryInlineBuiltinFunction(Call* expr,
HValue* receiver,
Handle<Map> receiver_map,
@@ -4222,14 +4548,22 @@ bool HGraphBuilder::TryInlineBuiltinFunction(Call* expr,
int argument_count = expr->arguments()->length() + 1; // Plus receiver.
switch (id) {
case kStringCharCodeAt:
+ case kStringCharAt:
if (argument_count == 2 && check_type == STRING_CHECK) {
HValue* index = Pop();
HValue* string = Pop();
ASSERT(!expr->holder().is_null());
- AddInstruction(new HCheckPrototypeMaps(
+ AddInstruction(new(zone()) HCheckPrototypeMaps(
oracle()->GetPrototypeForPrimitiveCheck(STRING_CHECK),
expr->holder()));
- HStringCharCodeAt* result = BuildStringCharCodeAt(string, index);
+ HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
+ if (id == kStringCharCodeAt) {
+ ast_context()->ReturnInstruction(char_code, expr->id());
+ return true;
+ }
+ AddInstruction(char_code);
+ HStringCharFromCode* result =
+ new(zone()) HStringCharFromCode(char_code);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
@@ -4245,7 +4579,7 @@ bool HGraphBuilder::TryInlineBuiltinFunction(Call* expr,
AddCheckConstantFunction(expr, receiver, receiver_map, true);
HValue* argument = Pop();
Drop(1); // Receiver.
- HUnaryMathOperation* op = new HUnaryMathOperation(argument, id);
+ HUnaryMathOperation* op = new(zone()) HUnaryMathOperation(argument, id);
op->set_position(expr->position());
ast_context()->ReturnInstruction(op, expr->id());
return true;
@@ -4262,30 +4596,30 @@ bool HGraphBuilder::TryInlineBuiltinFunction(Call* expr,
if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) {
double exponent = HConstant::cast(right)->DoubleValue();
if (exponent == 0.5) {
- result = new HUnaryMathOperation(left, kMathPowHalf);
+ result = new(zone()) HUnaryMathOperation(left, kMathPowHalf);
} else if (exponent == -0.5) {
HConstant* double_one =
- new HConstant(Handle<Object>(Smi::FromInt(1)),
- Representation::Double());
+ new(zone()) HConstant(Handle<Object>(Smi::FromInt(1)),
+ Representation::Double());
AddInstruction(double_one);
HUnaryMathOperation* square_root =
- new HUnaryMathOperation(left, kMathPowHalf);
+ new(zone()) HUnaryMathOperation(left, kMathPowHalf);
AddInstruction(square_root);
// MathPowHalf doesn't have side effects so there's no need for
// an environment simulation here.
ASSERT(!square_root->HasSideEffects());
- result = new HDiv(double_one, square_root);
+ result = new(zone()) HDiv(double_one, square_root);
} else if (exponent == 2.0) {
- result = new HMul(left, left);
+ result = new(zone()) HMul(left, left);
}
} else if (right->IsConstant() &&
HConstant::cast(right)->HasInteger32Value() &&
HConstant::cast(right)->Integer32Value() == 2) {
- result = new HMul(left, left);
+ result = new(zone()) HMul(left, left);
}
if (result == NULL) {
- result = new HPower(left, right);
+ result = new(zone()) HPower(left, right);
}
ast_context()->ReturnInstruction(result, expr->id());
return true;
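
The Math.pow lowering above strength-reduces constant exponents. A hedged scalar analogue of the rewrite (it glosses over JavaScript corner cases such as pow(-0, 0.5) and pow(-Infinity, 0.5), which the real instructions must still respect):

    #include <cmath>
    #include <cstdio>

    // pow(x, 0.5) -> sqrt (kMathPowHalf), pow(x, -0.5) -> 1/sqrt
    // (HDiv of the double constant 1 by the square root), and
    // pow(x, 2) -> x*x (HMul); everything else stays a full HPower.
    double LoweredPow(double x, double y) {
      if (y == 0.5)  return std::sqrt(x);
      if (y == -0.5) return 1.0 / std::sqrt(x);
      if (y == 2.0)  return x * x;
      return std::pow(x, y);
    }

    int main() { std::printf("%g\n", LoweredPow(9.0, 0.5)); }  // prints 3
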
@@ -4304,7 +4638,7 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
Property* prop = callee->AsProperty();
ASSERT(prop != NULL);
- if (graph()->info()->scope()->arguments() == NULL) return false;
+ if (info()->scope()->arguments() == NULL) return false;
Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
if (!name->IsEqualTo(CStrVector("apply"))) return false;
@@ -4320,35 +4654,38 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
if (!expr->IsMonomorphic() ||
expr->check_type() != RECEIVER_MAP_CHECK) return false;
+ // Our implementation of arguments (based on this stack frame or an
+ // adapter below it) does not work for inlined functions.
+ if (function_state()->outer() != NULL) {
+ Bailout("Function.prototype.apply optimization in inlined function");
+ return true;
+ }
+
// Found pattern f.apply(receiver, arguments).
VisitForValue(prop->obj());
- if (HasStackOverflow()) return false;
+ if (HasStackOverflow() || current_block() == NULL) return true;
HValue* function = Pop();
VisitForValue(args->at(0));
- if (HasStackOverflow()) return false;
+ if (HasStackOverflow() || current_block() == NULL) return true;
HValue* receiver = Pop();
- HInstruction* elements = AddInstruction(new HArgumentsElements);
- HInstruction* length = AddInstruction(new HArgumentsLength(elements));
+ HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+ HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
AddCheckConstantFunction(expr,
function,
expr->GetReceiverTypes()->first(),
true);
HInstruction* result =
- new HApplyArguments(function, receiver, length, elements);
+ new(zone()) HApplyArguments(function, receiver, length, elements);
result->set_position(expr->position());
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
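
TryCallApply matches the exact source pattern f.apply(receiver, arguments) and forwards the caller's own actual arguments straight from its stack frame instead of materializing an arguments object. Very loosely, in plain C++ terms, with the array/length pair standing in for HArgumentsElements/HArgumentsLength (all names here are illustrative):

    #include <cstdio>

    // Stand-in for the optimized callee invocation (HApplyArguments):
    // the caller's actuals are read directly from its frame.
    double Sum(const double* elements, int length) {
      double total = 0.0;
      for (int i = 0; i < length; ++i) total += elements[i];
      return total;
    }

    int main() {
      double actuals[] = {1.0, 2.0, 3.0};    // the caller's arguments
      std::printf("%g\n", Sum(actuals, 3));  // prints 6
    }

The new bailout above exists because this frame-walking scheme cannot see through an inlined caller, whose frame (or arguments adapter) does not physically exist.
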
-static bool HasCustomCallGenerator(Handle<JSFunction> function) {
- SharedFunctionInfo* info = function->shared();
- return info->HasBuiltinFunctionId() &&
- CallStubCompiler::HasCustomCallGenerator(info->builtin_function_id());
-}
-
-
void HGraphBuilder::VisitCall(Call* expr) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
Expression* callee = expr->expression();
int argument_count = expr->arguments()->length() + 1; // Plus receiver.
HInstruction* call = NULL;
@@ -4357,40 +4694,35 @@ void HGraphBuilder::VisitCall(Call* expr) {
if (prop != NULL) {
if (!prop->key()->IsPropertyName()) {
// Keyed function call.
- VISIT_FOR_VALUE(prop->obj());
+ CHECK_ALIVE(VisitArgument(prop->obj()));
- VISIT_FOR_VALUE(prop->key());
+ CHECK_ALIVE(VisitForValue(prop->key()));
// Push receiver and key as the non-optimized code generator expects them.
HValue* key = Pop();
HValue* receiver = Pop();
Push(key);
Push(receiver);
- VisitExpressions(expr->arguments());
- CHECK_BAILOUT;
+ CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- HContext* context = new HContext;
- AddInstruction(context);
- call = PreProcessCall(new HCallKeyed(context, key, argument_count));
+ HValue* context = environment()->LookupContext();
+ call = new(zone()) HCallKeyed(context, key, argument_count);
call->set_position(expr->position());
- Drop(1); // Key.
+ Drop(argument_count + 1); // 1 is the key.
ast_context()->ReturnInstruction(call, expr->id());
return;
}
// Named function call.
- expr->RecordTypeFeedback(oracle());
+ expr->RecordTypeFeedback(oracle(), CALL_AS_METHOD);
if (TryCallApply(expr)) return;
- CHECK_BAILOUT;
- VISIT_FOR_VALUE(prop->obj());
- VisitExpressions(expr->arguments());
- CHECK_BAILOUT;
+ CHECK_ALIVE(VisitForValue(prop->obj()));
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- expr->RecordTypeFeedback(oracle());
ZoneMapList* types = expr->GetReceiverTypes();
HValue* receiver =
@@ -4405,36 +4737,21 @@ void HGraphBuilder::VisitCall(Call* expr) {
return;
}
- if (HasCustomCallGenerator(expr->target()) ||
+ if (CallStubCompiler::HasCustomCallGenerator(*expr->target()) ||
expr->check_type() != RECEIVER_MAP_CHECK) {
// When the target has a custom call IC generator, use the IC,
- // because it is likely to generate better code. Also use the
- // IC when a primitive receiver check is required.
- HContext* context = new HContext;
- AddInstruction(context);
- call = PreProcessCall(new HCallNamed(context, name, argument_count));
+ // because it is likely to generate better code. Also use the IC
+ // when a primitive receiver check is required.
+ HValue* context = environment()->LookupContext();
+ call = PreProcessCall(
+ new(zone()) HCallNamed(context, name, argument_count));
} else {
AddCheckConstantFunction(expr, receiver, receiver_map, true);
- if (TryInline(expr)) {
- if (current_block() != NULL) {
- HValue* return_value = Pop();
- // If we inlined a function in a test context then we need to emit
- // a simulate here to shadow the ones at the end of the
- // predecessor blocks. Those environments contain the return
- // value on top and do not correspond to any actual state of the
- // unoptimized code.
- if (ast_context()->IsEffect()) AddSimulate(expr->id());
- ast_context()->ReturnValue(return_value);
- }
- return;
- } else {
- // Check for bailout, as the TryInline call in the if condition above
- // might return false due to bailout during hydrogen processing.
- CHECK_BAILOUT;
- call = PreProcessCall(new HCallConstantFunction(expr->target(),
- argument_count));
- }
+ if (TryInline(expr)) return;
+ call = PreProcessCall(
+ new(zone()) HCallConstantFunction(expr->target(),
+ argument_count));
}
} else if (types != NULL && types->length() > 1) {
ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
@@ -4442,45 +4759,42 @@ void HGraphBuilder::VisitCall(Call* expr) {
return;
} else {
- HContext* context = new HContext;
- AddInstruction(context);
- call = PreProcessCall(new HCallNamed(context, name, argument_count));
+ HValue* context = environment()->LookupContext();
+ call = PreProcessCall(
+ new(zone()) HCallNamed(context, name, argument_count));
}
} else {
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
bool global_call = (var != NULL) && var->is_global() && !var->is_this();
- if (!global_call) {
- ++argument_count;
- VISIT_FOR_VALUE(expr->expression());
- }
-
if (global_call) {
+ bool known_global_function = false;
// If there is a global property cell for the name at compile time and
// access checks are not needed, we assume that the function will not
// change and generate optimized code for calling the function.
- CompilationInfo* info = graph()->info();
- bool known_global_function = info->has_global_object() &&
- !info->global_object()->IsAccessCheckNeeded() &&
- expr->ComputeGlobalTarget(Handle<GlobalObject>(info->global_object()),
- var->name());
+ LookupResult lookup;
+ GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
+ if (type == kUseCell &&
+ !info()->global_object()->IsAccessCheckNeeded()) {
+ Handle<GlobalObject> global(info()->global_object());
+ known_global_function = expr->ComputeGlobalTarget(global, &lookup);
+ }
if (known_global_function) {
// Push the global object instead of the global receiver because
// code generated by the full code generator expects it.
- HContext* context = new HContext;
- HGlobalObject* global_object = new HGlobalObject(context);
- AddInstruction(context);
+ HValue* context = environment()->LookupContext();
+ HGlobalObject* global_object = new(zone()) HGlobalObject(context);
PushAndAdd(global_object);
- VisitExpressions(expr->arguments());
- CHECK_BAILOUT;
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
- VISIT_FOR_VALUE(expr->expression());
+ CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Pop();
- AddInstruction(new HCheckFunction(function, expr->target()));
+ AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
// Replace the global object with the global receiver.
- HGlobalReceiver* global_receiver = new HGlobalReceiver(global_object);
+ HGlobalReceiver* global_receiver =
+ new(zone()) HGlobalReceiver(global_object);
// Index of the receiver from the top of the expression stack.
const int receiver_index = argument_count - 1;
AddInstruction(global_receiver);
@@ -4488,47 +4802,34 @@ void HGraphBuilder::VisitCall(Call* expr) {
IsGlobalObject());
environment()->SetExpressionStackAt(receiver_index, global_receiver);
- if (TryInline(expr)) {
- if (current_block() != NULL) {
- HValue* return_value = Pop();
- // If we inlined a function in a test context then we need to
- // emit a simulate here to shadow the ones at the end of the
- // predecessor blocks. Those environments contain the return
- // value on top and do not correspond to any actual state of the
- // unoptimized code.
- if (ast_context()->IsEffect()) AddSimulate(expr->id());
- ast_context()->ReturnValue(return_value);
- }
- return;
- }
- // Check for bailout, as trying to inline might fail due to bailout
- // during hydrogen processing.
- CHECK_BAILOUT;
-
- call = PreProcessCall(new HCallKnownGlobal(expr->target(),
- argument_count));
+ if (TryInline(expr)) return;
+ call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
+ argument_count));
} else {
- HContext* context = new HContext;
- AddInstruction(context);
- PushAndAdd(new HGlobalObject(context));
- VisitExpressions(expr->arguments());
- CHECK_BAILOUT;
-
- call = PreProcessCall(new HCallGlobal(context,
- var->name(),
- argument_count));
+ HValue* context = environment()->LookupContext();
+ HGlobalObject* receiver = new(zone()) HGlobalObject(context);
+ AddInstruction(receiver);
+ PushAndAdd(new(zone()) HPushArgument(receiver));
+ CHECK_ALIVE(VisitArgumentList(expr->arguments()));
+
+ call = new(zone()) HCallGlobal(context, var->name(), argument_count);
+ Drop(argument_count);
}
} else {
- HContext* context = new HContext;
- HGlobalObject* global_object = new HGlobalObject(context);
- AddInstruction(context);
+ CHECK_ALIVE(VisitArgument(expr->expression()));
+ HValue* context = environment()->LookupContext();
+ HGlobalObject* global_object = new(zone()) HGlobalObject(context);
+ HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global_object);
AddInstruction(global_object);
- PushAndAdd(new HGlobalReceiver(global_object));
- VisitExpressions(expr->arguments());
- CHECK_BAILOUT;
+ AddInstruction(receiver);
+ PushAndAdd(new(zone()) HPushArgument(receiver));
+ CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- call = PreProcessCall(new HCallFunction(context, argument_count));
+ // The function to call is treated as an argument to the call function
+ // stub.
+ call = new(zone()) HCallFunction(context, argument_count + 1);
+ Drop(argument_count + 1);
}
}
@@ -4538,22 +4839,23 @@ void HGraphBuilder::VisitCall(Call* expr) {
void HGraphBuilder::VisitCallNew(CallNew* expr) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
// The constructor function is also used as the receiver argument to the
// JS construct call builtin.
- VISIT_FOR_VALUE(expr->expression());
- VisitExpressions(expr->arguments());
- CHECK_BAILOUT;
+ HValue* constructor = NULL;
+ CHECK_ALIVE(constructor = VisitArgument(expr->expression()));
+ CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- HContext* context = new HContext;
- AddInstruction(context);
+ HValue* context = environment()->LookupContext();
// The constructor is both an operand to the instruction and an argument
// to the construct call.
int arg_count = expr->arguments()->length() + 1; // Plus constructor.
- HValue* constructor = environment()->ExpressionStackAt(arg_count - 1);
- HCallNew* call = new HCallNew(context, constructor, arg_count);
+ HCallNew* call = new(zone()) HCallNew(context, constructor, arg_count);
call->set_position(expr->position());
- PreProcessCall(call);
+ Drop(arg_count);
ast_context()->ReturnInstruction(call, expr->id());
}
@@ -4574,11 +4876,14 @@ const HGraphBuilder::InlineFunctionGenerator
void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
if (expr->is_jsruntime()) {
- BAILOUT("call to a JavaScript runtime function");
+ return Bailout("call to a JavaScript runtime function");
}
- Runtime::Function* function = expr->function();
+ const Runtime::Function* function = expr->function();
ASSERT(function != NULL);
if (function->intrinsic_type == Runtime::INLINE) {
ASSERT(expr->name()->length() > 0);
@@ -4595,12 +4900,12 @@ void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
(this->*generator)(expr);
} else {
ASSERT(function->intrinsic_type == Runtime::RUNTIME);
- VisitArgumentList(expr->arguments());
- CHECK_BAILOUT;
+ CHECK_ALIVE(VisitArgumentList(expr->arguments()));
Handle<String> name = expr->name();
int argument_count = expr->arguments()->length();
- HCallRuntime* call = new HCallRuntime(name, function, argument_count);
+ HCallRuntime* call =
+ new(zone()) HCallRuntime(name, function, argument_count);
call->set_position(RelocInfo::kNoPosition);
Drop(argument_count);
ast_context()->ReturnInstruction(call, expr->id());
@@ -4609,133 +4914,216 @@ void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
- Token::Value op = expr->op();
- if (op == Token::VOID) {
- VISIT_FOR_EFFECT(expr->expression());
- ast_context()->ReturnValue(graph()->GetConstantUndefined());
- } else if (op == Token::DELETE) {
- Property* prop = expr->expression()->AsProperty();
- Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- if (prop == NULL && var == NULL) {
- // Result of deleting non-property, non-variable reference is true.
- // Evaluate the subexpression for side effects.
- VISIT_FOR_EFFECT(expr->expression());
- ast_context()->ReturnValue(graph()->GetConstantTrue());
- } else if (var != NULL &&
- !var->is_global() &&
- var->AsSlot() != NULL &&
- var->AsSlot()->type() != Slot::LOOKUP) {
- // Result of deleting non-global, non-dynamic variables is false.
- // The subexpression does not have side effects.
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ switch (expr->op()) {
+ case Token::DELETE: return VisitDelete(expr);
+ case Token::VOID: return VisitVoid(expr);
+ case Token::TYPEOF: return VisitTypeof(expr);
+ case Token::ADD: return VisitAdd(expr);
+ case Token::SUB: return VisitSub(expr);
+ case Token::BIT_NOT: return VisitBitNot(expr);
+ case Token::NOT: return VisitNot(expr);
+ default: UNREACHABLE();
+ }
+}
+
+void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
+ Property* prop = expr->expression()->AsProperty();
+ Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+ if (prop == NULL && var == NULL) {
+ // Result of deleting non-property, non-variable reference is true.
+ // Evaluate the subexpression for side effects.
+ CHECK_ALIVE(VisitForEffect(expr->expression()));
+ ast_context()->ReturnValue(graph()->GetConstantTrue());
+ } else if (var != NULL &&
+ !var->is_global() &&
+ var->AsSlot() != NULL &&
+ var->AsSlot()->type() != Slot::LOOKUP) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ ast_context()->ReturnValue(graph()->GetConstantFalse());
+ } else if (prop != NULL) {
+ if (prop->is_synthetic()) {
+ // Result of deleting parameters is false, even when they rewrite
+ // to accesses on the arguments object.
ast_context()->ReturnValue(graph()->GetConstantFalse());
- } else if (prop != NULL) {
- if (prop->is_synthetic()) {
- // Result of deleting parameters is false, even when they rewrite
- // to accesses on the arguments object.
- ast_context()->ReturnValue(graph()->GetConstantFalse());
- } else {
- VISIT_FOR_VALUE(prop->obj());
- VISIT_FOR_VALUE(prop->key());
- HValue* key = Pop();
- HValue* obj = Pop();
- HDeleteProperty* instr = new HDeleteProperty(obj, key);
- ast_context()->ReturnInstruction(instr, expr->id());
- }
- } else if (var->is_global()) {
- BAILOUT("delete with global variable");
- } else {
- BAILOUT("delete with non-global variable");
- }
- } else if (op == Token::NOT) {
- if (ast_context()->IsTest()) {
- TestContext* context = TestContext::cast(ast_context());
- VisitForControl(expr->expression(),
- context->if_false(),
- context->if_true());
- } else if (ast_context()->IsValue()) {
- HSubgraph* true_graph = CreateEmptySubgraph();
- HSubgraph* false_graph = CreateEmptySubgraph();
- VISIT_FOR_CONTROL(expr->expression(),
- false_graph->entry_block(),
- true_graph->entry_block());
- true_graph->entry_block()->SetJoinId(expr->expression()->id());
- true_graph->exit_block()->last_environment()->Push(
- graph_->GetConstantTrue());
-
- false_graph->entry_block()->SetJoinId(expr->expression()->id());
- false_graph->exit_block()->last_environment()->Push(
- graph_->GetConstantFalse());
-
- set_current_block(CreateJoin(true_graph->exit_block(),
- false_graph->exit_block(),
- expr->id()));
- ast_context()->ReturnValue(Pop());
- } else {
- ASSERT(ast_context()->IsEffect());
- VISIT_FOR_EFFECT(expr->expression());
- }
-
- } else if (op == Token::BIT_NOT || op == Token::SUB) {
- VISIT_FOR_VALUE(expr->expression());
- HValue* value = Pop();
- HInstruction* instr = NULL;
- switch (op) {
- case Token::BIT_NOT:
- instr = new HBitNot(value);
- break;
- case Token::SUB:
- instr = new HMul(graph_->GetConstantMinus1(), value);
- break;
- default:
- UNREACHABLE();
- break;
+ } else {
+ CHECK_ALIVE(VisitForValue(prop->obj()));
+ CHECK_ALIVE(VisitForValue(prop->key()));
+ HValue* key = Pop();
+ HValue* obj = Pop();
+ HDeleteProperty* instr = new(zone()) HDeleteProperty(obj, key);
+ ast_context()->ReturnInstruction(instr, expr->id());
}
- ast_context()->ReturnInstruction(instr, expr->id());
- } else if (op == Token::TYPEOF) {
- VISIT_FOR_VALUE(expr->expression());
- HValue* value = Pop();
- ast_context()->ReturnInstruction(new HTypeof(value), expr->id());
+ } else if (var->is_global()) {
+ Bailout("delete with global variable");
} else {
- BAILOUT("Value: unsupported unary operation");
+ Bailout("delete with non-global variable");
}
}
-void HGraphBuilder::VisitIncrementOperation(IncrementOperation* expr) {
- // IncrementOperation is never visited by the visitor. It only
- // occurs as a subexpression of CountOperation.
- UNREACHABLE();
+void HGraphBuilder::VisitVoid(UnaryOperation* expr) {
+ CHECK_ALIVE(VisitForEffect(expr->expression()));
+ ast_context()->ReturnValue(graph()->GetConstantUndefined());
+}
+
+
+void HGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+ CHECK_ALIVE(VisitForTypeOf(expr->expression()));
+ HValue* value = Pop();
+ ast_context()->ReturnInstruction(new(zone()) HTypeof(value), expr->id());
+}
+
+
+void HGraphBuilder::VisitAdd(UnaryOperation* expr) {
+ CHECK_ALIVE(VisitForValue(expr->expression()));
+ HValue* value = Pop();
+ HInstruction* instr = new(zone()) HMul(value, graph_->GetConstant1());
+ ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HGraphBuilder::VisitSub(UnaryOperation* expr) {
+ CHECK_ALIVE(VisitForValue(expr->expression()));
+ HValue* value = Pop();
+ HInstruction* instr = new(zone()) HMul(value, graph_->GetConstantMinus1());
+ TypeInfo info = oracle()->UnaryType(expr);
+ if (info.IsUninitialized()) {
+ AddInstruction(new(zone()) HSoftDeoptimize);
+ current_block()->MarkAsDeoptimizing();
+ info = TypeInfo::Unknown();
+ }
+ Representation rep = ToRepresentation(info);
+ TraceRepresentation(expr->op(), info, instr, rep);
+ instr->AssumeRepresentation(rep);
+ ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HGraphBuilder::VisitBitNot(UnaryOperation* expr) {
+ CHECK_ALIVE(VisitForValue(expr->expression()));
+ HValue* value = Pop();
+ TypeInfo info = oracle()->UnaryType(expr);
+ if (info.IsUninitialized()) {
+ AddInstruction(new(zone()) HSoftDeoptimize);
+ current_block()->MarkAsDeoptimizing();
+ }
+ HInstruction* instr = new(zone()) HBitNot(value);
+ ast_context()->ReturnInstruction(instr, expr->id());
}
-HInstruction* HGraphBuilder::BuildIncrement(HValue* value, bool increment) {
- HConstant* delta = increment
+void HGraphBuilder::VisitNot(UnaryOperation* expr) {
+ // TODO(svenpanne) Perhaps a switch/virtual function is nicer here.
+ if (ast_context()->IsTest()) {
+ TestContext* context = TestContext::cast(ast_context());
+ VisitForControl(expr->expression(),
+ context->if_false(),
+ context->if_true());
+ return;
+ }
+
+ if (ast_context()->IsEffect()) {
+ VisitForEffect(expr->expression());
+ return;
+ }
+
+ ASSERT(ast_context()->IsValue());
+ HBasicBlock* materialize_false = graph()->CreateBasicBlock();
+ HBasicBlock* materialize_true = graph()->CreateBasicBlock();
+ CHECK_BAILOUT(VisitForControl(expr->expression(),
+ materialize_false,
+ materialize_true));
+
+ if (materialize_false->HasPredecessor()) {
+ materialize_false->SetJoinId(expr->expression()->id());
+ set_current_block(materialize_false);
+ Push(graph()->GetConstantFalse());
+ } else {
+ materialize_false = NULL;
+ }
+
+ if (materialize_true->HasPredecessor()) {
+ materialize_true->SetJoinId(expr->expression()->id());
+ set_current_block(materialize_true);
+ Push(graph()->GetConstantTrue());
+ } else {
+ materialize_true = NULL;
+ }
+
+ HBasicBlock* join =
+ CreateJoin(materialize_false, materialize_true, expr->id());
+ set_current_block(join);
+ if (join != NULL) ast_context()->ReturnValue(Pop());
+}
+
+
+HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input,
+ CountOperation* expr) {
+ // The input to the count operation is on top of the expression stack.
+ TypeInfo info = oracle()->IncrementType(expr);
+ Representation rep = ToRepresentation(info);
+ if (rep.IsTagged()) {
+ rep = Representation::Integer32();
+ }
+
+ if (returns_original_input) {
+ // We need an explicit HValue representing ToNumber(input). The
+ // actual HChange instruction we need is (sometimes) added in a later
+ // phase, so it is not available now to be used as an input to HAdd and
+ // as the return value.
+ HInstruction* number_input = new(zone()) HForceRepresentation(Pop(), rep);
+ AddInstruction(number_input);
+ Push(number_input);
+ }
+
+ // The addition has no side effects, so we do not need
+ // to simulate the expression stack after this instruction.
+ // Any later failures deopt to the load of the input or earlier.
+ HConstant* delta = (expr->op() == Token::INC)
? graph_->GetConstant1()
: graph_->GetConstantMinus1();
- HInstruction* instr = new HAdd(value, delta);
- AssumeRepresentation(instr, Representation::Integer32());
+ HInstruction* instr = new(zone()) HAdd(Top(), delta);
+ TraceRepresentation(expr->op(), info, instr, rep);
+ instr->AssumeRepresentation(rep);
+ AddInstruction(instr);
return instr;
}
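
The returns_original_input handling above mirrors the language rule that postfix x++ evaluates to ToNumber(x) taken before the add, which is why a number-converted copy of the input (HForceRepresentation) is kept on the simulated stack. A sketch of that semantics, with JSValue and ToNumber as stand-ins:

    #include <cstdio>

    struct JSValue { double num; };  // stand-in for a tagged value

    double ToNumber(const JSValue& v) { return v.num; }

    // Postfix increment: store the incremented value but return the
    // number-converted original (the extra simulated stack element).
    double PostfixInc(JSValue* x) {
      double input = ToNumber(*x);  // HForceRepresentation of the input
      double after = input + 1.0;   // HAdd(Top(), GetConstant1())
      x->num = after;               // store back (variable or property)
      return input;                 // is_postfix() ? input : after
    }

    int main() {
      JSValue x{41.0};
      double old_value = PostfixInc(&x);
      std::printf("%g %g\n", old_value, x.num);  // prints 41 42
    }
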
void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
- IncrementOperation* increment = expr->increment();
- Expression* target = increment->expression();
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ Expression* target = expr->expression();
VariableProxy* proxy = target->AsVariableProxy();
Variable* var = proxy->AsVariable();
Property* prop = target->AsProperty();
- ASSERT(var == NULL || prop == NULL);
- bool inc = expr->op() == Token::INC;
+ if (var == NULL && prop == NULL) {
+ return Bailout("invalid lhs in count operation");
+ }
+
+ // Match the full code generator stack by simulating an extra stack
+ // element for postfix operations in a non-effect context. The return
+ // value is ToNumber(input).
+ bool returns_original_input =
+ expr->is_postfix() && !ast_context()->IsEffect();
+ HValue* input = NULL; // ToNumber(original_input).
+ HValue* after = NULL; // The result after incrementing or decrementing.
if (var != NULL) {
- VISIT_FOR_VALUE(target);
-
- // Match the full code generator stack by simulating an extra stack
- // element for postfix operations in a non-effect context.
- bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
- HValue* before = has_extra ? Top() : Pop();
- HInstruction* after = BuildIncrement(before, inc);
- AddInstruction(after);
+ if (var->mode() == Variable::CONST) {
+ return Bailout("unsupported count operation with const");
+ }
+ // Argument of the count operation is a variable, not a property.
+ ASSERT(prop == NULL);
+ CHECK_ALIVE(VisitForValue(target));
+
+ after = BuildIncrement(returns_original_input, expr);
+ input = returns_original_input ? Top() : Pop();
Push(after);
if (var->is_global()) {
@@ -4746,29 +5134,40 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
} else if (var->IsStackAllocated()) {
Bind(var, after);
} else if (var->IsContextSlot()) {
+ // Bail out if we try to mutate a parameter value in a function using
+ // the arguments object. We do not (yet) correctly handle the
+ // arguments property of the function.
+ if (info()->scope()->arguments() != NULL) {
+ // Parameters will rewrite to context slots. We have no direct way
+ // to detect that the variable is a parameter.
+ int count = info()->scope()->num_parameters();
+ for (int i = 0; i < count; ++i) {
+ if (var == info()->scope()->parameter(i)) {
+ Bailout("assignment to parameter, function uses arguments object");
+ }
+ }
+ }
+
HValue* context = BuildContextChainWalk(var);
int index = var->AsSlot()->index();
- HStoreContextSlot* instr = new HStoreContextSlot(context, index, after);
+ HStoreContextSlot* instr =
+ new(zone()) HStoreContextSlot(context, index, after);
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
} else {
- BAILOUT("lookup variable in count operation");
+ return Bailout("lookup variable in count operation");
}
- Drop(has_extra ? 2 : 1);
- ast_context()->ReturnValue(expr->is_postfix() ? before : after);
- } else if (prop != NULL) {
+ } else {
+ // Argument of the count operation is a property.
+ ASSERT(prop != NULL);
prop->RecordTypeFeedback(oracle());
if (prop->key()->IsPropertyName()) {
// Named property.
+ if (returns_original_input) Push(graph_->GetConstantUndefined());
- // Match the full code generator stack by simulating an extra stack
- // element for postfix operations in a non-effect context.
- bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
- if (has_extra) Push(graph_->GetConstantUndefined());
-
- VISIT_FOR_VALUE(prop->obj());
+ CHECK_ALIVE(VisitForValue(prop->obj()));
HValue* obj = Top();
HInstruction* load = NULL;
@@ -4780,13 +5179,10 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
load = BuildLoadNamedGeneric(obj, prop);
}
PushAndAdd(load);
- if (load->HasSideEffects()) AddSimulate(increment->id());
+ if (load->HasSideEffects()) AddSimulate(expr->CountId());
- HValue* before = Pop();
- // There is no deoptimization to after the increment, so we don't need
- // to simulate the expression stack after this instruction.
- HInstruction* after = BuildIncrement(before, inc);
- AddInstruction(after);
+ after = BuildIncrement(returns_original_input, expr);
+ input = Pop();
HInstruction* store = BuildStoreNamed(obj, after, prop);
AddInstruction(store);
@@ -4795,117 +5191,119 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
// of the operation, and the placeholder with the original value if
// necessary.
environment()->SetExpressionStackAt(0, after);
- if (has_extra) environment()->SetExpressionStackAt(1, before);
+ if (returns_original_input) environment()->SetExpressionStackAt(1, input);
if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
- Drop(has_extra ? 2 : 1);
-
- ast_context()->ReturnValue(expr->is_postfix() ? before : after);
} else {
// Keyed property.
+ if (returns_original_input) Push(graph_->GetConstantUndefined());
- // Match the full code generator stack by simulate an extra stack element
- // for postfix operations in a non-effect context.
- bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
- if (has_extra) Push(graph_->GetConstantUndefined());
-
- VISIT_FOR_VALUE(prop->obj());
- VISIT_FOR_VALUE(prop->key());
+ CHECK_ALIVE(VisitForValue(prop->obj()));
+ CHECK_ALIVE(VisitForValue(prop->key()));
HValue* obj = environment()->ExpressionStackAt(1);
HValue* key = environment()->ExpressionStackAt(0);
- bool is_fast_elements = prop->IsMonomorphic() &&
- prop->GetMonomorphicReceiverType()->has_fast_elements();
+ bool has_side_effects = false;
+ HValue* load = HandleKeyedElementAccess(
+ obj, key, NULL, prop, expr->CountId(), RelocInfo::kNoPosition,
+ false, // is_store
+ &has_side_effects);
+ Push(load);
+ if (has_side_effects) AddSimulate(expr->CountId());
- HInstruction* load = is_fast_elements
- ? BuildLoadKeyedFastElement(obj, key, prop)
- : BuildLoadKeyedGeneric(obj, key);
- PushAndAdd(load);
- if (load->HasSideEffects()) AddSimulate(increment->id());
-
- HValue* before = Pop();
- // There is no deoptimization to after the increment, so we don't need
- // to simulate the expression stack after this instruction.
- HInstruction* after = BuildIncrement(before, inc);
- AddInstruction(after);
+ after = BuildIncrement(returns_original_input, expr);
+ input = Pop();
- HInstruction* store = is_fast_elements
- ? BuildStoreKeyedFastElement(obj, key, after, prop)
- : BuildStoreKeyedGeneric(obj, key, after);
- AddInstruction(store);
+ expr->RecordTypeFeedback(oracle());
+ HandleKeyedElementAccess(obj, key, after, expr, expr->AssignmentId(),
+ RelocInfo::kNoPosition,
+ true, // is_store
+ &has_side_effects);
// Drop the key from the bailout environment. Overwrite the receiver
// with the result of the operation, and the placeholder with the
// original value if necessary.
Drop(1);
environment()->SetExpressionStackAt(0, after);
- if (has_extra) environment()->SetExpressionStackAt(1, before);
- if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
- Drop(has_extra ? 2 : 1);
-
- ast_context()->ReturnValue(expr->is_postfix() ? before : after);
+ if (returns_original_input) environment()->SetExpressionStackAt(1, input);
+ ASSERT(has_side_effects); // Stores always have side effects.
+ AddSimulate(expr->AssignmentId());
}
-
- } else {
- BAILOUT("invalid lhs in count operation");
}
+
+ Drop(returns_original_input ? 2 : 1);
+ ast_context()->ReturnValue(expr->is_postfix() ? input : after);
}
HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* string,
HValue* index) {
- AddInstruction(new HCheckNonSmi(string));
- AddInstruction(new HCheckInstanceType(
- string, FIRST_STRING_TYPE, LAST_STRING_TYPE));
- HStringLength* length = new HStringLength(string);
+ AddInstruction(new(zone()) HCheckNonSmi(string));
+ AddInstruction(HCheckInstanceType::NewIsString(string));
+ HStringLength* length = new(zone()) HStringLength(string);
AddInstruction(length);
- AddInstruction(new HBoundsCheck(index, length));
- return new HStringCharCodeAt(string, index);
+ HInstruction* checked_index =
+ AddInstruction(new(zone()) HBoundsCheck(index, length));
+ return new(zone()) HStringCharCodeAt(string, checked_index);
}
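
The rewritten helper now threads the bounds-checked index, rather than the raw one, into HStringCharCodeAt. As plain C++ the guarded load looks roughly like the sketch below; note that the real optimized instruction deoptimizes rather than throwing, and unoptimized charCodeAt returns NaN for an out-of-range index:

    #include <cstdint>
    #include <stdexcept>
    #include <string>

    // Guarded character load: the non-smi/string checks are implicit
    // in the parameter type here; the bounds check guards the load.
    uint16_t CharCodeAt(const std::u16string& s, size_t index) {
      if (index >= s.size()) {             // HBoundsCheck(index, length)
        throw std::out_of_range("index");  // stands in for a deopt
      }
      return s[index];                     // HStringCharCodeAt
    }

    int main() {
      std::u16string s = u"hi";
      return CharCodeAt(s, 1) == u'i' ? 0 : 1;
    }
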
HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
HValue* left,
HValue* right) {
+ TypeInfo info = oracle()->BinaryType(expr);
+ if (info.IsUninitialized()) {
+ AddInstruction(new(zone()) HSoftDeoptimize);
+ current_block()->MarkAsDeoptimizing();
+ info = TypeInfo::Unknown();
+ }
HInstruction* instr = NULL;
switch (expr->op()) {
case Token::ADD:
- instr = new HAdd(left, right);
+ if (info.IsString()) {
+ AddInstruction(new(zone()) HCheckNonSmi(left));
+ AddInstruction(HCheckInstanceType::NewIsString(left));
+ AddInstruction(new(zone()) HCheckNonSmi(right));
+ AddInstruction(HCheckInstanceType::NewIsString(right));
+ instr = new(zone()) HStringAdd(left, right);
+ } else {
+ instr = new(zone()) HAdd(left, right);
+ }
break;
case Token::SUB:
- instr = new HSub(left, right);
+ instr = new(zone()) HSub(left, right);
break;
case Token::MUL:
- instr = new HMul(left, right);
+ instr = new(zone()) HMul(left, right);
break;
case Token::MOD:
- instr = new HMod(left, right);
+ instr = new(zone()) HMod(left, right);
break;
case Token::DIV:
- instr = new HDiv(left, right);
+ instr = new(zone()) HDiv(left, right);
break;
case Token::BIT_XOR:
- instr = new HBitXor(left, right);
+ instr = new(zone()) HBitXor(left, right);
break;
case Token::BIT_AND:
- instr = new HBitAnd(left, right);
+ instr = new(zone()) HBitAnd(left, right);
break;
case Token::BIT_OR:
- instr = new HBitOr(left, right);
+ instr = new(zone()) HBitOr(left, right);
break;
case Token::SAR:
- instr = new HSar(left, right);
+ instr = new(zone()) HSar(left, right);
break;
case Token::SHR:
- instr = new HShr(left, right);
+ instr = new(zone()) HShr(left, right);
break;
case Token::SHL:
- instr = new HShl(left, right);
+ instr = new(zone()) HShl(left, right);
break;
default:
UNREACHABLE();
}
- TypeInfo info = oracle()->BinaryType(expr);
+
// If we hit an uninitialized binary op stub, we will get type info
// for a smi operation. If one of the operands is a constant string,
// do not generate code assuming it is a smi operation.
@@ -4914,15 +5312,13 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
(right->IsConstant() && HConstant::cast(right)->HasStringValue()))) {
return instr;
}
- if (FLAG_trace_representation) {
- PrintF("Info: %s/%s\n", info.ToString(), ToRepresentation(info).Mnemonic());
- }
Representation rep = ToRepresentation(info);
// We only generate either int32 or generic tagged bitwise operations.
if (instr->IsBitwiseBinaryOperation() && rep.IsDouble()) {
rep = Representation::Integer32();
}
- AssumeRepresentation(instr, rep);
+ TraceRepresentation(expr->op(), info, instr, rep);
+ instr->AssumeRepresentation(rep);
return instr;
}
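
BuildBinaryOperation now consults the type feedback before constructing the instruction: string feedback turns ADD into a checked string concatenation (HStringAdd), while uninitialized feedback inserts a soft deoptimization so that feedback can be collected first. A loose sketch of that dispatch, using std::variant as the tagged value (all names below are invented for illustration):

    #include <stdexcept>
    #include <string>
    #include <variant>

    enum class Feedback { kUninitialized, kString, kNumber };

    using Value = std::variant<double, std::string>;

    Value Add(const Value& left, const Value& right, Feedback feedback) {
      switch (feedback) {
        case Feedback::kString:
          // HCheckNonSmi/HCheckInstanceType on both inputs, then
          // HStringAdd; std::get throws on a type mismatch, much as a
          // failed check deoptimizes.
          return std::get<std::string>(left) + std::get<std::string>(right);
        case Feedback::kNumber:
          return std::get<double>(left) + std::get<double>(right);  // HAdd
        case Feedback::kUninitialized:
        default:
          // HSoftDeoptimize: rerun unoptimized so feedback is recorded.
          throw std::runtime_error("soft deopt");
      }
    }

    int main() {
      Value a = std::string("foo"), b = std::string("bar");
      return std::get<std::string>(Add(a, b, Feedback::kString)).size() == 6
                 ? 0 : 1;
    }
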
@@ -4942,115 +5338,145 @@ static bool IsClassOfTest(CompareOperation* expr) {
void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
- if (expr->op() == Token::COMMA) {
- VISIT_FOR_EFFECT(expr->left());
- // Visit the right subexpression in the same AST context as the entire
- // expression.
- Visit(expr->right());
-
- } else if (expr->op() == Token::AND || expr->op() == Token::OR) {
- bool is_logical_and = (expr->op() == Token::AND);
- if (ast_context()->IsTest()) {
- TestContext* context = TestContext::cast(ast_context());
- // Translate left subexpression.
- HBasicBlock* eval_right = graph()->CreateBasicBlock();
- if (is_logical_and) {
- VISIT_FOR_CONTROL(expr->left(), eval_right, context->if_false());
- } else {
- VISIT_FOR_CONTROL(expr->left(), context->if_true(), eval_right);
- }
- eval_right->SetJoinId(expr->RightId());
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ switch (expr->op()) {
+ case Token::COMMA:
+ return VisitComma(expr);
+ case Token::OR:
+ case Token::AND:
+ return VisitLogicalExpression(expr);
+ default:
+ return VisitArithmeticExpression(expr);
+ }
+}
+
+
+void HGraphBuilder::VisitComma(BinaryOperation* expr) {
+ CHECK_ALIVE(VisitForEffect(expr->left()));
+ // Visit the right subexpression in the same AST context as the entire
+ // expression.
+ Visit(expr->right());
+}
+
+
+void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
+ bool is_logical_and = expr->op() == Token::AND;
+ if (ast_context()->IsTest()) {
+ TestContext* context = TestContext::cast(ast_context());
+ // Translate left subexpression.
+ HBasicBlock* eval_right = graph()->CreateBasicBlock();
+ if (is_logical_and) {
+ CHECK_BAILOUT(VisitForControl(expr->left(),
+ eval_right,
+ context->if_false()));
+ } else {
+ CHECK_BAILOUT(VisitForControl(expr->left(),
+ context->if_true(),
+ eval_right));
+ }
- // Translate right subexpression by visiting it in the same AST
- // context as the entire expression.
+ // Translate right subexpression by visiting it in the same AST
+ // context as the entire expression.
+ if (eval_right->HasPredecessor()) {
+ eval_right->SetJoinId(expr->RightId());
set_current_block(eval_right);
Visit(expr->right());
+ }
- } else if (ast_context()->IsValue()) {
- VISIT_FOR_VALUE(expr->left());
- ASSERT(current_block() != NULL);
-
- HValue* left = Top();
- HEnvironment* environment_copy = environment()->Copy();
- environment_copy->Pop();
- HSubgraph* right_subgraph;
- right_subgraph = CreateBranchSubgraph(environment_copy);
- ADD_TO_SUBGRAPH(right_subgraph, expr->right());
-
- ASSERT(current_block() != NULL &&
- right_subgraph->exit_block() != NULL);
- // We need an extra block to maintain edge-split form.
- HBasicBlock* empty_block = graph()->CreateBasicBlock();
- HBasicBlock* join_block = graph()->CreateBasicBlock();
-
- HTest* test = is_logical_and
- ? new HTest(left, right_subgraph->entry_block(), empty_block)
- : new HTest(left, empty_block, right_subgraph->entry_block());
- current_block()->Finish(test);
- empty_block->Goto(join_block);
- right_subgraph->exit_block()->Goto(join_block);
- join_block->SetJoinId(expr->id());
- set_current_block(join_block);
- ast_context()->ReturnValue(Pop());
+ } else if (ast_context()->IsValue()) {
+ CHECK_ALIVE(VisitForValue(expr->left()));
+ ASSERT(current_block() != NULL);
+
+ // We need an extra block to maintain edge-split form.
+ HBasicBlock* empty_block = graph()->CreateBasicBlock();
+ HBasicBlock* eval_right = graph()->CreateBasicBlock();
+ HTest* test = is_logical_and
+ ? new(zone()) HTest(Top(), eval_right, empty_block)
+ : new(zone()) HTest(Top(), empty_block, eval_right);
+ current_block()->Finish(test);
+
+ set_current_block(eval_right);
+ Drop(1); // Value of the left subexpression.
+ CHECK_BAILOUT(VisitForValue(expr->right()));
+
+ HBasicBlock* join_block =
+ CreateJoin(empty_block, current_block(), expr->id());
+ set_current_block(join_block);
+ ast_context()->ReturnValue(Pop());
+
+ } else {
+ ASSERT(ast_context()->IsEffect());
+ // In an effect context, we don't need the value of the left subexpression,
+ // only its control flow and side effects. We need an extra block to
+ // maintain edge-split form.
+ HBasicBlock* empty_block = graph()->CreateBasicBlock();
+ HBasicBlock* right_block = graph()->CreateBasicBlock();
+ if (is_logical_and) {
+ CHECK_BAILOUT(VisitForControl(expr->left(), right_block, empty_block));
} else {
- ASSERT(ast_context()->IsEffect());
- // In an effect context, we don't need the value of the left
- // subexpression, only its control flow and side effects. We need an
- // extra block to maintain edge-split form.
- HBasicBlock* empty_block = graph()->CreateBasicBlock();
- HBasicBlock* right_block = graph()->CreateBasicBlock();
- HBasicBlock* join_block = graph()->CreateBasicBlock();
- if (is_logical_and) {
- VISIT_FOR_CONTROL(expr->left(), right_block, empty_block);
- } else {
- VISIT_FOR_CONTROL(expr->left(), empty_block, right_block);
- }
- // TODO(kmillikin): Find a way to fix this. It's ugly that there are
- // actually two empty blocks (one here and one inserted by
- // TestContext::BuildBranch, and that they both have an HSimulate
- // though the second one is not a merge node, and that we really have
- // no good AST ID to put on that first HSimulate.
+ CHECK_BAILOUT(VisitForControl(expr->left(), empty_block, right_block));
+ }
+
+ // TODO(kmillikin): Find a way to fix this. It's ugly that there are
+ // actually two empty blocks (one here and one inserted by
+ // TestContext::BuildBranch), that they both have an HSimulate though the
+ // second one is not a merge node, and that we really have no good AST ID to
+ // put on that first HSimulate.
+
+ if (empty_block->HasPredecessor()) {
empty_block->SetJoinId(expr->id());
+ } else {
+ empty_block = NULL;
+ }
+
+ if (right_block->HasPredecessor()) {
right_block->SetJoinId(expr->RightId());
set_current_block(right_block);
- VISIT_FOR_EFFECT(expr->right());
-
- empty_block->Goto(join_block);
- current_block()->Goto(join_block);
- join_block->SetJoinId(expr->id());
- set_current_block(join_block);
- // We did not materialize any value in the predecessor environments,
- // so there is no need to handle it here.
+ CHECK_BAILOUT(VisitForEffect(expr->right()));
+ right_block = current_block();
+ } else {
+ right_block = NULL;
}
- } else {
- VISIT_FOR_VALUE(expr->left());
- VISIT_FOR_VALUE(expr->right());
-
- HValue* right = Pop();
- HValue* left = Pop();
- HInstruction* instr = BuildBinaryOperation(expr, left, right);
- instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
+ HBasicBlock* join_block =
+ CreateJoin(empty_block, right_block, expr->id());
+ set_current_block(join_block);
+ // We did not materialize any value in the predecessor environments,
+ // so there is no need to handle it here.
}
}
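A minimal sketch, with integer truthiness standing in for HTest, of the value-context lowering above: `a && b` keeps the left value on the short-circuit path (the empty block) and otherwise drops it and takes the right value, which is the Top()/Drop(1)/Pop() sequence in the builder.

#include <cassert>

static int LogicalAndValue(int left, int right) {
  if (!left) return left;  // empty_block path: reuse the left value
  return right;            // eval_right path: Drop(1), evaluate the right side
}

int main() {
  assert(LogicalAndValue(0, 7) == 0);  // falsy left short-circuits
  assert(LogicalAndValue(3, 7) == 7);  // truthy left yields the right value
  return 0;
}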
-void HGraphBuilder::AssumeRepresentation(HValue* value, Representation r) {
- if (value->CheckFlag(HValue::kFlexibleRepresentation)) {
- if (FLAG_trace_representation) {
- PrintF("Assume representation for %s to be %s (%d)\n",
- value->Mnemonic(),
- r.Mnemonic(),
- graph_->GetMaximumValueID());
- }
- value->ChangeRepresentation(r);
- // The representation of the value is dictated by type feedback and
- // will not be changed later.
- value->ClearFlag(HValue::kFlexibleRepresentation);
- } else if (FLAG_trace_representation) {
- PrintF("No representation assumed\n");
- }
+void HGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
+ CHECK_ALIVE(VisitForValue(expr->left()));
+ CHECK_ALIVE(VisitForValue(expr->right()));
+ HValue* right = Pop();
+ HValue* left = Pop();
+ HInstruction* instr = BuildBinaryOperation(expr, left, right);
+ instr->set_position(expr->position());
+ ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HGraphBuilder::TraceRepresentation(Token::Value op,
+ TypeInfo info,
+ HValue* value,
+ Representation rep) {
+ if (!FLAG_trace_representation) return;
+ // TODO(svenpanne) Under which circumstances are we actually not flexible?
+ // At first glance, this looks a bit weird...
+ bool flexible = value->CheckFlag(HValue::kFlexibleRepresentation);
+ PrintF("Operation %s has type info %s, %schange representation assumption "
+ "for %s (ID %d) from %s to %s\n",
+ Token::Name(op),
+ info.ToString(),
+ flexible ? "" : "DO NOT ",
+ value->Mnemonic(),
+ graph_->GetMaximumValueID(),
+ value->representation().Mnemonic(),
+ rep.Mnemonic());
}
@@ -5063,42 +5489,73 @@ Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
}
+void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* compare_expr,
+ Expression* expr,
+ Handle<String> check) {
+ CHECK_ALIVE(VisitForTypeOf(expr));
+ HValue* expr_value = Pop();
+ HInstruction* instr = new(zone()) HTypeofIs(expr_value, check);
+ instr->set_position(compare_expr->position());
+ ast_context()->ReturnInstruction(instr, compare_expr->id());
+}
+
+
+void HGraphBuilder::HandleLiteralCompareUndefined(
+ CompareOperation* compare_expr, Expression* expr) {
+ CHECK_ALIVE(VisitForValue(expr));
+ HValue* lhs = Pop();
+ HValue* rhs = graph()->GetConstantUndefined();
+ HInstruction* instr =
+ new(zone()) HCompareObjectEq(lhs, rhs);
+ instr->set_position(compare_expr->position());
+ ast_context()->ReturnInstruction(instr, compare_expr->id());
+}
+
+
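A hypothetical matcher sketch showing the shape of expression the two fast paths above recognize; the Compare struct and string-based matching are illustrative stand-ins for the AST predicates IsLiteralCompareTypeof and IsLiteralCompareUndefined.

#include <cassert>
#include <optional>
#include <string>

struct Compare {
  std::string op;     // "==", "===", ...
  std::string left;   // flattened source of the left operand
  std::string right;  // flattened source of the right operand
};

// Yields the checked string for comparisons shaped `typeof <expr> == "..."`.
static std::optional<std::string> MatchLiteralCompareTypeof(const Compare& c) {
  if (c.op != "==" && c.op != "===") return std::nullopt;
  if (c.left.rfind("typeof ", 0) != 0) return std::nullopt;
  if (c.right.size() < 2 || c.right.front() != '"') return std::nullopt;
  return c.right.substr(1, c.right.size() - 2);
}

int main() {
  Compare hit = {"==", "typeof x", "\"string\""};
  Compare miss = {"<", "typeof x", "\"string\""};
  assert(MatchLiteralCompareTypeof(hit) == std::optional<std::string>("string"));
  assert(!MatchLiteralCompareTypeof(miss));
  return 0;
}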
void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
if (IsClassOfTest(expr)) {
CallRuntime* call = expr->left()->AsCallRuntime();
- VISIT_FOR_VALUE(call->arguments()->at(0));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
Literal* literal = expr->right()->AsLiteral();
Handle<String> rhs = Handle<String>::cast(literal->handle());
- HInstruction* instr = new HClassOfTest(value, rhs);
+ HInstruction* instr = new(zone()) HClassOfTest(value, rhs);
instr->set_position(expr->position());
ast_context()->ReturnInstruction(instr, expr->id());
return;
}
- // Check for the pattern: typeof <expression> == <string literal>.
- UnaryOperation* left_unary = expr->left()->AsUnaryOperation();
- Literal* right_literal = expr->right()->AsLiteral();
- if ((expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT) &&
- left_unary != NULL && left_unary->op() == Token::TYPEOF &&
- right_literal != NULL && right_literal->handle()->IsString()) {
- VISIT_FOR_VALUE(left_unary->expression());
- HValue* left = Pop();
- HInstruction* instr = new HTypeofIs(left,
- Handle<String>::cast(right_literal->handle()));
- instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
+ // Check for special cases that compare against literals.
+ Expression* sub_expr;
+ Handle<String> check;
+ if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+ HandleLiteralCompareTypeof(expr, sub_expr, check);
return;
}
- VISIT_FOR_VALUE(expr->left());
- VISIT_FOR_VALUE(expr->right());
+ if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+ HandleLiteralCompareUndefined(expr, sub_expr);
+ return;
+ }
+
+ TypeInfo type_info = oracle()->CompareType(expr);
+ // Check if this expression was ever executed according to type feedback.
+ if (type_info.IsUninitialized()) {
+ AddInstruction(new(zone()) HSoftDeoptimize);
+ current_block()->MarkAsDeoptimizing();
+ type_info = TypeInfo::Unknown();
+ }
+
+ CHECK_ALIVE(VisitForValue(expr->left()));
+ CHECK_ALIVE(VisitForValue(expr->right()));
HValue* right = Pop();
HValue* left = Pop();
Token::Value op = expr->op();
- TypeInfo info = oracle()->CompareType(expr);
HInstruction* instr = NULL;
if (op == Token::INSTANCEOF) {
// Check to see if the rhs of the instanceof is a global function not
@@ -5107,12 +5564,11 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Handle<JSFunction> target = Handle<JSFunction>::null();
Variable* var = expr->right()->AsVariableProxy()->AsVariable();
bool global_function = (var != NULL) && var->is_global() && !var->is_this();
- CompilationInfo* info = graph()->info();
if (global_function &&
- info->has_global_object() &&
- !info->global_object()->IsAccessCheckNeeded()) {
+ info()->has_global_object() &&
+ !info()->global_object()->IsAccessCheckNeeded()) {
Handle<String> name = var->name();
- Handle<GlobalObject> global(info->global_object());
+ Handle<GlobalObject> global(info()->global_object());
LookupResult lookup;
global->Lookup(*name, &lookup);
if (lookup.IsProperty() &&
@@ -5121,7 +5577,7 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Handle<JSFunction> candidate(JSFunction::cast(lookup.GetValue()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
- if (!Heap::InNewSpace(*candidate)) {
+ if (!isolate()->heap()->InNewSpace(*candidate)) {
target = candidate;
}
}
@@ -5130,33 +5586,39 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
// If the target is not null we have found a known global function that is
// assumed to stay the same for this instanceof.
if (target.is_null()) {
- HContext* context = new HContext;
- AddInstruction(context);
- instr = new HInstanceOf(context, left, right);
+ HValue* context = environment()->LookupContext();
+ instr = new(zone()) HInstanceOf(context, left, right);
} else {
- AddInstruction(new HCheckFunction(right, target));
- instr = new HInstanceOfKnownGlobal(left, target);
+ AddInstruction(new(zone()) HCheckFunction(right, target));
+ instr = new(zone()) HInstanceOfKnownGlobal(left, target);
}
} else if (op == Token::IN) {
- BAILOUT("Unsupported comparison: in");
- } else if (info.IsNonPrimitive()) {
+ instr = new(zone()) HIn(left, right);
+ } else if (type_info.IsNonPrimitive()) {
switch (op) {
case Token::EQ:
case Token::EQ_STRICT: {
- AddInstruction(new HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsJSObjectOrJSFunction(left));
- AddInstruction(new HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsJSObjectOrJSFunction(right));
- instr = new HCompareJSObjectEq(left, right);
+ AddInstruction(new(zone()) HCheckNonSmi(left));
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
+ AddInstruction(new(zone()) HCheckNonSmi(right));
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
+ instr = new(zone()) HCompareObjectEq(left, right);
break;
}
default:
- BAILOUT("Unsupported non-primitive compare");
+ return Bailout("Unsupported non-primitive compare");
break;
}
+ } else if (type_info.IsString() && oracle()->IsSymbolCompare(expr) &&
+ (op == Token::EQ || op == Token::EQ_STRICT)) {
+ AddInstruction(new(zone()) HCheckNonSmi(left));
+ AddInstruction(HCheckInstanceType::NewIsSymbol(left));
+ AddInstruction(new(zone()) HCheckNonSmi(right));
+ AddInstruction(HCheckInstanceType::NewIsSymbol(right));
+ instr = new(zone()) HCompareObjectEq(left, right);
} else {
- HCompare* compare = new HCompare(left, right, op);
- Representation r = ToRepresentation(info);
+ HCompare* compare = new(zone()) HCompare(left, right, op);
+ Representation r = ToRepresentation(type_info);
compare->SetInputRepresentation(r);
instr = compare;
}
@@ -5166,31 +5628,36 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
- VISIT_FOR_VALUE(expr->expression());
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
- HIsNull* compare = new HIsNull(value, expr->is_strict());
+ HIsNull* compare = new(zone()) HIsNull(value, expr->is_strict());
ast_context()->ReturnInstruction(compare, expr->id());
}
void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
- BAILOUT("ThisFunction");
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+ HThisFunction* self = new(zone()) HThisFunction;
+ return ast_context()->ReturnInstruction(self, expr->id());
}
void HGraphBuilder::VisitDeclaration(Declaration* decl) {
- // We allow only declarations that do not require code generation.
- // The following all require code generation: global variables and
- // functions, variables with slot type LOOKUP, declarations with
- // mode CONST, and functions.
+ // We support only declarations that do not require code generation.
Variable* var = decl->proxy()->var();
- Slot* slot = var->AsSlot();
- if (var->is_global() ||
- (slot != NULL && slot->type() == Slot::LOOKUP) ||
- decl->mode() == Variable::CONST ||
- decl->fun() != NULL) {
- BAILOUT("unsupported declaration");
+ if (!var->IsStackAllocated() || decl->fun() != NULL) {
+ return Bailout("unsupported declaration");
+ }
+
+ if (decl->mode() == Variable::CONST) {
+ ASSERT(var->IsStackAllocated());
+ environment()->Bind(var, graph()->GetConstantHole());
}
}
@@ -5199,107 +5666,132 @@ void HGraphBuilder::VisitDeclaration(Declaration* decl) {
// Support for types.
void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsSmi* result = new HIsSmi(value);
+ HIsSmi* result = new(zone()) HIsSmi(value);
ast_context()->ReturnInstruction(result, call->id());
}
void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceType* result =
- new HHasInstanceType(value, FIRST_JS_OBJECT_TYPE, LAST_TYPE);
+ new(zone()) HHasInstanceType(value,
+ FIRST_SPEC_OBJECT_TYPE,
+ LAST_SPEC_OBJECT_TYPE);
ast_context()->ReturnInstruction(result, call->id());
}
void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HHasInstanceType* result = new HHasInstanceType(value, JS_FUNCTION_TYPE);
+ HHasInstanceType* result =
+ new(zone()) HHasInstanceType(value, JS_FUNCTION_TYPE);
ast_context()->ReturnInstruction(result, call->id());
}
void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HHasCachedArrayIndex* result = new HHasCachedArrayIndex(value);
+ HHasCachedArrayIndex* result = new(zone()) HHasCachedArrayIndex(value);
ast_context()->ReturnInstruction(result, call->id());
}
void HGraphBuilder::GenerateIsArray(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HHasInstanceType* result = new HHasInstanceType(value, JS_ARRAY_TYPE);
+ HHasInstanceType* result = new(zone()) HHasInstanceType(value, JS_ARRAY_TYPE);
ast_context()->ReturnInstruction(result, call->id());
}
void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HHasInstanceType* result = new HHasInstanceType(value, JS_REGEXP_TYPE);
+ HHasInstanceType* result =
+ new(zone()) HHasInstanceType(value, JS_REGEXP_TYPE);
ast_context()->ReturnInstruction(result, call->id());
}
void HGraphBuilder::GenerateIsObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsObject* test = new HIsObject(value);
+ HIsObject* test = new(zone()) HIsObject(value);
ast_context()->ReturnInstruction(test, call->id());
}
void HGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
- BAILOUT("inlined runtime function: IsNonNegativeSmi");
+ return Bailout("inlined runtime function: IsNonNegativeSmi");
}
void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
- BAILOUT("inlined runtime function: IsUndetectableObject");
+ ASSERT(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ ast_context()->ReturnInstruction(new(zone()) HIsUndetectable(value),
+ call->id());
}
void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
CallRuntime* call) {
- BAILOUT("inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
+ return Bailout(
+ "inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
}
// Support for construct call checks.
void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
ASSERT(call->arguments()->length() == 0);
- ast_context()->ReturnInstruction(new HIsConstructCall, call->id());
+ if (function_state()->outer() != NULL) {
+ // We are generating the graph for an inlined function. Currently
+ // constructor inlining is not supported and we can just return
+ // false from %_IsConstructCall().
+ ast_context()->ReturnValue(graph()->GetConstantFalse());
+ } else {
+ ast_context()->ReturnInstruction(new(zone()) HIsConstructCall, call->id());
+ }
}
// Support for arguments.length and arguments[?].
void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
+ // Our implementation of arguments (based on this stack frame or an
+ // adapter below it) does not work for inlined functions. This runtime
+ // function is blacklisted by AstNode::IsInlineable.
+ ASSERT(function_state()->outer() == NULL);
ASSERT(call->arguments()->length() == 0);
- HInstruction* elements = AddInstruction(new HArgumentsElements);
- HArgumentsLength* result = new HArgumentsLength(elements);
+ HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+ HArgumentsLength* result = new(zone()) HArgumentsLength(elements);
ast_context()->ReturnInstruction(result, call->id());
}
void HGraphBuilder::GenerateArguments(CallRuntime* call) {
+ // Our implementation of arguments (based on this stack frame or an
+ // adapter below it) does not work for inlined functions. This runtime
+ // function is blacklisted by AstNode::IsInlineable.
+ ASSERT(function_state()->outer() == NULL);
ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* index = Pop();
- HInstruction* elements = AddInstruction(new HArgumentsElements);
- HInstruction* length = AddInstruction(new HArgumentsLength(elements));
- HAccessArgumentsAt* result = new HAccessArgumentsAt(elements, length, index);
+ HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+ HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
+ HAccessArgumentsAt* result =
+ new(zone()) HAccessArgumentsAt(elements, length, index);
ast_context()->ReturnInstruction(result, call->id());
}
@@ -5308,29 +5800,29 @@ void HGraphBuilder::GenerateArguments(CallRuntime* call) {
void HGraphBuilder::GenerateClassOf(CallRuntime* call) {
// The special form detected by IsClassOfTest is handled before we get here
// and does not cause a bailout.
- BAILOUT("inlined runtime function: ClassOf");
+ return Bailout("inlined runtime function: ClassOf");
}
void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HValueOf* result = new HValueOf(value);
+ HValueOf* result = new(zone()) HValueOf(value);
ast_context()->ReturnInstruction(result, call->id());
}
void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
- BAILOUT("inlined runtime function: SetValueOf");
+ return Bailout("inlined runtime function: SetValueOf");
}
// Fast support for charCodeAt(n).
void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
- VISIT_FOR_VALUE(call->arguments()->at(0));
- VISIT_FOR_VALUE(call->arguments()->at(1));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* index = Pop();
HValue* string = Pop();
HStringCharCodeAt* result = BuildStringCharCodeAt(string, index);
@@ -5340,19 +5832,24 @@ void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
// Fast support for string.charAt(n) and string[n].
void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
- BAILOUT("inlined runtime function: StringCharFromCode");
+ ASSERT(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* char_code = Pop();
+ HStringCharFromCode* result = new(zone()) HStringCharFromCode(char_code);
+ ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for string.charAt(n) and string[n].
void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
- ASSERT_EQ(2, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::StringCharAt, 2);
- Drop(2);
+ ASSERT(call->arguments()->length() == 2);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ HValue* index = Pop();
+ HValue* string = Pop();
+ HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
+ AddInstruction(char_code);
+ HStringCharFromCode* result = new(zone()) HStringCharFromCode(char_code);
ast_context()->ReturnInstruction(result, call->id());
}
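A behavioral sketch of what the two chained instructions above compute for one-byte strings: charAt(i) is charCodeAt(i) followed by fromCharCode. Bounds handling in the real graph goes through checks rather than an early return; the empty-string result below just mirrors the JavaScript semantics.

#include <cassert>
#include <string>

static std::string CharAt(const std::string& s, size_t i) {
  if (i >= s.size()) return "";                    // JS: out of range -> ""
  int code = static_cast<unsigned char>(s[i]);     // the HStringCharCodeAt step
  return std::string(1, static_cast<char>(code));  // the HStringCharFromCode step
}

int main() {
  assert(CharAt("abc", 1) == "b");
  assert(CharAt("abc", 9) == "");
  return 0;
}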
@@ -5360,11 +5857,11 @@ void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
// Fast support for object equality testing.
void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
- VISIT_FOR_VALUE(call->arguments()->at(0));
- VISIT_FOR_VALUE(call->arguments()->at(1));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
- HCompareJSObjectEq* result = new HCompareJSObjectEq(left, right);
+ HCompareObjectEq* result = new(zone()) HCompareObjectEq(left, right);
ast_context()->ReturnInstruction(result, call->id());
}
@@ -5377,18 +5874,16 @@ void HGraphBuilder::GenerateLog(CallRuntime* call) {
// Fast support for Math.random().
void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
- BAILOUT("inlined runtime function: RandomHeapNumber");
+ return Bailout("inlined runtime function: RandomHeapNumber");
}
// Fast support for StringAdd.
void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::StringAdd, 2);
+ CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ HValue* context = environment()->LookupContext();
+ HCallStub* result = new(zone()) HCallStub(context, CodeStub::StringAdd, 2);
Drop(2);
ast_context()->ReturnInstruction(result, call->id());
}
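A toy model (illustrative, with a plain vector standing in for the expression stack) of the stub-call pattern this and the following generators share: push the arguments, look up the context in the environment, emit an HCallStub, then Drop the pushed arguments.

#include <cassert>
#include <string>
#include <vector>

static std::vector<std::string> stack;  // stands in for the expression stack

static std::string CallStub(const std::string& stub, int argc) {
  std::string result = stub + "/" + std::to_string(argc);
  stack.resize(stack.size() - argc);  // the Drop(argc) step
  return result;
}

int main() {
  stack = {"\"a\"", "\"b\""};  // VisitArgumentList pushed the two operands
  assert(CallStub("StringAdd", 2) == "StringAdd/2");
  assert(stack.empty());       // arguments consumed by the stub call
  return 0;
}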
@@ -5397,11 +5892,9 @@ void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
// Fast support for SubString.
void HGraphBuilder::GenerateSubString(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::SubString, 3);
+ CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ HValue* context = environment()->LookupContext();
+ HCallStub* result = new(zone()) HCallStub(context, CodeStub::SubString, 3);
Drop(3);
ast_context()->ReturnInstruction(result, call->id());
}
@@ -5410,11 +5903,10 @@ void HGraphBuilder::GenerateSubString(CallRuntime* call) {
// Fast support for StringCompare.
void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::StringCompare, 2);
+ CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ HValue* context = environment()->LookupContext();
+ HCallStub* result =
+ new(zone()) HCallStub(context, CodeStub::StringCompare, 2);
Drop(2);
ast_context()->ReturnInstruction(result, call->id());
}
@@ -5423,11 +5915,9 @@ void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
// Support for direct calls from JavaScript to native RegExp code.
void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
ASSERT_EQ(4, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::RegExpExec, 4);
+ CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ HValue* context = environment()->LookupContext();
+ HCallStub* result = new(zone()) HCallStub(context, CodeStub::RegExpExec, 4);
Drop(4);
ast_context()->ReturnInstruction(result, call->id());
}
@@ -5436,12 +5926,10 @@ void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
// Construct a RegExp exec result with two in-object properties.
void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
+ CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ HValue* context = environment()->LookupContext();
HCallStub* result =
- new HCallStub(context, CodeStub::RegExpConstructResult, 3);
+ new(zone()) HCallStub(context, CodeStub::RegExpConstructResult, 3);
Drop(3);
ast_context()->ReturnInstruction(result, call->id());
}
@@ -5449,18 +5937,17 @@ void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
// Support for fast native caches.
void HGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
- BAILOUT("inlined runtime function: GetFromCache");
+ return Bailout("inlined runtime function: GetFromCache");
}
// Fast support for number to string.
void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::NumberToString, 1);
+ CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ HValue* context = environment()->LookupContext();
+ HCallStub* result =
+ new(zone()) HCallStub(context, CodeStub::NumberToString, 1);
Drop(1);
ast_context()->ReturnInstruction(result, call->id());
}
@@ -5470,35 +5957,47 @@ void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
// indices. This should only be used if the indices are known to be
// non-negative and within bounds of the elements array at the call site.
void HGraphBuilder::GenerateSwapElements(CallRuntime* call) {
- BAILOUT("inlined runtime function: SwapElements");
+ return Bailout("inlined runtime function: SwapElements");
}
// Fast call for custom callbacks.
void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
- BAILOUT("inlined runtime function: CallFunction");
+ // Subtract one from the argument count: the function to call is passed as
+ // the last argument but is not itself an argument to the call.
+ int arg_count = call->arguments()->length() - 1;
+ ASSERT(arg_count >= 1); // There's always at least a receiver.
+
+ for (int i = 0; i < arg_count; ++i) {
+ CHECK_ALIVE(VisitArgument(call->arguments()->at(i)));
+ }
+ CHECK_ALIVE(VisitForValue(call->arguments()->last()));
+ HValue* function = Pop();
+ HValue* context = environment()->LookupContext();
+ HInvokeFunction* result =
+ new(zone()) HInvokeFunction(context, function, arg_count);
+ Drop(arg_count);
+ ast_context()->ReturnInstruction(result, call->id());
}
// Fast call to math functions.
void HGraphBuilder::GenerateMathPow(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
- VISIT_FOR_VALUE(call->arguments()->at(0));
- VISIT_FOR_VALUE(call->arguments()->at(1));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
- HPower* result = new HPower(left, right);
+ HPower* result = new(zone()) HPower(left, right);
ast_context()->ReturnInstruction(result, call->id());
}
void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1);
+ CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ HValue* context = environment()->LookupContext();
+ HCallStub* result =
+ new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::SIN);
Drop(1);
ast_context()->ReturnInstruction(result, call->id());
@@ -5507,11 +6006,10 @@ void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1);
+ CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ HValue* context = environment()->LookupContext();
+ HCallStub* result =
+ new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::COS);
Drop(1);
ast_context()->ReturnInstruction(result, call->id());
@@ -5520,11 +6018,10 @@ void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
- VisitArgumentList(call->arguments());
- CHECK_BAILOUT;
- HContext* context = new HContext;
- AddInstruction(context);
- HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1);
+ CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ HValue* context = environment()->LookupContext();
+ HCallStub* result =
+ new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::LOG);
Drop(1);
ast_context()->ReturnInstruction(result, call->id());
@@ -5532,35 +6029,32 @@ void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
- BAILOUT("inlined runtime function: MathSqrt");
+ return Bailout("inlined runtime function: MathSqrt");
}
// Check whether two RegExps are equivalent
void HGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
- BAILOUT("inlined runtime function: IsRegExpEquivalent");
+ return Bailout("inlined runtime function: IsRegExpEquivalent");
}
void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
- VISIT_FOR_VALUE(call->arguments()->at(0));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HGetCachedArrayIndex* result = new HGetCachedArrayIndex(value);
+ HGetCachedArrayIndex* result = new(zone()) HGetCachedArrayIndex(value);
ast_context()->ReturnInstruction(result, call->id());
}
void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
- BAILOUT("inlined runtime function: FastAsciiArrayJoin");
+ return Bailout("inlined runtime function: FastAsciiArrayJoin");
}
-#undef BAILOUT
#undef CHECK_BAILOUT
-#undef VISIT_FOR_EFFECT
-#undef VISIT_FOR_VALUE
-#undef ADD_TO_SUBGRAPH
+#undef CHECK_ALIVE
HEnvironment::HEnvironment(HEnvironment* outer,
@@ -5570,6 +6064,7 @@ HEnvironment::HEnvironment(HEnvironment* outer,
values_(0),
assigned_variables_(4),
parameter_count_(0),
+ specials_count_(1),
local_count_(0),
outer_(outer),
pop_count_(0),
@@ -5583,6 +6078,7 @@ HEnvironment::HEnvironment(const HEnvironment* other)
: values_(0),
assigned_variables_(0),
parameter_count_(0),
+ specials_count_(1),
local_count_(0),
outer_(NULL),
pop_count_(0),
@@ -5599,7 +6095,7 @@ void HEnvironment::Initialize(int parameter_count,
local_count_ = local_count;
// Avoid reallocating the temporaries' backing store on the first Push.
- int total = parameter_count + local_count + stack_height;
+ int total = parameter_count + specials_count_ + local_count + stack_height;
values_.Initialize(total + 4);
for (int i = 0; i < total; ++i) values_.Add(NULL);
}
@@ -5635,7 +6131,7 @@ void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
} else if (values_[i] != other->values_[i]) {
// There is a fresh value on the incoming edge, a phi is needed.
ASSERT(values_[i] != NULL && other->values_[i] != NULL);
- HPhi* phi = new HPhi(i);
+ HPhi* phi = new(block->zone()) HPhi(i);
HValue* old_value = values_[i];
for (int j = 0; j < block->predecessors()->length(); j++) {
phi->AddInput(old_value);
@@ -5658,12 +6154,12 @@ void HEnvironment::Bind(int index, HValue* value) {
bool HEnvironment::HasExpressionAt(int index) const {
- return index >= parameter_count_ + local_count_;
+ return index >= parameter_count_ + specials_count_ + local_count_;
}
bool HEnvironment::ExpressionStackIsEmpty() const {
- int first_expression = parameter_count() + local_count();
+ int first_expression = parameter_count() + specials_count() + local_count();
ASSERT(length() >= first_expression);
return length() == first_expression;
}
@@ -5692,7 +6188,7 @@ void HEnvironment::Drop(int count) {
HEnvironment* HEnvironment::Copy() const {
- return new HEnvironment(this);
+ return new(closure()->GetIsolate()->zone()) HEnvironment(this);
}
@@ -5706,7 +6202,7 @@ HEnvironment* HEnvironment::CopyWithoutHistory() const {
HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const {
HEnvironment* new_env = Copy();
for (int i = 0; i < values_.length(); ++i) {
- HPhi* phi = new HPhi(i);
+ HPhi* phi = new(loop_header->zone()) HPhi(i);
phi->AddInput(values_[i]);
new_env->values_[i] = phi;
loop_header->AddPhi(phi);
@@ -5716,36 +6212,37 @@ HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const {
}
-HEnvironment* HEnvironment::CopyForInlining(Handle<JSFunction> target,
- FunctionLiteral* function,
- bool is_speculative,
- HConstant* undefined) const {
+HEnvironment* HEnvironment::CopyForInlining(
+ Handle<JSFunction> target,
+ FunctionLiteral* function,
+ HConstant* undefined,
+ CallKind call_kind) const {
// Outer environment is a copy of this one without the arguments.
int arity = function->scope()->num_parameters();
HEnvironment* outer = Copy();
outer->Drop(arity + 1); // Including receiver.
outer->ClearHistory();
- HEnvironment* inner = new HEnvironment(outer, function->scope(), target);
+ Zone* zone = closure()->GetIsolate()->zone();
+ HEnvironment* inner =
+ new(zone) HEnvironment(outer, function->scope(), target);
// Get the argument values from the original environment.
- if (is_speculative) {
- for (int i = 0; i <= arity; ++i) { // Include receiver.
- HValue* push = ExpressionStackAt(arity - i);
- inner->SetValueAt(i, push);
- }
- } else {
- for (int i = 0; i <= arity; ++i) { // Include receiver.
- inner->SetValueAt(i, ExpressionStackAt(arity - i));
- }
+ for (int i = 0; i <= arity; ++i) { // Include receiver.
+ HValue* push = ExpressionStackAt(arity - i);
+ inner->SetValueAt(i, push);
}
-
- // Initialize the stack-allocated locals to undefined.
- int local_base = arity + 1;
- int local_count = function->scope()->num_stack_slots();
- for (int i = 0; i < local_count; ++i) {
- inner->SetValueAt(local_base + i, undefined);
+ // If the function we are inlining is a strict mode function or a
+ // builtin function, pass undefined as the receiver for function
+ // calls (instead of the global receiver).
+ if ((target->shared()->native() || function->strict_mode()) &&
+ call_kind == CALL_AS_FUNCTION) {
+ inner->SetValueAt(0, undefined);
+ }
+ inner->SetValueAt(arity + 1, outer->LookupContext());
+ for (int i = arity + 2; i < inner->length(); ++i) {
+ inner->SetValueAt(i, undefined);
}
- inner->set_ast_id(function->id());
+ inner->set_ast_id(AstNode::kFunctionEntryId);
return inner;
}
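A minimal sketch of the new receiver rule above (CALL_AS_FUNCTION comes from the patch; the other names are illustrative): a strict-mode or native function invoked as a plain function call gets undefined in the receiver slot instead of the global receiver.

#include <cassert>
#include <string>

enum CallKind { CALL_AS_FUNCTION, CALL_AS_METHOD };

static std::string ReceiverFor(bool strict_or_native, CallKind kind,
                               const std::string& global_receiver) {
  if (strict_or_native && kind == CALL_AS_FUNCTION) return "undefined";
  return global_receiver;
}

int main() {
  assert(ReceiverFor(true, CALL_AS_FUNCTION, "globalThis") == "undefined");
  assert(ReceiverFor(true, CALL_AS_METHOD, "obj") == "obj");
  assert(ReceiverFor(false, CALL_AS_FUNCTION, "globalThis") == "globalThis");
  return 0;
}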
@@ -5753,8 +6250,11 @@ HEnvironment* HEnvironment::CopyForInlining(Handle<JSFunction> target,
void HEnvironment::PrintTo(StringStream* stream) {
for (int i = 0; i < length(); i++) {
if (i == 0) stream->Add("parameters\n");
- if (i == parameter_count()) stream->Add("locals\n");
- if (i == parameter_count() + local_count()) stream->Add("expressions");
+ if (i == parameter_count()) stream->Add("specials\n");
+ if (i == parameter_count() + specials_count()) stream->Add("locals\n");
+ if (i == parameter_count() + specials_count() + local_count()) {
+ stream->Add("expressions");
+ }
HValue* val = values_.at(i);
stream->Add("%d: ", i);
if (val != NULL) {
@@ -5816,15 +6316,15 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
PrintEmptyProperty("predecessors");
}
- if (current->end() == NULL || current->end()->FirstSuccessor() == NULL) {
+ if (current->end()->SuccessorCount() == 0) {
PrintEmptyProperty("successors");
- } else if (current->end()->SecondSuccessor() == NULL) {
- PrintBlockProperty("successors",
- current->end()->FirstSuccessor()->block_id());
- } else {
- PrintBlockProperty("successors",
- current->end()->FirstSuccessor()->block_id(),
- current->end()->SecondSuccessor()->block_id());
+ } else {
+ PrintIndent();
+ trace_.Add("successors");
+ for (HSuccessorIterator it(current->end()); !it.Done(); it.Advance()) {
+ trace_.Add(" \"B%d\"", it.Current()->block_id());
+ }
+ trace_.Add("\n");
}
PrintEmptyProperty("xhandlers");
@@ -5849,10 +6349,11 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
Tag states_tag(this, "states");
Tag locals_tag(this, "locals");
int total = current->phis()->length();
- trace_.Add("size %d\n", total);
- trace_.Add("method \"None\"");
+ PrintIntProperty("size", current->phis()->length());
+ PrintStringProperty("method", "None");
for (int j = 0; j < total; ++j) {
HPhi* phi = current->phis()->at(j);
+ PrintIndent();
trace_.Add("%d ", phi->merged_index());
phi->PrintNameTo(&trace_);
trace_.Add(" ");
@@ -5866,7 +6367,8 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
HInstruction* instruction = current->first();
while (instruction != NULL) {
int bci = 0;
- int uses = instruction->uses()->length();
+ int uses = instruction->UseCount();
+ PrintIndent();
trace_.Add("%d %d ", bci, uses);
instruction->PrintNameTo(&trace_);
trace_.Add(" ");
@@ -5886,6 +6388,7 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
for (int i = first_index; i <= last_index; ++i) {
LInstruction* linstr = instructions->at(i);
if (linstr != NULL) {
+ PrintIndent();
trace_.Add("%d ",
LifetimePosition::FromInstructionIndex(i).Value());
linstr->PrintTo(&trace_);
@@ -5902,12 +6405,12 @@ void HTracer::TraceLiveRanges(const char* name, LAllocator* allocator) {
Tag tag(this, "intervals");
PrintStringProperty("name", name);
- const ZoneList<LiveRange*>* fixed_d = allocator->fixed_double_live_ranges();
+ const Vector<LiveRange*>* fixed_d = allocator->fixed_double_live_ranges();
for (int i = 0; i < fixed_d->length(); ++i) {
TraceLiveRange(fixed_d->at(i), "fixed");
}
- const ZoneList<LiveRange*>* fixed = allocator->fixed_live_ranges();
+ const Vector<LiveRange*>* fixed = allocator->fixed_live_ranges();
for (int i = 0; i < fixed->length(); ++i) {
TraceLiveRange(fixed->at(i), "fixed");
}
@@ -5921,6 +6424,7 @@ void HTracer::TraceLiveRanges(const char* name, LAllocator* allocator) {
void HTracer::TraceLiveRange(LiveRange* range, const char* type) {
if (range != NULL && !range->IsEmpty()) {
+ PrintIndent();
trace_.Add("%d %s", range->id(), type);
if (range->HasRegisterAssigned()) {
LOperand* op = range->CreateAssignedOperand();
@@ -5978,6 +6482,11 @@ void HTracer::FlushToFile() {
}
+void HStatistics::Initialize(CompilationInfo* info) {
+ source_size_ += info->shared_info()->SourceSize();
+}
+
+
void HStatistics::Print() {
PrintF("Timing results:\n");
int64_t sum = 0;
@@ -5995,9 +6504,15 @@ void HStatistics::Print() {
double size_percent = static_cast<double>(size) * 100 / total_size_;
PrintF(" %8u bytes / %4.1f %%\n", size, size_percent);
}
- PrintF("%30s - %7.3f ms %8u bytes\n", "Sum",
- static_cast<double>(sum) / 1000,
- total_size_);
+ double source_size_in_kb = static_cast<double>(source_size_) / 1024;
+ double normalized_time = source_size_in_kb > 0
+ ? (static_cast<double>(sum) / 1000) / source_size_in_kb
+ : 0;
+ double normalized_bytes = source_size_in_kb > 0
+ ? total_size_ / source_size_in_kb
+ : 0;
+ PrintF("%30s - %7.3f ms %7.3f bytes\n", "Sum",
+ normalized_time, normalized_bytes);
PrintF("---------------------------------------------------------------\n");
PrintF("%30s - %7.3f ms (%.1f times slower than full code gen)\n",
"Total",
@@ -6042,13 +6557,13 @@ void HPhase::Begin(const char* name,
if (allocator != NULL && chunk_ == NULL) {
chunk_ = allocator->chunk();
}
- if (FLAG_time_hydrogen) start_ = OS::Ticks();
+ if (FLAG_hydrogen_stats) start_ = OS::Ticks();
start_allocation_size_ = Zone::allocation_size_;
}
void HPhase::End() const {
- if (FLAG_time_hydrogen) {
+ if (FLAG_hydrogen_stats) {
int64_t end = OS::Ticks();
unsigned size = Zone::allocation_size_ - start_allocation_size_;
HStatistics::Instance()->SaveTiming(name_, end - start_, size);
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index 16f0edeaa..9e3f316e7 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -30,16 +30,18 @@
#include "v8.h"
+#include "allocation.h"
#include "ast.h"
#include "compiler.h"
-#include "data-flow.h"
#include "hydrogen-instructions.h"
+#include "type-info.h"
#include "zone.h"
namespace v8 {
namespace internal {
// Forward declarations.
+class BitVector;
class HEnvironment;
class HGraph;
class HLoopInformation;
@@ -114,18 +116,18 @@ class HBasicBlock: public ZoneObject {
bool HasParentLoopHeader() const { return parent_loop_header_ != NULL; }
- void SetJoinId(int id);
+ void SetJoinId(int ast_id);
void Finish(HControlInstruction* last);
void FinishExit(HControlInstruction* instruction);
- void Goto(HBasicBlock* block, bool include_stack_check = false);
+ void Goto(HBasicBlock* block);
int PredecessorIndexOf(HBasicBlock* predecessor) const;
- void AddSimulate(int id) { AddInstruction(CreateSimulate(id)); }
+ void AddSimulate(int ast_id) { AddInstruction(CreateSimulate(ast_id)); }
void AssignCommonDominator(HBasicBlock* other);
- void FinishExitWithDeoptimization() {
- FinishExit(CreateDeoptimize());
+ void FinishExitWithDeoptimization(HDeoptimize::UseEnvironment has_uses) {
+ FinishExit(CreateDeoptimize(has_uses));
}
// Add the inlined function exit sequence, adding an HLeaveInlined
@@ -141,6 +143,11 @@ class HBasicBlock: public ZoneObject {
bool IsInlineReturnTarget() const { return is_inline_return_target_; }
void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }
+ bool IsDeoptimizing() const { return is_deoptimizing_; }
+ void MarkAsDeoptimizing() { is_deoptimizing_ = true; }
+
+ inline Zone* zone();
+
#ifdef DEBUG
void Verify();
#endif
@@ -149,8 +156,8 @@ class HBasicBlock: public ZoneObject {
void RegisterPredecessor(HBasicBlock* pred);
void AddDominatedBlock(HBasicBlock* block);
- HSimulate* CreateSimulate(int id);
- HDeoptimize* CreateDeoptimize();
+ HSimulate* CreateSimulate(int ast_id);
+ HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses);
int block_id_;
HGraph* graph_;
@@ -171,13 +178,17 @@ class HBasicBlock: public ZoneObject {
ZoneList<int> deleted_phis_;
HBasicBlock* parent_loop_header_;
bool is_inline_return_target_;
+ bool is_deoptimizing_;
};
class HLoopInformation: public ZoneObject {
public:
explicit HLoopInformation(HBasicBlock* loop_header)
- : back_edges_(4), loop_header_(loop_header), blocks_(8) {
+ : back_edges_(4),
+ loop_header_(loop_header),
+ blocks_(8),
+ stack_check_(NULL) {
blocks_.Add(loop_header);
}
virtual ~HLoopInformation() {}
@@ -188,71 +199,52 @@ class HLoopInformation: public ZoneObject {
HBasicBlock* GetLastBackEdge() const;
void RegisterBackEdge(HBasicBlock* block);
+ HStackCheck* stack_check() const { return stack_check_; }
+ void set_stack_check(HStackCheck* stack_check) {
+ stack_check_ = stack_check;
+ }
+
private:
void AddBlock(HBasicBlock* block);
ZoneList<HBasicBlock*> back_edges_;
HBasicBlock* loop_header_;
ZoneList<HBasicBlock*> blocks_;
+ HStackCheck* stack_check_;
};
-class HSubgraph: public ZoneObject {
- public:
- explicit HSubgraph(HGraph* graph)
- : graph_(graph),
- entry_block_(NULL),
- exit_block_(NULL) {
- }
-
- HGraph* graph() const { return graph_; }
- HBasicBlock* entry_block() const { return entry_block_; }
- HBasicBlock* exit_block() const { return exit_block_; }
- void set_exit_block(HBasicBlock* block) {
- exit_block_ = block;
- }
-
- void Initialize(HBasicBlock* block) {
- ASSERT(entry_block_ == NULL);
- entry_block_ = block;
- exit_block_ = block;
- }
-
- protected:
- HGraph* graph_; // The graph this is a subgraph of.
- HBasicBlock* entry_block_;
- HBasicBlock* exit_block_;
-};
-
-
-class HGraph: public HSubgraph {
+class HGraph: public ZoneObject {
public:
explicit HGraph(CompilationInfo* info);
- CompilationInfo* info() const { return info_; }
-
- bool AllowCodeMotion() const;
+ Isolate* isolate() { return isolate_; }
+ Zone* zone() { return isolate_->zone(); }
const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
- Handle<String> debug_name() const { return info_->function()->debug_name(); }
+ HBasicBlock* entry_block() const { return entry_block_; }
HEnvironment* start_environment() const { return start_environment_; }
void InitializeInferredTypes();
void InsertTypeConversions();
void InsertRepresentationChanges();
+ void MarkDeoptimizeOnUndefined();
void ComputeMinusZeroChecks();
bool ProcessArgumentsObject();
void EliminateRedundantPhis();
+ void EliminateUnreachablePhis();
void Canonicalize();
void OrderBlocks();
void AssignDominators();
+ void ReplaceCheckedValues();
+ void MarkAsDeoptimizingRecursively(HBasicBlock* block);
// Returns false if there are phi-uses of the arguments-object
// which are not supported by the optimizing compiler.
bool CollectPhis();
- Handle<Code> Compile();
+ Handle<Code> Compile(CompilationInfo* info);
void set_undefined_constant(HConstant* constant) {
undefined_constant_.set(constant);
@@ -262,6 +254,7 @@ class HGraph: public HSubgraph {
HConstant* GetConstantMinus1();
HConstant* GetConstantTrue();
HConstant* GetConstantFalse();
+ HConstant* GetConstantHole();
HBasicBlock* CreateBasicBlock();
HArgumentsObject* GetArgumentsObject() const {
@@ -273,9 +266,6 @@ class HGraph: public HSubgraph {
arguments_object_.set(object);
}
- // True iff. we are compiling for OSR and the statement is the entry.
- bool HasOsrEntryAt(IterationStatement* statement);
-
int GetMaximumValueID() const { return values_.length(); }
int GetNextBlockID() { return next_block_id_++; }
int GetNextValueID(HValue* value) {
@@ -305,17 +295,19 @@ class HGraph: public HSubgraph {
void InsertTypeConversions(HInstruction* instr);
void PropagateMinusZeroChecks(HValue* value, BitVector* visited);
+ void RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi);
void InsertRepresentationChangeForUse(HValue* value,
- HValue* use,
- Representation to,
- bool truncating);
- void InsertRepresentationChanges(HValue* current);
+ HValue* use_value,
+ int use_index,
+ Representation to);
+ void InsertRepresentationChangesForValue(HValue* value);
void InferTypes(ZoneList<HValue*>* worklist);
void InitializeInferredTypes(int from_inclusive, int to_inclusive);
void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
+ Isolate* isolate_;
int next_block_id_;
- CompilationInfo* info_;
+ HBasicBlock* entry_block_;
HEnvironment* start_environment_;
ZoneList<HBasicBlock*> blocks_;
ZoneList<HValue*> values_;
@@ -325,14 +317,16 @@ class HGraph: public HSubgraph {
SetOncePointer<HConstant> constant_minus1_;
SetOncePointer<HConstant> constant_true_;
SetOncePointer<HConstant> constant_false_;
+ SetOncePointer<HConstant> constant_hole_;
SetOncePointer<HArgumentsObject> arguments_object_;
- friend class HSubgraph;
-
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
+Zone* HBasicBlock::zone() { return graph_->zone(); }
+
+
class HEnvironment: public ZoneObject {
public:
HEnvironment(HEnvironment* outer,
@@ -346,6 +340,7 @@ class HEnvironment: public ZoneObject {
return &assigned_variables_;
}
int parameter_count() const { return parameter_count_; }
+ int specials_count() const { return specials_count_; }
int local_count() const { return local_count_; }
HEnvironment* outer() const { return outer_; }
int pop_count() const { return pop_count_; }
@@ -355,6 +350,9 @@ class HEnvironment: public ZoneObject {
void set_ast_id(int id) { ast_id_ = id; }
int length() const { return values_.length(); }
+ bool is_special_index(int i) const {
+ return i >= parameter_count() && i < parameter_count() + specials_count();
+ }
void Bind(Variable* variable, HValue* value) {
Bind(IndexFor(variable), value);
@@ -362,6 +360,10 @@ class HEnvironment: public ZoneObject {
void Bind(int index, HValue* value);
+ void BindContext(HValue* value) {
+ Bind(parameter_count(), value);
+ }
+
HValue* Lookup(Variable* variable) const {
return Lookup(IndexFor(variable));
}
@@ -372,6 +374,11 @@ class HEnvironment: public ZoneObject {
return result;
}
+ HValue* LookupContext() const {
+ // Return first special.
+ return Lookup(parameter_count());
+ }
+
void Push(HValue* value) {
ASSERT(value != NULL);
++push_count_;
@@ -392,6 +399,8 @@ class HEnvironment: public ZoneObject {
HValue* Top() const { return ExpressionStackAt(0); }
+ bool ExpressionStackIsEmpty() const;
+
HValue* ExpressionStackAt(int index_from_top) const {
int index = length() - index_from_top - 1;
ASSERT(HasExpressionAt(index));
@@ -406,20 +415,18 @@ class HEnvironment: public ZoneObject {
// Create an "inlined version" of this environment, where the original
// environment is the outer environment but the top expression stack
- // elements are moved to an inner environment as parameters. If
- // is_speculative, the argument values are expected to be PushArgument
- // instructions, otherwise they are the actual values.
+ // elements are moved to an inner environment as parameters.
HEnvironment* CopyForInlining(Handle<JSFunction> target,
FunctionLiteral* function,
- bool is_speculative,
- HConstant* undefined) const;
+ HConstant* undefined,
+ CallKind call_kind) const;
void AddIncomingEdge(HBasicBlock* block, HEnvironment* other);
void ClearHistory() {
pop_count_ = 0;
push_count_ = 0;
- assigned_variables_.Clear();
+ assigned_variables_.Rewind(0);
}
void SetValueAt(int index, HValue* value) {
@@ -436,8 +443,6 @@ class HEnvironment: public ZoneObject {
// True if index is included in the expression stack part of the environment.
bool HasExpressionAt(int index) const;
- bool ExpressionStackIsEmpty() const;
-
void Initialize(int parameter_count, int local_count, int stack_height);
void Initialize(const HEnvironment* other);
@@ -447,15 +452,18 @@ class HEnvironment: public ZoneObject {
int IndexFor(Variable* variable) const {
Slot* slot = variable->AsSlot();
ASSERT(slot != NULL && slot->IsStackAllocated());
- int shift = (slot->type() == Slot::PARAMETER) ? 1 : parameter_count_;
+ int shift = (slot->type() == Slot::PARAMETER)
+ ? 1
+ : parameter_count_ + specials_count_;
return slot->index() + shift;
}
Handle<JSFunction> closure_;
- // Value array [parameters] [locals] [temporaries].
+ // Value array [parameters] [specials] [locals] [temporaries].
ZoneList<HValue*> values_;
ZoneList<int> assigned_variables_;
int parameter_count_;
+ int specials_count_;
int local_count_;
HEnvironment* outer_;
int pop_count_;
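A minimal sketch of the index arithmetic implied by the new value-array layout [parameters] [specials] [locals] [temporaries]: with specials_count_ fixed at 1 (the context), a stack-allocated local's index shifts by parameter_count_ + specials_count_, as IndexFor above computes.

#include <cassert>

static int IndexForLocal(int slot_index, int parameter_count,
                         int specials_count) {
  // Locals come after the parameters and the specials (the context slot).
  return slot_index + parameter_count + specials_count;
}

int main() {
  // 2 parameters and 1 special: local slot 0 lives at value index 3.
  assert(IndexForLocal(0, 2, 1) == 3);
  assert(IndexForLocal(4, 2, 1) == 7);
  return 0;
}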
@@ -466,6 +474,13 @@ class HEnvironment: public ZoneObject {
class HGraphBuilder;
+enum ArgumentsAllowedFlag {
+ ARGUMENTS_NOT_ALLOWED,
+ ARGUMENTS_ALLOWED
+};
+
+// This class is not BASE_EMBEDDED because our inlining implementation uses
+// new and delete.
class AstContext {
public:
bool IsEffect() const { return kind_ == Expression::kEffect; }
@@ -483,12 +498,17 @@ class AstContext {
// the instruction as value.
virtual void ReturnInstruction(HInstruction* instr, int ast_id) = 0;
+ void set_for_typeof(bool for_typeof) { for_typeof_ = for_typeof; }
+ bool is_for_typeof() { return for_typeof_; }
+
protected:
AstContext(HGraphBuilder* owner, Expression::Context kind);
virtual ~AstContext();
HGraphBuilder* owner() const { return owner_; }
+ inline Zone* zone();
+
// We want to be able to assert, in a context-specific way, that the stack
// height makes sense when the context is filled.
#ifdef DEBUG
@@ -499,6 +519,7 @@ class AstContext {
HGraphBuilder* owner_;
Expression::Context kind_;
AstContext* outer_;
+ bool for_typeof_;
};
@@ -516,22 +537,29 @@ class EffectContext: public AstContext {
class ValueContext: public AstContext {
public:
- explicit ValueContext(HGraphBuilder* owner)
- : AstContext(owner, Expression::kValue) {
+ explicit ValueContext(HGraphBuilder* owner, ArgumentsAllowedFlag flag)
+ : AstContext(owner, Expression::kValue), flag_(flag) {
}
virtual ~ValueContext();
virtual void ReturnValue(HValue* value);
virtual void ReturnInstruction(HInstruction* instr, int ast_id);
+
+ bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
+
+ private:
+ ArgumentsAllowedFlag flag_;
};
class TestContext: public AstContext {
public:
TestContext(HGraphBuilder* owner,
+ Expression* condition,
HBasicBlock* if_true,
HBasicBlock* if_false)
: AstContext(owner, Expression::kTest),
+ condition_(condition),
if_true_(if_true),
if_false_(if_false) {
}
@@ -544,6 +572,7 @@ class TestContext: public AstContext {
return reinterpret_cast<TestContext*>(context);
}
+ Expression* condition() const { return condition_; }
HBasicBlock* if_true() const { return if_true_; }
HBasicBlock* if_false() const { return if_false_; }
@@ -552,11 +581,55 @@ class TestContext: public AstContext {
// control flow.
void BuildBranch(HValue* value);
+ Expression* condition_;
HBasicBlock* if_true_;
HBasicBlock* if_false_;
};
+class FunctionState BASE_EMBEDDED {
+ public:
+ FunctionState(HGraphBuilder* owner,
+ CompilationInfo* info,
+ TypeFeedbackOracle* oracle);
+ ~FunctionState();
+
+ CompilationInfo* compilation_info() { return compilation_info_; }
+ TypeFeedbackOracle* oracle() { return oracle_; }
+ AstContext* call_context() { return call_context_; }
+ HBasicBlock* function_return() { return function_return_; }
+ TestContext* test_context() { return test_context_; }
+ void ClearInlinedTestContext() {
+ delete test_context_;
+ test_context_ = NULL;
+ }
+
+ FunctionState* outer() { return outer_; }
+
+ private:
+ HGraphBuilder* owner_;
+
+ CompilationInfo* compilation_info_;
+ TypeFeedbackOracle* oracle_;
+
+ // During function inlining, expression context of the call being
+ // inlined. NULL when not inlining.
+ AstContext* call_context_;
+
+ // When inlining in an effect or value context, this is the return block.
+ // It is NULL otherwise. When inlining in a test context, there is a
+ // pair of return blocks in the context. When not inlining, there is no
+ // local return point.
+ HBasicBlock* function_return_;
+
+ // When inlining a call in a test context, a context containing a pair of
+ // return blocks. NULL in all other cases.
+ TestContext* test_context_;
+
+ FunctionState* outer_;
+};
+
+
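A minimal sketch of the stack discipline FunctionState sets up (names illustrative): each inlining level pushes itself in the constructor and the destructor restores the outer state, so the chain unwinds with scope.

#include <cassert>

struct State;
struct Builder { State* current = nullptr; };

struct State {
  Builder* owner;
  State* outer;
  explicit State(Builder* b) : owner(b), outer(b->current) { b->current = this; }
  ~State() { owner->current = outer; }
};

int main() {
  Builder b;
  {
    State top(&b);
    {
      State inlined(&b);  // entering an inlined function
      assert(b.current == &inlined && inlined.outer == &top);
    }
    assert(b.current == &top);  // restored when inlining ends
  }
  assert(b.current == nullptr);
  return 0;
}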
class HGraphBuilder: public AstVisitor {
public:
enum BreakType { BREAK, CONTINUE };
@@ -606,42 +679,33 @@ class HGraphBuilder: public AstVisitor {
BreakAndContinueScope* next_;
};
- explicit HGraphBuilder(TypeFeedbackOracle* oracle)
- : oracle_(oracle),
- graph_(NULL),
- current_subgraph_(NULL),
- peeled_statement_(NULL),
- ast_context_(NULL),
- call_context_(NULL),
- function_return_(NULL),
- inlined_count_(0),
- break_scope_(NULL) {
- }
+ HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
- HGraph* CreateGraph(CompilationInfo* info);
+ HGraph* CreateGraph();
// Simple accessors.
HGraph* graph() const { return graph_; }
- HSubgraph* subgraph() const { return current_subgraph_; }
BreakAndContinueScope* break_scope() const { return break_scope_; }
void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
- HBasicBlock* current_block() const { return subgraph()->exit_block(); }
- void set_current_block(HBasicBlock* block) {
- subgraph()->set_exit_block(block);
- }
+ HBasicBlock* current_block() const { return current_block_; }
+ void set_current_block(HBasicBlock* block) { current_block_ = block; }
HEnvironment* environment() const {
return current_block()->last_environment();
}
+ bool inline_bailout() { return inline_bailout_; }
+
// Adding instructions.
HInstruction* AddInstruction(HInstruction* instr);
- void AddSimulate(int id);
+ void AddSimulate(int ast_id);
// Bailout environment manipulation.
void Push(HValue* value) { environment()->Push(value); }
HValue* Pop() { return environment()->Pop(); }
+ void Bailout(const char* reason);
+
private:
// Type of a member function that generates inline code for a native function.
typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
@@ -660,11 +724,33 @@ class HGraphBuilder: public AstVisitor {
static const int kMaxSourceSize = 600;
// Simple accessors.
- TypeFeedbackOracle* oracle() const { return oracle_; }
+ FunctionState* function_state() const { return function_state_; }
+ void set_function_state(FunctionState* state) { function_state_ = state; }
+
AstContext* ast_context() const { return ast_context_; }
void set_ast_context(AstContext* context) { ast_context_ = context; }
- AstContext* call_context() const { return call_context_; }
- HBasicBlock* function_return() const { return function_return_; }
+
+ // Accessors forwarded to the function state.
+ CompilationInfo* info() const {
+ return function_state()->compilation_info();
+ }
+ TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
+
+ AstContext* call_context() const {
+ return function_state()->call_context();
+ }
+ HBasicBlock* function_return() const {
+ return function_state()->function_return();
+ }
+ TestContext* inlined_test_context() const {
+ return function_state()->test_context();
+ }
+ void ClearInlinedTestContext() {
+ function_state()->ClearInlinedTestContext();
+ }
+ bool function_strict_mode() {
+ return function_state()->compilation_info()->is_strict_mode();
+ }
// Generators for inline runtime functions.
#define INLINE_FUNCTION_GENERATOR_DECLARATION(Name, argc, ressize) \
@@ -674,49 +760,65 @@ class HGraphBuilder: public AstVisitor {
INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
#undef INLINE_FUNCTION_GENERATOR_DECLARATION
- void Bailout(const char* reason);
+ void VisitDelete(UnaryOperation* expr);
+ void VisitVoid(UnaryOperation* expr);
+ void VisitTypeof(UnaryOperation* expr);
+ void VisitAdd(UnaryOperation* expr);
+ void VisitSub(UnaryOperation* expr);
+ void VisitBitNot(UnaryOperation* expr);
+ void VisitNot(UnaryOperation* expr);
+
+ void VisitComma(BinaryOperation* expr);
+ void VisitLogicalExpression(BinaryOperation* expr);
+ void VisitArithmeticExpression(BinaryOperation* expr);
void PreProcessOsrEntry(IterationStatement* statement);
+  // True if and only if we are compiling for OSR and the statement is the
+  // entry.
+ bool HasOsrEntryAt(IterationStatement* statement);
+ void VisitLoopBody(Statement* body,
+ HBasicBlock* loop_entry,
+ BreakAndContinueInfo* break_info);
HBasicBlock* CreateJoin(HBasicBlock* first,
HBasicBlock* second,
int join_id);
- HBasicBlock* CreateWhile(IterationStatement* statement,
- HBasicBlock* loop_entry,
- HBasicBlock* cond_false,
- HBasicBlock* body_exit,
- HBasicBlock* break_block);
- HBasicBlock* CreateDoWhile(IterationStatement* statement,
- HBasicBlock* body_entry,
- HBasicBlock* go_back,
- HBasicBlock* exit_block,
- HBasicBlock* break_block);
- HBasicBlock* CreateEndless(IterationStatement* statement,
- HBasicBlock* body_entry,
- HBasicBlock* body_exit,
- HBasicBlock* break_block);
+
+ // Create a back edge in the flow graph. body_exit is the predecessor
+ // block and loop_entry is the successor block. loop_successor is the
+ // block where control flow exits the loop normally (e.g., via failure of
+ // the condition) and break_block is the block where control flow breaks
+ // from the loop. All blocks except loop_entry can be NULL. The return
+ // value is the new successor block which is the join of loop_successor
+ // and break_block, or NULL.
+ HBasicBlock* CreateLoop(IterationStatement* statement,
+ HBasicBlock* loop_entry,
+ HBasicBlock* body_exit,
+ HBasicBlock* loop_successor,
+ HBasicBlock* break_block);
+
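// Illustrative call site (a sketch only; the real VisitWhileStatement in
// hydrogen.cc also handles OSR entries and abrupt exits):
//
//   HBasicBlock* loop_entry = CreateLoopHeaderBlock();
//   current_block()->Goto(loop_entry);
//   set_current_block(loop_entry);
//   HBasicBlock* body_entry = CreateBasicBlock(environment()->Copy());
//   HBasicBlock* loop_successor = CreateBasicBlock(environment()->Copy());
//   VisitForControl(stmt->cond(), body_entry, loop_successor);
//   ...                                // visit the body from body_entry
//   HBasicBlock* loop_exit = CreateLoop(stmt,
//                                       loop_entry,
//                                       current_block(),   // body_exit
//                                       loop_successor,
//                                       break_info.break_block());
//   set_current_block(loop_exit);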
HBasicBlock* JoinContinue(IterationStatement* statement,
HBasicBlock* exit_block,
HBasicBlock* continue_block);
-
- void AddToSubgraph(HSubgraph* graph, ZoneList<Statement*>* stmts);
- void AddToSubgraph(HSubgraph* graph, Statement* stmt);
- void AddToSubgraph(HSubgraph* graph, Expression* expr);
-
HValue* Top() const { return environment()->Top(); }
void Drop(int n) { environment()->Drop(n); }
void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
- void VisitForValue(Expression* expr);
+ // The value of the arguments object is allowed in some but not most value
+ // contexts. (It's allowed in all effect contexts and disallowed in all
+ // test contexts.)
+ void VisitForValue(Expression* expr,
+ ArgumentsAllowedFlag flag = ARGUMENTS_NOT_ALLOWED);
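// For example (the exact call sites are an assumption; they live in
// hydrogen.cc):
//   VisitForValue(expr);                              // default: disallowed
//   VisitForValue(expr->value(), ARGUMENTS_ALLOWED);  // e.g. x = arguments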
+ void VisitForTypeOf(Expression* expr);
void VisitForEffect(Expression* expr);
void VisitForControl(Expression* expr,
HBasicBlock* true_block,
HBasicBlock* false_block);
// Visit an argument subexpression and emit a push to the outgoing
- // arguments.
- void VisitArgument(Expression* expr);
+ // arguments. Returns the hydrogen value that was pushed.
+ HValue* VisitArgument(Expression* expr);
+
void VisitArgumentList(ZoneList<Expression*>* arguments);
// Visit a list of expressions from left to right, each in a value context.
@@ -730,7 +832,11 @@ class HGraphBuilder: public AstVisitor {
// to push them as outgoing parameters.
template <int V> HInstruction* PreProcessCall(HCall<V>* call);
- void AssumeRepresentation(HValue* value, Representation r);
+ void TraceRepresentation(Token::Value op,
+ TypeInfo info,
+ HValue* value,
+ Representation rep);
+ void AssumeRepresentation(HValue* value, Representation rep);
static Representation ToRepresentation(TypeInfo info);
void SetupScope(Scope* scope);
@@ -741,18 +847,16 @@ class HGraphBuilder: public AstVisitor {
#undef DECLARE_VISIT
HBasicBlock* CreateBasicBlock(HEnvironment* env);
- HSubgraph* CreateEmptySubgraph();
- HSubgraph* CreateGotoSubgraph(HEnvironment* env);
- HSubgraph* CreateBranchSubgraph(HEnvironment* env);
- HBasicBlock* CreateLoopHeader();
- HSubgraph* CreateInlinedSubgraph(HEnvironment* outer,
- Handle<JSFunction> target,
- FunctionLiteral* function);
+ HBasicBlock* CreateLoopHeaderBlock();
// Helpers for flow graph construction.
- void LookupGlobalPropertyCell(Variable* var,
- LookupResult* lookup,
- bool is_store);
+ enum GlobalPropertyAccess {
+ kUseCell,
+ kUseGeneric
+ };
+ GlobalPropertyAccess LookupGlobalProperty(Variable* var,
+ LookupResult* lookup,
+ bool is_store);
bool TryArgumentsAccess(Property* expr);
bool TryCallApply(Call* expr);
@@ -761,7 +865,13 @@ class HGraphBuilder: public AstVisitor {
HValue* receiver,
Handle<Map> receiver_map,
CheckType check_type);
- void TraceInline(Handle<JSFunction> target, bool result);
+
+ // If --trace-inlining, print a line of the inlining trace. Inlining
+ // succeeded if the reason string is NULL and failed if there is a
+ // non-NULL reason string.
+ void TraceInline(Handle<JSFunction> target,
+ Handle<JSFunction> caller,
+ const char* failure_reason);
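// Expected usage per the comment above (a sketch; the reason strings are
// assumptions):
//   TraceInline(target, caller, NULL);                   // inlining succeeded
//   TraceInline(target, caller, "target text too big");  // inlining failed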
void HandleGlobalVariableAssignment(Variable* var,
HValue* value,
@@ -770,10 +880,6 @@ class HGraphBuilder: public AstVisitor {
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
- void HandlePolymorphicLoadNamedField(Property* expr,
- HValue* object,
- ZoneMapList* types,
- Handle<String> name);
void HandlePolymorphicStoreNamedField(Assignment* expr,
HValue* object,
HValue* value,
@@ -783,27 +889,56 @@ class HGraphBuilder: public AstVisitor {
HValue* receiver,
ZoneMapList* types,
Handle<String> name);
+ void HandleLiteralCompareTypeof(CompareOperation* compare_expr,
+ Expression* expr,
+ Handle<String> check);
+ void HandleLiteralCompareUndefined(CompareOperation* compare_expr,
+ Expression* expr);
HStringCharCodeAt* BuildStringCharCodeAt(HValue* string,
HValue* index);
HInstruction* BuildBinaryOperation(BinaryOperation* expr,
HValue* left,
HValue* right);
- HInstruction* BuildIncrement(HValue* value, bool increment);
+ HInstruction* BuildIncrement(bool returns_original_input,
+ CountOperation* expr);
HLoadNamedField* BuildLoadNamedField(HValue* object,
Property* expr,
Handle<Map> type,
LookupResult* result,
bool smi_and_map_check);
HInstruction* BuildLoadNamedGeneric(HValue* object, Property* expr);
- HInstruction* BuildLoadKeyedFastElement(HValue* object,
- HValue* key,
- Property* expr);
- HInstruction* BuildLoadKeyedPixelArrayElement(HValue* object,
- HValue* key,
- Property* expr);
HInstruction* BuildLoadKeyedGeneric(HValue* object,
HValue* key);
+ HInstruction* BuildExternalArrayElementAccess(
+ HValue* external_elements,
+ HValue* checked_key,
+ HValue* val,
+ JSObject::ElementsKind elements_kind,
+ bool is_store);
+
+ HInstruction* BuildMonomorphicElementAccess(HValue* object,
+ HValue* key,
+ HValue* val,
+ Expression* expr,
+ bool is_store);
+ HValue* HandlePolymorphicElementAccess(HValue* object,
+ HValue* key,
+ HValue* val,
+ Expression* prop,
+ int ast_id,
+ int position,
+ bool is_store,
+ bool* has_side_effects);
+
+ HValue* HandleKeyedElementAccess(HValue* obj,
+ HValue* key,
+ HValue* val,
+ Expression* expr,
+ int ast_id,
+ int position,
+ bool is_store,
+ bool* has_side_effects);
HInstruction* BuildLoadNamed(HValue* object,
Property* prop,
@@ -825,20 +960,6 @@ class HGraphBuilder: public AstVisitor {
HValue* key,
HValue* value);
- HInstruction* BuildStoreKeyedFastElement(HValue* object,
- HValue* key,
- HValue* val,
- Expression* expr);
-
- HInstruction* BuildStoreKeyedPixelArrayElement(HValue* object,
- HValue* key,
- HValue* val,
- Expression* expr);
-
- HCompare* BuildSwitchCompare(HSubgraph* subgraph,
- HValue* switch_value,
- CaseClause* clause);
-
HValue* BuildContextChainWalk(Variable* var);
void AddCheckConstantFunction(Call* expr,
@@ -846,40 +967,40 @@ class HGraphBuilder: public AstVisitor {
Handle<Map> receiver_map,
bool smi_and_map_check);
+ Zone* zone() { return zone_; }
- HBasicBlock* BuildTypeSwitch(HValue* receiver,
- ZoneMapList* maps,
- ZoneList<HSubgraph*>* body_graphs,
- HSubgraph* default_graph,
- int join_id);
+ // The translation state of the currently-being-translated function.
+ FunctionState* function_state_;
+
+ // The base of the function state stack.
+ FunctionState initial_function_state_;
- TypeFeedbackOracle* oracle_;
- HGraph* graph_;
- HSubgraph* current_subgraph_;
- IterationStatement* peeled_statement_;
// Expression context of the currently visited subexpression. NULL when
// visiting statements.
AstContext* ast_context_;
- // During function inlining, expression context of the call being
- // inlined. NULL when not inlining.
- AstContext* call_context_;
+ // A stack of breakable statements entered.
+ BreakAndContinueScope* break_scope_;
- // When inlining a call in an effect or value context, the return
- // block. NULL otherwise. When inlining a call in a test context, there
- // are a pair of target blocks in the call context.
- HBasicBlock* function_return_;
+ HGraph* graph_;
+ HBasicBlock* current_block_;
int inlined_count_;
- BreakAndContinueScope* break_scope_;
+ Zone* zone_;
+
+ bool inline_bailout_;
+ friend class FunctionState; // Pushes and pops the state stack.
friend class AstContext; // Pushes and pops the AST context stack.
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
};
+Zone* AstContext::zone() { return owner_->zone(); }
+
+
class HValueMap: public ZoneObject {
public:
HValueMap()
@@ -902,7 +1023,12 @@ class HValueMap: public ZoneObject {
}
HValue* Lookup(HValue* value) const;
- HValueMap* Copy() const { return new HValueMap(this); }
+
+ HValueMap* Copy(Zone* zone) const {
+ return new(zone) HValueMap(zone, this);
+ }
+
+ bool IsEmpty() const { return count_ == 0; }
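// Sketch of the intended use (the exact GVN call site is an assumption):
// forking the value table in zone memory at a control-flow split.
//   HValueMap* successor_map = dominator_map->Copy(zone());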
private:
// A linked list of HValue* values. Stored in arrays.
@@ -915,7 +1041,7 @@ class HValueMap: public ZoneObject {
// Must be a power of 2.
static const int kInitialSize = 16;
- explicit HValueMap(const HValueMap* other);
+ HValueMap(Zone* zone, const HValueMap* other);
void Resize(int new_size);
void ResizeLists(int new_size);
@@ -935,6 +1061,7 @@ class HValueMap: public ZoneObject {
class HStatistics: public Malloced {
public:
+ void Initialize(CompilationInfo* info);
void Print();
void SaveTiming(const char* name, int64_t ticks, unsigned size);
static HStatistics* Instance() {
@@ -946,14 +1073,14 @@ class HStatistics: public Malloced {
}
private:
-
HStatistics()
: timing_(5),
names_(5),
sizes_(5),
total_(0),
total_size_(0),
- full_code_gen_(0) { }
+ full_code_gen_(0),
+ source_size_(0) { }
List<int64_t> timing_;
List<const char*> names_;
@@ -961,6 +1088,7 @@ class HStatistics: public Malloced {
int64_t total_;
unsigned total_size_;
int64_t full_code_gen_;
+ double source_size_;
};
@@ -1068,11 +1196,6 @@ class HTracer: public Malloced {
trace_.Add("%s \"B%d\"\n", name, block_id);
}
- void PrintBlockProperty(const char* name, int block_id1, int block_id2) {
- PrintIndent();
- trace_.Add("%s \"B%d\" \"B%d\"\n", name, block_id1, block_id2);
- }
-
void PrintIntProperty(const char* name, int value) {
PrintIndent();
trace_.Add("%s %d\n", name, value);
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index d5fd7b87b..0ca2d6b4a 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -30,7 +30,7 @@
// The original source code covered by the above license has been
// modified significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// A light-weight IA32 Assembler.
@@ -204,11 +204,12 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitExternalReference(target_reference_address());
CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (Debug::has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
+ // TODO(isolates): Get a cached isolate below.
+ } else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ IsPatchedDebugBreakSlotSequence())) &&
+ Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
@@ -218,25 +219,25 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
template<typename StaticVisitor>
-void RelocInfo::Visit() {
+void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(target_object_address());
+ StaticVisitor::VisitPointer(heap, target_object_address());
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(this);
+ StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(this);
+ StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (Debug::has_break_points() &&
+ } else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(this);
+ StaticVisitor::VisitDebugTarget(heap, this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
StaticVisitor::VisitRuntimeEntry(this);
@@ -266,7 +267,7 @@ Immediate::Immediate(Label* internal_offset) {
Immediate::Immediate(Handle<Object> handle) {
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!Heap::InNewSpace(obj));
+ ASSERT(!HEAP->InNewSpace(obj));
if (obj->IsHeapObject()) {
x_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
@@ -299,7 +300,7 @@ void Assembler::emit(uint32_t x) {
void Assembler::emit(Handle<Object> handle) {
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!Heap::InNewSpace(obj));
+ ASSERT(!isolate()->heap()->InNewSpace(obj));
if (obj->IsHeapObject()) {
emit(reinterpret_cast<intptr_t>(handle.location()),
RelocInfo::EMBEDDED_OBJECT);
@@ -310,8 +311,12 @@ void Assembler::emit(Handle<Object> handle) {
}
-void Assembler::emit(uint32_t x, RelocInfo::Mode rmode) {
- if (rmode != RelocInfo::NONE) RecordRelocInfo(rmode);
+void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, unsigned id) {
+ if (rmode == RelocInfo::CODE_TARGET && id != kNoASTId) {
+ RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, static_cast<intptr_t>(id));
+ } else if (rmode != RelocInfo::NONE) {
+ RecordRelocInfo(rmode);
+ }
emit(x);
}
@@ -375,6 +380,18 @@ void Assembler::emit_disp(Label* L, Displacement::Type type) {
}
+void Assembler::emit_near_disp(Label* L) {
+ byte disp = 0x00;
+ if (L->is_near_linked()) {
+ int offset = L->near_link_pos() - pc_offset();
+ ASSERT(is_int8(offset));
+ disp = static_cast<byte>(offset & 0xFF);
+ }
+ L->link_to(pc_offset(), Label::kNear);
+ *pc_++ = disp;
+}
+
+
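// How the near-link chain above is encoded (illustrative): each unresolved
// near branch stores, in its single displacement byte, the non-positive
// offset from its own fixup position back to the previous fixup, and the
// Label remembers only the most recent one.  For three forward near jumps
// with displacement bytes at positions p0 < p1 < p2:
//
//   byte at p0: 0x00       (end of chain)
//   byte at p1: p0 - p1    (negative int8)
//   byte at p2: p1 - p2    (negative int8)
//   L->near_link_pos() == p2
//
// bind_to() later walks this chain backwards and patches each byte with
// the real forward displacement.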
void Operand::set_modrm(int mod, Register rm) {
ASSERT((mod & -4) == 0);
buf_[0] = mod << 6 | rm.code();
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 6652df27c..0dc519407 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -32,7 +32,7 @@
// The original source code covered by the above license has been modified
// significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -48,23 +48,37 @@ namespace internal {
// -----------------------------------------------------------------------------
// Implementation of CpuFeatures
-// Safe default is no features.
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
uint64_t CpuFeatures::supported_ = 0;
-uint64_t CpuFeatures::enabled_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
-// The Probe method needs executable memory, so it uses Heap::CreateCode.
-// Allocation failure is silent and leads to safe default.
-void CpuFeatures::Probe(bool portable) {
- ASSERT(Heap::HasBeenSetup());
+void CpuFeatures::Probe() {
+ ASSERT(!initialized_);
ASSERT(supported_ == 0);
- if (portable && Serializer::enabled()) {
+#ifdef DEBUG
+ initialized_ = true;
+#endif
+ if (Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
return; // No features if we might serialize.
}
- Assembler assm(NULL, 0);
+ const int kBufferSize = 4 * KB;
+ VirtualMemory* memory = new VirtualMemory(kBufferSize);
+ if (!memory->IsReserved()) {
+ delete memory;
+ return;
+ }
+ ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
+ if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
+ delete memory;
+ return;
+ }
+
+ Assembler assm(NULL, memory->address(), kBufferSize);
Label cpuid, done;
#define __ assm.
// Save old esp, since we are going to modify the stack.
@@ -118,26 +132,15 @@ void CpuFeatures::Probe(bool portable) {
__ ret(0);
#undef __
- CodeDesc desc;
- assm.GetCode(&desc);
-
- Object* code;
- { MaybeObject* maybe_code = Heap::CreateCode(desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>::null());
- if (!maybe_code->ToObject(&code)) return;
- }
- if (!code->IsCode()) return;
-
- PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
- Code::cast(code), "CpuFeatures::Probe"));
typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+ F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
supported_ = probe();
found_by_runtime_probing_ = supported_;
uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
supported_ |= os_guarantees;
- found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
+ found_by_runtime_probing_ &= ~os_guarantees;
+
+ delete memory;
}
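// Expected startup order (a sketch; the actual call site is in V8's
// initialization code): Probe() runs once before any generated code asks
// CpuFeatures::IsSupported(), since the feature bits come from executing
// the CPUID stub assembled directly into the executable buffer above.
//   CpuFeatures::Probe();
//   if (CpuFeatures::IsSupported(SSE2)) { /* use SSE2 code paths */ }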
@@ -295,19 +298,18 @@ bool Operand::is_reg(Register reg) const {
static void InitCoverageLog();
#endif
-// Spare buffer.
-byte* Assembler::spare_buffer_ = NULL;
-
-Assembler::Assembler(void* buffer, int buffer_size)
- : positions_recorder_(this) {
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+ : AssemblerBase(arg_isolate),
+ positions_recorder_(this),
+ emit_debug_code_(FLAG_debug_code) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
- if (spare_buffer_ != NULL) {
- buffer = spare_buffer_;
- spare_buffer_ = NULL;
+ if (isolate()->assembler_spare_buffer() != NULL) {
+ buffer = isolate()->assembler_spare_buffer();
+ isolate()->set_assembler_spare_buffer(NULL);
}
}
if (buffer == NULL) {
@@ -339,7 +341,6 @@ Assembler::Assembler(void* buffer, int buffer_size)
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
- last_pc_ = NULL;
#ifdef GENERATED_CODE_COVERAGE
InitCoverageLog();
#endif
@@ -348,8 +349,9 @@ Assembler::Assembler(void* buffer, int buffer_size)
Assembler::~Assembler() {
if (own_buffer_) {
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
+ if (isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
@@ -367,8 +369,6 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->origin = this;
-
- Counters::reloc_info_size.Increment(desc->reloc_size);
}
@@ -388,7 +388,6 @@ void Assembler::CodeTargetAlign() {
void Assembler::cpuid() {
ASSERT(CpuFeatures::IsEnabled(CPUID));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x0F);
EMIT(0xA2);
}
@@ -396,35 +395,30 @@ void Assembler::cpuid() {
void Assembler::pushad() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x60);
}
void Assembler::popad() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x61);
}
void Assembler::pushfd() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x9C);
}
void Assembler::popfd() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x9D);
}
void Assembler::push(const Immediate& x) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (x.is_int8()) {
EMIT(0x6a);
EMIT(x.x_);
@@ -444,140 +438,33 @@ void Assembler::push_imm32(int32_t imm32) {
void Assembler::push(Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x50 | src.code());
}
void Assembler::push(const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xFF);
emit_operand(esi, src);
}
+void Assembler::push(Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x68);
+ emit(handle);
+}
+
+
void Assembler::pop(Register dst) {
ASSERT(reloc_info_writer.last_pc() != NULL);
- if (FLAG_peephole_optimization && (reloc_info_writer.last_pc() <= last_pc_)) {
- // (last_pc_ != NULL) is rolled into the above check.
- // If a last_pc_ is set, we need to make sure that there has not been any
- // relocation information generated between the last instruction and this
- // pop instruction.
- byte instr = last_pc_[0];
- if ((instr & ~0x7) == 0x50) {
- int push_reg_code = instr & 0x7;
- if (push_reg_code == dst.code()) {
- pc_ = last_pc_;
- if (FLAG_print_peephole_optimization) {
- PrintF("%d push/pop (same reg) eliminated\n", pc_offset());
- }
- } else {
- // Convert 'push src; pop dst' to 'mov dst, src'.
- last_pc_[0] = 0x8b;
- Register src = { push_reg_code };
- EnsureSpace ensure_space(this);
- emit_operand(dst, Operand(src));
- if (FLAG_print_peephole_optimization) {
- PrintF("%d push/pop (reg->reg) eliminated\n", pc_offset());
- }
- }
- last_pc_ = NULL;
- return;
- } else if (instr == 0xff) { // push of an operand, convert to a move
- byte op1 = last_pc_[1];
- // Check if the operation is really a push.
- if ((op1 & 0x38) == (6 << 3)) {
- op1 = (op1 & ~0x38) | static_cast<byte>(dst.code() << 3);
- last_pc_[0] = 0x8b;
- last_pc_[1] = op1;
- last_pc_ = NULL;
- if (FLAG_print_peephole_optimization) {
- PrintF("%d push/pop (op->reg) eliminated\n", pc_offset());
- }
- return;
- }
- } else if ((instr == 0x89) &&
- (last_pc_[1] == 0x04) &&
- (last_pc_[2] == 0x24)) {
- // 0x71283c 396 890424 mov [esp],eax
- // 0x71283f 399 58 pop eax
- if (dst.is(eax)) {
- // change to
- // 0x710fac 216 83c404 add esp,0x4
- last_pc_[0] = 0x83;
- last_pc_[1] = 0xc4;
- last_pc_[2] = 0x04;
- last_pc_ = NULL;
- if (FLAG_print_peephole_optimization) {
- PrintF("%d push/pop (mov-pop) eliminated\n", pc_offset());
- }
- return;
- }
- } else if (instr == 0x6a && dst.is(eax)) { // push of immediate 8 bit
- byte imm8 = last_pc_[1];
- if (imm8 == 0) {
- // 6a00 push 0x0
- // 58 pop eax
- last_pc_[0] = 0x31;
- last_pc_[1] = 0xc0;
- // change to
- // 31c0 xor eax,eax
- last_pc_ = NULL;
- if (FLAG_print_peephole_optimization) {
- PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
- }
- return;
- } else {
- // 6a00 push 0xXX
- // 58 pop eax
- last_pc_[0] = 0xb8;
- EnsureSpace ensure_space(this);
- if ((imm8 & 0x80) != 0) {
- EMIT(0xff);
- EMIT(0xff);
- EMIT(0xff);
- // change to
- // b8XXffffff mov eax,0xffffffXX
- } else {
- EMIT(0x00);
- EMIT(0x00);
- EMIT(0x00);
- // change to
- // b8XX000000 mov eax,0x000000XX
- }
- last_pc_ = NULL;
- if (FLAG_print_peephole_optimization) {
- PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
- }
- return;
- }
- } else if (instr == 0x68 && dst.is(eax)) { // push of immediate 32 bit
- // 68XXXXXXXX push 0xXXXXXXXX
- // 58 pop eax
- last_pc_[0] = 0xb8;
- last_pc_ = NULL;
- // change to
- // b8XXXXXXXX mov eax,0xXXXXXXXX
- if (FLAG_print_peephole_optimization) {
- PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
- }
- return;
- }
-
- // Other potential patterns for peephole:
- // 0x712716 102 890424 mov [esp], eax
- // 0x712719 105 8b1424 mov edx, [esp]
- }
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x58 | dst.code());
}
void Assembler::pop(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x8F);
emit_operand(eax, dst);
}
@@ -585,7 +472,6 @@ void Assembler::pop(const Operand& dst) {
void Assembler::enter(const Immediate& size) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xC8);
emit_w(size);
EMIT(0);
@@ -594,7 +480,6 @@ void Assembler::enter(const Immediate& size) {
void Assembler::leave() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xC9);
}
@@ -602,7 +487,6 @@ void Assembler::leave() {
void Assembler::mov_b(Register dst, const Operand& src) {
ASSERT(dst.code() < 4);
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x8A);
emit_operand(dst, src);
}
@@ -610,7 +494,6 @@ void Assembler::mov_b(Register dst, const Operand& src) {
void Assembler::mov_b(const Operand& dst, int8_t imm8) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xC6);
emit_operand(eax, dst);
EMIT(imm8);
@@ -620,7 +503,6 @@ void Assembler::mov_b(const Operand& dst, int8_t imm8) {
void Assembler::mov_b(const Operand& dst, Register src) {
ASSERT(src.code() < 4);
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x88);
emit_operand(src, dst);
}
@@ -628,7 +510,6 @@ void Assembler::mov_b(const Operand& dst, Register src) {
void Assembler::mov_w(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x8B);
emit_operand(dst, src);
@@ -637,7 +518,6 @@ void Assembler::mov_w(Register dst, const Operand& src) {
void Assembler::mov_w(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x89);
emit_operand(src, dst);
@@ -646,7 +526,6 @@ void Assembler::mov_w(const Operand& dst, Register src) {
void Assembler::mov(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xB8 | dst.code());
emit(imm32);
}
@@ -654,7 +533,6 @@ void Assembler::mov(Register dst, int32_t imm32) {
void Assembler::mov(Register dst, const Immediate& x) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xB8 | dst.code());
emit(x);
}
@@ -662,7 +540,6 @@ void Assembler::mov(Register dst, const Immediate& x) {
void Assembler::mov(Register dst, Handle<Object> handle) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xB8 | dst.code());
emit(handle);
}
@@ -670,7 +547,6 @@ void Assembler::mov(Register dst, Handle<Object> handle) {
void Assembler::mov(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x8B);
emit_operand(dst, src);
}
@@ -678,7 +554,6 @@ void Assembler::mov(Register dst, const Operand& src) {
void Assembler::mov(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x89);
EMIT(0xC0 | src.code() << 3 | dst.code());
}
@@ -686,7 +561,6 @@ void Assembler::mov(Register dst, Register src) {
void Assembler::mov(const Operand& dst, const Immediate& x) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xC7);
emit_operand(eax, dst);
emit(x);
@@ -695,7 +569,6 @@ void Assembler::mov(const Operand& dst, const Immediate& x) {
void Assembler::mov(const Operand& dst, Handle<Object> handle) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xC7);
emit_operand(eax, dst);
emit(handle);
@@ -704,7 +577,6 @@ void Assembler::mov(const Operand& dst, Handle<Object> handle) {
void Assembler::mov(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x89);
emit_operand(src, dst);
}
@@ -712,7 +584,6 @@ void Assembler::mov(const Operand& dst, Register src) {
void Assembler::movsx_b(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x0F);
EMIT(0xBE);
emit_operand(dst, src);
@@ -721,7 +592,6 @@ void Assembler::movsx_b(Register dst, const Operand& src) {
void Assembler::movsx_w(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x0F);
EMIT(0xBF);
emit_operand(dst, src);
@@ -730,7 +600,6 @@ void Assembler::movsx_w(Register dst, const Operand& src) {
void Assembler::movzx_b(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x0F);
EMIT(0xB6);
emit_operand(dst, src);
@@ -739,7 +608,6 @@ void Assembler::movzx_b(Register dst, const Operand& src) {
void Assembler::movzx_w(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x0F);
EMIT(0xB7);
emit_operand(dst, src);
@@ -749,7 +617,6 @@ void Assembler::movzx_w(Register dst, const Operand& src) {
void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
UNIMPLEMENTED();
USE(cc);
USE(dst);
@@ -760,7 +627,6 @@ void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
UNIMPLEMENTED();
USE(cc);
USE(dst);
@@ -771,7 +637,6 @@ void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode: 0f 40 + cc /r.
EMIT(0x0F);
EMIT(0x40 + cc);
@@ -781,14 +646,12 @@ void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
void Assembler::cld() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xFC);
}
void Assembler::rep_movs() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF3);
EMIT(0xA5);
}
@@ -796,7 +659,6 @@ void Assembler::rep_movs() {
void Assembler::rep_stos() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF3);
EMIT(0xAB);
}
@@ -804,14 +666,12 @@ void Assembler::rep_stos() {
void Assembler::stos() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xAB);
}
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (src.is(eax) || dst.is(eax)) { // Single-byte encoding.
EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
} else {
@@ -823,14 +683,12 @@ void Assembler::xchg(Register dst, Register src) {
void Assembler::adc(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_arith(2, Operand(dst), Immediate(imm32));
}
void Assembler::adc(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x13);
emit_operand(dst, src);
}
@@ -838,7 +696,6 @@ void Assembler::adc(Register dst, const Operand& src) {
void Assembler::add(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x03);
emit_operand(dst, src);
}
@@ -846,24 +703,7 @@ void Assembler::add(Register dst, const Operand& src) {
void Assembler::add(const Operand& dst, const Immediate& x) {
ASSERT(reloc_info_writer.last_pc() != NULL);
- if (FLAG_peephole_optimization && (reloc_info_writer.last_pc() <= last_pc_)) {
- byte instr = last_pc_[0];
- if ((instr & 0xf8) == 0x50) {
- // Last instruction was a push. Check whether this is a pop without a
- // result.
- if ((dst.is_reg(esp)) &&
- (x.x_ == kPointerSize) && (x.rmode_ == RelocInfo::NONE)) {
- pc_ = last_pc_;
- last_pc_ = NULL;
- if (FLAG_print_peephole_optimization) {
- PrintF("%d push/pop(noreg) eliminated\n", pc_offset());
- }
- return;
- }
- }
- }
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_arith(0, dst, x);
}
@@ -875,14 +715,12 @@ void Assembler::and_(Register dst, int32_t imm32) {
void Assembler::and_(Register dst, const Immediate& x) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_arith(4, Operand(dst), x);
}
void Assembler::and_(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x23);
emit_operand(dst, src);
}
@@ -890,14 +728,12 @@ void Assembler::and_(Register dst, const Operand& src) {
void Assembler::and_(const Operand& dst, const Immediate& x) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_arith(4, dst, x);
}
void Assembler::and_(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x21);
emit_operand(src, dst);
}
@@ -905,7 +741,6 @@ void Assembler::and_(const Operand& dst, Register src) {
void Assembler::cmpb(const Operand& op, int8_t imm8) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x80);
emit_operand(edi, op); // edi == 7
EMIT(imm8);
@@ -915,7 +750,6 @@ void Assembler::cmpb(const Operand& op, int8_t imm8) {
void Assembler::cmpb(const Operand& dst, Register src) {
ASSERT(src.is_byte_register());
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x38);
emit_operand(src, dst);
}
@@ -924,7 +758,6 @@ void Assembler::cmpb(const Operand& dst, Register src) {
void Assembler::cmpb(Register dst, const Operand& src) {
ASSERT(dst.is_byte_register());
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x3A);
emit_operand(dst, src);
}
@@ -933,7 +766,6 @@ void Assembler::cmpb(Register dst, const Operand& src) {
void Assembler::cmpw(const Operand& op, Immediate imm16) {
ASSERT(imm16.is_int16());
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x81);
emit_operand(edi, op);
@@ -943,21 +775,18 @@ void Assembler::cmpw(const Operand& op, Immediate imm16) {
void Assembler::cmp(Register reg, int32_t imm32) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_arith(7, Operand(reg), Immediate(imm32));
}
void Assembler::cmp(Register reg, Handle<Object> handle) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_arith(7, Operand(reg), Immediate(handle));
}
void Assembler::cmp(Register reg, const Operand& op) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x3B);
emit_operand(reg, op);
}
@@ -965,21 +794,18 @@ void Assembler::cmp(Register reg, const Operand& op) {
void Assembler::cmp(const Operand& op, const Immediate& imm) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_arith(7, op, imm);
}
void Assembler::cmp(const Operand& op, Handle<Object> handle) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_arith(7, op, Immediate(handle));
}
void Assembler::cmpb_al(const Operand& op) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x38); // CMP r/m8, r8
emit_operand(eax, op); // eax has same code as register al.
}
@@ -987,7 +813,6 @@ void Assembler::cmpb_al(const Operand& op) {
void Assembler::cmpw_ax(const Operand& op) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x39); // CMP r/m16, r16
emit_operand(eax, op); // eax has same code as register ax.
@@ -996,7 +821,6 @@ void Assembler::cmpw_ax(const Operand& op) {
void Assembler::dec_b(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xFE);
EMIT(0xC8 | dst.code());
}
@@ -1004,7 +828,6 @@ void Assembler::dec_b(Register dst) {
void Assembler::dec_b(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xFE);
emit_operand(ecx, dst);
}
@@ -1012,14 +835,12 @@ void Assembler::dec_b(const Operand& dst) {
void Assembler::dec(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x48 | dst.code());
}
void Assembler::dec(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xFF);
emit_operand(ecx, dst);
}
@@ -1027,14 +848,12 @@ void Assembler::dec(const Operand& dst) {
void Assembler::cdq() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x99);
}
void Assembler::idiv(Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF7);
EMIT(0xF8 | src.code());
}
@@ -1042,7 +861,6 @@ void Assembler::idiv(Register src) {
void Assembler::imul(Register reg) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF7);
EMIT(0xE8 | reg.code());
}
@@ -1050,7 +868,6 @@ void Assembler::imul(Register reg) {
void Assembler::imul(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x0F);
EMIT(0xAF);
emit_operand(dst, src);
@@ -1059,7 +876,6 @@ void Assembler::imul(Register dst, const Operand& src) {
void Assembler::imul(Register dst, Register src, int32_t imm32) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (is_int8(imm32)) {
EMIT(0x6B);
EMIT(0xC0 | dst.code() << 3 | src.code());
@@ -1074,14 +890,12 @@ void Assembler::imul(Register dst, Register src, int32_t imm32) {
void Assembler::inc(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x40 | dst.code());
}
void Assembler::inc(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xFF);
emit_operand(eax, dst);
}
@@ -1089,7 +903,6 @@ void Assembler::inc(const Operand& dst) {
void Assembler::lea(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x8D);
emit_operand(dst, src);
}
@@ -1097,7 +910,6 @@ void Assembler::lea(Register dst, const Operand& src) {
void Assembler::mul(Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF7);
EMIT(0xE0 | src.code());
}
@@ -1105,7 +917,6 @@ void Assembler::mul(Register src) {
void Assembler::neg(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF7);
EMIT(0xD8 | dst.code());
}
@@ -1113,7 +924,6 @@ void Assembler::neg(Register dst) {
void Assembler::not_(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF7);
EMIT(0xD0 | dst.code());
}
@@ -1121,14 +931,12 @@ void Assembler::not_(Register dst) {
void Assembler::or_(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_arith(1, Operand(dst), Immediate(imm32));
}
void Assembler::or_(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x0B);
emit_operand(dst, src);
}
@@ -1136,14 +944,12 @@ void Assembler::or_(Register dst, const Operand& src) {
void Assembler::or_(const Operand& dst, const Immediate& x) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_arith(1, dst, x);
}
void Assembler::or_(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x09);
emit_operand(src, dst);
}
@@ -1151,7 +957,6 @@ void Assembler::or_(const Operand& dst, Register src) {
void Assembler::rcl(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
EMIT(0xD1);
@@ -1166,7 +971,6 @@ void Assembler::rcl(Register dst, uint8_t imm8) {
void Assembler::rcr(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
EMIT(0xD1);
@@ -1181,7 +985,6 @@ void Assembler::rcr(Register dst, uint8_t imm8) {
void Assembler::sar(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
EMIT(0xD1);
@@ -1196,7 +999,6 @@ void Assembler::sar(Register dst, uint8_t imm8) {
void Assembler::sar_cl(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD3);
EMIT(0xF8 | dst.code());
}
@@ -1204,7 +1006,6 @@ void Assembler::sar_cl(Register dst) {
void Assembler::sbb(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x1B);
emit_operand(dst, src);
}
@@ -1212,7 +1013,6 @@ void Assembler::sbb(Register dst, const Operand& src) {
void Assembler::shld(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x0F);
EMIT(0xA5);
emit_operand(dst, src);
@@ -1221,7 +1021,6 @@ void Assembler::shld(Register dst, const Operand& src) {
void Assembler::shl(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
EMIT(0xD1);
@@ -1236,7 +1035,6 @@ void Assembler::shl(Register dst, uint8_t imm8) {
void Assembler::shl_cl(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD3);
EMIT(0xE0 | dst.code());
}
@@ -1244,7 +1042,6 @@ void Assembler::shl_cl(Register dst) {
void Assembler::shrd(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x0F);
EMIT(0xAD);
emit_operand(dst, src);
@@ -1253,7 +1050,6 @@ void Assembler::shrd(Register dst, const Operand& src) {
void Assembler::shr(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
EMIT(0xD1);
@@ -1268,7 +1064,6 @@ void Assembler::shr(Register dst, uint8_t imm8) {
void Assembler::shr_cl(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD3);
EMIT(0xE8 | dst.code());
}
@@ -1276,7 +1071,6 @@ void Assembler::shr_cl(Register dst) {
void Assembler::subb(const Operand& op, int8_t imm8) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (op.is_reg(eax)) {
EMIT(0x2c);
} else {
@@ -1289,14 +1083,12 @@ void Assembler::subb(const Operand& op, int8_t imm8) {
void Assembler::sub(const Operand& dst, const Immediate& x) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_arith(5, dst, x);
}
void Assembler::sub(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x2B);
emit_operand(dst, src);
}
@@ -1305,7 +1097,6 @@ void Assembler::sub(Register dst, const Operand& src) {
void Assembler::subb(Register dst, const Operand& src) {
ASSERT(dst.code() < 4);
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x2A);
emit_operand(dst, src);
}
@@ -1313,7 +1104,6 @@ void Assembler::subb(Register dst, const Operand& src) {
void Assembler::sub(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x29);
emit_operand(src, dst);
}
@@ -1321,7 +1111,6 @@ void Assembler::sub(const Operand& dst, Register src) {
void Assembler::test(Register reg, const Immediate& imm) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Only use test against byte for registers that have a byte
// variant: eax, ebx, ecx, and edx.
if (imm.rmode_ == RelocInfo::NONE && is_uint8(imm.x_) && reg.code() < 4) {
@@ -1348,7 +1137,6 @@ void Assembler::test(Register reg, const Immediate& imm) {
void Assembler::test(Register reg, const Operand& op) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x85);
emit_operand(reg, op);
}
@@ -1356,7 +1144,6 @@ void Assembler::test(Register reg, const Operand& op) {
void Assembler::test_b(Register reg, const Operand& op) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x84);
emit_operand(reg, op);
}
@@ -1364,7 +1151,6 @@ void Assembler::test_b(Register reg, const Operand& op) {
void Assembler::test(const Operand& op, const Immediate& imm) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF7);
emit_operand(eax, op);
emit(imm);
@@ -1373,7 +1159,6 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
void Assembler::test_b(const Operand& op, uint8_t imm8) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF6);
emit_operand(eax, op);
EMIT(imm8);
@@ -1382,14 +1167,12 @@ void Assembler::test_b(const Operand& op, uint8_t imm8) {
void Assembler::xor_(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_arith(6, Operand(dst), Immediate(imm32));
}
void Assembler::xor_(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x33);
emit_operand(dst, src);
}
@@ -1397,7 +1180,6 @@ void Assembler::xor_(Register dst, const Operand& src) {
void Assembler::xor_(const Operand& src, Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x31);
emit_operand(dst, src);
}
@@ -1405,14 +1187,12 @@ void Assembler::xor_(const Operand& src, Register dst) {
void Assembler::xor_(const Operand& dst, const Immediate& x) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_arith(6, dst, x);
}
void Assembler::bt(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x0F);
EMIT(0xA3);
emit_operand(src, dst);
@@ -1421,7 +1201,6 @@ void Assembler::bt(const Operand& dst, Register src) {
void Assembler::bts(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x0F);
EMIT(0xAB);
emit_operand(src, dst);
@@ -1430,21 +1209,18 @@ void Assembler::bts(const Operand& dst, Register src) {
void Assembler::hlt() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF4);
}
void Assembler::int3() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xCC);
}
void Assembler::nop() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x90);
}
@@ -1452,7 +1228,6 @@ void Assembler::nop() {
void Assembler::rdtsc() {
ASSERT(CpuFeatures::IsEnabled(RDTSC));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x0F);
EMIT(0x31);
}
@@ -1460,7 +1235,6 @@ void Assembler::rdtsc() {
void Assembler::ret(int imm16) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint16(imm16));
if (imm16 == 0) {
EMIT(0xC3);
@@ -1506,7 +1280,6 @@ void Assembler::print(Label* L) {
void Assembler::bind_to(Label* L, int pos) {
EnsureSpace ensure_space(this);
- last_pc_ = NULL;
ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
while (L->is_linked()) {
Displacement disp = disp_at(L);
@@ -1524,36 +1297,35 @@ void Assembler::bind_to(Label* L, int pos) {
}
disp.next(L);
}
+ while (L->is_near_linked()) {
+ int fixup_pos = L->near_link_pos();
+ int offset_to_next =
+ static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
+ ASSERT(offset_to_next <= 0);
+ // Relative address, relative to point after address.
+ int disp = pos - fixup_pos - sizeof(int8_t);
+ ASSERT(0 <= disp && disp <= 127);
+ set_byte_at(fixup_pos, disp);
+ if (offset_to_next < 0) {
+ L->link_to(fixup_pos + offset_to_next, Label::kNear);
+ } else {
+ L->UnuseNear();
+ }
+ }
L->bind_to(pos);
}
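// Worked example of the near fixup loop above (illustrative): binding at
// pos == 40 with displacement bytes at fixup positions 30 and 20, where
// byte(30) == -10 and byte(20) == 0:
//   pass 1: fixup_pos = 30, disp = 40 - 30 - 1 = 9,  relink to 30 + (-10)
//   pass 2: fixup_pos = 20, disp = 40 - 20 - 1 = 19, byte was 0, chain ends
// Each patched displacement is relative to the byte following the
// displacement byte itself.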
void Assembler::bind(Label* L) {
EnsureSpace ensure_space(this);
- last_pc_ = NULL;
ASSERT(!L->is_bound()); // label can only be bound once
bind_to(L, pc_offset());
}
-void Assembler::bind(NearLabel* L) {
- ASSERT(!L->is_bound());
- last_pc_ = NULL;
- while (L->unresolved_branches_ > 0) {
- int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
- int disp = pc_offset() - branch_pos;
- ASSERT(is_int8(disp));
- set_byte_at(branch_pos - sizeof(int8_t), disp);
- L->unresolved_branches_--;
- }
- L->bind_to(pc_offset());
-}
-
-
void Assembler::call(Label* L) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (L->is_bound()) {
const int long_size = 5;
int offs = L->pos() - pc_offset();
@@ -1572,35 +1344,44 @@ void Assembler::call(Label* L) {
void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(!RelocInfo::IsCodeTarget(rmode));
EMIT(0xE8);
emit(entry - (pc_ + sizeof(int32_t)), rmode);
}
+int Assembler::CallSize(const Operand& adr) {
+ // Call size is 1 (opcode) + adr.len_ (operand).
+ return 1 + adr.len_;
+}
+
+
void Assembler::call(const Operand& adr) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xFF);
emit_operand(edx, adr);
}
-void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
+int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
+  return 1 /* opcode */ + sizeof(uint32_t) /* 32-bit displacement */;
+}
+
+
+void Assembler::call(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(RelocInfo::IsCodeTarget(rmode));
EMIT(0xE8);
- emit(reinterpret_cast<intptr_t>(code.location()), rmode);
+ emit(reinterpret_cast<intptr_t>(code.location()), rmode, ast_id);
}
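// Sketch of the intended use (the caller shape is an assumption): a call
// site that has an AST id passes it through so the emit() overload above
// records CODE_TARGET_WITH_ID instead of plain CODE_TARGET, tying the call
// back to its AST node for type feedback.
//   __ call(ic, RelocInfo::CODE_TARGET, expr->id());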
-void Assembler::jmp(Label* L) {
+void Assembler::jmp(Label* L, Label::Distance distance) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (L->is_bound()) {
const int short_size = 2;
const int long_size = 5;
@@ -1615,6 +1396,9 @@ void Assembler::jmp(Label* L) {
EMIT(0xE9);
emit(offs - long_size);
}
+ } else if (distance == Label::kNear) {
+ EMIT(0xEB);
+ emit_near_disp(L);
} else {
// 1110 1001 #32-bit disp.
EMIT(0xE9);
@@ -1625,7 +1409,6 @@ void Assembler::jmp(Label* L) {
void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(!RelocInfo::IsCodeTarget(rmode));
EMIT(0xE9);
emit(entry - (pc_ + sizeof(int32_t)), rmode);
@@ -1634,7 +1417,6 @@ void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
void Assembler::jmp(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xFF);
emit_operand(esp, adr);
}
@@ -1642,37 +1424,15 @@ void Assembler::jmp(const Operand& adr) {
void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(RelocInfo::IsCodeTarget(rmode));
EMIT(0xE9);
emit(reinterpret_cast<intptr_t>(code.location()), rmode);
}
-void Assembler::jmp(NearLabel* L) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (L->is_bound()) {
- const int short_size = 2;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- ASSERT(is_int8(offs - short_size));
- // 1110 1011 #8-bit disp.
- EMIT(0xEB);
- EMIT((offs - short_size) & 0xFF);
- } else {
- EMIT(0xEB);
- EMIT(0x00); // The displacement will be resolved later.
- L->link_to(pc_offset());
- }
-}
-
-
-void Assembler::j(Condition cc, Label* L, Hint hint) {
+void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(0 <= cc && cc < 16);
- if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
if (L->is_bound()) {
const int short_size = 2;
const int long_size = 6;
@@ -1688,6 +1448,9 @@ void Assembler::j(Condition cc, Label* L, Hint hint) {
EMIT(0x80 | cc);
emit(offs - long_size);
}
+ } else if (distance == Label::kNear) {
+ EMIT(0x70 | cc);
+ emit_near_disp(L);
} else {
// 0000 1111 1000 tttn #32-bit disp
// Note: could eliminate cond. jumps to this jump if condition
@@ -1699,11 +1462,9 @@ void Assembler::j(Condition cc, Label* L, Hint hint) {
}
-void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint) {
+void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT((0 <= cc) && (cc < 16));
- if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
// 0000 1111 1000 tttn #32-bit disp.
EMIT(0x0F);
EMIT(0x80 | cc);
@@ -1711,10 +1472,8 @@ void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint) {
}
-void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
+void Assembler::j(Condition cc, Handle<Code> code) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
// 0000 1111 1000 tttn #32-bit disp
EMIT(0x0F);
EMIT(0x80 | cc);
@@ -1722,46 +1481,22 @@ void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
}
-void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(0 <= cc && cc < 16);
- if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
- if (L->is_bound()) {
- const int short_size = 2;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- ASSERT(is_int8(offs - short_size));
- // 0111 tttn #8-bit disp
- EMIT(0x70 | cc);
- EMIT((offs - short_size) & 0xFF);
- } else {
- EMIT(0x70 | cc);
- EMIT(0x00); // The displacement will be resolved later.
- L->link_to(pc_offset());
- }
-}
-
-
// FPU instructions.
void Assembler::fld(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xD9, 0xC0, i);
}
void Assembler::fstp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDD, 0xD8, i);
}
void Assembler::fld1() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xE8);
}
@@ -1769,7 +1504,6 @@ void Assembler::fld1() {
void Assembler::fldpi() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xEB);
}
@@ -1777,7 +1511,6 @@ void Assembler::fldpi() {
void Assembler::fldz() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xEE);
}
@@ -1785,7 +1518,6 @@ void Assembler::fldz() {
void Assembler::fldln2() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xED);
}
@@ -1793,7 +1525,6 @@ void Assembler::fldln2() {
void Assembler::fld_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD9);
emit_operand(eax, adr);
}
@@ -1801,7 +1532,6 @@ void Assembler::fld_s(const Operand& adr) {
void Assembler::fld_d(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDD);
emit_operand(eax, adr);
}
@@ -1809,7 +1539,6 @@ void Assembler::fld_d(const Operand& adr) {
void Assembler::fstp_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD9);
emit_operand(ebx, adr);
}
@@ -1817,7 +1546,6 @@ void Assembler::fstp_s(const Operand& adr) {
void Assembler::fstp_d(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDD);
emit_operand(ebx, adr);
}
@@ -1825,7 +1553,6 @@ void Assembler::fstp_d(const Operand& adr) {
void Assembler::fst_d(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDD);
emit_operand(edx, adr);
}
@@ -1833,7 +1560,6 @@ void Assembler::fst_d(const Operand& adr) {
void Assembler::fild_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDB);
emit_operand(eax, adr);
}
@@ -1841,7 +1567,6 @@ void Assembler::fild_s(const Operand& adr) {
void Assembler::fild_d(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDF);
emit_operand(ebp, adr);
}
@@ -1849,7 +1574,6 @@ void Assembler::fild_d(const Operand& adr) {
void Assembler::fistp_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDB);
emit_operand(ebx, adr);
}
@@ -1858,7 +1582,6 @@ void Assembler::fistp_s(const Operand& adr) {
void Assembler::fisttp_s(const Operand& adr) {
ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDB);
emit_operand(ecx, adr);
}
@@ -1867,7 +1590,6 @@ void Assembler::fisttp_s(const Operand& adr) {
void Assembler::fisttp_d(const Operand& adr) {
ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDD);
emit_operand(ecx, adr);
}
@@ -1875,7 +1597,6 @@ void Assembler::fisttp_d(const Operand& adr) {
void Assembler::fist_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDB);
emit_operand(edx, adr);
}
@@ -1883,7 +1604,6 @@ void Assembler::fist_s(const Operand& adr) {
void Assembler::fistp_d(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDF);
emit_operand(edi, adr);
}
@@ -1891,7 +1611,6 @@ void Assembler::fistp_d(const Operand& adr) {
void Assembler::fabs() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xE1);
}
@@ -1899,7 +1618,6 @@ void Assembler::fabs() {
void Assembler::fchs() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xE0);
}
@@ -1907,7 +1625,6 @@ void Assembler::fchs() {
void Assembler::fcos() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xFF);
}
@@ -1915,7 +1632,6 @@ void Assembler::fcos() {
void Assembler::fsin() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xFE);
}
@@ -1923,7 +1639,6 @@ void Assembler::fsin() {
void Assembler::fyl2x() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xF1);
}
@@ -1931,21 +1646,18 @@ void Assembler::fyl2x() {
void Assembler::fadd(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDC, 0xC0, i);
}
void Assembler::fsub(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDC, 0xE8, i);
}
void Assembler::fisub_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDA);
emit_operand(esp, adr);
}
@@ -1953,56 +1665,48 @@ void Assembler::fisub_s(const Operand& adr) {
void Assembler::fmul(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDC, 0xC8, i);
}
void Assembler::fdiv(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDC, 0xF8, i);
}
void Assembler::faddp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDE, 0xC0, i);
}
void Assembler::fsubp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDE, 0xE8, i);
}
void Assembler::fsubrp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDE, 0xE0, i);
}
void Assembler::fmulp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDE, 0xC8, i);
}
void Assembler::fdivp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDE, 0xF8, i);
}
void Assembler::fprem() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xF8);
}
@@ -2010,7 +1714,6 @@ void Assembler::fprem() {
void Assembler::fprem1() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xF5);
}
@@ -2018,14 +1721,12 @@ void Assembler::fprem1() {
void Assembler::fxch(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xD9, 0xC8, i);
}
void Assembler::fincstp() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xF7);
}
@@ -2033,14 +1734,12 @@ void Assembler::fincstp() {
void Assembler::ffree(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDD, 0xC0, i);
}
void Assembler::ftst() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xE4);
}
@@ -2048,14 +1747,12 @@ void Assembler::ftst() {
void Assembler::fucomp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDD, 0xE8, i);
}
void Assembler::fucompp() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDA);
EMIT(0xE9);
}
@@ -2063,7 +1760,6 @@ void Assembler::fucompp() {
void Assembler::fucomi(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDB);
EMIT(0xE8 + i);
}
@@ -2071,7 +1767,6 @@ void Assembler::fucomi(int i) {
void Assembler::fucomip() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDF);
EMIT(0xE9);
}
@@ -2079,7 +1774,6 @@ void Assembler::fucomip() {
void Assembler::fcompp() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDE);
EMIT(0xD9);
}
@@ -2087,7 +1781,6 @@ void Assembler::fcompp() {
void Assembler::fnstsw_ax() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDF);
EMIT(0xE0);
}
@@ -2095,14 +1788,12 @@ void Assembler::fnstsw_ax() {
void Assembler::fwait() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x9B);
}
void Assembler::frndint() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xFC);
}
@@ -2110,7 +1801,6 @@ void Assembler::frndint() {
void Assembler::fnclex() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xDB);
EMIT(0xE2);
}
@@ -2118,7 +1808,6 @@ void Assembler::fnclex() {
void Assembler::sahf() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x9E);
}
@@ -2126,7 +1815,6 @@ void Assembler::sahf() {
void Assembler::setcc(Condition cc, Register reg) {
ASSERT(reg.is_byte_register());
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x0F);
EMIT(0x90 | cc);
EMIT(0xC0 | reg.code());
@@ -2136,7 +1824,6 @@ void Assembler::setcc(Condition cc, Register reg) {
void Assembler::cvttss2si(Register dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF3);
EMIT(0x0F);
EMIT(0x2C);
@@ -2147,7 +1834,6 @@ void Assembler::cvttss2si(Register dst, const Operand& src) {
void Assembler::cvttsd2si(Register dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF2);
EMIT(0x0F);
EMIT(0x2C);
@@ -2158,7 +1844,6 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) {
void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF2);
EMIT(0x0F);
EMIT(0x2A);
@@ -2169,7 +1854,6 @@ void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF3);
EMIT(0x0F);
EMIT(0x5A);
@@ -2177,10 +1861,19 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x5A);
+ emit_sse_operand(dst, src);
+}
+
+
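// Editor's note: F2 0F 5A is CVTSD2SS (scalar double -> scalar single).
// It mirrors cvtss2sd above (F3 0F 5A); the two differ only in the
// mandatory prefix that selects the source operand width.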
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF2);
EMIT(0x0F);
EMIT(0x58);
@@ -2191,7 +1884,6 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF2);
EMIT(0x0F);
EMIT(0x59);
@@ -2202,7 +1894,6 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF2);
EMIT(0x0F);
EMIT(0x5C);
@@ -2213,7 +1904,6 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) {
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF2);
EMIT(0x0F);
EMIT(0x5E);
@@ -2224,7 +1914,6 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x57);
@@ -2232,9 +1921,16 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x57);
+ emit_sse_operand(dst, src);
+}
+
+
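// Editor's note: xorps (0F 57, no 0x66 prefix) is a plain SSE instruction,
// which is presumably why it carries no CpuFeatures check, unlike the
// SSE2 xorpd (66 0F 57) variant above.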
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF2);
EMIT(0x0F);
EMIT(0x51);
@@ -2244,7 +1940,6 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
void Assembler::andpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x54);
@@ -2255,7 +1950,6 @@ void Assembler::andpd(XMMRegister dst, XMMRegister src) {
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x2E);
@@ -2266,7 +1960,6 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
void Assembler::movmskpd(Register dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x50);
@@ -2277,7 +1970,6 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF2);
EMIT(0x0F);
EMIT(0xC2);
@@ -2289,7 +1981,6 @@ void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
void Assembler::movaps(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x0F);
EMIT(0x28);
emit_sse_operand(dst, src);
@@ -2299,7 +1990,6 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) {
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x7F);
@@ -2310,7 +2000,6 @@ void Assembler::movdqa(const Operand& dst, XMMRegister src) {
void Assembler::movdqa(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x6F);
@@ -2321,7 +2010,6 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) {
void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF3);
EMIT(0x0F);
EMIT(0x7F);
@@ -2332,7 +2020,6 @@ void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
void Assembler::movdqu(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF3);
EMIT(0x0F);
EMIT(0x6F);
@@ -2343,7 +2030,6 @@ void Assembler::movdqu(XMMRegister dst, const Operand& src) {
void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x38);
@@ -2355,7 +2041,6 @@ void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
void Assembler::movntdq(const Operand& dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0xE7);
@@ -2366,7 +2051,6 @@ void Assembler::movntdq(const Operand& dst, XMMRegister src) {
void Assembler::prefetch(const Operand& src, int level) {
ASSERT(is_uint2(level));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x0F);
EMIT(0x18);
  XMMRegister code = { level };  // Emit hint number in Reg position of ModR/M.
@@ -2376,14 +2060,12 @@ void Assembler::prefetch(const Operand& src, int level) {
void Assembler::movdbl(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
movsd(dst, src);
}
void Assembler::movdbl(const Operand& dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
movsd(dst, src);
}
@@ -2391,7 +2073,6 @@ void Assembler::movdbl(const Operand& dst, XMMRegister src) {
void Assembler::movsd(const Operand& dst, XMMRegister src ) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF2); // double
EMIT(0x0F);
EMIT(0x11); // store
@@ -2402,7 +2083,6 @@ void Assembler::movsd(const Operand& dst, XMMRegister src ) {
void Assembler::movsd(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF2); // double
EMIT(0x0F);
EMIT(0x10); // load
@@ -2413,7 +2093,6 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0xF2);
EMIT(0x0F);
EMIT(0x10);
@@ -2421,10 +2100,39 @@ void Assembler::movsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::movss(const Operand& dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0xF3); // float
+ EMIT(0x0F);
+ EMIT(0x11); // store
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movss(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0xF3); // float
+ EMIT(0x0F);
+ EMIT(0x10); // load
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movss(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0xF3);
+ EMIT(0x0F);
+ EMIT(0x10);
+ emit_sse_operand(dst, src);
+}
+
+
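// Editor's note: the movss family added above uses the F3 prefix (scalar
// single) with the same 0F 10 (load) / 0F 11 (store) opcodes that the
// movsd family uses under the F2 prefix (scalar double).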
void Assembler::movd(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x6E);
@@ -2435,7 +2143,6 @@ void Assembler::movd(XMMRegister dst, const Operand& src) {
void Assembler::movd(const Operand& dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x7E);
@@ -2446,7 +2153,6 @@ void Assembler::movd(const Operand& dst, XMMRegister src) {
void Assembler::pand(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0xDB);
@@ -2457,7 +2163,6 @@ void Assembler::pand(XMMRegister dst, XMMRegister src) {
void Assembler::pxor(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0xEF);
@@ -2468,7 +2173,6 @@ void Assembler::pxor(XMMRegister dst, XMMRegister src) {
void Assembler::por(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0xEB);
@@ -2479,7 +2183,6 @@ void Assembler::por(XMMRegister dst, XMMRegister src) {
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x38);
@@ -2491,7 +2194,6 @@ void Assembler::ptest(XMMRegister dst, XMMRegister src) {
void Assembler::psllq(XMMRegister reg, int8_t shift) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x73);
@@ -2503,7 +2205,6 @@ void Assembler::psllq(XMMRegister reg, int8_t shift) {
void Assembler::psllq(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0xF3);
@@ -2514,7 +2215,6 @@ void Assembler::psllq(XMMRegister dst, XMMRegister src) {
void Assembler::psrlq(XMMRegister reg, int8_t shift) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x73);
@@ -2526,7 +2226,6 @@ void Assembler::psrlq(XMMRegister reg, int8_t shift) {
void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0xD3);
@@ -2537,7 +2236,6 @@ void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x70);
@@ -2549,7 +2247,6 @@ void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x3A);
@@ -2562,7 +2259,6 @@ void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x3A);
@@ -2629,7 +2325,7 @@ void Assembler::GrowBuffer() {
  // Some internal data structures overflow for very large buffers;
  // kMaximalBufferSize must be kept small enough to prevent this.
if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > Heap::MaxOldGenerationSize())) {
+ (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -2652,17 +2348,15 @@ void Assembler::GrowBuffer() {
reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
+ if (isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
buffer_ = desc.buffer;
buffer_size_ = desc.buffer_size;
pc_ += pc_delta;
- if (last_pc_ != NULL) {
- last_pc_ += pc_delta;
- }
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
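// Editor's note: this hunk shows the upgrade's recurring isolate-ification
// pattern, where process-wide statics become per-isolate accessors, e.g.
// (all taken from hunks in this diff):
//
//   Heap::MaxOldGenerationSize()  ->  isolate()->heap()->MaxOldGenerationSize()
//   spare_buffer_ (static)        ->  isolate()->assembler_spare_buffer()
//   Factory::empty_fixed_array()  ->  masm->isolate()->factory()->empty_fixed_array()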
@@ -2761,7 +2455,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
Serializer::TooLateToEnableNow();
}
#endif
- if (!Serializer::enabled() && !FLAG_debug_code) {
+ if (!Serializer::enabled() && !emit_debug_code()) {
return;
}
}
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index b60157c75..6609b4fed 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -37,6 +37,7 @@
#ifndef V8_IA32_ASSEMBLER_IA32_H_
#define V8_IA32_ASSEMBLER_IA32_H_
+#include "isolate.h"
#include "serialize.h"
namespace v8 {
@@ -248,23 +249,6 @@ inline Condition ReverseCondition(Condition cc) {
}
-enum Hint {
- no_hint = 0,
- not_taken = 0x2e,
- taken = 0x3e
-};
-
-
-// The result of negating a hint is as if the corresponding condition
-// were negated by NegateCondition. That is, no_hint is mapped to
-// itself and not_taken and taken are mapped to each other.
-inline Hint NegateHint(Hint hint) {
- return (hint == no_hint)
- ? no_hint
- : ((hint == not_taken) ? taken : not_taken);
-}
-
-
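// Editor's note: with the Hint enum and NegateHint gone, branch prediction
// hints (the 0x2E/0x3E instruction prefixes) are no longer emitted; what a
// call site can now express instead is jump length, via the Label::Distance
// parameter on the jmp/j overloads declared below.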
// -----------------------------------------------------------------------------
// Machine instruction Immediates
@@ -295,6 +279,7 @@ class Immediate BASE_EMBEDDED {
RelocInfo::Mode rmode_;
friend class Assembler;
+ friend class MacroAssembler;
};
@@ -447,14 +432,13 @@ class Displacement BASE_EMBEDDED {
// }
class CpuFeatures : public AllStatic {
public:
- // Detect features of the target CPU. If the portable flag is set,
- // the method sets safe defaults if the serializer is enabled
- // (snapshots must be portable).
- static void Probe(bool portable);
- static void Clear() { supported_ = 0; }
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
+ ASSERT(initialized_);
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
@@ -462,10 +446,22 @@ class CpuFeatures : public AllStatic {
if (f == RDTSC && !FLAG_enable_rdtsc) return false;
return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
}
+
+#ifdef DEBUG
// Check whether a feature is currently enabled.
static bool IsEnabled(CpuFeature f) {
- return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
+ ASSERT(initialized_);
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL) {
+ // When no isolate is available, work as if we're running in
+ // release mode.
+ return IsSupported(f);
+ }
+ uint64_t enabled = isolate->enabled_cpu_features();
+ return (enabled & (static_cast<uint64_t>(1) << f)) != 0;
}
+#endif
+
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
@@ -473,26 +469,68 @@ class CpuFeatures : public AllStatic {
explicit Scope(CpuFeature f) {
uint64_t mask = static_cast<uint64_t>(1) << f;
ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() || (found_by_runtime_probing_ & mask) == 0);
- old_enabled_ = CpuFeatures::enabled_;
- CpuFeatures::enabled_ |= mask;
+ ASSERT(!Serializer::enabled() ||
+ (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+ isolate_ = Isolate::UncheckedCurrent();
+ old_enabled_ = 0;
+ if (isolate_ != NULL) {
+ old_enabled_ = isolate_->enabled_cpu_features();
+ isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+ }
+ }
+ ~Scope() {
+ ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+ if (isolate_ != NULL) {
+ isolate_->set_enabled_cpu_features(old_enabled_);
+ }
}
- ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
private:
+ Isolate* isolate_;
uint64_t old_enabled_;
#else
public:
explicit Scope(CpuFeature f) {}
#endif
};
+
+ class TryForceFeatureScope BASE_EMBEDDED {
+ public:
+ explicit TryForceFeatureScope(CpuFeature f)
+ : old_supported_(CpuFeatures::supported_) {
+ if (CanForce()) {
+ CpuFeatures::supported_ |= (static_cast<uint64_t>(1) << f);
+ }
+ }
+
+ ~TryForceFeatureScope() {
+ if (CanForce()) {
+ CpuFeatures::supported_ = old_supported_;
+ }
+ }
+
+ private:
+ static bool CanForce() {
+ // It's only safe to temporarily force support of CPU features
+ // when there's only a single isolate, which is guaranteed when
+ // the serializer is enabled.
+ return Serializer::enabled();
+ }
+
+ const uint64_t old_supported_;
+ };
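// Editor's sketch: intended use of TryForceFeatureScope, modeled on
// Generate_OnStackReplacement later in this diff (illustrative only):
//
//   CpuFeatures::TryForceFeatureScope scope(SSE2);
//   if (!CpuFeatures::IsSupported(SSE2)) {
//     __ Abort("Unreachable code: Cannot optimize without SSE2 support.");
//     return;
//   }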
+
private:
+#ifdef DEBUG
+ static bool initialized_;
+#endif
static uint64_t supported_;
- static uint64_t enabled_;
static uint64_t found_by_runtime_probing_;
+
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
-class Assembler : public Malloced {
+class Assembler : public AssemblerBase {
private:
// We check before assembling an instruction that there is sufficient
// space to write an instruction and its relocation information.
@@ -519,9 +557,13 @@ class Assembler : public Malloced {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
- Assembler(void* buffer, int buffer_size);
+ // TODO(vitalyr): the assembler does not need an isolate.
+ Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
+ // Overrides the default provided by FLAG_debug_code.
+ void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -617,6 +659,7 @@ class Assembler : public Malloced {
void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
+ void push(Handle<Object> handle);
void pop(Register dst);
void pop(const Operand& dst);
@@ -784,30 +827,30 @@ class Assembler : public Malloced {
// but it may be bound only once.
void bind(Label* L); // binds an unbound label L to the current code position
- void bind(NearLabel* L);
// Calls
void call(Label* L);
void call(byte* entry, RelocInfo::Mode rmode);
+ int CallSize(const Operand& adr);
void call(const Operand& adr);
- void call(Handle<Code> code, RelocInfo::Mode rmode);
+ int CallSize(Handle<Code> code, RelocInfo::Mode rmode);
+ void call(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id = kNoASTId);
// Jumps
- void jmp(Label* L); // unconditional jump to L
+ // Unconditional jump to L.
+ void jmp(Label* L, Label::Distance distance = Label::kFar);
void jmp(byte* entry, RelocInfo::Mode rmode);
void jmp(const Operand& adr);
void jmp(Handle<Code> code, RelocInfo::Mode rmode);
- // Short jump
- void jmp(NearLabel* L);
-
// Conditional jumps
- void j(Condition cc, Label* L, Hint hint = no_hint);
- void j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint = no_hint);
- void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
-
- // Conditional short jump
- void j(Condition cc, NearLabel* L, Hint hint = no_hint);
+ void j(Condition cc,
+ Label* L,
+ Label::Distance distance = Label::kFar);
+ void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
+ void j(Condition cc, Handle<Code> code);
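// Editor's sketch: the NearLabel type is gone; a short (8-bit displacement)
// jump is now requested per call site, e.g. (pattern taken from the
// builtins changes later in this diff):
//
//   Label not_tos_eax;
//   __ j(not_equal, &not_tos_eax, Label::kNear);
//   ...
//   __ bind(&not_tos_eax);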
// Floating-point operations
void fld(int i);
@@ -885,12 +928,14 @@ class Assembler : public Malloced {
void cvtsi2sd(XMMRegister dst, const Operand& src);
void cvtss2sd(XMMRegister dst, XMMRegister src);
+ void cvtsd2ss(XMMRegister dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src);
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
void divsd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
+ void xorps(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void andpd(XMMRegister dst, XMMRegister src);
@@ -915,6 +960,10 @@ class Assembler : public Malloced {
void movd(const Operand& src, XMMRegister dst);
void movsd(XMMRegister dst, XMMRegister src);
+ void movss(XMMRegister dst, const Operand& src);
+ void movss(const Operand& dst, XMMRegister src);
+ void movss(XMMRegister dst, XMMRegister src);
+
void pand(XMMRegister dst, XMMRegister src);
void pxor(XMMRegister dst, XMMRegister src);
void por(XMMRegister dst, XMMRegister src);
@@ -982,6 +1031,8 @@ class Assembler : public Malloced {
static const int kMinimalBufferSize = 4*KB;
protected:
+ bool emit_debug_code() const { return emit_debug_code_; }
+
void movsd(XMMRegister dst, const Operand& src);
void movsd(const Operand& dst, XMMRegister src);
@@ -989,7 +1040,8 @@ class Assembler : public Malloced {
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(Register dst, XMMRegister src);
- byte* addr_at(int pos) { return buffer_ + pos; }
+ byte* addr_at(int pos) { return buffer_ + pos; }
+
private:
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
@@ -1004,7 +1056,9 @@ class Assembler : public Malloced {
void GrowBuffer();
inline void emit(uint32_t x);
inline void emit(Handle<Object> handle);
- inline void emit(uint32_t x, RelocInfo::Mode rmode);
+ inline void emit(uint32_t x,
+ RelocInfo::Mode rmode,
+ unsigned ast_id = kNoASTId);
inline void emit(const Immediate& x);
inline void emit_w(const Immediate& x);
@@ -1032,6 +1086,7 @@ class Assembler : public Malloced {
inline Displacement disp_at(Label* L);
inline void disp_at_put(Label* L, Displacement disp);
inline void emit_disp(Label* L, Displacement::Type type);
+ inline void emit_near_disp(Label* L);
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
@@ -1045,18 +1100,15 @@ class Assembler : public Malloced {
int buffer_size_;
// True if the assembler owns the buffer, false if buffer is external.
bool own_buffer_;
- // A previously allocated buffer of kMinimalBufferSize bytes, or NULL.
- static byte* spare_buffer_;
// code generation
byte* pc_; // the program counter; moves forward
RelocInfoWriter reloc_info_writer;
- // push-pop elimination
- byte* last_pc_;
-
PositionsRecorder positions_recorder_;
+ bool emit_debug_code_;
+
friend class PositionsRecorder;
};
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index f15fd1cd8..f8a85de98 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,7 +29,7 @@
#if defined(V8_TARGET_ARCH_IA32)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
@@ -70,7 +70,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// JumpToExternalReference expects eax to contain the number of arguments
// including the receiver and the extra arguments.
__ add(Operand(eax), Immediate(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id));
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
@@ -82,8 +82,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
Label non_function_call;
// Check that function is not a smi.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &non_function_call);
+ __ JumpIfSmi(edi, &non_function_call);
// Check that function is a JSFunction.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &non_function_call);
@@ -100,8 +99,10 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// Set expected number of arguments to zero (not changing eax).
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
+ Handle<Code> arguments_adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ SetCallKind(ecx, CALL_AS_METHOD);
+ __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
}
@@ -128,7 +129,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address();
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
__ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
__ j(not_equal, &rt_call);
#endif
@@ -138,8 +139,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// edi: constructor
__ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
  // The smi check below catches both a NULL and a Smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &rt_call);
+ __ JumpIfSmi(eax, &rt_call);
// edi: constructor
// eax: initial map (if proven valid below)
__ CmpObjectType(eax, MAP_TYPE, ebx);
@@ -184,7 +184,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// ebx: JSObject
// edi: start of next object
__ mov(Operand(ebx, JSObject::kMapOffset), eax);
- __ mov(ecx, Factory::empty_fixed_array());
+ Factory* factory = masm->isolate()->factory();
+ __ mov(ecx, factory->empty_fixed_array());
__ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
__ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
// Set extra fields in the newly allocated object.
@@ -194,9 +195,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
{ Label loop, entry;
// To allow for truncation.
if (count_constructions) {
- __ mov(edx, Factory::one_pointer_filler_map());
+ __ mov(edx, factory->one_pointer_filler_map());
} else {
- __ mov(edx, Factory::undefined_value());
+ __ mov(edx, factory->undefined_value());
}
__ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
__ jmp(&entry);
@@ -252,7 +253,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// edi: FixedArray
// edx: number of elements
// ecx: start of next object
- __ mov(eax, Factory::fixed_array_map());
+ __ mov(eax, factory->fixed_array_map());
__ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
__ SmiTag(edx);
__ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
@@ -262,7 +263,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// edi: FixedArray
// ecx: start of next object
{ Label loop, entry;
- __ mov(edx, Factory::undefined_value());
+ __ mov(edx, factory->undefined_value());
__ lea(eax, Operand(edi, FixedArray::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
@@ -334,14 +335,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Call the function.
if (is_api_function) {
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- Handle<Code> code = Handle<Code>(
- Builtins::builtin(Builtins::HandleApiCallConstruct));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION);
+ __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+ CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
} else {
ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
// Restore context from the frame.
@@ -353,13 +355,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Label use_receiver, exit;
// If the result is a smi, it is *not* an object in the ECMA sense.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &use_receiver, not_taken);
+ __ JumpIfSmi(eax, &use_receiver);
// If the type of the result (stored in its map) is less than
- // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(above_equal, &exit, not_taken);
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &exit);
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
@@ -376,7 +377,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ pop(ecx);
__ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
__ push(ecx);
- __ IncrementCounter(&Counters::constructed_objects, 1);
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
__ ret(0);
}
@@ -436,11 +437,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code.
if (is_construct) {
- __ call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+ __ call(masm->isolate()->builtins()->JSConstructCall(),
RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
// Exit the JS frame. Notice that this also removes the empty
@@ -465,19 +467,25 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Enter an internal frame.
__ EnterInternalFrame();
- // Push a copy of the function onto the stack.
+ // Push a copy of the function.
__ push(edi);
+ // Push call kind information.
+ __ push(ecx);
__ push(edi); // Function is also the parameter to the runtime call.
__ CallRuntime(Runtime::kLazyCompile, 1);
+
+ // Restore call kind information.
+ __ pop(ecx);
+ // Restore function.
__ pop(edi);
// Tear down temporary frame.
__ LeaveInternalFrame();
// Do a tail-call of the compiled function.
- __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(Operand(ecx));
+ __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+ __ jmp(Operand(eax));
}
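// Editor's note: the tail-call scratch register changes from ecx to eax
// here (and in Generate_LazyRecompile below) because ecx now carries the
// call kind information restored just above and must survive into the
// compiled function.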
@@ -487,17 +495,23 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Push a copy of the function onto the stack.
__ push(edi);
+ // Push call kind information.
+ __ push(ecx);
__ push(edi); // Function is also the parameter to the runtime call.
__ CallRuntime(Runtime::kLazyRecompile, 1);
- // Restore function and tear down temporary frame.
+ // Restore call kind information.
+ __ pop(ecx);
+ // Restore function.
__ pop(edi);
+
+ // Tear down temporary frame.
__ LeaveInternalFrame();
// Do a tail-call of the compiled function.
- __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(Operand(ecx));
+ __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+ __ jmp(Operand(eax));
}
@@ -518,15 +532,15 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ SmiUntag(ecx);
// Switch on the state.
- NearLabel not_no_registers, not_tos_eax;
+ Label not_no_registers, not_tos_eax;
__ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
- __ j(not_equal, &not_no_registers);
+ __ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
__ mov(eax, Operand(esp, 2 * kPointerSize));
__ cmp(ecx, FullCodeGenerator::TOS_REG);
- __ j(not_equal, &not_tos_eax);
+ __ j(not_equal, &not_tos_eax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, eax.
__ bind(&not_tos_eax);
@@ -561,12 +575,14 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+ Factory* factory = masm->isolate()->factory();
+
// 1. Make sure we have at least one argument.
{ Label done;
__ test(eax, Operand(eax));
- __ j(not_zero, &done, taken);
+ __ j(not_zero, &done);
__ pop(ebx);
- __ push(Immediate(Factory::undefined_value()));
+ __ push(Immediate(factory->undefined_value()));
__ push(ebx);
__ inc(eax);
__ bind(&done);
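// Editor's note: if the call had zero arguments, the block above slips an
// undefined receiver under the return address and bumps eax, so the rest
// of the builtin always sees at least a receiver on the stack.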
@@ -577,10 +593,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
Label non_function;
// 1 ~ return address.
__ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &non_function, not_taken);
+ __ JumpIfSmi(edi, &non_function);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &non_function, not_taken);
+ __ j(not_equal, &non_function);
// 3a. Patch the first argument if necessary when calling a function.
@@ -595,22 +610,24 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
1 << SharedFunctionInfo::kStrictModeBitWithinByte);
__ j(not_equal, &shift_arguments);
+ // Do not transform the receiver for natives (shared already in ebx).
+ __ test_b(FieldOperand(ebx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, &shift_arguments);
+
// Compute the receiver in non-strict mode.
__ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &convert_to_object);
- __ cmp(ebx, Factory::null_value());
+ // Call ToObject on the receiver if it is not an object, or use the
+ // global object if it is null or undefined.
+ __ JumpIfSmi(ebx, &convert_to_object);
+ __ cmp(ebx, factory->null_value());
__ j(equal, &use_global_receiver);
- __ cmp(ebx, Factory::undefined_value());
+ __ cmp(ebx, factory->undefined_value());
__ j(equal, &use_global_receiver);
-
- // We don't use IsObjectJSObjectType here because we jump on success.
- __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ sub(Operand(ecx), Immediate(FIRST_JS_OBJECT_TYPE));
- __ cmp(ecx, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
- __ j(below_equal, &shift_arguments);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &shift_arguments);
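// Editor's note: the STATIC_ASSERT above is what lets the old two-ended
// range check collapse into a single lower-bound compare; spec object
// types sit at the top of the instance-type range, so anything at or above
// FIRST_SPEC_OBJECT_TYPE already needs no receiver conversion.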
__ bind(&convert_to_object);
__ EnterInternalFrame(); // In order to preserve argument count.
@@ -671,10 +688,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
{ Label function;
__ test(edi, Operand(edi));
- __ j(not_zero, &function, taken);
+ __ j(not_zero, &function);
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ __ SetCallKind(ecx, CALL_AS_METHOD);
+ __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&function);
}
@@ -687,11 +705,14 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
__ SmiUntag(ebx);
+ __ SetCallKind(ecx, CALL_AS_METHOD);
__ cmp(eax, Operand(ebx));
- __ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline)));
+ __ j(not_equal,
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
ParameterCount expected(0);
- __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION);
+ __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
@@ -707,7 +728,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// limit" is checked.
Label okay;
ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit();
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
__ mov(edi, Operand::StaticVariable(real_stack_limit));
// Make ecx the space we have left. The stack might already be overflowed
// here which will cause ecx to become negative.
@@ -719,7 +740,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ shl(edx, kPointerSizeLog2 - kSmiTagSize);
// Check if the arguments will overflow the stack.
__ cmp(ecx, Operand(edx));
- __ j(greater, &okay, taken); // Signed comparison.
+ __ j(greater, &okay); // Signed comparison.
// Out of stack space.
__ push(Operand(ebp, 4 * kPointerSize)); // push this
@@ -750,24 +771,25 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
1 << SharedFunctionInfo::kStrictModeBitWithinByte);
__ j(not_equal, &push_receiver);
+ Factory* factory = masm->isolate()->factory();
+
+ // Do not transform the receiver for natives (shared already in ecx).
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, &push_receiver);
+
// Compute the receiver in non-strict mode.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &call_to_object);
- __ cmp(ebx, Factory::null_value());
+ // Call ToObject on the receiver if it is not an object, or use the
+ // global object if it is null or undefined.
+ __ JumpIfSmi(ebx, &call_to_object);
+ __ cmp(ebx, factory->null_value());
__ j(equal, &use_global_receiver);
- __ cmp(ebx, Factory::undefined_value());
+ __ cmp(ebx, factory->undefined_value());
__ j(equal, &use_global_receiver);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &push_receiver);
- // If given receiver is already a JavaScript object then there's no
- // reason for converting it.
- // We don't use IsObjectJSObjectType here because we jump on success.
- __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ sub(Operand(ecx), Immediate(FIRST_JS_OBJECT_TYPE));
- __ cmp(ecx, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
- __ j(below_equal, &push_receiver);
-
- // Convert the receiver to an object.
__ bind(&call_to_object);
__ push(ebx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
@@ -795,7 +817,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ mov(edx, Operand(ebp, 2 * kPointerSize)); // load arguments
// Use inline caching to speed up access to arguments.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
__ call(ic, RelocInfo::CODE_TARGET);
// It is important that we do not have a test instruction after the
// call. A test instruction after the call is used to indicate that
@@ -818,7 +840,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
ParameterCount actual(eax);
__ SmiUntag(eax);
__ mov(edi, Operand(ebp, 4 * kPointerSize));
- __ InvokeFunction(edi, actual, CALL_FUNCTION);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
__ LeaveInternalFrame();
__ ret(3 * kPointerSize); // remove this, receiver, and arguments
@@ -866,8 +889,9 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// scratch1: initial map
// scratch2: start of next object
__ mov(FieldOperand(result, JSObject::kMapOffset), scratch1);
+ Factory* factory = masm->isolate()->factory();
__ mov(FieldOperand(result, JSArray::kPropertiesOffset),
- Factory::empty_fixed_array());
+ factory->empty_fixed_array());
// Field JSArray::kElementsOffset is initialized later.
__ mov(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
@@ -875,7 +899,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// fixed array.
if (initial_capacity == 0) {
__ mov(FieldOperand(result, JSArray::kElementsOffset),
- Factory::empty_fixed_array());
+ factory->empty_fixed_array());
return;
}
@@ -892,7 +916,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// scratch1: elements array
// scratch2: start of next object
__ mov(FieldOperand(scratch1, FixedArray::kMapOffset),
- Factory::fixed_array_map());
+ factory->fixed_array_map());
__ mov(FieldOperand(scratch1, FixedArray::kLengthOffset),
Immediate(Smi::FromInt(initial_capacity)));
@@ -903,7 +927,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
if (initial_capacity <= kLoopUnfoldLimit) {
// Use a scratch register here to have only one reloc info when unfolding
// the loop.
- __ mov(scratch3, Factory::the_hole_value());
+ __ mov(scratch3, factory->the_hole_value());
for (int i = 0; i < initial_capacity; i++) {
__ mov(FieldOperand(scratch1,
FixedArray::kHeaderSize + i * kPointerSize),
@@ -913,7 +937,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Label loop, entry;
__ jmp(&entry);
__ bind(&loop);
- __ mov(Operand(scratch1, 0), Factory::the_hole_value());
+ __ mov(Operand(scratch1, 0), factory->the_hole_value());
__ add(Operand(scratch1), Immediate(kPointerSize));
__ bind(&entry);
__ cmp(scratch1, Operand(scratch2));
@@ -968,7 +992,8 @@ static void AllocateJSArray(MacroAssembler* masm,
// elements_array_end: start of next object
// array_size: size of array (smi)
__ mov(FieldOperand(result, JSObject::kMapOffset), elements_array);
- __ mov(elements_array, Factory::empty_fixed_array());
+ Factory* factory = masm->isolate()->factory();
+ __ mov(elements_array, factory->empty_fixed_array());
__ mov(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
// Field JSArray::kElementsOffset is initialized later.
__ mov(FieldOperand(result, JSArray::kLengthOffset), array_size);
@@ -987,7 +1012,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// elements_array_end: start of next object
// array_size: size of array (smi)
__ mov(FieldOperand(elements_array, FixedArray::kMapOffset),
- Factory::fixed_array_map());
+ factory->fixed_array_map());
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
__ mov(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
@@ -999,7 +1024,7 @@ static void AllocateJSArray(MacroAssembler* masm,
__ SmiUntag(array_size);
__ lea(edi, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
- __ mov(eax, Factory::the_hole_value());
+ __ mov(eax, factory->the_hole_value());
__ cld();
// Do not use rep stos when filling less than kRepStosThreshold
// words.
@@ -1063,7 +1088,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
edi,
kPreallocatedArrayElements,
&prepare_generic_code_call);
- __ IncrementCounter(&Counters::array_function_native, 1);
+ __ IncrementCounter(masm->isolate()->counters()->array_function_native(), 1);
__ pop(ebx);
if (construct_call) {
__ pop(edi);
@@ -1119,7 +1144,8 @@ static void ArrayNativeCode(MacroAssembler* masm,
edi,
true,
&prepare_generic_code_call);
- __ IncrementCounter(&Counters::array_function_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->array_function_native(), 1);
__ mov(eax, ebx);
__ pop(ebx);
if (construct_call) {
@@ -1146,7 +1172,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
edi,
false,
&prepare_generic_code_call);
- __ IncrementCounter(&Counters::array_function_native, 1);
+ __ IncrementCounter(counters->array_function_native(), 1);
__ mov(eax, ebx);
__ pop(ebx);
if (construct_call) {
@@ -1232,8 +1258,8 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Jump to the generic array code in case the specialized code cannot handle
// the construction.
__ bind(&generic_array_code);
- Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
- Handle<Code> array_code(code);
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->ArrayCodeGeneric();
__ jmp(array_code, RelocInfo::CODE_TARGET);
}
@@ -1248,11 +1274,9 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
Label generic_constructor;
if (FLAG_debug_code) {
- // The array construct code is only set for the builtin Array function which
- // does always have a map.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ebx);
- __ cmp(edi, Operand(ebx));
- __ Assert(equal, "Unexpected Array function");
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
// Initial map for the builtin Array function should be a map.
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
  // The smi check catches both a NULL and a Smi.
@@ -1268,8 +1292,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
- Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
- Handle<Code> generic_construct_stub(code);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
__ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
}
@@ -1282,7 +1306,8 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- __ IncrementCounter(&Counters::string_ctor_calls, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_ctor_calls(), 1);
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
@@ -1311,7 +1336,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
edx, // Scratch 2.
false, // Input is known to be smi?
&not_cached);
- __ IncrementCounter(&Counters::string_ctor_cached_number, 1);
+ __ IncrementCounter(counters->string_ctor_cached_number(), 1);
__ bind(&argument_is_string);
// ----------- S t a t e -------------
// -- ebx : argument converted to string
@@ -1340,7 +1365,8 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
// Set properties and elements.
- __ Set(ecx, Immediate(Factory::empty_fixed_array()));
+ Factory* factory = masm->isolate()->factory();
+ __ Set(ecx, Immediate(factory->empty_fixed_array()));
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
__ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
@@ -1358,17 +1384,16 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
Label convert_argument;
__ bind(&not_cached);
STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &convert_argument);
+ __ JumpIfSmi(eax, &convert_argument);
Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
__ j(NegateCondition(is_string), &convert_argument);
__ mov(ebx, eax);
- __ IncrementCounter(&Counters::string_ctor_string_value, 1);
+ __ IncrementCounter(counters->string_ctor_string_value(), 1);
__ jmp(&argument_is_string);
// Invoke the conversion builtin and put the result into ebx.
__ bind(&convert_argument);
- __ IncrementCounter(&Counters::string_ctor_conversions, 1);
+ __ IncrementCounter(counters->string_ctor_conversions(), 1);
__ EnterInternalFrame();
__ push(edi); // Preserve the function.
__ push(eax);
@@ -1381,7 +1406,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the empty string into ebx, remove the receiver from the
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
- __ Set(ebx, Immediate(Factory::empty_string()));
+ __ Set(ebx, Immediate(factory->empty_string()));
__ pop(ecx);
__ lea(esp, Operand(esp, kPointerSize));
__ push(ecx);
@@ -1390,7 +1415,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// At this point the argument is already a string. Call runtime to
// create a string wrapper.
__ bind(&gc_required);
- __ IncrementCounter(&Counters::string_ctor_gc_required, 1);
+ __ IncrementCounter(counters->string_ctor_gc_required(), 1);
__ EnterInternalFrame();
__ push(ebx);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
@@ -1409,12 +1434,12 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// Push the function on the stack.
__ push(edi);
- // Preserve the number of arguments on the stack. Must preserve both
- // eax and ebx because these registers are used when copying the
+ // Preserve the number of arguments on the stack. Must preserve eax,
+ // ebx and ecx because these registers are used when copying the
// arguments and the receiver.
ASSERT(kSmiTagSize == 1);
- __ lea(ecx, Operand(eax, eax, times_1, kSmiTag));
- __ push(ecx);
+ __ lea(edi, Operand(eax, eax, times_1, kSmiTag));
+ __ push(edi);
}
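// Editor's note: the scratch register moves from ecx to edi because ecx
// now transports the call kind information into the adaptor (see the state
// comment in the trampoline below) and must not be clobbered while the
// arguments and receiver are copied.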
@@ -1437,11 +1462,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
// -- ebx : expected number of arguments
+ // -- ecx : call kind information
// -- edx : code entry to call
// -----------------------------------
Label invoke, dont_adapt_arguments;
- __ IncrementCounter(&Counters::arguments_adaptors, 1);
+ __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
Label enough, too_few;
__ cmp(eax, Operand(ebx));
@@ -1456,14 +1482,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(eax, Operand(ebp, eax, times_4, offset));
- __ mov(ecx, -1); // account for receiver
+ __ mov(edi, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ inc(ecx);
+ __ inc(edi);
__ push(Operand(eax, 0));
__ sub(Operand(eax), Immediate(kPointerSize));
- __ cmp(ecx, Operand(ebx));
+ __ cmp(edi, Operand(ebx));
__ j(less, &copy);
__ jmp(&invoke);
}
@@ -1475,30 +1501,33 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(edi, Operand(ebp, eax, times_4, offset));
- __ mov(ecx, -1); // account for receiver
+ // ebx = expected - actual.
+ __ sub(ebx, Operand(eax));
+ // eax = -actual - 1
+ __ neg(eax);
+ __ sub(Operand(eax), Immediate(1));
Label copy;
__ bind(&copy);
- __ inc(ecx);
+ __ inc(eax);
__ push(Operand(edi, 0));
__ sub(Operand(edi), Immediate(kPointerSize));
- __ cmp(ecx, Operand(eax));
- __ j(less, &copy);
+ __ test(eax, Operand(eax));
+ __ j(not_zero, &copy);
// Fill remaining expected arguments with undefined values.
Label fill;
__ bind(&fill);
- __ inc(ecx);
- __ push(Immediate(Factory::undefined_value()));
- __ cmp(ecx, Operand(ebx));
+ __ inc(eax);
+ __ push(Immediate(masm->isolate()->factory()->undefined_value()));
+ __ cmp(eax, Operand(ebx));
__ j(less, &fill);
-
- // Restore function pointer.
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
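// Editor's worked example (illustrative): with actual = 1 and expected = 3,
// the setup above leaves ebx = 2 and eax = -2. The copy loop pushes the
// receiver and one argument while counting eax up to 0; the fill loop then
// pushes undefined at eax = 1 and eax = 2, stopping when eax == ebx. The
// frame ends up holding the receiver plus three argument slots, as the
// callee expects.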
// Call the entry point.
__ bind(&invoke);
+ // Restore function pointer.
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ call(Operand(edx));
// Leave frame and return.
@@ -1514,10 +1543,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- // We shouldn't be performing on-stack replacement in the first
- // place if the CPU features we need for the optimized Crankshaft
- // code aren't supported.
- CpuFeatures::Probe(false);
+ CpuFeatures::TryForceFeatureScope scope(SSE2);
if (!CpuFeatures::IsSupported(SSE2)) {
__ Abort("Unreachable code: Cannot optimize without SSE2 support.");
return;
@@ -1552,19 +1578,19 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
- NearLabel skip;
+ Label skip;
__ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
- __ j(not_equal, &skip);
+ __ j(not_equal, &skip, Label::kNear);
__ ret(0);
// If we decide not to perform on-stack replacement we perform a
// stack guard check to enable interrupts.
__ bind(&stack_check);
- NearLabel ok;
+ Label ok;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit();
+ ExternalReference::address_of_stack_limit(masm->isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, taken);
+ __ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ TailCallStub(&stub);
__ Abort("Unreachable code: returned from tail call.");
@@ -1584,7 +1610,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
#undef __
-
-} } // namespace v8::internal
+}
+} // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 506e37f60..d97206d0b 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -29,8 +29,9 @@
#if defined(V8_TARGET_ARCH_IA32)
-#include "code-stubs.h"
#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "isolate.h"
#include "jsregexp.h"
#include "regexp-macro-assembler.h"
@@ -41,15 +42,15 @@ namespace internal {
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in eax.
- NearLabel check_heap_number, call_builtin;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &check_heap_number);
+ Label check_heap_number, call_builtin;
+ __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
__ ret(0);
__ bind(&check_heap_number);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
- __ j(not_equal, &call_builtin);
+ Factory* factory = masm->isolate()->factory();
+ __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
+ __ j(not_equal, &call_builtin, Label::kNear);
__ ret(0);
__ bind(&call_builtin);
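For context on the JumpIfNotSmi test above: on ia32 a smi stores a 31-bit
integer shifted left by one, so the low (tag) bit of a smi is always zero and
tagging/untagging are single shifts. A minimal sketch of the encoding:

    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // low bit: 0 = smi, 1 = heap pointer

    bool IsSmi(intptr_t word) { return (word & kSmiTagMask) == 0; }
    intptr_t SmiTag(int32_t n) { return static_cast<intptr_t>(n) << 1; }
    int32_t SmiUntag(intptr_t word) { return static_cast<int32_t>(word >> 1); }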
@@ -69,25 +70,30 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Get the function info from the stack.
__ mov(edx, Operand(esp, 1 * kPointerSize));
+ int map_index = strict_mode_ == kStrictMode
+ ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
+ : Context::FUNCTION_MAP_INDEX;
+
// Compute the function map in the current global context and set that
// as the map of the allocated object.
__ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
- __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ mov(ecx, Operand(ecx, Context::SlotOffset(map_index)));
__ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
// Initialize the rest of the function. We don't have to update the
// write barrier because the allocated object is in new space.
- __ mov(ebx, Immediate(Factory::empty_fixed_array()));
+ Factory* factory = masm->isolate()->factory();
+ __ mov(ebx, Immediate(factory->empty_fixed_array()));
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
__ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
- Immediate(Factory::the_hole_value()));
+ Immediate(factory->the_hole_value()));
__ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
__ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
__ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
__ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
- Immediate(Factory::undefined_value()));
+ Immediate(factory->undefined_value()));
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
@@ -104,7 +110,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ pop(edx);
__ push(esi);
__ push(edx);
- __ push(Immediate(Factory::false_value()));
+ __ push(Immediate(factory->false_value()));
__ push(ecx); // Restore return address.
__ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}
@@ -121,26 +127,24 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ mov(ecx, Operand(esp, 1 * kPointerSize));
// Setup the object header.
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
+ Factory* factory = masm->isolate()->factory();
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+ factory->function_context_map());
__ mov(FieldOperand(eax, Context::kLengthOffset),
Immediate(Smi::FromInt(length)));
// Setup the fixed slots.
__ Set(ebx, Immediate(0)); // Set to NULL.
__ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
- __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
- __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
+ __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
__ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
- // Copy the global object from the surrounding context. We go through the
- // context in the function (ecx) to match the allocation behavior we have
- // in the runtime system (see Heap::AllocateFunctionContext).
- __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset));
- __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Copy the global object from the previous context.
+ __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
// Initialize the rest of the slots to undefined.
- __ mov(ebx, Factory::undefined_value());
+ __ mov(ebx, factory->undefined_value());
for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
__ mov(Operand(eax, Context::SlotOffset(i)), ebx);
}
@@ -151,7 +155,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Need to collect. Call into runtime system.
__ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+ __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}
@@ -176,7 +180,8 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
__ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
FixedArray::kHeaderSize));
- __ cmp(ecx, Factory::undefined_value());
+ Factory* factory = masm->isolate()->factory();
+ __ cmp(ecx, factory->undefined_value());
__ j(equal, &slow_case);
if (FLAG_debug_code) {
@@ -184,11 +189,11 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
Handle<Map> expected_map;
if (mode_ == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
- expected_map = Factory::fixed_array_map();
+ expected_map = factory->fixed_array_map();
} else {
ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
- expected_map = Factory::fixed_cow_array_map();
+ expected_map = factory->fixed_cow_array_map();
}
__ push(ecx);
__ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
@@ -231,215 +236,74 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
}
-// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
+// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
- NearLabel false_result, true_result, not_string;
+ Label false_result, true_result, not_string;
+ Factory* factory = masm->isolate()->factory();
+ const Register map = edx;
+
__ mov(eax, Operand(esp, 1 * kPointerSize));
- // 'null' => false.
- __ cmp(eax, Factory::null_value());
+ // undefined -> false
+ __ cmp(eax, factory->undefined_value());
__ j(equal, &false_result);
- // Get the map and type of the heap object.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+ // Boolean -> its value
+ __ cmp(eax, factory->false_value());
+ __ j(equal, &false_result);
+ __ cmp(eax, factory->true_value());
+ __ j(equal, &true_result);
+
+ // Smis: 0 -> false, all other -> true
+ __ test(eax, Operand(eax));
+ __ j(zero, &false_result);
+ __ JumpIfSmi(eax, &true_result);
+
+ // 'null' -> false.
+ __ cmp(eax, factory->null_value());
+ __ j(equal, &false_result, Label::kNear);
- // Undetectable => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ // Get the map of the heap object.
+ __ mov(map, FieldOperand(eax, HeapObject::kMapOffset));
+
+ // Undetectable -> false.
+ __ test_b(FieldOperand(map, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
- __ j(not_zero, &false_result);
+ __ j(not_zero, &false_result, Label::kNear);
- // JavaScript object => true.
- __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
- __ j(above_equal, &true_result);
+ // JavaScript object -> true.
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ j(above_equal, &true_result, Label::kNear);
- // String value => false iff empty.
- __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string);
- STATIC_ASSERT(kSmiTag == 0);
+ // String value -> false iff empty.
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &not_string, Label::kNear);
__ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
- __ j(zero, &false_result);
- __ jmp(&true_result);
+ __ j(zero, &false_result, Label::kNear);
+ __ jmp(&true_result, Label::kNear);
__ bind(&not_string);
- // HeapNumber => false iff +0, -0, or NaN.
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &true_result);
+ // HeapNumber -> false iff +0, -0, or NaN.
+ __ cmp(map, factory->heap_number_map());
+ __ j(not_equal, &true_result, Label::kNear);
__ fldz();
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ FCmp();
- __ j(zero, &false_result);
+ __ j(zero, &false_result, Label::kNear);
// Fall through to |true_result|.
- // Return 1/0 for true/false in eax.
+ // Return 1/0 for true/false in tos_.
__ bind(&true_result);
- __ mov(eax, 1);
+ __ mov(tos_, 1);
__ ret(1 * kPointerSize);
__ bind(&false_result);
- __ mov(eax, 0);
+ __ mov(tos_, 0);
__ ret(1 * kPointerSize);
}
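The ladder above is the ECMAScript ToBoolean ordering, checked cheapest case
first. The same decision sequence over a toy value model (purely illustrative,
not V8 types):

    #include <cmath>
    #include <string>

    struct Value {  // toy model, illustrative only
      enum Kind { UNDEFINED, NULL_VALUE, BOOLEAN, SMI, STRING, NUMBER,
                  OBJECT, UNDETECTABLE } kind;
      bool boolean = false;
      int smi = 0;
      std::string string;
      double number = 0.0;
    };

    bool ToBoolean(const Value& v) {
      switch (v.kind) {
        case Value::UNDEFINED:    return false;              // undefined -> false
        case Value::BOOLEAN:      return v.boolean;          // its value
        case Value::SMI:          return v.smi != 0;         // 0 -> false
        case Value::NULL_VALUE:   return false;              // null -> false
        case Value::UNDETECTABLE: return false;              // e.g. document.all
        case Value::OBJECT:       return true;               // objects -> true
        case Value::STRING:       return !v.string.empty();  // false iff empty
        case Value::NUMBER:       // false iff +0, -0, or NaN
          return !std::isnan(v.number) && v.number != 0.0;
      }
      return true;
    }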
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
- op_name,
- overwrite_name,
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
- args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "",
- static_operands_type_.ToString(),
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (!(left.is(left_arg) && right.is(right_arg))) {
- if (left.is(right_arg) && right.is(left_arg)) {
- if (IsOperationCommutative()) {
- SetArgsReversed();
- } else {
- __ xchg(left, right);
- }
- } else if (left.is(left_arg)) {
- __ mov(right_arg, right);
- } else if (right.is(right_arg)) {
- __ mov(left_arg, left);
- } else if (left.is(right_arg)) {
- if (IsOperationCommutative()) {
- __ mov(left_arg, right);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying left argument.
- __ mov(left_arg, left);
- __ mov(right_arg, right);
- }
- } else if (right.is(left_arg)) {
- if (IsOperationCommutative()) {
- __ mov(right_arg, left);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying right argument.
- __ mov(right_arg, right);
- __ mov(left_arg, left);
- }
- } else {
- // Order of moves is not important.
- __ mov(left_arg, left);
- __ mov(right_arg, right);
- }
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Smi* right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(Immediate(right));
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (left.is(left_arg)) {
- __ mov(right_arg, Immediate(right));
- } else if (left.is(right_arg) && IsOperationCommutative()) {
- __ mov(left_arg, Immediate(right));
- SetArgsReversed();
- } else {
- // For non-commutative operations, left and right_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite left before moving
- // it to left_arg.
- __ mov(left_arg, left);
- __ mov(right_arg, Immediate(right));
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Smi* left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(Immediate(left));
- __ push(right);
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (right.is(right_arg)) {
- __ mov(left_arg, Immediate(left));
- } else if (right.is(left_arg) && IsOperationCommutative()) {
- __ mov(right_arg, Immediate(left));
- SetArgsReversed();
- } else {
- // For non-commutative operations, right and left_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite right before moving
- // it to right_arg.
- __ mov(right_arg, right);
- __ mov(left_arg, Immediate(left));
- }
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
class FloatingPointHelper : public AllStatic {
public:
-
enum ArgLocation {
ARGS_ON_STACK,
ARGS_IN_REGISTERS
@@ -477,14 +341,6 @@ class FloatingPointHelper : public AllStatic {
// Takes the operands in edx and eax and loads them as integers in eax
// and ecx.
- static void LoadAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* operand_conversion_failure);
- static void LoadNumbersAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* operand_conversion_failure);
static void LoadUnknownsAsIntegers(MacroAssembler* masm,
bool use_sse3,
Label* operand_conversion_failure);
@@ -520,641 +376,287 @@ class FloatingPointHelper : public AllStatic {
};
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the
- // dividend in eax and edx free for the division. Use eax, ebx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = edx;
- Register right = eax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
- left = eax;
- right = ebx;
- if (HasArgsInRegisters()) {
- __ mov(ebx, eax);
- __ mov(eax, edx);
- }
- }
- if (!HasArgsInRegisters()) {
- __ mov(right, Operand(esp, 1 * kPointerSize));
- __ mov(left, Operand(esp, 2 * kPointerSize));
- }
+// Get the integer part of a heap number. Surprisingly, all this bit twiddling
+// is faster than using the built-in instructions on floating point registers.
+// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
+// trashed registers.
+static void IntegerConvert(MacroAssembler* masm,
+ Register source,
+ bool use_sse3,
+ Label* conversion_failure) {
+ ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
+ Label done, right_exponent, normal_exponent;
+ Register scratch = ebx;
+ Register scratch2 = edi;
+ // Get exponent word.
+ __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kExponentMask);
+ if (use_sse3) {
+ CpuFeatures::Scope scope(SSE3);
+ // Check whether the exponent is too big for a 64 bit signed integer.
+ static const uint32_t kTooBigExponent =
+ (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+ __ j(greater_equal, conversion_failure);
+ // Load x87 register with heap number.
+ __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
+ // Reserve space for 64 bit answer.
+ __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ // Do conversion, which cannot fail because we checked the exponent.
+ __ fisttp_d(Operand(esp, 0));
+ __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
+ __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ } else {
+ // Load ecx with zero. We use this either for the final shift or
+ // for the answer.
+ __ xor_(ecx, Operand(ecx));
+ // Check whether the exponent matches a 32 bit signed int that cannot be
+ // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
+ // exponent is 30 (biased). This is the exponent that we are fastest at and
+ // also the highest exponent we can handle here.
+ const uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
+ // If we have a match of the int32-but-not-Smi exponent then skip some
+ // logic.
+ __ j(equal, &right_exponent);
+ // If the exponent is higher than that then go to slow case. This catches
+ // numbers that don't fit in a signed int32, infinities and NaNs.
+ __ j(less, &normal_exponent);
- if (static_operands_type_.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
- if (op_ == Token::BIT_OR) {
- __ or_(right, Operand(left));
- GenerateReturn(masm);
- return;
- } else if (op_ == Token::BIT_AND) {
- __ and_(right, Operand(left));
- GenerateReturn(masm);
- return;
- } else if (op_ == Token::BIT_XOR) {
- __ xor_(right, Operand(left));
- GenerateReturn(masm);
- return;
+ {
+ // Handle a big exponent. The only reason we have this code is that the
+ // >>> operator has a tendency to generate numbers with an exponent of 31.
+ const uint32_t big_non_smi_exponent =
+ (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
+ __ j(not_equal, conversion_failure);
+ // We have the big exponent, typically from >>>. This means the number is
+ // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kMantissaMask);
+ // Put back the implicit 1.
+ __ or_(scratch2, 1 << HeapNumber::kExponentShift);
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just orred in the implicit bit so that took care of one and
+ // we want to use the full unsigned range so we subtract 1 bit from the
+ // shift distance.
+ const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ __ shl(scratch2, big_shift_distance);
+ // Get the second half of the double.
+ __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 21 bits to get the most significant 11 bits or the low
+ // mantissa word.
+ __ shr(ecx, 32 - big_shift_distance);
+ __ or_(ecx, Operand(scratch2));
+ // We have the answer in ecx, but we may need to negate it.
+ __ test(scratch, Operand(scratch));
+ __ j(positive, &done);
+ __ neg(ecx);
+ __ jmp(&done);
}
- }
- // 2. Prepare the smi check of both operands by oring them together.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- Label not_smis;
- Register combined = ecx;
- ASSERT(!left.is(combined) && !right.is(combined));
- switch (op_) {
- case Token::BIT_OR:
- // Perform the operation into eax and smi check the result. Preserve
- // eax in case the result is not a smi.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, Operand(left)); // Bitwise or is commutative.
- combined = right;
- break;
-
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- __ mov(combined, right);
- __ or_(combined, Operand(left));
- break;
+ __ bind(&normal_exponent);
+ // Exponent word in scratch, exponent part of exponent word in scratch2.
+ // Zero in ecx.
+ // We know the exponent is smaller than 30 (biased). If it is less than
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
+ // it rounds to zero.
+ const uint32_t zero_exponent =
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+ __ sub(Operand(scratch2), Immediate(zero_exponent));
+ // ecx already has a Smi zero.
+ __ j(less, &done);
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Move the right operand into ecx for the shift operation, use eax
- // for the smi check register.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, Operand(left));
- combined = right;
- break;
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ shr(scratch2, HeapNumber::kExponentShift);
+ __ mov(ecx, Immediate(30));
+ __ sub(ecx, Operand(scratch2));
- default:
- break;
+ __ bind(&right_exponent);
+ // Here ecx is the shift, scratch is the exponent word.
+ // Get the top bits of the mantissa.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ // Put back the implicit 1.
+ __ or_(scratch, 1 << HeapNumber::kExponentShift);
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We have kExponentShift + 1 significant bits in the low end of the
+ // word. Shift them to the top bits.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ shl(scratch, shift_distance);
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 22 bits to get the most significant 10 bits or the low
+ // mantissa word.
+ __ shr(scratch2, 32 - shift_distance);
+ __ or_(scratch2, Operand(scratch));
+ // Move down according to the exponent.
+ __ shr_cl(scratch2);
+ // Now the unsigned answer is in scratch2. We need to move it to ecx and
+ // we may need to fix the sign.
+ Label negative;
+ __ xor_(ecx, Operand(ecx));
+ __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
+ __ j(greater, &negative, Label::kNear);
+ __ mov(ecx, scratch2);
+ __ jmp(&done, Label::kNear);
+ __ bind(&negative);
+ __ sub(ecx, Operand(scratch2));
+ __ bind(&done);
}
+}
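What IntegerConvert computes is the ECMAScript ToInt32 conversion: the double
truncated toward zero and reduced modulo 2^32. A portable sketch of the same
exponent/mantissa manipulation, without the stub's register scheduling or the
SSE3 fast path (illustrative only):

    #include <cstdint>
    #include <cstring>

    int32_t DoubleToInt32(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      int biased = static_cast<int>((bits >> 52) & 0x7FF);
      if (biased == 0x7FF) return 0;  // NaN and infinities convert to 0.
      uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
      if (biased != 0) mantissa |= uint64_t{1} << 52;  // put back the implicit 1
      // The binary point sits after the 52 stored mantissa bits.
      int exponent = biased - 1075;
      if (exponent >= 32 || exponent <= -53) return 0;  // everything shifts out
      uint64_t shifted = exponent >= 0 ? mantissa << exponent
                                       : mantissa >> -exponent;
      uint32_t magnitude = static_cast<uint32_t>(shifted);  // reduce mod 2^32
      if (bits >> 63) magnitude = 0u - magnitude;           // apply the sign
      return static_cast<int32_t>(magnitude);
    }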
- // 3. Perform the smi check of the operands.
- STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ test(combined, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smis, not_taken);
-
- // 4. Operands are both smis, perform the operation leaving the result in
- // eax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op_) {
- case Token::BIT_OR:
- // Nothing to do.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(eax));
- __ xor_(right, Operand(left)); // Bitwise xor is commutative.
- break;
- case Token::BIT_AND:
- ASSERT(right.is(eax));
- __ and_(right, Operand(left)); // Bitwise and is commutative.
- break;
+const char* UnaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name = NULL; // Make g++ happy.
+ switch (mode_) {
+ case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+ }
- case Token::SHL:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shl_cl(left);
- // Check that the *signed* result fits in a smi.
- __ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis, not_taken);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "UnaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ UnaryOpIC::GetName(operand_type_));
+ return name_;
+}
- case Token::SAR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ sar_cl(left);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
- case Token::SHR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shr_cl(left);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
- // Smi tagging these two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi.
- __ test(left, Immediate(0xc0000000));
- __ j(not_zero, slow, not_taken);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::Generate(MacroAssembler* masm) {
+ switch (operand_type_) {
+ case UnaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
break;
-
- case Token::ADD:
- ASSERT(right.is(eax));
- __ add(right, Operand(left)); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
+ case UnaryOpIC::SMI:
+ GenerateSmiStub(masm);
break;
-
- case Token::SUB:
- __ sub(left, Operand(right));
- __ j(overflow, &use_fp_on_smis, not_taken);
- __ mov(eax, left);
+ case UnaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
break;
-
- case Token::MUL:
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // We can't revert the multiplication if the result is not a smi
- // so save the right operand.
- __ mov(ebx, right);
- // Remove tag from one of the operands (but keep sign).
- __ SmiUntag(right);
- // Do multiplication.
- __ imul(right, Operand(left)); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(right, combined, &use_fp_on_smis);
+ case UnaryOpIC::GENERIC:
+ GenerateGenericStub(masm);
break;
+ }
+}
- case Token::DIV:
- // We can't revert the division if the result is not a smi so
- // save the left operand.
- __ mov(edi, left);
- // Check for 0 divisor.
- __ test(right, Operand(right));
- __ j(zero, &use_fp_on_smis, not_taken);
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by idiv
- // instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- __ j(equal, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
- // Check that the remainder is zero.
- __ test(edx, Operand(edx));
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
- break;
- case Token::MOD:
- // Check for 0 divisor.
- __ test(right, Operand(right));
- __ j(zero, &not_smis, not_taken);
+void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ __ pop(ecx); // Save return address.
+ __ push(eax);
+ // The argument is now on top.
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
+ __ push(Immediate(Smi::FromInt(MinorKey())));
+ __ push(Immediate(Smi::FromInt(op_)));
+ __ push(Immediate(Smi::FromInt(operand_type_)));
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(edx, combined, slow);
- // Move remainder to register eax.
- __ mov(eax, edx);
- break;
+ __ push(ecx); // Push return address.
- default:
- UNREACHABLE();
- }
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
+ masm->isolate()), 4, 1);
+}
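GenerateTypeTransition is the miss path of the unary-op inline cache: the stub
pushes enough information for the runtime to patch the call site to a stub
specialized for the types actually seen, generalizing until it reaches GENERIC.
A hedged sketch of that progression (illustrative enum, not the real transition
table in ic.cc):

    enum UnaryState { UNINITIALIZED, SMI, HEAP_NUMBER, GENERIC };

    // Each miss can only move the state toward GENERIC, never back.
    UnaryState NextState(UnaryState current, bool saw_smi, bool saw_number) {
      if (current == UNINITIALIZED && saw_smi) return SMI;
      if (current <= SMI && saw_number) return HEAP_NUMBER;
      return GENERIC;
    }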
- // 5. Emit return of result in eax.
- GenerateReturn(masm);
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
switch (op_) {
- case Token::SHL: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
- // Result we want is in left == edx, so we can put the allocated heap
- // number in eax.
- __ AllocateHeapNumber(eax, ecx, ebx, slow);
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(left));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- // It's OK to overwrite the right argument on the stack because we
- // are about to return.
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- GenerateReturn(masm);
- } else {
- ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
- __ jmp(slow);
- }
- break;
- }
-
- case Token::ADD:
case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Restore arguments to edx, eax.
- switch (op_) {
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, Operand(left));
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, Operand(right));
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division.
- __ mov(edx, edi);
- __ mov(eax, right);
- break;
- default: UNREACHABLE();
- break;
- }
- if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
- }
- __ mov(eax, ecx);
- GenerateReturn(masm);
- } else {
- ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
- __ jmp(slow);
- }
- break;
- }
-
- default:
+ GenerateSmiStubSub(masm);
break;
- }
-
- // 7. Non-smi operands, fall out to the non-smi code with the operands in
- // edx and eax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
- switch (op_) {
- case Token::BIT_OR:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Right operand is saved in ecx and eax was destroyed by the smi
- // check.
- __ mov(eax, ecx);
- break;
-
- case Token::DIV:
- case Token::MOD:
- // Operands are in eax, ebx at this point.
- __ mov(edx, eax);
- __ mov(eax, ebx);
+ case Token::BIT_NOT:
+ GenerateSmiStubBitNot(masm);
break;
-
default:
- break;
+ UNREACHABLE();
}
}
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
-
- __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
+void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+ Label non_smi, undo, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &undo, &slow,
+ Label::kNear, Label::kNear, Label::kNear);
+ __ bind(&undo);
+ GenerateSmiCodeUndo(masm);
+ __ bind(&non_smi);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+}
- if (runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI) {
- Label slow;
- if (ShouldGenerateSmiCode()) GenerateSmiCode(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- }
- // Generate fast case smi code if requested. This flag is set when the fast
- // case smi code is not generated by the caller. Generating it here will speed
- // up common operations.
- if (ShouldGenerateSmiCode()) {
- GenerateSmiCode(masm, &call_runtime);
- } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
- if (!HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
- }
+void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+ Label non_smi;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateTypeTransition(masm);
+}
- // Floating point case.
- if (ShouldGenerateFPCode()) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-smi argument occurs
- // (and only if smi code is generated). This is the right moment to
- // patch to HEAP_NUMBERS state. The transition is attempted only for
- // the four basic operations. The stub stays in the DEFAULT state
- // forever for all other operations (also if smi code is skipped).
- GenerateTypeTransition(masm);
- break;
- }
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(edx);
- __ AbortIfNotNumber(eax);
- }
- if (static_operands_type_.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(edx);
- __ AbortIfNotSmi(eax);
- }
- FloatingPointHelper::LoadSSE2Smis(masm, ecx);
- } else {
- FloatingPointHelper::LoadSSE2Operands(masm);
- }
- } else {
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- }
+void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+ Label* non_smi,
+ Label* undo,
+ Label* slow,
+ Label::Distance non_smi_near,
+ Label::Distance undo_near,
+ Label::Distance slow_near) {
+ // Check whether the value is a smi.
+ __ JumpIfNotSmi(eax, non_smi, non_smi_near);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- GenerateHeapResultAllocation(masm, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- } else { // SSE2 not available, use FPU.
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(edx);
- __ AbortIfNotNumber(eax);
- }
- } else {
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- }
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- GenerateReturn(masm);
- __ bind(&after_alloc_failure);
- __ ffree();
- __ jmp(&call_runtime);
- }
- __ bind(&not_floats);
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- !HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-number argument
- // occurs (and only if smi code is skipped from the stub, otherwise
- // the patching has already been done earlier in this case branch).
- // Try patching to STRINGS for ADD operation.
- if (op_ == Token::ADD) {
- GenerateTypeTransition(masm);
- }
- }
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_result;
- FloatingPointHelper::LoadAsIntegers(masm,
- static_operands_type_,
- use_sse3_,
- &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- GenerateReturn(masm);
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
- NearLabel skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ebx));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- GenerateReturn(masm);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
- }
+ // We can't handle -0 with smis, so use a type transition for that case.
+ __ test(eax, Operand(eax));
+ __ j(zero, slow, slow_near);
- // If all else fails, use the runtime system to get the correct
- // result. If arguments was passed in registers now place them on the
- // stack in the correct order below the return address.
+ // Try optimistic subtraction '0 - value', saving the operand in edx for undo.
+ __ mov(edx, Operand(eax));
+ __ Set(eax, Immediate(0));
+ __ sub(eax, Operand(edx));
+ __ j(overflow, undo, undo_near);
+ __ ret(0);
+}
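Two corner cases force the slow paths above: negating smi zero must yield -0.0,
which only a heap number can represent, and negating the most negative smi
overflows the smi range. A sketch of the predicate (portable C++, illustrative):

    #include <cstdint>

    const int32_t kMinSmiValue = -(1 << 30);  // ia32 smis are 31-bit signed

    bool SmiNegationNeedsSlowPath(int32_t n) {
      return n == 0               // -0 is not a smi
          || n == kMinSmiValue;   // -kMinSmiValue does not fit in 31 bits
    }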
- // Avoid hitting the string ADD code below when allocation fails in
- // the floating point code above.
- if (op_ != Token::ADD) {
- __ bind(&call_runtime);
- }
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
+void UnaryOpStub::GenerateSmiCodeBitNot(
+ MacroAssembler* masm,
+ Label* non_smi,
+ Label::Distance non_smi_near) {
+ // Check whether the value is a smi.
+ __ JumpIfNotSmi(eax, non_smi, non_smi_near);
+
+ // Flip bits and revert inverted smi-tag.
+ __ not_(eax);
+ __ and_(eax, ~kSmiTagMask);
+ __ ret(0);
+}
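The two instructions above lean on the tagging arithmetic: a smi encodes n as
2n, and ~(2n) = -2n - 1 is an odd word with the tag bit set; clearing that bit
gives -2n - 2 = 2*(~n), which is exactly the smi for ~n. A self-checking
sketch:

    #include <cassert>
    #include <cstdint>

    const intptr_t kSmiTagMask = 1;

    intptr_t SmiBitNot(intptr_t smi) {
      return ~smi & ~kSmiTagMask;  // flip all bits, clear the inverted tag
    }

    int main() {
      for (int32_t n = -5; n <= 5; ++n)
        assert(SmiBitNot(intptr_t{n} << 1) == (intptr_t{~n} << 1));
    }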
- switch (op_) {
- case Token::ADD: {
- // Test for string arguments before calling runtime.
- // If this stub has already generated FP-specific code then the arguments
- // are already in edx, eax
- if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
+void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
+ __ mov(eax, Operand(edx));
+}
- // Registers containing left and right operands respectively.
- Register lhs, rhs;
- if (HasArgsReversed()) {
- lhs = eax;
- rhs = edx;
- } else {
- lhs = edx;
- rhs = eax;
- }
- // Test if left operand is a string.
- NearLabel lhs_not_string;
- __ test(lhs, Immediate(kSmiTagMask));
- __ j(zero, &lhs_not_string);
- __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &lhs_not_string);
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- __ TailCallStub(&string_add_left_stub);
-
- NearLabel call_runtime_with_args;
- // Left operand is not a string, test right.
- __ bind(&lhs_not_string);
- __ test(rhs, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime_with_args);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime_with_args);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
- __ bind(&call_runtime_with_args);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+ switch (op_) {
case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ GenerateHeapNumberStubSub(masm);
break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ case Token::BIT_NOT:
+ GenerateHeapNumberStubBitNot(masm);
break;
default:
UNREACHABLE();
@@ -1162,128 +664,182 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
-void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure) {
- Label skip_allocation;
- OverwriteMode mode = mode_;
- if (HasArgsReversed()) {
- if (mode == OVERWRITE_RIGHT) {
- mode = OVERWRITE_LEFT;
- } else if (mode == OVERWRITE_LEFT) {
- mode = OVERWRITE_RIGHT;
- }
- }
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in edx is already an object, we skip the
- // allocation of a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now edx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(edx, Operand(ebx));
- __ bind(&skip_allocation);
- // Use object in edx as a result holder
- __ mov(eax, Operand(edx));
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
+void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+ Label non_smi, undo, slow, call_builtin;
+ GenerateSmiCodeSub(masm, &non_smi, &undo, &call_builtin, Label::kNear);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&undo);
+ GenerateSmiCodeUndo(masm);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+ __ bind(&call_builtin);
+ GenerateGenericCodeFallback(masm);
}
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
- // If arguments are not passed in registers read them from the stack.
- ASSERT(!HasArgsInRegisters());
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 2 * kPointerSize));
+void UnaryOpStub::GenerateHeapNumberStubBitNot(
+ MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
}
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
- // If arguments are not passed in registers remove them from the stack before
- // returning.
- if (!HasArgsInRegisters()) {
- __ ret(2 * kPointerSize); // Remove both operands
+void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+ Label* slow) {
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, slow);
+
+ if (mode_ == UNARY_OVERWRITE) {
+ __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
+ Immediate(HeapNumber::kSignMask)); // Flip sign.
} else {
- __ ret(0);
- }
-}
+ __ mov(edx, Operand(eax));
+ // edx: operand
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- ASSERT(HasArgsInRegisters());
- __ pop(ecx);
- if (HasArgsReversed()) {
- __ push(eax);
- __ push(edx);
- } else {
+ __ bind(&slow_allocate_heapnumber);
+ __ EnterInternalFrame();
__ push(edx);
- __ push(eax);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ pop(edx);
+ __ LeaveInternalFrame();
+
+ __ bind(&heapnumber_allocated);
+ // eax: allocated 'empty' number
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
+ __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
}
- __ push(ecx);
+ __ ret(0);
}
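The UNARY_OVERWRITE path above negates in place by xor-ing the sign bit of the
stored double (the stub applies HeapNumber::kSignMask to the high word). The
equivalent operation in portable C++ (illustrative):

    #include <cstdint>
    #include <cstring>

    double FlipSign(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      bits ^= uint64_t{1} << 63;  // the IEEE 754 sign bit
      std::memcpy(&v, &bits, sizeof bits);
      return v;  // handles 0, NaN and infinities uniformly
    }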
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- // Ensure the operands are on the stack.
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
+void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
+ Label* slow) {
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, slow);
+
+ // Convert the heap number in eax to an untagged integer in ecx.
+ IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), slow);
+
+ // Do the bitwise operation and check if the result fits in a smi.
+ Label try_float;
+ __ not_(ecx);
+ __ cmp(ecx, 0xc0000000);
+ __ j(sign, &try_float, Label::kNear);
+
+ // Tag the result as a smi and we're done.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ lea(eax, Operand(ecx, times_2, kSmiTag));
+ __ ret(0);
+
+ // Try to store the result in a heap number.
+ __ bind(&try_float);
+ if (mode_ == UNARY_NO_OVERWRITE) {
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ __ mov(ebx, eax);
+ __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ __ EnterInternalFrame();
+ // Push the original HeapNumber on the stack. The integer value can't
+ // be stored since it's untagged and not in the smi range (so we can't
+ // smi-tag it). We'll recalculate the value after the GC instead.
+ __ push(ebx);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ // New HeapNumber is in eax.
+ __ pop(edx);
+ __ LeaveInternalFrame();
+ // IntegerConvert uses ebx and edi as scratch registers.
+ // This conversion won't go slow-case.
+ IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
+ __ not_(ecx);
+
+ __ bind(&heapnumber_allocated);
+ }
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(ecx));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ push(ecx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(ecx);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
}
+ __ ret(0);
+}
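The cmp against 0xc0000000 followed by j(sign, ...) above is the usual
smi-range test: a 32-bit value fits in a 31-bit smi exactly when its top two
bits agree, and subtracting 0xc0000000 (equivalently, adding 2^30) leaves the
sign bit clear precisely for values in [-2^30, 2^30). A sketch of the
predicate:

    #include <cstdint>

    bool FitsInSmi(int32_t v) {
      // Same as v >= -(1 << 30) && v < (1 << 30), written the way the
      // stub tests it: the sign of (v - 0xc0000000) decides.
      return static_cast<int32_t>(static_cast<uint32_t>(v) - 0xc0000000u) >= 0;
    }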
- __ pop(ecx); // Save return address.
- // Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateGenericStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateGenericStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
- __ push(ecx); // Push return address.
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
- 5,
- 1);
+void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+ Label non_smi, undo, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &undo, &slow, Label::kNear);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&undo);
+ GenerateSmiCodeUndo(masm);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
}
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
+void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
}
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
- TRBinaryOpIC::TypeInfo type_info,
- TRBinaryOpIC::TypeInfo result_type_info) {
- TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
- return stub.GetCode();
+void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
+ // Handle the slow case by jumping to the corresponding JavaScript builtin.
+ __ pop(ecx); // pop return address.
+ __ push(eax);
+ __ push(ecx); // push return address
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(ecx); // Save return address.
__ push(edx);
__ push(eax);
@@ -1299,7 +855,8 @@ void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
+ masm->isolate()),
5,
1);
}
@@ -1307,8 +864,7 @@ void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
// Prepare for a type transition runtime call when the args are already on
// the stack, under the return address.
-void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
+void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
__ pop(ecx); // Save return address.
// Left and right arguments are already on top of the stack.
// Push this stub's key. Although the operation and the type info are
@@ -1322,33 +878,37 @@ void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
+ masm->isolate()),
5,
1);
}
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+void BinaryOpStub::Generate(MacroAssembler* masm) {
switch (operands_type_) {
- case TRBinaryOpIC::UNINITIALIZED:
+ case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
break;
- case TRBinaryOpIC::SMI:
+ case BinaryOpIC::SMI:
GenerateSmiStub(masm);
break;
- case TRBinaryOpIC::INT32:
+ case BinaryOpIC::INT32:
GenerateInt32Stub(masm);
break;
- case TRBinaryOpIC::HEAP_NUMBER:
+ case BinaryOpIC::HEAP_NUMBER:
GenerateHeapNumberStub(masm);
break;
- case TRBinaryOpIC::ODDBALL:
+ case BinaryOpIC::ODDBALL:
GenerateOddballStub(masm);
break;
- case TRBinaryOpIC::STRING:
+ case BinaryOpIC::BOTH_STRING:
+ GenerateBothStringStub(masm);
+ break;
+ case BinaryOpIC::STRING:
GenerateStringStub(masm);
break;
- case TRBinaryOpIC::GENERIC:
+ case BinaryOpIC::GENERIC:
GenerateGeneric(masm);
break;
default:
@@ -1357,10 +917,11 @@ void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
}
-const char* TypeRecordingBinaryOpStub::GetName() {
+const char* BinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* op_name = Token::Name(op_);
const char* overwrite_name;
@@ -1372,15 +933,16 @@ const char* TypeRecordingBinaryOpStub::GetName() {
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "TypeRecordingBinaryOpStub_%s_%s_%s",
+ "BinaryOpStub_%s_%s_%s",
op_name,
overwrite_name,
- TRBinaryOpIC::GetName(operands_type_));
+ BinaryOpIC::GetName(operands_type_));
return name_;
}
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+void BinaryOpStub::GenerateSmiCode(
+ MacroAssembler* masm,
Label* slow,
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
// 1. Move arguments into edx, eax except for DIV and MOD, which need the
@@ -1439,8 +1001,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
// 3. Perform the smi check of the operands.
STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ test(combined, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smis, not_taken);
+ __ JumpIfNotSmi(combined, &not_smis);
// 4. Operands are both smis, perform the operation leaving the result in
// eax and check the result if necessary.
@@ -1469,7 +1030,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
__ shl_cl(left);
// Check that the *signed* result fits in a smi.
__ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis, not_taken);
+ __ j(sign, &use_fp_on_smis);
// Tag the result and store it in register eax.
__ SmiTag(left);
__ mov(eax, left);
@@ -1499,7 +1060,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
// Smi tagging these two cases can only happen with shifts
// by 0 or 1 when handed a valid smi.
__ test(left, Immediate(0xc0000000));
- __ j(not_zero, slow, not_taken);
+ __ j(not_zero, &use_fp_on_smis);
// Tag the result and store it in register eax.
__ SmiTag(left);
__ mov(eax, left);
@@ -1508,12 +1069,12 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
case Token::ADD:
ASSERT(right.is(eax));
__ add(right, Operand(left)); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
+ __ j(overflow, &use_fp_on_smis);
break;
case Token::SUB:
__ sub(left, Operand(right));
- __ j(overflow, &use_fp_on_smis, not_taken);
+ __ j(overflow, &use_fp_on_smis);
__ mov(eax, left);
break;
@@ -1527,7 +1088,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
__ SmiUntag(right);
// Do multiplication.
__ imul(right, Operand(left)); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
+ __ j(overflow, &use_fp_on_smis);
// Check for negative zero result. Use combined = left | right.
__ NegativeZeroTest(right, combined, &use_fp_on_smis);
break;
@@ -1538,7 +1099,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
__ mov(edi, left);
// Check for 0 divisor.
__ test(right, Operand(right));
- __ j(zero, &use_fp_on_smis, not_taken);
+ __ j(zero, &use_fp_on_smis);
// Sign extend left into edx:eax.
ASSERT(left.is(eax));
__ cdq();
@@ -1562,7 +1123,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
case Token::MOD:
// Check for 0 divisor.
__ test(right, Operand(right));
- __ j(zero, &not_smis, not_taken);
+ __ j(zero, &not_smis);
// Sign extend left into edx:eax.
ASSERT(left.is(eax));
@@ -1635,26 +1196,35 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
} else {
ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
switch (op_) {
- case Token::SHL: {
+ case Token::SHL:
+ case Token::SHR: {
Comment perform_float(masm, "-- Perform float operation on smis");
__ bind(&use_fp_on_smis);
// Result we want is in left == edx, so we can put the allocated heap
// number in eax.
__ AllocateHeapNumber(eax, ecx, ebx, slow);
// Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(left));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- // It's OK to overwrite the right argument on the stack because we
- // are about to return.
+ // It's OK to overwrite the arguments on the stack because we
+ // are about to return.
+ if (op_ == Token::SHR) {
__ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
+ __ fild_d(Operand(esp, 1 * kPointerSize));
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ } else {
+ ASSERT_EQ(Token::SHL, op_);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(left));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ mov(Operand(esp, 1 * kPointerSize), left);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
}
- __ ret(2 * kPointerSize);
- break;
+ __ ret(2 * kPointerSize);
+ break;
}
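
The SHR arm above stores a zero high word and loads with fild_d (a 64-bit integer load), where SHL can use a 32-bit path. A small sketch of why, using only standard integer conversions:

    #include <cstdint>
    #include <cstdio>

    // A logical shift right can produce values in [2^31, 2^32), which a
    // 32-bit *signed* load (fild_s / cvtsi2sd) would read as negative.
    // Zero-extending to 64 bits first -- the Immediate(0) high word --
    // keeps the value positive before the integer-to-double conversion.
    double ShrResultToDouble(uint32_t shifted) {
      int64_t wide = static_cast<int64_t>(shifted);  // zero-extended
      return static_cast<double>(wide);  // what fild_d + fstp_d produce
    }

    int main() {
      // JS: (-1) >>> 0 === 4294967295, too big for a smi or an int32.
      std::printf("%.0f\n", ShrResultToDouble(0xFFFFFFFFu));
      return 0;
    }
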
case Token::ADD:
@@ -1746,7 +1316,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
}
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label call_runtime;
switch (op_) {
@@ -1768,8 +1338,8 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
UNREACHABLE();
}
- if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
- result_type_ == TRBinaryOpIC::SMI) {
+ if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+ result_type_ == BinaryOpIC::SMI) {
GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
} else {
GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
@@ -1797,19 +1367,49 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// Try to add arguments as strings, otherwise, transition to the generic
- // TRBinaryOpIC type.
+ // BinaryOpIC type.
GenerateAddStrings(masm);
GenerateTypeTransition(masm);
}
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+ Label call_runtime;
+ ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(op_ == Token::ADD);
+ // If both arguments are strings, call the string add stub.
+ // Otherwise, do a transition.
+
+ // Registers containing left and right operands respectively.
+ Register left = edx;
+ Register right = eax;
+
+ // Test if left operand is a string.
+ __ JumpIfSmi(left, &call_runtime);
+ __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &call_runtime);
+
+ // Test if right operand is a string.
+ __ JumpIfSmi(right, &call_runtime);
+ __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &call_runtime);
+
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&call_runtime);
+ GenerateTypeTransition(masm);
+}
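
A plain-C++ sketch of the fast-path condition this new stub checks; the types are illustrative stand-ins for V8's, not its declarations.

    enum InstanceType { ASCII_STRING_TYPE = 4, FIRST_NONSTRING_TYPE = 0x80 };

    struct HeapObject { InstanceType instance_type; };

    struct Value {
      bool is_smi;
      const HeapObject* object;  // meaningful only when !is_smi
    };

    // JumpIfSmi + CmpObjectType(FIRST_NONSTRING_TYPE) + j(above_equal):
    // strings are exactly the non-smi objects typed below the limit.
    static bool IsString(const Value& v) {
      return !v.is_smi && v.object->instance_type < FIRST_NONSTRING_TYPE;
    }

    // Mirrors GenerateBothStringStub: take the string-add fast path only
    // when both operands are strings; otherwise transition the IC.
    bool TryFastStringAdd(const Value& left, const Value& right) {
      return IsString(left) && IsString(right);
    }

    int main() {
      HeapObject s1 = {ASCII_STRING_TYPE}, s2 = {ASCII_STRING_TYPE};
      Value left = {false, &s1}, right = {false, &s2};
      return TryFastStringAdd(left, right) ? 0 : 1;
    }
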
+
+
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == TRBinaryOpIC::INT32);
+ ASSERT(operands_type_ == BinaryOpIC::INT32);
// Floating point case.
switch (op_) {
@@ -1831,7 +1431,7 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
default: UNREACHABLE();
}
// Check result type if it is currently Int32.
- if (result_type_ <= TRBinaryOpIC::INT32) {
+ if (result_type_ <= BinaryOpIC::INT32) {
__ cvttsd2si(ecx, Operand(xmm0));
__ cvtsi2sd(xmm2, Operand(ecx));
__ ucomisd(xmm0, xmm2);
@@ -1922,7 +1522,7 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
__ mov(ebx, Operand(eax)); // ebx: result
- NearLabel skip_allocation;
+ Label skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
case OVERWRITE_RIGHT:
@@ -1930,8 +1530,7 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// allocation of a heap number.
__ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
+ __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
// Fall through!
case NO_OVERWRITE:
__ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -2009,32 +1608,32 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
if (op_ == Token::ADD) {
// Handle string addition here, because it is the only operation
// that does not do a ToNumber conversion on the operands.
GenerateAddStrings(masm);
}
+ Factory* factory = masm->isolate()->factory();
+
// Convert odd ball arguments to numbers.
- NearLabel check, done;
- __ cmp(edx, Factory::undefined_value());
- __ j(not_equal, &check);
+ Label check, done;
+ __ cmp(edx, factory->undefined_value());
+ __ j(not_equal, &check, Label::kNear);
if (Token::IsBitOp(op_)) {
__ xor_(edx, Operand(edx));
} else {
- __ mov(edx, Immediate(Factory::nan_value()));
+ __ mov(edx, Immediate(factory->nan_value()));
}
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&check);
- __ cmp(eax, Factory::undefined_value());
- __ j(not_equal, &done);
+ __ cmp(eax, factory->undefined_value());
+ __ j(not_equal, &done, Label::kNear);
if (Token::IsBitOp(op_)) {
__ xor_(eax, Operand(eax));
} else {
- __ mov(eax, Immediate(Factory::nan_value()));
+ __ mov(eax, Immediate(factory->nan_value()));
}
__ bind(&done);
@@ -2042,7 +1641,7 @@ void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
}
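
The conversion rule being emitted here, as a runnable sketch of the ECMA-262 semantics for an undefined operand; everything else in the sketch is standard C++.

    #include <cmath>
    #include <cstdio>

    // For bitwise operators an undefined operand behaves as 0 (the
    // xor_(reg, reg) branch); for arithmetic it behaves as NaN (the
    // factory->nan_value() branch).
    double UndefinedOperand(bool is_bit_op) {
      return is_bit_op ? 0.0 : std::nan("");
    }

    int main() {
      std::printf("undefined | 1 uses %g\n", UndefinedOperand(true));   // 0
      std::printf("undefined + 1 uses %f\n", UndefinedOperand(false));  // nan
      return 0;
    }
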
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
Label call_runtime;
// Floating point case.
@@ -2137,7 +1736,7 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
__ mov(ebx, Operand(eax)); // ebx: result
- NearLabel skip_allocation;
+ Label skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
case OVERWRITE_RIGHT:
@@ -2145,8 +1744,7 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
// allocation of a heap number.
__ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
+ __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
// Fall through!
case NO_OVERWRITE:
__ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -2223,10 +1821,11 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
Label call_runtime;
- __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
switch (op_) {
case Token::ADD:
@@ -2336,7 +1935,7 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
__ mov(ebx, Operand(eax)); // ebx: result
- NearLabel skip_allocation;
+ Label skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
case OVERWRITE_RIGHT:
@@ -2344,8 +1943,7 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
// allocation of a heap number.
__ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
+ __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
// Fall through!
case NO_OVERWRITE:
__ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -2419,19 +2017,18 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
- NearLabel left_not_string, call_runtime;
+ Label left_not_string, call_runtime;
// Registers containing left and right operands respectively.
Register left = edx;
Register right = eax;
// Test if left operand is a string.
- __ test(left, Immediate(kSmiTagMask));
- __ j(zero, &left_not_string);
+ __ JumpIfSmi(left, &left_not_string, Label::kNear);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &left_not_string);
+ __ j(above_equal, &left_not_string, Label::kNear);
StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
GenerateRegisterArgsPush(masm);
@@ -2439,10 +2036,9 @@ void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
// Left operand is not a string, test right.
__ bind(&left_not_string);
- __ test(right, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime);
+ __ JumpIfSmi(right, &call_runtime, Label::kNear);
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime);
+ __ j(above_equal, &call_runtime, Label::kNear);
StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
GenerateRegisterArgsPush(masm);
@@ -2453,7 +2049,7 @@ void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
+void BinaryOpStub::GenerateHeapResultAllocation(
MacroAssembler* masm,
Label* alloc_failure) {
Label skip_allocation;
@@ -2462,8 +2058,7 @@ void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
case OVERWRITE_LEFT: {
// If the argument in edx is already an object, we skip the
// allocation of a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
+ __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
// Allocate a heap number for the result. Keep eax and edx intact
// for the possible runtime call.
__ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
@@ -2478,8 +2073,7 @@ void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
case OVERWRITE_RIGHT:
// If the argument in eax is already an object, we skip the
// allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
+ __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
// Fall through!
case NO_OVERWRITE:
// Allocate a heap number for the result. Keep eax and edx intact
@@ -2495,7 +2089,7 @@ void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
}
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
__ pop(ecx);
__ push(edx);
__ push(eax);
@@ -2523,11 +2117,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
const bool tagged = (argument_type_ == TAGGED);
if (tagged) {
// Test that eax is a number.
- NearLabel input_not_smi;
- NearLabel loaded;
+ Label input_not_smi;
+ Label loaded;
__ mov(eax, Operand(esp, kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &input_not_smi);
+ __ JumpIfNotSmi(eax, &input_not_smi, Label::kNear);
// Input is a smi. Untag and load it onto the FPU stack.
// Then load the low and high words of the double into ebx, edx.
STATIC_ASSERT(kSmiTagSize == 1);
@@ -2538,11 +2131,12 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ fst_d(Operand(esp, 0));
__ pop(edx);
__ pop(ebx);
- __ jmp(&loaded);
+ __ jmp(&loaded, Label::kNear);
__ bind(&input_not_smi);
// Check if input is a HeapNumber.
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
+ Factory* factory = masm->isolate()->factory();
+ __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
__ j(not_equal, &runtime_call);
// Input is a HeapNumber. Push it on the FPU stack and load its
// low and high words into ebx, edx.
@@ -2575,24 +2169,27 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ mov(eax, ecx);
__ sar(eax, 8);
__ xor_(ecx, Operand(eax));
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
+ ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+ __ and_(Operand(ecx),
+ Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
// ST[0] or xmm1 == double value.
// ebx = low 32 bits of double value.
// edx = high 32 bits of double value.
// ecx = TranscendentalCache::hash(double value).
- __ mov(eax,
- Immediate(ExternalReference::transcendental_cache_array_address()));
- // Eax points to cache array.
- __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ ExternalReference cache_array =
+ ExternalReference::transcendental_cache_array_address(masm->isolate());
+ __ mov(eax, Immediate(cache_array));
+ int cache_array_index =
+ type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
+ __ mov(eax, Operand(eax, cache_array_index));
// Eax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
__ test(eax, Operand(eax));
__ j(zero, &runtime_call_clear_stack);
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
- { TranscendentalCache::Element test_elem[2];
+ { TranscendentalCache::SubCache::Element test_elem[2];
char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
@@ -2608,11 +2205,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ lea(ecx, Operand(ecx, ecx, times_2, 0));
__ lea(ecx, Operand(eax, ecx, times_4, 0));
// Check if cache matches: Double value is stored in uint32_t[2] array.
- NearLabel cache_miss;
+ Label cache_miss;
__ cmp(ebx, Operand(ecx, 0));
- __ j(not_equal, &cache_miss);
+ __ j(not_equal, &cache_miss, Label::kNear);
__ cmp(edx, Operand(ecx, kIntSize));
- __ j(not_equal, &cache_miss);
+ __ j(not_equal, &cache_miss, Label::kNear);
// Cache hit!
__ mov(eax, Operand(ecx, 2 * kIntSize));
if (tagged) {
@@ -2671,7 +2268,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&runtime_call_clear_stack);
__ fstp(0);
__ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+ ExternalReference runtime =
+ ExternalReference(RuntimeFunction(), masm->isolate());
+ __ TailCallExternalReference(runtime, 1, 1);
} else { // UNTAGGED.
__ bind(&runtime_call_clear_stack);
__ bind(&runtime_call);
@@ -2708,7 +2307,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// Both fsin and fcos require arguments in the range +/-2^63 and
// return NaN for infinities and NaN. They can share all code except
// the actual fsin/fcos operation.
- NearLabel in_range, done;
+ Label in_range, done;
// If argument is outside the range -2^63..2^63, fsin/cos doesn't
// work. We must reduce it to the appropriate range.
__ mov(edi, edx);
@@ -2716,11 +2315,11 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
int supported_exponent_limit =
(63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
__ cmp(Operand(edi), Immediate(supported_exponent_limit));
- __ j(below, &in_range, taken);
+ __ j(below, &in_range, Label::kNear);
// Check for infinity and NaN. Both return NaN for sin.
__ cmp(Operand(edi), Immediate(0x7ff00000));
- NearLabel non_nan_result;
- __ j(not_equal, &non_nan_result, taken);
+ Label non_nan_result;
+ __ j(not_equal, &non_nan_result, Label::kNear);
// Input is +/-Infinity or NaN. Result is NaN.
__ fstp(0);
// NaN is represented by 0x7ff8000000000000.
@@ -2728,7 +2327,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
__ push(Immediate(0));
__ fld_d(Operand(esp, 0));
__ add(Operand(esp), Immediate(2 * kPointerSize));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&non_nan_result);
@@ -2739,19 +2338,19 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
__ fld(1);
// FPU Stack: input, 2*pi, input.
{
- NearLabel no_exceptions;
+ Label no_exceptions;
__ fwait();
__ fnstsw_ax();
// Clear if Illegal Operand or Zero Division exceptions are set.
__ test(Operand(eax), Immediate(5));
- __ j(zero, &no_exceptions);
+ __ j(zero, &no_exceptions, Label::kNear);
__ fnclex();
__ bind(&no_exceptions);
}
// Compute st(0) % st(1)
{
- NearLabel partial_remainder_loop;
+ Label partial_remainder_loop;
__ bind(&partial_remainder_loop);
__ fprem1();
__ fwait();
@@ -2788,203 +2387,6 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
}
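
What the fprem1 loop computes, sketched with the C library: std::remainder does in one call what the loop iterates toward (up to the precision of the 2*pi constant), and the 2^63 limit and NaN behavior match the comments above.

    #include <cmath>
    #include <cstdio>

    double ReducedSin(double x) {
      const double kLimit = 9223372036854775808.0;  // 2^63
      // fsin/fcos only accept |x| < 2^63; Infinity and NaN also fail
      // this comparison and yield NaN, as in the stub.
      if (!(std::fabs(x) < kLimit)) return std::nan("");
      double reduced = std::remainder(x, 2.0 * 3.141592653589793);
      return std::sin(reduced);
    }

    int main() {
      std::printf("%f\n", ReducedSin(1e10));      // reduced, then sin
      std::printf("%f\n", ReducedSin(INFINITY));  // nan
      return 0;
    }
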
-// Get the integer part of a heap number. Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
-// trashed registers.
-void IntegerConvert(MacroAssembler* masm,
- Register source,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
- Label done, right_exponent, normal_exponent;
- Register scratch = ebx;
- Register scratch2 = edi;
- if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
- return;
- }
- if (!type_info.IsInteger32() || !use_sse3) {
- // Get exponent word.
- __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- }
- if (use_sse3) {
- CpuFeatures::Scope scope(SSE3);
- if (!type_info.IsInteger32()) {
- // Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
- __ j(greater_equal, conversion_failure);
- }
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
- // Reserve space for 64 bit answer.
- __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
- __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
- } else {
- // Load ecx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(ecx, Operand(ecx));
- // Check whether the exponent matches a 32 bit signed int that cannot be
- // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
- // exponent is 30 (biased). This is the exponent that we are fastest at and
- // also the highest exponent we can handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- __ j(equal, &right_exponent);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- __ j(less, &normal_exponent);
-
- {
- // Handle a big exponent. The only reason we have this code is that the
- // >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent =
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
- __ j(not_equal, conversion_failure);
- // We have the big exponent, typically from >>>. This means the number is
- // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch2, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to use the full unsigned range so we subtract 1 bit from the
- // shift distance.
- const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
- __ shl(scratch2, big_shift_distance);
- // Get the second half of the double.
- __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 21 bits to get the most significant 11 bits or the low
- // mantissa word.
- __ shr(ecx, 32 - big_shift_distance);
- __ or_(ecx, Operand(scratch2));
- // We have the answer in ecx, but we may need to negate it.
- __ test(scratch, Operand(scratch));
- __ j(positive, &done);
- __ neg(ecx);
- __ jmp(&done);
- }
-
- __ bind(&normal_exponent);
- // Exponent word in scratch, exponent part of exponent word in scratch2.
- // Zero in ecx.
- // We know the exponent is smaller than 30 (biased). If it is less than
-  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ sub(Operand(scratch2), Immediate(zero_exponent));
- // ecx already has a Smi zero.
- __ j(less, &done);
-
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ shr(scratch2, HeapNumber::kExponentShift);
- __ mov(ecx, Immediate(30));
- __ sub(ecx, Operand(scratch2));
-
- __ bind(&right_exponent);
- // Here ecx is the shift, scratch is the exponent word.
- // Get the top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
-  // take. We have kExponentShift + 1 significant bits in the low end of the
- // word. Shift them to the top bits.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ shl(scratch, shift_distance);
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the most significant 10 bits or the low
- // mantissa word.
- __ shr(scratch2, 32 - shift_distance);
- __ or_(scratch2, Operand(scratch));
- // Move down according to the exponent.
- __ shr_cl(scratch2);
- // Now the unsigned answer is in scratch2. We need to move it to ecx and
- // we may need to fix the sign.
- NearLabel negative;
- __ xor_(ecx, Operand(ecx));
- __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
- __ j(greater, &negative);
- __ mov(ecx, scratch2);
- __ jmp(&done);
- __ bind(&negative);
- __ sub(ecx, Operand(scratch2));
- __ bind(&done);
- }
-}
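
The removed helper implements ECMA-262 ToInt32. A portable sketch of the same function, standard library calls only, without the exponent/mantissa bit twiddling:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int32_t ToInt32(double d) {
      if (!std::isfinite(d)) return 0;         // NaN, +/-Infinity -> 0
      double t = std::trunc(d);                // drop the fraction
      double m = std::fmod(t, 4294967296.0);   // reduce modulo 2^32
      if (m < 0) m += 4294967296.0;            // into [0, 2^32)
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }

    int main() {
      std::printf("%d\n", ToInt32(4294967295.0));   // -1 (the >>> case)
      std::printf("%d\n", ToInt32(3.99));           // 3
      std::printf("%d\n", ToInt32(-2147483649.0));  // 2147483647 (wraps)
      return 0;
    }
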
-
-
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- if (!type_info.IsDouble()) {
- if (!type_info.IsSmi()) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &arg1_is_object);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(edx);
- }
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
- }
-
- __ bind(&arg1_is_object);
-
- // Get the untagged integer version of the edx heap number in ecx.
- IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure);
- __ mov(edx, ecx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
- if (!type_info.IsDouble()) {
- // Test if arg2 is a Smi.
- if (!type_info.IsSmi()) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &arg2_is_object);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(eax);
- }
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
- }
-
- __ bind(&arg2_is_object);
-
- // Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure);
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
// Input: edx, eax are the left and right objects of a bit op.
// Output: eax, ecx are left and right integers for a bit op.
void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
@@ -2996,38 +2398,33 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
Label load_arg2, done;
// Test if arg1 is a Smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &arg1_is_object);
+ __ JumpIfNotSmi(edx, &arg1_is_object);
__ SmiUntag(edx);
__ jmp(&load_arg2);
// If the argument is undefined it converts to zero (ECMA-262, section 9.5).
__ bind(&check_undefined_arg1);
- __ cmp(edx, Factory::undefined_value());
+ Factory* factory = masm->isolate()->factory();
+ __ cmp(edx, factory->undefined_value());
__ j(not_equal, conversion_failure);
__ mov(edx, Immediate(0));
__ jmp(&load_arg2);
__ bind(&arg1_is_object);
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ebx, Factory::heap_number_map());
+ __ cmp(ebx, factory->heap_number_map());
__ j(not_equal, &check_undefined_arg1);
// Get the untagged integer version of the edx heap number in ecx.
- IntegerConvert(masm,
- edx,
- TypeInfo::Unknown(),
- use_sse3,
- conversion_failure);
+ IntegerConvert(masm, edx, use_sse3, conversion_failure);
__ mov(edx, ecx);
// Here edx has the untagged integer, eax has a Smi or a heap number.
__ bind(&load_arg2);
// Test if arg2 is a Smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &arg2_is_object);
+ __ JumpIfNotSmi(eax, &arg2_is_object);
__ SmiUntag(eax);
__ mov(ecx, eax);
@@ -3035,39 +2432,23 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
// If the argument is undefined it converts to zero (ECMA-262, section 9.5).
__ bind(&check_undefined_arg2);
- __ cmp(eax, Factory::undefined_value());
+ __ cmp(eax, factory->undefined_value());
__ j(not_equal, conversion_failure);
__ mov(ecx, Immediate(0));
__ jmp(&done);
__ bind(&arg2_is_object);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(ebx, Factory::heap_number_map());
+ __ cmp(ebx, factory->heap_number_map());
__ j(not_equal, &check_undefined_arg2);
// Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm,
- eax,
- TypeInfo::Unknown(),
- use_sse3,
- conversion_failure);
+ IntegerConvert(masm, eax, use_sse3, conversion_failure);
__ bind(&done);
__ mov(eax, edx);
}
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- if (type_info.IsNumber()) {
- LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure);
- } else {
- LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
- }
-}
-
-
void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
bool use_sse3,
Label* not_int32) {
@@ -3077,12 +2458,11 @@ void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
- NearLabel load_smi, done;
+ Label load_smi, done;
- __ test(number, Immediate(kSmiTagMask));
- __ j(zero, &load_smi, not_taken);
+ __ JumpIfSmi(number, &load_smi, Label::kNear);
__ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&load_smi);
__ SmiUntag(number);
@@ -3095,18 +2475,16 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
- NearLabel load_smi_edx, load_eax, load_smi_eax, done;
+ Label load_smi_edx, load_eax, load_smi_eax, done;
// Load operand in edx into xmm0.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
+ __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ bind(&load_eax);
// Load operand in eax into xmm1.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
+ __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&load_smi_edx);
__ SmiUntag(edx); // Untag smi before converting to float.
@@ -3125,19 +2503,18 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
Label* not_numbers) {
- NearLabel load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
+ Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
// Load operand in edx into xmm0, or branch to not_numbers.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
+ Factory* factory = masm->isolate()->factory();
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
__ j(not_equal, not_numbers); // Argument in edx is not a number.
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ bind(&load_eax);
// Load operand in eax into xmm1, or branch to not_numbers.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
- __ j(equal, &load_float_eax);
+ __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
+ __ j(equal, &load_float_eax, Label::kNear);
__ jmp(not_numbers); // Argument in eax is not a number.
__ bind(&load_smi_edx);
__ SmiUntag(edx); // Untag smi before converting to float.
@@ -3148,7 +2525,7 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
__ SmiUntag(eax); // Untag smi before converting to float.
__ cvtsi2sd(xmm1, Operand(eax));
__ SmiTag(eax); // Retag smi for heap number overwriting test.
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&load_float_eax);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ bind(&done);
@@ -3189,14 +2566,13 @@ void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
Register scratch,
ArgLocation arg_location) {
- NearLabel load_smi_1, load_smi_2, done_load_1, done;
+ Label load_smi_1, load_smi_2, done_load_1, done;
if (arg_location == ARGS_IN_REGISTERS) {
__ mov(scratch, edx);
} else {
__ mov(scratch, Operand(esp, 2 * kPointerSize));
}
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_1, not_taken);
+ __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
__ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
__ bind(&done_load_1);
@@ -3205,10 +2581,9 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
} else {
__ mov(scratch, Operand(esp, 1 * kPointerSize));
}
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_2, not_taken);
+ __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
__ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&load_smi_1);
__ SmiUntag(scratch);
@@ -3248,20 +2623,19 @@ void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch) {
- NearLabel test_other, done;
+ Label test_other, done;
// Test if both operands are floats or smi -> scratch=k_is_float;
// Otherwise scratch = k_not_float.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &test_other, not_taken); // argument in edx is OK
+ __ JumpIfSmi(edx, &test_other, Label::kNear);
__ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(scratch, Factory::heap_number_map());
+ Factory* factory = masm->isolate()->factory();
+ __ cmp(scratch, factory->heap_number_map());
__ j(not_equal, non_float); // argument in edx is not a number -> NaN
__ bind(&test_other);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done); // argument in eax is OK
+ __ JumpIfSmi(eax, &done, Label::kNear);
__ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(scratch, Factory::heap_number_map());
+ __ cmp(scratch, factory->heap_number_map());
__ j(not_equal, non_float); // argument in eax is not a number -> NaN
// Fall-through: Both operands are numbers.
@@ -3275,140 +2649,6 @@ void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
}
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done, undo;
-
- if (op_ == Token::SUB) {
- if (include_smi_code_) {
- // Check whether the value is a smi.
- NearLabel try_float;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &try_float, not_taken);
-
- if (negative_zero_ == kStrictNegativeZero) {
- // Go slow case if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- __ test(eax, Operand(eax));
- __ j(zero, &slow, not_taken);
- }
-
- // The value of the expression is a smi that is not zero. Try
- // optimistic subtraction '0 - value'.
- __ mov(edx, Operand(eax));
- __ Set(eax, Immediate(0));
- __ sub(eax, Operand(edx));
- __ j(overflow, &undo, not_taken);
- __ StubReturn(1);
-
- // Try floating point case.
- __ bind(&try_float);
- } else if (FLAG_debug_code) {
- __ AbortIfSmi(eax);
- }
-
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &slow);
- if (overwrite_ == UNARY_OVERWRITE) {
- __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
- __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
- } else {
- __ mov(edx, Operand(eax));
- // edx: operand
- __ AllocateHeapNumber(eax, ebx, ecx, &undo);
- // eax: allocated 'empty' number
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
- }
- } else if (op_ == Token::BIT_NOT) {
- if (include_smi_code_) {
- Label non_smi;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &non_smi);
- __ not_(eax);
- __ and_(eax, ~kSmiTagMask); // Remove inverted smi-tag.
- __ ret(0);
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- __ AbortIfSmi(eax);
- }
-
- // Check if the operand is a heap number.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &slow, not_taken);
-
- // Convert the heap number in eax to an untagged integer in ecx.
- IntegerConvert(masm,
- eax,
- TypeInfo::Unknown(),
- CpuFeatures::IsSupported(SSE3),
- &slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- NearLabel try_float;
- __ not_(ecx);
- __ cmp(ecx, 0xc0000000);
- __ j(sign, &try_float, not_taken);
-
- // Tag the result as a smi and we're done.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(eax, Operand(ecx, times_2, kSmiTag));
- __ jmp(&done);
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (overwrite_ == UNARY_NO_OVERWRITE) {
- // Allocate a fresh heap number, but don't overwrite eax until
- // we're sure we can do it without going through the slow case
- // that needs the value in eax.
- __ AllocateHeapNumber(ebx, edx, edi, &slow);
- __ mov(eax, Operand(ebx));
- }
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ecx));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ push(ecx);
- __ fild_s(Operand(esp, 0));
- __ pop(ecx);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- } else {
- UNIMPLEMENTED();
- }
-
- // Return from the stub.
- __ bind(&done);
- __ StubReturn(1);
-
- // Restore eax and go slow case.
- __ bind(&undo);
- __ mov(eax, Operand(edx));
-
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ pop(ecx); // pop return address.
- __ push(eax);
- __ push(ecx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
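
The two guards in the removed smi fast path for unary minus, as a sketch. With kSmiTag == 0 and a one-bit tag, negating the tagged word negates the smi; the values here are tagged words.

    #include <cstdint>

    bool TryNegateTaggedSmi(int32_t tagged, int32_t* out) {
      // Zero must go slow under kStrictNegativeZero: the result -0 only
      // exists as a heap number, never as a smi.
      if (tagged == 0) return false;
      // Tagged smi min (-2^30, stored as INT32_MIN) is the one value for
      // which 0 - tagged overflows -- the j(overflow, &undo) case.
      if (tagged == INT32_MIN) return false;
      *out = -tagged;  // still a validly tagged smi
      return true;
    }

    int main() {
      int32_t result;
      return TryNegateTaggedSmi(42 << 1, &result) ? 0 : 1;  // negates smi 42
    }
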
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
// Registers are used as follows:
// edx = base
@@ -3429,10 +2669,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label exponent_nonsmi;
Label base_nonsmi;
// If the exponent is a heap number go to that specific case.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &exponent_nonsmi);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &base_nonsmi);
+ __ JumpIfNotSmi(eax, &exponent_nonsmi);
+ __ JumpIfNotSmi(edx, &base_nonsmi);
// Optimized version when both exponent and base are smis.
Label powi;
@@ -3441,8 +2679,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&powi);
  // Exponent is a smi and base is a heap number.
__ bind(&base_nonsmi);
+ Factory* factory = masm->isolate()->factory();
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ factory->heap_number_map());
__ j(not_equal, &call_runtime);
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
@@ -3457,20 +2696,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ mov(edx, eax);
// Get absolute value of exponent.
- NearLabel no_neg;
+ Label no_neg;
__ cmp(eax, 0);
- __ j(greater_equal, &no_neg);
+ __ j(greater_equal, &no_neg, Label::kNear);
__ neg(eax);
__ bind(&no_neg);
// Load xmm1 with 1.
__ movsd(xmm1, xmm3);
- NearLabel while_true;
- NearLabel no_multiply;
+ Label while_true;
+ Label no_multiply;
__ bind(&while_true);
__ shr(eax, 1);
- __ j(not_carry, &no_multiply);
+ __ j(not_carry, &no_multiply, Label::kNear);
__ mulsd(xmm1, xmm0);
__ bind(&no_multiply);
__ mulsd(xmm0, xmm0);
@@ -3494,24 +2733,23 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// on doubles.
__ bind(&exponent_nonsmi);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ factory->heap_number_map());
__ j(not_equal, &call_runtime);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
  // Test if the exponent is NaN.
__ ucomisd(xmm1, xmm1);
__ j(parity_even, &call_runtime);
- NearLabel base_not_smi;
- NearLabel handle_special_cases;
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &base_not_smi);
+ Label base_not_smi;
+ Label handle_special_cases;
+ __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
__ SmiUntag(edx);
__ cvtsi2sd(xmm0, Operand(edx));
- __ jmp(&handle_special_cases);
+ __ jmp(&handle_special_cases, Label::kNear);
__ bind(&base_not_smi);
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ factory->heap_number_map());
__ j(not_equal, &call_runtime);
__ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
__ and_(ecx, HeapNumber::kExponentMask);
@@ -3522,7 +2760,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// base is in xmm0 and exponent is in xmm1.
__ bind(&handle_special_cases);
- NearLabel not_minus_half;
+ Label not_minus_half;
// Test for -0.5.
// Load xmm2 with -0.5.
__ mov(ecx, Immediate(0xBF000000));
@@ -3530,11 +2768,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cvtss2sd(xmm2, xmm2);
// xmm2 now has -0.5.
__ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half);
+ __ j(not_equal, &not_minus_half, Label::kNear);
// Calculates reciprocal of square root.
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
+ __ xorps(xmm1, xmm1);
__ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
__ divsd(xmm3, xmm1);
@@ -3551,7 +2789,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &call_runtime);
// Calculates square root.
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
+ __ xorps(xmm1, xmm1);
__ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
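
Why both square-root paths add +0.0 before sqrtsd, shown with standard C++: IEEE-754 sqrt(-0) is -0, while ECMA-262 requires Math.pow(-0, 0.5) to be +0 (and Math.pow(-0, -0.5) to be +Infinity).

    #include <cmath>
    #include <cstdio>

    double PowHalf(double x) {
      // xorps xmm1,xmm1; addsd xmm1,xmm0: adding +0 maps -0 to +0
      // before sqrtsd, so the sign of zero cannot leak through.
      return std::sqrt(x + 0.0);
    }

    int main() {
      std::printf("%g\n", PowHalf(-0.0));    // 0
      std::printf("%g\n", std::sqrt(-0.0));  // -0 without the fix
      return 0;
    }
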
@@ -3576,21 +2814,20 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Check that the key is a smi.
Label slow;
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
+ __ JumpIfNotSmi(edx, &slow);
// Check if the calling frame is an arguments adaptor frame.
- NearLabel adaptor;
+ Label adaptor;
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
__ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor);
+ __ j(equal, &adaptor, Label::kNear);
// Check index against formal parameters count limit passed in
// through register eax. Use unsigned comparison to get negative
// check for free.
__ cmp(edx, Operand(eax));
- __ j(above_equal, &slow, not_taken);
+ __ j(above_equal, &slow);
// Read the argument from the stack and return it.
STATIC_ASSERT(kSmiTagSize == 1);
@@ -3606,7 +2843,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ bind(&adaptor);
__ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ cmp(edx, Operand(ecx));
- __ j(above_equal, &slow, not_taken);
+ __ j(above_equal, &slow);
// Read the argument from the stack and return it.
STATIC_ASSERT(kSmiTagSize == 1);
@@ -3626,16 +2863,259 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
}
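
The shape of the lookup this stub performs, sketched with illustrative types (not V8's): the bound on arguments[key] comes from the current frame's formal count, unless the caller sat behind an arguments adaptor frame, in which case the adaptor's actual count applies.

    #include <cstdint>

    struct Frame {
      bool is_adaptor;        // caller frame is an arguments adaptor frame
      uint32_t formal_count;  // parameters declared by the function
      uint32_t actual_count;  // arguments recorded by the adaptor
    };

    // The stub's cmp + j(above_equal, &slow): an unsigned compare also
    // rejects negative smi keys for free.
    bool InBounds(const Frame& f, uint32_t key) {
      uint32_t limit = f.is_adaptor ? f.actual_count : f.formal_count;
      return key < limit;
    }

    int main() {
      Frame adaptor = {true, 2, 5};          // called with 5 args, 2 declared
      return InBounds(adaptor, 4) ? 0 : 1;   // arguments[4] is readable
    }
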
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
// esp[0] : return address
// esp[4] : number of parameters
// esp[8] : receiver displacement
- // esp[16] : function
+ // esp[12] : function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &runtime, Label::kNear);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ mov(Operand(esp, 1 * kPointerSize), ecx);
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
+ __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : number of parameters (tagged)
+ // esp[8] : receiver displacement
+ // esp[12] : function
- // The displacement is used for skipping the return address and the
- // frame pointer on the stack. It is the offset of the last
- // parameter (if any) relative to the frame pointer.
- static const int kDisplacement = 2 * kPointerSize;
+ // ebx = parameter count (tagged)
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ // TODO(rossberg): Factor out some of the bits that are shared with the other
+ // Generate* functions.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor_frame, Label::kNear);
+
+ // No adaptor, parameter count = argument count.
+ __ mov(ecx, ebx);
+ __ jmp(&try_allocate, Label::kNear);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
+ __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+ // ebx = parameter count (tagged)
+ // ecx = argument count (tagged)
+ // esp[4] = parameter count (tagged)
+ // esp[8] = address of receiver argument
+ // Compute the mapped parameter count = min(ebx, ecx) in ebx.
+ __ cmp(ebx, Operand(ecx));
+ __ j(less_equal, &try_allocate, Label::kNear);
+ __ mov(ebx, ecx);
+
+ __ bind(&try_allocate);
+
+ // Save mapped parameter count.
+ __ push(ebx);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ Label no_parameter_map;
+ __ test(ebx, Operand(ebx));
+ __ j(zero, &no_parameter_map, Label::kNear);
+ __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
+ __ bind(&no_parameter_map);
+
+ // 2. Backing store.
+ __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of all three objects in one go.
+ __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
+
+ // eax = address of new object(s) (tagged)
+ // ecx = argument count (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[8] = parameter count (tagged)
+ // esp[12] = address of receiver argument
+ // Get the arguments boilerplate from the current (global) context into edi.
+ Label has_mapped_parameters, copy;
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
+ __ mov(ebx, Operand(esp, 0 * kPointerSize));
+ __ test(ebx, Operand(ebx));
+ __ j(not_zero, &has_mapped_parameters, Label::kNear);
+ __ mov(edi, Operand(edi,
+ Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
+ __ jmp(&copy, Label::kNear);
+
+ __ bind(&has_mapped_parameters);
+ __ mov(edi, Operand(edi,
+ Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
+ __ bind(&copy);
+
+ // eax = address of new object (tagged)
+ // ebx = mapped parameter count (tagged)
+ // ecx = argument count (tagged)
+ // edi = address of boilerplate object (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[8] = parameter count (tagged)
+ // esp[12] = address of receiver argument
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ mov(edx, FieldOperand(edi, i));
+ __ mov(FieldOperand(eax, i), edx);
+ }
+
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ mov(edx, Operand(esp, 4 * kPointerSize));
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize),
+ edx);
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize),
+ ecx);
+
+ // Setup the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, edi will point there, otherwise to the
+ // backing store.
+ __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
+
+ // eax = address of new object (tagged)
+ // ebx = mapped parameter count (tagged)
+ // ecx = argument count (tagged)
+ // edi = address of parameter map or backing store (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[8] = parameter count (tagged)
+ // esp[12] = address of receiver argument
+ // Free a register.
+ __ push(eax);
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ test(ebx, Operand(ebx));
+ __ j(zero, &skip_parameter_map);
+
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(FACTORY->non_strict_arguments_elements_map()));
+ __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
+ __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+  //     MIN_CONTEXT_SLOTS+parameter_count-1 ..
+  //         MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
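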
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ push(ecx);
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ add(ebx, Operand(esp, 4 * kPointerSize));
+ __ sub(ebx, Operand(eax));
+ __ mov(ecx, FACTORY->the_hole_value());
+ __ mov(edx, edi);
+ __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
+ // eax = loop variable (tagged)
+ // ebx = mapping index (tagged)
+ // ecx = the hole value
+ // edx = address of parameter map (tagged)
+ // edi = address of backing store (tagged)
+ // esp[0] = argument count (tagged)
+ // esp[4] = address of new object (tagged)
+ // esp[8] = mapped parameter count (tagged)
+ // esp[16] = parameter count (tagged)
+ // esp[20] = address of receiver argument
+ __ jmp(&parameters_test, Label::kNear);
+
+ __ bind(&parameters_loop);
+ __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
+ __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
+ __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+ __ bind(&parameters_test);
+ __ test(eax, Operand(eax));
+ __ j(not_zero, &parameters_loop, Label::kNear);
+ __ pop(ecx);
+
+ __ bind(&skip_parameter_map);
+
+ // ecx = argument count (tagged)
+ // edi = address of backing store (tagged)
+ // esp[0] = address of new object (tagged)
+ // esp[4] = mapped parameter count (tagged)
+ // esp[12] = parameter count (tagged)
+ // esp[16] = address of receiver argument
+ // Copy arguments header and remaining slots (if there are any).
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(FACTORY->fixed_array_map()));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+
+ Label arguments_loop, arguments_test;
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 4 * kPointerSize));
+ __ sub(Operand(edx), ebx); // Is there a smarter way to do negative scaling?
+ __ sub(Operand(edx), ebx);
+ __ jmp(&arguments_test, Label::kNear);
+
+ __ bind(&arguments_loop);
+ __ sub(Operand(edx), Immediate(kPointerSize));
+ __ mov(eax, Operand(edx, 0));
+ __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
+ __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ cmp(ebx, Operand(ecx));
+ __ j(less, &arguments_loop, Label::kNear);
+
+ // Restore.
+ __ pop(eax); // Address of arguments object.
+ __ pop(ebx); // Parameter count.
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ pop(eax); // Remove saved parameter count.
+ __ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+}
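
A worked version of the index arithmetic described in the "Copy the parameter slots and the holes" comment above, using an illustrative MIN_CONTEXT_SLOTS (V8's actual constant differs): slot i of the parameter map aliases context slot MIN_CONTEXT_SLOTS + parameter_count - 1 - i.

    #include <cstdio>

    int main() {
      const int kMinContextSlots = 5;  // illustrative, not V8's value
      const int parameter_count = 3;
      const int mapped_count = 2;
      for (int i = 0; i < mapped_count; ++i) {
        std::printf("map[%d] -> context slot %d\n",
                    i, kMinContextSlots + parameter_count - 1 - i);
      }
      // Prints slots 7 then 6: the loop walks right to left, from
      // MIN_CONTEXT_SLOTS + parameter_count - 1 down to
      // MIN_CONTEXT_SLOTS + parameter_count - mapped_count.
      return 0;
    }
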
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : number of parameters
+ // esp[8] : receiver displacement
+ // esp[12] : function
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
@@ -3652,26 +3132,28 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ bind(&adaptor_frame);
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ mov(Operand(esp, 1 * kPointerSize), ecx);
- __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
__ mov(Operand(esp, 2 * kPointerSize), edx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
- NearLabel add_arguments_object;
+ Label add_arguments_object;
__ bind(&try_allocate);
__ test(ecx, Operand(ecx));
- __ j(zero, &add_arguments_object);
+ __ j(zero, &add_arguments_object, Label::kNear);
__ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize));
+ __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict));
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
// Get the arguments boilerplate from the current (global) context.
- int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
+ const int offset =
+ Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
__ mov(edi, Operand(edi, offset));
// Copy the JS object part.
@@ -3680,15 +3162,12 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ mov(FieldOperand(eax, i), ebx);
}
- // Setup the callee in-object property.
- STATIC_ASSERT(Heap::arguments_callee_index == 0);
- __ mov(ebx, Operand(esp, 3 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
-
// Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::arguments_length_index == 1);
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize),
+ ecx);
// If there are no actual arguments, we're done.
Label done;
@@ -3700,16 +3179,17 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
+ __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(FACTORY->fixed_array_map()));
+
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
// Untag the length for the loop below.
__ SmiUntag(ecx);
// Copy the fixed array slots.
- NearLabel loop;
+ Label loop;
__ bind(&loop);
__ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
__ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
@@ -3724,7 +3204,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
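
The strict-mode boilerplate used above drops the callee in-object property, so the length slot moves to index 0 and the stub computes its byte offset inline. A minimal sketch of that offset arithmetic, with illustrative stand-ins for the real JSObject and Heap constants:

    #include <cassert>

    // Illustrative stand-ins; the real values come from JSObject and Heap.
    const int kPointerSize = 4;                        // ia32
    const int kJSObjectHeaderSize = 3 * kPointerSize;  // map, properties, elements
    const int kArgumentsLengthIndex = 0;               // first in-object slot

    int ArgumentsLengthOffset() {
      // Same expression as the FieldOperand above (ignoring the heap tag).
      return kJSObjectHeaderSize + kArgumentsLengthIndex * kPointerSize;
    }

    int main() {
      assert(ArgumentsLengthOffset() == 12);  // length sits right after the header
      return 0;
    }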
@@ -3756,18 +3236,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference::address_of_regexp_stack_memory_address(
+ masm->isolate());
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
+ ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
__ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
__ test(ebx, Operand(ebx));
- __ j(zero, &runtime, not_taken);
+ __ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
__ mov(eax, Operand(esp, kJSRegExpOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
+ __ JumpIfSmi(eax, &runtime);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
__ j(not_equal, &runtime);
// Check that the RegExp has been compiled (data contains a fixed array).
@@ -3801,8 +3281,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// edx: Number of capture registers
// Check that the second argument is a string.
__ mov(eax, Operand(esp, kSubjectOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
+ __ JumpIfSmi(eax, &runtime);
Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
__ j(NegateCondition(is_string), &runtime);
// Get the length of the string to ebx.
@@ -3814,8 +3293,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the third argument is a non-negative smi less than the subject
// string length. A negative value will be greater (unsigned comparison).
__ mov(eax, Operand(esp, kPreviousIndexOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
+ __ JumpIfNotSmi(eax, &runtime);
__ cmp(eax, Operand(ebx));
__ j(above_equal, &runtime);
@@ -3823,14 +3301,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// edx: Number of capture registers
// Check that the fourth argument is a JSArray object.
__ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
+ __ JumpIfSmi(eax, &runtime);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
__ j(not_equal, &runtime);
// Check that the JSArray is in fast case.
__ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
__ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(eax, Factory::fixed_array_map());
+ Factory* factory = masm->isolate()->factory();
+ __ cmp(eax, factory->fixed_array_map());
__ j(not_equal, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information.
@@ -3868,7 +3346,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &runtime);
// String is a cons string.
__ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
- __ cmp(Operand(edx), Factory::empty_string());
+ __ cmp(Operand(edx), factory->empty_string());
__ j(not_equal, &runtime);
__ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
@@ -3918,11 +3396,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// edx: code
// edi: encoding of subject string (1 if ASCII, 0 if two-byte)
// All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(&Counters::regexp_entry_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->regexp_entry_native(), 1);
- static const int kRegExpExecuteArguments = 7;
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ static const int kRegExpExecuteArguments = 8;
__ EnterApiExitFrame(kRegExpExecuteArguments);
+ // Argument 8: Pass current isolate address.
+ __ mov(Operand(esp, 7 * kPointerSize),
+ Immediate(ExternalReference::isolate_address()));
+
// Argument 7: Indicate that this is a direct call from JavaScript.
__ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
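
Each outgoing slot above is addressed as esp + (n - 1) * kPointerSize for 1-based C argument n, so the new isolate pointer (argument 8) lands at esp + 28 on ia32. A one-liner making that mapping explicit, with the 4-byte pointer size assumed:

    // Byte offset of outgoing C argument n (1-based), assuming 4-byte pointers.
    constexpr int ArgSlotOffset(int n) { return (n - 1) * 4; }
    static_assert(ArgSlotOffset(8) == 28, "argument 8 lives at esp + 7 * 4");
    static_assert(ArgSlotOffset(7) == 24, "argument 7 lives at esp + 6 * 4");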
@@ -3933,20 +3417,21 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 5: static offsets vector buffer.
__ mov(Operand(esp, 4 * kPointerSize),
- Immediate(ExternalReference::address_of_static_offsets_vector()));
+ Immediate(ExternalReference::address_of_static_offsets_vector(
+ masm->isolate())));
// Argument 4: End of string data
// Argument 3: Start of string data
- NearLabel setup_two_byte, setup_rest;
+ Label setup_two_byte, setup_rest;
__ test(edi, Operand(edi));
__ mov(edi, FieldOperand(eax, String::kLengthOffset));
- __ j(zero, &setup_two_byte);
+ __ j(zero, &setup_two_byte, Label::kNear);
__ SmiUntag(edi);
__ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
__ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
__ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
- __ jmp(&setup_rest);
+ __ jmp(&setup_rest, Label::kNear);
__ bind(&setup_two_byte);
STATIC_ASSERT(kSmiTag == 0);
@@ -3974,10 +3459,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
__ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
- __ j(equal, &success, taken);
+ __ j(equal, &success);
Label failure;
__ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
- __ j(equal, &failure, taken);
+ __ j(equal, &failure);
__ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
// If it is not an exception, it can only be a retry. Handle that in the
// runtime system.
__ j(not_equal, &runtime);
@@ -3985,9 +3470,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code, but
// the exception has not been created yet. Handle that in the runtime system.
// TODO(592): Rerun the RegExp to get the stack overflow exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
+ ExternalReference pending_exception(Isolate::k_pending_exception_address,
+ masm->isolate());
__ mov(edx,
- Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+ Operand::StaticVariable(ExternalReference::the_hole_value_location(
+ masm->isolate())));
__ mov(eax, Operand::StaticVariable(pending_exception));
__ cmp(edx, Operand(eax));
__ j(equal, &runtime);
@@ -3998,7 +3485,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Special handling of termination exceptions, which are uncatchable
// by JavaScript code.
- __ cmp(eax, Factory::termination_exception());
+ __ cmp(eax, factory->termination_exception());
Label throw_termination_exception;
__ j(equal, &throw_termination_exception);
@@ -4010,7 +3497,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&failure);
// For failure to match, return null.
- __ mov(Operand(eax), Factory::null_value());
+ __ mov(Operand(eax), factory->null_value());
__ ret(4 * kPointerSize);
// Load RegExp data.
@@ -4046,18 +3533,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector();
+ ExternalReference::address_of_static_offsets_vector(masm->isolate());
__ mov(ecx, Immediate(address_of_static_offsets_vector));
// ebx: last_match_info backing store (FixedArray)
// ecx: offsets vector
// edx: number of capture registers
- NearLabel next_capture, done;
+ Label next_capture, done;
// Capture register counter starts from the number of capture registers and
// counts down until wrapping after zero.
__ bind(&next_capture);
__ sub(Operand(edx), Immediate(1));
- __ j(negative, &done);
+ __ j(negative, &done, Label::kNear);
// Read the value from the static offsets vector buffer.
__ mov(edi, Operand(ecx, edx, times_int_size, 0));
__ SmiTag(edi);
@@ -4084,10 +3571,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
const int kMaxInlineLength = 100;
Label slowcase;
- NearLabel done;
+ Label done;
__ mov(ebx, Operand(esp, kPointerSize * 3));
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(not_zero, &slowcase);
+ __ JumpIfNotSmi(ebx, &slowcase);
__ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
__ j(above, &slowcase);
// Smi-tagging is equivalent to multiplying by 2.
@@ -4111,7 +3597,8 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set elements to point to FixedArray allocated right after the JSArray.
// Interleave operations for better latency.
__ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
- __ mov(ecx, Immediate(Factory::empty_fixed_array()));
+ Factory* factory = masm->isolate()->factory();
+ __ mov(ecx, Immediate(factory->empty_fixed_array()));
__ lea(ebx, Operand(eax, JSRegExpResult::kSize));
__ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
@@ -4134,12 +3621,12 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set map.
__ mov(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(factory->fixed_array_map()));
// Set length.
__ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
// Fill contents of fixed-array with the-hole.
__ SmiUntag(ecx);
- __ mov(edx, Immediate(Factory::the_hole_value()));
+ __ mov(edx, Immediate(factory->the_hole_value()));
__ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
// Fill fixed array elements with hole.
// eax: JSArray.
@@ -4149,7 +3636,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
Label loop;
__ test(ecx, Operand(ecx));
__ bind(&loop);
- __ j(less_equal, &done); // Jump if ecx is negative or zero.
+ __ j(less_equal, &done, Label::kNear); // Jump if ecx is negative or zero.
__ sub(Operand(ecx), Immediate(1));
__ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
__ jmp(&loop);
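
The loop above fills the result's FixedArray back to front, counting ecx down and bailing out as soon as it is zero or negative. A plain C++ rendering of the same downward fill, with the hole sentinel as an assumed placeholder value:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    const std::uintptr_t kTheHoleSentinel = 0xbeef;  // stand-in for the-hole

    void FillWithHole(std::vector<std::uintptr_t>* elements) {
      // Mirrors the stub: check the counter first, so a zero length stores nothing.
      for (std::size_t i = elements->size(); i-- > 0;) {
        (*elements)[i] = kTheHoleSentinel;
      }
    }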
@@ -4175,7 +3662,8 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register scratch = scratch2;
// Load the number string cache.
- ExternalReference roots_address = ExternalReference::roots_address();
+ ExternalReference roots_address =
+ ExternalReference::roots_address(masm->isolate());
__ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
__ mov(number_string_cache,
Operand::StaticArray(scratch, times_pointer_size, roots_address));
@@ -4189,22 +3677,21 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// number string cache for smis is just the smi value, and the hash for
// doubles is the xor of the upper and lower words. See
// Heap::GetNumberStringCache.
- NearLabel smi_hash_calculated;
- NearLabel load_result_from_cache;
+ Label smi_hash_calculated;
+ Label load_result_from_cache;
if (object_is_smi) {
__ mov(scratch, object);
__ SmiUntag(scratch);
} else {
- NearLabel not_smi, hash_calculated;
+ Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
- __ test(object, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smi);
+ __ JumpIfNotSmi(object, &not_smi, Label::kNear);
__ mov(scratch, object);
__ SmiUntag(scratch);
- __ jmp(&smi_hash_calculated);
+ __ jmp(&smi_hash_calculated, Label::kNear);
__ bind(&not_smi);
__ cmp(FieldOperand(object, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ masm->isolate()->factory()->heap_number_map());
__ j(not_equal, not_found);
STATIC_ASSERT(8 == kDoubleSize);
__ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
@@ -4218,8 +3705,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
index,
times_twice_pointer_size,
FixedArray::kHeaderSize));
- __ test(probe, Immediate(kSmiTagMask));
- __ j(zero, not_found);
+ __ JumpIfSmi(probe, not_found);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
@@ -4232,7 +3718,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
}
__ j(parity_even, not_found); // Bail out if NaN is involved.
__ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
+ __ jmp(&load_result_from_cache, Label::kNear);
}
__ bind(&smi_hash_calculated);
@@ -4254,7 +3740,8 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
index,
times_twice_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(&Counters::number_to_string_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->number_to_string_native(), 1);
}
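
As the comment above explains, the cache hashes smis by their untagged value and heap numbers by xoring the two 32-bit halves of the IEEE double, then masks by the cache size. A sketch of that hash and the mask-based index, with the capacity as an illustrative power of two:

    #include <cstdint>
    #include <cstring>

    // Hash a double as described above: xor of the upper and lower 32-bit
    // words of its IEEE-754 bit pattern.
    std::uint32_t HashDouble(double value) {
      std::uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      return static_cast<std::uint32_t>(bits) ^
             static_cast<std::uint32_t>(bits >> 32);
    }

    // Cache slot: each entry is a (number, string) pair, so the mask uses
    // half the capacity. 128 is an assumed, illustrative capacity.
    std::uint32_t CacheIndex(std::uint32_t hash, std::uint32_t capacity = 128) {
      return hash & (capacity / 2 - 1);
    }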
@@ -4290,8 +3777,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
Label non_smi, smi_done;
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &non_smi, not_taken);
+ __ JumpIfNotSmi(ecx, &non_smi);
__ sub(edx, Operand(eax)); // Return on the result of the subtraction.
__ j(no_overflow, &smi_done);
__ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
@@ -4319,28 +3805,28 @@ void CompareStub::Generate(MacroAssembler* masm) {
if (cc_ != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
- NearLabel check_for_nan;
- __ cmp(edx, Factory::undefined_value());
- __ j(not_equal, &check_for_nan);
+ Label check_for_nan;
+ __ cmp(edx, masm->isolate()->factory()->undefined_value());
+ __ j(not_equal, &check_for_nan, Label::kNear);
__ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
__ ret(0);
__ bind(&check_for_nan);
}
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
// so we do the second best thing - test it ourselves.
// Note: if cc_ != equal, never_nan_nan_ is not used.
if (never_nan_nan_ && (cc_ == equal)) {
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
} else {
- NearLabel heap_number;
+ Label heap_number;
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- __ j(equal, &heap_number);
+ Immediate(masm->isolate()->factory()->heap_number_map()));
+ __ j(equal, &heap_number, Label::kNear);
if (cc_ != equal) {
// Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &not_identical);
}
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
@@ -4369,8 +3855,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ setcc(above_equal, eax);
__ ret(0);
} else {
- NearLabel nan;
- __ j(above_equal, &nan);
+ Label nan;
+ __ j(above_equal, &nan, Label::kNear);
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
__ bind(&nan);
@@ -4386,7 +3872,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Non-strict object equality is slower, so it is handled later in the stub.
if (cc_ == equal && strict_) {
Label slow; // Fallthrough label.
- NearLabel not_smis;
+ Label not_smis;
// If we're doing a strict equality comparison, we don't have to do
// type conversion, so we generate code to do fast comparison for objects
// and oddballs. Non-smi numbers and strings still go through the usual
@@ -4398,7 +3884,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ mov(ecx, Immediate(kSmiTagMask));
__ and_(ecx, Operand(eax));
__ test(ecx, Operand(edx));
- __ j(not_zero, &not_smis);
+ __ j(not_zero, &not_smis, Label::kNear);
// One operand is a smi.
// Check whether the non-smi is a heap number.
@@ -4413,7 +3899,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Check if the non-smi operand is a heap number.
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
+ Immediate(masm->isolate()->factory()->heap_number_map()));
// If heap number, handle it in the slow case.
__ j(equal, &slow);
// Return non-equal (ebx is not zero)
@@ -4427,13 +3913,13 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Get the type of the first operand.
// If the first object is a JS object, we have done pointer comparison.
- NearLabel first_non_object;
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(below, &first_non_object);
+ Label first_non_object;
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(below, &first_non_object, Label::kNear);
// Return non-zero (eax is not zero)
- NearLabel return_not_equal;
+ Label return_not_equal;
STATIC_ASSERT(kHeapObjectTag != 0);
__ bind(&return_not_equal);
__ ret(0);
@@ -4443,7 +3929,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
+ __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -4466,7 +3952,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, not_taken);
+ __ j(parity_even, &unordered);
// Return a result of -1, 0, or 1, based on EFLAGS.
__ mov(eax, 0); // equal
__ mov(ecx, Immediate(Smi::FromInt(1)));
@@ -4482,12 +3968,12 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ FCmp();
// Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, not_taken);
+ __ j(parity_even, &unordered);
- NearLabel below_label, above_label;
+ Label below_label, above_label;
// Return a result of -1, 0, or 1, based on EFLAGS.
- __ j(below, &below_label, not_taken);
- __ j(above, &above_label, not_taken);
+ __ j(below, &below_label);
+ __ j(above, &above_label);
__ Set(eax, Immediate(0));
__ ret(0);
@@ -4534,12 +4020,20 @@ void CompareStub::Generate(MacroAssembler* masm) {
&check_unequal_objects);
// Inline comparison of ascii strings.
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ if (cc_ == equal) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(masm,
edx,
eax,
ecx,
- ebx,
- edi);
+ ebx);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ edx,
+ eax,
+ ecx,
+ ebx,
+ edi);
+ }
#ifdef DEBUG
__ Abort("Unexpected fall-through from string comparison");
#endif
@@ -4549,8 +4043,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Non-strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
- NearLabel not_both_objects;
- NearLabel return_unequal;
+ Label not_both_objects;
+ Label return_unequal;
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
@@ -4558,20 +4052,20 @@ void CompareStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagMask == 1);
__ lea(ecx, Operand(eax, edx, times_1, 0));
__ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(below, &not_both_objects);
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
- __ j(below, &not_both_objects);
+ __ j(not_zero, &not_both_objects, Label::kNear);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(below, &not_both_objects, Label::kNear);
+ __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
+ __ j(below, &not_both_objects, Label::kNear);
// We do not bail out after this point. Both are JSObjects, and
// they are equal if and only if both are undetectable.
// The and of the undetectable flags is 1 if and only if they are equal.
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
- __ j(zero, &return_unequal);
+ __ j(zero, &return_unequal, Label::kNear);
__ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
- __ j(zero, &return_unequal);
+ __ j(zero, &return_unequal, Label::kNear);
// The objects are both undetectable, so they both compare as the value
// undefined, and are equal.
__ Set(eax, Immediate(EQUAL));
@@ -4609,8 +4103,7 @@ void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
Label* label,
Register object,
Register scratch) {
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, label);
+ __ JumpIfSmi(object, label);
__ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, kIsSymbolMask | kIsNotStringMask);
@@ -4627,31 +4120,22 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow;
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
+ // The receiver might implicitly be the global object. This is
+ // indicated by passing the hole as the receiver to the call
+ // function stub.
+ if (ReceiverMightBeImplicit()) {
+ Label call;
// Get the receiver from the stack.
// +1 ~ return address
- Label receiver_is_value, receiver_is_js_object;
__ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &receiver_is_value, not_taken);
-
- // Check if the receiver is a valid JS object.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
- __ j(above_equal, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ LeaveInternalFrame();
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
-
- __ bind(&receiver_is_js_object);
+ // Call as function is indicated with the hole.
+ __ cmp(eax, masm->isolate()->factory()->the_hole_value());
+ __ j(not_equal, &call, Label::kNear);
+ // Patch the receiver on the stack with the global receiver object.
+ __ mov(ebx, GlobalObjectOperand());
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+ __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ebx);
+ __ bind(&call);
}
// Get the function to call from the stack.
@@ -4659,15 +4143,30 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
// Check that the function really is a JavaScript function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
+ __ JumpIfSmi(edi, &slow);
// Go to the slow case if we do not have a function.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow, not_taken);
+ __ j(not_equal, &slow);
// Fast-case: Just invoke the function.
ParameterCount actual(argc_);
- __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+
+ if (ReceiverMightBeImplicit()) {
+ Label call_as_function;
+ __ cmp(eax, masm->isolate()->factory()->the_hole_value());
+ __ j(equal, &call_as_function);
+ __ InvokeFunction(edi,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ CALL_AS_METHOD);
+ __ bind(&call_as_function);
+ }
+ __ InvokeFunction(edi,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ CALL_AS_FUNCTION);
// Slow-case: Non-function called.
__ bind(&slow);
@@ -4677,11 +4176,17 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ Set(eax, Immediate(argc_));
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
__ jmp(adaptor, RelocInfo::CODE_TARGET);
}
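
With the boxing path gone, the hole sentinel now means "the receiver was implicit", and the stub swaps in the global receiver (and later selects CALL_AS_FUNCTION) when it sees it. A sketch of that dispatch, with placeholder sentinels standing in for V8's real heap values:

    // Placeholder sentinels; V8 uses real heap objects for these.
    static int the_hole_marker;
    static int global_receiver_object;

    // Mirrors the stub's fix-up: an implicit receiver (the hole) is replaced
    // by the global receiver; explicit receivers pass through untouched.
    int* ResolveReceiver(int* receiver) {
      return (receiver == &the_hole_marker) ? &global_receiver_object : receiver;
    }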
+bool CEntryStub::NeedsImmovableCode() {
+ return false;
+}
+
+
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ Throw(eax);
}
@@ -4718,7 +4223,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
}
ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth();
+ ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
if (always_allocate_scope) {
__ inc(Operand::StaticVariable(scope_depth));
}
@@ -4726,6 +4231,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Call C function.
__ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
__ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address()));
__ call(Operand(ebx));
// Result is in eax or edx:eax - do not destroy these registers!
@@ -4736,9 +4243,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Make sure we're not trying to return 'the hole' from the runtime
// call, as this may lead to crashes in the IC code later.
if (FLAG_debug_code) {
- NearLabel okay;
- __ cmp(eax, Factory::the_hole_value());
- __ j(not_equal, &okay);
+ Label okay;
+ __ cmp(eax, masm->isolate()->factory()->the_hole_value());
+ __ j(not_equal, &okay, Label::kNear);
__ int3();
__ bind(&okay);
}
@@ -4749,20 +4256,21 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ lea(ecx, Operand(eax, 1));
// Lower 2 bits of ecx are 0 iff eax has failure tag.
__ test(ecx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned, not_taken);
+ __ j(zero, &failure_returned);
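
The lea/test pair above is a branch-free tag check: assuming failure pointers end in the 2-bit tag 0b11, adding one clears exactly those bits, so testing the low bits of eax + 1 detects a failure without any shifting. A worked sketch:

    #include <cassert>
    #include <cstdint>

    const std::uintptr_t kFailureTagMask = 3;  // low two bits

    bool IsFailure(std::uintptr_t word) {
      // lea ecx, [eax + 1]; test ecx, kFailureTagMask
      return ((word + 1) & kFailureTagMask) == 0;  // ...11 + 1 == ...00
    }

    int main() {
      assert(IsFailure(0x1003));   // ends in 0b11: failure-tagged
      assert(!IsFailure(0x1000));  // smi-like word: not a failure
      return 0;
    }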
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ ExternalReference pending_exception_address(
+ Isolate::k_pending_exception_address, masm->isolate());
// Check that there is no pending exception; otherwise we
// should have returned some failure value.
if (FLAG_debug_code) {
__ push(edx);
__ mov(edx, Operand::StaticVariable(
- ExternalReference::the_hole_value_location()));
- NearLabel okay;
+ ExternalReference::the_hole_value_location(masm->isolate())));
+ Label okay;
__ cmp(edx, Operand::StaticVariable(pending_exception_address));
// Cannot use a check here, as it attempts to generate a call into the runtime.
- __ j(equal, &okay);
+ __ j(equal, &okay, Label::kNear);
__ int3();
__ bind(&okay);
__ pop(edx);
@@ -4779,21 +4287,22 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// If the returned exception is RETRY_AFTER_GC, continue at the retry label.
STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
__ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry, taken);
+ __ j(zero, &retry);
// Special handling of out-of-memory exceptions.
__ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
__ j(equal, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
+ ExternalReference the_hole_location =
+ ExternalReference::the_hole_value_location(masm->isolate());
__ mov(eax, Operand::StaticVariable(pending_exception_address));
- __ mov(edx,
- Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+ __ mov(edx, Operand::StaticVariable(the_hole_location));
__ mov(Operand::StaticVariable(pending_exception_address), edx);
// Special handling of termination exceptions, which are uncatchable
// by JavaScript code.
- __ cmp(eax, Factory::termination_exception());
+ __ cmp(eax, masm->isolate()->factory()->termination_exception());
__ j(equal, throw_termination_exception);
// Handle normal exception.
@@ -4893,16 +4402,22 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ push(ebx);
// Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+ ExternalReference c_entry_fp(Isolate::k_c_entry_fp_address, masm->isolate());
__ push(Operand::StaticVariable(c_entry_fp));
#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
+ masm->isolate());
__ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ j(not_equal, &not_outermost_js);
__ mov(Operand::StaticVariable(js_entry_sp), ebp);
+ __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ Label cont;
+ __ jmp(&cont);
__ bind(&not_outermost_js);
+ __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+ __ bind(&cont);
#endif
// Call a faked try-block that does the invoke.
@@ -4910,7 +4425,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Caught exception: Store result (exception) in the pending
// exception field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Top::k_pending_exception_address);
+ ExternalReference pending_exception(Isolate::k_pending_exception_address,
+ masm->isolate());
__ mov(Operand::StaticVariable(pending_exception), eax);
__ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
__ jmp(&exit);
@@ -4920,8 +4436,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
// Clear any pending exceptions.
- __ mov(edx,
- Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+ ExternalReference the_hole_location =
+ ExternalReference::the_hole_value_location(masm->isolate());
+ __ mov(edx, Operand::StaticVariable(the_hole_location));
__ mov(Operand::StaticVariable(pending_exception), edx);
// Fake a receiver (NULL).
@@ -4932,10 +4449,13 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// cannot store a reference to the trampoline code directly in this
// stub, because the builtin stubs may not have been generated yet.
if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ ExternalReference construct_entry(
+ Builtins::kJSConstructEntryTrampoline,
+ masm->isolate());
__ mov(edx, Immediate(construct_entry));
} else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
+ ExternalReference entry(Builtins::kJSEntryTrampoline,
+ masm->isolate());
__ mov(edx, Immediate(entry));
}
__ mov(edx, Operand(edx, 0)); // deref address
@@ -4943,22 +4463,23 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ call(Operand(edx));
// Unlink this frame from the handler chain.
- __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
- // Pop next_sp.
- __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+ __ PopTryHandler();
+ __ bind(&exit);
#ifdef ENABLE_LOGGING_AND_PROFILING
- // If current EBP value is the same as js_entry_sp value, it means that
- // the current function is the outermost.
- __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
+ // Check if the current stack frame is marked as the outermost JS frame.
+ __ pop(ebx);
+ __ cmp(Operand(ebx),
+ Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
__ j(not_equal, &not_outermost_js_2);
__ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ bind(&not_outermost_js_2);
#endif
// Restore the top frame descriptor from the stack.
- __ bind(&exit);
- __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
+ __ pop(Operand::StaticVariable(ExternalReference(
+ Isolate::k_c_entry_fp_address,
+ masm->isolate())));
// Restore callee-saved registers (C calling conventions).
__ pop(ebx);
@@ -5007,7 +4528,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
static const int8_t kCmpEdiImmediateByte2 = BitCast<int8_t, uint8_t>(0xff);
static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
- ExternalReference roots_address = ExternalReference::roots_address();
+ ExternalReference roots_address =
+ ExternalReference::roots_address(masm->isolate());
ASSERT_EQ(object.code(), InstanceofStub::left().code());
ASSERT_EQ(function.code(), InstanceofStub::right().code());
@@ -5020,23 +4542,22 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
// Check that the left-hand side is a JS object.
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, &not_js_object, not_taken);
+ __ JumpIfSmi(object, &not_js_object);
__ IsObjectJSObjectType(object, map, scratch, &not_js_object);
// If there is a call site cache, don't look in the global cache, but do the
// real lookup and update the call site cache.
if (!HasCallSiteInlineCheck()) {
// Look up the function and the map in the instanceof cache.
- NearLabel miss;
+ Label miss;
__ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ cmp(function,
Operand::StaticArray(scratch, times_pointer_size, roots_address));
- __ j(not_equal, &miss);
+ __ j(not_equal, &miss, Label::kNear);
__ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
__ cmp(map, Operand::StaticArray(
scratch, times_pointer_size, roots_address));
- __ j(not_equal, &miss);
+ __ j(not_equal, &miss, Label::kNear);
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(eax, Operand::StaticArray(
scratch, times_pointer_size, roots_address));
@@ -5048,8 +4569,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ TryGetFunctionPrototype(function, prototype, scratch, &slow);
// Check that the function prototype is a JS object.
- __ test(prototype, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
+ __ JumpIfSmi(prototype, &slow);
__ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
// Update the global instanceof or call site inlined cache with the current
@@ -5079,12 +4599,13 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain of the object looking for the function
// prototype.
__ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
- NearLabel loop, is_instance, is_not_instance;
+ Label loop, is_instance, is_not_instance;
__ bind(&loop);
__ cmp(scratch, Operand(prototype));
- __ j(equal, &is_instance);
- __ cmp(Operand(scratch), Immediate(Factory::null_value()));
- __ j(equal, &is_not_instance);
+ __ j(equal, &is_instance, Label::kNear);
+ Factory* factory = masm->isolate()->factory();
+ __ cmp(Operand(scratch), Immediate(factory->null_value()));
+ __ j(equal, &is_not_instance, Label::kNear);
__ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
__ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
__ jmp(&loop);
@@ -5097,7 +4618,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
times_pointer_size, roots_address), eax);
} else {
// Get return address and delta to inlined map check.
- __ mov(eax, Factory::true_value());
+ __ mov(eax, factory->true_value());
__ mov(scratch, Operand(esp, 0 * kPointerSize));
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
@@ -5119,7 +4640,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
scratch, times_pointer_size, roots_address), eax);
} else {
// Get return address and delta to inlined map check.
- __ mov(eax, Factory::false_value());
+ __ mov(eax, factory->false_value());
__ mov(scratch, Operand(esp, 0 * kPointerSize));
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
@@ -5137,21 +4658,19 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&not_js_object);
// Before the null, smi and string value checks, check that the rhs is a
// function, as an exception needs to be thrown for a non-function rhs.
- __ test(function, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
+ __ JumpIfSmi(function, &slow);
__ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
- __ j(not_equal, &slow, not_taken);
+ __ j(not_equal, &slow);
// Null is not an instance of anything.
- __ cmp(object, Factory::null_value());
+ __ cmp(object, factory->null_value());
__ j(not_equal, &object_not_null);
__ Set(eax, Immediate(Smi::FromInt(1)));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&object_not_null);
// Smi values are not instances of anything.
- __ test(object, Immediate(kSmiTagMask));
- __ j(not_zero, &object_not_null_or_smi, not_taken);
+ __ JumpIfNotSmi(object, &object_not_null_or_smi);
__ Set(eax, Immediate(Smi::FromInt(1)));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
@@ -5181,13 +4700,13 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ push(function);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
__ LeaveInternalFrame();
- NearLabel true_value, done;
+ Label true_value, done;
__ test(eax, Operand(eax));
- __ j(zero, &true_value);
- __ mov(eax, Factory::false_value());
- __ jmp(&done);
+ __ j(zero, &true_value, Label::kNear);
+ __ mov(eax, factory->false_value());
+ __ jmp(&done, Label::kNear);
__ bind(&true_value);
- __ mov(eax, Factory::true_value());
+ __ mov(eax, factory->true_value());
__ bind(&done);
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
}
@@ -5222,7 +4741,8 @@ const char* CompareStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* cc_name;
@@ -5277,8 +4797,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi, trigger the non-string case.
STATIC_ASSERT(kSmiTag == 0);
- __ test(object_, Immediate(kSmiTagMask));
- __ j(zero, receiver_not_string_);
+ __ JumpIfSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
__ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
@@ -5289,8 +4808,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the index is not a smi, trigger the non-smi case.
STATIC_ASSERT(kSmiTag == 0);
- __ test(index_, Immediate(kSmiTagMask));
- __ j(not_zero, &index_not_smi_);
+ __ JumpIfNotSmi(index_, &index_not_smi_);
// Put smi-tagged index into scratch register.
__ mov(scratch_, index_);
@@ -5315,7 +4833,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// the case we would rather go to the runtime system now to flatten
// the string.
__ cmp(FieldOperand(object_, ConsString::kSecondOffset),
- Immediate(Factory::empty_string()));
+ Immediate(masm->isolate()->factory()->empty_string()));
__ j(not_equal, &call_runtime_);
// Get the first of the two strings and load its instance type.
__ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
@@ -5360,7 +4878,10 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Index is not a smi.
__ bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
- __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
+ __ CheckMap(index_,
+ masm->isolate()->factory()->heap_number_map(),
+ index_not_number_,
+ DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
__ push(object_);
__ push(index_);
@@ -5385,8 +4906,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
STATIC_ASSERT(kSmiTag == 0);
- __ test(scratch_, Immediate(kSmiTagMask));
- __ j(not_zero, index_out_of_range_);
+ __ JumpIfNotSmi(scratch_, index_out_of_range_);
// Otherwise, return to the fast path.
__ jmp(&got_smi_index_);
@@ -5419,9 +4939,10 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ test(code_,
Immediate(kSmiTagMask |
((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ j(not_zero, &slow_case_, not_taken);
+ __ j(not_zero, &slow_case_);
- __ Set(result_, Immediate(Factory::single_character_string_cache()));
+ Factory* factory = masm->isolate()->factory();
+ __ Set(result_, Immediate(factory->single_character_string_cache()));
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiShiftSize == 0);
@@ -5429,8 +4950,8 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ mov(result_, FieldOperand(result_,
code_, times_half_pointer_size,
FixedArray::kHeaderSize));
- __ cmp(result_, Factory::undefined_value());
- __ j(equal, &slow_case_, not_taken);
+ __ cmp(result_, factory->undefined_value());
+ __ j(equal, &slow_case_);
__ bind(&exit_);
}
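
The fast path above folds the smi check and the ASCII-range check into a single mask test, then indexes a per-character cache keyed by the code itself. A sketch of the same lookup, with the cache shape and lazy fill as assumptions:

    #include <array>
    #include <string>

    const int kMaxAsciiCharCode = 127;  // as in the stub's range test

    // Assumed cache shape: one slot per one-byte character code.
    static std::array<std::string, kMaxAsciiCharCode + 1> single_char_cache;

    // Returns the cached one-character string; an empty result models the
    // stub's slow_case_ (out-of-range codes go to the runtime instead).
    std::string CharFromCode(int code) {
      if (code < 0 || code > kMaxAsciiCharCode) return std::string();
      if (single_char_cache[code].empty()) {
        single_char_cache[code] = std::string(1, static_cast<char>(code));
      }
      return single_char_cache[code];
    }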
@@ -5479,14 +5000,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Make sure that both arguments are strings if not known in advance.
if (flags_ == NO_STRING_ADD_FLAGS) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &string_add_runtime);
+ __ JumpIfSmi(eax, &string_add_runtime);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
__ j(above_equal, &string_add_runtime);
// First argument is a string, test the second.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &string_add_runtime);
+ __ JumpIfSmi(edx, &string_add_runtime);
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
__ j(above_equal, &string_add_runtime);
} else {
@@ -5509,22 +5028,23 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// eax: first string
// edx: second string
// Check if either of the strings is empty. In that case, return the other.
- NearLabel second_not_zero_length, both_not_zero_length;
+ Label second_not_zero_length, both_not_zero_length;
__ mov(ecx, FieldOperand(edx, String::kLengthOffset));
STATIC_ASSERT(kSmiTag == 0);
__ test(ecx, Operand(ecx));
- __ j(not_zero, &second_not_zero_length);
+ __ j(not_zero, &second_not_zero_length, Label::kNear);
// Second string is empty; the result is the first string, already in eax.
- __ IncrementCounter(&Counters::string_add_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&second_not_zero_length);
__ mov(ebx, FieldOperand(eax, String::kLengthOffset));
STATIC_ASSERT(kSmiTag == 0);
__ test(ebx, Operand(ebx));
- __ j(not_zero, &both_not_zero_length);
+ __ j(not_zero, &both_not_zero_length, Label::kNear);
// First string is empty; the result is the second string, which is in edx.
__ mov(eax, edx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Both strings are non-empty.
@@ -5539,8 +5059,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
// Handle exceptionally long strings in the runtime system.
__ j(overflow, &string_add_runtime);
- // Use the runtime system when adding two one character strings, as it
- // contains optimizations for this specific case using the symbol table.
+ // Use the symbol table when adding two one-character strings, as
+ // returning a symbol here helps later optimizations.
__ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
__ j(not_equal, &longer_than_two);
@@ -5558,7 +5078,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, ebx, ecx, eax, edx, edi,
&make_two_character_string_no_reload, &make_two_character_string);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Allocate a two character string.
@@ -5570,7 +5090,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
__ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
__ bind(&make_two_character_string_no_reload);
- __ IncrementCounter(&Counters::string_add_make_two_char, 1);
+ __ IncrementCounter(counters->string_add_make_two_char(), 1);
__ AllocateAsciiString(eax, // Result.
2, // Length.
edi, // Scratch 1.
@@ -5581,7 +5101,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ or_(ebx, Operand(ecx));
// Set the characters in the new string.
__ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&longer_than_two);
@@ -5612,7 +5132,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
__ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
__ mov(eax, ecx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
@@ -5689,7 +5209,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// edx: first char of second argument
// edi: length of second argument
StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Handle creating a flat two byte result.
@@ -5730,7 +5250,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// edx: first char of second argument
// edi: length of second argument
StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Just jump to runtime to add the two strings.
@@ -5753,8 +5273,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
Label* slow) {
// First check if the argument is already a string.
Label not_string, done;
- __ test(arg, Immediate(kSmiTagMask));
- __ j(zero, &not_string);
+ __ JumpIfSmi(arg, &not_string);
__ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
__ j(below, &done);
@@ -5775,8 +5294,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
// Check if the argument is a safe string wrapper.
__ bind(&not_cached);
- __ test(arg, Immediate(kSmiTagMask));
- __ j(zero, slow);
+ __ JumpIfSmi(arg, slow);
__ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
__ j(not_equal, slow);
__ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
@@ -5795,7 +5313,7 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register count,
Register scratch,
bool ascii) {
- NearLabel loop;
+ Label loop;
__ bind(&loop);
// This loop just copies one character at a time, as it is only used for very
// short strings.
@@ -5842,9 +5360,9 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
}
// Don't enter the rep movs if there are fewer than 4 bytes to copy.
- NearLabel last_bytes;
+ Label last_bytes;
__ test(count, Immediate(~3));
- __ j(zero, &last_bytes);
+ __ j(zero, &last_bytes, Label::kNear);
// Copy from edi to esi using rep movs instruction.
__ mov(scratch, count);
@@ -5862,7 +5380,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
__ j(zero, &done);
// Copy remaining characters.
- NearLabel loop;
+ Label loop;
__ bind(&loop);
__ mov_b(scratch, Operand(src, 0));
__ mov_b(Operand(dest, 0), scratch);
@@ -5888,11 +5406,11 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Make sure that both characters are not digits, as such strings have a
// different hash algorithm. Don't try to look for these in the symbol table.
- NearLabel not_array_index;
+ Label not_array_index;
__ mov(scratch, c1);
__ sub(Operand(scratch), Immediate(static_cast<int>('0')));
__ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index);
+ __ j(above, &not_array_index, Label::kNear);
__ mov(scratch, c2);
__ sub(Operand(scratch), Immediate(static_cast<int>('0')));
__ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
@@ -5915,7 +5433,8 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Load the symbol table.
Register symbol_table = c2;
- ExternalReference roots_address = ExternalReference::roots_address();
+ ExternalReference roots_address =
+ ExternalReference::roots_address(masm->isolate());
__ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
__ mov(symbol_table,
Operand::StaticArray(scratch, times_pointer_size, roots_address));
@@ -5955,8 +5474,11 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
SymbolTable::kElementsStartOffset));
// If the entry is undefined, no string with this hash can be found.
- __ cmp(candidate, Factory::undefined_value());
+ Factory* factory = masm->isolate()->factory();
+ __ cmp(candidate, factory->undefined_value());
__ j(equal, not_found);
+ __ cmp(candidate, factory->null_value());
+ __ j(equal, &next_probe[i]);
// If the length is not 2, the string is not a candidate.
__ cmp(FieldOperand(candidate, String::kLengthOffset),
@@ -6046,9 +5568,9 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
__ add(hash, Operand(scratch));
// if (hash == 0) hash = 27;
- NearLabel hash_not_zero;
+ Label hash_not_zero;
__ test(hash, Operand(hash));
- __ j(not_zero, &hash_not_zero);
+ __ j(not_zero, &hash_not_zero, Label::kNear);
__ mov(hash, Immediate(27));
__ bind(&hash_not_zero);
}
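
Only the tail of the hash is visible here; the mixing steps appear to follow the classic Jenkins one-at-a-time finish, and the zero fix-up reserves hash 0 as "not yet computed". A sketch under that assumption:

    #include <cstdint>

    // Assumed finalization (Jenkins one-at-a-time finish); only the final add
    // and the zero fix-up are visible in the hunk above.
    std::uint32_t FinishStringHash(std::uint32_t hash) {
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      if (hash == 0) hash = 27;  // never return 0: it means "no hash yet"
      return hash;
    }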
@@ -6066,8 +5588,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Make sure first argument is a string.
__ mov(eax, Operand(esp, 3 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
+ __ JumpIfSmi(eax, &runtime);
Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
__ j(NegateCondition(is_string), &runtime);
@@ -6077,11 +5598,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Calculate the length of the substring using the smi values.
Label result_longer_than_two;
__ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
+ __ JumpIfNotSmi(ecx, &runtime);
__ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
+ __ JumpIfNotSmi(edx, &runtime);
__ sub(ecx, Operand(edx));
__ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
Label return_eax;
@@ -6152,7 +5671,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// esi: character of substring start
StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
__ mov(esi, edx); // Restore esi.
- __ IncrementCounter(&Counters::sub_string_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
__ bind(&non_ascii_flat);
@@ -6193,7 +5713,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(esi, edx); // Restore esi.
__ bind(&return_eax);
- __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
// Just jump to runtime to create the substring.
@@ -6202,27 +5722,60 @@ void SubStringStub::Generate(MacroAssembler* masm) {
}
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2) {
+ Register length = scratch1;
+
+ // Compare lengths.
+ Label strings_not_equal, check_zero_length;
+ __ mov(length, FieldOperand(left, String::kLengthOffset));
+ __ cmp(length, FieldOperand(right, String::kLengthOffset));
+ __ j(equal, &check_zero_length, Label::kNear);
+ __ bind(&strings_not_equal);
+ __ Set(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
+ __ ret(0);
+
+ // Check if the length is zero.
+ Label compare_chars;
+ __ bind(&check_zero_length);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(length, Operand(length));
+ __ j(not_zero, &compare_chars, Label::kNear);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ // Compare characters.
+ __ bind(&compare_chars);
+ GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
+ &strings_not_equal, Label::kNear);
+
+ // Characters are equal.
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+}
+
+
void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3) {
- Label result_not_equal;
- Label result_greater;
- Label compare_lengths;
-
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_compare_native(), 1);
// Find minimum length.
- NearLabel left_shorter;
+ Label left_shorter;
__ mov(scratch1, FieldOperand(left, String::kLengthOffset));
__ mov(scratch3, scratch1);
__ sub(scratch3, FieldOperand(right, String::kLengthOffset));
Register length_delta = scratch3;
- __ j(less_equal, &left_shorter);
+ __ j(less_equal, &left_shorter, Label::kNear);
// Right string is shorter. Change scratch1 to the length of the right string.
__ sub(scratch1, Operand(length_delta));
__ bind(&left_shorter);
@@ -6230,41 +5783,19 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register min_length = scratch1;
// If either length is zero, just compare lengths.
+ Label compare_lengths;
__ test(min_length, Operand(min_length));
- __ j(zero, &compare_lengths);
-
- // Change index to run from -min_length to -1 by adding min_length
- // to string start. This means that loop ends when index reaches zero,
- // which doesn't need an additional compare.
- __ SmiUntag(min_length);
- __ lea(left,
- FieldOperand(left,
- min_length, times_1,
- SeqAsciiString::kHeaderSize));
- __ lea(right,
- FieldOperand(right,
- min_length, times_1,
- SeqAsciiString::kHeaderSize));
- __ neg(min_length);
-
- Register index = min_length; // index = -min_length;
+ __ j(zero, &compare_lengths, Label::kNear);
- {
- // Compare loop.
- NearLabel loop;
- __ bind(&loop);
- // Compare characters.
- __ mov_b(scratch2, Operand(left, index, times_1, 0));
- __ cmpb(scratch2, Operand(right, index, times_1, 0));
- __ j(not_equal, &result_not_equal);
- __ add(Operand(index), Immediate(1));
- __ j(not_zero, &loop);
- }
+ // Compare characters.
+ Label result_not_equal;
+ GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
+ &result_not_equal, Label::kNear);
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
__ test(length_delta, Operand(length_delta));
- __ j(not_zero, &result_not_equal);
+ __ j(not_zero, &result_not_equal, Label::kNear);
// Result is EQUAL.
STATIC_ASSERT(EQUAL == 0);
@@ -6272,8 +5803,9 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
+ Label result_greater;
__ bind(&result_not_equal);
- __ j(greater, &result_greater);
+ __ j(greater, &result_greater, Label::kNear);
// Result is LESS.
__ Set(eax, Immediate(Smi::FromInt(LESS)));
@@ -6286,6 +5818,36 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
}
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch,
+ Label* chars_not_equal,
+ Label::Distance chars_not_equal_near) {
+ // Change the index to run from -length to -1 by adding length to the
+ // string start. This means the loop ends when the index reaches zero,
+ // so no additional compare is needed.
+ __ SmiUntag(length);
+ __ lea(left,
+ FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(right,
+ FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
+ __ neg(length);
+ Register index = length; // index = -length;
+
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ __ mov_b(scratch, Operand(left, index, times_1, 0));
+ __ cmpb(scratch, Operand(right, index, times_1, 0));
+ __ j(not_equal, chars_not_equal, chars_not_equal_near);
+ __ add(Operand(index), Immediate(1));
+ __ j(not_zero, &loop);
+}
+
+
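
The new helper's negative-index trick is worth a plain rendering: biasing both string pointers by the length and running an index from -length up to 0 lets the increment's zero flag end the loop, so no separate bounds compare is needed. A C++ sketch of the same loop shape (CharsEqual is an illustrative name):

    #include <cstddef>

    // Compare `length` bytes; returns true iff all are equal. The index runs
    // from -length to -1, so the loop exits exactly when the incremented
    // index reaches zero; the stub's chars_not_equal is the early return.
    bool CharsEqual(const char* left, const char* right, std::ptrdiff_t length) {
      left += length;   // bias the pointers one-past-the-end...
      right += length;
      for (std::ptrdiff_t index = -length; index != 0; ++index) {
        if (left[index] != right[index]) return false;
      }
      return true;
    }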
void StringCompareStub::Generate(MacroAssembler* masm) {
Label runtime;
@@ -6297,13 +5859,13 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ mov(edx, Operand(esp, 2 * kPointerSize)); // left
__ mov(eax, Operand(esp, 1 * kPointerSize)); // right
- NearLabel not_same;
+ Label not_same;
__ cmp(edx, Operand(eax));
- __ j(not_equal, &not_same);
+ __ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ IncrementCounter(masm->isolate()->counters()->string_compare_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&not_same);
@@ -6325,74 +5887,20 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
-void StringCharAtStub::Generate(MacroAssembler* masm) {
- // Expects two arguments (object, index) on the stack:
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: index
- // esp[8]: object
-
- Register object = ebx;
- Register index = eax;
- Register scratch1 = ecx;
- Register scratch2 = edx;
- Register result = eax;
-
- __ pop(scratch1); // Return address.
- __ pop(index);
- __ pop(object);
- __ push(scratch1);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ Set(result, Immediate(Factory::empty_string()));
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Set(result, Immediate(Smi::FromInt(0)));
- __ jmp(&done);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm, call_helper);
-
- __ bind(&done);
- __ ret(0);
-}
-
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMIS);
- NearLabel miss;
+ Label miss;
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &miss, not_taken);
+ __ JumpIfNotSmi(ecx, &miss, Label::kNear);
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
__ sub(eax, Operand(edx));
} else {
- NearLabel done;
+ Label done;
__ sub(edx, Operand(eax));
- __ j(no_overflow, &done);
+ __ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
__ not_(edx);
__ bind(&done);
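
The not_(edx) above is a compact overflow fix-up: for a relational compare only the sign of left minus right matters, and when the subtraction overflows the computed sign is exactly wrong, so flipping every bit restores a correctly signed (if inexact) result. A sketch on untagged values, using the GCC/Clang __builtin_sub_overflow intrinsic as a stand-in for the overflow flag:

#include <cstdint>

// Hypothetical helper: the sign of the return value orders left against right.
static int32_t OrderingKey(int32_t left, int32_t right) {
  int32_t diff;
  if (__builtin_sub_overflow(left, right, &diff)) {
    diff = ~diff;  // Overflow flipped the sign bit; NOT flips it back.
  }
  return diff;  // Negative, zero, or positive: LESS, EQUAL, or GREATER.
}
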
@@ -6408,18 +5916,17 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::HEAP_NUMBERS);
- NearLabel generic_stub;
- NearLabel unordered;
- NearLabel miss;
+ Label generic_stub;
+ Label unordered;
+ Label miss;
__ mov(ecx, Operand(edx));
__ and_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, &generic_stub, not_taken);
+ __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
__ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss, Label::kNear);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss, Label::kNear);
  // Inlining the double comparison and falling back to the general compare
  // stub if NaN is involved or SSE2 or CMOV is unsupported.
@@ -6435,7 +5942,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, not_taken);
+ __ j(parity_even, &unordered, Label::kNear);
// Return a result of -1, 0, or 1, based on EFLAGS.
// Performing mov, because xor would destroy the flag register.
@@ -6458,18 +5965,138 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
+void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::SYMBOLS);
+ ASSERT(GetCondition() == equal);
+
+ // Registers containing left and right operands respectively.
+ Register left = edx;
+ Register right = eax;
+ Register tmp1 = ecx;
+ Register tmp2 = ebx;
+
+ // Check that both operands are heap objects.
+ Label miss;
+ __ mov(tmp1, Operand(left));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ and_(tmp1, Operand(right));
+ __ JumpIfSmi(tmp1, &miss, Label::kNear);
+
+ // Check that both operands are symbols.
+ __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ and_(tmp1, Operand(tmp2));
+ __ test(tmp1, Immediate(kIsSymbolMask));
+ __ j(zero, &miss, Label::kNear);
+
+ // Symbols are compared by identity.
+ Label done;
+ __ cmp(left, Operand(right));
+ // Make sure eax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(eax));
+ __ j(not_equal, &done, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ bind(&done);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
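
Because symbols are interned, the stub never inspects characters: pointer identity alone decides equality, and on inequality it can return right itself (a non-zero heap pointer, conveniently already in eax) as the not-equal result. A minimal model under those assumptions:

// Hypothetical model: interned strings are unique per content, so identity
// implies, and is implied by, equality of the underlying characters.
struct Symbol;  // Stand-in for an interned V8 string.

static bool SymbolsEqual(const Symbol* left, const Symbol* right) {
  return left == right;  // No character comparison needed.
}
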
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(GetCondition() == equal);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = edx;
+ Register right = eax;
+ Register tmp1 = ecx;
+ Register tmp2 = ebx;
+ Register tmp3 = edi;
+
+ // Check that both operands are heap objects.
+ __ mov(tmp1, Operand(left));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ and_(tmp1, Operand(right));
+ __ JumpIfSmi(tmp1, &miss);
+
+ // Check that both operands are strings. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ mov(tmp3, tmp1);
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ or_(tmp3, Operand(tmp2));
+ __ test(tmp3, Immediate(kIsNotStringMask));
+ __ j(not_zero, &miss);
+
+ // Fast check for identical strings.
+ Label not_same;
+ __ cmp(left, Operand(right));
+ __ j(not_equal, &not_same, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ // Handle not identical strings.
+ __ bind(&not_same);
+
+ // Check that both strings are symbols. If they are, we're done
+ // because we already know they are not identical.
+ Label do_compare;
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ and_(tmp1, Operand(tmp2));
+ __ test(tmp1, Immediate(kIsSymbolMask));
+ __ j(zero, &do_compare, Label::kNear);
+ // Make sure eax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(eax));
+ __ ret(0);
+
+ // Check that both strings are sequential ASCII.
+ Label runtime;
+ __ bind(&do_compare);
+ __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
+
+ // Compare flat ASCII strings. Returns when done.
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, left, right, tmp1, tmp2);
+
+ // Handle more complex cases in runtime.
+ __ bind(&runtime);
+ __ pop(tmp1); // Return address.
+ __ push(left);
+ __ push(right);
+ __ push(tmp1);
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
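
Stripped of register allocation, GenerateStrings is a four-way ladder. A hedged sketch with hypothetical predicates standing in for the instance-type checks:

enum Outcome { kEqual, kNotEqual, kCompareChars, kCallRuntime };

static Outcome ClassifyStringComparison(const void* left, const void* right,
                                        bool both_symbols,
                                        bool both_sequential_ascii) {
  if (left == right) return kEqual;                 // Fast identity check.
  if (both_symbols) return kNotEqual;               // Interned yet not identical.
  if (both_sequential_ascii) return kCompareChars;  // Flat ASCII equality loop.
  return kCallRuntime;                              // Runtime::kStringEquals.
}
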
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::OBJECTS);
- NearLabel miss;
+ Label miss;
__ mov(ecx, Operand(edx));
__ and_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ JumpIfSmi(ecx, &miss, Label::kNear);
__ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss, Label::kNear);
__ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss, Label::kNear);
ASSERT(GetCondition() == equal);
__ sub(eax, Operand(edx));
@@ -6488,7 +6115,8 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ push(ecx);
// Call the runtime system in a fresh internal frame.
- ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
+ ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
+ masm->isolate());
__ EnterInternalFrame();
__ push(edx);
__ push(eax);
@@ -6510,145 +6138,215 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
}
-// Loads an indexed element from a pixel array.
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register untagged_key,
- Register result,
- Label* not_pixel_array,
- Label* key_not_smi,
- Label* out_of_range) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // key - holds the key and is unchanged (must be a smi).
- //  elements - is set to the receiver's elements if
- // the receiver doesn't have a pixel array or the
- // key is not a smi, otherwise it's the elements'
- // external pointer.
- // untagged_key - is set to the untagged key
-
- // Some callers have already verified that the key is a smi. key_not_smi is
- // set to NULL as a sentinel for that case. Otherwise, an explicit check
- // ensuring the key is a smi must be added.
- if (key_not_smi != NULL) {
- __ JumpIfNotSmi(key, key_not_smi);
- } else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(key);
- }
+// Helper function used to check that the dictionary doesn't contain
+// the property. This function may return false negatives, so the code at
+// miss_label must always fall back to a complete property check.
+// This function is safe to call if the receiver has fast properties.
+// Name must be a symbol and receiver must be a heap object.
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ String* name,
+ Register r0) {
+ ASSERT(name->IsSymbol());
+
+ // If the names in the slots probed for the hash value (probes 1 to
+ // kProbes - 1) are not equal to the name, and the kProbes-th slot is
+ // unused (its name is the undefined value), the hash table is guaranteed
+ // not to contain the property. This holds even if some slots represent
+ // deleted properties (their names are the null value).
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = r0;
+ // Capacity is smi 2^n.
+ __ mov(index, FieldOperand(properties, kCapacityOffset));
+ __ dec(index);
+ __ and_(Operand(index),
+ Immediate(Smi::FromInt(name->Hash() +
+ StringDictionary::GetProbeOffset(i))));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
+ Register entity_name = r0;
+ // Finding undefined in this slot means the name is not in the dictionary.
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
+ __ j(equal, done);
+
+ // Stop if we found the property.
+ __ cmp(entity_name, Handle<String>(name));
+ __ j(equal, miss);
+
+ // Check if the entry name is not a symbol.
+ __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+ kIsSymbolMask);
+ __ j(zero, miss);
}
- __ mov(untagged_key, key);
- __ SmiUntag(untagged_key);
-
- __ mov(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- // By passing NULL as not_pixel_array, callers signal that they have already
- // verified that the receiver has pixel array elements.
- if (not_pixel_array != NULL) {
- __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
- } else {
- if (FLAG_debug_code) {
- // Map check should have already made sure that elements is a pixel array.
- __ cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(Factory::pixel_array_map()));
- __ Assert(equal, "Elements isn't a pixel array");
+
+ StringDictionaryLookupStub stub(properties,
+ r0,
+ r0,
+ StringDictionaryLookupStub::NEGATIVE_LOOKUP);
+ __ push(Immediate(Handle<Object>(name)));
+ __ push(Immediate(name->Hash()));
+ MaybeObject* result = masm->TryCallStub(&stub);
+ if (result->IsFailure()) return result;
+ __ test(r0, Operand(r0));
+ __ j(not_zero, miss);
+ __ jmp(done);
+ return result;
+}
+
+
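
A minimal sketch of the probe sequence this negative lookup unrolls (hypothetical stand-ins for the dictionary layout; the probe formula follows the comment above): a free slot proves absence, an exact key hit proves presence, and a non-symbol key is inconclusive because it might still compare equal to the name. The positive lookup below walks the same sequence, succeeding only on an exact hit.

#include <cstdint>

enum ProbeResult { kAbsent, kPresent, kInconclusive };

// Flat model of a StringDictionary: keys[i] holds the key of entry i and a
// zero entry marks a free (undefined) slot.
static ProbeResult ProbeForName(const intptr_t* keys, uint32_t capacity,
                                uint32_t hash, intptr_t name,
                                bool (*is_symbol)(intptr_t), int probes) {
  const uint32_t mask = capacity - 1;  // Capacity is a power of two.
  for (int i = 0; i < probes; ++i) {
    // Masked index, as in the comment above: (hash + i + i * i) & mask.
    const uint32_t index = (hash + static_cast<uint32_t>(i + i * i)) & mask;
    const intptr_t key = keys[index];
    if (key == 0) return kAbsent;               // Free slot: name not present.
    if (key == name) return kPresent;           // Exact hit.
    if (!is_symbol(key)) return kInconclusive;  // Might still equal the name.
  }
  return kInconclusive;  // Fall back to the full lookup.
}
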
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found, leaving the
+// index into the dictionary in |r0|. Jump to the |miss| label
+// otherwise.
+void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1) {
+ // Assert that name contains a string.
+ if (FLAG_debug_code) __ AbortIfNotString(name);
+
+ __ mov(r1, FieldOperand(elements, kCapacityOffset));
+ __ shr(r1, kSmiTagSize); // convert smi to int
+ __ dec(r1);
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up. Measurements done on Gmail indicate that 2 probes
+ // cover ~93% of loads from dictionaries.
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
+ __ shr(r0, String::kHashShift);
+ if (i > 0) {
+ __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
}
+ __ and_(r0, Operand(r1));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
+
+ // Check if the key is identical to the name.
+ __ cmp(name, Operand(elements,
+ r0,
+ times_4,
+ kElementsStartOffset - kHeapObjectTag));
+ __ j(equal, done);
}
- // Key must be in range.
- __ cmp(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
- __ j(above_equal, out_of_range); // unsigned check handles negative keys.
-
- // Perform the indexed load and tag the result as a smi.
- __ mov(elements, FieldOperand(elements, PixelArray::kExternalPointerOffset));
- __ movzx_b(result, Operand(elements, untagged_key, times_1, 0));
- __ SmiTag(result);
- __ ret(0);
+ StringDictionaryLookupStub stub(elements,
+ r1,
+ r0,
+ POSITIVE_LOOKUP);
+ __ push(name);
+ __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
+ __ shr(r0, String::kHashShift);
+ __ push(r0);
+ __ CallStub(&stub);
+
+ __ test(r1, Operand(r1));
+ __ j(zero, miss);
+ __ jmp(done);
}
-// Stores an indexed element into a pixel array, clamping the stored value.
-void GenerateFastPixelArrayStore(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register elements,
- Register scratch1,
- bool load_elements_from_receiver,
- Label* key_not_smi,
- Label* value_not_smi,
- Label* not_pixel_array,
- Label* out_of_range) {
- // Register use:
- // receiver - holds the receiver and is unchanged unless the
- // store succeeds.
- // key - holds the key (must be a smi) and is unchanged.
- // value - holds the value (must be a smi) and is unchanged.
- // elements - holds the element object of the receiver on entry if
- // load_elements_from_receiver is false, otherwise used
- // internally to store the pixel arrays elements and
- // external array pointer.
- //
- // receiver, key and value remain unmodified until it's guaranteed that the
- // store will succeed.
- Register external_pointer = elements;
- Register untagged_key = scratch1;
- Register untagged_value = receiver; // Only set once success guaranteed.
-
- // Fetch the receiver's elements if the caller hasn't already done so.
- if (load_elements_from_receiver) {
- __ mov(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- }
+void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // Stack frame on entry:
+ // esp[0 * kPointerSize]: return address.
+ // esp[1 * kPointerSize]: key's hash.
+ // esp[2 * kPointerSize]: key.
+ // Registers:
+ // dictionary_: StringDictionary to probe.
+ // result_: used as scratch.
+ // index_: will hold the index of the entry if the lookup is successful;
+ //         it might alias with result_.
+ // Returns:
+ // result_ is zero if the lookup failed, non-zero otherwise.
- // By passing NULL as not_pixel_array, callers signal that they have already
- // verified that the receiver has pixel array elements.
- if (not_pixel_array != NULL) {
- __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
- } else {
- if (FLAG_debug_code) {
- // Map check should have already made sure that elements is a pixel array.
- __ cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(Factory::pixel_array_map()));
- __ Assert(equal, "Elements isn't a pixel array");
- }
- }
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
- // Some callers have already verified that the key is a smi. key_not_smi is
- // set to NULL as a sentinel for that case. Otherwise, an explicit check
- // ensuring the key is a smi must be added.
- if (key_not_smi != NULL) {
- __ JumpIfNotSmi(key, key_not_smi);
- } else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(key);
+ Register scratch = result_;
+
+ __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
+ __ dec(scratch);
+ __ SmiUntag(scratch);
+ __ push(scratch);
+
+ // If the names in the slots probed for the hash value (probes 1 to
+ // kProbes - 1) are not equal to the name, and the kProbes-th slot is
+ // unused (its name is the undefined value), the hash table is guaranteed
+ // not to contain the property. This holds even if some slots represent
+ // deleted properties (their names are the null value).
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ mov(scratch, Operand(esp, 2 * kPointerSize));
+ if (i > 0) {
+ __ add(Operand(scratch),
+ Immediate(StringDictionary::GetProbeOffset(i)));
+ }
+ __ and_(scratch, Operand(esp, 0));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
+
+ // Finding undefined in this slot means the name is not in the dictionary.
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ mov(scratch, Operand(dictionary_,
+ index_,
+ times_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ __ cmp(scratch, masm->isolate()->factory()->undefined_value());
+ __ j(equal, &not_in_dictionary);
+
+ // Stop if we found the property.
+ __ cmp(scratch, Operand(esp, 3 * kPointerSize));
+ __ j(equal, &in_dictionary);
+
+ if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ // If we hit a non-symbol key during negative lookup,
+ // we have to bail out, as this key might be equal to the
+ // key we are looking for.
+
+ // Check if the entry name is not a symbol.
+ __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ kIsSymbolMask);
+ __ j(zero, &maybe_in_dictionary);
}
}
- // Key must be a smi and it must be in range.
- __ mov(untagged_key, key);
- __ SmiUntag(untagged_key);
- __ cmp(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
- __ j(above_equal, out_of_range); // unsigned check handles negative keys.
-
- // Value must be a smi.
- __ JumpIfNotSmi(value, value_not_smi);
- __ mov(untagged_value, value);
- __ SmiUntag(untagged_value);
-
- { // Clamp the value to [0..255].
- NearLabel done;
- __ test(untagged_value, Immediate(0xFFFFFF00));
- __ j(zero, &done);
- __ setcc(negative, untagged_value); // 1 if negative, 0 if positive.
- __ dec_b(untagged_value); // 0 if negative, 255 if positive.
- __ bind(&done);
+ __ bind(&maybe_in_dictionary);
+ // If we are doing a negative lookup, then a probing failure should be
+ // treated as a lookup success. For a positive lookup, a probing failure
+ // should be treated as a lookup failure.
+ if (mode_ == POSITIVE_LOOKUP) {
+ __ mov(result_, Immediate(0));
+ __ Drop(1);
+ __ ret(2 * kPointerSize);
}
- __ mov(external_pointer,
- FieldOperand(elements, PixelArray::kExternalPointerOffset));
- __ mov_b(Operand(external_pointer, untagged_key, times_1, 0), untagged_value);
- __ ret(0); // Return value in eax.
+ __ bind(&in_dictionary);
+ __ mov(result_, Immediate(1));
+ __ Drop(1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&not_in_dictionary);
+ __ mov(result_, Immediate(0));
+ __ Drop(1);
+ __ ret(2 * kPointerSize);
}
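
The pixel-array store deleted in this hunk clamps stored values to [0..255] without branches via setcc/dec_b. The same trick in a minimal C++ sketch (hypothetical helper): setcc yields 1 for a negative input and 0 for a too-large positive one, and decrementing that byte wraps it to exactly the right saturation value.

#include <cstdint>

static uint8_t ClampToByte(int32_t value) {
  if ((value & 0xFFFFFF00) == 0) {
    return static_cast<uint8_t>(value);       // Already in [0..255].
  }
  uint8_t negative = value < 0 ? 1 : 0;       // setcc(negative).
  return static_cast<uint8_t>(negative - 1);  // dec_b: 0 if negative, else 255.
}
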
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index 454bfa0a3..d51549d54 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -45,8 +45,8 @@ class TranscendentalCacheStub: public CodeStub {
UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
};
- explicit TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
+ TranscendentalCacheStub(TranscendentalCache::Type type,
+ ArgumentType argument_type)
: type_(type), argument_type_(argument_type) {}
void Generate(MacroAssembler* masm);
private:
@@ -60,89 +60,28 @@ class TranscendentalCacheStub: public CodeStub {
};
-class ToBooleanStub: public CodeStub {
+class UnaryOpStub: public CodeStub {
public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags,
- TypeInfo operands_type)
+ UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
: op_(op),
mode_(mode),
- flags_(flags),
- args_in_registers_(false),
- args_reversed_(false),
- static_operands_type_(operands_type),
- runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
+ operand_type_(UnaryOpIC::UNINITIALIZED),
name_(NULL) {
- if (static_operands_type_.IsSmi()) {
- mode_ = NO_OVERWRITE;
- }
- use_sse3_ = CpuFeatures::IsSupported(SSE3);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
+ UnaryOpStub(int key, UnaryOpIC::TypeInfo operand_type)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
- flags_(FlagBits::decode(key)),
- args_in_registers_(ArgsInRegistersBits::decode(key)),
- args_reversed_(ArgsReversedBits::decode(key)),
- use_sse3_(SSE3Bits::decode(key)),
- static_operands_type_(TypeInfo::ExpandedRepresentation(
- StaticTypeInfoBits::decode(key))),
- runtime_operands_type_(runtime_operands_type),
+ operand_type_(operand_type),
name_(NULL) {
}
- // Generate code to call the stub with the supplied arguments. This will add
- // code at the call site to prepare arguments either in registers or on the
- // stack together with the actual call.
- void GenerateCall(MacroAssembler* masm, Register left, Register right);
- void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
- void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
- bool ArgsInRegistersSupported() {
- return op_ == Token::ADD || op_ == Token::SUB
- || op_ == Token::MUL || op_ == Token::DIV;
- }
-
- void SetArgsInRegisters() {
- ASSERT(ArgsInRegistersSupported());
- args_in_registers_ = true;
- }
-
private:
Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
- bool args_in_registers_; // Arguments passed in registers not on the stack.
- bool args_reversed_; // Left and right argument are swapped.
- bool use_sse3_;
-
- // Number type information of operands, determined by code generator.
- TypeInfo static_operands_type_;
+ UnaryOverwriteMode mode_;
// Operand type information determined at runtime.
- BinaryOpIC::TypeInfo runtime_operands_type_;
+ UnaryOpIC::TypeInfo operand_type_;
char* name_;
@@ -150,99 +89,86 @@ class GenericBinaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
- PrintF("GenericBinaryOpStub %d (op %s), "
- "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
+ PrintF("TypeRecordingUnaryOpStub %d (op %s), "
+ "(mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
- static_cast<int>(flags_),
- static_cast<int>(args_in_registers_),
- static_cast<int>(args_reversed_),
- static_operands_type_.ToString());
+ UnaryOpIC::GetName(operand_type_));
}
#endif
- // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class SSE3Bits: public BitField<bool, 9, 1> {};
- class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
- class ArgsReversedBits: public BitField<bool, 11, 1> {};
- class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
- class StaticTypeInfoBits: public BitField<int, 13, 3> {};
- class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
+ class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+ class OpBits: public BitField<Token::Value, 1, 7> {};
+ class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
- Major MajorKey() { return GenericBinaryOp; }
+ Major MajorKey() { return UnaryOp; }
int MinorKey() {
- // Encode the parameters in a unique 18 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_)
- | SSE3Bits::encode(use_sse3_)
- | ArgsInRegistersBits::encode(args_in_registers_)
- | ArgsReversedBits::encode(args_reversed_)
- | StaticTypeInfoBits::encode(
- static_operands_type_.ThreeBitRepresentation())
- | RuntimeTypeInfoBits::encode(runtime_operands_type_);
+ return ModeBits::encode(mode_)
+ | OpBits::encode(op_)
+ | OperandTypeInfoBits::encode(operand_type_);
}
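
The MinorKey packing above relies on V8's BitField template. A self-contained stand-in (not the actual V8 header) shows how the three fields share one small integer key and why the bit ranges must not overlap:

#include <cstdint>

// A value of type T stored in bits [shift, shift + size) of a 32-bit key.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
  static T decode(uint32_t key) { return static_cast<T>((key & kMask) >> shift); }
};

// Mirrors the UnaryOpStub layout: 1 mode bit, 7 op bits, 3 operand-type bits.
typedef BitField<int, 0, 1> ModeBits;
typedef BitField<int, 1, 7> OpBits;
typedef BitField<int, 8, 3> OperandTypeInfoBits;

static uint32_t MakeMinorKey(int mode, int op, int operand_type) {
  return ModeBits::encode(mode) | OpBits::encode(op) |
         OperandTypeInfoBits::encode(operand_type);
}
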
+ // Note: A lot of the helper functions below will vanish when we use virtual
+ // functions instead of switches more often.
void Generate(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- bool IsOperationCommutative() {
- return (op_ == Token::ADD) || (op_ == Token::MUL);
- }
+ void GenerateTypeTransition(MacroAssembler* masm);
- void SetArgsReversed() { args_reversed_ = true; }
- bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgsInRegisters() { return args_in_registers_; }
- bool HasArgsReversed() { return args_reversed_; }
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateSmiStubSub(MacroAssembler* masm);
+ void GenerateSmiStubBitNot(MacroAssembler* masm);
+ void GenerateSmiCodeSub(MacroAssembler* masm,
+ Label* non_smi,
+ Label* undo,
+ Label* slow,
+ Label::Distance non_smi_near = Label::kFar,
+ Label::Distance undo_near = Label::kFar,
+ Label::Distance slow_near = Label::kFar);
+ void GenerateSmiCodeBitNot(MacroAssembler* masm,
+ Label* non_smi,
+ Label::Distance non_smi_near = Label::kFar);
+ void GenerateSmiCodeUndo(MacroAssembler* masm);
- bool ShouldGenerateSmiCode() {
- return HasSmiCodeInStub() &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateHeapNumberStubSub(MacroAssembler* masm);
+ void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+ void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+ void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateGenericStubSub(MacroAssembler* masm);
+ void GenerateGenericStubBitNot(MacroAssembler* masm);
+ void GenerateGenericCodeFallback(MacroAssembler* masm);
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+ virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
+ return UnaryOpIC::ToState(operand_type_);
}
virtual void FinishCode(Code* code) {
- code->set_binary_op_type(runtime_operands_type_);
+ code->set_unary_op_type(operand_type_);
}
-
- friend class CodeGenerator;
};
-class TypeRecordingBinaryOpStub: public CodeStub {
+class BinaryOpStub: public CodeStub {
public:
- TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+ BinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
mode_(mode),
- operands_type_(TRBinaryOpIC::UNINITIALIZED),
- result_type_(TRBinaryOpIC::UNINITIALIZED),
+ operands_type_(BinaryOpIC::UNINITIALIZED),
+ result_type_(BinaryOpIC::UNINITIALIZED),
name_(NULL) {
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
- TypeRecordingBinaryOpStub(
+ BinaryOpStub(
int key,
- TRBinaryOpIC::TypeInfo operands_type,
- TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+ BinaryOpIC::TypeInfo operands_type,
+ BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
use_sse3_(SSE3Bits::decode(key)),
@@ -261,8 +187,8 @@ class TypeRecordingBinaryOpStub: public CodeStub {
bool use_sse3_;
// Operand type information determined at runtime.
- TRBinaryOpIC::TypeInfo operands_type_;
- TRBinaryOpIC::TypeInfo result_type_;
+ BinaryOpIC::TypeInfo operands_type_;
+ BinaryOpIC::TypeInfo result_type_;
char* name_;
@@ -270,12 +196,12 @@ class TypeRecordingBinaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
- PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+ PrintF("BinaryOpStub %d (op %s), "
"(mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
- TRBinaryOpIC::GetName(operands_type_));
+ BinaryOpIC::GetName(operands_type_));
}
#endif
@@ -283,10 +209,10 @@ class TypeRecordingBinaryOpStub: public CodeStub {
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class SSE3Bits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+ class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+ class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
- Major MajorKey() { return TypeRecordingBinaryOp; }
+ Major MajorKey() { return BinaryOp; }
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
@@ -308,6 +234,7 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateOddballStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateAddStrings(MacroAssembler* masm);
@@ -316,15 +243,15 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
- virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
- return TRBinaryOpIC::ToState(operands_type_);
+ return BinaryOpIC::ToState(operands_type_);
}
virtual void FinishCode(Code* code) {
- code->set_type_recording_binary_op_type(operands_type_);
- code->set_type_recording_binary_op_result_type(result_type_);
+ code->set_binary_op_type(operands_type_);
+ code->set_binary_op_result_type(result_type_);
}
friend class CodeGenerator;
@@ -438,11 +365,9 @@ class SubStringStub: public CodeStub {
class StringCompareStub: public CodeStub {
public:
- explicit StringCompareStub() {
- }
+ StringCompareStub() { }
- // Compare two flat ascii strings and returns result in eax after popping two
- // arguments from the stack.
+ // Compares two flat ASCII strings and returns result in eax.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
@@ -450,11 +375,27 @@ class StringCompareStub: public CodeStub {
Register scratch2,
Register scratch3);
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
+ // Compares two flat ASCII strings for equality and returns result
+ // in eax.
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2);
- void Generate(MacroAssembler* masm);
+ private:
+ virtual Major MajorKey() { return StringCompare; }
+ virtual int MinorKey() { return 0; }
+ virtual void Generate(MacroAssembler* masm);
+
+ static void GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch,
+ Label* chars_not_equal,
+ Label::Distance chars_not_equal_near = Label::kFar);
};
@@ -491,46 +432,73 @@ class NumberToStringStub: public CodeStub {
};
-// Generate code to load an element from a pixel array. The receiver is assumed
-// to not be a smi and to have elements; the caller must guarantee this
-// precondition. If key is not a smi, then the generated code branches to
-// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
-// check has already been performed on key so that the smi check is not
-// generated. If key is not a valid index within the bounds of the pixel array,
-// the generated code jumps to out_of_range. receiver, key and elements are
-// unchanged throughout the generated code sequence.
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register untagged_key,
- Register result,
- Label* not_pixel_array,
- Label* key_not_smi,
- Label* out_of_range);
-
-// Generate code to store an element into a pixel array, clamping values between
-// [0..255]. The receiver is assumed to not be a smi and to have elements; the
-// caller must guarantee this precondition. If key is not a smi, then the
-// generated code branches to key_not_smi. Callers can specify NULL for
-// key_not_smi to signal that a smi check has already been performed on key so
-// that the smi check is not generated. If the value is not a smi, the generated
-// code will branch to value_not_smi. If the receiver doesn't have pixel array
-// elements, the generated code will branch to not_pixel_array, unless
-// not_pixel_array is NULL, in which case the caller must ensure that the
-// receiver has pixel array elements. If key is not a valid index within the
-// bounds of the pixel array, the generated code jumps to out_of_range.
-void GenerateFastPixelArrayStore(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register elements,
- Register scratch1,
- bool load_elements_from_receiver,
- Label* key_not_smi,
- Label* value_not_smi,
- Label* not_pixel_array,
- Label* out_of_range);
+class StringDictionaryLookupStub: public CodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ StringDictionaryLookupStub(Register dictionary,
+ Register result,
+ Register index,
+ LookupMode mode)
+ : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
+
+ void Generate(MacroAssembler* masm);
+
+ MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ String* name,
+ Register r0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1);
+
+ private:
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ static const int kCapacityOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("StringDictionaryLookupStub\n");
+ }
+#endif
+
+ Major MajorKey() { return StringDictionaryNegativeLookup; }
+
+ int MinorKey() {
+ return DictionaryBits::encode(dictionary_.code()) |
+ ResultBits::encode(result_.code()) |
+ IndexBits::encode(index_.code()) |
+ LookupModeBits::encode(mode_);
+ }
+
+ class DictionaryBits: public BitField<int, 0, 3> {};
+ class ResultBits: public BitField<int, 3, 3> {};
+ class IndexBits: public BitField<int, 6, 3> {};
+ class LookupModeBits: public BitField<LookupMode, 9, 1> {};
+
+ Register dictionary_;
+ Register result_;
+ Register index_;
+ LookupMode mode_;
+};
+
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/codegen-ia32-inl.h b/deps/v8/src/ia32/codegen-ia32-inl.h
deleted file mode 100644
index 49c706d13..000000000
--- a/deps/v8/src/ia32/codegen-ia32-inl.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_IA32_CODEGEN_IA32_INL_H_
-#define V8_IA32_CODEGEN_IA32_INL_H_
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_CODEGEN_IA32_INL_H_
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 3a2753d27..572c36c88 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,81 +29,15 @@
#if defined(V8_TARGET_ARCH_IA32)
-#include "codegen-inl.h"
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
+#include "codegen.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
-
-// -------------------------------------------------------------------------
-// Platform-specific FrameRegisterState functions.
-
-void FrameRegisterState::Save(MacroAssembler* masm) const {
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- int action = registers_[i];
- if (action == kPush) {
- __ push(RegisterAllocator::ToRegister(i));
- } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
- __ mov(Operand(ebp, action), RegisterAllocator::ToRegister(i));
- }
- }
-}
-
-
-void FrameRegisterState::Restore(MacroAssembler* masm) const {
- // Restore registers in reverse order due to the stack.
- for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
- int action = registers_[i];
- if (action == kPush) {
- __ pop(RegisterAllocator::ToRegister(i));
- } else if (action != kIgnore) {
- action &= ~kSyncedFlag;
- __ mov(RegisterAllocator::ToRegister(i), Operand(ebp, action));
- }
- }
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
- frame_state_.Save(masm_);
-}
-
-
-void DeferredCode::RestoreRegisters() {
- frame_state_.Restore(masm_);
-}
-
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- frame_state_->Save(masm);
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- frame_state_->Restore(masm);
-}
-
-
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterInternalFrame();
}
@@ -114,10036 +48,21 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
}
-// -------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
- : owner_(owner),
- destination_(NULL),
- previous_(NULL) {
- owner_->set_state(this);
-}
-
-
-CodeGenState::CodeGenState(CodeGenerator* owner,
- ControlDestination* destination)
- : owner_(owner),
- destination_(destination),
- previous_(owner->state()) {
- owner_->set_state(this);
-}
-
-
-CodeGenState::~CodeGenState() {
- ASSERT(owner_->state() == this);
- owner_->set_state(previous_);
-}
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation.
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
- : deferred_(8),
- masm_(masm),
- info_(NULL),
- frame_(NULL),
- allocator_(NULL),
- state_(NULL),
- loop_nesting_(0),
- in_safe_int32_mode_(false),
- safe_int32_mode_enabled_(true),
- function_return_is_shadowed_(false),
- in_spilled_code_(false),
- jit_cookie_((FLAG_mask_constants_with_cookie) ? V8::RandomPrivate() : 0) {
-}
-
-
-// Calling conventions:
-// ebp: caller's frame pointer
-// esp: stack pointer
-// edi: called JS function
-// esi: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
- // Record the position for debugging purposes.
- CodeForFunctionPosition(info->function());
- Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
-
- // Initialize state.
- info_ = info;
- ASSERT(allocator_ == NULL);
- RegisterAllocator register_allocator(this);
- allocator_ = &register_allocator;
- ASSERT(frame_ == NULL);
- frame_ = new VirtualFrame();
- set_in_spilled_code(false);
-
- // Adjust for function-level loop nesting.
- ASSERT_EQ(0, loop_nesting_);
- loop_nesting_ = info->is_in_loop() ? 1 : 0;
-
- JumpTarget::set_compiling_deferred_code(false);
-
- {
- CodeGenState state(this);
-
- // Entry:
- // Stack: receiver, arguments, return address.
- // ebp: caller's frame pointer
- // esp: stack pointer
- // edi: called JS function
- // esi: callee's context
- allocator_->Initialize();
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- frame_->SpillAll();
- __ int3();
- }
-#endif
-
- frame_->Enter();
-
- // Allocate space for locals and initialize them.
- frame_->AllocateStackSlots();
-
- // Allocate the local context if needed.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ allocate local context");
- // Allocate local context.
- // Get outer context and create a new context based on it.
- frame_->PushFunction();
- Result context;
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- context = frame_->CallStub(&stub, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kNewContext, 1);
- }
-
- // Update context local.
- frame_->SaveContextRegister();
-
- // Verify that the runtime call result and esi agree.
- if (FLAG_debug_code) {
- __ cmp(context.reg(), Operand(esi));
- __ Assert(equal, "Runtime::NewContext should end up in esi");
- }
- }
-
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it; instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope()->num_parameters(); i++) {
- Variable* par = scope()->parameter(i);
- Slot* slot = par->AsSlot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- // The use of SlotOperand below is safe in unspilled code
- // because the slot is guaranteed to be a context slot.
- //
- // There are no parameters in the global scope.
- ASSERT(!scope()->is_global_scope());
- frame_->PushParameterAt(i);
- Result value = frame_->Pop();
- value.ToRegister();
-
- // SlotOperand loads context.reg() with the context object
- // stored to, used below in RecordWrite.
- Result context = allocator_->Allocate();
- ASSERT(context.is_valid());
- __ mov(SlotOperand(slot, context.reg()), value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- frame_->Spill(context.reg());
- frame_->Spill(value.reg());
- __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
- }
- }
- }
-
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
-
- // Initialize ThisFunction reference if present.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->Push(Factory::the_hole_value());
- StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
- }
-
-
- // Initialize the function return target after the locals are set
- // up, because it needs the expected frame height from the frame.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
- function_return_is_shadowed_ = false;
-
- // Generate code to 'execute' declarations and initialize functions
- // (source elements). In case of an illegal redeclaration we need to
- // handle that instead of processing the declarations.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ illegal redeclarations");
- scope()->VisitIllegalRedeclaration(this);
- } else {
- Comment cmnt(masm_, "[ declarations");
- ProcessDeclarations(scope()->declarations());
- // Bail out if a stack-overflow exception occurred when processing
- // declarations.
- if (HasStackOverflow()) return;
- }
-
- if (FLAG_trace) {
- frame_->CallRuntime(Runtime::kTraceEnter, 0);
- // Ignore the return value.
- }
- CheckStack();
-
- // Compile the body of the function in a vanilla state. Don't
- // bother compiling all the code if the scope has an illegal
- // redeclaration.
- if (!scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
- bool is_builtin = Bootstrapper::IsActive();
- bool should_trace =
- is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
- if (should_trace) {
- frame_->CallRuntime(Runtime::kDebugTrace, 0);
- // Ignore the return value.
- }
-#endif
- VisitStatements(info->function()->body());
-
- // Handle the return from the function.
- if (has_valid_frame()) {
- // If there is a valid frame, control flow can fall off the end of
- // the body. In that case there is an implicit return statement.
- ASSERT(!function_return_is_shadowed_);
- CodeForReturnPosition(info->function());
- frame_->PrepareForReturn();
- Result undefined(Factory::undefined_value());
- if (function_return_.is_bound()) {
- function_return_.Jump(&undefined);
- } else {
- function_return_.Bind(&undefined);
- GenerateReturnSequence(&undefined);
- }
- } else if (function_return_.is_linked()) {
- // If the return target has dangling jumps to it, then we have not
- // yet generated the return sequence. This can happen when (a)
- // control does not flow off the end of the body so we did not
- // compile an artificial return statement just above, and (b) there
- // are return statements in the body but (c) they are all shadowed.
- Result return_value;
- function_return_.Bind(&return_value);
- GenerateReturnSequence(&return_value);
- }
- }
- }
-
- // Adjust for function-level loop nesting.
- ASSERT_EQ(loop_nesting_, info->is_in_loop() ? 1 : 0);
- loop_nesting_ = 0;
-
- // Code generation state must be reset.
- ASSERT(state_ == NULL);
- ASSERT(!function_return_is_shadowed_);
- function_return_.Unuse();
- DeleteFrame();
-
- // Process any deferred code using the register allocator.
- if (!HasStackOverflow()) {
- JumpTarget::set_compiling_deferred_code(true);
- ProcessDeferred();
- JumpTarget::set_compiling_deferred_code(false);
- }
-
- // There is no need to delete the register allocator, it is a
- // stack-allocated local.
- allocator_ = NULL;
-}
-
-
-Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER:
- return frame_->ParameterAt(index);
-
- case Slot::LOCAL:
- return frame_->LocalAt(index);
-
- case Slot::CONTEXT: {
- // Follow the context chain if necessary.
- ASSERT(!tmp.is(esi)); // do not overwrite context register
- Register context = esi;
- int chain_length = scope()->ContextChainLength(slot->var()->scope());
- for (int i = 0; i < chain_length; i++) {
- // Load the closure.
- // (All contexts, even 'with' contexts, have a closure,
- // and it is the same for all contexts inside a function.
- // There is no need to go to the function context first.)
- __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- // We may have a 'with' context now. Get the function context.
- // (In fact this mov may never be needed, since the scope analysis
- // may not permit a direct context access in this case and thus we are
- // always at a function context. However, it is safe to dereference
- // because the function context of a function context is itself. Before
- // deleting this mov we should try to create a counter-example first,
- // though...)
- __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
- }
-
- default:
- UNREACHABLE();
- return Operand(eax);
- }
-}
-
-
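
The chain walk in the removed SlotOperand is clearer with the assembly stripped away. A minimal sketch with hypothetical stand-in types: every context stores its closure, and every closure stores the outer function context it was created in, so each hop moves one scope outward.

struct Context;
struct JSFunction { Context* context; };  // The function's outer context.
struct Context { JSFunction* closure; };  // Every context has a closure.

static Context* WalkContextChain(Context* context, int chain_length) {
  for (int i = 0; i < chain_length; ++i) {
    // Load the closure, then its function context, exactly as the two movs do.
    context = context->closure->context;
  }
  return context;
}
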
-Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
- Result tmp,
- JumpTarget* slow) {
- ASSERT(slot->type() == Slot::CONTEXT);
- ASSERT(tmp.is_register());
- Register context = esi;
-
- for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
- }
- __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
- }
- }
- // Check that last extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal, not_taken);
- __ mov(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp.reg(), slot->index());
-}
-
-
-// Emit code to load the value of an expression to the top of the
-// frame. If the expression is boolean-valued it may be compiled (or
-// partially compiled) into control flow to the control destination.
-// If force_control is true, control flow is forced.
-void CodeGenerator::LoadCondition(Expression* expr,
- ControlDestination* dest,
- bool force_control) {
- ASSERT(!in_spilled_code());
- int original_height = frame_->height();
-
- { CodeGenState new_state(this, dest);
- Visit(expr);
-
- // If we hit a stack overflow, we may not have actually visited
- // the expression. In that case, we ensure that we have a
- // valid-looking frame state because we will continue to generate
- // code as we unwind the C++ stack.
- //
- // It's possible to have both a stack overflow and a valid frame
- // state (eg, a subexpression overflowed, visiting it returned
- // with a dummied frame state, and visiting this expression
- // returned with a normal-looking state).
- if (HasStackOverflow() &&
- !dest->is_used() &&
- frame_->height() == original_height) {
- dest->Goto(true);
- }
- }
-
- if (force_control && !dest->is_used()) {
- // Convert the TOS value into flow to the control destination.
- ToBoolean(dest);
- }
-
- ASSERT(!(force_control && !dest->is_used()));
- ASSERT(dest->is_used() || frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Load(expression);
- frame_->SpillAll();
- set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::LoadInSafeInt32Mode(Expression* expr,
- BreakTarget* unsafe_bailout) {
- set_unsafe_bailout(unsafe_bailout);
- set_in_safe_int32_mode(true);
- Load(expr);
- Result value = frame_->Pop();
- ASSERT(frame_->HasNoUntaggedInt32Elements());
- if (expr->GuaranteedSmiResult()) {
- ConvertInt32ResultToSmi(&value);
- } else {
- ConvertInt32ResultToNumber(&value);
- }
- set_in_safe_int32_mode(false);
- set_unsafe_bailout(NULL);
- frame_->Push(&value);
-}
-
-
-void CodeGenerator::LoadWithSafeInt32ModeDisabled(Expression* expr) {
- set_safe_int32_mode_enabled(false);
- Load(expr);
- set_safe_int32_mode_enabled(true);
-}
-
-
-void CodeGenerator::ConvertInt32ResultToSmi(Result* value) {
- ASSERT(value->is_untagged_int32());
- if (value->is_register()) {
- __ add(value->reg(), Operand(value->reg()));
- } else {
- ASSERT(value->is_constant());
- ASSERT(value->handle()->IsSmi());
- }
- value->set_untagged_int32(false);
- value->set_type_info(TypeInfo::Smi());
-}
-
-
-void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
- ASSERT(value->is_untagged_int32());
- if (value->is_register()) {
- Register val = value->reg();
- JumpTarget done;
- __ add(val, Operand(val));
- done.Branch(no_overflow, value);
- __ sar(val, 1);
- // If there was an overflow, bits 30 and 31 of the original number disagree.
- __ xor_(val, 0x80000000u);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ cvtsi2sd(xmm0, Operand(val));
- } else {
- // Move val to ST[0] in the FPU
- // Push and pop are safe with respect to the virtual frame because
- // all synced elements are below the actual stack pointer.
- __ push(val);
- __ fild_s(Operand(esp, 0));
- __ pop(val);
- }
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_register());
- Label allocation_failed;
- __ AllocateHeapNumber(val, scratch.reg(),
- no_reg, &allocation_failed);
- VirtualFrame* clone = new VirtualFrame(frame_);
- scratch.Unuse();
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0);
- } else {
- __ fstp_d(FieldOperand(val, HeapNumber::kValueOffset));
- }
- done.Jump(value);
-
- // Establish the virtual frame, cloned from the point where
- // AllocateHeapNumber jumped to allocation_failed.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- __ bind(&allocation_failed);
- if (!CpuFeatures::IsSupported(SSE2)) {
- // Pop the value from the floating point stack.
- __ fstp(0);
- }
- unsafe_bailout_->Jump();
-
- done.Bind(value);
- } else {
- ASSERT(value->is_constant());
- }
- value->set_untagged_int32(false);
- value->set_type_info(TypeInfo::Integer32());
-}
-
-
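
The two removed converters exploit ia32 smi tagging: a smi is the integer shifted left one bit with a zero tag, so add val, val both tags the value and detects overflow in one instruction; on overflow, bits 30 and 31 of the original disagreed, so shifting back arithmetically and XOR-ing bit 31 recovers the original. A minimal sketch (hypothetical helpers):

#include <cstdint>

// Tags value as a smi (value << 1); returns false on overflow, like the add.
static bool TrySmiTag(int32_t value, int32_t* tagged) {
  int32_t doubled = static_cast<int32_t>(static_cast<uint32_t>(value) * 2u);
  *tagged = doubled;
  return (doubled >> 1) == value;  // False iff bits 30 and 31 disagreed.
}

// After an overflowing add/sar pair, only bit 31 is wrong; XOR repairs it.
static int32_t RecoverOriginal(int32_t doubled) {
  return (doubled >> 1) ^ static_cast<int32_t>(0x80000000u);
}
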
-void CodeGenerator::Load(Expression* expr) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(!in_spilled_code());
-
- // If the expression should be a side-effect-free 32-bit int computation,
- // compile that SafeInt32 path, and a bailout path.
- if (!in_safe_int32_mode() &&
- safe_int32_mode_enabled() &&
- expr->side_effect_free() &&
- expr->num_bit_ops() > 2 &&
- CpuFeatures::IsSupported(SSE2)) {
- BreakTarget unsafe_bailout;
- JumpTarget done;
- unsafe_bailout.set_expected_height(frame_->height());
- LoadInSafeInt32Mode(expr, &unsafe_bailout);
- done.Jump();
-
- if (unsafe_bailout.is_linked()) {
- unsafe_bailout.Bind();
- LoadWithSafeInt32ModeDisabled(expr);
- }
- done.Bind();
- } else {
- JumpTarget true_target;
- JumpTarget false_target;
- ControlDestination dest(&true_target, &false_target, true);
- LoadCondition(expr, &dest, false);
-
- if (dest.false_was_fall_through()) {
- // The false target was just bound.
- JumpTarget loaded;
- frame_->Push(Factory::false_value());
- // There may be dangling jumps to the true target.
- if (true_target.is_linked()) {
- loaded.Jump();
- true_target.Bind();
- frame_->Push(Factory::true_value());
- loaded.Bind();
- }
-
- } else if (dest.is_used()) {
- // There is true, and possibly false, control flow (with true as
- // the fall through).
- JumpTarget loaded;
- frame_->Push(Factory::true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- false_target.Bind();
- frame_->Push(Factory::false_value());
- loaded.Bind();
- }
-
- } else {
- // We have a valid value on top of the frame, but we still may
- // have dangling jumps to the true and false targets from nested
- // subexpressions (e.g., the left subexpressions of the
- // short-circuited boolean operators).
- ASSERT(has_valid_frame());
- if (true_target.is_linked() || false_target.is_linked()) {
- JumpTarget loaded;
- loaded.Jump(); // Don't lose the current TOS.
- if (true_target.is_linked()) {
- true_target.Bind();
- frame_->Push(Factory::true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- }
- }
- if (false_target.is_linked()) {
- false_target.Bind();
- frame_->Push(Factory::false_value());
- }
- loaded.Bind();
- }
- }
- }
- ASSERT(has_valid_frame());
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadGlobal() {
- if (in_spilled_code()) {
- frame_->EmitPush(GlobalObjectOperand());
- } else {
- Result temp = allocator_->Allocate();
- __ mov(temp.reg(), GlobalObjectOperand());
- frame_->Push(&temp);
- }
-}
-
-
-void CodeGenerator::LoadGlobalReceiver() {
- Result temp = allocator_->Allocate();
- Register reg = temp.reg();
- __ mov(reg, GlobalObjectOperand());
- __ mov(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
- frame_->Push(&temp);
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
- // Special handling of identifiers as subexpressions of typeof.
- Variable* variable = expr->AsVariableProxy()->AsVariable();
- if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // For a global variable we build the property reference
- // <global>.<variable> and perform a (regular non-contextual) property
- // load to make sure we do not get reference errors.
- Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
- Literal key(variable->name());
- Property property(&global, &key, RelocInfo::kNoPosition);
- Reference ref(this, &property);
- ref.GetValue();
- } else if (variable != NULL && variable->AsSlot() != NULL) {
- // For a variable that rewrites to a slot, we signal it is the immediate
- // subexpression of a typeof.
- LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
- } else {
- // Anything else can be handled normally.
- Load(expr);
- }
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
- if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
- ASSERT(scope()->arguments_shadow() != NULL);
- // We don't want to do lazy arguments allocation for functions that
- // have heap-allocated contexts, because it interferes with the
- // uninitialized const tracking in the context objects.
- return (scope()->num_heap_slots() > 0)
- ? EAGER_ARGUMENTS_ALLOCATION
- : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-Result CodeGenerator::StoreArgumentsObject(bool initial) {
- ArgumentsAllocationMode mode = ArgumentsMode();
- ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
- Comment cmnt(masm_, "[ store arguments object");
- if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
- // When using lazy arguments allocation, we store the arguments marker value
- // as a sentinel indicating that the arguments object hasn't been
- // allocated yet.
- frame_->Push(Factory::arguments_marker());
- } else {
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- frame_->PushFunction();
- frame_->PushReceiverSlotAddress();
- frame_->Push(Smi::FromInt(scope()->num_parameters()));
- Result result = frame_->CallStub(&stub, 3);
- frame_->Push(&result);
- }
-
- Variable* arguments = scope()->arguments();
- Variable* shadow = scope()->arguments_shadow();
- ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
- ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
- JumpTarget done;
- bool skip_arguments = false;
- if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has
- // already been written to. This can happen if the function
- // has a local variable named 'arguments'.
- LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
- Result probe = frame_->Pop();
- if (probe.is_constant()) {
- // We have to skip updating the arguments object if it has
- // been assigned a proper value.
- skip_arguments = !probe.handle()->IsArgumentsMarker();
- } else {
- __ cmp(Operand(probe.reg()), Immediate(Factory::arguments_marker()));
- probe.Unuse();
- done.Branch(not_equal);
- }
- }
- if (!skip_arguments) {
- StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
- if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
- }
- StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
- return frame_->Pop();
-}
-
-//------------------------------------------------------------------------------
-// CodeGenerator implementation of variables, lookups, and stores.
-
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
- // References are loaded from both spilled and unspilled code. Set the
- // state to unspilled to allow that (and explicitly spill after
- // construction at the construction sites).
- bool was_in_spilled_code = in_spilled_code_;
- in_spilled_code_ = false;
-
- Comment cmnt(masm_, "[ LoadReference");
- Expression* e = ref->expression();
- Property* property = e->AsProperty();
- Variable* var = e->AsVariableProxy()->AsVariable();
-
- if (property != NULL) {
- // The expression is either a property or a variable proxy that rewrites
- // to a property.
- Load(property->obj());
- if (property->key()->IsPropertyName()) {
- ref->set_type(Reference::NAMED);
- } else {
- Load(property->key());
- ref->set_type(Reference::KEYED);
- }
- } else if (var != NULL) {
- // The expression is a variable proxy that does not rewrite to a
- // property. Global variables are treated as named property references.
- if (var->is_global()) {
- // If eax is free, the register allocator prefers it. Thus the code
- // generator will load the global object into eax, which is where
- // LoadIC wants it. Most uses of Reference call LoadIC directly
- // after the reference is created.
- frame_->Spill(eax);
- LoadGlobal();
- ref->set_type(Reference::NAMED);
- } else {
- ASSERT(var->AsSlot() != NULL);
- ref->set_type(Reference::SLOT);
- }
- } else {
- // Anything else is a runtime error.
- Load(e);
- frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
- }
-
- in_spilled_code_ = was_in_spilled_code;
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
-// convert it to a boolean in the condition code register or jump to
-// 'false_target'/'true_target' as appropriate.
-void CodeGenerator::ToBoolean(ControlDestination* dest) {
- Comment cmnt(masm_, "[ ToBoolean");
-
- // The value to convert should be popped from the frame.
- Result value = frame_->Pop();
- value.ToRegister();
-
- if (value.is_integer32()) { // Also takes Smi case.
- Comment cmnt(masm_, "ONLY_INTEGER_32");
- if (FLAG_debug_code) {
- Label ok;
- __ AbortIfNotNumber(value.reg());
- __ test(value.reg(), Immediate(kSmiTagMask));
- __ j(zero, &ok);
- __ fldz();
- __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
- __ FCmp();
- __ j(not_zero, &ok);
- __ Abort("Smi was wrapped in HeapNumber in output from bitop");
- __ bind(&ok);
- }
- // In the integer32 case there are no Smis hidden in heap numbers, so we
- // need only test for Smi zero.
- __ test(value.reg(), Operand(value.reg()));
- dest->false_target()->Branch(zero);
- value.Unuse();
- dest->Split(not_zero);
- } else if (value.is_number()) {
- Comment cmnt(masm_, "ONLY_NUMBER");
- // Fast case if TypeInfo indicates only numbers.
- if (FLAG_debug_code) {
- __ AbortIfNotNumber(value.reg());
- }
- // Smi => false iff zero.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(value.reg(), Operand(value.reg()));
- dest->false_target()->Branch(zero);
- __ test(value.reg(), Immediate(kSmiTagMask));
- dest->true_target()->Branch(zero);
- __ fldz();
- __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
- __ FCmp();
- value.Unuse();
- dest->Split(not_zero);
- } else {
- // Fast case checks.
- // 'false' => false.
- __ cmp(value.reg(), Factory::false_value());
- dest->false_target()->Branch(equal);
-
- // 'true' => true.
- __ cmp(value.reg(), Factory::true_value());
- dest->true_target()->Branch(equal);
-
- // 'undefined' => false.
- __ cmp(value.reg(), Factory::undefined_value());
- dest->false_target()->Branch(equal);
-
- // Smi => false iff zero.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(value.reg(), Operand(value.reg()));
- dest->false_target()->Branch(zero);
- __ test(value.reg(), Immediate(kSmiTagMask));
- dest->true_target()->Branch(zero);
-
- // Call the stub for all other cases.
- frame_->Push(&value); // Undo the Pop() from above.
- ToBooleanStub stub;
- Result temp = frame_->CallStub(&stub, 1);
- // Convert the result to a condition code.
- __ test(temp.reg(), Operand(temp.reg()));
- temp.Unuse();
- dest->Split(not_equal);
- }
-}
-
-
-// Perform or call the specialized stub for a binary operation. Requires the
-// three registers left, right and dst to be distinct and spilled. This
-// deferred operation has up to three entry points: The main one calls the
-// runtime system. The second is for when the result is a non-Smi. The
-// third is for when at least one of the inputs is non-Smi and we have SSE2.
-class DeferredInlineBinaryOperation: public DeferredCode {
- public:
- DeferredInlineBinaryOperation(Token::Value op,
- Register dst,
- Register left,
- Register right,
- TypeInfo left_info,
- TypeInfo right_info,
- OverwriteMode mode)
- : op_(op), dst_(dst), left_(left), right_(right),
- left_info_(left_info), right_info_(right_info), mode_(mode) {
- set_comment("[ DeferredInlineBinaryOperation");
- ASSERT(!left.is(right));
- }
-
- virtual void Generate();
-
- // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
- // Exit().
- virtual bool AutoSaveAndRestore() { return false; }
-
- void JumpToAnswerOutOfRange(Condition cond);
- void JumpToConstantRhs(Condition cond, Smi* smi_value);
- Label* NonSmiInputLabel();
-
- private:
- void GenerateAnswerOutOfRange();
- void GenerateNonSmiInput();
-
- Token::Value op_;
- Register dst_;
- Register left_;
- Register right_;
- TypeInfo left_info_;
- TypeInfo right_info_;
- OverwriteMode mode_;
- Label answer_out_of_range_;
- Label non_smi_input_;
- Label constant_rhs_;
- Smi* smi_value_;
-};
-
-
-Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
- if (Token::IsBitOp(op_) && CpuFeatures::IsSupported(SSE2)) {
- return &non_smi_input_;
- } else {
- return entry_label();
- }
-}
-
-
-void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) {
- __ j(cond, &answer_out_of_range_);
-}
-
-
-void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond,
- Smi* smi_value) {
- smi_value_ = smi_value;
- __ j(cond, &constant_rhs_);
-}
-
-
-void DeferredInlineBinaryOperation::Generate() {
- // Registers are not saved implicitly for this stub, so we should not
- // tread on the registers that were not passed to us.
- if (CpuFeatures::IsSupported(SSE2) &&
- ((op_ == Token::ADD) ||
- (op_ == Token::SUB) ||
- (op_ == Token::MUL) ||
- (op_ == Token::DIV))) {
- CpuFeatures::Scope use_sse2(SSE2);
- Label call_runtime, after_alloc_failure;
- Label left_smi, right_smi, load_right, do_op;
- if (!left_info_.IsSmi()) {
- __ test(left_, Immediate(kSmiTagMask));
- __ j(zero, &left_smi);
- if (!left_info_.IsNumber()) {
- __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(not_equal, &call_runtime);
- }
- __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_LEFT) {
- __ mov(dst_, left_);
- }
- __ jmp(&load_right);
-
- __ bind(&left_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left_);
- }
- __ SmiUntag(left_);
- __ cvtsi2sd(xmm0, Operand(left_));
- __ SmiTag(left_);
- if (mode_ == OVERWRITE_LEFT) {
- __ push(left_);
- __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
- __ pop(left_);
- }
-
- __ bind(&load_right);
- if (!right_info_.IsSmi()) {
- __ test(right_, Immediate(kSmiTagMask));
- __ j(zero, &right_smi);
- if (!right_info_.IsNumber()) {
- __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(not_equal, &call_runtime);
- }
- __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_RIGHT) {
- __ mov(dst_, right_);
- } else if (mode_ == NO_OVERWRITE) {
- __ push(left_);
- __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
- __ pop(left_);
- }
- __ jmp(&do_op);
-
- __ bind(&right_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(right_);
- }
- __ SmiUntag(right_);
- __ cvtsi2sd(xmm1, Operand(right_));
- __ SmiTag(right_);
- if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
- __ push(left_);
- __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
- __ pop(left_);
- }
-
- __ bind(&do_op);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
- Exit();
-
- __ bind(&after_alloc_failure);
- __ pop(left_);
- __ bind(&call_runtime);
- }
- // Register spilling is not done implicitly for this stub.
- // We can't postpone it any longer now, though.
- SaveRegisters();
-
- GenericBinaryOpStub stub(op_,
- mode_,
- NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(left_info_, right_info_));
- stub.GenerateCall(masm_, left_, right_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
- RestoreRegisters();
- Exit();
-
- if (non_smi_input_.is_linked() || constant_rhs_.is_linked()) {
- GenerateNonSmiInput();
- }
- if (answer_out_of_range_.is_linked()) {
- GenerateAnswerOutOfRange();
- }
-}
-
-
-void DeferredInlineBinaryOperation::GenerateNonSmiInput() {
- // We know at least one of the inputs was not a Smi.
- // This is a third entry point into the deferred code.
- // We must not overwrite left_ because we want to be able
- // to call the handling code for a non-smi answer, and that
- // code might want to overwrite the heap number in left_.
- ASSERT(!right_.is(dst_));
- ASSERT(!left_.is(dst_));
- ASSERT(!left_.is(right_));
- // This entry point is used for bit ops where the right hand side
- // is a constant Smi and the left hand side is a heap object. It
- // is also used for bit ops where both sides are unknown, but where
- // at least one of them is a heap object.
- bool rhs_is_constant = constant_rhs_.is_linked();
- // We can't generate code for both cases.
- ASSERT(!non_smi_input_.is_linked() || !constant_rhs_.is_linked());
-
- if (FLAG_debug_code) {
- __ int3(); // We don't fall through into this code.
- }
-
- __ bind(&non_smi_input_);
-
- if (rhs_is_constant) {
- __ bind(&constant_rhs_);
- // In this case the input is a heap object and it is in the dst_ register.
- // The left_ and right_ registers have not been initialized yet.
- __ mov(right_, Immediate(smi_value_));
- __ mov(left_, Operand(dst_));
- if (!CpuFeatures::IsSupported(SSE2)) {
- __ jmp(entry_label());
- return;
- } else {
- CpuFeatures::Scope use_sse2(SSE2);
- __ JumpIfNotNumber(dst_, left_info_, entry_label());
- __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
- __ SmiUntag(right_);
- }
- } else {
- // We know we have SSE2 here because otherwise the label is not linked (see
- // NonSmiInputLabel).
- CpuFeatures::Scope use_sse2(SSE2);
- // Handle the non-constant right hand side situation:
- if (left_info_.IsSmi()) {
- // Right is a heap object.
- __ JumpIfNotNumber(right_, right_info_, entry_label());
- __ ConvertToInt32(right_, right_, dst_, right_info_, entry_label());
- __ mov(dst_, Operand(left_));
- __ SmiUntag(dst_);
- } else if (right_info_.IsSmi()) {
- // Left is a heap object.
- __ JumpIfNotNumber(left_, left_info_, entry_label());
- __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
- __ SmiUntag(right_);
- } else {
- // Here we don't know whether one or both operands are heap objects.
- Label only_right_is_heap_object, got_both;
- __ mov(dst_, Operand(left_));
- __ SmiUntag(dst_, &only_right_is_heap_object);
- // Left was a heap object.
- __ JumpIfNotNumber(left_, left_info_, entry_label());
- __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
- __ SmiUntag(right_, &got_both);
- // Both were heap objects.
- __ rcl(right_, 1); // Put tag back.
- __ JumpIfNotNumber(right_, right_info_, entry_label());
- __ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label());
- __ jmp(&got_both);
- __ bind(&only_right_is_heap_object);
- __ JumpIfNotNumber(right_, right_info_, entry_label());
- __ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label());
- __ bind(&got_both);
- }
- }
- ASSERT(op_ == Token::BIT_AND ||
- op_ == Token::BIT_OR ||
- op_ == Token::BIT_XOR ||
- right_.is(ecx));
- switch (op_) {
- case Token::BIT_AND: __ and_(dst_, Operand(right_)); break;
- case Token::BIT_OR: __ or_(dst_, Operand(right_)); break;
- case Token::BIT_XOR: __ xor_(dst_, Operand(right_)); break;
- case Token::SHR: __ shr_cl(dst_); break;
- case Token::SAR: __ sar_cl(dst_); break;
- case Token::SHL: __ shl_cl(dst_); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check that the *unsigned* result fits in a smi. Neither of
- // the two high-order bits can be set:
- // * 0x80000000: high bit would be lost when smi tagging.
- // * 0x40000000: this number would convert to negative when smi
- // tagging.
- __ test(dst_, Immediate(0xc0000000));
- __ j(not_zero, &answer_out_of_range_);
- } else {
- // Check that the *signed* result fits in a smi.
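- // Valid smi payloads are -0x40000000..0x3FFFFFFF; subtracting
- // 0xC0000000 maps exactly that range onto 0x00000000..0x7FFFFFFF, so
- // the sign flag is set iff the value is out of range (e.g.
- // 0x40000000 - 0xC0000000 = 0x80000000, negative).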
- __ cmp(dst_, 0xc0000000);
- __ j(negative, &answer_out_of_range_);
- }
- __ SmiTag(dst_);
- Exit();
-}
-
-
-void DeferredInlineBinaryOperation::GenerateAnswerOutOfRange() {
- Label after_alloc_failure2;
- Label allocation_ok;
- __ bind(&after_alloc_failure2);
- // We have to allocate a number, causing a GC, while keeping hold of
- // the answer in dst_. The answer is not a Smi. We can't just call the
- // runtime shift function here because we already threw away the inputs.
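- // The answer is split into two Smi-safe halves across the call.
- // Worked example: dst_ = 0xC0000005 shifts left to 0x8000000A
- // (carry = 1), and rcr puts 0x80000000 in left_; afterwards the shr
- // and or_ below reassemble 0xC0000005.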
- __ xor_(left_, Operand(left_));
- __ shl(dst_, 1); // Put top bit in carry flag and Smi tag the low bits.
- __ rcr(left_, 1); // Rotate with carry.
- __ push(dst_); // Smi tagged low 31 bits.
- __ push(left_); // 0 or 0x80000000, which is Smi tagged in both cases.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- if (!left_.is(eax)) {
- __ mov(left_, eax);
- }
- __ pop(right_); // High bit.
- __ pop(dst_); // Low 31 bits.
- __ shr(dst_, 1); // Put 0 in top bit.
- __ or_(dst_, Operand(right_));
- __ jmp(&allocation_ok);
-
- // This is the second entry point to the deferred code. It is used only by
- // the bit operations.
- // The dst_ register has the answer. It is not Smi tagged. If mode_ is
- // OVERWRITE_LEFT then left_ must contain either an overwritable heap number
- // or a Smi.
- // Put a heap number pointer in left_.
- __ bind(&answer_out_of_range_);
- SaveRegisters();
- if (mode_ == OVERWRITE_LEFT) {
- __ test(left_, Immediate(kSmiTagMask));
- __ j(not_zero, &allocation_ok);
- }
- // This trashes right_.
- __ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
- __ bind(&allocation_ok);
- if (CpuFeatures::IsSupported(SSE2) && op_ != Token::SHR) {
- CpuFeatures::Scope use_sse2(SSE2);
- ASSERT(Token::IsBitOp(op_));
- // Signed conversion.
- __ cvtsi2sd(xmm0, Operand(dst_));
- __ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0);
- } else {
- if (op_ == Token::SHR) {
- __ push(Immediate(0)); // High word of unsigned value.
- __ push(dst_);
- __ fild_d(Operand(esp, 0));
- __ Drop(2);
- } else {
- ASSERT(Token::IsBitOp(op_));
- __ push(dst_);
- __ fild_s(Operand(esp, 0)); // Signed conversion.
- __ pop(dst_);
- }
- __ fstp_d(FieldOperand(left_, HeapNumber::kValueOffset));
- }
- __ mov(dst_, left_);
- RestoreRegisters();
- Exit();
-}
-
-
-static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
- Token::Value op,
- const Result& right,
- const Result& left) {
- // Set TypeInfo of result according to the operation performed.
- // Rely on the fact that smis have a 31 bit payload on ia32.
- STATIC_ASSERT(kSmiValueSize == 31);
- switch (op) {
- case Token::COMMA:
- return right.type_info();
- case Token::OR:
- case Token::AND:
- // Result type can be either of the two input types.
- return operands_type;
- case Token::BIT_AND: {
- // Anding with positive Smis will give you a Smi.
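- // For example, x & 7 is always in the range 0..7, which fits a Smi
- // regardless of what x was.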
- if (right.is_constant() && right.handle()->IsSmi() &&
- Smi::cast(*right.handle())->value() >= 0) {
- return TypeInfo::Smi();
- } else if (left.is_constant() && left.handle()->IsSmi() &&
- Smi::cast(*left.handle())->value() >= 0) {
- return TypeInfo::Smi();
- }
- return (operands_type.IsSmi())
- ? TypeInfo::Smi()
- : TypeInfo::Integer32();
- }
- case Token::BIT_OR: {
- // Oring with negative Smis will give you a Smi.
- if (right.is_constant() && right.handle()->IsSmi() &&
- Smi::cast(*right.handle())->value() < 0) {
- return TypeInfo::Smi();
- } else if (left.is_constant() && left.handle()->IsSmi() &&
- Smi::cast(*left.handle())->value() < 0) {
- return TypeInfo::Smi();
- }
- return (operands_type.IsSmi())
- ? TypeInfo::Smi()
- : TypeInfo::Integer32();
- }
- case Token::BIT_XOR:
- // Result is always a 32 bit integer. Smi property of inputs is preserved.
- return (operands_type.IsSmi())
- ? TypeInfo::Smi()
- : TypeInfo::Integer32();
- case Token::SAR:
- if (left.is_smi()) return TypeInfo::Smi();
- // Result is a smi if we shift by a constant >= 1, otherwise an integer32.
- // Shift amount is masked with 0x1F (ECMA standard 11.7.2).
- return (right.is_constant() && right.handle()->IsSmi()
- && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
- ? TypeInfo::Smi()
- : TypeInfo::Integer32();
- case Token::SHR:
- // Result is a smi if we shift by a constant >= 2, an integer32 if
- // we shift by 1, and an unsigned 32-bit integer if we shift by 0.
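- // The bounds follow from the maximum results: x >>> 2 is at most
- // 0x3FFFFFFF (the Smi maximum), x >>> 1 is at most 0x7FFFFFFF (fits
- // int32 but not Smi), and x >>> 0 can be up to 0xFFFFFFFF.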
- if (right.is_constant() && right.handle()->IsSmi()) {
- int shift_amount = Smi::cast(*right.handle())->value() & 0x1F;
- if (shift_amount > 1) {
- return TypeInfo::Smi();
- } else if (shift_amount > 0) {
- return TypeInfo::Integer32();
- }
- }
- return TypeInfo::Number();
- case Token::ADD:
- if (operands_type.IsSmi()) {
- // The Integer32 range is big enough to take the sum of any two Smis.
- return TypeInfo::Integer32();
- } else if (operands_type.IsNumber()) {
- return TypeInfo::Number();
- } else if (left.type_info().IsString() || right.type_info().IsString()) {
- return TypeInfo::String();
- } else {
- return TypeInfo::Unknown();
- }
- case Token::SHL:
- return TypeInfo::Integer32();
- case Token::SUB:
- // The Integer32 range is big enough to take the difference of any two
- // Smis.
- return (operands_type.IsSmi()) ?
- TypeInfo::Integer32() :
- TypeInfo::Number();
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- // Result is always a number.
- return TypeInfo::Number();
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return TypeInfo::Unknown();
-}
-
-
-void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
- OverwriteMode overwrite_mode) {
- Comment cmnt(masm_, "[ BinaryOperation");
- Token::Value op = expr->op();
- Comment cmnt_token(masm_, Token::String(op));
-
- if (op == Token::COMMA) {
- // Simply discard left value.
- frame_->Nip(1);
- return;
- }
-
- Result right = frame_->Pop();
- Result left = frame_->Pop();
-
- if (op == Token::ADD) {
- const bool left_is_string = left.type_info().IsString();
- const bool right_is_string = right.type_info().IsString();
- // Make sure constant strings have string type info.
- ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
- left_is_string);
- ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
- right_is_string);
- if (left_is_string || right_is_string) {
- frame_->Push(&left);
- frame_->Push(&right);
- Result answer;
- if (left_is_string) {
- if (right_is_string) {
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
- } else {
- StringAddStub stub(NO_STRING_CHECK_LEFT_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
- }
- } else if (right_is_string) {
- StringAddStub stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
- }
- answer.set_type_info(TypeInfo::String());
- frame_->Push(&answer);
- return;
- }
- // Neither operand is known to be a string.
- }
-
- bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
- bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
- bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
- bool right_is_non_smi_constant =
- right.is_constant() && !right.handle()->IsSmi();
-
- if (left_is_smi_constant && right_is_smi_constant) {
- // Compute the constant result at compile time, and leave it on the frame.
- int left_int = Smi::cast(*left.handle())->value();
- int right_int = Smi::cast(*right.handle())->value();
- if (FoldConstantSmis(op, left_int, right_int)) return;
- }
-
- // Get number type of left and right sub-expressions.
- TypeInfo operands_type =
- TypeInfo::Combine(left.type_info(), right.type_info());
-
- TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
-
- Result answer;
- if (left_is_non_smi_constant || right_is_non_smi_constant) {
- // Go straight to the slow case, with no smi code.
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_SMI_CODE_IN_STUB,
- operands_type);
- answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
- } else if (right_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
- false, overwrite_mode);
- } else if (left_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
- true, overwrite_mode);
- } else {
- // Set the flags based on the operation, type and loop nesting level.
- // Bit operations always assume they likely operate on Smis. Still only
- // generate the inline Smi check code if this operation is part of a loop.
- // For all other operations only inline the Smi check code for likely smis
- // if the operation is part of a loop.
- if (loop_nesting() > 0 &&
- (Token::IsBitOp(op) ||
- operands_type.IsInteger32() ||
- expr->type()->IsLikelySmi())) {
- answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
- } else {
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_GENERIC_BINARY_FLAGS,
- operands_type);
- answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
- }
- }
-
- answer.set_type_info(result_type);
- frame_->Push(&answer);
-}
-
-
-Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
- Result* left,
- Result* right) {
- if (stub->ArgsInRegistersSupported()) {
- stub->SetArgsInRegisters();
- return frame_->CallStub(stub, left, right);
- } else {
- frame_->Push(left);
- frame_->Push(right);
- return frame_->CallStub(stub, 2);
- }
-}
-
-
-bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
- Object* answer_object = Heap::undefined_value();
- switch (op) {
- case Token::ADD:
- if (Smi::IsValid(left + right)) {
- answer_object = Smi::FromInt(left + right);
- }
- break;
- case Token::SUB:
- if (Smi::IsValid(left - right)) {
- answer_object = Smi::FromInt(left - right);
- }
- break;
- case Token::MUL: {
- double answer = static_cast<double>(left) * right;
- if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
- // If the product is zero and the non-zero factor is negative,
- // the spec requires us to return floating point negative zero.
- if (answer != 0 || (left >= 0 && right >= 0)) {
- answer_object = Smi::FromInt(static_cast<int>(answer));
- }
- }
- }
- break;
- case Token::DIV:
- case Token::MOD:
- break;
- case Token::BIT_OR:
- answer_object = Smi::FromInt(left | right);
- break;
- case Token::BIT_AND:
- answer_object = Smi::FromInt(left & right);
- break;
- case Token::BIT_XOR:
- answer_object = Smi::FromInt(left ^ right);
- break;
-
- case Token::SHL: {
- int shift_amount = right & 0x1F;
- if (Smi::IsValid(left << shift_amount)) {
- answer_object = Smi::FromInt(left << shift_amount);
- }
- break;
- }
- case Token::SHR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- unsigned_left >>= shift_amount;
- if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
- answer_object = Smi::FromInt(unsigned_left);
- }
- break;
- }
- case Token::SAR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- if (left < 0) {
- // Perform arithmetic shift of a negative number by
- // complementing number, logical shifting, complementing again.
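- // Worked example: for left = -5 and shift 1, ~(-5) = 4, 4 >> 1 = 2,
- // ~2 = -3, which matches -5 >> 1 rounding toward minus infinity.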
- unsigned_left = ~unsigned_left;
- unsigned_left >>= shift_amount;
- unsigned_left = ~unsigned_left;
- } else {
- unsigned_left >>= shift_amount;
- }
- ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
- answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- if (answer_object == Heap::undefined_value()) {
- return false;
- }
- frame_->Push(Handle<Object>(answer_object));
- return true;
-}
-
-
-void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
- Result* right,
- JumpTarget* both_smi) {
- TypeInfo left_info = left->type_info();
- TypeInfo right_info = right->type_info();
- if (left_info.IsDouble() || left_info.IsString() ||
- right_info.IsDouble() || right_info.IsString()) {
- // We know that left and right are not both smi. Don't do any tests.
- return;
- }
-
- if (left->reg().is(right->reg())) {
- if (!left_info.IsSmi()) {
- __ test(left->reg(), Immediate(kSmiTagMask));
- both_smi->Branch(zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
- left->Unuse();
- right->Unuse();
- both_smi->Jump();
- }
- } else if (!left_info.IsSmi()) {
- if (!right_info.IsSmi()) {
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
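- // ORing the two values leaves the tag bit clear only if it is clear
- // in both, so a single test covers both registers.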
- __ mov(temp.reg(), left->reg());
- __ or_(temp.reg(), Operand(right->reg()));
- __ test(temp.reg(), Immediate(kSmiTagMask));
- temp.Unuse();
- both_smi->Branch(zero);
- } else {
- __ test(left->reg(), Immediate(kSmiTagMask));
- both_smi->Branch(zero);
- }
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
- if (!right_info.IsSmi()) {
- __ test(right->reg(), Immediate(kSmiTagMask));
- both_smi->Branch(zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
- left->Unuse();
- right->Unuse();
- both_smi->Jump();
- }
- }
-}
-
-
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- Register scratch,
- TypeInfo left_info,
- TypeInfo right_info,
- DeferredCode* deferred) {
- JumpIfNotBothSmiUsingTypeInfo(left,
- right,
- scratch,
- left_info,
- right_info,
- deferred->entry_label());
-}
-
-
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- Register scratch,
- TypeInfo left_info,
- TypeInfo right_info,
- Label* on_not_smi) {
- if (left.is(right)) {
- if (!left_info.IsSmi()) {
- __ test(left, Immediate(kSmiTagMask));
- __ j(not_zero, on_not_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left);
- }
- } else if (!left_info.IsSmi()) {
- if (!right_info.IsSmi()) {
- __ mov(scratch, left);
- __ or_(scratch, Operand(right));
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(not_zero, on_not_smi);
- } else {
- __ test(left, Immediate(kSmiTagMask));
- __ j(not_zero, on_not_smi);
- if (FLAG_debug_code) __ AbortIfNotSmi(right);
- }
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left);
- if (!right_info.IsSmi()) {
- __ test(right, Immediate(kSmiTagMask));
- __ j(not_zero, on_not_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(right);
- }
- }
-}
-
-
-// Implements a binary operation using a deferred code object and some
-// inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
- // Copy the type info because left and right may be overwritten.
- TypeInfo left_type_info = left->type_info();
- TypeInfo right_type_info = right->type_info();
- Token::Value op = expr->op();
- Result answer;
- // Special handling of div and mod because they use fixed registers.
- if (op == Token::DIV || op == Token::MOD) {
- // We need eax as the quotient register, edx as the remainder
- // register, neither left nor right in eax or edx, and left copied
- // to eax.
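- // (On ia32, idiv divides edx:eax by its operand and leaves the
- // quotient in eax and the remainder in edx, hence the fixed registers.)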
- Result quotient;
- Result remainder;
- bool left_is_in_eax = false;
- // Step 1: get eax for quotient.
- if ((left->is_register() && left->reg().is(eax)) ||
- (right->is_register() && right->reg().is(eax))) {
- // One or both is in eax. Use a fresh non-edx register for
- // them.
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (fresh.reg().is(edx)) {
- remainder = fresh;
- fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- }
- if (left->is_register() && left->reg().is(eax)) {
- quotient = *left;
- *left = fresh;
- left_is_in_eax = true;
- }
- if (right->is_register() && right->reg().is(eax)) {
- quotient = *right;
- *right = fresh;
- }
- __ mov(fresh.reg(), eax);
- } else {
- // Neither left nor right is in eax.
- quotient = allocator_->Allocate(eax);
- }
- ASSERT(quotient.is_register() && quotient.reg().is(eax));
- ASSERT(!(left->is_register() && left->reg().is(eax)));
- ASSERT(!(right->is_register() && right->reg().is(eax)));
-
- // Step 2: get edx for remainder if necessary.
- if (!remainder.is_valid()) {
- if ((left->is_register() && left->reg().is(edx)) ||
- (right->is_register() && right->reg().is(edx))) {
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (left->is_register() && left->reg().is(edx)) {
- remainder = *left;
- *left = fresh;
- }
- if (right->is_register() && right->reg().is(edx)) {
- remainder = *right;
- *right = fresh;
- }
- __ mov(fresh.reg(), edx);
- } else {
- // Neither left nor right is in edx.
- remainder = allocator_->Allocate(edx);
- }
- }
- ASSERT(remainder.is_register() && remainder.reg().is(edx));
- ASSERT(!(left->is_register() && left->reg().is(edx)));
- ASSERT(!(right->is_register() && right->reg().is(edx)));
-
- left->ToRegister();
- right->ToRegister();
- frame_->Spill(eax);
- frame_->Spill(edx);
- // DeferredInlineBinaryOperation requires all the registers that it is
- // told about to be spilled and distinct.
- Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
-
- // Check that left and right are smi tagged.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- (op == Token::DIV) ? eax : edx,
- left->reg(),
- distinct_right.reg(),
- left_type_info,
- right_type_info,
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), edx,
- left_type_info, right_type_info, deferred);
- if (!left_is_in_eax) {
- __ mov(eax, left->reg());
- }
- // Sign extend eax into edx:eax.
- __ cdq();
- // Check for 0 divisor.
- __ test(right->reg(), Operand(right->reg()));
- deferred->Branch(zero);
- // Divide edx:eax by the right operand.
- __ idiv(right->reg());
-
- // Complete the operation.
- if (op == Token::DIV) {
- // Check for negative zero result. If result is zero, and divisor
- // is negative, return a floating point negative zero. The
- // virtual frame is unchanged in this block, so local control flow
- // can use a Label rather than a JumpTarget. If the context of this
- // expression will treat -0 like 0, do not do this test.
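- // For example, 0 / -5 must evaluate to -0.0 rather than Smi zero,
- // so that combination is sent to the deferred code.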
- if (!expr->no_negative_zero()) {
- Label non_zero_result;
- __ test(left->reg(), Operand(left->reg()));
- __ j(not_zero, &non_zero_result);
- __ test(right->reg(), Operand(right->reg()));
- deferred->Branch(negative);
- __ bind(&non_zero_result);
- }
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by
- // the idiv instruction.
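- // Smi::kMinValue / -1 is 0x40000000, one past Smi::kMaxValue, so
- // the quotient is compared against that single out-of-range value.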
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- deferred->Branch(equal);
- // Check that the remainder is zero.
- __ test(edx, Operand(edx));
- deferred->Branch(not_zero);
- // Tag the result and store it in the quotient register.
- __ SmiTag(eax);
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = quotient;
- } else {
- ASSERT(op == Token::MOD);
- // Check for a negative zero result. If the result is zero, and
- // the dividend is negative, return a floating point negative
- // zero. The frame is unchanged in this block, so local control
- // flow can use a Label rather than a JumpTarget.
- if (!expr->no_negative_zero()) {
- Label non_zero_result;
- __ test(edx, Operand(edx));
- __ j(not_zero, &non_zero_result, taken);
- __ test(left->reg(), Operand(left->reg()));
- deferred->Branch(negative);
- __ bind(&non_zero_result);
- }
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = remainder;
- }
- ASSERT(answer.is_valid());
- return answer;
- }
-
- // Special handling of shift operations because they use fixed
- // registers.
- if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
- // Move left out of ecx if necessary.
- if (left->is_register() && left->reg().is(ecx)) {
- *left = allocator_->Allocate();
- ASSERT(left->is_valid());
- __ mov(left->reg(), ecx);
- }
- right->ToRegister(ecx);
- left->ToRegister();
- ASSERT(left->is_register() && !left->reg().is(ecx));
- ASSERT(right->is_register() && right->reg().is(ecx));
- if (left_type_info.IsSmi()) {
- if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
- }
- if (right_type_info.IsSmi()) {
- if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
- }
-
- // We will modify right; it must be spilled.
- frame_->Spill(ecx);
- // DeferredInlineBinaryOperation requires all the registers that it is told
- // about to be spilled and distinct. We know that right is ecx and left is
- // not ecx.
- frame_->Spill(left->reg());
-
- // Use a fresh answer register to avoid spilling the left operand.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
-
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- ecx,
- left_type_info,
- right_type_info,
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
- left_type_info, right_type_info,
- deferred->NonSmiInputLabel());
-
- // Untag both operands.
- __ mov(answer.reg(), left->reg());
- __ SmiUntag(answer.reg());
- __ SmiUntag(right->reg()); // Right is ecx.
-
- // Perform the operation.
- ASSERT(right->reg().is(ecx));
- switch (op) {
- case Token::SAR: {
- __ sar_cl(answer.reg());
- if (!left_type_info.IsSmi()) {
- // Check that the *signed* result fits in a smi.
- __ cmp(answer.reg(), 0xc0000000);
- deferred->JumpToAnswerOutOfRange(negative);
- }
- break;
- }
- case Token::SHR: {
- __ shr_cl(answer.reg());
- // Check that the *unsigned* result fits in a smi. Neither of
- // the two high-order bits can be set:
- // * 0x80000000: high bit would be lost when smi tagging.
- // * 0x40000000: this number would convert to negative when smi
- // tagging.
- // These two cases can only happen with shifts by 0 or 1 when
- // handed a valid smi. If the answer cannot be represented by a
- // smi, restore the left and right arguments, and jump to slow
- // case. The low bit of the left argument may be lost, but only
- // in a case where it is dropped anyway.
- __ test(answer.reg(), Immediate(0xc0000000));
- deferred->JumpToAnswerOutOfRange(not_zero);
- break;
- }
- case Token::SHL: {
- __ shl_cl(answer.reg());
- // Check that the *signed* result fits in a smi.
- __ cmp(answer.reg(), 0xc0000000);
- deferred->JumpToAnswerOutOfRange(negative);
- break;
- }
- default:
- UNREACHABLE();
- }
- // Smi-tag the result in answer.
- __ SmiTag(answer.reg());
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
- }
-
- // Handle the other binary operations.
- left->ToRegister();
- right->ToRegister();
- // DeferredInlineBinaryOperation requires all the registers that it is told
- // about to be spilled.
- Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
- // A newly allocated register answer is used to hold the answer. The
- // registers containing left and right are not modified so they don't
- // need to be spilled in the fast case.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
-
- // Perform the smi tag check.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- distinct_right.reg(),
- left_type_info,
- right_type_info,
- overwrite_mode);
- Label non_smi_bit_op;
- if (op != Token::BIT_OR) {
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
- left_type_info, right_type_info,
- deferred->NonSmiInputLabel());
- }
-
- __ mov(answer.reg(), left->reg());
- switch (op) {
- case Token::ADD:
- __ add(answer.reg(), Operand(right->reg()));
- deferred->Branch(overflow);
- break;
-
- case Token::SUB:
- __ sub(answer.reg(), Operand(right->reg()));
- deferred->Branch(overflow);
- break;
-
- case Token::MUL: {
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // Remove smi tag from the left operand (but keep sign).
- // Left-hand operand has been copied into answer.
- __ SmiUntag(answer.reg());
- // Do multiplication of smis, leaving result in answer.
- __ imul(answer.reg(), Operand(right->reg()));
- // Go slow on overflows.
- deferred->Branch(overflow);
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case. The frame is unchanged
- // in this block, so local control flow can use a Label rather
- // than a JumpTarget.
- if (!expr->no_negative_zero()) {
- Label non_zero_result;
- __ test(answer.reg(), Operand(answer.reg()));
- __ j(not_zero, &non_zero_result, taken);
- __ mov(answer.reg(), left->reg());
- __ or_(answer.reg(), Operand(right->reg()));
- deferred->Branch(negative);
- __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct.
- __ bind(&non_zero_result);
- }
- break;
- }
-
- case Token::BIT_OR:
- __ or_(answer.reg(), Operand(right->reg()));
- __ test(answer.reg(), Immediate(kSmiTagMask));
- __ j(not_zero, deferred->NonSmiInputLabel());
- break;
-
- case Token::BIT_AND:
- __ and_(answer.reg(), Operand(right->reg()));
- break;
-
- case Token::BIT_XOR:
- __ xor_(answer.reg(), Operand(right->reg()));
- break;
-
- default:
- UNREACHABLE();
- break;
- }
-
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
-}
-
-
-// Call the appropriate binary operation stub to compute src op value
-// and leave the result in dst.
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
- DeferredInlineSmiOperation(Token::Value op,
- Register dst,
- Register src,
- TypeInfo type_info,
- Smi* value,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- src_(src),
- type_info_(type_info),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- if (type_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
- set_comment("[ DeferredInlineSmiOperation");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Register src_;
- TypeInfo type_info_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperation::Generate() {
- // For mod we don't generate all the Smi code inline.
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- stub.GenerateCall(masm_, src_, value_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// Call the appropriate binary operation stub to compute value op src
-// and leave the result in dst.
-class DeferredInlineSmiOperationReversed: public DeferredCode {
- public:
- DeferredInlineSmiOperationReversed(Token::Value op,
- Register dst,
- Smi* value,
- Register src,
- TypeInfo type_info,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- type_info_(type_info),
- value_(value),
- src_(src),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiOperationReversed");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- TypeInfo type_info_;
- Smi* value_;
- Register src_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperationReversed::Generate() {
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- stub.GenerateCall(masm_, value_, src_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The result of src + value is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative addition and call the appropriate
-// specialized stub for add. The result is left in dst.
-class DeferredInlineSmiAdd: public DeferredCode {
- public:
- DeferredInlineSmiAdd(Register dst,
- TypeInfo type_info,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst),
- type_info_(type_info),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- if (type_info_.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
- set_comment("[ DeferredInlineSmiAdd");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- TypeInfo type_info_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAdd::Generate() {
- // Undo the optimistic add operation and call the shared stub.
- __ sub(Operand(dst_), Immediate(value_));
- GenericBinaryOpStub igostub(
- Token::ADD,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The result of value + src is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative addition and call the appropriate
-// specialized stub for add. The result is left in dst.
-class DeferredInlineSmiAddReversed: public DeferredCode {
- public:
- DeferredInlineSmiAddReversed(Register dst,
- TypeInfo type_info,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst),
- type_info_(type_info),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAddReversed");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- TypeInfo type_info_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAddReversed::Generate() {
- // Undo the optimistic add operation and call the shared stub.
- __ sub(Operand(dst_), Immediate(value_));
- GenericBinaryOpStub igostub(
- Token::ADD,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- igostub.GenerateCall(masm_, value_, dst_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The result of src - value is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative subtraction and call the
-// appropriate specialized stub for subtract. The result is left in
-// dst.
-class DeferredInlineSmiSub: public DeferredCode {
- public:
- DeferredInlineSmiSub(Register dst,
- TypeInfo type_info,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst),
- type_info_(type_info),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- if (type_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
- set_comment("[ DeferredInlineSmiSub");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- TypeInfo type_info_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiSub::Generate() {
- // Undo the optimistic sub operation and call the shared stub.
- __ add(Operand(dst_), Immediate(value_));
- GenericBinaryOpStub igostub(
- Token::SUB,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB,
- TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
- Result* operand,
- Handle<Object> value,
- bool reversed,
- OverwriteMode overwrite_mode) {
- // Generate inline code for a binary operation when one of the
- // operands is a constant smi. Consumes the argument "operand".
- if (IsUnsafeSmi(value)) {
- Result unsafe_operand(value);
- if (reversed) {
- return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
- overwrite_mode);
- } else {
- return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
- overwrite_mode);
- }
- }
-
- // Get the literal value.
- Smi* smi_value = Smi::cast(*value);
- int int_value = smi_value->value();
-
- Token::Value op = expr->op();
- Result answer;
- switch (op) {
- case Token::ADD: {
- operand->ToRegister();
- frame_->Spill(operand->reg());
-
- // Optimistically add. Call the specialized add stub if the
- // result is not a smi or overflows.
- DeferredCode* deferred = NULL;
- if (reversed) {
- deferred = new DeferredInlineSmiAddReversed(operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- } else {
- deferred = new DeferredInlineSmiAdd(operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- }
- __ add(Operand(operand->reg()), Immediate(value));
- deferred->Branch(overflow);
- if (!operand->type_info().IsSmi()) {
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- deferred->BindExit();
- answer = *operand;
- break;
- }
-
- case Token::SUB: {
- DeferredCode* deferred = NULL;
- if (reversed) {
- // The reversed case is only hit when the right operand is not a
- // constant.
- ASSERT(operand->is_register());
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- __ Set(answer.reg(), Immediate(value));
- deferred =
- new DeferredInlineSmiOperationReversed(op,
- answer.reg(),
- smi_value,
- operand->reg(),
- operand->type_info(),
- overwrite_mode);
- __ sub(answer.reg(), Operand(operand->reg()));
- } else {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- answer = *operand;
- deferred = new DeferredInlineSmiSub(operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- __ sub(Operand(operand->reg()), Immediate(value));
- }
- deferred->Branch(overflow);
- if (!operand->type_info().IsSmi()) {
- __ test(answer.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- deferred->BindExit();
- operand->Unuse();
- break;
- }
-
- case Token::SAR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- frame_->Spill(operand->reg());
- if (!operand->type_info().IsSmi()) {
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
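- // Shifting the tagged value arithmetically and then clearing the
- // tag bit shifts the payload in place. Worked example: tagged -10
- // (-20) sar 2 gives -5; masking the tag yields -6, the Smi
- // encoding of -3 = -10 >> 2.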
- if (shift_value > 0) {
- __ sar(operand->reg(), shift_value);
- __ and_(operand->reg(), ~kSmiTagMask);
- }
- deferred->BindExit();
- } else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- if (shift_value > 0) {
- __ sar(operand->reg(), shift_value);
- __ and_(operand->reg(), ~kSmiTagMask);
- }
- }
- answer = *operand;
- }
- break;
-
- case Token::SHR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- if (!operand->type_info().IsSmi()) {
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- __ mov(answer.reg(), operand->reg());
- __ SmiUntag(answer.reg());
- __ shr(answer.reg(), shift_value);
- // A negative Smi shifted right by two is in the positive Smi range.
- if (shift_value < 2) {
- __ test(answer.reg(), Immediate(0xc0000000));
- deferred->Branch(not_zero);
- }
- operand->Unuse();
- __ SmiTag(answer.reg());
- deferred->BindExit();
- }
- break;
-
- case Token::SHL:
- if (reversed) {
- // Move operand into ecx and also into a second register.
- // If operand is already in a register, take advantage of that.
- // This lets us modify ecx, but still bail out to deferred code.
- Result right;
- Result right_copy_in_ecx;
- TypeInfo right_type_info = operand->type_info();
- operand->ToRegister();
- if (operand->reg().is(ecx)) {
- right = allocator()->Allocate();
- __ mov(right.reg(), ecx);
- frame_->Spill(ecx);
- right_copy_in_ecx = *operand;
- } else {
- right_copy_in_ecx = allocator()->Allocate(ecx);
- __ mov(ecx, operand->reg());
- right = *operand;
- }
- operand->Unuse();
-
- answer = allocator()->Allocate();
- DeferredInlineSmiOperationReversed* deferred =
- new DeferredInlineSmiOperationReversed(op,
- answer.reg(),
- smi_value,
- right.reg(),
- right_type_info,
- overwrite_mode);
- __ mov(answer.reg(), Immediate(int_value));
- __ sar(ecx, kSmiTagSize);
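- // The sar shifts the tag bit into the carry flag: for a Smi the tag
- // is 0, so carry set means ecx did not hold a Smi.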
- if (!right_type_info.IsSmi()) {
- deferred->Branch(carry);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(right.reg());
- }
- __ shl_cl(answer.reg());
- __ cmp(answer.reg(), 0xc0000000);
- deferred->Branch(sign);
- __ SmiTag(answer.reg());
-
- deferred->BindExit();
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- if (shift_value == 0) {
- // Spill operand so it can be overwritten in the slow case.
- frame_->Spill(operand->reg());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- deferred->BindExit();
- answer = *operand;
- } else {
- // Use a fresh temporary for nonzero shift values.
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- if (!operand->type_info().IsSmi()) {
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- __ mov(answer.reg(), operand->reg());
- STATIC_ASSERT(kSmiTag == 0); // adjust code if not the case
- // We do no shifts, only the Smi conversion, if shift_value is 1.
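- // The tagged value is 2 * payload, so shifting it left by
- // shift_value - 1 and then doubling yields the tagged result of
- // payload << shift_value. Worked example: Smi 3 (tagged 6) shifted
- // by 2 gives 6 << 1 = 12, doubled to 24, the Smi encoding of 12.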
- if (shift_value > 1) {
- __ shl(answer.reg(), shift_value - 1);
- }
- // Convert int result to Smi, checking that it is in int range.
- STATIC_ASSERT(kSmiTagSize == 1); // adjust code if not the case
- __ add(answer.reg(), Operand(answer.reg()));
- deferred->Branch(overflow);
- deferred->BindExit();
- operand->Unuse();
- }
- }
- break;
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND: {
- operand->ToRegister();
- // DeferredInlineBinaryOperation requires all the registers that it is
- // told about to be spilled.
- frame_->Spill(operand->reg());
- DeferredInlineBinaryOperation* deferred = NULL;
- if (!operand->type_info().IsSmi()) {
- Result left = allocator()->Allocate();
- ASSERT(left.is_valid());
- Result right = allocator()->Allocate();
- ASSERT(right.is_valid());
- deferred = new DeferredInlineBinaryOperation(
- op,
- operand->reg(),
- left.reg(),
- right.reg(),
- operand->type_info(),
- TypeInfo::Smi(),
- overwrite_mode == NO_OVERWRITE ? NO_OVERWRITE : OVERWRITE_LEFT);
- __ test(operand->reg(), Immediate(kSmiTagMask));
- deferred->JumpToConstantRhs(not_zero, smi_value);
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(operand->reg());
- }
- if (op == Token::BIT_AND) {
- __ and_(Operand(operand->reg()), Immediate(value));
- } else if (op == Token::BIT_XOR) {
- if (int_value != 0) {
- __ xor_(Operand(operand->reg()), Immediate(value));
- }
- } else {
- ASSERT(op == Token::BIT_OR);
- if (int_value != 0) {
- __ or_(Operand(operand->reg()), Immediate(value));
- }
- }
- if (deferred != NULL) deferred->BindExit();
- answer = *operand;
- break;
- }
-
- case Token::DIV:
- if (!reversed && int_value == 2) {
- operand->ToRegister();
- frame_->Spill(operand->reg());
-
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- // Check that lowest log2(value) bits of operand are zero, and test
- // smi tag at the same time.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ test(operand->reg(), Immediate(3));
- deferred->Branch(not_zero); // Branch if non-smi or odd smi.
- __ sar(operand->reg(), 1);
- deferred->BindExit();
- answer = *operand;
- } else {
- // Cannot fall through MOD to default case, so we duplicate the
- // default case here.
- Result constant_operand(value);
- if (reversed) {
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
- overwrite_mode);
- }
- }
- break;
-
- // Generate inline code for mod of powers of 2 and negative powers of 2.
- case Token::MOD:
- if (!reversed &&
- int_value != 0 &&
- (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- operand->type_info(),
- smi_value,
- overwrite_mode);
- // Check for negative or non-Smi left hand side.
- __ test(operand->reg(), Immediate(kSmiTagMask | kSmiSignMask));
- deferred->Branch(not_zero);
- if (int_value < 0) int_value = -int_value;
- if (int_value == 1) {
- __ mov(operand->reg(), Immediate(Smi::FromInt(0)));
- } else {
- __ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
- }
- deferred->BindExit();
- answer = *operand;
- break;
- }
- // Fall through if we did not find a power of 2 on the right hand side!
- // The next case must be the default.
-
- default: {
- Result constant_operand(value);
- if (reversed) {
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
- overwrite_mode);
- }
- break;
- }
- }
- ASSERT(answer.is_valid());
- return answer;
-}
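
The inline DIV and MOD cases above lean entirely on the ia32 smi encoding
(kSmiTag == 0, kSmiTagSize == 1, i.e. a value v is stored as v << 1). A
minimal standalone sketch of that arithmetic, not part of the original diff,
using plain int32_t rather than V8's types:

    // Illustrative only: mirrors the tag arithmetic the deleted code relies on.
    #include <cassert>
    #include <cstdint>

    static int32_t SmiTag(int32_t v)   { return v << 1; }  // v * 2, tag bit 0
    static int32_t SmiUntag(int32_t s) { return s >> 1; }  // arithmetic shift

    int main() {
      // Token::DIV by 2: a tagged smi is v * 2, so 'test reg, 3' checks the
      // smi tag and divisibility by 2 at once; 'sar reg, 1' then halves the
      // tagged value and leaves a correctly tagged result.
      int32_t smi = SmiTag(42);
      assert((smi & 3) == 0);            // smi-tagged and even
      assert((smi >> 1) == SmiTag(21));  // still a valid smi
      // Token::MOD by a power of two m: masking the tagged value with
      // (m << 1) - 1 yields the tagged remainder directly.
      const int32_t m = 8;
      assert((SmiTag(42) & ((m << 1) - 1)) == SmiTag(42 % m));
      return 0;
    }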
-
-
-static bool CouldBeNaN(const Result& result) {
- if (result.type_info().IsSmi()) return false;
- if (result.type_info().IsInteger32()) return false;
- if (!result.is_constant()) return true;
- if (!result.handle()->IsHeapNumber()) return false;
- return isnan(HeapNumber::cast(*result.handle())->value());
-}
-
-
-// Convert from signed to unsigned comparison to match the way EFLAGS are set
-// by FPU and XMM compare instructions.
-static Condition DoubleCondition(Condition cc) {
- switch (cc) {
- case less: return below;
- case equal: return equal;
- case less_equal: return below_equal;
- case greater: return above;
- case greater_equal: return above_equal;
- default: UNREACHABLE();
- }
- UNREACHABLE();
- return equal;
-}
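
As the comment above notes, the FPU and XMM compare instructions set EFLAGS
the way an unsigned integer compare does (CF means "below") and report NaN
through the parity flag. A portable C++ sketch of the observable consequence,
not part of the original diff and illustrative only:

    #include <cassert>
    #include <cmath>

    int main() {
      double a = 1.0, b = 2.0, nan = std::nan("");
      // 'less' on doubles must be tested as 'below' after ucomisd, since the
      // result is reported through the carry flag.
      assert(a < b);
      // Every ordered comparison involving NaN is false; the hardware
      // reports this as PF set ('parity_even'), which callers branch on.
      assert(!(nan < a) && !(nan == a) && !(nan >= a));
      return 0;
    }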
-
-
-static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
- bool inline_number_compare) {
- CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
- if (nan_info == kCantBothBeNaN) {
- flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
- }
- if (inline_number_compare) {
- flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
- }
- return flags;
-}
-
-
-void CodeGenerator::Comparison(AstNode* node,
- Condition cc,
- bool strict,
- ControlDestination* dest) {
- // Strict only makes sense for equality comparisons.
- ASSERT(!strict || cc == equal);
-
- Result left_side;
- Result right_side;
- // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
- if (cc == greater || cc == less_equal) {
- cc = ReverseCondition(cc);
- left_side = frame_->Pop();
- right_side = frame_->Pop();
- } else {
- right_side = frame_->Pop();
- left_side = frame_->Pop();
- }
- ASSERT(cc == less || cc == equal || cc == greater_equal);
-
- // If either side is a constant smi, optimize the comparison.
- bool left_side_constant_smi = false;
- bool left_side_constant_null = false;
- bool left_side_constant_1_char_string = false;
- if (left_side.is_constant()) {
- left_side_constant_smi = left_side.handle()->IsSmi();
- left_side_constant_null = left_side.handle()->IsNull();
- left_side_constant_1_char_string =
- (left_side.handle()->IsString() &&
- String::cast(*left_side.handle())->length() == 1 &&
- String::cast(*left_side.handle())->IsAsciiRepresentation());
- }
- bool right_side_constant_smi = false;
- bool right_side_constant_null = false;
- bool right_side_constant_1_char_string = false;
- if (right_side.is_constant()) {
- right_side_constant_smi = right_side.handle()->IsSmi();
- right_side_constant_null = right_side.handle()->IsNull();
- right_side_constant_1_char_string =
- (right_side.handle()->IsString() &&
- String::cast(*right_side.handle())->length() == 1 &&
- String::cast(*right_side.handle())->IsAsciiRepresentation());
- }
-
- if (left_side_constant_smi || right_side_constant_smi) {
- bool is_loop_condition = (node->AsExpression() != NULL) &&
- node->AsExpression()->is_loop_condition();
- ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
- left_side_constant_smi, right_side_constant_smi,
- is_loop_condition);
- } else if (left_side_constant_1_char_string ||
- right_side_constant_1_char_string) {
- if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
- // Trivial case, comparing two constants.
- int left_value = String::cast(*left_side.handle())->Get(0);
- int right_value = String::cast(*right_side.handle())->Get(0);
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant 1 character string.
- // If left side is a constant 1-character string, reverse the operands.
- // Since one side is a constant string, conversion order does not matter.
- if (left_side_constant_1_char_string) {
- Result temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may reintroduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant string, inlining the case
- // where both sides are strings.
- left_side.ToRegister();
-
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_not_string, is_string;
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
- ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
- __ test(left_side.reg(), Immediate(kSmiTagMask));
- is_not_string.Branch(zero, &left_side);
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(),
- FieldOperand(left_side.reg(), HeapObject::kMapOffset));
- __ movzx_b(temp.reg(),
- FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
- // If we are testing for equality then make use of the symbol shortcut.
-          // Check if the right hand side has the same type as the left hand
-          // side (which is always a symbol).
- if (cc == equal) {
- Label not_a_symbol;
- STATIC_ASSERT(kSymbolTag != 0);
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- __ test(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
- __ j(zero, &not_a_symbol);
- // They are symbols, so do identity compare.
- __ cmp(left_side.reg(), right_side.handle());
- dest->true_target()->Branch(equal);
- dest->false_target()->Branch(not_equal);
- __ bind(&not_a_symbol);
- }
- // Call the compare stub if the left side is not a flat ascii string.
- __ and_(temp.reg(),
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
- __ cmp(temp.reg(), kStringTag | kSeqStringTag | kAsciiStringTag);
- temp.Unuse();
- is_string.Branch(equal, &left_side);
-
-      // Set up and call the compare stub.
- is_not_string.Bind(&left_side);
- CompareFlags flags =
- static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_COMPARE_IN_STUB);
- CompareStub stub(cc, strict, flags);
- Result result = frame_->CallStub(&stub, &left_side, &right_side);
- result.ToRegister();
- __ cmp(result.reg(), 0);
- result.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- is_string.Bind(&left_side);
- // left_side is a sequential ASCII string.
- left_side = Result(left_reg);
- right_side = Result(right_val);
- // Test string equality and comparison.
- Label comparison_done;
- if (cc == equal) {
- __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ j(not_equal, &comparison_done);
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_val)->Get(0));
- __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
- char_value);
- } else {
- __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- // If the length is 0 then the jump is taken and the flags
- // correctly represent being less than the one-character string.
- __ j(below, &comparison_done);
- // Compare the first character of the string with the
- // constant 1-character string.
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_val)->Get(0));
- __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
- char_value);
- __ j(not_equal, &comparison_done);
- // If the first character is the same then the long string sorts after
- // the short one.
- __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- __ bind(&comparison_done);
- left_side.Unuse();
- right_side.Unuse();
- dest->Split(cc);
- }
- } else {
- // Neither side is a constant Smi, constant 1-char string or constant null.
- // If either side is a non-smi constant, or known to be a heap number,
- // skip the smi check.
- bool known_non_smi =
- (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
- (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
- left_side.type_info().IsDouble() ||
- right_side.type_info().IsDouble();
-
- NaNInformation nan_info =
- (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
- kBothCouldBeNaN :
- kCantBothBeNaN;
-
-    // Inline the number comparison, handling any combination of smis and
-    // heap numbers, if:
-    //   the code is in a loop,
-    //   the compare operation is different from equal, and
-    //   the compare is not a loop condition.
-    // The reason for excluding equal is that it will most likely be done
-    // with smis (not heap numbers) and the code for comparing smis is
-    // inlined separately. The same reason applies to loop conditions, which
-    // will also most likely be smi comparisons.
- bool is_loop_condition = (node->AsExpression() != NULL)
- && node->AsExpression()->is_loop_condition();
- bool inline_number_compare =
- loop_nesting() > 0 && cc != equal && !is_loop_condition;
-
- // Left and right needed in registers for the following code.
- left_side.ToRegister();
- right_side.ToRegister();
-
- if (known_non_smi) {
- // Inlined equality check:
- // If at least one of the objects is not NaN, then if the objects
- // are identical, they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmp(left_side.reg(), Operand(right_side.reg()));
- dest->true_target()->Branch(equal);
- }
-
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
-
- // End of in-line compare, call out to the compare stub. Don't include
- // number comparison in the stub if it was inlined.
- CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
- CompareStub stub(cc, strict, flags);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ test(answer.reg(), Operand(answer.reg()));
- answer.Unuse();
- dest->Split(cc);
- } else {
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_smi;
- Register left_reg = left_side.reg();
- Register right_reg = right_side.reg();
-
- // In-line check for comparing two smis.
- JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
-
- if (has_valid_frame()) {
- // Inline the equality check if both operands can't be a NaN. If both
- // objects are the same they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmp(left_side.reg(), Operand(right_side.reg()));
- dest->true_target()->Branch(equal);
- }
-
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
-
- // End of in-line compare, call out to the compare stub. Don't include
- // number comparison in the stub if it was inlined.
- CompareFlags flags =
- ComputeCompareFlags(nan_info, inline_number_compare);
- CompareStub stub(cc, strict, flags);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ test(answer.reg(), Operand(answer.reg()));
- answer.Unuse();
- if (is_smi.is_linked()) {
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
- } else {
- dest->Split(cc);
- }
- }
-
- if (is_smi.is_linked()) {
- is_smi.Bind();
- left_side = Result(left_reg);
- right_side = Result(right_reg);
- __ cmp(left_side.reg(), Operand(right_side.reg()));
- right_side.Unuse();
- left_side.Unuse();
- dest->Split(cc);
- }
- }
- }
-}
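
The reversal at the top of Comparison() deserves a worked example. ECMA-262
converts the left operand of a relational operator first, so a > b is
compiled as b < a with the operand pops swapped; the truth value is preserved
while the rest of the function only ever sees less, equal, or greater_equal.
A hedged C++ sketch, not part of the original diff (Reverse is a stand-in,
not V8's ReverseCondition):

    #include <cassert>

    enum Condition { less, equal, greater, less_equal, greater_equal };

    // Stand-in: the condition that holds when the operands are swapped.
    static Condition Reverse(Condition cc) {
      switch (cc) {
        case less:          return greater;
        case greater:       return less;
        case less_equal:    return greater_equal;
        case greater_equal: return less_equal;
        default:            return cc;  // equal is symmetric
      }
    }

    int main() {
      int a = 3, b = 5;
      assert((a > b) == (b < a));        // same truth value, operands swapped
      assert(Reverse(greater) == less);  // only <, ==, >= remain to compile
      return 0;
    }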
-
-
-void CodeGenerator::ConstantSmiComparison(Condition cc,
- bool strict,
- ControlDestination* dest,
- Result* left_side,
- Result* right_side,
- bool left_side_constant_smi,
- bool right_side_constant_smi,
- bool is_loop_condition) {
- if (left_side_constant_smi && right_side_constant_smi) {
- // Trivial case, comparing two constants.
- int left_value = Smi::cast(*left_side->handle())->value();
- int right_value = Smi::cast(*right_side->handle())->value();
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant Smi.
- // If left side is a constant Smi, reverse the operands.
- // Since one side is a constant Smi, conversion order does not matter.
- if (left_side_constant_smi) {
- Result* temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may re-introduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant Smi, inlining the case
- // where both sides are Smis.
- left_side->ToRegister();
- Register left_reg = left_side->reg();
- Handle<Object> right_val = right_side->handle();
-
- if (left_side->is_smi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left_reg);
- }
- // Test smi equality and comparison by signed int comparison.
- if (IsUnsafeSmi(right_side->handle())) {
- right_side->ToRegister();
- __ cmp(left_reg, Operand(right_side->reg()));
- } else {
- __ cmp(Operand(left_reg), Immediate(right_side->handle()));
- }
- left_side->Unuse();
- right_side->Unuse();
- dest->Split(cc);
- } else {
- // Only the case where the left side could possibly be a non-smi is left.
- JumpTarget is_smi;
- if (cc == equal) {
- // We can do the equality comparison before the smi check.
- __ cmp(Operand(left_reg), Immediate(right_side->handle()));
- dest->true_target()->Branch(equal);
- __ test(left_reg, Immediate(kSmiTagMask));
- dest->false_target()->Branch(zero);
- } else {
- // Do the smi check, then the comparison.
- __ test(left_reg, Immediate(kSmiTagMask));
- is_smi.Branch(zero, left_side, right_side);
- }
-
- // Jump or fall through to here if we are comparing a non-smi to a
- // constant smi. If the non-smi is a heap number and this is not
- // a loop condition, inline the floating point code.
- if (!is_loop_condition && CpuFeatures::IsSupported(SSE2)) {
- // Right side is a constant smi and left side has been checked
- // not to be a smi.
- CpuFeatures::Scope use_sse2(SSE2);
- JumpTarget not_number;
- __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- not_number.Branch(not_equal, left_side);
- __ movdbl(xmm1,
- FieldOperand(left_reg, HeapNumber::kValueOffset));
- int value = Smi::cast(*right_val)->value();
- if (value == 0) {
- __ xorpd(xmm0, xmm0);
- } else {
- Result temp = allocator()->Allocate();
- __ mov(temp.reg(), Immediate(value));
- __ cvtsi2sd(xmm0, Operand(temp.reg()));
- temp.Unuse();
- }
- __ ucomisd(xmm1, xmm0);
- // Jump to builtin for NaN.
- not_number.Branch(parity_even, left_side);
- left_side->Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
- not_number.Bind(left_side);
- }
-
-      // Set up and call the compare stub.
- CompareFlags flags =
- static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
- CompareStub stub(cc, strict, flags);
- Result result = frame_->CallStub(&stub, left_side, right_side);
- result.ToRegister();
- __ test(result.reg(), Operand(result.reg()));
- result.Unuse();
- if (cc == equal) {
- dest->Split(cc);
- } else {
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- // It is important for performance for this case to be at the end.
- is_smi.Bind(left_side, right_side);
- if (IsUnsafeSmi(right_side->handle())) {
- right_side->ToRegister();
- __ cmp(left_reg, Operand(right_side->reg()));
- } else {
- __ cmp(Operand(left_reg), Immediate(right_side->handle()));
- }
- left_side->Unuse();
- right_side->Unuse();
- dest->Split(cc);
- }
- }
- }
-}
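
When both sides are constant smis, ConstantSmiComparison folds the comparison
at compile time and emits an unconditional jump; the other notable trick
above is running the equality compare before the smi check, which is sound
because a heap pointer can never be bit-equal to a smi immediate. A sketch of
the fold, not part of the original diff and illustrative only:

    #include <cassert>

    enum Condition { less, equal, greater_equal };

    // Mirrors the three conditions the function handles after reversal.
    static bool Fold(Condition cc, int left, int right) {
      switch (cc) {
        case less:          return left < right;
        case equal:         return left == right;
        case greater_equal: return left >= right;
      }
      return false;  // unreachable
    }

    int main() {
      assert(Fold(less, 1, 2));            // dest->Goto(true)
      assert(!Fold(greater_equal, 1, 2));  // dest->Goto(false)
      return 0;
    }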
-
-
-// Check that the comparison operand is a number. Jump to the not_numbers
-// target, passing the left and right results, if the operand is not a number.
-static void CheckComparisonOperand(MacroAssembler* masm_,
- Result* operand,
- Result* left_side,
- Result* right_side,
- JumpTarget* not_numbers) {
- // Perform check if operand is not known to be a number.
- if (!operand->type_info().IsNumber()) {
- Label done;
- __ test(operand->reg(), Immediate(kSmiTagMask));
- __ j(zero, &done);
- __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- not_numbers->Branch(not_equal, left_side, right_side, not_taken);
- __ bind(&done);
- }
-}
-
-
-// Load a comparison operand onto the FPU stack. This assumes that the operand
-// has already been checked and is a number.
-static void LoadComparisonOperand(MacroAssembler* masm_,
- Result* operand) {
- Label done;
- if (operand->type_info().IsDouble()) {
- // Operand is known to be a heap number, just load it.
- __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- } else if (operand->type_info().IsSmi()) {
- // Operand is known to be a smi. Convert it to double and keep the original
- // smi.
- __ SmiUntag(operand->reg());
- __ push(operand->reg());
- __ fild_s(Operand(esp, 0));
- __ pop(operand->reg());
- __ SmiTag(operand->reg());
- } else {
- // Operand type not known, check for smi otherwise assume heap number.
- Label smi;
- __ test(operand->reg(), Immediate(kSmiTagMask));
- __ j(zero, &smi);
- __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- __ jmp(&done);
- __ bind(&smi);
- __ SmiUntag(operand->reg());
- __ push(operand->reg());
- __ fild_s(Operand(esp, 0));
- __ pop(operand->reg());
- __ SmiTag(operand->reg());
- __ jmp(&done);
- }
- __ bind(&done);
-}
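
The push/fild_s/pop dance above exists because fild only accepts a memory
operand, so the untagged value takes a round trip through the stack. A
value-level C++ analogue, not part of the original diff and illustrative
only:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t smi = 21 << 1;                          // tagged smi 21
      int32_t untagged = smi >> 1;                    // SmiUntag
      double on_fpu = static_cast<double>(untagged);  // push; fild_s; pop
      int32_t retagged = untagged << 1;               // SmiTag
      assert(retagged == smi && on_fpu == 21.0);      // register unchanged
      return 0;
    }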
-
-
-// Load a comparison operand into an XMM register. Jump to the not_numbers
-// target, passing the left and right results, if the operand is not a number.
-static void LoadComparisonOperandSSE2(MacroAssembler* masm_,
- Result* operand,
- XMMRegister xmm_reg,
- Result* left_side,
- Result* right_side,
- JumpTarget* not_numbers) {
- Label done;
- if (operand->type_info().IsDouble()) {
- // Operand is known to be a heap number, just load it.
- __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- } else if (operand->type_info().IsSmi()) {
- // Operand is known to be a smi. Convert it to double and keep the original
- // smi.
- __ SmiUntag(operand->reg());
- __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
- __ SmiTag(operand->reg());
- } else {
- // Operand type not known, check for smi or heap number.
- Label smi;
- __ test(operand->reg(), Immediate(kSmiTagMask));
- __ j(zero, &smi);
- if (!operand->type_info().IsNumber()) {
- __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- not_numbers->Branch(not_equal, left_side, right_side, taken);
- }
- __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&smi);
-    // Convert the smi to a double and keep the original smi.
- __ SmiUntag(operand->reg());
- __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
- __ SmiTag(operand->reg());
- __ jmp(&done);
- }
- __ bind(&done);
-}
-
-
-void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
- Result* right_side,
- Condition cc,
- ControlDestination* dest) {
- ASSERT(left_side->is_register());
- ASSERT(right_side->is_register());
-
- JumpTarget not_numbers;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
-
- // Load left and right operand into registers xmm0 and xmm1 and compare.
- LoadComparisonOperandSSE2(masm_, left_side, xmm0, left_side, right_side,
- &not_numbers);
- LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side,
- &not_numbers);
- __ ucomisd(xmm0, xmm1);
- } else {
- Label check_right, compare;
-
- // Make sure that both comparison operands are numbers.
- CheckComparisonOperand(masm_, left_side, left_side, right_side,
- &not_numbers);
- CheckComparisonOperand(masm_, right_side, left_side, right_side,
- &not_numbers);
-
- // Load right and left operand to FPU stack and compare.
- LoadComparisonOperand(masm_, right_side);
- LoadComparisonOperand(masm_, left_side);
- __ FCmp();
- }
-
- // Bail out if a NaN is involved.
- not_numbers.Branch(parity_even, left_side, right_side, not_taken);
-
- // Split to destination targets based on comparison.
- left_side->Unuse();
- right_side->Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
-
- not_numbers.Bind(left_side, right_side);
-}
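
Both the SSE2 and the FPU path above funnel non-numbers and NaNs to the same
not_numbers bail-out, and only ordered results reach the split. A value-level
sketch of that contract, not part of the original diff; InlineNumberLess is a
hypothetical stand-in:

    #include <cassert>
    #include <cmath>
    #include <optional>

    // nullopt models the not_numbers / parity_even bail-out to the stub.
    static std::optional<bool> InlineNumberLess(const double* l,
                                                const double* r) {
      if (l == nullptr || r == nullptr) return std::nullopt;      // not a number
      if (std::isnan(*l) || std::isnan(*r)) return std::nullopt;  // parity_even
      return *l < *r;  // DoubleCondition(less) == below
    }

    int main() {
      double a = 1.0, b = 2.0, n = std::nan("");
      assert(InlineNumberLess(&a, &b).value());
      assert(!InlineNumberLess(&a, &n).has_value());  // falls back to the stub
      return 0;
    }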
-
-
-// Call the function just below TOS on the stack with the given
-// arguments. The receiver is the TOS.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- CallFunctionFlags flags,
- int position) {
- // Push the arguments ("left-to-right") on the stack.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Record the position for debugging purposes.
- CodeForSourcePosition(position);
-
- // Use the shared code stub to call the function.
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, flags);
- Result answer = frame_->CallStub(&call_function, arg_count + 1);
- // Restore context and replace function on the stack with the
- // result of the stub invocation.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &answer);
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position) {
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments).
- // If the arguments object of the scope has not been allocated,
- // and x.apply is Function.prototype.apply, this optimization
- // just copies y and the arguments of the current function on the
- // stack, as receiver and arguments, and calls x.
- // In the implementation comments, we call x the applicand
- // and y the receiver.
- ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
- ASSERT(arguments->IsArguments());
-
- // Load applicand.apply onto the stack. This will usually
- // give us a megamorphic load site. Not super, but it works.
- Load(applicand);
- frame()->Dup();
- Handle<String> name = Factory::LookupAsciiSymbol("apply");
- frame()->Push(name);
- Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
- __ nop();
- frame()->Push(&answer);
-
- // Load the receiver and the existing arguments object onto the
- // expression stack. Avoid allocating the arguments object here.
- Load(receiver);
- LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-
- // Emit the source position information after having loaded the
- // receiver and the arguments.
- CodeForSourcePosition(position);
- // Contents of frame at this point:
-  // Frame[0]: arguments object of the current function or the arguments
-  //           marker.
- // Frame[1]: receiver
- // Frame[2]: applicand.apply
- // Frame[3]: applicand.
-
- // Check if the arguments object has been lazily allocated
- // already. If so, just use that instead of copying the arguments
- // from the stack. This also deals with cases where a local variable
- // named 'arguments' has been introduced.
- frame_->Dup();
- Result probe = frame_->Pop();
- { VirtualFrame::SpilledScope spilled_scope;
- Label slow, done;
- bool try_lazy = true;
- if (probe.is_constant()) {
- try_lazy = probe.handle()->IsArgumentsMarker();
- } else {
- __ cmp(Operand(probe.reg()), Immediate(Factory::arguments_marker()));
- probe.Unuse();
- __ j(not_equal, &slow);
- }
-
- if (try_lazy) {
- Label build_args;
- // Get rid of the arguments object probe.
- frame_->Drop(); // Can be called on a spilled frame.
- // Stack now has 3 elements on it.
- // Contents of stack at this point:
- // esp[0]: receiver
- // esp[1]: applicand.apply
- // esp[2]: applicand.
-
- // Check that the receiver really is a JavaScript object.
- __ mov(eax, Operand(esp, 0));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &build_args);
- // We allow all JSObjects including JSFunctions. As long as
- // JS_FUNCTION_TYPE is the last instance type and it is right
- // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
- // bound.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(below, &build_args);
-
- // Check that applicand.apply is Function.prototype.apply.
- __ mov(eax, Operand(esp, kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &build_args);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &build_args);
- __ mov(ecx, FieldOperand(eax, JSFunction::kCodeEntryOffset));
- __ sub(Operand(ecx), Immediate(Code::kHeaderSize - kHeapObjectTag));
- Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ cmp(Operand(ecx), Immediate(apply_code));
- __ j(not_equal, &build_args);
-
- // Check that applicand is a function.
- __ mov(edi, Operand(esp, 2 * kPointerSize));
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &build_args);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &build_args);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ mov(eax, Immediate(scope()->num_parameters()));
- for (int i = 0; i < scope()->num_parameters(); i++) {
- __ push(frame_->ParameterAt(i));
- }
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but
- // avoid copying too many arguments to avoid stack overflows.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(eax);
- __ mov(ecx, Operand(eax));
- __ cmp(eax, kArgumentsLimit);
- __ j(above, &build_args);
-
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- // ecx is a small non-negative integer, due to the test above.
- __ test(ecx, Operand(ecx));
- __ j(zero, &invoke);
- __ bind(&loop);
- __ push(Operand(edx, ecx, times_pointer_size, 1 * kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION);
- // Drop applicand.apply and applicand from the stack, and push
- // the result of the function call, but leave the spilled frame
- // unchanged, with 3 elements, so it is correct when we compile the
- // slow-case code.
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- __ push(eax);
- // Stack now has 1 element:
- // esp[0]: result
- __ jmp(&done);
-
- // Slow-case: Allocate the arguments object since we know it isn't
- // there, and fall-through to the slow-case where we call
- // applicand.apply.
- __ bind(&build_args);
-      // Stack now has 3 elements, because we jumped here from code where:
- // esp[0]: receiver
- // esp[1]: applicand.apply
- // esp[2]: applicand.
-
- // StoreArgumentsObject requires a correct frame, and may modify it.
- Result arguments_object = StoreArgumentsObject(false);
- frame_->SpillAll();
- arguments_object.ToRegister();
- frame_->EmitPush(arguments_object.reg());
- arguments_object.Unuse();
- // Stack and frame now have 4 elements.
- __ bind(&slow);
- }
-
- // Generic computation of x.apply(y, args) with no special optimization.
- // Flip applicand.apply and applicand on the stack, so
- // applicand looks like the receiver of the applicand.apply call.
- // Then process it as a normal function call.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(Operand(esp, 2 * kPointerSize), eax);
- __ mov(Operand(esp, 3 * kPointerSize), ebx);
-
- CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
- Result res = frame_->CallStub(&call_function, 3);
- // The function and its two arguments have been dropped.
- frame_->Drop(1); // Drop the receiver as well.
- res.ToRegister();
- frame_->EmitPush(res.reg());
- // Stack now has 1 element:
- // esp[0]: result
- if (try_lazy) __ bind(&done);
- } // End of spilled scope.
- // Restore the context register after a call.
- frame_->RestoreContextRegister();
-}
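
The fast path of CallApplyLazy never materializes the arguments object: once
the guards pass (the receiver is a JS object, applicand.apply really is
Function.prototype.apply, applicand is a function, and the argument count is
below kArgumentsLimit), it pushes the caller's actual parameters straight
from the caller frame. A value-level C++ sketch of that effect, not part of
the original diff; every name here is a hypothetical stand-in:

    #include <cassert>
    #include <functional>
    #include <vector>

    // caller_args stands in for the caller's stack frame slots.
    static int ApplyLazy(const std::function<int(const std::vector<int>&)>& fn,
                         const std::vector<int>& caller_args) {
      // Fast path: forward the caller's arguments directly instead of first
      // allocating an arguments array and then unpacking it again.
      return fn(caller_args);
    }

    int main() {
      auto sum = [](const std::vector<int>& args) {
        int s = 0;
        for (int v : args) s += v;
        return s;
      };
      assert(ApplyLazy(sum, {1, 2, 3}) == 6);  // like sum.apply(y, arguments)
      return 0;
    }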
-
-
-class DeferredStackCheck: public DeferredCode {
- public:
- DeferredStackCheck() {
- set_comment("[ DeferredStackCheck");
- }
-
- virtual void Generate();
-};
-
-
-void DeferredStackCheck::Generate() {
- StackCheckStub stub;
- __ CallStub(&stub);
-}
-
-
-void CodeGenerator::CheckStack() {
- DeferredStackCheck* deferred = new DeferredStackCheck;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit();
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- deferred->Branch(below);
- deferred->BindExit();
-}
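
The generated stack check is a single cmp of esp against a VM-maintained
limit, with the StackCheckStub call moved out of line as deferred code. An
illustrative C++ analogue, not part of the original diff, using an explicit
depth budget instead of esp:

    #include <cassert>

    static int budget = 1000;  // stand-in for the address_of_stack_limit value

    static void HandleStackCheck() {
      // Deferred slow path: in V8 this calls the StackCheckStub, which can
      // service interrupts or report a real stack overflow.
    }

    static long Factorial(int n) {
      if (--budget <= 0) HandleStackCheck();  // cmp esp, limit; branch below
      return n <= 1 ? 1 : n * Factorial(n - 1);
    }

    int main() {
      assert(Factorial(10) == 3628800);
      return 0;
    }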
-
-
-void CodeGenerator::VisitAndSpill(Statement* statement) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Visit(statement);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- VisitStatements(statements);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(!in_spilled_code());
- for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
- Visit(statements->at(i));
- }
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ Block");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- VisitStatements(node->statements());
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals. The inevitable call
- // will sync frame elements to memory anyway, so we do it eagerly to
- // allow us to push the arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- frame_->EmitPush(esi); // The context is the first argument.
- frame_->EmitPush(Immediate(pairs));
- frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
- frame_->EmitPush(Immediate(Smi::FromInt(strict_mode_flag())));
- Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
- // Return value is ignored.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
- Comment cmnt(masm_, "[ Declaration");
- Variable* var = node->proxy()->var();
- ASSERT(var != NULL); // must have been resolved
- Slot* slot = var->AsSlot();
-
- // If it was not possible to allocate the variable at compile time,
- // we need to "declare" it at runtime to make sure it actually
- // exists in the local context.
- if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Variables with a "LOOKUP" slot were introduced as non-locals
- // during variable resolution and must have mode DYNAMIC.
- ASSERT(var->is_dynamic());
- // For now, just do a runtime call. Sync the virtual frame eagerly
- // so we can simply push the arguments into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(var->name()));
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
- PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
- frame_->EmitPush(Immediate(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (node->mode() == Variable::CONST) {
- frame_->EmitPush(Immediate(Factory::the_hole_value()));
- } else if (node->fun() != NULL) {
- Load(node->fun());
- } else {
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // no initial value!
- }
- Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
- // Ignore the return value (declarations are statements).
- return;
- }
-
- ASSERT(!var->is_global());
-
- // If we have a function or a constant, we need to initialize the variable.
- Expression* val = NULL;
- if (node->mode() == Variable::CONST) {
- val = new Literal(Factory::the_hole_value());
- } else {
- val = node->fun(); // NULL if we don't have a function
- }
-
- if (val != NULL) {
- {
- // Set the initial value.
- Reference target(this, node->proxy());
- Load(val);
- target.SetValue(NOT_CONST_INIT);
- // The reference is removed from the stack (preserving TOS) when
- // it goes out of scope.
- }
- // Get rid of the assigned value (declarations are statements).
- frame_->Drop();
- }
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ExpressionStatement");
- CodeForStatementPosition(node);
- Expression* expression = node->expression();
- expression->MarkAsStatement();
- Load(expression);
- // Remove the lingering expression result from the top of stack.
- frame_->Drop();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "// EmptyStatement");
- CodeForStatementPosition(node);
- // nothing to do
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ IfStatement");
- // Generate different code depending on which parts of the if statement
- // are present or not.
- bool has_then_stm = node->HasThenStatement();
- bool has_else_stm = node->HasElseStatement();
-
- CodeForStatementPosition(node);
- JumpTarget exit;
- if (has_then_stm && has_else_stm) {
- JumpTarget then;
- JumpTarget else_;
- ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The else target was bound, so we compile the else part first.
- Visit(node->else_statement());
-
- // We may have dangling jumps to the then part.
- if (then.is_linked()) {
- if (has_valid_frame()) exit.Jump();
- then.Bind();
- Visit(node->then_statement());
- }
- } else {
- // The then target was bound, so we compile the then part first.
- Visit(node->then_statement());
-
- if (else_.is_linked()) {
- if (has_valid_frame()) exit.Jump();
- else_.Bind();
- Visit(node->else_statement());
- }
- }
-
- } else if (has_then_stm) {
- ASSERT(!has_else_stm);
- JumpTarget then;
- ControlDestination dest(&then, &exit, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The exit label was bound. We may have dangling jumps to the
- // then part.
- if (then.is_linked()) {
- exit.Unuse();
- exit.Jump();
- then.Bind();
- Visit(node->then_statement());
- }
- } else {
- // The then label was bound.
- Visit(node->then_statement());
- }
-
- } else if (has_else_stm) {
- ASSERT(!has_then_stm);
- JumpTarget else_;
- ControlDestination dest(&exit, &else_, false);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.true_was_fall_through()) {
- // The exit label was bound. We may have dangling jumps to the
- // else part.
- if (else_.is_linked()) {
- exit.Unuse();
- exit.Jump();
- else_.Bind();
- Visit(node->else_statement());
- }
- } else {
- // The else label was bound.
- Visit(node->else_statement());
- }
-
- } else {
- ASSERT(!has_then_stm && !has_else_stm);
- // We only care about the condition's side effects (not its value
- // or control flow effect). LoadCondition is called without
- // forcing control flow.
- ControlDestination dest(&exit, &exit, true);
- LoadCondition(node->condition(), &dest, false);
- if (!dest.is_used()) {
- // We got a value on the frame rather than (or in addition to)
- // control flow.
- frame_->Drop();
- }
- }
-
- if (exit.is_linked()) {
- exit.Bind();
- }
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ContinueStatement");
- CodeForStatementPosition(node);
- node->target()->continue_target()->Jump();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ BreakStatement");
- CodeForStatementPosition(node);
- node->target()->break_target()->Jump();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ReturnStatement");
-
- CodeForStatementPosition(node);
- Load(node->expression());
- Result return_value = frame_->Pop();
- masm()->positions_recorder()->WriteRecordedPositions();
- if (function_return_is_shadowed_) {
- function_return_.Jump(&return_value);
- } else {
- frame_->PrepareForReturn();
- if (function_return_.is_bound()) {
- // If the function return label is already bound we reuse the
- // code by jumping to the return site.
- function_return_.Jump(&return_value);
- } else {
- function_return_.Bind(&return_value);
- GenerateReturnSequence(&return_value);
- }
- }
-}
-
-
-void CodeGenerator::GenerateReturnSequence(Result* return_value) {
- // The return value is a live (but not currently reference counted)
- // reference to eax. This is safe because the current frame does not
- // contain a reference to eax (it is prepared for the return by spilling
- // all registers).
- if (FLAG_trace) {
- frame_->Push(return_value);
- *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
- }
- return_value->ToRegister(eax);
-
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
-
- // Leave the frame and return popping the arguments and the
- // receiver.
- frame_->Exit();
- int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
- __ Ret(arguments_bytes, ecx);
- DeleteFrame();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceLength <=
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
-}
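
The ret instruction itself pops the arguments and the receiver, so the byte
count must be computed from the parameter count. A worked example of that
arithmetic on ia32, where kPointerSize is 4 (not part of the original diff):

    #include <cassert>

    int main() {
      const int kPointerSize = 4;    // ia32
      int num_parameters = 2;        // e.g. function (a, b) { ... }
      int arguments_bytes = (num_parameters + 1) * kPointerSize;  // + receiver
      assert(arguments_bytes == 12); // 'ret 12' pops a, b, and the receiver
      return 0;
    }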
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WithEnterStatement");
- CodeForStatementPosition(node);
- Load(node->expression());
- Result context;
- if (node->is_catch_block()) {
- context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kPushContext, 1);
- }
-
- // Update context local.
- frame_->SaveContextRegister();
-
- // Verify that the runtime call result and esi agree.
- if (FLAG_debug_code) {
- __ cmp(context.reg(), Operand(esi));
- __ Assert(equal, "Runtime::NewContext should end up in esi");
- }
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WithExitStatement");
- CodeForStatementPosition(node);
- // Pop context.
- __ mov(esi, ContextOperand(esi, Context::PREVIOUS_INDEX));
- // Update context local.
- frame_->SaveContextRegister();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ SwitchStatement");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
- // Compile the switch value.
- Load(node->tag());
-
- ZoneList<CaseClause*>* cases = node->cases();
- int length = cases->length();
- CaseClause* default_clause = NULL;
-
- JumpTarget next_test;
- // Compile the case label expressions and comparisons. Exit early
- // if a comparison is unconditionally true. The target next_test is
- // bound before the loop in order to indicate control flow to the
- // first comparison.
- next_test.Bind();
- for (int i = 0; i < length && !next_test.is_unused(); i++) {
- CaseClause* clause = cases->at(i);
- // The default is not a test, but remember it for later.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- // We recycle the same target next_test for each test. Bind it if
- // the previous test has not done so and then unuse it for the
- // loop.
- if (next_test.is_linked()) {
- next_test.Bind();
- }
- next_test.Unuse();
-
- // Duplicate the switch value.
- frame_->Dup();
-
- // Compile the label expression.
- Load(clause->label());
-
- // Compare and branch to the body if true or the next test if
- // false. Prefer the next test as a fall through.
- ControlDestination dest(clause->body_target(), &next_test, false);
- Comparison(node, equal, true, &dest);
-
- // If the comparison fell through to the true target, jump to the
- // actual body.
- if (dest.true_was_fall_through()) {
- clause->body_target()->Unuse();
- clause->body_target()->Jump();
- }
- }
-
- // If there was control flow to a next test from the last one
- // compiled, compile a jump to the default or break target.
- if (!next_test.is_unused()) {
- if (next_test.is_linked()) {
- next_test.Bind();
- }
- // Drop the switch value.
- frame_->Drop();
- if (default_clause != NULL) {
- default_clause->body_target()->Jump();
- } else {
- node->break_target()->Jump();
- }
- }
-
- // The last instruction emitted was a jump, either to the default
- // clause or the break target, or else to a case body from the loop
- // that compiles the tests.
- ASSERT(!has_valid_frame());
- // Compile case bodies as needed.
- for (int i = 0; i < length; i++) {
- CaseClause* clause = cases->at(i);
-
- // There are two ways to reach the body: from the corresponding
- // test or as the fall through of the previous body.
- if (clause->body_target()->is_linked() || has_valid_frame()) {
- if (clause->body_target()->is_linked()) {
- if (has_valid_frame()) {
- // If we have both a jump to the test and a fall through, put
- // a jump on the fall through path to avoid the dropping of
- // the switch value on the test path. The exception is the
- // default which has already had the switch value dropped.
- if (clause->is_default()) {
- clause->body_target()->Bind();
- } else {
- JumpTarget body;
- body.Jump();
- clause->body_target()->Bind();
- frame_->Drop();
- body.Bind();
- }
- } else {
- // No fall through to worry about.
- clause->body_target()->Bind();
- if (!clause->is_default()) {
- frame_->Drop();
- }
- }
- } else {
- // Otherwise, we have only fall through.
- ASSERT(has_valid_frame());
- }
-
- // We are now prepared to compile the body.
- Comment cmnt(masm_, "[ Case body");
- VisitStatements(clause->statements());
-    }
- clause->body_target()->Unuse();
- }
-
- // We may not have a valid frame here so bind the break target only
- // if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
-}
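
The switch above compiles to a chain of strict-equality tests against the
duplicated switch value, recycling a single next_test target, with
JavaScript fall-through preserved between bodies. A C++ sketch of the
control-flow shape the generated code takes, not part of the original diff
and illustrative only:

    #include <cassert>

    static int Describe(int tag) {
      // The 'next_test' chain: each failed comparison falls into the next.
      if (tag == 1) goto body1;
      if (tag == 2) goto body2;
      goto default_body;            // no match: jump to default (or break)
     body1:
      return 10;                    // body ends in a jump; no fall-through
     body2:
      tag += 100;                   // falls through into the default body
     default_body:
      return tag;
    }

    int main() {
      assert(Describe(1) == 10);
      assert(Describe(2) == 102);   // fall-through happened
      assert(Describe(7) == 7);
      return 0;
    }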
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ DoWhileStatement");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- JumpTarget body(JumpTarget::BIDIRECTIONAL);
- IncrementLoopNesting();
-
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- // Label the top of the loop for the backward jump if necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // Use the continue target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- break;
- case ALWAYS_FALSE:
- // No need to label it.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- break;
- case DONT_KNOW:
- // Continue is the test, so use the backward body target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- body.Bind();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Compile the test.
- switch (info) {
- case ALWAYS_TRUE:
- // If control flow can fall off the end of the body, jump back
- // to the top and bind the break target at the exit.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- case ALWAYS_FALSE:
- // We may have had continues or breaks in the body.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- case DONT_KNOW:
- // We have to compile the test expression if it can be reached by
- // control flow falling out of the body or via continue.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- Comment cmnt(masm_, "[ DoWhileCondition");
- CodeForDoWhileConditionPosition(node);
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- }
-
- DecrementLoopNesting();
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
-}
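
AnalyzeCondition classifies the loop condition so that do { } while (true)
emits only a backward jump and do { } while (false) emits no jump at all. A
hedged sketch of such a classifier over a literal-or-unknown condition, not
part of the original diff; the types here are stand-ins:

    #include <cassert>

    enum ConditionAnalysis { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW };

    struct Cond { bool is_literal; bool literal_value; };

    static ConditionAnalysis Analyze(const Cond& c) {
      if (!c.is_literal) return DONT_KNOW;    // must compile the test
      return c.literal_value ? ALWAYS_TRUE    // unconditional back edge
                             : ALWAYS_FALSE;  // body runs once, no test
    }

    int main() {
      assert(Analyze({true, true})   == ALWAYS_TRUE);
      assert(Analyze({true, false})  == ALWAYS_FALSE);
      assert(Analyze({false, false}) == DONT_KNOW);
      return 0;
    }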
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WhileStatement");
- CodeForStatementPosition(node);
-
- // If the condition is always false and has no side effects, we do not
- // need to compile anything.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- // Do not duplicate conditions that may have function literal
- // subexpressions. This can cause us to compile the function literal
- // twice.
- bool test_at_bottom = !node->may_have_function_literal();
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- IncrementLoopNesting();
- JumpTarget body;
- if (test_at_bottom) {
- body.set_direction(JumpTarget::BIDIRECTIONAL);
- }
-
- // Based on the condition analysis, compile the test as necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // We will not compile the test expression. Label the top of the
- // loop with the continue target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- break;
- case DONT_KNOW: {
- if (test_at_bottom) {
- // Continue is the test at the bottom, no need to label the test
- // at the top. The body is a backward target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else {
- // Label the test at the top as the continue target. The body
- // is a forward-only target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- }
- // Compile the test with the body as the true target and preferred
- // fall-through and with the break target as the false target.
- ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // If we got the break target as fall-through, the test may have
- // been unconditionally false (if there are no jumps to the
- // body).
- if (!body.is_linked()) {
- DecrementLoopNesting();
- return;
- }
-
- // Otherwise, jump around the body on the fall through and then
- // bind the body target.
- node->break_target()->Unuse();
- node->break_target()->Jump();
- body.Bind();
- }
- break;
- }
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Based on the condition analysis, compile the backward jump as
- // necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // The loop body has been labeled with the continue target.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- break;
- case DONT_KNOW:
- if (test_at_bottom) {
- // If we have chosen to recompile the test at the bottom,
- // then it is the continue target.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- // The break target is the fall-through (body is a backward
- // jump from here and thus an invalid fall-through).
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- } else {
- // If we have chosen not to recompile the test at the bottom,
- // jump back to the one at the top.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- }
- break;
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- // The break target may be already bound (by the condition), or there
- // may not be a valid frame. Bind it only if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
-
-
-void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
- ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
- if (slot->type() == Slot::LOCAL) {
- frame_->SetTypeForLocalAt(slot->index(), info);
- } else {
- frame_->SetTypeForParamAt(slot->index(), info);
- }
- if (FLAG_debug_code && info.IsSmi()) {
- if (slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(slot->index());
- } else {
- frame_->PushParameterAt(slot->index());
- }
- Result var = frame_->Pop();
- var.ToRegister();
- __ AbortIfNotSmi(var.reg());
- }
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ForStatement");
- CodeForStatementPosition(node);
-
- // Compile the init expression if present.
- if (node->init() != NULL) {
- Visit(node->init());
- }
-
- // If the condition is always false and has no side effects, we do not
- // need to compile anything else.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- // Do not duplicate conditions that may have function literal
- // subexpressions. This can cause us to compile the function literal
- // twice.
- bool test_at_bottom = !node->may_have_function_literal();
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- IncrementLoopNesting();
-
- // Target for backward edge if no test at the bottom, otherwise
- // unused.
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
- // Target for backward edge if there is a test at the bottom,
- // otherwise used as target for test at the top.
- JumpTarget body;
- if (test_at_bottom) {
- body.set_direction(JumpTarget::BIDIRECTIONAL);
- }
-
- // Based on the condition analysis, compile the test as necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // We will not compile the test expression. Label the top of the
- // loop.
- if (node->next() == NULL) {
- // Use the continue target if there is no update expression.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- // Otherwise use the backward loop target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- loop.Bind();
- }
- break;
- case DONT_KNOW: {
- if (test_at_bottom) {
- // Continue is either the update expression or the test at the
- // bottom, no need to label the test at the top.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else if (node->next() == NULL) {
- // We are not recompiling the test at the bottom and there is no
- // update expression.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- // We are not recompiling the test at the bottom and there is an
- // update expression.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- loop.Bind();
- }
-
- // Compile the test with the body as the true target and preferred
- // fall-through and with the break target as the false target.
- ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // If we got the break target as fall-through, the test may have
- // been unconditionally false (if there are no jumps to the
- // body).
- if (!body.is_linked()) {
- DecrementLoopNesting();
- return;
- }
-
- // Otherwise, jump around the body on the fall through and then
- // bind the body target.
- node->break_target()->Unuse();
- node->break_target()->Jump();
- body.Bind();
- }
- break;
- }
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
-
- // We know that the loop index is a smi if it is not modified in the
- // loop body and it is checked against a constant limit in the loop
- // condition. In this case, we reset the static type information of the
- // loop index to smi before compiling the body, the update expression, and
- // the bottom check of the loop condition.
- if (node->is_fast_smi_loop()) {
- // Set number type of the loop variable to smi.
- SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
- }
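-
- // Illustrative example (not from the original source): a loop such as
- //   for (var i = 0; i < 100; i++) { total += i; }
- // should qualify as a fast smi loop, since the index is written only
- // by the update expression and is compared against a constant limit.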
-
- Visit(node->body());
-
- // If there is an update expression, compile it if necessary.
- if (node->next() != NULL) {
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
-
- // Control can reach the update by falling out of the body or by a
- // continue.
- if (has_valid_frame()) {
- // Record the source position of the statement, since this code,
- // which comes after the code for the body, actually belongs to the
- // loop statement and not to the body.
- CodeForStatementPosition(node);
- Visit(node->next());
- }
- }
-
- // Set the type of the loop variable to smi before compiling the test
- // expression if we are in a fast smi loop condition.
- if (node->is_fast_smi_loop() && has_valid_frame()) {
- // Set number type of the loop variable to smi.
- SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
- }
-
- // Based on the condition analysis, compile the backward jump as
- // necessary.
- switch (info) {
- case ALWAYS_TRUE:
- if (has_valid_frame()) {
- if (node->next() == NULL) {
- node->continue_target()->Jump();
- } else {
- loop.Jump();
- }
- }
- break;
- case DONT_KNOW:
- if (test_at_bottom) {
- if (node->continue_target()->is_linked()) {
- // We can have dangling jumps to the continue target if there
- // was no update expression.
- node->continue_target()->Bind();
- }
- // Control can reach the test at the bottom by falling out of
- // the body, by a continue in the body, or from the update
- // expression.
- if (has_valid_frame()) {
- // The break target is the fall-through (body is a backward
- // jump from here).
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- } else {
- // Otherwise, jump back to the test at the top.
- if (has_valid_frame()) {
- if (node->next() == NULL) {
- node->continue_target()->Jump();
- } else {
- loop.Jump();
- }
- }
- }
- break;
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- // The break target may be already bound (by the condition), or there
- // may not be a valid frame. Bind it only if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
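-
- // Illustrative examples (assumed, not from the original source) of the
- // condition analysis above:
- //   for (;;) {}        // no condition: treated as ALWAYS_TRUE
- //   for (; false;) {}  // ALWAYS_FALSE: only the init is compiled
- //   for (; i < n;) {}  // DONT_KNOW: test compiled at the top, and
- //                      // again at the bottom unless it may contain
- //                      // a function literal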
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ ForInStatement");
- CodeForStatementPosition(node);
-
- JumpTarget primitive;
- JumpTarget jsobject;
- JumpTarget fixed_array;
- JumpTarget entry(JumpTarget::BIDIRECTIONAL);
- JumpTarget end_del_check;
- JumpTarget exit;
-
- // Get the object to enumerate over (converted to JSObject).
- LoadAndSpill(node->enumerable());
-
- // Both SpiderMonkey and kjs ignore null and undefined, in contrast
- // to the specification; section 12.6.4 mandates a call to ToObject.
- frame_->EmitPop(eax);
-
- // eax: value to be iterated over
- __ cmp(eax, Factory::undefined_value());
- exit.Branch(equal);
- __ cmp(eax, Factory::null_value());
- exit.Branch(equal);
-
- // Stack layout in body:
- // [iteration counter (smi)] <- slot 0
- // [length of array] <- slot 1
- // [FixedArray] <- slot 2
- // [Map or 0] <- slot 3
- // [Object] <- slot 4
-
- // Check if enumerable is already a JSObject
- // eax: value to be iterated over
- __ test(eax, Immediate(kSmiTagMask));
- primitive.Branch(zero);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- jsobject.Branch(above_equal);
-
- primitive.Bind();
- frame_->EmitPush(eax);
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
- // The function call returns the value in eax, which is where we want
- // it below.
-
- jsobject.Bind();
- // Get the set of properties (as a FixedArray or Map).
- // eax: value to be iterated over
- frame_->EmitPush(eax); // Push the object being iterated over.
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- JumpTarget call_runtime;
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- JumpTarget check_prototype;
- JumpTarget use_cache;
- __ mov(ecx, eax);
- loop.Bind();
- // Check that there are no elements.
- __ mov(edx, FieldOperand(ecx, JSObject::kElementsOffset));
- __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array()));
- call_runtime.Branch(not_equal);
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in ebx for the subsequent
- // prototype load.
- __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
- __ cmp(Operand(edx), Immediate(Factory::empty_descriptor_array()));
- call_runtime.Branch(equal);
- // Check that there is an enum cache in the non-empty instance
- // descriptors. This is the case if the next enumeration index
- // field does not contain a smi.
- __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
- __ test(edx, Immediate(kSmiTagMask));
- call_runtime.Branch(zero);
- // For all objects but the receiver, check that the cache is empty.
- __ cmp(ecx, Operand(eax));
- check_prototype.Branch(equal);
- __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array()));
- call_runtime.Branch(not_equal);
- check_prototype.Bind();
- // Load the prototype from the map and loop if non-null.
- __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- __ cmp(Operand(ecx), Immediate(Factory::null_value()));
- loop.Branch(not_equal);
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
- use_cache.Jump();
-
- call_runtime.Bind();
- // Call the runtime to get the property names for the object.
- frame_->EmitPush(eax); // push the Object (slot 4) for the runtime call
- frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- // eax: map or fixed array (result from call to
- // Runtime::kGetPropertyNamesFast)
- __ mov(edx, Operand(eax));
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ecx, Factory::meta_map());
- fixed_array.Branch(not_equal);
-
- use_cache.Bind();
- // Get enum cache
- // eax: map (either the result from a call to
- // Runtime::kGetPropertyNamesFast or has been fetched directly from
- // the object)
- __ mov(ecx, Operand(eax));
-
- __ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset));
- // Get the bridge array held in the enumeration index field.
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
- // Get the cache from the bridge array.
- __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- frame_->EmitPush(eax); // <- slot 3
- frame_->EmitPush(edx); // <- slot 2
- __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
- frame_->EmitPush(eax); // <- slot 1
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
- entry.Jump();
-
- fixed_array.Bind();
- // eax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 3
- frame_->EmitPush(eax); // <- slot 2
-
- // Push the length of the array and the initial index onto the stack.
- __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
- frame_->EmitPush(eax); // <- slot 1
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
-
- // Condition.
- entry.Bind();
- // Grab the current frame's height for the break and continue
- // targets only after all the state is pushed on the frame.
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
- __ mov(eax, frame_->ElementAt(0)); // load the current count
- __ cmp(eax, frame_->ElementAt(1)); // compare to the array length
- node->break_target()->Branch(above_equal);
-
- // Get the i'th entry of the array.
- __ mov(edx, frame_->ElementAt(2));
- __ mov(ebx, FixedArrayElementOperand(edx, eax));
-
- // Get the expected map from the stack or a zero map in the
- // permanent slow case.
- // eax: current iteration count
- // ebx: i'th entry of the enum cache
- __ mov(edx, frame_->ElementAt(3));
- // Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
- // eax: current iteration count
- // ebx: i'th entry of the enum cache
- // edx: expected map value
- __ mov(ecx, frame_->ElementAt(4));
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ cmp(ecx, Operand(edx));
- end_del_check.Branch(equal);
-
- // Convert the entry to a string (or null if it isn't a property anymore).
- frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
- frame_->EmitPush(ebx); // push entry
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
- __ mov(ebx, Operand(eax));
-
- // If the property has been removed while iterating, we just skip it.
- __ test(ebx, Operand(ebx));
- node->continue_target()->Branch(equal);
-
- end_del_check.Bind();
- // Store the entry in the 'each' expression and take another spin in
- // the loop.
- // ebx: i'th entry of the enum cache (or string thereof)
- frame_->EmitPush(ebx);
- { Reference each(this, node->each());
- if (!each.is_illegal()) {
- if (each.size() > 0) {
- // Loading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
- // Get the value (under the reference on the stack) from memory.
- frame_->EmitPush(frame_->ElementAt(each.size()));
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop(2);
- } else {
- // If the reference was to a slot, we rely on the convenient property
- // that it doesn't matter whether a value (e.g., ebx pushed above) is
- // right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop();
- }
- }
- }
- // Unloading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
-
- // Body.
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- VisitAndSpill(node->body());
-
- // Next. Reestablish a spilled frame in case we are coming here via
- // a continue in the body.
- node->continue_target()->Bind();
- frame_->SpillAll();
- frame_->EmitPop(eax);
- __ add(Operand(eax), Immediate(Smi::FromInt(1)));
- frame_->EmitPush(eax);
- entry.Jump();
-
- // Cleanup. No need to spill because VirtualFrame::Drop is safe for
- // any frame.
- node->break_target()->Bind();
- frame_->Drop(5);
-
- // Exit.
- exit.Bind();
-
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
-}
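-
- // Illustrative sketch (not from the original source): for
- //   for (var key in obj) { use(key); }
- // the code above first attempts the enum cache fast case, walking
- // obj's prototype chain to validate it, and otherwise falls back to
- // Runtime::kGetPropertyNamesFast. A key deleted during iteration no
- // longer passes the map check and is filtered out via FILTER_KEY.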
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ TryCatchStatement");
- CodeForStatementPosition(node);
-
- JumpTarget try_block;
- JumpTarget exit;
-
- try_block.Call();
- // --- Catch block ---
- frame_->EmitPush(eax);
-
- // Store the caught exception in the catch variable.
- Variable* catch_var = node->catch_var()->var();
- ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
- StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
-
- // Remove the exception from the stack.
- frame_->Drop();
-
- VisitStatementsAndSpill(node->catch_block()->statements());
- if (has_valid_frame()) {
- exit.Jump();
- }
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_CATCH_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the jump targets for all escapes from the try block, including
- // returns. During shadowing, the original target is hidden as the
- // ShadowTarget and operations on the original actually affect the
- // shadowing target.
- //
- // We should probably try to unify the escaping targets and the return
- // target.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original targets are unshadowed and the
- // ShadowTargets represent the formerly shadowing targets.
- bool has_unlinks = false;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- has_unlinks = has_unlinks || shadows[i]->is_linked();
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
-
- // Make sure that there's nothing left on the stack above the
- // handler structure.
- if (FLAG_debug_code) {
- __ mov(eax, Operand::StaticVariable(handler_address));
- __ cmp(esp, Operand(eax));
- __ Assert(equal, "stack pointer should point to top handler");
- }
-
- // If we can fall off the end of the try block, unlink from try chain.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame. Unlink from
- // the handler list and drop the rest of this handler from the
- // frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(Operand::StaticVariable(handler_address));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- if (has_unlinks) {
- exit.Jump();
- }
- }
-
- // Generate unlink code for the (formerly) shadowing targets that
- // have been jumped to. Deallocate each shadow target.
- Result return_value;
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // Unlink from try chain; be careful not to destroy the TOS if
- // there is one.
- if (i == kReturnShadowIndex) {
- shadows[i]->Bind(&return_value);
- return_value.ToRegister(eax);
- } else {
- shadows[i]->Bind();
- }
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that we
- // break from (e.g., for...in) may have left stuff on the stack.
- __ mov(esp, Operand::StaticVariable(handler_address));
- frame_->Forget(frame_->height() - handler_height);
-
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(Operand::StaticVariable(handler_address));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- if (!function_return_is_shadowed_) frame_->PrepareForReturn();
- shadows[i]->other_target()->Jump(&return_value);
- } else {
- shadows[i]->other_target()->Jump();
- }
- }
- }
-
- exit.Bind();
-}
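-
- // Illustrative sketch (not from the original source): in
- //   try { return g(); } catch (e) { return 0; }
- // the return inside the try block jumps to a shadowing target, which
- // lets the code above unlink the try handler before the actual
- // function return is performed.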
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ TryFinallyStatement");
- CodeForStatementPosition(node);
-
- // State: Used to keep track of reason for entering the finally
- // block. Should probably be extended to hold information for
- // break/continue from within the try block.
- enum { FALLING, THROWING, JUMPING };
-
- JumpTarget try_block;
- JumpTarget finally_block;
-
- try_block.Call();
-
- frame_->EmitPush(eax);
- // In case of thrown exceptions, this is where we continue.
- __ Set(ecx, Immediate(Smi::FromInt(THROWING)));
- finally_block.Jump();
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_FINALLY_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the jump targets for all escapes from the try block, including
- // returns. During shadowing, the original target is hidden as the
- // ShadowTarget and operations on the original actually affect the
- // shadowing target.
- //
- // We should probably try to unify the escaping targets and the return
- // target.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original targets are unshadowed and the
- // ShadowTargets represent the formerly shadowing targets.
- int nof_unlinks = 0;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- if (shadows[i]->is_linked()) nof_unlinks++;
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
-
- // If we can fall off the end of the try block, unlink from the try
- // chain and set the state on the frame to FALLING.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(Operand::StaticVariable(handler_address));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- // Fake a top of stack value (unneeded when FALLING) and set the
- // state in ecx, then jump around the unlink blocks if any.
- frame_->EmitPush(Immediate(Factory::undefined_value()));
- __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
- if (nof_unlinks > 0) {
- finally_block.Jump();
- }
- }
-
- // Generate code to unlink and set the state for the (formerly)
- // shadowing targets that have been jumped to.
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // If we have come from the shadowed return, the return value is
- // on the virtual frame. We must preserve it until it is
- // pushed.
- if (i == kReturnShadowIndex) {
- Result return_value;
- shadows[i]->Bind(&return_value);
- return_value.ToRegister(eax);
- } else {
- shadows[i]->Bind();
- }
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that
- // we break from (e.g., for...in) may have left stuff on the
- // stack.
- __ mov(esp, Operand::StaticVariable(handler_address));
- frame_->Forget(frame_->height() - handler_height);
-
- // Unlink this handler and drop it from the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(Operand::StaticVariable(handler_address));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- // If this target shadowed the function return, materialize
- // the return value on the stack.
- frame_->EmitPush(eax);
- } else {
- // Fake TOS for targets that shadowed breaks and continues.
- frame_->EmitPush(Immediate(Factory::undefined_value()));
- }
- __ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
- if (--nof_unlinks > 0) {
- // If this is not the last unlink block, jump around the next.
- finally_block.Jump();
- }
- }
- }
-
- // --- Finally block ---
- finally_block.Bind();
-
- // Push the state on the stack.
- frame_->EmitPush(ecx);
-
- // We keep two elements on the stack - the (possibly faked) result
- // and the state - while evaluating the finally block.
- //
- // Generate code for the statements in the finally block.
- VisitStatementsAndSpill(node->finally_block()->statements());
-
- if (has_valid_frame()) {
- // Restore state and return value or faked TOS.
- frame_->EmitPop(ecx);
- frame_->EmitPop(eax);
- }
-
- // Generate code to jump to the right destination for all used
- // formerly shadowing targets. Deallocate each shadow target.
- for (int i = 0; i < shadows.length(); i++) {
- if (has_valid_frame() && shadows[i]->is_bound()) {
- BreakTarget* original = shadows[i]->other_target();
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(JUMPING + i)));
- if (i == kReturnShadowIndex) {
- // The return value is (already) in eax.
- Result return_value = allocator_->Allocate(eax);
- ASSERT(return_value.is_valid());
- if (function_return_is_shadowed_) {
- original->Branch(equal, &return_value);
- } else {
- // Branch around the preparation for return which may emit
- // code.
- JumpTarget skip;
- skip.Branch(not_equal);
- frame_->PrepareForReturn();
- original->Jump(&return_value);
- skip.Bind();
- }
- } else {
- original->Branch(equal);
- }
- }
- }
-
- if (has_valid_frame()) {
- // Check if we need to rethrow the exception.
- JumpTarget exit;
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
- exit.Branch(not_equal);
-
- // Rethrow exception.
- frame_->EmitPush(eax); // undo pop from above
- frame_->CallRuntime(Runtime::kReThrow, 1);
-
- // Done.
- exit.Bind();
- }
-}
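-
- // Illustrative sketch (not from the original source): for
- //   try { return g(); } finally { cleanup(); }
- // the finally block is entered with ecx == JUMPING + kReturnShadowIndex
- // and the return value repushed as TOS; after the finally code runs,
- // the dispatch above completes the interrupted return, while
- // ecx == THROWING rethrows the pending exception instead.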
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ DebuggerStatement");
- CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Spill everything, even constants, to the frame.
- frame_->SpillAll();
-
- frame_->DebugBreak();
- // Ignore the return value.
-#endif
-}
-
-
-Result CodeGenerator::InstantiateFunction(
- Handle<SharedFunctionInfo> function_info,
- bool pretenure) {
- // The inevitable call will sync frame elements to memory anyway, so
- // we do it eagerly to allow us to push the arguments directly into
- // place.
- frame()->SyncRange(0, frame()->element_count() - 1);
-
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- if (scope()->is_function_scope() &&
- function_info->num_literals() == 0 &&
- !pretenure) {
- FastNewClosureStub stub;
- frame()->EmitPush(Immediate(function_info));
- return frame()->CallStub(&stub, 1);
- } else {
- // Call the runtime to instantiate the function based on the
- // shared function info.
- frame()->EmitPush(esi);
- frame()->EmitPush(Immediate(function_info));
- frame()->EmitPush(Immediate(pretenure
- ? Factory::true_value()
- : Factory::false_value()));
- return frame()->CallRuntime(Runtime::kNewClosure, 3);
- }
-}
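-
- // Illustrative example (assumed, not from the original source): the
- // inner function in
- //   function outer() { return function (a) { return a + 1; }; }
- // is a nested function without literals, so its closure can be
- // allocated by FastNewClosureStub; pretenured closures and functions
- // with literals go through Runtime::kNewClosure instead.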
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
- Comment cmnt(masm_, "[ FunctionLiteral");
- ASSERT(!in_safe_int32_mode());
- // Build the function info and instantiate it.
- Handle<SharedFunctionInfo> function_info =
- Compiler::BuildFunctionInfo(node, script());
- // Check for stack-overflow exception.
- if (function_info.is_null()) {
- SetStackOverflow();
- return;
- }
- Result result = InstantiateFunction(function_info, node->pretenure());
- frame()->Push(&result);
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
- Result result = InstantiateFunction(node->shared_function_info(), false);
- frame()->Push(&result);
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
- Comment cmnt(masm_, "[ Conditional");
- ASSERT(!in_safe_int32_mode());
- JumpTarget then;
- JumpTarget else_;
- JumpTarget exit;
- ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The else target was bound, so we compile the else part first.
- Load(node->else_expression());
-
- if (then.is_linked()) {
- exit.Jump();
- then.Bind();
- Load(node->then_expression());
- }
- } else {
- // The then target was bound, so we compile the then part first.
- Load(node->then_expression());
-
- if (else_.is_linked()) {
- exit.Jump();
- else_.Bind();
- Load(node->else_expression());
- }
- }
-
- exit.Bind();
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
- JumpTarget slow;
- JumpTarget done;
- Result value;
-
- // Generate fast case for loading from slots that correspond to
- // local/global variables or arguments unless they are shadowed by
- // eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(slot,
- typeof_state,
- &value,
- &slow,
- &done);
-
- slow.Bind();
- // A runtime call is inevitable. We eagerly sync frame elements
- // to memory so that we can push the arguments directly into place
- // on top of the frame.
- frame()->SyncRange(0, frame()->element_count() - 1);
- frame()->EmitPush(esi);
- frame()->EmitPush(Immediate(slot->var()->name()));
- if (typeof_state == INSIDE_TYPEOF) {
- value =
- frame()->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- } else {
- value = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
- }
-
- done.Bind(&value);
- frame_->Push(&value);
-
- } else if (slot->var()->mode() == Variable::CONST) {
- // Const slots may contain 'the hole' value (the constant hasn't been
- // initialized yet) which needs to be converted into the 'undefined'
- // value.
- //
- // We currently spill the virtual frame because constants use the
- // potentially unsafe direct-frame access of SlotOperand.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Load const");
- Label exit;
- __ mov(ecx, SlotOperand(slot, ecx));
- __ cmp(ecx, Factory::the_hole_value());
- __ j(not_equal, &exit);
- __ mov(ecx, Factory::undefined_value());
- __ bind(&exit);
- frame()->EmitPush(ecx);
-
- } else if (slot->type() == Slot::PARAMETER) {
- frame()->PushParameterAt(slot->index());
-
- } else if (slot->type() == Slot::LOCAL) {
- frame()->PushLocalAt(slot->index());
-
- } else {
- // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
- // here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because it will always be a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
- frame()->Push(&temp);
- }
-}
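-
- // Illustrative example (assumed, not from the original source): in
- //   var x = c; const c = 1;
- // the read of c executes before the const initialization, so the slot
- // still holds the hole and the code above pushes 'undefined' instead.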
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
- TypeofState state) {
- LoadFromSlot(slot, state);
-
- // Bail out quickly if we're not using lazy arguments allocation.
- if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
- // ... or if the slot isn't a non-parameter arguments slot.
- if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
- // If the loaded value is a constant, we know whether the arguments
- // object has been lazily allocated yet.
- Result result = frame()->Pop();
- if (result.is_constant()) {
- if (result.handle()->IsArgumentsMarker()) {
- result = StoreArgumentsObject(false);
- }
- frame()->Push(&result);
- return;
- }
- ASSERT(result.is_register());
- // The loaded value is in a register. If it is the sentinel that
- // indicates that we haven't loaded the arguments object yet, we
- // need to do it now.
- JumpTarget exit;
- __ cmp(Operand(result.reg()), Immediate(Factory::arguments_marker()));
- frame()->Push(&result);
- exit.Branch(not_equal);
-
- result = StoreArgumentsObject(false);
- frame()->SetElementAt(0, &result);
- result.Unuse();
- exit.Bind();
- return;
-}
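-
- // Illustrative sketch (not from the original source): under lazy
- // arguments allocation, in
- //   function f() { return arguments.length; }
- // the arguments slot initially holds the arguments marker; the check
- // above detects the marker and calls StoreArgumentsObject to build
- // the arguments object on first use.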
-
-
-Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
- Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow) {
- ASSERT(!in_safe_int32_mode());
- // Check that no extension objects have been created by calls to
- // eval from the current scope to the global scope.
- Register context = esi;
- Result tmp = allocator_->Allocate();
- ASSERT(tmp.is_valid()); // All non-reserved registers were available.
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
- }
- // Load next context in chain.
- __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s != NULL && s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- Label next, fast;
- if (!context.is(tmp.reg())) {
- __ mov(tmp.reg(), context);
- }
- __ bind(&next);
- // Terminate at global context.
- __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
- Immediate(Factory::global_context_map()));
- __ j(equal, &fast);
- // Check that extension is NULL.
- __ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal, not_taken);
- // Load next context in chain.
- __ mov(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
- __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- __ jmp(&next);
- __ bind(&fast);
- }
- tmp.Unuse();
-
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- // The register allocator prefers eax if it is free, so the code generator
- // will load the global object directly into eax, which is where the LoadIC
- // expects it.
- frame_->Spill(eax);
- LoadGlobal();
- frame_->Push(slot->var()->name());
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Result answer = frame_->CallLoadIC(mode);
- // A test eax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test eax
- // instruction here.
- __ nop();
- return answer;
-}
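-
- // Illustrative sketch (not from the original source): in
- //   function f(s) { eval(s); return x; }
- // the eval may introduce a binding for x, so the fast global load is
- // only valid after verifying that no context on the chain has acquired
- // an extension object; any non-NULL extension branches to the slow
- // path.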
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- Result* result,
- JumpTarget* slow,
- JumpTarget* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
- done->Jump(result);
-
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
- Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
- if (potential_slot != NULL) {
- // Generate fast case for locals that rewrite to slots.
- // Allocate a fresh register to use as a temp in
- // ContextSlotOperandCheckExtensions and to hold the result
- // value.
- *result = allocator()->Allocate();
- ASSERT(result->is_valid());
- __ mov(result->reg(),
- ContextSlotOperandCheckExtensions(potential_slot, *result, slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ cmp(result->reg(), Factory::the_hole_value());
- done->Branch(not_equal, result);
- __ mov(result->reg(), Factory::undefined_value());
- }
- done->Jump(result);
- } else if (rewrite != NULL) {
- // Generate fast case for calls of an argument function.
- Property* property = rewrite->AsProperty();
- if (property != NULL) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- Literal* key_literal = property->key()->AsLiteral();
- if (obj_proxy != NULL &&
- key_literal != NULL &&
- obj_proxy->IsArguments() &&
- key_literal->handle()->IsSmi()) {
- // Load arguments object if there are no eval-introduced
- // variables. Then load the argument from the arguments
- // object using keyed load.
- Result arguments = allocator()->Allocate();
- ASSERT(arguments.is_valid());
- __ mov(arguments.reg(),
- ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
- arguments,
- slow));
- frame_->Push(&arguments);
- frame_->Push(key_literal->handle());
- *result = EmitKeyedLoad();
- done->Jump(result);
- }
- }
- }
- }
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- // For now, just do a runtime call. Since the call is inevitable,
- // we eagerly sync the virtual frame so we can directly push the
- // arguments into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(slot->var()->name()));
-
- Result value;
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores attribute
- // (e.g. READ_ONLY) of context slot so that we can initialize const
- // properties (introduced via eval("const foo = (some expr);")). Also,
- // uses the current function context instead of the top context.
- //
- // Note that we must declare foo upon entry of eval(), via a
- // context slot declaration, but we cannot initialize it at the same
- // time, because the const declaration may be at the end of the eval
- // code (sigh...) and the const variable may have been used before
- // (where its value is 'undefined'). Thus, we can only do the
- // initialization when we actually encounter the expression and when
- // the expression operands are defined and valid, and thus we need the
- // split into two operations: declaration of the context slot followed
- // by initialization.
- value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- frame_->Push(Smi::FromInt(strict_mode_flag()));
- value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling chained assignment
- // expressions.
- frame_->Push(&value);
-
- } else {
- ASSERT(!slot->var()->is_dynamic());
-
- JumpTarget exit;
- if (init_state == CONST_INIT) {
- ASSERT(slot->var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is executed,
- // the code is identical to a normal store (see below).
- //
- // We spill the frame in the code below because the direct-frame
- // access of SlotOperand is potentially unsafe with an unspilled
- // frame.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Init const");
- __ mov(ecx, SlotOperand(slot, ecx));
- __ cmp(ecx, Factory::the_hole_value());
- exit.Branch(not_equal);
- }
-
- // We must execute the store. Storing a variable must keep the (new)
- // value on the stack. This is necessary for compiling assignment
- // expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST, because const declarations initialize consts to
- // 'the hole' value and, by doing so, end up calling this code.
- if (slot->type() == Slot::PARAMETER) {
- frame_->StoreToParameterAt(slot->index());
- } else if (slot->type() == Slot::LOCAL) {
- frame_->StoreToLocalAt(slot->index());
- } else {
- // The other slot types (LOOKUP and GLOBAL) cannot reach here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because the slot is a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- frame_->Dup();
- Result value = frame_->Pop();
- value.ToRegister();
- Result start = allocator_->Allocate();
- ASSERT(start.is_valid());
- __ mov(SlotOperand(slot, start.reg()), value.reg());
- // RecordWrite may destroy the value registers.
- //
- // TODO(204): Avoid actually spilling when the value is not
- // needed (probably the common case).
- frame_->Spill(value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
- // The results start, value, and temp are released by going out of
- // scope.
- }
-
- exit.Bind();
- }
-}
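-
- // Illustrative example (assumed, not from the original source): in
- //   eval("var x = foo; const foo = 1;")
- // foo is declared as a context slot holding the hole on entry to the
- // eval code; the CONST_INIT store above then replaces the hole exactly
- // once, and the hole check skips any repeated initialization.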
-
-
-void CodeGenerator::VisitSlot(Slot* slot) {
- Comment cmnt(masm_, "[ Slot");
- if (in_safe_int32_mode()) {
- if ((slot->type() == Slot::LOCAL && !slot->is_arguments())) {
- frame()->UntaggedPushLocalAt(slot->index());
- } else if (slot->type() == Slot::PARAMETER) {
- frame()->UntaggedPushParameterAt(slot->index());
- } else {
- UNREACHABLE();
- }
- } else {
- LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- }
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
- Comment cmnt(masm_, "[ VariableProxy");
- Variable* var = node->var();
- Expression* expr = var->rewrite();
- if (expr != NULL) {
- Visit(expr);
- } else {
- ASSERT(var->is_global());
- ASSERT(!in_safe_int32_mode());
- Reference ref(this, node);
- ref.GetValue();
- }
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
- Comment cmnt(masm_, "[ Literal");
- if (frame_->ConstantPoolOverflowed()) {
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- if (in_safe_int32_mode()) {
- temp.set_untagged_int32(true);
- }
- __ Set(temp.reg(), Immediate(node->handle()));
- frame_->Push(&temp);
- } else {
- if (in_safe_int32_mode()) {
- frame_->PushUntaggedElement(node->handle());
- } else {
- frame_->Push(node->handle());
- }
- }
-}
-
-
-void CodeGenerator::PushUnsafeSmi(Handle<Object> value) {
- ASSERT(value->IsSmi());
- int bits = reinterpret_cast<int>(*value);
- __ push(Immediate(bits ^ jit_cookie_));
- __ xor_(Operand(esp, 0), Immediate(jit_cookie_));
-}
-
-
-void CodeGenerator::StoreUnsafeSmiToLocal(int offset, Handle<Object> value) {
- ASSERT(value->IsSmi());
- int bits = reinterpret_cast<int>(*value);
- __ mov(Operand(ebp, offset), Immediate(bits ^ jit_cookie_));
- __ xor_(Operand(ebp, offset), Immediate(jit_cookie_));
-}
-
-
-void CodeGenerator::MoveUnsafeSmi(Register target, Handle<Object> value) {
- ASSERT(target.is_valid());
- ASSERT(value->IsSmi());
- int bits = reinterpret_cast<int>(*value);
- __ Set(target, Immediate(bits ^ jit_cookie_));
- __ xor_(target, jit_cookie_);
-}
-
-
-bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
- if (!value->IsSmi()) return false;
- int int_value = Smi::cast(*value)->value();
- return !is_intn(int_value, kMaxSmiInlinedBits);
-}
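-
- // A note on the masking above (an observation, not from the original
- // source): it relies on (bits ^ jit_cookie_) ^ jit_cookie_ == bits, so
- // the smi is reconstructed in place while its raw bits never appear
- // verbatim as an immediate in the generated code.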
-
-
-// Materialize the regexp literal 'node' in the literals array
-// 'literals' of the function. Leave the regexp boilerplate in
-// 'boilerplate'.
-class DeferredRegExpLiteral: public DeferredCode {
- public:
- DeferredRegExpLiteral(Register boilerplate,
- Register literals,
- RegExpLiteral* node)
- : boilerplate_(boilerplate), literals_(literals), node_(node) {
- set_comment("[ DeferredRegExpLiteral");
- }
-
- void Generate();
-
- private:
- Register boilerplate_;
- Register literals_;
- RegExpLiteral* node_;
-};
-
-
-void DeferredRegExpLiteral::Generate() {
- // Since the entry is undefined we call the runtime system to
- // compute the literal.
- // Literal array (0).
- __ push(literals_);
- // Literal index (1).
- __ push(Immediate(Smi::FromInt(node_->literal_index())));
- // RegExp pattern (2).
- __ push(Immediate(node_->pattern()));
- // RegExp flags (3).
- __ push(Immediate(node_->flags()));
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
-}
-
-
-class DeferredAllocateInNewSpace: public DeferredCode {
- public:
- DeferredAllocateInNewSpace(int size,
- Register target,
- int registers_to_save = 0)
- : size_(size), target_(target), registers_to_save_(registers_to_save) {
- ASSERT(size >= kPointerSize && size <= Heap::MaxObjectSizeInNewSpace());
- ASSERT_EQ(0, registers_to_save & target.bit());
- set_comment("[ DeferredAllocateInNewSpace");
- }
- void Generate();
-
- private:
- int size_;
- Register target_;
- int registers_to_save_;
-};
-
-
-void DeferredAllocateInNewSpace::Generate() {
- for (int i = 0; i < kNumRegs; i++) {
- if (registers_to_save_ & (1 << i)) {
- Register save_register = { i };
- __ push(save_register);
- }
- }
- __ push(Immediate(Smi::FromInt(size_)));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- if (!target_.is(eax)) {
- __ mov(target_, eax);
- }
- for (int i = kNumRegs - 1; i >= 0; i--) {
- if (registers_to_save_ & (1 << i)) {
- Register save_register = { i };
- __ pop(save_register);
- }
- }
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ RegExp Literal");
-
- // Retrieve the literals array and check the allocated entry. Begin
- // with a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ mov(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- Result boilerplate = allocator_->Allocate();
- ASSERT(boilerplate.is_valid());
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
- // Check whether we need to materialize the RegExp object. If so,
- // jump to the deferred code passing the literals array.
- DeferredRegExpLiteral* deferred =
- new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
- __ cmp(boilerplate.reg(), Factory::undefined_value());
- deferred->Branch(equal);
- deferred->BindExit();
-
- // The boilerplate register now contains the RegExp object.
-
- Result tmp = allocator()->Allocate();
- ASSERT(tmp.is_valid());
-
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-
- DeferredAllocateInNewSpace* allocate_fallback =
- new DeferredAllocateInNewSpace(size, literals.reg());
- frame_->Push(&boilerplate);
- frame_->SpillTop();
- __ AllocateInNewSpace(size,
- literals.reg(),
- tmp.reg(),
- no_reg,
- allocate_fallback->entry_label(),
- TAG_OBJECT);
- allocate_fallback->BindExit();
- boilerplate = frame_->Pop();
- // Copy from boilerplate to clone and return clone.
-
- for (int i = 0; i < size; i += kPointerSize) {
- __ mov(tmp.reg(), FieldOperand(boilerplate.reg(), i));
- __ mov(FieldOperand(literals.reg(), i), tmp.reg());
- }
- frame_->Push(&literals);
-}
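-
- // Illustrative example (assumed, not from the original source): for
- //   var re = /ab+c/i;
- // the boilerplate RegExp is materialized at most once per literal site
- // (via the deferred code above), and each evaluation of the literal
- // then copies it field by field into a freshly allocated object.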
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ ObjectLiteral");
-
- // Load a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ mov(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
- // Literal array.
- frame_->Push(&literals);
- // Literal index.
- frame_->Push(Smi::FromInt(node->literal_index()));
- // Constant properties.
- frame_->Push(node->constant_properties());
- // Should the object literal have fast elements?
- frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
- Result clone;
- if (node->depth() > 1) {
- clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
- clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- }
- frame_->Push(&clone);
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- node->CalculateEmitStore();
-
- for (int i = 0; i < node->properties()->length(); i++) {
- ObjectLiteral::Property* property = node->properties()->at(i);
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- break;
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
- // else fall through.
- case ObjectLiteral::Property::COMPUTED: {
- Handle<Object> key(property->key()->handle());
- if (key->IsSymbol()) {
- // Duplicate the object as the IC receiver.
- frame_->Dup();
- Load(property->value());
- if (property->emit_store()) {
- Result ignored =
- frame_->CallStoreIC(Handle<String>::cast(key), false,
- strict_mode_flag());
- // A test eax instruction following the store IC call would
- // indicate the presence of an inlined version of the
- // store. Add a nop to indicate that there is no such
- // inlined version.
- __ nop();
- } else {
- frame_->Drop(2);
- }
- break;
- }
- // Fall through
- }
- case ObjectLiteral::Property::PROTOTYPE: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- Load(property->value());
- if (property->emit_store()) {
- frame_->Push(Smi::FromInt(NONE)); // PropertyAttributes
- // Ignore the result.
- Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
- } else {
- frame_->Drop(3);
- }
- break;
- }
- case ObjectLiteral::Property::SETTER: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- frame_->Push(Smi::FromInt(1));
- Load(property->value());
- Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- // Ignore the result.
- break;
- }
- case ObjectLiteral::Property::GETTER: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- frame_->Push(Smi::FromInt(0));
- Load(property->value());
- Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- // Ignore the result.
- break;
- }
- default: UNREACHABLE();
- }
- }
-}
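-
- // Illustrative example (assumed, not from the original source): in
- //   var o = { x: 1, x: 2 };
- // CalculateEmitStore marks the first 'x' as shadowed, so a store is
- // emitted only for the last occurrence of the key.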
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- // Load a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ mov(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- frame_->Push(&literals);
- frame_->Push(Smi::FromInt(node->literal_index()));
- frame_->Push(node->constant_elements());
- int length = node->values()->length();
- Result clone;
- if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
- clone = frame_->CallStub(&stub, 3);
- __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
- } else if (node->depth() > 1) {
- clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
- clone = frame_->CallStub(&stub, 3);
- }
- frame_->Push(&clone);
-
- // Generate code to set the elements in the array that are not
- // literals.
- for (int i = 0; i < length; i++) {
- Expression* value = node->values()->at(i);
-
- if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) {
- continue;
- }
-
- // The property must be set by generated code.
- Load(value);
-
- // Get the property value off the stack.
- Result prop_value = frame_->Pop();
- prop_value.ToRegister();
-
- // Fetch the array literal while leaving a copy on the stack and
- // use it to get the elements array.
- frame_->Dup();
- Result elements = frame_->Pop();
- elements.ToRegister();
- frame_->Spill(elements.reg());
- // Get the elements array.
- __ mov(elements.reg(),
- FieldOperand(elements.reg(), JSObject::kElementsOffset));
-
- // Write to the indexed properties array.
- int offset = i * kPointerSize + FixedArray::kHeaderSize;
- __ mov(FieldOperand(elements.reg(), offset), prop_value.reg());
-
- // Update the write barrier for the array address.
- frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
- }
-}
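-
- // Illustrative example (assumed, not from the original source): a
- // shallow literal with constant elements such as
- //   var a = [1, 2, 3];
- // can share a copy-on-write elements backing store via
- // FastCloneShallowArrayStub, while elements that are not compile-time
- // constants are written explicitly by the loop above, including a
- // write barrier for the elements array.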
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
- ASSERT(!in_safe_int32_mode());
- ASSERT(!in_spilled_code());
- // Call runtime routine to allocate the catch extension object and
- // assign the exception value to the catch variable.
- Comment cmnt(masm_, "[ CatchExtensionObject");
- Load(node->key());
- Load(node->value());
- Result result =
- frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm(), "[ Variable Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL);
- Slot* slot = var->AsSlot();
- ASSERT(slot != NULL);
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- Load(node->value());
-
- // Perform the binary operation.
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- // Construct the implicit binary operation.
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Perform the assignment.
- if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
- CodeForSourcePosition(node->position());
- StoreToSlot(slot,
- node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
- }
- ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm(), "[ Named Property Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
- ASSERT(var == NULL || (prop == NULL && var->is_global()));
-
- // Initialize name and evaluate the receiver sub-expression if necessary. If
- // the receiver is trivial it is not placed on the stack at this point, but
- // loaded whenever actually needed.
- Handle<String> name;
- bool is_trivial_receiver = false;
- if (var != NULL) {
- name = var->name();
- } else {
- Literal* lit = prop->key()->AsLiteral();
- ASSERT_NOT_NULL(lit);
- name = Handle<String>::cast(lit->handle());
- // Do not materialize the receiver on the frame if it is trivial.
- is_trivial_receiver = prop->obj()->IsTrivial();
- if (!is_trivial_receiver) Load(prop->obj());
- }
-
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- // An initialization block consists of assignments of the form
- // expr.x = ..., so this will never be an assignment to a variable;
- // there must be a receiver object.
- ASSERT_EQ(NULL, var);
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else {
- frame()->Dup();
- }
- Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block() && !is_trivial_receiver) {
- frame()->Dup();
- }
-
- // Stack layout:
- // [tos] : receiver (only materialized if non-trivial)
- // [tos+1] : receiver if at the end of an initialization block
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else if (var != NULL) {
- // The LoadIC stub expects the object in eax.
- // Freeing eax causes the code generator to load the global into it.
- frame_->Spill(eax);
- LoadGlobal();
- } else {
- frame()->Dup();
- }
- Result value = EmitNamedLoad(name, var != NULL);
- frame()->Push(&value);
- Load(node->value());
-
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- // Construct the implicit binary operation.
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : receiver (only materialized if non-trivial)
- // [tos+2] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(var == NULL || var->mode() != Variable::CONST);
- ASSERT_NE(Token::INIT_CONST, node->op());
- if (is_trivial_receiver) {
- Result value = frame()->Pop();
- frame()->Push(prop->obj());
- frame()->Push(&value);
- }
- CodeForSourcePosition(node->position());
- bool is_contextual = (var != NULL);
- Result answer = EmitNamedStore(name, is_contextual);
- frame()->Push(&answer);
-
- // Stack layout:
- // [tos] : result
- // [tos+1] : receiver if at the end of an initialization block
-
- if (node->ends_initialization_block()) {
- ASSERT_EQ(NULL, var);
- // The argument to the runtime call is the receiver.
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else {
- // A copy of the receiver is below the value of the assignment. Swap
- // the receiver and the value of the assignment expression.
- Result result = frame()->Pop();
- Result receiver = frame()->Pop();
- frame()->Push(&result);
- frame()->Push(&receiver);
- }
- Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT_EQ(frame()->height(), original_height + 1);
-}
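-
- // Illustrative sketch (not from the original source): an
- // initialization block such as
- //   this.a = 1; this.b = 2; this.c = 3;
- // switches the receiver to slow properties before the first store and
- // back to fast properties after the last one, avoiding the quadratic
- // cost of repeatedly adding fast properties.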
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm_, "[ Keyed Property Assignment");
- Property* prop = node->target()->AsProperty();
- ASSERT_NOT_NULL(prop);
-
- // Evaluate the receiver subexpression.
- Load(prop->obj());
-
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- frame_->Dup();
- Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block()) {
- frame_->Dup();
- }
-
- // Evaluate the key subexpression.
- Load(prop->key());
-
- // Stack layout:
- // [tos] : key
- // [tos+1] : receiver
- // [tos+2] : receiver if at the end of an initialization block
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- // Duplicate receiver and key for loading the current property value.
- frame()->PushElementAt(1);
- frame()->PushElementAt(1);
- Result value = EmitKeyedLoad();
- frame()->Push(&value);
- Load(node->value());
-
- // Perform the binary operation.
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : key
- // [tos+2] : receiver
- // [tos+3] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(node->op() != Token::INIT_CONST);
- CodeForSourcePosition(node->position());
- Result answer = EmitKeyedStore(prop->key()->type());
- frame()->Push(&answer);
-
- // Stack layout:
- // [tos] : result
- // [tos+1] : receiver if at the end of an initialization block
-
- // Change to fast case at the end of an initialization block.
- if (node->ends_initialization_block()) {
- // The argument to the runtime call is the extra copy of the receiver,
- // which is below the value of the assignment. Swap the receiver and
- // the value of the assignment expression.
- Result result = frame()->Pop();
- Result receiver = frame()->Pop();
- frame()->Push(&result);
- frame()->Push(&receiver);
- Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT(frame()->height() == original_height + 1);
-}
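
Both emitters above bracket a run of property stores with Runtime::kToSlowProperties and Runtime::kToFastProperties. A standalone sketch of that protocol, using a hypothetical object model with switchable property representations (the names below are illustrative, not V8's):

#include <map>
#include <string>

// Hypothetical object whose properties can live in a "fast" (fixed-layout)
// or "slow" (dictionary) representation.
struct ObjectModel {
  bool fast_properties = true;
  std::map<std::string, int> properties;
};

void ToSlowProperties(ObjectModel* o) { o->fast_properties = false; }
void ToFastProperties(ObjectModel* o) { o->fast_properties = true; }

// Repeatedly adding properties to a fast-mode object forces one layout
// transition per store -- the quadratic behavior the comments mention.
// Switching to slow mode first makes each store cheap, and one final
// conversion restores the fast representation.
void RunInitializationBlock(ObjectModel* o) {
  ToSlowProperties(o);         // starts_initialization_block
  o->properties["a"] = 1;
  o->properties["b"] = 2;
  o->properties["c"] = 3;
  ToFastProperties(o);         // ends_initialization_block
}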
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
- ASSERT(!in_safe_int32_mode());
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
-
- if (var != NULL && !var->is_global()) {
- EmitSlotAssignment(node);
-
- } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
- (var != NULL && var->is_global())) {
- // Properties whose keys are property names and global variables are
- // treated as named property references. We do not need to consider
- // global 'this' because it is not a valid left-hand side.
- EmitNamedPropertyAssignment(node);
-
- } else if (prop != NULL) {
- // Other properties (including rewritten parameters for a function that
- // uses arguments) are keyed property assignments.
- EmitKeyedPropertyAssignment(node);
-
- } else {
- // Invalid left-hand side.
- Load(node->target());
- Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
- // The runtime call doesn't actually return but the code generator will
- // still generate code and expects a certain frame height.
- frame()->Push(&result);
- }
-
- ASSERT(frame()->height() == original_height + 1);
-}
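
The dispatch above reduces to a small decision function. A sketch with boolean parameters standing in for the AST queries (hypothetical names, mirroring the branches of VisitAssignment):

enum class AssignmentKind { kSlot, kNamedProperty, kKeyedProperty, kInvalid };

AssignmentKind ClassifyTarget(bool is_variable, bool is_global,
                              bool is_property, bool key_is_property_name) {
  if (is_variable && !is_global) return AssignmentKind::kSlot;
  // Named keys and global variables both go through named stores.
  if ((is_property && key_is_property_name) || (is_variable && is_global))
    return AssignmentKind::kNamedProperty;
  if (is_property) return AssignmentKind::kKeyedProperty;
  // e.g. 'f() = 1': still evaluated for side effects, then ReferenceError.
  return AssignmentKind::kInvalid;
}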
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ Throw");
- Load(node->exception());
- Result result = frame_->CallRuntime(Runtime::kThrow, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ Property");
- Reference property(this, node);
- property.GetValue();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ Call");
-
- Expression* function = node->expression();
- ZoneList<Expression*>* args = node->arguments();
-
- // Check if the function is a variable or a property.
- Variable* var = function->AsVariableProxy()->AsVariable();
- Property* property = function->AsProperty();
-
- // ------------------------------------------------------------------------
- // Fast-case: Use inline caching.
- // ---
- // According to ECMA-262, section 11.2.3, page 44, the function to call
- // must be resolved after the arguments have been evaluated. The IC code
- // automatically handles this by loading the arguments before the function
- // is resolved in cache misses (this also holds for megamorphic calls).
- // ------------------------------------------------------------------------
-
- if (var != NULL && var->is_possibly_eval()) {
- // ----------------------------------
- // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
- // ----------------------------------
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
-
- // Prepare the stack for the call to the resolved function.
- Load(function);
-
- // Allocate a frame slot for the receiver.
- frame_->Push(Factory::undefined_value());
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Result to hold the result of the function resolution and the
- // final result of the eval call.
- Result result;
-
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- JumpTarget done;
- if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
- ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
- JumpTarget slow;
- // Prepare the stack for the call to
- // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
- // function, the first argument to the eval call and the
- // receiver.
- Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow);
- frame_->Push(&fun);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(Factory::undefined_value());
- }
- frame_->PushParameterAt(-1);
-
- // Push the strict mode flag.
- frame_->Push(Smi::FromInt(strict_mode_flag()));
-
- // Resolve the call.
- result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
-
- done.Jump(&result);
- slow.Bind();
- }
-
- // Prepare the stack for the call to ResolvePossiblyDirectEval by
- // pushing the loaded function, the first argument to the eval
- // call and the receiver.
- frame_->PushElementAt(arg_count + 1);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(Factory::undefined_value());
- }
- frame_->PushParameterAt(-1);
-
- // Push the strict mode flag.
- frame_->Push(Smi::FromInt(strict_mode_flag()));
-
- // Resolve the call.
- result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
-
- // If we generated fast-case code bind the jump-target where fast
- // and slow case merge.
- if (done.is_linked()) done.Bind(&result);
-
- // The runtime call returns a pair of values in eax (function) and
- // edx (receiver). Touch up the stack with the right values.
- Result receiver = allocator_->Allocate(edx);
- frame_->SetElementAt(arg_count + 1, &result);
- frame_->SetElementAt(arg_count, &receiver);
- receiver.Unuse();
-
- // Call the function.
- CodeForSourcePosition(node->position());
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
- result = frame_->CallStub(&call_function, arg_count + 1);
-
- // Restore the context and overwrite the function on the stack with
- // the result.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &result);
-
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is global
- // ----------------------------------
-
- // Pass the global object as the receiver and let the IC stub
- // patch the stack to use the global proxy as 'this' in the
- // invoked function.
- LoadGlobal();
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Push the name of the function onto the frame.
- frame_->Push(var->name());
-
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
- arg_count,
- loop_nesting());
- frame_->RestoreContextRegister();
- frame_->Push(&result);
-
- } else if (var != NULL && var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::LOOKUP) {
- // ----------------------------------
- // JavaScript examples:
- //
- // with (obj) foo(1, 2, 3) // foo may be in obj.
- //
- // function f() {};
- // function g() {
- // eval(...);
- // f(); // f could be in extension object.
- // }
- // ----------------------------------
-
- JumpTarget slow, done;
- Result function;
-
- // Generate fast case for loading functions from slots that
- // correspond to local/global variables or arguments unless they
- // are shadowed by eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &function,
- &slow,
- &done);
-
- slow.Bind();
- // Enter the runtime system to load the function from the context.
- // Sync the frame so we can push the arguments directly into
- // place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(var->name()));
- frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- // The runtime call returns a pair of values in eax and edx. The
- // looked-up function is in eax and the receiver is in edx. These
- // register references are not ref counted here. We spill them
- // eagerly since they are arguments to an inevitable call (and are
- // not sharable by the arguments).
- ASSERT(!allocator()->is_used(eax));
- frame_->EmitPush(eax);
-
- // Load the receiver.
- ASSERT(!allocator()->is_used(edx));
- frame_->EmitPush(edx);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- JumpTarget call;
- call.Jump();
- done.Bind(&function);
- frame_->Push(&function);
- LoadGlobalReceiver();
- call.Bind();
- }
-
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-
- } else if (property != NULL) {
- // Check if the key is a literal string.
- Literal* literal = property->key()->AsLiteral();
-
- if (literal != NULL && literal->handle()->IsSymbol()) {
- // ------------------------------------------------------------------
- // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
- // ------------------------------------------------------------------
-
- Handle<String> name = Handle<String>::cast(literal->handle());
-
- if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
- name->IsEqualTo(CStrVector("apply")) &&
- args->length() == 2 &&
- args->at(1)->AsVariableProxy() != NULL &&
- args->at(1)->AsVariableProxy()->IsArguments()) {
- // Use the optimized Function.prototype.apply that avoids
- // allocating lazily allocated arguments objects.
- CallApplyLazy(property->obj(),
- args->at(0),
- args->at(1)->AsVariableProxy(),
- node->position());
-
- } else {
- // Push the receiver onto the frame.
- Load(property->obj());
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Push the name of the function onto the frame.
- frame_->Push(name);
-
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result =
- frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
- loop_nesting());
- frame_->RestoreContextRegister();
- frame_->Push(&result);
- }
-
- } else {
- // -------------------------------------------
- // JavaScript example: 'array[index](1, 2, 3)'
- // -------------------------------------------
-
- // Load the function to call from the property through a reference.
-
- // Pass receiver to called function.
- if (property->is_synthetic()) {
- Reference ref(this, property);
- ref.GetValue();
- // Use global object as receiver.
- LoadGlobalReceiver();
- // Call the function.
- CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
- } else {
- // Push the receiver onto the frame.
- Load(property->obj());
-
- // Load the name of the function.
- Load(property->key());
-
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- Result key = frame_->Pop();
- Result receiver = frame_->Pop();
- frame_->Push(&key);
- frame_->Push(&receiver);
- key.Unuse();
- receiver.Unuse();
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Place the key on top of stack and call the IC initialization code.
- frame_->PushElementAt(arg_count + 1);
- CodeForSourcePosition(node->position());
- Result result =
- frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting());
- frame_->Drop(); // Drop the key still on the stack.
- frame_->RestoreContextRegister();
- frame_->Push(&result);
- }
- }
-
- } else {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is not global
- // ----------------------------------
-
- // Load the function.
- Load(function);
-
- // Pass the global proxy as the receiver.
- LoadGlobalReceiver();
-
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
- }
-}
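
VisitCall distinguishes five call shapes before emitting any code. A condensed classification, again as a sketch over hypothetical predicate parameters:

enum class CallKind {
  kPossiblyDirectEval,  // eval(...) -- resolved via %ResolvePossiblyDirectEval
  kGlobalVariable,      // foo(...) with foo global -- call IC, global receiver
  kLookupSlot,          // foo(...) inside 'with' or after eval -- runtime lookup
  kNamedProperty,       // obj.foo(...) -- call IC keyed by the name
  kKeyedProperty,       // obj[expr](...) -- keyed call IC
  kOther                // anything else -- load function, global receiver
};

CallKind ClassifyCall(bool is_variable, bool is_possibly_eval, bool is_global,
                      bool is_lookup_slot, bool is_property,
                      bool key_is_literal_symbol) {
  // The real code also requires !var->is_this() on the global branch;
  // that check is elided here.
  if (is_variable && is_possibly_eval) return CallKind::kPossiblyDirectEval;
  if (is_variable && is_global) return CallKind::kGlobalVariable;
  if (is_variable && is_lookup_slot) return CallKind::kLookupSlot;
  if (is_property)
    return key_is_literal_symbol ? CallKind::kNamedProperty
                                 : CallKind::kKeyedProperty;
  return CallKind::kOther;
}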
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ CallNew");
-
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments. This is different from ordinary calls, where the
- // actual function to call is resolved after the arguments have been
- // evaluated.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- Load(node->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = node->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallConstructor(arg_count);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ test(value.reg(), Immediate(kSmiTagMask));
- value.Unuse();
- destination()->Split(zero);
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (ShouldGenerateLog(args->at(0))) {
- Load(args->at(1));
- Load(args->at(2));
- frame_->CallRuntime(Runtime::kLog, 2);
- }
-#endif
- // Finally, we're expected to leave a value on the top of the stack.
- frame_->Push(Factory::undefined_value());
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ test(value.reg(), Immediate(kSmiTagMask | kSmiSignMask));
- value.Unuse();
- destination()->Split(zero);
-}
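
Both predicates above test tag bits on the 32-bit tagged word with a single test instruction. The same checks as standalone C++, under the ia32 smi encoding assumed throughout this file (kSmiTag == 0, kSmiTagSize == 1, so a smi is the value shifted left by one):

#include <cstdint>

const uint32_t kSmiTagMask = 1;            // low bit is 0 for smis
const uint32_t kSmiSignMask = 0x80000000;  // sign bit of the tagged word

bool IsSmi(uint32_t tagged) {
  return (tagged & kSmiTagMask) == 0;
}

bool IsNonNegativeSmi(uint32_t tagged) {
  // One combined mask covers both bits, just like the generated test.
  return (tagged & (kSmiTagMask | kSmiSignMask)) == 0;
}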
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
- DeferredStringCharCodeAt(Register object,
- Register index,
- Register scratch,
- Register result)
- : result_(result),
- char_code_at_generator_(object,
- index,
- scratch,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharCodeAtGenerator* fast_case_generator() {
- return &char_code_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ Set(result_, Immediate(Factory::undefined_value()));
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ Set(result_, Immediate(Factory::nan_value()));
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charCodeAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharCodeAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
- Result index = frame_->Pop();
- Result object = frame_->Pop();
- object.ToRegister();
- index.ToRegister();
- // We might mutate the object register.
- frame_->Spill(object.reg());
-
- // We need two extra registers.
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(object.reg(),
- index.reg(),
- scratch.reg(),
- result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
- DeferredStringCharFromCode(Register code,
- Register result)
- : char_from_code_generator_(code, result) {}
-
- StringCharFromCodeGenerator* fast_case_generator() {
- return &char_from_code_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_from_code_generator_.GenerateSlow(masm(), call_helper);
- }
-
- private:
- StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharFromCode");
- ASSERT(args->length() == 1);
-
- Load(args->at(0));
-
- Result code = frame_->Pop();
- code.ToRegister();
- ASSERT(code.is_valid());
-
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
-
- DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
- code.reg(), result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
- DeferredStringCharAt(Register object,
- Register index,
- Register scratch1,
- Register scratch2,
- Register result)
- : result_(result),
- char_at_generator_(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharAtGenerator* fast_case_generator() {
- return &char_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Set(result_, Immediate(Smi::FromInt(0)));
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ Set(result_, Immediate(Factory::empty_string()));
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
- Result index = frame_->Pop();
- Result object = frame_->Pop();
- object.ToRegister();
- index.ToRegister();
- // We might mutate the object register.
- frame_->Spill(object.reg());
-
- // We need three extra registers.
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- Result scratch1 = allocator()->Allocate();
- ASSERT(scratch1.is_valid());
- Result scratch2 = allocator()->Allocate();
- ASSERT(scratch2.is_valid());
-
- DeferredStringCharAt* deferred =
- new DeferredStringCharAt(object.reg(),
- index.reg(),
- scratch1.reg(),
- scratch2.reg(),
- result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
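
The two deferred classes above differ only in the out-of-range result the spec requires: NaN for charCodeAt, the empty string for charAt. A standalone model of that behavioral contract, assuming UTF-16 code units in a std::u16string:

#include <cmath>
#include <string>

double CharCodeAt(const std::u16string& s, double index) {
  if (!(index >= 0) || index >= static_cast<double>(s.size()))
    return std::nan("");                 // out of range: NaN
  return s[static_cast<size_t>(index)];
}

std::u16string CharAt(const std::u16string& s, double index) {
  if (!(index >= 0) || index >= static_cast<double>(s.size()))
    return u"";                          // out of range: empty string
  return std::u16string(1, s[static_cast<size_t>(index)]);
}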
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ test(value.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(equal);
- // It is a heap object - get map.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- // Check if the object is a JS array or not.
- __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, temp.reg());
- value.Unuse();
- temp.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop, loop_condition,
- loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
-
- ASSERT(args->length() == 2);
- // We will leave the separator on the stack until the end of the function.
- Load(args->at(1));
-  // Load the array into eax.
- Load(args->at(0));
- Result array_result = frame_->Pop();
- array_result.ToRegister(eax);
- frame_->SpillAll();
-
- // All aliases of the same register have disjoint lifetimes.
- Register array = eax;
- Register elements = no_reg; // Will be eax.
-
- Register index = edx;
-
- Register string_length = ecx;
-
- Register string = esi;
-
- Register scratch = ebx;
-
- Register array_length = edi;
- Register result_pos = no_reg; // Will be edi.
-
- // Separator operand is already pushed.
- Operand separator_operand = Operand(esp, 2 * kPointerSize);
- Operand result_operand = Operand(esp, 1 * kPointerSize);
- Operand array_length_operand = Operand(esp, 0);
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
- __ cld();
- // Check that the array is a JSArray
- __ test(array, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
- __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &bailout);
-
- // Check that the array has fast elements.
- __ test_b(FieldOperand(scratch, Map::kBitField2Offset),
- 1 << Map::kHasFastElements);
- __ j(zero, &bailout);
-
- // If the array has length zero, return the empty string.
- __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
- __ sar(array_length, 1);
- __ j(not_zero, &non_trivial_array);
- __ mov(result_operand, Factory::empty_string());
- __ jmp(&done);
-
- // Save the array length.
- __ bind(&non_trivial_array);
- __ mov(array_length_operand, array_length);
-
- // Save the FixedArray containing array's elements.
- // End of array's live range.
- elements = array;
- __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
- array = no_reg;
-
-
- // Check that all array elements are sequential ASCII strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ Set(index, Immediate(0));
- __ Set(string_length, Immediate(0));
- // Loop condition: while (index < length).
- // Live loop registers: index, array_length, string,
- // scratch, string_length, elements.
- __ jmp(&loop_condition);
- __ bind(&loop);
- __ cmp(index, Operand(array_length));
- __ j(greater_equal, &done);
-
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ test(string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
- __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
- __ add(string_length,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
- __ j(overflow, &bailout);
- __ add(Operand(index), Immediate(1));
- __ bind(&loop_condition);
- __ cmp(index, Operand(array_length));
- __ j(less, &loop);
-
- // If array_length is 1, return elements[0], a string.
- __ cmp(array_length, 1);
- __ j(not_equal, &not_size_one_array);
- __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
- __ mov(result_operand, scratch);
- __ jmp(&done);
-
- __ bind(&not_size_one_array);
-
- // End of array_length live range.
- result_pos = array_length;
- array_length = no_reg;
-
- // Live registers:
- // string_length: Sum of string lengths, as a smi.
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat ASCII string.
- __ mov(string, separator_operand);
- __ test(string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
- __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
-
- // Add (separator length times array_length) - separator length
- // to string_length.
- __ mov(scratch, separator_operand);
- __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
- __ sub(string_length, Operand(scratch)); // May be negative, temporarily.
- __ imul(scratch, array_length_operand);
- __ j(overflow, &bailout);
- __ add(string_length, Operand(scratch));
- __ j(overflow, &bailout);
-
- __ shr(string_length, 1);
- // Live registers and stack values:
- // string_length
- // elements
- __ AllocateAsciiString(result_pos, string_length, scratch,
- index, string, &bailout);
- __ mov(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
-
-
- __ mov(string, separator_operand);
- __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ j(equal, &one_char_separator);
- __ j(greater, &long_separator);
-
-
- // Empty separator case
- __ mov(index, Immediate(0));
- __ jmp(&loop_1_condition);
- // Loop condition: while (index < length).
- __ bind(&loop_1);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // elements: the FixedArray of strings we are joining.
-
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
- __ bind(&loop_1_condition);
- __ cmp(index, array_length_operand);
- __ j(less, &loop_1); // End while (index < length).
- __ jmp(&done);
-
-
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Replace separator with its ascii character value.
- __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ mov_b(separator_operand, scratch);
-
- __ Set(index, Immediate(0));
-  // Jump into the loop after the code that copies the separator, so the
-  // first element is not preceded by a separator.

- __ jmp(&loop_2_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_2);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
-
- // Copy the separator character to the result.
- __ mov_b(scratch, separator_operand);
- __ mov_b(Operand(result_pos, 0), scratch);
- __ inc(result_pos);
-
- __ bind(&loop_2_entry);
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
-
- __ cmp(index, array_length_operand);
- __ j(less, &loop_2); // End while (index < length).
- __ jmp(&done);
-
-
- // Long separator case (separator is more than one character).
- __ bind(&long_separator);
-
- __ Set(index, Immediate(0));
-  // Jump into the loop after the code that copies the separator, so the
-  // first element is not preceded by a separator.
- __ jmp(&loop_3_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_3);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
-
- // Copy the separator to the result.
- __ mov(string, separator_operand);
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
-
- __ bind(&loop_3_entry);
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
-
- __ cmp(index, array_length_operand);
- __ j(less, &loop_3); // End while (index < length).
- __ jmp(&done);
-
-
- __ bind(&bailout);
- __ mov(result_operand, Factory::undefined_value());
- __ bind(&done);
- __ mov(eax, result_operand);
- // Drop temp values from the stack, and restore context register.
- __ add(Operand(esp), Immediate(2 * kPointerSize));
-
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- frame_->Drop(1);
- frame_->Push(&array_result);
-}
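
The assembly above is a single-allocation join: one pass sums the (smi-encoded) string lengths plus separator length times (n - 1), with overflow checks; a second pass copies raw bytes into the result. The same shape in portable C++, modeling one-byte strings with std::string and eliding the overflow checks:

#include <string>
#include <vector>

std::string FastJoin(const std::vector<std::string>& parts,
                     const std::string& separator) {
  if (parts.empty()) return std::string();  // length zero: empty string
  if (parts.size() == 1) return parts[0];   // length one: the element itself

  // First pass: compute the total length up front.
  size_t total = separator.size() * (parts.size() - 1);
  for (const std::string& p : parts) total += p.size();

  // Second pass: one allocation, then straight byte copies (CopyBytes).
  std::string result;
  result.reserve(total);
  result += parts[0];
  for (size_t i = 1; i < parts.size(); ++i) {
    result += separator;
    result += parts[i];
  }
  return result;
}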
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ test(value.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(equal);
- // It is a heap object - get map.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- // Check if the object is a regexp.
- __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, temp.reg());
- value.Unuse();
- temp.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
-
- __ test(obj.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
- __ cmp(obj.reg(), Factory::null_value());
- destination()->true_target()->Branch(equal);
-
- Result map = allocator()->Allocate();
- ASSERT(map.is_valid());
- __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- destination()->false_target()->Branch(not_zero);
- // Do a range test for JSObject type. We can't use
- // MacroAssembler::IsInstanceJSObjectType, because we are using a
- // ControlDestination, so we copy its implementation here.
- __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
- __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
- __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
- obj.Unuse();
- map.Unuse();
- destination()->Split(below_equal);
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
-  //  typeof(arg) === 'function').
- // It includes undetectable objects (as opposed to IsObject).
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ test(value.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(equal);
-
- // Check that this is an object.
- frame_->Spill(value.reg());
- __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, value.reg());
- value.Unuse();
- destination()->Split(above_equal);
-}
-
-
-// Deferred code to check whether a String wrapper object can safely use the
-// default valueOf behavior. This code runs after the inline test of the bit
-// that caches this information in the map; the object's map is expected in
-// the map_result_ register. On return, map_result_ contains 1 for true and
-// 0 for false.
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
- DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
- Register map_result,
- Register scratch1,
- Register scratch2)
- : object_(object),
- map_result_(map_result),
- scratch1_(scratch1),
- scratch2_(scratch2) { }
-
- virtual void Generate() {
- Label false_result;
-
- // Check that map is loaded as expected.
- if (FLAG_debug_code) {
- __ cmp(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ Assert(equal, "Map not in expected register");
- }
-
- // Check for fast case object. Generate false result for slow case object.
- __ mov(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
- __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
- __ cmp(scratch1_, Factory::hash_table_map());
- __ j(equal, &false_result);
-
- // Look for valueOf symbol in the descriptor array, and indicate false if
- // found. The type is not checked, so if it is a transition it is a false
- // negative.
- __ mov(map_result_,
- FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
- __ mov(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
- // map_result_: descriptor array
- // scratch1_: length of descriptor array
- // Calculate the end of the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ lea(scratch1_,
- Operand(map_result_, scratch1_, times_2, FixedArray::kHeaderSize));
- // Calculate location of the first key name.
- __ add(Operand(map_result_),
- Immediate(FixedArray::kHeaderSize +
- DescriptorArray::kFirstIndex * kPointerSize));
- // Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
- Label entry, loop;
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(scratch2_, FieldOperand(map_result_, 0));
- __ cmp(scratch2_, Factory::value_of_symbol());
- __ j(equal, &false_result);
- __ add(Operand(map_result_), Immediate(kPointerSize));
- __ bind(&entry);
- __ cmp(map_result_, Operand(scratch1_));
- __ j(not_equal, &loop);
-
- // Reload map as register map_result_ was used as temporary above.
- __ mov(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
-
-    // If a valueOf property is not found on the object, check that its
-    // prototype is the unmodified String prototype. If not, the result is
-    // false.
- __ mov(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
- __ test(scratch1_, Immediate(kSmiTagMask));
- __ j(zero, &false_result);
- __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
- __ mov(scratch2_, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(scratch2_,
- FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
- __ cmp(scratch1_,
- ContextOperand(scratch2_,
- Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, &false_result);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(map_result_, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ Set(map_result_, Immediate(1));
- __ jmp(exit_label());
- __ bind(&false_result);
- // Set false result.
- __ Set(map_result_, Immediate(0));
- }
-
- private:
- Register object_;
- Register map_result_;
- Register scratch1_;
- Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop(); // Pop the string wrapper.
- obj.ToRegister();
- ASSERT(obj.is_valid());
- if (FLAG_debug_code) {
- __ AbortIfSmi(obj.reg());
- }
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- Result map_result = allocator()->Allocate();
- ASSERT(map_result.is_valid());
- __ mov(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(map_result.reg(), Map::kBitField2Offset),
- 1 << Map::kStringWrapperSafeForDefaultValueOf);
- destination()->true_target()->Branch(not_zero);
-
- // We need an additional two scratch registers for the deferred code.
- Result temp1 = allocator()->Allocate();
- ASSERT(temp1.is_valid());
- Result temp2 = allocator()->Allocate();
- ASSERT(temp2.is_valid());
-
- DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
- new DeferredIsStringWrapperSafeForDefaultValueOf(
- obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
- deferred->Branch(zero);
- deferred->BindExit();
- __ test(map_result.reg(), Operand(map_result.reg()));
- obj.Unuse();
- map_result.Unuse();
- temp1.Unuse();
- temp2.Unuse();
- destination()->Split(not_equal);
-}
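
Putting the inline check and the deferred path together: the map bit is a memoized "safe" answer, and the deferred code computes it once by scanning the descriptor array and checking the prototype. A simplified standalone model (the field names are illustrative):

#include <string>
#include <vector>

struct MapModel {
  bool checked_safe_for_default_value_of = false;  // the cached map bit
  std::vector<std::string> descriptor_keys;        // own property names
  bool prototype_is_unmodified_string_prototype = true;
};

bool IsStringWrapperSafeForDefaultValueOf(MapModel* map) {
  // Fast path: the bit in the map has already been set (the inline test_b).
  if (map->checked_safe_for_default_value_of) return true;

  // Deferred path: a 'valueOf' key means the default behavior may be
  // shadowed. A transition with that name yields a false negative, which
  // is safe -- we merely fail to cache a positive answer.
  for (const std::string& key : map->descriptor_keys) {
    if (key == "valueOf") return false;
  }
  if (!map->prototype_is_unmodified_string_prototype) return false;

  map->checked_safe_for_default_value_of = true;  // memoize the positive answer
  return true;
}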
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (%_ClassOf(arg) === 'Function')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- __ test(obj.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, temp.reg());
- obj.Unuse();
- temp.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- __ test(obj.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(),
- FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- obj.Unuse();
- temp.Unuse();
- destination()->Split(not_zero);
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- // Get the frame pointer for the calling frame.
- Result fp = allocator()->Allocate();
- __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker);
- __ mov(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
- fp.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Result fp = allocator_->Allocate();
- Result result = allocator_->Allocate();
- ASSERT(fp.is_valid() && result.is_valid());
-
- Label exit;
-
- // Get the number of formal parameters.
- __ Set(result.reg(), Immediate(Smi::FromInt(scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ mov(result.reg(),
- Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- result.set_type_info(TypeInfo::Smi());
- if (FLAG_debug_code) __ AbortIfNotSmi(result.reg());
- frame_->Push(&result);
-}
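
Both intrinsics above walk the caller's frame chain and treat an arguments adaptor frame as transparent. A sketch over a hypothetical frame record; the real code reads these fields at fixed offsets from ebp:

struct FrameModel {
  const FrameModel* caller;   // StandardFrameConstants::kCallerFPOffset
  bool is_arguments_adaptor;  // context slot holds the ARGUMENTS_ADAPTOR marker
  bool is_construct;          // marker slot holds the CONSTRUCT marker
  int adapted_argument_count; // ArgumentsAdaptorFrameConstants::kLengthOffset
};

bool IsConstructCall(const FrameModel* current) {
  const FrameModel* fp = current->caller;
  if (fp->is_arguments_adaptor) fp = fp->caller;  // skip the adaptor frame
  return fp->is_construct;
}

int ArgumentsLength(const FrameModel* current, int formal_parameter_count) {
  const FrameModel* fp = current->caller;
  // Without an adaptor frame, actual and formal counts agree.
  return fp->is_arguments_adaptor ? fp->adapted_argument_count
                                  : formal_parameter_count;
}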
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave, null, function, non_function_constructor;
- Load(args->at(0)); // Load the object.
- Result obj = frame_->Pop();
- obj.ToRegister();
- frame_->Spill(obj.reg());
-
- // If the object is a smi, we return null.
- __ test(obj.reg(), Immediate(kSmiTagMask));
- null.Branch(zero);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
- null.Branch(below);
-
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
- function.Branch(equal);
-
- // Check if the constructor in the map is a function.
- { Result tmp = allocator()->Allocate();
- __ mov(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, tmp.reg());
- non_function_constructor.Branch(not_equal);
- }
-
- // The map register now contains the constructor function. Grab the
- // instance class name from there.
- __ mov(obj.reg(),
- FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
- __ mov(obj.reg(),
- FieldOperand(obj.reg(), SharedFunctionInfo::kInstanceClassNameOffset));
- frame_->Push(&obj);
- leave.Jump();
-
- // Functions have class 'Function'.
- function.Bind();
- frame_->Push(Factory::function_class_symbol());
- leave.Jump();
-
- // Objects with a non-function constructor have class 'Object'.
- non_function_constructor.Bind();
- frame_->Push(Factory::Object_symbol());
- leave.Jump();
-
- // Non-JS objects have class null.
- null.Bind();
- frame_->Push(Factory::null_value());
-
- // All done.
- leave.Bind();
-}
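
The four exits of GenerateClassOf, collapsed into one decision function. A sketch with hypothetical inputs standing in for the type checks:

#include <string>

std::string ClassOf(bool is_smi, bool is_js_object, bool is_function,
                    bool constructor_is_function,
                    const std::string& instance_class_name) {
  if (is_smi || !is_js_object) return "null";  // the code pushes the null value
  if (is_function) return "Function";          // functions get a fixed class
  if (!constructor_is_function) return "Object";
  return instance_class_name;                  // read off the SharedFunctionInfo
}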
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- frame_->Dup();
- Result object = frame_->Pop();
- object.ToRegister();
- ASSERT(object.is_valid());
- // if (object->IsSmi()) return object.
- __ test(object.reg(), Immediate(kSmiTagMask));
- leave.Branch(zero, taken);
- // It is a heap object - get map.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- // if (!object->IsJSValue()) return object.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
- leave.Branch(not_equal, not_taken);
- __ mov(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
- object.Unuse();
- frame_->SetElementAt(0, &temp);
- leave.Bind();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- Load(args->at(1)); // Load the value.
- Result value = frame_->Pop();
- Result object = frame_->Pop();
- value.ToRegister();
- object.ToRegister();
-
- // if (object->IsSmi()) return value.
- __ test(object.reg(), Immediate(kSmiTagMask));
- leave.Branch(zero, &value, taken);
-
- // It is a heap object - get its map.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- // if (!object->IsJSValue()) return value.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
- leave.Branch(not_equal, &value, not_taken);
-
- // Store the value.
- __ mov(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- Result duplicate_value = allocator_->Allocate();
- ASSERT(duplicate_value.is_valid());
- __ mov(duplicate_value.reg(), value.reg());
- // The object register is also overwritten by the write barrier and
- // possibly aliased in the frame.
- frame_->Spill(object.reg());
- __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
- scratch.reg());
- object.Unuse();
- scratch.Unuse();
- duplicate_value.Unuse();
-
- // Leave.
- leave.Bind(&value);
- frame_->Push(&value);
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in edx and the formal
- // parameter count in eax.
- Load(args->at(0));
- Result key = frame_->Pop();
- // Explicitly create a constant result.
- Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
- // Call the shared stub to get to arguments[key].
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- Result result = frame_->CallStub(&stub, &key, &count);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- Load(args->at(0));
- Load(args->at(1));
- Result right = frame_->Pop();
- Result left = frame_->Pop();
- right.ToRegister();
- left.ToRegister();
- __ cmp(right.reg(), Operand(left.reg()));
- right.Unuse();
- left.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
- STATIC_ASSERT(kSmiTag == 0); // EBP value is aligned, so it looks like a Smi.
- Result ebp_as_smi = allocator_->Allocate();
- ASSERT(ebp_as_smi.is_valid());
- __ mov(ebp_as_smi.reg(), Operand(ebp));
- frame_->Push(&ebp_as_smi);
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
- frame_->SpillAll();
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(edi, eax);
-
- __ bind(&heapnumber_allocated);
-
- __ PrepareCallCFunction(0, ebx);
- __ CallCFunction(ExternalReference::random_uint32_function(), 0);
-
-  // Convert 32 random bits in eax to 0.(32 random bits) in a double
-  // by computing:
-  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
- // This is implemented on both SSE2 and FPU.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, Operand(ebx));
- __ movd(xmm0, Operand(eax));
- __ cvtss2sd(xmm1, xmm1);
- __ pxor(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
- } else {
- // 0x4130000000000000 is 1.0 x 2^20 as a double.
- __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
- Immediate(0x41300000));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax);
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0));
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ fsubp(1);
- __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
- }
- __ mov(eax, edi);
-
- Result result = allocator_->Allocate(eax);
- frame_->Push(&result);
-}
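
The SSE2 and FPU paths above both exploit the IEEE-754 layout: writing 0x41300000 into the exponent/high-mantissa word and 32 random bits into the low word yields 2^20 + r * 2^-32, so subtracting 2^20 leaves a uniform value in [0, 1). A standalone check of the arithmetic, assuming the usual 64-bit IEEE double:

#include <cstdint>
#include <cstring>

double RandomBitsToDouble(uint32_t random_bits) {
  // High word 0x41300000: sign 0, exponent 2^20, top 20 mantissa bits 0.
  // Low word: the 32 random bits, i.e. the low mantissa bits.
  uint64_t bits = 0x4130000000000000ULL | random_bits;
  double d;
  std::memcpy(&d, &bits, sizeof d);  // d == 2^20 + random_bits * 2^-32
  return d - 1048576.0;              // subtract 1.0 x 2^20 -> [0, 1)
}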
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- SubStringStub stub;
- Result answer = frame_->CallStub(&stub, 3);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringCompareStub stub;
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
- ASSERT_EQ(4, args->length());
-
- // Load the arguments on the stack and call the stub.
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
- Load(args->at(3));
-
- RegExpExecStub stub;
- Result result = frame_->CallStub(&stub, 4);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0)); // Size of array, smi.
- Load(args->at(1)); // "index" property value.
- Load(args->at(2)); // "input" property value.
-
- RegExpConstructResultStub stub;
- Result result = frame_->CallStub(&stub, 3);
- frame_->Push(&result);
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
- DeferredSearchCache(Register dst, Register cache, Register key)
- : dst_(dst), cache_(cache), key_(key) {
- set_comment("[ DeferredSearchCache");
- }
-
- virtual void Generate();
-
- private:
-  Register dst_;    // On invocation holds the Smi index of the finger;
-                    // on exit holds the value being looked up.
- Register cache_; // instance of JSFunctionResultCache.
- Register key_; // key being looked up.
-};
-
-
-void DeferredSearchCache::Generate() {
- Label first_loop, search_further, second_loop, cache_miss;
-
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Smi* kEntrySizeSmi = Smi::FromInt(JSFunctionResultCache::kEntrySize);
- Smi* kEntriesIndexSmi = Smi::FromInt(JSFunctionResultCache::kEntriesIndex);
-
- // Check the cache from finger to start of the cache.
- __ bind(&first_loop);
- __ sub(Operand(dst_), Immediate(kEntrySizeSmi));
- __ cmp(Operand(dst_), Immediate(kEntriesIndexSmi));
- __ j(less, &search_further);
-
- __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
- __ j(not_equal, &first_loop);
-
- __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
- __ jmp(exit_label());
-
- __ bind(&search_further);
-
- // Check the cache from end of cache up to finger.
- __ mov(dst_, FieldOperand(cache_, JSFunctionResultCache::kCacheSizeOffset));
-
- __ bind(&second_loop);
- __ sub(Operand(dst_), Immediate(kEntrySizeSmi));
- // Consider prefetching into some reg.
- __ cmp(dst_, FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
- __ j(less_equal, &cache_miss);
-
- __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
- __ j(not_equal, &second_loop);
-
- __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
- __ jmp(exit_label());
-
- __ bind(&cache_miss);
- __ push(cache_); // store a reference to cache
- __ push(key_); // store a key
- __ push(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ push(key_);
-  // On ia32 the function must be in edi.
- __ mov(edi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
- ParameterCount expected(1);
- __ InvokeFunction(edi, expected, CALL_FUNCTION);
-
- // Find a place to put new cached value into.
- Label add_new_entry, update_cache;
- __ mov(ecx, Operand(esp, kPointerSize)); // restore the cache
-  // Possible optimization: the cache size is constant for a given cache, so
-  // technically we could use a constant here. However, on a cache miss this
-  // optimization would hardly matter.
-
- // Check if we could add new entry to cache.
- __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ cmp(ebx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
- __ j(greater, &add_new_entry);
-
- // Check if we could evict entry after finger.
- __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
- __ add(Operand(edx), Immediate(kEntrySizeSmi));
- __ cmp(ebx, Operand(edx));
- __ j(greater, &update_cache);
-
- // Need to wrap over the cache.
- __ mov(edx, Immediate(kEntriesIndexSmi));
- __ jmp(&update_cache);
-
- __ bind(&add_new_entry);
- __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
- __ lea(ebx, Operand(edx, JSFunctionResultCache::kEntrySize << 1));
- __ mov(FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset), ebx);
-
- // Update the cache itself.
- // edx holds the index.
- __ bind(&update_cache);
- __ pop(ebx); // restore the key
- __ mov(FieldOperand(ecx, JSFunctionResultCache::kFingerOffset), edx);
- // Store key.
- __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
- __ RecordWrite(ecx, 0, ebx, edx);
-
- // Store value.
- __ pop(ecx); // restore the cache.
- __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
- __ add(Operand(edx), Immediate(Smi::FromInt(1)));
- __ mov(ebx, eax);
- __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
- __ RecordWrite(ecx, 0, ebx, edx);
-
- if (!dst_.is(eax)) {
- __ mov(dst_, eax);
- }
-}
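
The deferred search above implements a two-segment scan around a "finger" (the most recently hit entry), with the miss path calling the cache's factory function. A simplified standalone model, assuming a non-empty fixed-capacity cache of int keys and values, and eliding both the header fields at the front of the real backing store and the grow-in-place case:

#include <functional>
#include <vector>

struct ResultCacheModel {
  std::vector<int> entries;         // [key0, value0, key1, value1, ...]
  int finger = 0;                   // index of the key of the last hit
  std::function<int(int)> factory;  // called on a cache miss
};

int GetFromCache(ResultCacheModel* cache, int key) {
  std::vector<int>& e = cache->entries;  // assumed non-empty, even-sized
  // First loop: from the finger back toward the start of the cache.
  for (int i = cache->finger; i >= 0; i -= 2) {
    if (e[i] == key) { cache->finger = i; return e[i + 1]; }
  }
  // Second loop: from the end of the cache back down to the finger.
  for (int i = static_cast<int>(e.size()) - 2; i > cache->finger; i -= 2) {
    if (e[i] == key) { cache->finger = i; return e[i + 1]; }
  }
  // Miss: compute the value, then overwrite the entry after the finger,
  // wrapping to the start when we would run off the end of the cache.
  int value = cache->factory(key);
  int slot = cache->finger + 2;
  if (slot + 1 >= static_cast<int>(e.size())) slot = 0;
  e[slot] = key;
  e[slot + 1] = value;
  cache->finger = slot;
  return value;
}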
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- frame_->Push(Factory::undefined_value());
- return;
- }
-
- Load(args->at(1));
- Result key = frame_->Pop();
- key.ToRegister();
-
- Result cache = allocator()->Allocate();
- ASSERT(cache.is_valid());
- __ mov(cache.reg(), ContextOperand(esi, Context::GLOBAL_INDEX));
- __ mov(cache.reg(),
- FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
- __ mov(cache.reg(),
- ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ mov(cache.reg(),
- FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
-
- Result tmp = allocator()->Allocate();
- ASSERT(tmp.is_valid());
-
- DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
- cache.reg(),
- key.reg());
-
- // tmp.reg() now holds finger offset as a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ mov(tmp.reg(), FieldOperand(cache.reg(),
- JSFunctionResultCache::kFingerOffset));
- __ cmp(key.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg()));
- deferred->Branch(not_equal);
-
- __ mov(tmp.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg(), 1));
-
- deferred->BindExit();
- frame_->Push(&tmp);
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and call the stub.
- Load(args->at(0));
- NumberToStringStub stub;
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
- DeferredSwapElements(Register object, Register index1, Register index2)
- : object_(object), index1_(index1), index2_(index2) {
- set_comment("[ DeferredSwapElements");
- }
-
- virtual void Generate();
-
- private:
- Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
- __ push(object_);
- __ push(index1_);
- __ push(index2_);
- __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
-  // Note: this code assumes that the indices passed are within the
-  // elements' bounds and refer to valid (not hole) values.
- Comment cmnt(masm_, "[ GenerateSwapElements");
-
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- Result index2 = frame_->Pop();
- index2.ToRegister();
-
- Result index1 = frame_->Pop();
- index1.ToRegister();
-
- Result object = frame_->Pop();
- object.ToRegister();
-
- Result tmp1 = allocator()->Allocate();
- tmp1.ToRegister();
- Result tmp2 = allocator()->Allocate();
- tmp2.ToRegister();
-
- frame_->Spill(object.reg());
- frame_->Spill(index1.reg());
- frame_->Spill(index2.reg());
-
- DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
- index1.reg(),
- index2.reg());
-
- // Fetch the map and check if array is in fast case.
- // Check that object doesn't require security checks and
- // has no indexed interceptor.
- __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
- deferred->Branch(below);
- __ test_b(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
- KeyedLoadIC::kSlowCaseBitFieldMask);
- deferred->Branch(not_zero);
-
- // Check the object's elements are in fast case and writable.
- __ mov(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
- __ cmp(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- deferred->Branch(not_equal);
-
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- // Check that both indices are smis.
- __ mov(tmp2.reg(), index1.reg());
- __ or_(tmp2.reg(), Operand(index2.reg()));
- __ test(tmp2.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
-
- // Check that both indices are valid.
- __ mov(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
- __ cmp(tmp2.reg(), Operand(index1.reg()));
- deferred->Branch(below_equal);
- __ cmp(tmp2.reg(), Operand(index2.reg()));
- deferred->Branch(below_equal);
-
- // Bring addresses into index1 and index2.
- __ lea(index1.reg(), FixedArrayElementOperand(tmp1.reg(), index1.reg()));
- __ lea(index2.reg(), FixedArrayElementOperand(tmp1.reg(), index2.reg()));
-
- // Swap elements.
- __ mov(object.reg(), Operand(index1.reg(), 0));
- __ mov(tmp2.reg(), Operand(index2.reg(), 0));
- __ mov(Operand(index2.reg(), 0), object.reg());
- __ mov(Operand(index1.reg(), 0), tmp2.reg());
-
- Label done;
- __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
- // Possible optimization: check that both values are smis
- // (or them together and test the result against the smi mask).
-
- __ mov(tmp2.reg(), tmp1.reg());
- __ RecordWriteHelper(tmp2.reg(), index1.reg(), object.reg());
- __ RecordWriteHelper(tmp1.reg(), index2.reg(), object.reg());
- __ bind(&done);
-
- deferred->BindExit();
- frame_->Push(Factory::undefined_value());
-}
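-
-// In outline, the inline path above performs (a sketch, not the exact
-// semantics of the Runtime::kSwapElements fallback):
-//
-//   if (is_js_object(obj) && !needs_access_checks(obj) &&
-//       elements_are_fast_and_writable(obj) &&
-//       is_smi(i) && is_smi(j) && i < length && j < length) {
-//     swap(elements[i], elements[j]);
-//     record_write(elements, i);   // skipped when elements is in new space
-//     record_write(elements, j);
-//   } else {
-//     CallRuntime(Runtime::kSwapElements, obj, i, j);  // deferred slow path
-//   }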
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateCallFunction");
-
- ASSERT(args->length() >= 2);
-
- int n_args = args->length() - 2; // for receiver and function.
- Load(args->at(0)); // receiver
- for (int i = 0; i < n_args; i++) {
- Load(args->at(i + 1));
- }
- Load(args->at(n_args + 1)); // function
- Result result = frame_->CallJSFunction(n_args);
- frame_->Push(&result);
-}
-
-
-// Generates the Math.pow method. Only handles special cases and
-// branches to the runtime system for everything else. Please note
-// that this function assumes that the call site has executed ToNumber
-// on both arguments.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- Load(args->at(0));
- Load(args->at(1));
- if (!CpuFeatures::IsSupported(SSE2)) {
- Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
- frame_->Push(&res);
- } else {
- CpuFeatures::Scope use_sse2(SSE2);
- Label allocate_return;
- // Load the two operands while leaving the values on the frame.
- frame()->Dup();
- Result exponent = frame()->Pop();
- exponent.ToRegister();
- frame()->Spill(exponent.reg());
- frame()->PushElementAt(1);
- Result base = frame()->Pop();
- base.ToRegister();
- frame()->Spill(base.reg());
-
- Result answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- ASSERT(!exponent.reg().is(base.reg()));
- JumpTarget call_runtime;
-
- // Save 1 in xmm3 - we need this several times later on.
- __ mov(answer.reg(), Immediate(1));
- __ cvtsi2sd(xmm3, Operand(answer.reg()));
-
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ test(exponent.reg(), Immediate(kSmiTagMask));
- __ j(not_zero, &exponent_nonsmi);
- __ test(base.reg(), Immediate(kSmiTagMask));
- __ j(not_zero, &base_nonsmi);
-
- // Optimized version when y is an integer.
- Label powi;
- __ SmiUntag(base.reg());
- __ cvtsi2sd(xmm0, Operand(base.reg()));
- __ jmp(&powi);
- // The exponent is a smi and the base is a heap number.
- __ bind(&base_nonsmi);
- __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
- call_runtime.Branch(not_equal);
-
- __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
- // Optimized version of pow if y is an integer.
- __ bind(&powi);
- __ SmiUntag(exponent.reg());
-
- // Save exponent in base as we need to check if exponent is negative later.
- // We know that base and exponent are in different registers.
- __ mov(base.reg(), exponent.reg());
-
- // Get absolute value of exponent.
- Label no_neg;
- __ cmp(exponent.reg(), 0);
- __ j(greater_equal, &no_neg);
- __ neg(exponent.reg());
- __ bind(&no_neg);
-
- // Load xmm1 with 1.
- __ movsd(xmm1, xmm3);
- Label while_true;
- Label no_multiply;
-
- __ bind(&while_true);
- __ shr(exponent.reg(), 1);
- __ j(not_carry, &no_multiply);
- __ mulsd(xmm1, xmm0);
- __ bind(&no_multiply);
- __ test(exponent.reg(), Operand(exponent.reg()));
- __ mulsd(xmm0, xmm0);
- __ j(not_zero, &while_true);
-
- // base.reg() holds the original exponent; if it is negative, return 1/result.
- __ test(base.reg(), Operand(base.reg()));
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ mov(answer.reg(), Immediate(0x7FB00000));
- __ movd(xmm0, Operand(answer.reg()));
- __ cvtss2sd(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- call_runtime.Branch(equal);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // The exponent (or both operands) is a heap number; from here on we
- // work on doubles regardless.
- __ bind(&exponent_nonsmi);
- __ cmp(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
- call_runtime.Branch(not_equal);
- __ movdbl(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
- // Test if the exponent is NaN.
- __ ucomisd(xmm1, xmm1);
- call_runtime.Branch(parity_even);
-
- Label base_not_smi;
- Label handle_special_cases;
- __ test(base.reg(), Immediate(kSmiTagMask));
- __ j(not_zero, &base_not_smi);
- __ SmiUntag(base.reg());
- __ cvtsi2sd(xmm0, Operand(base.reg()));
- __ jmp(&handle_special_cases);
- __ bind(&base_not_smi);
- __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
- call_runtime.Branch(not_equal);
- __ mov(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
- __ and_(answer.reg(), HeapNumber::kExponentMask);
- __ cmp(Operand(answer.reg()), Immediate(HeapNumber::kExponentMask));
- // base is NaN or +/-Infinity
- call_runtime.Branch(greater_equal);
- __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- Label not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ mov(answer.reg(), Immediate(0xBF000000));
- __ movd(xmm2, Operand(answer.reg()));
- __ cvtss2sd(xmm2, xmm2);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half);
-
- // Calculates reciprocal of square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // Test for 0.5.
- __ bind(&not_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- call_runtime.Branch(not_equal);
- // Calculates square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
-
- JumpTarget done;
- Label failure, success;
- __ bind(&allocate_return);
- // Make a copy of the frame to enable us to handle allocation
- // failure after the JumpTarget jump.
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(answer.reg(), exponent.reg(),
- base.reg(), &failure);
- __ movdbl(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
- // Remove the two original values from the frame - we only need those
- // in the case where we branch to runtime.
- frame()->Drop(2);
- exponent.Unuse();
- base.Unuse();
- done.Jump(&answer);
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- // If we experience an allocation failure we branch to runtime.
- __ bind(&failure);
- call_runtime.Bind();
- answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
-
- done.Bind(&answer);
- frame()->Push(&answer);
- }
-}
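-
-// The shr/mulsd loop above is binary exponentiation. A plain C++ rendering
-// of the same idea (a sketch; the code above additionally special-cases
-// exponents 0.5 and -0.5 and bails out to the runtime when needed):
-//
-//   double powi(double x, int n) {
-//     unsigned m = (n < 0) ? 0u - (unsigned)n : (unsigned)n;
-//     double r = 1.0;
-//     for (; m != 0; m >>= 1, x *= x) {  // matches the shr/mulsd loop
-//       if (m & 1) r *= x;               // multiply in bits that are set
-//     }
-//     return (n < 0) ? 1.0 / r : r;      // negative exponent: reciprocal
-//   }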
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
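-
-// The three stubs above share TranscendentalCacheStub, which memoizes
-// results keyed on the argument's raw bit pattern (a sketch; the real
-// cache keys on the two 32-bit halves of the double):
-//
-//   uint64_t bits = double_to_bits(x);
-//   Element& e = cache[hash(bits)];
-//   if (e.in == bits) return e.output;   // hit: no libm call, no allocation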
-
-
-// Generates the Math.sqrt method. Please note that this function assumes
-// that the call site has executed ToNumber on the argument.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
-
- if (!CpuFeatures::IsSupported(SSE2)) {
- Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
- frame()->Push(&result);
- } else {
- CpuFeatures::Scope use_sse2(SSE2);
- // Leave original value on the frame if we need to call runtime.
- frame()->Dup();
- Result result = frame()->Pop();
- result.ToRegister();
- frame()->Spill(result.reg());
- Label runtime;
- Label non_smi;
- Label load_done;
- JumpTarget end;
-
- __ test(result.reg(), Immediate(kSmiTagMask));
- __ j(not_zero, &non_smi);
- __ SmiUntag(result.reg());
- __ cvtsi2sd(xmm0, Operand(result.reg()));
- __ jmp(&load_done);
- __ bind(&non_smi);
- __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(not_equal, &runtime);
- __ movdbl(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
-
- __ bind(&load_done);
- __ sqrtsd(xmm0, xmm0);
- // A copy of the virtual frame to allow us to go to runtime after the
- // JumpTarget jump.
- Result scratch = allocator()->Allocate();
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(result.reg(), scratch.reg(), no_reg, &runtime);
-
- __ movdbl(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
- frame()->Drop(1);
- scratch.Unuse();
- end.Jump(&result);
- // We only branch to runtime if we have an allocation error.
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- __ bind(&runtime);
- result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
-
- end.Bind(&result);
- frame()->Push(&result);
- }
-}
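-
-// Fast-path summary (a sketch): unbox, sqrtsd, rebox, and fall back to the
-// runtime only for non-numbers or allocation failure:
-//
-//   double v = is_smi(x) ? smi_value(x) : heap_number_value(x);
-//   return allocate_heap_number(sqrt(v));  // on failure: Runtime::kMath_sqrt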
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
- Load(args->at(0));
- Load(args->at(1));
- Result right_res = frame_->Pop();
- Result left_res = frame_->Pop();
- right_res.ToRegister();
- left_res.ToRegister();
- Result tmp_res = allocator()->Allocate();
- ASSERT(tmp_res.is_valid());
- Register right = right_res.reg();
- Register left = left_res.reg();
- Register tmp = tmp_res.reg();
- right_res.Unuse();
- left_res.Unuse();
- tmp_res.Unuse();
- __ cmp(left, Operand(right));
- destination()->true_target()->Branch(equal);
- // Fail if either is a non-HeapObject.
- __ mov(tmp, left);
- __ and_(Operand(tmp), right);
- __ test(Operand(tmp), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(equal);
- __ CmpObjectType(left, JS_REGEXP_TYPE, tmp);
- destination()->false_target()->Branch(not_equal);
- __ cmp(tmp, FieldOperand(right, HeapObject::kMapOffset));
- destination()->false_target()->Branch(not_equal);
- __ mov(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmp(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- destination()->Split(equal);
-}
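-
-// Equivalence here means (sketch): identical objects, or two JSRegExps that
-// share both their map and their compiled data array:
-//
-//   left == right ||
-//       (!is_smi(left) && !is_smi(right) && is_js_regexp(left) &&
-//        map(left) == map(right) && data(left) == data(right))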
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- if (FLAG_debug_code) {
- __ AbortIfNotString(value.reg());
- }
-
- __ test(FieldOperand(value.reg(), String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
-
- value.Unuse();
- destination()->Split(zero);
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result string = frame_->Pop();
- string.ToRegister();
- if (FLAG_debug_code) {
- __ AbortIfNotString(string.reg());
- }
-
- Result number = allocator()->Allocate();
- ASSERT(number.is_valid());
- __ mov(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
- __ IndexFromHash(number.reg(), number.reg());
- string.Unuse();
- frame_->Push(&number);
-}
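-
-// Background (sketch): when a string such as "7" is used as an array index,
-// its numeric value is cached inside the string's hash field. The two
-// helpers above test for and extract that cached index:
-//
-//   has_index = (hash_field & String::kContainsCachedArrayIndexMask) == 0;
-//   index     = IndexFromHash(hash_field);  // decodes the cached bits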
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
- ASSERT(!in_safe_int32_mode());
- if (CheckForInlineRuntimeCall(node)) {
- return;
- }
-
- ZoneList<Expression*>* args = node->arguments();
- Comment cmnt(masm_, "[ CallRuntime");
- Runtime::Function* function = node->function();
-
- if (function == NULL) {
- // Push the builtins object found in the current global object.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), GlobalObjectOperand());
- __ mov(temp.reg(), FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
- frame_->Push(&temp);
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- if (function == NULL) {
- // Call the JS runtime function.
- frame_->Push(node->name());
- Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting_);
- frame_->RestoreContextRegister();
- frame_->Push(&answer);
- } else {
- // Call the C runtime function.
- Result answer = frame_->CallRuntime(function, arg_count);
- frame_->Push(&answer);
- }
-}
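-
-// In the JS builtins source both branches use %-call syntax; a sketch of
-// how they are reached (names illustrative):
-//
-//   var a = %NumberToString(n);  // resolved: direct C++ runtime call
-//   var b = %SomeJsHelper(n);    // unresolved (function == NULL): looked
-//                                // up on the builtins object and called
-//                                // through the call IC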
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
- Comment cmnt(masm_, "[ UnaryOperation");
-
- Token::Value op = node->op();
-
- if (op == Token::NOT) {
- // Swap the true and false targets but keep the same actual label
- // as the fall through.
- destination()->Invert();
- LoadCondition(node->expression(), destination(), true);
- // Swap the labels back.
- destination()->Invert();
-
- } else if (op == Token::DELETE) {
- Property* property = node->expression()->AsProperty();
- if (property != NULL) {
- Load(property->obj());
- Load(property->key());
- frame_->Push(Smi::FromInt(strict_mode_flag()));
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
- frame_->Push(&answer);
- return;
- }
-
- Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
- if (variable != NULL) {
- // Delete of an unqualified identifier is disallowed in strict mode,
- // but "delete this" is allowed.
- ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
- Slot* slot = variable->AsSlot();
- if (variable->is_global()) {
- LoadGlobal();
- frame_->Push(variable->name());
- frame_->Push(Smi::FromInt(kNonStrictMode));
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 3);
- frame_->Push(&answer);
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Call the runtime to delete from the context holding the named
- // variable. Sync the virtual frame eagerly so we can push the
- // arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(variable->name()));
- Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
- frame_->Push(&answer);
- } else {
- // Default: Result of deleting non-global, not dynamically
- // introduced variables is false.
- frame_->Push(Factory::false_value());
- }
- } else {
- // Default: Result of deleting expressions is true.
- Load(node->expression()); // may have side-effects
- frame_->SetElementAt(0, Factory::true_value());
- }
-
- } else if (op == Token::TYPEOF) {
- // Special case for loading the typeof expression; see comment on
- // LoadTypeofExpression().
- LoadTypeofExpression(node->expression());
- Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
- frame_->Push(&answer);
-
- } else if (op == Token::VOID) {
- Expression* expression = node->expression();
- if (expression && expression->AsLiteral() && (
- expression->AsLiteral()->IsTrue() ||
- expression->AsLiteral()->IsFalse() ||
- expression->AsLiteral()->handle()->IsNumber() ||
- expression->AsLiteral()->handle()->IsString() ||
- expression->AsLiteral()->handle()->IsJSRegExp() ||
- expression->AsLiteral()->IsNull())) {
- // Omit evaluating the value of the primitive literal.
- // It will be discarded anyway, and can have no side effect.
- frame_->Push(Factory::undefined_value());
- } else {
- Load(node->expression());
- frame_->SetElementAt(0, Factory::undefined_value());
- }
-
- } else {
- if (in_safe_int32_mode()) {
- Visit(node->expression());
- Result value = frame_->Pop();
- ASSERT(value.is_untagged_int32());
- // Registers containing an int32 value are not multiply used.
- ASSERT(!value.is_register() || !frame_->is_used(value.reg()));
- value.ToRegister();
- switch (op) {
- case Token::SUB: {
- __ neg(value.reg());
- frame_->Push(&value);
- if (node->no_negative_zero()) {
- // -MIN_INT is MIN_INT with the overflow flag set.
- unsafe_bailout_->Branch(overflow);
- } else {
- // MIN_INT and 0 both have bad negations, and both results have their
- // low 31 bits clear, which the test below detects.
- __ test(value.reg(), Immediate(0x7FFFFFFF));
- unsafe_bailout_->Branch(zero);
- }
- break;
- }
- case Token::BIT_NOT: {
- __ not_(value.reg());
- frame_->Push(&value);
- break;
- }
- case Token::ADD: {
- // Unary plus has no effect on int32 values.
- frame_->Push(&value);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- } else {
- Load(node->expression());
- bool can_overwrite = node->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- bool no_negative_zero = node->expression()->no_negative_zero();
- switch (op) {
- case Token::NOT:
- case Token::DELETE:
- case Token::TYPEOF:
- UNREACHABLE(); // handled above
- break;
-
- case Token::SUB: {
- GenericUnaryOpStub stub(
- Token::SUB,
- overwrite,
- NO_UNARY_FLAGS,
- no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
- Result operand = frame_->Pop();
- Result answer = frame_->CallStub(&stub, &operand);
- answer.set_type_info(TypeInfo::Number());
- frame_->Push(&answer);
- break;
- }
- case Token::BIT_NOT: {
- // Smi check.
- JumpTarget smi_label;
- JumpTarget continue_label;
- Result operand = frame_->Pop();
- TypeInfo operand_info = operand.type_info();
- operand.ToRegister();
- if (operand_info.IsSmi()) {
- if (FLAG_debug_code) __ AbortIfNotSmi(operand.reg());
- frame_->Spill(operand.reg());
- // Set smi tag bit. It will be reset by the not operation.
- __ lea(operand.reg(), Operand(operand.reg(), kSmiTagMask));
- __ not_(operand.reg());
- Result answer = operand;
- answer.set_type_info(TypeInfo::Smi());
- frame_->Push(&answer);
- } else {
- __ test(operand.reg(), Immediate(kSmiTagMask));
- smi_label.Branch(zero, &operand, taken);
-
- GenericUnaryOpStub stub(Token::BIT_NOT,
- overwrite,
- NO_UNARY_SMI_CODE_IN_STUB);
- Result answer = frame_->CallStub(&stub, &operand);
- continue_label.Jump(&answer);
-
- smi_label.Bind(&answer);
- answer.ToRegister();
- frame_->Spill(answer.reg());
- // Set smi tag bit. It will be reset by the not operation.
- __ lea(answer.reg(), Operand(answer.reg(), kSmiTagMask));
- __ not_(answer.reg());
-
- continue_label.Bind(&answer);
- answer.set_type_info(TypeInfo::Integer32());
- frame_->Push(&answer);
- }
- break;
- }
- case Token::ADD: {
- // Smi check.
- JumpTarget continue_label;
- Result operand = frame_->Pop();
- TypeInfo operand_info = operand.type_info();
- operand.ToRegister();
- __ test(operand.reg(), Immediate(kSmiTagMask));
- continue_label.Branch(zero, &operand, taken);
-
- frame_->Push(&operand);
- Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
- CALL_FUNCTION, 1);
-
- continue_label.Bind(&answer);
- if (operand_info.IsSmi()) {
- answer.set_type_info(TypeInfo::Smi());
- } else if (operand_info.IsInteger32()) {
- answer.set_type_info(TypeInfo::Integer32());
- } else {
- answer.set_type_info(TypeInfo::Number());
- }
- frame_->Push(&answer);
- break;
- }
- default:
- UNREACHABLE();
- }
- }
- }
-}
-
-
-// The value in dst was optimistically incremented or decremented. The
-// result overflowed or was not smi tagged. Undo the operation, call
-// into the runtime to convert the argument to a number, and call the
-// specialized add or subtract stub. The result is left in dst.
-class DeferredPrefixCountOperation: public DeferredCode {
- public:
- DeferredPrefixCountOperation(Register dst,
- bool is_increment,
- TypeInfo input_type)
- : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
- set_comment("[ DeferredCountOperation");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- bool is_increment_;
- TypeInfo input_type_;
-};
-
-
-void DeferredPrefixCountOperation::Generate() {
- // Undo the optimistic smi operation.
- if (is_increment_) {
- __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
- } else {
- __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
- }
- Register left;
- if (input_type_.IsNumber()) {
- left = dst_;
- } else {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- left = eax;
- }
-
- GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Number());
- stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The value in dst was optimistically incremented or decremented. The
-// result overflowed or was not smi tagged. Undo the operation and call
-// into the runtime to convert the argument to a number. Update the
-// original value in old. Call the specialized add or subtract stub.
-// The result is left in dst.
-class DeferredPostfixCountOperation: public DeferredCode {
- public:
- DeferredPostfixCountOperation(Register dst,
- Register old,
- bool is_increment,
- TypeInfo input_type)
- : dst_(dst),
- old_(old),
- is_increment_(is_increment),
- input_type_(input_type) {
- set_comment("[ DeferredCountOperation");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Register old_;
- bool is_increment_;
- TypeInfo input_type_;
-};
-
-
-void DeferredPostfixCountOperation::Generate() {
- // Undo the optimistic smi operation.
- if (is_increment_) {
- __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
- } else {
- __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
- }
- Register left;
- if (input_type_.IsNumber()) {
- __ push(dst_); // Save the input to use as the old value.
- left = dst_;
- } else {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- __ push(eax); // Save the result of ToNumber to use as the old value.
- left = eax;
- }
-
- GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Number());
- stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
- if (!dst_.is(eax)) __ mov(dst_, eax);
- __ pop(old_);
-}
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ CountOperation");
-
- bool is_postfix = node->is_postfix();
- bool is_increment = node->op() == Token::INC;
-
- Variable* var = node->expression()->AsVariableProxy()->AsVariable();
- bool is_const = (var != NULL && var->mode() == Variable::CONST);
-
- // Postfix operations need a stack slot under the reference to hold
- // the old value while the new value is being stored. This is so that
- // in the case that storing the new value requires a call, the old
- // value will be in the frame to be spilled.
- if (is_postfix) frame_->Push(Smi::FromInt(0));
-
- // A constant reference is never stored to, so it is not treated as a
- // compound assignment reference.
- { Reference target(this, node->expression(), !is_const);
- if (target.is_illegal()) {
- // Spoof the virtual frame to have the expected height (one higher
- // than on entry).
- if (!is_postfix) frame_->Push(Smi::FromInt(0));
- return;
- }
- target.TakeValue();
-
- Result new_value = frame_->Pop();
- new_value.ToRegister();
-
- Result old_value; // Only allocated in the postfix case.
- if (is_postfix) {
- // Allocate a temporary to preserve the old value.
- old_value = allocator_->Allocate();
- ASSERT(old_value.is_valid());
- __ mov(old_value.reg(), new_value.reg());
-
- // The return value for postfix operations is ToNumber(input).
- // Keep more precise type info if the input is some kind of
- // number already. If the input is not a number we have to wait
- // for the deferred code to convert it.
- if (new_value.type_info().IsNumber()) {
- old_value.set_type_info(new_value.type_info());
- }
- }
-
- // Ensure the new value is writable.
- frame_->Spill(new_value.reg());
-
- Result tmp;
- if (new_value.is_smi()) {
- if (FLAG_debug_code) __ AbortIfNotSmi(new_value.reg());
- } else {
- // We don't know statically if the input is a smi.
- // In order to combine the overflow and the smi tag check, we need
- // to be able to allocate a byte register. We attempt to do so
- // without spilling. If we fail, we will generate separate overflow
- // and smi tag checks.
- // We allocate and clear a temporary byte register before performing
- // the count operation since clearing the register using xor will clear
- // the overflow flag.
- tmp = allocator_->AllocateByteRegisterWithoutSpilling();
- if (tmp.is_valid()) {
- __ Set(tmp.reg(), Immediate(0));
- }
- }
-
- if (is_increment) {
- __ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
- } else {
- __ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
- }
-
- DeferredCode* deferred = NULL;
- if (is_postfix) {
- deferred = new DeferredPostfixCountOperation(new_value.reg(),
- old_value.reg(),
- is_increment,
- new_value.type_info());
- } else {
- deferred = new DeferredPrefixCountOperation(new_value.reg(),
- is_increment,
- new_value.type_info());
- }
-
- if (new_value.is_smi()) {
- // In case we have a smi as input just check for overflow.
- deferred->Branch(overflow);
- } else {
- // If the count operation didn't overflow and the result is a valid
- // smi, we're done. Otherwise, we jump to the deferred slow-case
- // code.
- // We combine the overflow and the smi tag check if we could
- // successfully allocate a temporary byte register.
- if (tmp.is_valid()) {
- __ setcc(overflow, tmp.reg());
- __ or_(Operand(tmp.reg()), new_value.reg());
- __ test(tmp.reg(), Immediate(kSmiTagMask));
- tmp.Unuse();
- deferred->Branch(not_zero);
- } else {
- // Otherwise we test separately for overflow and smi tag.
- deferred->Branch(overflow);
- __ test(new_value.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- }
- }
- deferred->BindExit();
-
- // Postfix count operations return their input converted to
- // number. The case when the input is already a number is covered
- // above in the allocation code for old_value.
- if (is_postfix && !new_value.type_info().IsNumber()) {
- old_value.set_type_info(TypeInfo::Number());
- }
-
- // The result of ++ or -- is an Integer32 if the
- // input is a smi. Otherwise it is a number.
- if (new_value.is_smi()) {
- new_value.set_type_info(TypeInfo::Integer32());
- } else {
- new_value.set_type_info(TypeInfo::Number());
- }
-
- // Postfix: store the old value in the allocated slot under the
- // reference.
- if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
-
- frame_->Push(&new_value);
- // Non-constant: update the reference.
- if (!is_const) target.SetValue(NOT_CONST_INIT);
- }
-
- // Postfix: drop the new value and use the old.
- if (is_postfix) frame_->Drop();
-}
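-
-// Why the combined check above works (sketch): tmp is cleared before the
-// optimistic add/sub, setcc writes 1 on overflow, and or-ing in the result
-// folds both failure modes into a single kSmiTagMask test:
-//
-//   xor  tmp, tmp      ; tmp = 0 (also clears the overflow flag)
-//   add  value, 2      ; Smi::FromInt(1) is 2 when kSmiTagSize == 1
-//   seto tmp_b         ; tmp = 1 iff the add overflowed
-//   or   tmp, value    ; fold in the smi tag bit of the result
-//   test tmp, 1        ; kSmiTagMask: nonzero => deferred slow path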
-
-
-void CodeGenerator::Int32BinaryOperation(BinaryOperation* node) {
- Token::Value op = node->op();
- Comment cmnt(masm_, "[ Int32BinaryOperation");
- ASSERT(in_safe_int32_mode());
- ASSERT(safe_int32_mode_enabled());
- ASSERT(FLAG_safe_int32_compiler);
-
- if (op == Token::COMMA) {
- // Discard left value.
- frame_->Nip(1);
- return;
- }
-
- Result right = frame_->Pop();
- Result left = frame_->Pop();
-
- ASSERT(right.is_untagged_int32());
- ASSERT(left.is_untagged_int32());
- // Registers containing an int32 value are not multiply used.
- ASSERT(!left.is_register() || !frame_->is_used(left.reg()));
- ASSERT(!right.is_register() || !frame_->is_used(right.reg()));
-
- switch (op) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- UNREACHABLE();
- break;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- if (left.is_constant() || right.is_constant()) {
- int32_t value; // Put constant in value, non-constant in left.
- // Constants are known to be int32 values, from static analysis,
- // or else will be converted to int32 by implicit ECMA [[ToInt32]].
- if (left.is_constant()) {
- ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber());
- value = NumberToInt32(*left.handle());
- left = right;
- } else {
- ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
- value = NumberToInt32(*right.handle());
- }
-
- left.ToRegister();
- if (op == Token::BIT_OR) {
- __ or_(Operand(left.reg()), Immediate(value));
- } else if (op == Token::BIT_XOR) {
- __ xor_(Operand(left.reg()), Immediate(value));
- } else {
- ASSERT(op == Token::BIT_AND);
- __ and_(Operand(left.reg()), Immediate(value));
- }
- } else {
- ASSERT(left.is_register());
- ASSERT(right.is_register());
- if (op == Token::BIT_OR) {
- __ or_(left.reg(), Operand(right.reg()));
- } else if (op == Token::BIT_XOR) {
- __ xor_(left.reg(), Operand(right.reg()));
- } else {
- ASSERT(op == Token::BIT_AND);
- __ and_(left.reg(), Operand(right.reg()));
- }
- }
- frame_->Push(&left);
- right.Unuse();
- break;
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- bool test_shr_overflow = false;
- left.ToRegister();
- if (right.is_constant()) {
- ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
- int shift_amount = NumberToInt32(*right.handle()) & 0x1F;
- if (op == Token::SAR) {
- __ sar(left.reg(), shift_amount);
- } else if (op == Token::SHL) {
- __ shl(left.reg(), shift_amount);
- } else {
- ASSERT(op == Token::SHR);
- __ shr(left.reg(), shift_amount);
- if (shift_amount == 0) test_shr_overflow = true;
- }
- } else {
- // Move right into ecx.
- if (left.is_register() && left.reg().is(ecx)) {
- right.ToRegister();
- __ xchg(left.reg(), right.reg());
- left = right; // Left is unused here, copy of right unused by Push.
- } else {
- right.ToRegister(ecx);
- left.ToRegister();
- }
- if (op == Token::SAR) {
- __ sar_cl(left.reg());
- } else if (op == Token::SHL) {
- __ shl_cl(left.reg());
- } else {
- ASSERT(op == Token::SHR);
- __ shr_cl(left.reg());
- test_shr_overflow = true;
- }
- }
- {
- Register left_reg = left.reg();
- frame_->Push(&left);
- right.Unuse();
- if (test_shr_overflow && !node->to_int32()) {
- // Uint32 results with top bit set are not Int32 values.
- // If they will be forced to Int32, skip the test.
- // Test is needed because shr with shift amount 0 does not set flags.
- __ test(left_reg, Operand(left_reg));
- unsafe_bailout_->Branch(sign);
- }
- }
- break;
- }
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- if ((left.is_constant() && op != Token::SUB) || right.is_constant()) {
- int32_t value; // Put constant in value, non-constant in left.
- if (right.is_constant()) {
- ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
- value = NumberToInt32(*right.handle());
- } else {
- ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber());
- value = NumberToInt32(*left.handle());
- left = right;
- }
-
- left.ToRegister();
- if (op == Token::ADD) {
- __ add(Operand(left.reg()), Immediate(value));
- } else if (op == Token::SUB) {
- __ sub(Operand(left.reg()), Immediate(value));
- } else {
- ASSERT(op == Token::MUL);
- __ imul(left.reg(), left.reg(), value);
- }
- } else {
- left.ToRegister();
- ASSERT(left.is_register());
- ASSERT(right.is_register());
- if (op == Token::ADD) {
- __ add(left.reg(), Operand(right.reg()));
- } else if (op == Token::SUB) {
- __ sub(left.reg(), Operand(right.reg()));
- } else {
- ASSERT(op == Token::MUL);
- // We have statically verified that a negative zero can be ignored.
- __ imul(left.reg(), Operand(right.reg()));
- }
- }
- right.Unuse();
- frame_->Push(&left);
- if (!node->to_int32() || op == Token::MUL) {
- // If ToInt32 is applied to the result of ADD or SUB, overflow does
- // not matter: the truncated 32 bits are exactly what ToInt32 would
- // produce. A MUL result, however, may not be representable precisely
- // as a double, so MUL must always check for overflow.
- unsafe_bailout_->Branch(overflow);
- }
- break;
- case Token::DIV:
- case Token::MOD: {
- if (right.is_register() && (right.reg().is(eax) || right.reg().is(edx))) {
- if (left.is_register() && left.reg().is(edi)) {
- right.ToRegister(ebx);
- } else {
- right.ToRegister(edi);
- }
- }
- left.ToRegister(eax);
- Result edx_reg = allocator_->Allocate(edx);
- right.ToRegister();
- // The results are unused here because BreakTarget::Branch cannot handle
- // live results.
- Register right_reg = right.reg();
- left.Unuse();
- right.Unuse();
- edx_reg.Unuse();
- __ cmp(right_reg, 0);
- // Ensure divisor is positive: no chance of non-int32 or -0 result.
- unsafe_bailout_->Branch(less_equal);
- __ cdq(); // Sign-extend eax into edx:eax
- __ idiv(right_reg);
- if (op == Token::MOD) {
- // Negative zero can arise from a negative dividend with a zero result.
- if (!node->no_negative_zero()) {
- Label not_negative_zero;
- __ test(edx, Operand(edx));
- __ j(not_zero, &not_negative_zero);
- __ test(eax, Operand(eax));
- unsafe_bailout_->Branch(negative);
- __ bind(&not_negative_zero);
- }
- Result edx_result(edx, TypeInfo::Integer32());
- edx_result.set_untagged_int32(true);
- frame_->Push(&edx_result);
- } else {
- ASSERT(op == Token::DIV);
- __ test(edx, Operand(edx));
- unsafe_bailout_->Branch(not_equal);
- Result eax_result(eax, TypeInfo::Integer32());
- eax_result.set_untagged_int32(true);
- frame_->Push(&eax_result);
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
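-
-// Example of the SHR bailout above: in safe-int32 mode the untagged value
-// must remain a signed 32-bit integer, but e.g.
-//
-//   (x >>> 0)   // with x == -1 yields 4294967295
-//
-// has the top bit set and is only representable as a Uint32, so unless the
-// consumer forces ToInt32 the code branches to unsafe_bailout_.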
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
- // According to ECMA-262 section 11.11, page 58, the binary logical
- // operators must yield the result of one of the two expressions
- // before any ToBoolean() conversions. This means that the value
- // produced by a && or || operator is not necessarily a boolean.
-
- // NOTE: If the left hand side produces a materialized value (not
- // control flow), we force the right hand side to do the same. This
- // is necessary because we assume that if we get control flow on the
- // last path out of an expression we got it on all paths.
- if (node->op() == Token::AND) {
- ASSERT(!in_safe_int32_mode());
- JumpTarget is_true;
- ControlDestination dest(&is_true, destination()->false_target(), true);
- LoadCondition(node->left(), &dest, false);
-
- if (dest.false_was_fall_through()) {
- // The current false target was used as the fall-through. If
- // there are no dangling jumps to is_true then the left
- // subexpression was unconditionally false. Otherwise we have
- // paths where we do have to evaluate the right subexpression.
- if (is_true.is_linked()) {
- // We need to compile the right subexpression. If the jump to
- // the current false target was a forward jump then we have a
- // valid frame, we have just bound the false target, and we
- // have to jump around the code for the right subexpression.
- if (has_valid_frame()) {
- destination()->false_target()->Unuse();
- destination()->false_target()->Jump();
- }
- is_true.Bind();
- // The left subexpression compiled to control flow, so the
- // right one is free to do so as well.
- LoadCondition(node->right(), destination(), false);
- } else {
- // We have actually just jumped to or bound the current false
- // target but the current control destination is not marked as
- // used.
- destination()->Use(false);
- }
-
- } else if (dest.is_used()) {
- // The left subexpression compiled to control flow (and is_true
- // was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), destination(), false);
-
- } else {
- // We have a materialized value on the frame, so we exit with
- // one on all paths. There are possibly also jumps to is_true
- // from nested subexpressions.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- // Avoid popping the result if it converts to 'false' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- //
- // Duplicate the TOS value. The duplicate will be popped by
- // ToBoolean.
- frame_->Dup();
- ControlDestination dest(&pop_and_continue, &exit, true);
- ToBoolean(&dest);
-
- // Pop the result of evaluating the first part.
- frame_->Drop();
-
- // Compile right side expression.
- is_true.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- }
-
- } else {
- ASSERT(node->op() == Token::OR);
- ASSERT(!in_safe_int32_mode());
- JumpTarget is_false;
- ControlDestination dest(destination()->true_target(), &is_false, false);
- LoadCondition(node->left(), &dest, false);
-
- if (dest.true_was_fall_through()) {
- // The current true target was used as the fall-through. If
- // there are no dangling jumps to is_false then the left
- // subexpression was unconditionally true. Otherwise we have
- // paths where we do have to evaluate the right subexpression.
- if (is_false.is_linked()) {
- // We need to compile the right subexpression. If the jump to
- // the current true target was a forward jump then we have a
- // valid frame, we have just bound the true target, and we
- // have to jump around the code for the right subexpression.
- if (has_valid_frame()) {
- destination()->true_target()->Unuse();
- destination()->true_target()->Jump();
- }
- is_false.Bind();
- // The left subexpression compiled to control flow, so the
- // right one is free to do so as well.
- LoadCondition(node->right(), destination(), false);
- } else {
- // We have just jumped to or bound the current true target but
- // the current control destination is not marked as used.
- destination()->Use(true);
- }
-
- } else if (dest.is_used()) {
- // The left subexpression compiled to control flow (and is_false
- // was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), destination(), false);
-
- } else {
- // We have a materialized value on the frame, so we exit with
- // one on all paths. There are possibly also jumps to is_false
- // from nested subexpressions.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- // Avoid popping the result if it converts to 'true' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- //
- // Duplicate the TOS value. The duplicate will be popped by
- // ToBoolean.
- frame_->Dup();
- ControlDestination dest(&exit, &pop_and_continue, false);
- ToBoolean(&dest);
-
- // Pop the result of evaluating the first part.
- frame_->Drop();
-
- // Compile right side expression.
- is_false.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- }
- }
-}
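-
-// The value-producing paths implement ECMA-262 11.11 directly, e.g.:
-//
-//   var x = a && b;  // yields a when ToBoolean(a) is false, else b
-//   var y = a || b;  // yields a when ToBoolean(a) is true, else b
-//
-// which is why the left operand is duplicated, tested with ToBoolean, and
-// only popped when the right operand must be evaluated.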
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
- Comment cmnt(masm_, "[ BinaryOperation");
-
- if (node->op() == Token::AND || node->op() == Token::OR) {
- GenerateLogicalBooleanOperation(node);
- } else if (in_safe_int32_mode()) {
- Visit(node->left());
- Visit(node->right());
- Int32BinaryOperation(node);
- } else {
- // NOTE: The code below assumes that the slow cases (calls to runtime)
- // never return a constant/immutable object.
- OverwriteMode overwrite_mode = NO_OVERWRITE;
- if (node->left()->ResultOverwriteAllowed()) {
- overwrite_mode = OVERWRITE_LEFT;
- } else if (node->right()->ResultOverwriteAllowed()) {
- overwrite_mode = OVERWRITE_RIGHT;
- }
-
- if (node->left()->IsTrivial()) {
- Load(node->right());
- Result right = frame_->Pop();
- frame_->Push(node->left());
- frame_->Push(&right);
- } else {
- Load(node->left());
- Load(node->right());
- }
- GenericBinaryOperation(node, overwrite_mode);
- }
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
- ASSERT(!in_safe_int32_mode());
- frame_->PushFunction();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ CompareOperation");
-
- bool left_already_loaded = false;
-
- // Get the expressions from the node.
- Expression* left = node->left();
- Expression* right = node->right();
- Token::Value op = node->op();
- // To make typeof testing for natives implemented in JavaScript really
- // efficient, we generate special code for expressions of the form:
- // 'typeof <expression> == <string>'.
- UnaryOperation* operation = left->AsUnaryOperation();
- if ((op == Token::EQ || op == Token::EQ_STRICT) &&
- (operation != NULL && operation->op() == Token::TYPEOF) &&
- (right->AsLiteral() != NULL &&
- right->AsLiteral()->handle()->IsString())) {
- Handle<String> check(String::cast(*right->AsLiteral()->handle()));
-
- // Load the operand and move it to a register.
- LoadTypeofExpression(operation->expression());
- Result answer = frame_->Pop();
- answer.ToRegister();
-
- if (check->Equals(Heap::number_symbol())) {
- __ test(answer.reg(), Immediate(kSmiTagMask));
- destination()->true_target()->Branch(zero);
- frame_->Spill(answer.reg());
- __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ cmp(answer.reg(), Factory::heap_number_map());
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(Heap::string_symbol())) {
- __ test(answer.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
-
- // It can be an undetectable string object.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- destination()->false_target()->Branch(not_zero);
- __ CmpInstanceType(temp.reg(), FIRST_NONSTRING_TYPE);
- temp.Unuse();
- answer.Unuse();
- destination()->Split(below);
-
- } else if (check->Equals(Heap::boolean_symbol())) {
- __ cmp(answer.reg(), Factory::true_value());
- destination()->true_target()->Branch(equal);
- __ cmp(answer.reg(), Factory::false_value());
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(Heap::undefined_symbol())) {
- __ cmp(answer.reg(), Factory::undefined_value());
- destination()->true_target()->Branch(equal);
-
- __ test(answer.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
-
- // It can be an undetectable object.
- frame_->Spill(answer.reg());
- __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(answer.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- answer.Unuse();
- destination()->Split(not_zero);
-
- } else if (check->Equals(Heap::function_symbol())) {
- __ test(answer.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
- frame_->Spill(answer.reg());
- __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
- destination()->true_target()->Branch(equal);
- // Regular expressions are callable so typeof == 'function'.
- __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
- answer.Unuse();
- destination()->Split(equal);
- } else if (check->Equals(Heap::object_symbol())) {
- __ test(answer.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
- __ cmp(answer.reg(), Factory::null_value());
- destination()->true_target()->Branch(equal);
-
- Result map = allocator()->Allocate();
- ASSERT(map.is_valid());
- // Regular expressions are typeof == 'function', not 'object'.
- __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, map.reg());
- destination()->false_target()->Branch(equal);
-
- // It can be an undetectable object.
- __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- destination()->false_target()->Branch(not_zero);
- // Do a range test for JSObject type. We can't use
- // MacroAssembler::IsInstanceJSObjectType, because we are using a
- // ControlDestination, so we copy its implementation here.
- __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
- __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
- __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
- answer.Unuse();
- map.Unuse();
- destination()->Split(below_equal);
- } else {
- // Uncommon case: typeof testing against a string literal that is
- // never returned from the typeof operator.
- answer.Unuse();
- destination()->Goto(false);
- }
- return;
- } else if (op == Token::LT &&
- right->AsLiteral() != NULL &&
- right->AsLiteral()->handle()->IsHeapNumber()) {
- Handle<HeapNumber> check(HeapNumber::cast(*right->AsLiteral()->handle()));
- if (check->value() == 2147483648.0) { // 0x80000000.
- Load(left);
- left_already_loaded = true;
- Result lhs = frame_->Pop();
- lhs.ToRegister();
- __ test(lhs.reg(), Immediate(kSmiTagMask));
- destination()->true_target()->Branch(zero); // All Smis are less.
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
- __ mov(scratch.reg(), FieldOperand(lhs.reg(), HeapObject::kMapOffset));
- __ cmp(scratch.reg(), Factory::heap_number_map());
- JumpTarget not_a_number;
- not_a_number.Branch(not_equal, &lhs);
- __ mov(scratch.reg(),
- FieldOperand(lhs.reg(), HeapNumber::kExponentOffset));
- __ cmp(Operand(scratch.reg()), Immediate(0xfff00000));
- not_a_number.Branch(above_equal, &lhs); // It's a negative NaN or -Inf.
- const uint32_t borderline_exponent =
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch.reg()), Immediate(borderline_exponent));
- scratch.Unuse();
- lhs.Unuse();
- destination()->true_target()->Branch(less);
- destination()->false_target()->Jump();
-
- not_a_number.Bind(&lhs);
- frame_->Push(&lhs);
- }
- }
-
- Condition cc = no_condition;
- bool strict = false;
- switch (op) {
- case Token::EQ_STRICT:
- strict = true;
- // Fall through
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
- break;
- case Token::GTE:
- cc = greater_equal;
- break;
- case Token::IN: {
- if (!left_already_loaded) Load(left);
- Load(right);
- Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
- frame_->Push(&answer); // push the result
- return;
- }
- case Token::INSTANCEOF: {
- if (!left_already_loaded) Load(left);
- Load(right);
- InstanceofStub stub(InstanceofStub::kNoFlags);
- Result answer = frame_->CallStub(&stub, 2);
- answer.ToRegister();
- __ test(answer.reg(), Operand(answer.reg()));
- answer.Unuse();
- destination()->Split(zero);
- return;
- }
- default:
- UNREACHABLE();
- }
-
- if (left->IsTrivial()) {
- if (!left_already_loaded) {
- Load(right);
- Result right_result = frame_->Pop();
- frame_->Push(left);
- frame_->Push(&right_result);
- } else {
- Load(right);
- }
- } else {
- if (!left_already_loaded) Load(left);
- Load(right);
- }
- Comparison(node, cc, strict, destination());
-}
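-
-// The typeof fast path above targets comparisons written as, e.g.:
-//
-//   typeof x == 'number'     // compiled to a smi-tag / map check
-//   typeof x == 'undefined'  // also matches undetectable objects
-//
-// so the typeof result string is never materialized on these paths.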
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
- ASSERT(!in_safe_int32_mode());
- Comment cmnt(masm_, "[ CompareToNull");
-
- Load(node->expression());
- Result operand = frame_->Pop();
- operand.ToRegister();
- __ cmp(operand.reg(), Factory::null_value());
- if (node->is_strict()) {
- operand.Unuse();
- destination()->Split(equal);
- } else {
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- destination()->true_target()->Branch(equal);
- __ cmp(operand.reg(), Factory::undefined_value());
- destination()->true_target()->Branch(equal);
- __ test(operand.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(equal);
-
- // It can be an undetectable object.
- // Use a scratch register in preference to spilling operand.reg().
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(),
- FieldOperand(operand.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- temp.Unuse();
- operand.Unuse();
- destination()->Split(not_zero);
- }
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
- return (allocator()->count(eax) == (frame()->is_used(eax) ? 1 : 0))
- && (allocator()->count(ebx) == (frame()->is_used(ebx) ? 1 : 0))
- && (allocator()->count(ecx) == (frame()->is_used(ecx) ? 1 : 0))
- && (allocator()->count(edx) == (frame()->is_used(edx) ? 1 : 0))
- && (allocator()->count(edi) == (frame()->is_used(edi) ? 1 : 0));
-}
-#endif
-
-
-// Emit a LoadIC call to get the value from receiver and leave it in
-// dst.
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
- DeferredReferenceGetNamedValue(Register dst,
- Register receiver,
- Handle<String> name,
- bool is_contextual)
- : dst_(dst),
- receiver_(receiver),
- name_(name),
- is_contextual_(is_contextual),
- is_dont_delete_(false) {
- set_comment(is_contextual
- ? "[ DeferredReferenceGetNamedValue (contextual)"
- : "[ DeferredReferenceGetNamedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- void set_is_dont_delete(bool value) {
- ASSERT(is_contextual_);
- is_dont_delete_ = value;
- }
-
- private:
- Label patch_site_;
- Register dst_;
- Register receiver_;
- Handle<String> name_;
- bool is_contextual_;
- bool is_dont_delete_;
-};
-
-
-void DeferredReferenceGetNamedValue::Generate() {
- if (!receiver_.is(eax)) {
- __ mov(eax, receiver_);
- }
- __ Set(ecx, Immediate(name_));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- RelocInfo::Mode mode = is_contextual_
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- __ call(ic, mode);
- // The call must be followed by:
- // - a test eax instruction to indicate that the inobject property
- // case was inlined.
- // - a mov ecx or mov edx instruction to indicate that the
- // contextual property load was inlined.
- //
- // Store the delta to the map check instruction here in the test
- // instruction. Use masm_-> instead of the __ macro since the
- // latter can't return a value.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- if (is_contextual_) {
- masm_->mov(is_dont_delete_ ? edx : ecx, -delta_to_patch_site);
- __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
- if (is_dont_delete_) {
- __ IncrementCounter(&Counters::dont_delete_hint_miss, 1);
- }
- } else {
- masm_->test(eax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::named_load_inline_miss, 1);
- }
-
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
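-
-// Patch-site protocol (sketch): the instruction emitted right after the IC
-// call encodes the distance back to the inlined map-check cmp so the IC
-// miss handler can locate and rewrite it:
-//
-//   call LoadIC_Initialize
-//   test eax, -delta    ; inobject property case
-//   mov  ecx, -delta    ; contextual case (edx when the DontDelete hint is
-//                       ; set), where delta is the number of bytes back to
-//                       ; the patch site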
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
- explicit DeferredReferenceGetKeyedValue(Register dst,
- Register receiver,
- Register key)
- : dst_(dst), receiver_(receiver), key_(key) {
- set_comment("[ DeferredReferenceGetKeyedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- private:
- Label patch_site_;
- Register dst_;
- Register receiver_;
- Register key_;
-};
-
-
-void DeferredReferenceGetKeyedValue::Generate() {
- if (!receiver_.is(eax)) {
- // Register eax is available for key.
- if (!key_.is(eax)) {
- __ mov(eax, key_);
- }
- if (!receiver_.is(edx)) {
- __ mov(edx, receiver_);
- }
- } else if (!key_.is(edx)) {
- // Register edx is available for receiver.
- if (!receiver_.is(edx)) {
- __ mov(edx, receiver_);
- }
- if (!key_.is(eax)) {
- __ mov(eax, key_);
- }
- } else {
- __ xchg(edx, eax);
- }
- // Calculate the delta from the IC call instruction to the map check
- // cmp instruction in the inlined version. This delta is stored in
- // a test(eax, delta) instruction after the call so that we can find
- // it in the IC initialization code and patch the cmp instruction.
- // This means that we cannot allow test instructions after calls to
- // KeyedLoadIC stubs in other places.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instruction to the
- // test instruction. We use masm_-> directly here instead of the __
- // macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- masm_->test(eax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
-
- if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceSetKeyedValue(Register value,
- Register key,
- Register receiver,
- Register scratch,
- StrictModeFlag strict_mode)
- : value_(value),
- key_(key),
- receiver_(receiver),
- scratch_(scratch),
- strict_mode_(strict_mode) {
- set_comment("[ DeferredReferenceSetKeyedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- private:
- Register value_;
- Register key_;
- Register receiver_;
- Register scratch_;
- Label patch_site_;
- StrictModeFlag strict_mode_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
- __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
- // Move value_ to eax, key_ to ecx, and receiver_ to edx.
- Register old_value = value_;
-
- // First, move value to eax.
- if (!value_.is(eax)) {
- if (key_.is(eax)) {
- // Move key_ out of eax, preferably to ecx.
- if (!value_.is(ecx) && !receiver_.is(ecx)) {
- __ mov(ecx, key_);
- key_ = ecx;
- } else {
- __ mov(scratch_, key_);
- key_ = scratch_;
- }
- }
- if (receiver_.is(eax)) {
- // Move receiver_ out of eax, preferably to edx.
- if (!value_.is(edx) && !key_.is(edx)) {
- __ mov(edx, receiver_);
- receiver_ = edx;
- } else {
- // Both moves to scratch_ are from eax; no valid path performs both.
- __ mov(scratch_, receiver_);
- receiver_ = scratch_;
- }
- }
- __ mov(eax, value_);
- value_ = eax;
- }
-
- // Now value_ is in eax. Move the other two to the right positions.
- // We do not update the variables key_ and receiver_ to ecx and edx.
- if (key_.is(ecx)) {
- if (!receiver_.is(edx)) {
- __ mov(edx, receiver_);
- }
- } else if (key_.is(edx)) {
- if (receiver_.is(ecx)) {
- __ xchg(edx, ecx);
- } else {
- __ mov(ecx, key_);
- if (!receiver_.is(edx)) {
- __ mov(edx, receiver_);
- }
- }
- } else { // Key is not in edx or ecx.
- if (!receiver_.is(edx)) {
- __ mov(edx, receiver_);
- }
- __ mov(ecx, key_);
- }
-
- // Call the IC stub.
- Handle<Code> ic(Builtins::builtin(
- (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instruction to the
- // test instruction. We use masm_-> directly here instead of the
- // __ macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- masm_->test(eax, Immediate(-delta_to_patch_site));
- // Restore value (returned from store IC) register.
- if (!old_value.is(eax)) __ mov(old_value, eax);
-}
-
-
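
Most of the register routing above is plain moves; the one true cycle (value already in eax, key in edx, receiver in ecx) is resolved by the single xchg. A tiny host-side model of that case, with plain ints standing in for the registers:

#include <assert.h>
#include <utility>

int main() {
  int value = 1, key = 2, receiver = 3;
  int eax = value, ecx = receiver, edx = key;  // the cyclic starting layout
  std::swap(edx, ecx);                         // models: xchg edx, ecx
  assert(eax == value && ecx == key && edx == receiver);  // all in place
  return 0;
}
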
-Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
-
- bool contextual_load_in_builtin =
- is_contextual &&
- (Bootstrapper::IsActive() ||
- (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
-
- Result result;
-  // Do not inline in global code or when not in a loop.
- if (scope()->is_global_scope() ||
- loop_nesting() == 0 ||
- contextual_load_in_builtin) {
- Comment cmnt(masm(), "[ Load from named Property");
- frame()->Push(name);
-
- RelocInfo::Mode mode = is_contextual
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- result = frame()->CallLoadIC(mode);
- // A test eax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test eax
- // instruction here.
- __ nop();
- } else {
- // Inline the property load.
- Comment cmnt(masm(), is_contextual
- ? "[ Inlined contextual property load"
- : "[ Inlined named property load");
- Result receiver = frame()->Pop();
- receiver.ToRegister();
-
- result = allocator()->Allocate();
- ASSERT(result.is_valid());
- DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(result.reg(),
- receiver.reg(),
- name,
- is_contextual);
-
- if (!is_contextual) {
- // Check that the receiver is a heap object.
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
- }
-
- __ bind(deferred->patch_site());
- // This is the map check instruction that will be patched (so we can't
- // use the double underscore macro that may insert instructions).
- // Initially use an invalid map to force a failure.
- masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
- // This branch is always a forwards branch so it's always a fixed size
- // which allows the assert below to succeed and patching to work.
- deferred->Branch(not_equal);
-
- // The delta from the patch label to the actual load must be
- // statically known.
- ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
- LoadIC::kOffsetToLoadInstruction);
-
- if (is_contextual) {
-      // Load the (initially invalid) cell and get its value.
- masm()->mov(result.reg(), Factory::null_value());
- if (FLAG_debug_code) {
- __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
- Factory::global_property_cell_map());
- __ Assert(equal, "Uninitialized inlined contextual load");
- }
- __ mov(result.reg(),
- FieldOperand(result.reg(), JSGlobalPropertyCell::kValueOffset));
- bool is_dont_delete = false;
- if (!info_->closure().is_null()) {
- // When doing lazy compilation we can check if the global cell
- // already exists and use its "don't delete" status as a hint.
- AssertNoAllocation no_gc;
- v8::internal::GlobalObject* global_object =
- info_->closure()->context()->global();
- LookupResult lookup;
- global_object->LocalLookupRealNamedProperty(*name, &lookup);
- if (lookup.IsProperty() && lookup.type() == NORMAL) {
- ASSERT(lookup.holder() == global_object);
- ASSERT(global_object->property_dictionary()->ValueAt(
- lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
- is_dont_delete = lookup.IsDontDelete();
- }
- }
- deferred->set_is_dont_delete(is_dont_delete);
- if (!is_dont_delete) {
- __ cmp(result.reg(), Factory::the_hole_value());
- deferred->Branch(equal);
- } else if (FLAG_debug_code) {
- __ cmp(result.reg(), Factory::the_hole_value());
- __ Check(not_equal, "DontDelete cells can't contain the hole");
- }
- __ IncrementCounter(&Counters::named_load_global_inline, 1);
- if (is_dont_delete) {
- __ IncrementCounter(&Counters::dont_delete_hint_hit, 1);
- }
- } else {
- // The initial (invalid) offset has to be large enough to force a 32-bit
- // instruction encoding to allow patching with an arbitrary offset. Use
- // kMaxInt (minus kHeapObjectTag).
- int offset = kMaxInt;
- masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
- __ IncrementCounter(&Counters::named_load_inline, 1);
- }
-
- deferred->BindExit();
- }
- ASSERT(frame()->height() == original_height - 1);
- return result;
-}
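
Both inlined forms above leave fixed-layout holes for the IC runtime to fill in later: the imm32 of the map compare (initially null_value) and either the cell address or the disp32 of the field load (initially kMaxInt). A hedged sketch of the patching step for the field-load variant; the +2 immediate offsets and the helper names are assumptions about the ia32 instruction layout, not V8's actual patcher:

#include <stdint.h>
#include <string.h>

static void PatchInt32At(uint8_t* addr, int32_t value) {
  memcpy(addr, &value, sizeof(value));  // unaligned imm32 rewrite in code
}

// patch_site points at the cmp instruction; offset_to_load is the fixed
// distance asserted above (LoadIC::kOffsetToLoadInstruction).
static void PatchInlinedNamedLoad(uint8_t* patch_site,
                                  int32_t real_map,
                                  int32_t real_field_offset,
                                  int offset_to_load) {
  PatchInt32At(patch_site + 2, real_map);      // cmp [reg+disp8], imm32
  uint8_t* load = patch_site + offset_to_load;
  PatchInt32At(load + 2, real_field_offset);   // mov reg, [reg+disp32]
}
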
-
-
-Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
- int expected_height = frame()->height() - (is_contextual ? 1 : 2);
-#endif
-
- Result result;
- if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
- // A test eax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test eax
- // instruction here.
- __ nop();
- } else {
- // Inline the in-object property case.
- JumpTarget slow, done;
- Label patch_site;
-
- // Get the value and receiver from the stack.
- Result value = frame()->Pop();
- value.ToRegister();
- Result receiver = frame()->Pop();
- receiver.ToRegister();
-
- // Allocate result register.
- result = allocator()->Allocate();
- ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
-
- // Check that the receiver is a heap object.
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- slow.Branch(zero, &value, &receiver);
-
- // This is the map check instruction that will be patched (so we can't
- // use the double underscore macro that may insert instructions).
- // Initially use an invalid map to force a failure.
- __ bind(&patch_site);
- masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
- // This branch is always a forwards branch so it's always a fixed size
- // which allows the assert below to succeed and patching to work.
- slow.Branch(not_equal, &value, &receiver);
-
- // The delta from the patch label to the store offset must be
- // statically known.
- ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
- StoreIC::kOffsetToStoreInstruction);
-
- // The initial (invalid) offset has to be large enough to force a 32-bit
- // instruction encoding to allow patching with an arbitrary offset. Use
- // kMaxInt (minus kHeapObjectTag).
- int offset = kMaxInt;
- __ mov(FieldOperand(receiver.reg(), offset), value.reg());
- __ mov(result.reg(), Operand(value.reg()));
-
- // Allocate scratch register for write barrier.
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- // The write barrier clobbers all input registers, so spill the
- // receiver and the value.
- frame_->Spill(receiver.reg());
- frame_->Spill(value.reg());
-
-    // If the receiver and the value share a register, allocate a new
-    // register for the receiver.
- if (receiver.reg().is(value.reg())) {
- receiver = allocator()->Allocate();
- ASSERT(receiver.is_valid());
- __ mov(receiver.reg(), Operand(value.reg()));
- }
-
- // Update the write barrier. To save instructions in the inlined
- // version we do not filter smis.
- Label skip_write_barrier;
- __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
- int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
- __ lea(scratch.reg(), Operand(receiver.reg(), offset));
- __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
- if (FLAG_debug_code) {
- __ mov(receiver.reg(), Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(value.reg(), Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(scratch.reg(), Immediate(BitCast<int32_t>(kZapValue)));
- }
- __ bind(&skip_write_barrier);
- value.Unuse();
- scratch.Unuse();
- receiver.Unuse();
- done.Jump(&result);
-
- slow.Bind(&value, &receiver);
- frame()->Push(&receiver);
- frame()->Push(&value);
- result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
- // Encode the offset to the map check instruction and the offset
- // to the write barrier store address computation in a test eax
- // instruction.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
- __ test(eax,
- Immediate((delta_to_record_write << 16) | delta_to_patch_site));
- done.Bind(&result);
- }
-
- ASSERT_EQ(expected_height, frame()->height());
- return result;
-}
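
The test eax marker emitted in the slow path above packs two code offsets into a single imm32: the delta to the patchable map compare in the low half and the delta to the write-barrier address computation in the high half, so both must fit in 16 bits. A small sketch of the encode/decode pair (helper names are illustrative):

#include <stdint.h>
#include <assert.h>

static uint32_t PackDeltas(uint32_t delta_to_record_write,
                           uint32_t delta_to_patch_site) {
  assert(delta_to_record_write < (1u << 16));
  assert(delta_to_patch_site < (1u << 16));
  // Mirrors: Immediate((delta_to_record_write << 16) | delta_to_patch_site)
  return (delta_to_record_write << 16) | delta_to_patch_site;
}

int main() {
  uint32_t imm = PackDeltas(40, 12);
  assert((imm >> 16) == 40);     // delta to the RecordWrite address compute
  assert((imm & 0xFFFF) == 12);  // delta to the patchable map compare
  return 0;
}
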
-
-
-Result CodeGenerator::EmitKeyedLoad() {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Inline array load code if inside of a loop. We do not know the
- // receiver map yet, so we initially generate the code with a check
- // against an invalid map. In the inline cache code, we patch the map
- // check if appropriate.
- if (loop_nesting() > 0) {
- Comment cmnt(masm_, "[ Inlined load from keyed Property");
-
- // Use a fresh temporary to load the elements without destroying
- // the receiver which is needed for the deferred slow case.
- Result elements = allocator()->Allocate();
- ASSERT(elements.is_valid());
-
- Result key = frame_->Pop();
- Result receiver = frame_->Pop();
- key.ToRegister();
- receiver.ToRegister();
-
- // If key and receiver are shared registers on the frame, their values will
- // be automatically saved and restored when going to deferred code.
- // The result is in elements, which is guaranteed non-shared.
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(elements.reg(),
- receiver.reg(),
- key.reg());
-
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
-
- // Check that the receiver has the expected map.
- // Initially, use an invalid map. The map is patched in the IC
- // initialization code.
- __ bind(deferred->patch_site());
- // Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching.
- masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
- deferred->Branch(not_equal);
-
- // Check that the key is a smi.
- if (!key.is_smi()) {
- __ test(key.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
- }
-
- // Get the elements array from the receiver.
- __ mov(elements.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ AssertFastElements(elements.reg());
-
- // Check that the key is within bounds.
- __ cmp(key.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // Load and check that the result is not the hole.
- // Key holds a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ mov(elements.reg(),
- FieldOperand(elements.reg(),
- key.reg(),
- times_2,
- FixedArray::kHeaderSize));
- result = elements;
- __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
- deferred->Branch(equal);
- __ IncrementCounter(&Counters::keyed_load_inline, 1);
-
- deferred->BindExit();
- } else {
- Comment cmnt(masm_, "[ Load from keyed Property");
- result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
- }
- ASSERT(frame()->height() == original_height - 2);
- return result;
-}
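
The times_2 scale in the element load above works because an ia32 smi stores its value shifted left by one (kSmiTag == 0, kSmiTagSize == 1): scaling the tagged key by 2 therefore yields index * 4, exactly the byte offset of a pointer-sized element, and FieldOperand folds in the header size minus the heap-object tag. A worked arithmetic check using the ia32 constants:

#include <assert.h>
#include <stdint.h>

int main() {
  const int32_t kSmiTagSize = 1;
  const int32_t kPointerSize = 4;
  const int32_t kHeapObjectTag = 1;
  const int32_t kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length

  int32_t index = 7;
  int32_t smi_key = index << kSmiTagSize;  // tagged smi: value * 2
  // FieldOperand(elements, key, times_2, kHeaderSize) computes
  //   elements + smi_key * 2 + kHeaderSize - kHeapObjectTag
  int32_t disp = smi_key * 2 + kFixedArrayHeaderSize - kHeapObjectTag;
  assert(disp == index * kPointerSize + kFixedArrayHeaderSize - kHeapObjectTag);
  return 0;
}
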
-
-
-Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Generate inlined version of the keyed store if the code is in a loop
- // and the key is likely to be a smi.
- if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
- Comment cmnt(masm(), "[ Inlined store to keyed Property");
-
- // Get the receiver, key and value into registers.
- result = frame()->Pop();
- Result key = frame()->Pop();
- Result receiver = frame()->Pop();
-
- Result tmp = allocator_->Allocate();
- ASSERT(tmp.is_valid());
- Result tmp2 = allocator_->Allocate();
- ASSERT(tmp2.is_valid());
-
- // Determine whether the value is a constant before putting it in a
- // register.
- bool value_is_constant = result.is_constant();
-
- // Make sure that value, key and receiver are in registers.
- result.ToRegister();
- key.ToRegister();
- receiver.ToRegister();
-
- DeferredReferenceSetKeyedValue* deferred =
- new DeferredReferenceSetKeyedValue(result.reg(),
- key.reg(),
- receiver.reg(),
- tmp.reg(),
- strict_mode_flag());
-
- // Check that the receiver is not a smi.
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
-
- // Check that the key is a smi.
- if (!key.is_smi()) {
- __ test(key.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
- }
-
- // Check that the receiver is a JSArray.
- __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, tmp.reg());
- deferred->Branch(not_equal);
-
- // Check that the key is within bounds. Both the key and the length of
- // the JSArray are smis. Use unsigned comparison to handle negative keys.
- __ cmp(key.reg(),
- FieldOperand(receiver.reg(), JSArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // Get the elements array from the receiver and check that it is not a
- // dictionary.
- __ mov(tmp.reg(),
- FieldOperand(receiver.reg(), JSArray::kElementsOffset));
-
-    // Check whether it is possible to omit the write barrier. If the elements
-    // array is in new space or the value written is a smi, we can safely
-    // update the elements array without a write barrier.
- Label in_new_space;
- __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
- if (!value_is_constant) {
- __ test(result.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- }
-
- __ bind(&in_new_space);
- // Bind the deferred code patch site to be able to locate the fixed
- // array map comparison. When debugging, we patch this comparison to
- // always fail so that we will hit the IC call in the deferred code
- // which will allow the debugger to break for fast case stores.
- __ bind(deferred->patch_site());
- __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- deferred->Branch(not_equal);
-
- // Store the value.
- __ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg());
- __ IncrementCounter(&Counters::keyed_store_inline, 1);
-
- deferred->BindExit();
- } else {
- result = frame()->CallKeyedStoreIC(strict_mode_flag());
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed store.
- __ nop();
- }
- ASSERT(frame()->height() == original_height - 3);
- return result;
-}
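
The new-space and smi checks above implement the usual barrier-elision rule: a write barrier is needed only when a heap pointer is written into an object that is not itself in new space. The rule, distilled into a checkable predicate (names are illustrative; the generated code tests the same conditions dynamically):

#include <assert.h>

static bool NeedsWriteBarrier(bool elements_in_new_space, bool value_is_smi) {
  // New-space pages are fully scanned by the scavenger, and smis are not
  // pointers, so neither case can create an unrecorded old->new pointer.
  return !elements_in_new_space && !value_is_smi;
}

int main() {
  assert(!NeedsWriteBarrier(true, false));   // elements in new space: skip
  assert(!NeedsWriteBarrier(false, true));   // smi value: skip
  assert(NeedsWriteBarrier(false, false));   // pointer into old space: barrier
  return 0;
}
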
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-Handle<String> Reference::GetName() {
- ASSERT(type_ == NAMED);
- Property* property = expression_->AsProperty();
- if (property == NULL) {
- // Global variable reference treated as a named property reference.
- VariableProxy* proxy = expression_->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- return proxy->name();
- } else {
- Literal* raw_name = property->key()->AsLiteral();
- ASSERT(raw_name != NULL);
- return Handle<String>::cast(raw_name->handle());
- }
-}
-
-
-void Reference::GetValue() {
- ASSERT(!cgen_->in_spilled_code());
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- MacroAssembler* masm = cgen_->masm();
-
- // Record the source position for the property load.
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Load from Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- if (!persist_after_get_) set_unloaded();
- break;
- }
-
- case NAMED: {
- Variable* var = expression_->AsVariableProxy()->AsVariable();
- bool is_global = var != NULL;
- ASSERT(!is_global || var->is_global());
- if (persist_after_get_) cgen_->frame()->Dup();
- Result result = cgen_->EmitNamedLoad(GetName(), is_global);
- if (!persist_after_get_) set_unloaded();
- cgen_->frame()->Push(&result);
- break;
- }
-
- case KEYED: {
- if (persist_after_get_) {
- cgen_->frame()->PushElementAt(1);
- cgen_->frame()->PushElementAt(1);
- }
- Result value = cgen_->EmitKeyedLoad();
- cgen_->frame()->Push(&value);
- if (!persist_after_get_) set_unloaded();
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void Reference::TakeValue() {
- // For non-constant frame-allocated slots, we invalidate the value in the
- // slot. For all others, we fall back on GetValue.
- ASSERT(!cgen_->in_spilled_code());
- ASSERT(!is_illegal());
- if (type_ != SLOT) {
- GetValue();
- return;
- }
-
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- if (slot->type() == Slot::LOOKUP ||
- slot->type() == Slot::CONTEXT ||
- slot->var()->mode() == Variable::CONST ||
- slot->is_arguments()) {
- GetValue();
- return;
- }
-
- // Only non-constant, frame-allocated parameters and locals can
- // reach here. Be careful not to use the optimizations for arguments
- // object access since it may not have been initialized yet.
- ASSERT(!slot->is_arguments());
- if (slot->type() == Slot::PARAMETER) {
- cgen_->frame()->TakeParameterAt(slot->index());
- } else {
- ASSERT(slot->type() == Slot::LOCAL);
- cgen_->frame()->TakeLocalAt(slot->index());
- }
-
- ASSERT(persist_after_get_);
- // Do not unload the reference, because it is used in SetValue.
-}
-
-
-void Reference::SetValue(InitState init_state) {
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- MacroAssembler* masm = cgen_->masm();
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Store to Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- cgen_->StoreToSlot(slot, init_state);
- set_unloaded();
- break;
- }
-
- case NAMED: {
- Comment cmnt(masm, "[ Store to named Property");
- Result answer = cgen_->EmitNamedStore(GetName(), false);
- cgen_->frame()->Push(&answer);
- set_unloaded();
- break;
- }
-
- case KEYED: {
- Comment cmnt(masm, "[ Store to keyed Property");
- Property* property = expression()->AsProperty();
- ASSERT(property != NULL);
-
- Result answer = cgen_->EmitKeyedStore(property->key()->type());
- cgen_->frame()->Push(&answer);
- set_unloaded();
- break;
- }
-
- case UNLOADED:
- case ILLEGAL:
- UNREACHABLE();
- }
-}
-
-
-#undef __
-
#define __ masm.
-
static void MemCopyWrapper(void* dest, const void* src, size_t size) {
memcpy(dest, src, size);
}
-MemCopyFunction CreateMemCopyFunction() {
- HandleScope scope;
- MacroAssembler masm(NULL, 1 * KB);
+OS::MemCopyFunction CreateMemCopyFunction() {
+ size_t actual_size;
+ // Allocate buffer in executable space.
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+ &actual_size,
+ true));
+ if (buffer == NULL) return &MemCopyWrapper;
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // Generated code is put into a fixed, unmovable buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
@@ -10165,7 +84,7 @@ MemCopyFunction CreateMemCopyFunction() {
if (FLAG_debug_code) {
__ cmp(Operand(esp, kSizeOffset + stack_offset),
- Immediate(kMinComplexMemCopy));
+ Immediate(OS::kMinComplexMemCopy));
Label ok;
__ j(greater_equal, &ok);
__ int3();
@@ -10199,7 +118,6 @@ MemCopyFunction CreateMemCopyFunction() {
__ test(Operand(src), Immediate(0x0F));
__ j(not_zero, &unaligned_source);
{
- __ IncrementCounter(&Counters::memcopy_aligned, 1);
// Copy loop for aligned source and destination.
__ mov(edx, count);
Register loop_count = ecx;
@@ -10247,7 +165,6 @@ MemCopyFunction CreateMemCopyFunction() {
// Copy loop for unaligned source and aligned destination.
// If source is not aligned, we can't read it as efficiently.
__ bind(&unaligned_source);
- __ IncrementCounter(&Counters::memcopy_unaligned, 1);
__ mov(edx, ecx);
Register loop_count = ecx;
Register count = edx;
@@ -10291,7 +208,6 @@ MemCopyFunction CreateMemCopyFunction() {
}
} else {
- __ IncrementCounter(&Counters::memcopy_noxmm, 1);
// SSE2 not supported. Unlikely to happen in practice.
__ push(edi);
__ push(esi);
@@ -10338,13 +254,8 @@ MemCopyFunction CreateMemCopyFunction() {
masm.GetCode(&desc);
ASSERT(desc.reloc_size == 0);
- // Copy the generated code into an executable chunk and return a pointer
- // to the first instruction in it as a C++ function pointer.
- LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
- if (chunk == NULL) return &MemCopyWrapper;
- memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
- CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
- return FUNCTION_CAST<MemCopyFunction>(chunk->GetStartAddress());
+ CPU::FlushICache(buffer, actual_size);
+ return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
}
#undef __
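
The rewritten CreateMemCopyFunction above follows a common JIT pattern: assemble into an OS-allocated executable buffer, flush the instruction cache, and call the buffer through a function pointer. A minimal POSIX-only sketch of that pattern with hand-encoded x86 bytes; it assumes the OS still permits RWX anonymous mappings:

#include <sys/mman.h>
#include <string.h>
#include <assert.h>

typedef int (*GeneratedFn)(void);

int main() {
  // mov eax, 42; ret  (valid in both 32- and 64-bit x86 modes)
  const unsigned char code[] = { 0xB8, 0x2A, 0x00, 0x00, 0x00, 0xC3 };
  void* buf = mmap(NULL, sizeof(code), PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(buf != MAP_FAILED);
  memcpy(buf, code, sizeof(code));
  // x86 keeps instruction and data caches coherent; other architectures
  // need an explicit flush here (cf. CPU::FlushICache above).
  GeneratedFn fn = (GeneratedFn)buf;
  assert(fn() == 42);
  munmap(buf, sizeof(code));
  return 0;
}
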
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 27e339620..8f090b124 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,275 +30,18 @@
#include "ast.h"
#include "ic-inl.h"
-#include "jump-target-heavy.h"
namespace v8 {
namespace internal {
// Forward declarations
class CompilationInfo;
-class DeferredCode;
-class FrameRegisterState;
-class RegisterAllocator;
-class RegisterFile;
-class RuntimeCallHelper;
-
-
-// -------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame. The reference may be consumed
-// by GetValue, TakeValue and SetValue.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
-class Reference BASE_EMBEDDED {
- public:
-  // The values of the types are important; see size().
- enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get = false);
- ~Reference();
-
- Expression* expression() const { return expression_; }
- Type type() const { return type_; }
- void set_type(Type value) {
- ASSERT_EQ(ILLEGAL, type_);
- type_ = value;
- }
-
- void set_unloaded() {
- ASSERT_NE(ILLEGAL, type_);
- ASSERT_NE(UNLOADED, type_);
- type_ = UNLOADED;
- }
- // The size the reference takes up on the stack.
- int size() const {
- return (type_ < SLOT) ? 0 : type_;
- }
-
- bool is_illegal() const { return type_ == ILLEGAL; }
- bool is_slot() const { return type_ == SLOT; }
- bool is_property() const { return type_ == NAMED || type_ == KEYED; }
- bool is_unloaded() const { return type_ == UNLOADED; }
-
- // Return the name. Only valid for named property references.
- Handle<String> GetName();
-
- // Generate code to push the value of the reference on top of the
- // expression stack. The reference is expected to be already on top of
- // the expression stack, and it is consumed by the call unless the
- // reference is for a compound assignment.
- // If the reference is not consumed, it is left in place under its value.
- void GetValue();
-
- // Like GetValue except that the slot is expected to be written to before
- // being read from again. The value of the reference may be invalidated,
- // causing subsequent attempts to read it to fail.
- void TakeValue();
-
- // Generate code to store the value on top of the expression stack in the
- // reference. The reference is expected to be immediately below the value
- // on the expression stack. The value is stored in the location specified
- // by the reference, and is left on top of the stack, after the reference
- // is popped from beneath it (unloaded).
- void SetValue(InitState init_state);
-
- private:
- CodeGenerator* cgen_;
- Expression* expression_;
- Type type_;
- // Keep the reference on the stack after get, so it can be used by set later.
- bool persist_after_get_;
-};
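
The enum values in the removed Reference class double as stack footprints, which is what the size() comment alludes to: SLOT leaves nothing extra on the frame, NAMED keeps the receiver, and KEYED keeps both receiver and key. A one-file restatement of that invariant:

#include <assert.h>

enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };

static int SizeOf(Type t) { return (t < SLOT) ? 0 : t; }

int main() {
  assert(SizeOf(UNLOADED) == 0 && SizeOf(ILLEGAL) == 0);
  assert(SizeOf(SLOT) == 0);   // value comes from a slot; nothing pushed
  assert(SizeOf(NAMED) == 1);  // receiver stays on the frame
  assert(SizeOf(KEYED) == 2);  // receiver and key stay on the frame
  return 0;
}
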
-
-
-// -------------------------------------------------------------------------
-// Control destinations.
-
-// A control destination encapsulates a pair of jump targets and a
-// flag indicating which one is the preferred fall-through. The
-// preferred fall-through must be unbound; the other may already be
-// bound (ie, a backward target).
-//
-// The true and false targets may be jumped to unconditionally or
-// control may split conditionally. Unconditional jumping and
-// splitting should be emitted in tail position (as the last thing
-// when compiling an expression) because they can cause either label
-// to be bound or the non-fall through to be jumped to leaving an
-// invalid virtual frame.
-//
-// The labels in the control destination can be extracted and
-// manipulated normally without affecting the state of the
-// destination.
-
-class ControlDestination BASE_EMBEDDED {
- public:
- ControlDestination(JumpTarget* true_target,
- JumpTarget* false_target,
- bool true_is_fall_through)
- : true_target_(true_target),
- false_target_(false_target),
- true_is_fall_through_(true_is_fall_through),
- is_used_(false) {
- ASSERT(true_is_fall_through ? !true_target->is_bound()
- : !false_target->is_bound());
- }
-
- // Accessors for the jump targets. Directly jumping or branching to
- // or binding the targets will not update the destination's state.
- JumpTarget* true_target() const { return true_target_; }
- JumpTarget* false_target() const { return false_target_; }
-
-  // True if the destination has been jumped to unconditionally or
- // control has been split to both targets. This predicate does not
- // test whether the targets have been extracted and manipulated as
- // raw jump targets.
- bool is_used() const { return is_used_; }
-
- // True if the destination is used and the true target (respectively
- // false target) was the fall through. If the target is backward,
- // "fall through" included jumping unconditionally to it.
- bool true_was_fall_through() const {
- return is_used_ && true_is_fall_through_;
- }
-
- bool false_was_fall_through() const {
- return is_used_ && !true_is_fall_through_;
- }
-
- // Emit a branch to one of the true or false targets, and bind the
- // other target. Because this binds the fall-through target, it
- // should be emitted in tail position (as the last thing when
- // compiling an expression).
- void Split(Condition cc) {
- ASSERT(!is_used_);
- if (true_is_fall_through_) {
- false_target_->Branch(NegateCondition(cc));
- true_target_->Bind();
- } else {
- true_target_->Branch(cc);
- false_target_->Bind();
- }
- is_used_ = true;
- }
-
- // Emit an unconditional jump in tail position, to the true target
- // (if the argument is true) or the false target. The "jump" will
- // actually bind the jump target if it is forward, jump to it if it
- // is backward.
- void Goto(bool where) {
- ASSERT(!is_used_);
- JumpTarget* target = where ? true_target_ : false_target_;
- if (target->is_bound()) {
- target->Jump();
- } else {
- target->Bind();
- }
- is_used_ = true;
- true_is_fall_through_ = where;
- }
-
- // Mark this jump target as used as if Goto had been called, but
- // without generating a jump or binding a label (the control effect
- // should have already happened). This is used when the left
- // subexpression of the short-circuit boolean operators are
- // compiled.
- void Use(bool where) {
- ASSERT(!is_used_);
- ASSERT((where ? true_target_ : false_target_)->is_bound());
- is_used_ = true;
- true_is_fall_through_ = where;
- }
-
- // Swap the true and false targets but keep the same actual label as
- // the fall through. This is used when compiling negated
- // expressions, where we want to swap the targets but preserve the
- // state.
- void Invert() {
- JumpTarget* temp_target = true_target_;
- true_target_ = false_target_;
- false_target_ = temp_target;
-
- true_is_fall_through_ = !true_is_fall_through_;
- }
-
- private:
- // True and false jump targets.
- JumpTarget* true_target_;
- JumpTarget* false_target_;
-
- // Before using the destination: true if the true target is the
- // preferred fall through, false if the false target is. After
- // using the destination: true if the true target was actually used
- // as the fall through, false if the false target was.
- bool true_is_fall_through_;
-
- // True if the Split or Goto functions have been called.
- bool is_used_;
-};
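
The Split contract above is easiest to see in a stripped-down model: branch to the non-fall-through target on the (possibly negated) condition, then immediately bind the fall-through target, which is why Split must be emitted in tail position. A host-side sketch with a faked JumpTarget, not V8 code:

#include <assert.h>

enum Condition { kLess, kGreaterEqual };
static Condition NegateCondition(Condition cc) {
  return cc == kLess ? kGreaterEqual : kLess;
}

struct JumpTarget {
  bool bound;
  JumpTarget() : bound(false) {}
  void Branch(Condition) {}      // models emitting a conditional jump
  void Bind() { bound = true; }  // models defining the label here
};

struct ControlDestination {
  JumpTarget* true_target;
  JumpTarget* false_target;
  bool true_is_fall_through;
  void Split(Condition cc) {
    if (true_is_fall_through) {
      false_target->Branch(NegateCondition(cc));
      true_target->Bind();
    } else {
      true_target->Branch(cc);
      false_target->Bind();
    }
  }
};

int main() {
  JumpTarget t, f;
  ControlDestination dest = { &t, &f, /* true_is_fall_through */ true };
  dest.Split(kLess);             // e.g. emitted right after: cmp left, right
  assert(t.bound && !f.bound);   // the fall-through label got bound
  return 0;
}
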
-
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the jump target pair). It is threaded through
-// the call stack. Constructing a state implicitly pushes it on the owning
-// code generator's stack of states, and destroying one implicitly pops it.
-//
-// The code generator state is only used for expressions, so statements have
-// the initial state.
-
-class CodeGenState BASE_EMBEDDED {
- public:
- // Create an initial code generator state. Destroying the initial state
- // leaves the code generator with a NULL state.
- explicit CodeGenState(CodeGenerator* owner);
-
- // Create a code generator state based on a code generator's current
- // state. The new state has its own control destination.
- CodeGenState(CodeGenerator* owner, ControlDestination* destination);
-
- // Destroy a code generator state and restore the owning code generator's
- // previous state.
- ~CodeGenState();
-
- // Accessors for the state.
- ControlDestination* destination() const { return destination_; }
-
- private:
- // The owning code generator.
- CodeGenerator* owner_;
-
- // A control destination in case the expression has a control-flow
- // effect.
- ControlDestination* destination_;
-
- // The previous state of the owning code generator, restored when
- // this state is destroyed.
- CodeGenState* previous_;
-};
-
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode.
-
-enum ArgumentsAllocationMode {
- NO_ARGUMENTS_ALLOCATION,
- EAGER_ARGUMENTS_ALLOCATION,
- LAZY_ARGUMENTS_ALLOCATION
-};
-
// -------------------------------------------------------------------------
// CodeGenerator
-class CodeGenerator: public AstVisitor {
+class CodeGenerator {
public:
- static bool MakeCode(CompilationInfo* info);
-
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(CompilationInfo* info);
@@ -318,33 +61,7 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
- // Accessors
- MacroAssembler* masm() { return masm_; }
- VirtualFrame* frame() const { return frame_; }
- inline Handle<Script> script();
-
- bool has_valid_frame() const { return frame_ != NULL; }
-
- // Set the virtual frame to be new_frame, with non-frame register
- // reference counts given by non_frame_registers. The non-frame
- // register reference counts of the old frame are returned in
- // non_frame_registers.
- void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
- void DeleteFrame();
- RegisterAllocator* allocator() const { return allocator_; }
-
- CodeGenState* state() { return state_; }
- void set_state(CodeGenState* state) { state_ = state; }
-
- void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
- bool in_spilled_code() const { return in_spilled_code_; }
- void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
-
-  // Return the memory operand for the element at |index_as_smi| +
-  // |additional_offset| in the FixedArray whose pointer is held in |array|.
-  // |index_as_smi| is a Smi.
static Operand FixedArrayElementOperand(Register array,
Register index_as_smi,
int additional_offset = 0) {
@@ -353,442 +70,6 @@ class CodeGenerator: public AstVisitor {
}
private:
- // Type of a member function that generates inline code for a native function.
- typedef void (CodeGenerator::*InlineFunctionGenerator)
- (ZoneList<Expression*>*);
-
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
- // Construction/Destruction
- explicit CodeGenerator(MacroAssembler* masm);
-
- // Accessors
- inline bool is_eval();
- inline Scope* scope();
- inline StrictModeFlag strict_mode_flag();
-
- // Generating deferred code.
- void ProcessDeferred();
-
- // State
- ControlDestination* destination() const { return state_->destination(); }
-
- // Control of side-effect-free int32 expression compilation.
- bool in_safe_int32_mode() { return in_safe_int32_mode_; }
- void set_in_safe_int32_mode(bool value) { in_safe_int32_mode_ = value; }
- bool safe_int32_mode_enabled() {
- return FLAG_safe_int32_compiler && safe_int32_mode_enabled_;
- }
- void set_safe_int32_mode_enabled(bool value) {
- safe_int32_mode_enabled_ = value;
- }
- void set_unsafe_bailout(BreakTarget* unsafe_bailout) {
- unsafe_bailout_ = unsafe_bailout;
- }
-
- // Take the Result that is an untagged int32, and convert it to a tagged
- // Smi or HeapNumber. Remove the untagged_int32 flag from the result.
- void ConvertInt32ResultToNumber(Result* value);
- void ConvertInt32ResultToSmi(Result* value);
-
- // Track loop nesting level.
- int loop_nesting() const { return loop_nesting_; }
- void IncrementLoopNesting() { loop_nesting_++; }
- void DecrementLoopNesting() { loop_nesting_--; }
-
- // Node visitors.
- void VisitStatements(ZoneList<Statement*>* statements);
-
- virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- // Visit a statement and then spill the virtual frame if control flow can
- // reach the end of the statement (ie, it does not exit via break,
- // continue, return, or throw). This function is used temporarily while
- // the code generator is being transformed.
- void VisitAndSpill(Statement* statement);
-
- // Visit a list of statements and then spill the virtual frame if control
- // flow can reach the end of the list.
- void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
- // Main code generation function
- void Generate(CompilationInfo* info);
-
- // Generate the return sequence code. Should be called no more than
- // once per compiled function, immediately after binding the return
- // target (which can not be done more than once).
- void GenerateReturnSequence(Result* return_value);
-
- // Returns the arguments allocation mode.
- ArgumentsAllocationMode ArgumentsMode();
-
- // Store the arguments object and allocate it if necessary.
- Result StoreArgumentsObject(bool initial);
-
- // The following are used by class Reference.
- void LoadReference(Reference* ref);
-
- Operand SlotOperand(Slot* slot, Register tmp);
-
- Operand ContextSlotOperandCheckExtensions(Slot* slot,
- Result tmp,
- JumpTarget* slow);
-
- // Expressions
- void LoadCondition(Expression* expr,
- ControlDestination* destination,
- bool force_control);
- void Load(Expression* expr);
- void LoadGlobal();
- void LoadGlobalReceiver();
-
- // Generate code to push the value of an expression on top of the frame
- // and then spill the frame fully to memory. This function is used
- // temporarily while the code generator is being transformed.
- void LoadAndSpill(Expression* expression);
-
- // Evaluate an expression and place its value on top of the frame,
- // using, or not using, the side-effect-free expression compiler.
- void LoadInSafeInt32Mode(Expression* expr, BreakTarget* unsafe_bailout);
- void LoadWithSafeInt32ModeDisabled(Expression* expr);
-
- // Read a value from a slot and leave it on top of the expression stack.
- void LoadFromSlot(Slot* slot, TypeofState typeof_state);
- void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
- Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow);
-
- // Support for loading from local/global variables and arguments
- // whose location is known unless they are shadowed by
- // eval-introduced bindings. Generates no code for unsupported slot
- // types and therefore expects to fall through to the slow jump target.
- void EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- Result* result,
- JumpTarget* slow,
- JumpTarget* done);
-
- // Store the value on top of the expression stack into a slot, leaving the
- // value in place.
- void StoreToSlot(Slot* slot, InitState init_state);
-
- // Support for compiling assignment expressions.
- void EmitSlotAssignment(Assignment* node);
- void EmitNamedPropertyAssignment(Assignment* node);
- void EmitKeyedPropertyAssignment(Assignment* node);
-
- // Receiver is passed on the frame and consumed.
- Result EmitNamedLoad(Handle<String> name, bool is_contextual);
-
- // If the store is contextual, value is passed on the frame and consumed.
- // Otherwise, receiver and value are passed on the frame and consumed.
- Result EmitNamedStore(Handle<String> name, bool is_contextual);
-
- // Receiver and key are passed on the frame and consumed.
- Result EmitKeyedLoad();
-
- // Receiver, key, and value are passed on the frame and consumed.
- Result EmitKeyedStore(StaticType* key_type);
-
- // Special code for typeof expressions: Unfortunately, we must
- // be careful when loading the expression in 'typeof'
- // expressions. We are not allowed to throw reference errors for
- // non-existing properties of the global object, so we must make it
- // look like an explicit property access, instead of an access
- // through the context chain.
- void LoadTypeofExpression(Expression* x);
-
- // Translate the value on top of the frame into control flow to the
- // control destination.
- void ToBoolean(ControlDestination* destination);
-
- // Generate code that computes a shortcutting logical operation.
- void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
- void GenericBinaryOperation(BinaryOperation* expr,
- OverwriteMode overwrite_mode);
-
- // Emits code sequence that jumps to a JumpTarget if the inputs
- // are both smis. Cannot be in MacroAssembler because it takes
- // advantage of TypeInfo to skip unneeded checks.
- // Allocates a temporary register, possibly spilling from the frame,
- // if it needs to check both left and right.
- void JumpIfBothSmiUsingTypeInfo(Result* left,
- Result* right,
- JumpTarget* both_smi);
-
- // Emits code sequence that jumps to deferred code if the inputs
- // are not both smis. Cannot be in MacroAssembler because it takes
- // a deferred code object.
- void JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- Register scratch,
- TypeInfo left_info,
- TypeInfo right_info,
- DeferredCode* deferred);
-
- // Emits code sequence that jumps to the label if the inputs
- // are not both smis.
- void JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- Register scratch,
- TypeInfo left_info,
- TypeInfo right_info,
- Label* on_non_smi);
-
- // If possible, combine two constant smi values using op to produce
- // a smi result, and push it on the virtual frame, all at compile time.
- // Returns true if it succeeds. Otherwise it has no effect.
- bool FoldConstantSmis(Token::Value op, int left, int right);
-
- // Emit code to perform a binary operation on a constant
- // smi and a likely smi. Consumes the Result operand.
- Result ConstantSmiBinaryOperation(BinaryOperation* expr,
- Result* operand,
- Handle<Object> constant_operand,
- bool reversed,
- OverwriteMode overwrite_mode);
-
- // Emit code to perform a binary operation on two likely smis.
- // The code to handle smi arguments is produced inline.
- // Consumes the Results left and right.
- Result LikelySmiBinaryOperation(BinaryOperation* expr,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode);
-
-
- // Emit code to perform a binary operation on two untagged int32 values.
- // The values are on top of the frame, and the result is pushed on the frame.
- void Int32BinaryOperation(BinaryOperation* node);
-
-
- // Generate a stub call from the virtual frame.
- Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
- Result* left,
- Result* right);
-
- void Comparison(AstNode* node,
- Condition cc,
- bool strict,
- ControlDestination* destination);
-
- // If at least one of the sides is a constant smi, generate optimized code.
- void ConstantSmiComparison(Condition cc,
- bool strict,
- ControlDestination* destination,
- Result* left_side,
- Result* right_side,
- bool left_side_constant_smi,
- bool right_side_constant_smi,
- bool is_loop_condition);
-
- void GenerateInlineNumberComparison(Result* left_side,
- Result* right_side,
- Condition cc,
- ControlDestination* dest);
-
- // To prevent long attacker-controlled byte sequences, integer constants
- // from the JavaScript source are loaded in two parts if they are larger
- // than 17 bits.
- static const int kMaxSmiInlinedBits = 17;
- bool IsUnsafeSmi(Handle<Object> value);
- // Load an integer constant x into a register target or into the stack using
- // at most 16 bits of user-controlled data per assembly operation.
- void MoveUnsafeSmi(Register target, Handle<Object> value);
- void StoreUnsafeSmiToLocal(int offset, Handle<Object> value);
- void PushUnsafeSmi(Handle<Object> value);
-
- void CallWithArguments(ZoneList<Expression*>* arguments,
- CallFunctionFlags flags,
- int position);
-
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments). We call x the applicand and y the receiver.
- // The optimization avoids allocating an arguments object if possible.
- void CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position);
-
- void CheckStack();
-
- bool CheckForInlineRuntimeCall(CallRuntime* node);
-
- void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
- // Declare global variables and functions in the given array of
- // name/value pairs.
- void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Instantiate the function based on the shared function info.
- Result InstantiateFunction(Handle<SharedFunctionInfo> function_info,
- bool pretenure);
-
- // Support for types.
- void GenerateIsSmi(ZoneList<Expression*>* args);
- void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
- void GenerateIsArray(ZoneList<Expression*>* args);
- void GenerateIsRegExp(ZoneList<Expression*>* args);
- void GenerateIsObject(ZoneList<Expression*>* args);
- void GenerateIsSpecObject(ZoneList<Expression*>* args);
- void GenerateIsFunction(ZoneList<Expression*>* args);
- void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
- void GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args);
-
- // Support for construct call checks.
- void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
- // Support for arguments.length and arguments[?].
- void GenerateArgumentsLength(ZoneList<Expression*>* args);
- void GenerateArguments(ZoneList<Expression*>* args);
-
- // Support for accessing the class and value fields of an object.
- void GenerateClassOf(ZoneList<Expression*>* args);
- void GenerateValueOf(ZoneList<Expression*>* args);
- void GenerateSetValueOf(ZoneList<Expression*>* args);
-
- // Fast support for charCodeAt(n).
- void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharAt(ZoneList<Expression*>* args);
-
- // Fast support for object equality testing.
- void GenerateObjectEquals(ZoneList<Expression*>* args);
-
- void GenerateLog(ZoneList<Expression*>* args);
-
- void GenerateGetFramePointer(ZoneList<Expression*>* args);
-
- // Fast support for Math.random().
- void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
- // Fast support for StringAdd.
- void GenerateStringAdd(ZoneList<Expression*>* args);
-
- // Fast support for SubString.
- void GenerateSubString(ZoneList<Expression*>* args);
-
- // Fast support for StringCompare.
- void GenerateStringCompare(ZoneList<Expression*>* args);
-
- // Support for direct calls from JavaScript to native RegExp code.
- void GenerateRegExpExec(ZoneList<Expression*>* args);
-
- // Construct a RegExp exec result with two in-object properties.
- void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
- // Support for fast native caches.
- void GenerateGetFromCache(ZoneList<Expression*>* args);
-
- // Fast support for number to string.
- void GenerateNumberToString(ZoneList<Expression*>* args);
-
- // Fast swapping of elements. Takes three expressions, the object and two
- // indices. This should only be used if the indices are known to be
- // non-negative and within bounds of the elements array at the call site.
- void GenerateSwapElements(ZoneList<Expression*>* args);
-
- // Fast call for custom callbacks.
- void GenerateCallFunction(ZoneList<Expression*>* args);
-
- // Fast call to math functions.
- void GenerateMathPow(ZoneList<Expression*>* args);
- void GenerateMathSin(ZoneList<Expression*>* args);
- void GenerateMathCos(ZoneList<Expression*>* args);
- void GenerateMathSqrt(ZoneList<Expression*>* args);
- void GenerateMathLog(ZoneList<Expression*>* args);
-
- // Check whether two RegExps are equivalent.
- void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
- void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
- // Simple condition analysis.
- enum ConditionAnalysis {
- ALWAYS_TRUE,
- ALWAYS_FALSE,
- DONT_KNOW
- };
- ConditionAnalysis AnalyzeCondition(Expression* cond);
-
- // Methods used to indicate which source code is generated for. Source
- // positions are collected by the assembler and emitted with the relocation
- // information.
- void CodeForFunctionPosition(FunctionLiteral* fun);
- void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Statement* stmt);
- void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
- void CodeForSourcePosition(int pos);
-
- void SetTypeForStackSlot(Slot* slot, TypeInfo info);
-
-#ifdef DEBUG
- // True if the registers are valid for entry to a block. There should
- // be no frame-external references to (non-reserved) registers.
- bool HasValidEntryRegisters();
-#endif
-
- ZoneList<DeferredCode*> deferred_;
-
- // Assembler
- MacroAssembler* masm_; // to generate code
-
- CompilationInfo* info_;
-
- // Code generation state
- VirtualFrame* frame_;
- RegisterAllocator* allocator_;
- CodeGenState* state_;
- int loop_nesting_;
- bool in_safe_int32_mode_;
- bool safe_int32_mode_enabled_;
-
- // Jump targets.
- // The target of the return from the function.
- BreakTarget function_return_;
- // The target of the bailout from a side-effect-free int32 subexpression.
- BreakTarget* unsafe_bailout_;
-
- // True if the function return is shadowed (ie, jumping to the target
- // function_return_ does not jump to the true function return, but rather
- // to some unlinking code).
- bool function_return_is_shadowed_;
-
- // True when we are in code that expects the virtual frame to be fully
-  // spilled. Some virtual frame functions are disabled in DEBUG builds when
- // called from spilled code, because they do not leave the virtual frame
- // in a spilled state.
- bool in_spilled_code_;
-
-  // A cookie used for JIT IMM32 encoding. Initialized to a random number
-  // when the command-line flag FLAG_mask_constants_with_cookie is true,
-  // zero otherwise.
- int jit_cookie_;
-
- friend class VirtualFrame;
- friend class JumpTarget;
- friend class Reference;
- friend class Result;
- friend class FastCodeGenerator;
- friend class FullCodeGenerator;
- friend class FullCodeGenSyntaxChecker;
- friend class LCodeGen;
-
- friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
-
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
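
The kMaxSmiInlinedBits and jit_cookie_ members of the removed class implement a JIT-spraying mitigation. One way to avoid embedding long attacker-chosen byte runs, suggested by the cookie field (the exact emission V8 used may differ), is to place value XOR cookie in the instruction stream and undo the mask with one extra xor at runtime:

#include <stdint.h>
#include <assert.h>

static const int kMaxSmiInlinedBits = 17;

static bool IsUnsafeImmediate(int32_t value) {
  // Wider constants give an attacker control over too many code bytes.
  return value < -(1 << kMaxSmiInlinedBits) ||
         value >= (1 << kMaxSmiInlinedBits);
}

int main() {
  int32_t value = 0x0DEFACED;           // attacker-influenced constant
  uint32_t cookie = 0xA5F152C3u;        // random per-compilation cookie
  assert(IsUnsafeImmediate(value));
  uint32_t embedded = (uint32_t)value ^ cookie;      // bytes placed in code
  int32_t recovered = (int32_t)(embedded ^ cookie);  // runtime: one extra xor
  assert(recovered == value);
  return 0;
}
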
diff --git a/deps/v8/src/ia32/cpu-ia32.cc b/deps/v8/src/ia32/cpu-ia32.cc
index d64257f39..615dbfed2 100644
--- a/deps/v8/src/ia32/cpu-ia32.cc
+++ b/deps/v8/src/ia32/cpu-ia32.cc
@@ -42,11 +42,12 @@ namespace v8 {
namespace internal {
void CPU::Setup() {
- CpuFeatures::Clear();
- CpuFeatures::Probe(true);
- if (!CpuFeatures::IsSupported(SSE2) || Serializer::enabled()) {
- V8::DisableCrankshaft();
- }
+ CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+ return CpuFeatures::IsSupported(SSE2);
}
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index 678cc9311..238994886 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,7 +29,7 @@
#if defined(V8_TARGET_ARCH_IA32)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
@@ -49,7 +49,8 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Assembler::kJSReturnSequenceLength >=
Assembler::kCallInstructionLength);
- rinfo()->PatchCodeWithCall(Debug::debug_break_return()->entry(),
+ Isolate* isolate = Isolate::Current();
+ rinfo()->PatchCodeWithCall(isolate->debug()->debug_break_return()->entry(),
Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}
@@ -78,8 +79,9 @@ bool BreakLocationIterator::IsDebugBreakAtSlot() {
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
+ Isolate* isolate = Isolate::Current();
rinfo()->PatchCodeWithCall(
- Debug::debug_break_slot()->entry(),
+ isolate->debug()->debug_break_slot()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
}
@@ -126,7 +128,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ Set(eax, Immediate(0)); // No arguments.
- __ mov(ebx, Immediate(ExternalReference::debug_break()));
+ __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
CEntryStub ceb(1);
__ CallStub(&ceb);
@@ -161,7 +163,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget());
+ ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
__ jmp(Operand::StaticVariable(after_break_target));
}
@@ -277,7 +279,8 @@ void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
ExternalReference restarter_frame_function_slot =
- ExternalReference(Debug_Address::RestarterFrameFunctionPointer());
+ ExternalReference(Debug_Address::RestarterFrameFunctionPointer(),
+ masm->isolate());
__ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
// We do not know our frame height, but set esp based on ebp.
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 21193ce0b..72fdac8c6 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -56,7 +56,8 @@ static void ZapCodeRange(Address start, Address end) {
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
- HandleScope scope;
+ Isolate* isolate = code->GetIsolate();
+ HandleScope scope(isolate);
// Compute the size of relocation information needed for the code
// patching in Deoptimizer::DeoptimizeFunction.
@@ -103,8 +104,9 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Allocate new relocation info and copy old relocation to the end
// of the new relocation info array because relocation info is
// written and read backwards.
+ Factory* factory = isolate->factory();
Handle<ByteArray> new_reloc =
- Factory::NewByteArray(reloc_length + padding, TENURED);
+ factory->NewByteArray(reloc_length + padding, TENURED);
memcpy(new_reloc->GetDataStartAddress() + padding,
code->relocation_info()->GetDataStartAddress(),
reloc_length);
@@ -130,10 +132,12 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- AssertNoAllocation no_allocation;
-
if (!function->IsOptimized()) return;
+ Isolate* isolate = function->GetIsolate();
+ HandleScope scope(isolate);
+ AssertNoAllocation no_allocation;
+
// Get the optimized code.
Code* code = function->code();
Address code_start_address = code->instruction_start();
@@ -192,12 +196,14 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// a non-live object in the extra space at the end of the former reloc info.
Address junk_address = reloc_info->address() + reloc_info->Size();
ASSERT(junk_address <= reloc_end_address);
- Heap::CreateFillerObjectAt(junk_address, reloc_end_address - junk_address);
+ isolate->heap()->CreateFillerObjectAt(junk_address,
+ reloc_end_address - junk_address);
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- node->set_next(deoptimizing_code_list_);
- deoptimizing_code_list_ = node;
+ DeoptimizerData* data = isolate->deoptimizer_data();
+ node->set_next(data->deoptimizing_code_list_);
+ data->deoptimizing_code_list_ = node;
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@@ -206,6 +212,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
+#ifdef DEBUG
+ if (FLAG_print_code) {
+ code->PrintLn();
+ }
+#endif
}
}
@@ -358,13 +369,31 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Set them up explicitly.
- for (int i = 0; ok && i < 4; i++) {
+ for (int i = StandardFrameConstants::kCallerPCOffset;
+ ok && i >= StandardFrameConstants::kMarkerOffset;
+ i -= kPointerSize) {
uint32_t input_value = input_->GetFrameSlot(input_offset);
if (FLAG_trace_osr) {
- PrintF(" [esp + %d] <- 0x%08x ; [esp + %d] (fixed part)\n",
+ const char* name = "UNKNOWN";
+ switch (i) {
+ case StandardFrameConstants::kCallerPCOffset:
+ name = "caller's pc";
+ break;
+ case StandardFrameConstants::kCallerFPOffset:
+ name = "fp";
+ break;
+ case StandardFrameConstants::kContextOffset:
+ name = "context";
+ break;
+ case StandardFrameConstants::kMarkerOffset:
+ name = "function";
+ break;
+ }
+ PrintF(" [esp + %d] <- 0x%08x ; [esp + %d] (fixed part - %s)\n",
output_offset,
input_value,
- input_offset);
+ input_offset,
+ name);
}
output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
input_offset -= kPointerSize;
@@ -391,7 +420,8 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
optimized_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
- Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
+ Code* continuation =
+ function->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
@@ -558,9 +588,10 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// Set the continuation for the topmost frame.
if (is_topmost) {
+ Builtins* builtins = isolate_->builtins();
Code* continuation = (bailout_type_ == EAGER)
- ? Builtins::builtin(Builtins::NotifyDeoptimized)
- : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
+ ? builtins->builtin(Builtins::kNotifyDeoptimized)
+ : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
output_frame->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
}
@@ -575,6 +606,8 @@ void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
CpuFeatures::Scope scope(SSE2);
+ Isolate* isolate = masm()->isolate();
+
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -608,14 +641,16 @@ void Deoptimizer::EntryGenerator::Generate() {
__ neg(edx);
// Allocate a new deoptimizer object.
- __ PrepareCallCFunction(5, eax);
+ __ PrepareCallCFunction(6, eax);
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
__ mov(Operand(esp, 1 * kPointerSize), Immediate(type())); // Bailout type.
__ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
__ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
+ __ mov(Operand(esp, 5 * kPointerSize),
+ Immediate(ExternalReference::isolate_address()));
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
// Preserve deoptimizer object in register eax and get the input
// frame descriptor pointer.
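
On the C side this means the runtime entry now receives the isolate as an explicit sixth argument rather than consulting a global. A hedged sketch of that shape, with invented names rather than V8's actual declaration:

    #include <cstdint>
    #include <cstdio>

    struct Isolate {};  // opaque per-VM state

    // Argument order mirrors the stack slots set up above: function, bailout
    // type, bailout id, code address or 0, fp-to-sp delta, then the isolate.
    void* NewDeoptimizer(void* function, int bailout_type, uint32_t bailout_id,
                         void* code_address, intptr_t fp_to_sp_delta,
                         Isolate* isolate) {
      std::printf("deopt: isolate=%p type=%d id=%u delta=%ld\n",
                  static_cast<void*>(isolate), bailout_type, bailout_id,
                  static_cast<long>(fp_to_sp_delta));
      (void)function; (void)code_address;  // unused in this sketch
      return nullptr;  // a real implementation would allocate per-isolate
    }

    int main() {
      Isolate isolate;
      NewDeoptimizer(nullptr, 0, 42u, nullptr, 0, &isolate);
      return 0;
    }
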
@@ -663,7 +698,8 @@ void Deoptimizer::EntryGenerator::Generate() {
__ push(eax);
__ PrepareCallCFunction(1, ebx);
__ mov(Operand(esp, 0 * kPointerSize), eax);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate), 1);
__ pop(eax);
// Replace the current frame with the output frames.
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index e0cbe35c0..7a59a4f62 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -331,6 +331,7 @@ class DisassemblerIA32 {
int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
int PrintRightOperand(byte* modrmp);
int PrintRightByteOperand(byte* modrmp);
+ int PrintRightXMMOperand(byte* modrmp);
int PrintOperands(const char* mnem, OperandOrder op_order, byte* data);
int PrintImmediateOp(byte* data);
int F7Instruction(byte* data);
@@ -367,9 +368,11 @@ void DisassemblerIA32::AppendToBuffer(const char* format, ...) {
int DisassemblerIA32::PrintRightOperandHelper(
byte* modrmp,
- RegisterNameMapping register_name) {
+ RegisterNameMapping direct_register_name) {
int mod, regop, rm;
get_modrm(*modrmp, &mod, &regop, &rm);
+ RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
+ &DisassemblerIA32::NameOfCPURegister;
switch (mod) {
case 0:
if (rm == ebp) {
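
Only a ModR/M byte with mod == 3 encodes a direct register operand, which is why the helper now applies the caller's register-name table (for example the XMM names) only in that case and falls back to CPU register names for memory forms. A standalone sketch of the dispatch:

    #include <cstdio>

    static void get_modrm(unsigned char b, int* mod, int* regop, int* rm) {
      *mod = (b >> 6) & 3;     // 3 = register-direct, 0..2 = memory forms
      *regop = (b >> 3) & 7;
      *rm = b & 7;
    }

    static const char* cpu_regs[8] =
        {"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"};
    static const char* xmm_regs[8] =
        {"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"};

    int main() {
      unsigned char examples[] = {0xC1, 0x01};  // mod=3,rm=1 vs mod=0,[ecx]
      for (unsigned char b : examples) {
        int mod, regop, rm;
        get_modrm(b, &mod, &regop, &rm);
        const char** names = (mod == 3) ? xmm_regs : cpu_regs;
        std::printf("modrm %02x -> mod=%d reg=%d rm=%s\n",
                    b, mod, regop, names[rm]);
      }
      return 0;
    }
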
@@ -454,6 +457,12 @@ int DisassemblerIA32::PrintRightByteOperand(byte* modrmp) {
}
+int DisassemblerIA32::PrintRightXMMOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp,
+ &DisassemblerIA32::NameOfXMMRegister);
+}
+
+
// Returns number of bytes used including the current *data.
// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
int DisassemblerIA32::PrintOperands(const char* mnem,
@@ -937,7 +946,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
if (regop == eax) {
AppendToBuffer("test_b ");
- data += PrintRightOperand(data);
+ data += PrintRightByteOperand(data);
int32_t imm = *data;
AppendToBuffer(",0x%x", imm);
data++;
@@ -972,6 +981,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (f0byte == 0x57) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("xorps %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if ((f0byte & 0xF0) == 0x80) {
data += JumpConditional(data, branch_hint);
} else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
@@ -1035,11 +1052,19 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0xC6: // imm8
{ bool is_byte = *data == 0xC6;
data++;
- AppendToBuffer("%s ", is_byte ? "mov_b" : "mov");
- data += PrintRightOperand(data);
- int32_t imm = is_byte ? *data : *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += is_byte ? 1 : 4;
+ if (is_byte) {
+ AppendToBuffer("%s ", "mov_b");
+ data += PrintRightByteOperand(data);
+ int32_t imm = *data;
+ AppendToBuffer(",0x%x", imm);
+ data++;
+ } else {
+ AppendToBuffer("%s ", "mov");
+ data += PrintRightOperand(data);
+ int32_t imm = *reinterpret_cast<int32_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 4;
+ }
}
break;
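
The split above exists because the 0xC6 form carries an 8-bit immediate while 0xC7 carries a 32-bit one, so the decoder must read and advance by different widths. A small sketch of that width handling (memcpy replaces the pointer cast to keep the sketch portable):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      // Immediate bytes following the ModR/M byte (ModR/M itself omitted).
      const uint8_t stream[] = {0x7F, 0x78, 0x56, 0x34, 0x12};
      const uint8_t* data = stream;

      bool is_byte = true;            // pretend the opcode byte was 0xC6
      int32_t imm;
      if (is_byte) {
        imm = *data;                  // one byte for mov_b
        data += 1;
      } else {
        std::memcpy(&imm, data, 4);   // four bytes for mov, alignment-safe
        data += 4;
      }
      std::printf("imm=0x%x, consumed %td byte(s)\n", imm, data - stream);
      return 0;
    }
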
@@ -1054,7 +1079,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
default: UnimplementedInstruction();
}
AppendToBuffer("%s ", mnem);
- data += PrintRightOperand(data);
+ data += PrintRightByteOperand(data);
int32_t imm = *data;
AppendToBuffer(",0x%x", imm);
data++;
@@ -1067,9 +1092,15 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
data++;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("%s ", is_byte ? "mov_b" : "mov");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
+ if (is_byte) {
+ AppendToBuffer("%s ", "mov_b");
+ data += PrintRightByteOperand(data);
+ AppendToBuffer(",%s", NameOfByteCPURegister(regop));
+ } else {
+ AppendToBuffer("%s ", "mov");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ }
}
break;
@@ -1181,7 +1212,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
+ data += PrintRightXMMOperand(data);
} else if (*data == 0x70) {
data++;
int mod, regop, rm;
@@ -1224,7 +1255,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
+ data += PrintRightXMMOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (*data == 0x7E) {
data++;
@@ -1242,12 +1273,16 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm));
data++;
} else if (*data == 0xE7) {
- AppendToBuffer("movntdq ");
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ if (mod == 3) {
+ AppendToBuffer("movntdq ");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else {
+ UnimplementedInstruction();
+ }
} else if (*data == 0xEF) {
data++;
int mod, regop, rm;
@@ -1338,14 +1373,20 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
+ data += PrintRightXMMOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (b2 == 0x10) {
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x5A) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("cvtsd2ss %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
} else {
const char* mnem = "?";
switch (b2) {
@@ -1361,27 +1402,11 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
if (b2 == 0x2A) {
- if (mod != 0x3) {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- } else {
- AppendToBuffer("%s %s,%s",
- mnem,
- NameOfXMMRegister(regop),
- NameOfCPURegister(rm));
- data++;
- }
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
} else if (b2 == 0x2C) {
- if (mod != 0x3) {
- AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
- data += PrintRightOperand(data);
- } else {
- AppendToBuffer("%s %s,%s",
- mnem,
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm));
- data++;
- }
+ AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
+ data += PrintRightXMMOperand(data);
} else if (b2 == 0xC2) {
// Intel manual 2A, Table 3-18.
const char* const pseudo_op[] = {
@@ -1400,16 +1425,8 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm));
data += 2;
} else {
- if (mod != 0x3) {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- } else {
- AppendToBuffer("%s %s,%s",
- mnem,
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- }
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
}
}
} else {
@@ -1419,29 +1436,44 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0xF3:
if (*(data+1) == 0x0F) {
- if (*(data+2) == 0x2C) {
+ byte b2 = *(data+2);
+ if (b2 == 0x11) {
+ AppendToBuffer("movss ");
data += 3;
- data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
- } else if (*(data+2) == 0x5A) {
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (b2 == 0x10) {
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("cvtss2sd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*(data+2) == 0x6F) {
+ AppendToBuffer("movss %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x2C) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("cvttss2si %s,", NameOfCPURegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x5A) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x6F) {
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- } else if (*(data+2) == 0x7F) {
+ data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x7F) {
AppendToBuffer("movdqu ");
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
+ data += PrintRightXMMOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else {
UnimplementedInstruction();
@@ -1514,9 +1546,8 @@ static const char* xmm_regs[8] = {
const char* NameConverter::NameOfAddress(byte* addr) const {
- static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
- v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
- return tmp_buffer.start();
+ v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
}
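
Moving the scratch buffer from a function-local static into the converter object gives every NameConverter its own storage instead of one process-wide buffer. A simplified standalone illustration (types are stand-ins):

    #include <cstdio>

    class NameConverter {
     public:
      const char* NameOfAddress(const void* addr) {
        std::snprintf(tmp_buffer_, sizeof(tmp_buffer_), "%p", addr);
        return tmp_buffer_;        // valid for the lifetime of this object
      }
     private:
      char tmp_buffer_[32];        // per-instance, not shared process-wide
    };

    int main() {
      NameConverter a, b;          // two converters no longer clobber
      const char* pa = a.NameOfAddress(&a);  // each other's results
      const char* pb = b.NameOfAddress(&b);
      std::printf("%s %s\n", pa, pb);
      return 0;
    }
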
diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h
index 80846949a..bc65ddfad 100644
--- a/deps/v8/src/ia32/frames-ia32.h
+++ b/deps/v8/src/ia32/frames-ia32.h
@@ -80,8 +80,8 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
+  static const int kCodeOffset      = -2 * kPointerSize;
+  static const int kSPOffset        = -1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize;
static const int kCallerPCOffset = +1 * kPointerSize;
@@ -94,7 +94,9 @@ class ExitFrameConstants : public AllStatic {
class StandardFrameConstants : public AllStatic {
public:
- static const int kFixedFrameSize = 4;
+ // StandardFrame::IterateExpressions assumes that kContextOffset is the last
+ // object pointer.
+ static const int kFixedFrameSize = 4; // Currently unused.
static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
@@ -108,7 +110,7 @@ class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kSavedRegistersOffset = +2 * kPointerSize;
+ static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// Caller SP-relative.
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 67e0e8fac..c341b084c 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
@@ -45,6 +45,12 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+static unsigned GetPropertyId(Property* property) {
+ if (property->is_synthetic()) return AstNode::kNoNumber;
+ return property->id();
+}
+
+
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -57,14 +63,18 @@ class JumpPatchSite BASE_EMBEDDED {
ASSERT(patch_site_.is_bound() == info_emitted_);
}
- void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+ void EmitJumpIfNotSmi(Register reg,
+ Label* target,
+ Label::Distance distance = Label::kFar) {
__ test(reg, Immediate(kSmiTagMask));
- EmitJump(not_carry, target); // Always taken before patched.
+ EmitJump(not_carry, target, distance); // Always taken before patched.
}
- void EmitJumpIfSmi(Register reg, NearLabel* target) {
+ void EmitJumpIfSmi(Register reg,
+ Label* target,
+ Label::Distance distance = Label::kFar) {
__ test(reg, Immediate(kSmiTagMask));
- EmitJump(carry, target); // Never taken before patched.
+ EmitJump(carry, target, distance); // Never taken before patched.
}
void EmitPatchInfo() {
@@ -80,11 +90,11 @@ class JumpPatchSite BASE_EMBEDDED {
private:
// jc will be patched with jz, jnc will become jnz.
- void EmitJump(Condition cc, NearLabel* target) {
+ void EmitJump(Condition cc, Label* target, Label::Distance distance) {
ASSERT(!patch_site_.is_bound() && !info_emitted_);
ASSERT(cc == carry || cc == not_carry);
__ bind(&patch_site_);
- __ j(cc, target);
+ __ j(cc, target, distance);
}
MacroAssembler* masm_;
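
A JumpPatchSite records where a short conditional jump was emitted so the IC machinery can later flip it between the carry and zero conditions in place. A sketch of that patching using the real short-jump opcode bytes but a simplified code buffer:

    #include <cstdint>
    #include <cstdio>

    enum : uint8_t { kJc = 0x72, kJnc = 0x73, kJz = 0x74, kJnz = 0x75 };

    void PatchInlinedSmiCheck(uint8_t* site) {
      // jc <-> jz and jnc <-> jnz: swap the carry and zero conditions.
      switch (*site) {
        case kJc:  *site = kJz;  break;
        case kJnc: *site = kJnz; break;
        case kJz:  *site = kJc;  break;
        case kJnz: *site = kJnc; break;
      }
    }

    int main() {
      uint8_t code[] = {kJnc, 0x05};  // jnc +5: "never taken before patched"
      PatchInlinedSmiCheck(code);
      std::printf("patched opcode: 0x%02x (jnz)\n", code[0]);
      return 0;
    }
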
@@ -121,6 +131,21 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
#endif
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). ecx is zero for method calls and non-zero for
+ // function calls.
+ if (info->is_strict_mode() || info->is_native()) {
+ Label ok;
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &ok, Label::kNear);
+ // +1 for return address.
+ int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ __ mov(Operand(esp, receiver_offset),
+ Immediate(isolate()->factory()->undefined_value()));
+ __ bind(&ok);
+ }
+
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
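
The new prologue code overwrites the receiver slot, which sits one pointer above the return address plus the parameters, with undefined whenever the callee was invoked as a plain function. A simulated-stack sketch of the offset arithmetic:

    #include <cstdio>

    const int kPointerSize = 4;
    const int kUndefined = -1;          // stand-in for the undefined value

    int main() {
      int num_parameters = 2;
      // Simulated ia32 stack at entry: [ret addr][args...][receiver].
      int stack[] = {0 /* return address */, 22, 11, 99 /* receiver */};
      bool called_as_function = true;   // ecx != 0 in the generated prologue

      if (called_as_function) {
        // +1 skips the return address, matching receiver_offset above.
        int receiver_offset = (num_parameters + 1) * kPointerSize;
        stack[receiver_offset / kPointerSize] = kUndefined;
      }
      std::printf("receiver slot = %d\n", stack[3]);  // prints -1
      return 0;
    }
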
@@ -129,9 +154,9 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = scope()->num_stack_slots();
if (locals_count == 1) {
- __ push(Immediate(Factory::undefined_value()));
+ __ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
- __ mov(eax, Immediate(Factory::undefined_value()));
+ __ mov(eax, Immediate(isolate()->factory()->undefined_value()));
for (int i = 0; i < locals_count; i++) {
__ push(eax);
}
@@ -150,7 +175,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in both eax and esi. It replaces the context
@@ -192,17 +217,23 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ lea(edx,
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
- __ push(Immediate(Smi::FromInt(scope()->num_parameters())));
- // Arguments to ArgumentsAccessStub:
+ __ SafePush(Immediate(Smi::FromInt(scope()->num_parameters())));
+ // Arguments to ArgumentsAccessStub and/or New...:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ ArgumentsAccessStub::Type type;
+ if (is_strict_mode()) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ }
+ ArgumentsAccessStub stub(type);
__ CallStub(&stub);
- __ mov(ecx, eax); // Duplicate result.
+
Move(arguments->AsSlot(), eax, ebx, edx);
- Slot* dot_arguments_slot = scope()->arguments_shadow()->AsSlot();
- Move(dot_arguments_slot, ecx, ebx, edx);
}
if (FLAG_trace) {
@@ -226,12 +257,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
{ Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
- NearLabel ok;
+ PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+ Label ok;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit();
+ ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, taken);
+ __ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
@@ -247,7 +278,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Always emit a 'return undefined' in case control fell off the end of
// the body.
{ Comment cmnt(masm_, "[ return <undefined>;");
- __ mov(eax, Factory::undefined_value());
+ __ mov(eax, isolate()->factory()->undefined_value());
EmitReturnSequence();
}
}
@@ -260,10 +291,11 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
Comment cmnt(masm_, "[ Stack check");
- NearLabel ok;
- ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, taken);
+ __ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
@@ -343,7 +375,7 @@ void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
// For simplicity we always test the accumulator register.
codegen()->Move(result_register(), slot);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -375,13 +407,20 @@ void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
- __ Set(result_register(), Immediate(lit));
+ if (lit->IsSmi()) {
+ __ SafeSet(result_register(), Immediate(lit));
+ } else {
+ __ Set(result_register(), Immediate(lit));
+ }
}
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- // Immediates can be pushed directly.
- __ push(Immediate(lit));
+ if (lit->IsSmi()) {
+ __ SafePush(Immediate(lit));
+ } else {
+ __ push(Immediate(lit));
+ }
}
@@ -410,7 +449,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
} else {
// For simplicity we always test the accumulator register.
__ mov(result_register(), lit);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -446,7 +485,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
__ Drop(count);
__ Move(result_register(), reg);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -460,12 +499,12 @@ void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
void FullCodeGenerator::AccumulatorValueContext::Plug(
Label* materialize_true,
Label* materialize_false) const {
- NearLabel done;
+ Label done;
__ bind(materialize_true);
- __ mov(result_register(), Factory::true_value());
- __ jmp(&done);
+ __ mov(result_register(), isolate()->factory()->true_value());
+ __ jmp(&done, Label::kNear);
__ bind(materialize_false);
- __ mov(result_register(), Factory::false_value());
+ __ mov(result_register(), isolate()->factory()->false_value());
__ bind(&done);
}
@@ -473,12 +512,12 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(
Label* materialize_true,
Label* materialize_false) const {
- NearLabel done;
+ Label done;
__ bind(materialize_true);
- __ push(Immediate(Factory::true_value()));
- __ jmp(&done);
+ __ push(Immediate(isolate()->factory()->true_value()));
+ __ jmp(&done, Label::kNear);
__ bind(materialize_false);
- __ push(Immediate(Factory::false_value()));
+ __ push(Immediate(isolate()->factory()->false_value()));
__ bind(&done);
}
@@ -495,15 +534,17 @@ void FullCodeGenerator::EffectContext::Plug(bool flag) const {
void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Handle<Object> value =
- flag ? Factory::true_value() : Factory::false_value();
+ Handle<Object> value = flag
+ ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value();
__ mov(result_register(), value);
}
void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- Handle<Object> value =
- flag ? Factory::true_value() : Factory::false_value();
+ Handle<Object> value = flag
+ ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value();
__ push(Immediate(value));
}
@@ -521,28 +562,14 @@ void FullCodeGenerator::TestContext::Plug(bool flag) const {
}
-void FullCodeGenerator::DoTest(Label* if_true,
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
Label* if_false,
Label* fall_through) {
- // Emit the inlined tests assumed by the stub.
- __ cmp(result_register(), Factory::undefined_value());
- __ j(equal, if_false);
- __ cmp(result_register(), Factory::true_value());
- __ j(equal, if_true);
- __ cmp(result_register(), Factory::false_value());
- __ j(equal, if_false);
- STATIC_ASSERT(kSmiTag == 0);
- __ test(result_register(), Operand(result_register()));
- __ j(zero, if_false);
- __ test(result_register(), Immediate(kSmiTagMask));
- __ j(zero, if_true);
-
- // Call the ToBoolean stub for all other cases.
- ToBooleanStub stub;
+ ToBooleanStub stub(result_register());
__ push(result_register());
__ CallStub(&stub);
- __ test(eax, Operand(eax));
-
+ __ test(result_register(), Operand(result_register()));
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
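
DoTest now funnels every boolean test through a single ToBooleanStub instead of inlining the undefined/true/false/smi checks at each site. For reference, a toy C++ version of the ES5 ToBoolean semantics such a stub implements, simplified to four value kinds:

    #include <cstdio>
    #include <string>
    #include <variant>

    using Value = std::variant<std::monostate /*undefined*/, bool, double,
                               std::string>;

    bool ToBoolean(const Value& v) {
      if (std::holds_alternative<std::monostate>(v)) return false;  // undef
      if (auto b = std::get_if<bool>(&v)) return *b;
      if (auto d = std::get_if<double>(&v))
        return *d != 0 && *d == *d;          // zero and NaN are falsy
      return !std::get<std::string>(v).empty();  // "" is falsy
    }

    int main() {
      std::printf("%d %d %d\n", ToBoolean(Value{}), ToBoolean(Value{3.14}),
                  ToBoolean(Value{std::string{}}));  // prints 0 1 0
      return 0;
    }
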
@@ -614,8 +641,8 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
// preparation to avoid preparing with the same AST id twice.
if (!context()->IsTest() || !info_->IsOptimizable()) return;
- NearLabel skip;
- if (should_normalize) __ jmp(&skip);
+ Label skip;
+ if (should_normalize) __ jmp(&skip, Label::kNear);
ForwardBailoutStack* current = forward_bailout_stack_;
while (current != NULL) {
@@ -624,7 +651,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
}
if (should_normalize) {
- __ cmp(eax, Factory::true_value());
+ __ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, NULL);
__ bind(&skip);
}
@@ -645,7 +672,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
case Slot::LOCAL:
if (mode == Variable::CONST) {
__ mov(Operand(ebp, SlotOffset(slot)),
- Immediate(Factory::the_hole_value()));
+ Immediate(isolate()->factory()->the_hole_value()));
} else if (function != NULL) {
VisitForAccumulatorValue(function);
__ mov(Operand(ebp, SlotOffset(slot)), result_register());
@@ -660,14 +687,16 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check that we're not inside a 'with'.
- __ mov(ebx, ContextOperand(esi, Context::FCONTEXT_INDEX));
- __ cmp(ebx, Operand(esi));
- __ Check(equal, "Unexpected declaration in current context.");
+ // Check that we're not inside a with or catch context.
+ __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
+ __ cmp(ebx, isolate()->factory()->with_context_map());
+ __ Check(not_equal, "Declaration in with context.");
+ __ cmp(ebx, isolate()->factory()->catch_context_map());
+ __ Check(not_equal, "Declaration in catch context.");
}
if (mode == Variable::CONST) {
__ mov(ContextOperand(esi, slot->index()),
- Immediate(Factory::the_hole_value()));
+ Immediate(isolate()->factory()->the_hole_value()));
// No write barrier since the hole value is in old space.
} else if (function != NULL) {
VisitForAccumulatorValue(function);
@@ -690,7 +719,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (mode == Variable::CONST) {
- __ push(Immediate(Factory::the_hole_value()));
+ __ push(Immediate(isolate()->factory()->the_hole_value()));
} else if (function != NULL) {
VisitForStackValue(function);
} else {
@@ -702,32 +731,30 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
}
} else if (prop != NULL) {
- if (function != NULL || mode == Variable::CONST) {
- // We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value. We cannot
- // visit the rewrite because it's shared and we risk recording
- // duplicate AST IDs for bailouts from optimized code.
+ // A const declaration aliasing a parameter is an illegal redeclaration.
+ ASSERT(mode != Variable::CONST);
+ if (function != NULL) {
+ // We are declaring a function that rewrites to a property.
+ // Use (keyed) IC to set the initial value. We cannot visit the
+ // rewrite because it's shared and we risk recording duplicate AST
+ // IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
- if (function != NULL) {
- __ push(eax);
- VisitForAccumulatorValue(function);
- __ pop(edx);
- } else {
- __ mov(edx, eax);
- __ mov(eax, Factory::the_hole_value());
- }
+ __ push(eax);
+ VisitForAccumulatorValue(function);
+ __ pop(edx);
+
ASSERT(prop->key()->AsLiteral() != NULL &&
prop->key()->AsLiteral()->handle()->IsSmi());
- __ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));
+ __ SafeSet(ecx, Immediate(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(is_strict()
- ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
}
}
}
@@ -765,7 +792,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Compile all the tests with branches to their bodies.
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
- clause->body_target()->entry_label()->Unuse();
+ clause->body_target()->Unuse();
// The default is not a test, but remember it as final fall through.
if (clause->is_default()) {
@@ -785,26 +812,26 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
- NearLabel slow_case;
+ Label slow_case;
__ mov(ecx, edx);
__ or_(ecx, Operand(eax));
- patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
+ patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
__ cmp(edx, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
+ __ jmp(clause->body_target());
__ bind(&slow_case);
}
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site);
+ EmitCallIC(ic, &patch_site, clause->CompareId());
__ test(eax, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
+ __ jmp(clause->body_target());
}
// Discard the test value and jump to the default if present, otherwise to
@@ -814,14 +841,15 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
if (default_clause == NULL) {
__ jmp(nested_statement.break_target());
} else {
- __ jmp(default_clause->body_target()->entry_label());
+ __ jmp(default_clause->body_target());
}
// Compile all the case bodies.
for (int i = 0; i < clauses->length(); i++) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target()->entry_label());
+ __ bind(clause->body_target());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
VisitStatements(clause->statements());
}
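
The switch lowering above is two-pass: the first pass emits one comparison per clause and branches to a per-clause body target, the second binds those targets and emits the bodies so that fall-through between cases still works. A schematic sketch of the structure:

    #include <cstdio>
    #include <vector>

    struct Label { int pos = -1; };    // stand-in for an assembler label

    int main() {
      std::vector<int> case_values = {1, 5, 9};
      std::vector<Label> body_targets(case_values.size());

      // Pass 1: "emit" one test per clause, branching to its body target.
      for (size_t i = 0; i < case_values.size(); ++i)
        std::printf("cmp eax, %d ; je body_%zu\n", case_values[i], i);

      // Pass 2: bind each body target, then emit the clause body after it.
      for (size_t i = 0; i < case_values.size(); ++i) {
        body_targets[i].pos = static_cast<int>(i);  // "bind" the label
        std::printf("body_%zu:\n  ...statements...\n", i);
      }
      return 0;
    }
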
@@ -842,17 +870,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// ignore null and undefined in contrast to the specification; see
// ECMA-262 section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
- __ cmp(eax, Factory::undefined_value());
+ __ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, &exit);
- __ cmp(eax, Factory::null_value());
+ __ cmp(eax, isolate()->factory()->null_value());
__ j(equal, &exit);
// Convert the object to a JS object.
- NearLabel convert, done_convert;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &convert);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(above_equal, &done_convert);
+ Label convert, done_convert;
+ __ JumpIfSmi(eax, &convert, Label::kNear);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &done_convert, Label::kNear);
__ bind(&convert);
__ push(eax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
@@ -870,43 +897,41 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check that there are no elements. Register ecx contains the
// current JS object we've reached through the prototype chain.
__ cmp(FieldOperand(ecx, JSObject::kElementsOffset),
- Factory::empty_fixed_array());
+ isolate()->factory()->empty_fixed_array());
__ j(not_equal, &call_runtime);
// Check that instance descriptors are not empty so that we can
// check for an enum cache. Leave the map in ebx for the subsequent
// prototype load.
__ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
- __ cmp(edx, Factory::empty_descriptor_array());
- __ j(equal, &call_runtime);
+ __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBitField3Offset));
+ __ JumpIfSmi(edx, &call_runtime);
// Check that there is an enum cache in the non-empty instance
// descriptors (edx). This is the case if the next enumeration
// index field does not contain a smi.
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime);
+ __ JumpIfSmi(edx, &call_runtime);
// For all objects but the receiver, check that the cache is empty.
- NearLabel check_prototype;
+ Label check_prototype;
__ cmp(ecx, Operand(eax));
- __ j(equal, &check_prototype);
+ __ j(equal, &check_prototype, Label::kNear);
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmp(edx, Factory::empty_fixed_array());
+ __ cmp(edx, isolate()->factory()->empty_fixed_array());
__ j(not_equal, &call_runtime);
// Load the prototype from the map and loop if non-null.
__ bind(&check_prototype);
__ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- __ cmp(ecx, Factory::null_value());
+ __ cmp(ecx, isolate()->factory()->null_value());
__ j(not_equal, &next);
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
- NearLabel use_cache;
+ Label use_cache;
__ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
- __ jmp(&use_cache);
+ __ jmp(&use_cache, Label::kNear);
// Get the set of properties to enumerate.
__ bind(&call_runtime);
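
The fast path above walks the prototype chain and bails to the runtime unless every object is element-free and carries a usable enum cache; the real check also treats the receiver's own cache slightly differently than the prototypes'. A simplified sketch of the chain walk:

    #include <cstdio>

    struct Obj {
      bool has_elements;
      bool has_enum_cache;
      const Obj* prototype;            // nullptr terminates the chain
    };

    bool CanUseEnumCache(const Obj* receiver) {
      for (const Obj* o = receiver; o != nullptr; o = o->prototype) {
        if (o->has_elements) return false;     // indexed props defeat cache
        if (!o->has_enum_cache) return false;  // descriptors lack a cache
      }
      return true;
    }

    int main() {
      Obj proto{false, true, nullptr};
      Obj receiver{false, true, &proto};
      std::printf("fast path: %s\n",
                  CanUseEnumCache(&receiver) ? "yes" : "no");
      return 0;
    }
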
@@ -916,13 +941,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
- NearLabel fixed_array;
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::meta_map());
- __ j(not_equal, &fixed_array);
+ Label fixed_array;
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ isolate()->factory()->meta_map());
+ __ j(not_equal, &fixed_array, Label::kNear);
// We got a map in register eax. Get the enumeration cache from it.
__ bind(&use_cache);
- __ mov(ecx, FieldOperand(eax, Map::kInstanceDescriptorsOffset));
+ __ LoadInstanceDescriptors(eax, ecx);
__ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
__ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
@@ -958,10 +984,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check if the expected map still matches that of the enumerable.
// If not, we have to filter the key.
- NearLabel update_each;
+ Label update_each;
__ mov(ecx, Operand(esp, 4 * kPointerSize));
__ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ j(equal, &update_each);
+ __ j(equal, &update_each, Label::kNear);
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
@@ -1013,18 +1039,18 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
// doesn't just get a copy of the existing unoptimized code.
if (!FLAG_always_opt &&
!FLAG_prepare_always_opt &&
+ !pretenure &&
scope()->is_function_scope() &&
- info->num_literals() == 0 &&
- !pretenure) {
- FastNewClosureStub stub;
+ info->num_literals() == 0) {
+ FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
__ push(Immediate(info));
__ CallStub(&stub);
} else {
__ push(esi);
__ push(Immediate(info));
__ push(Immediate(pretenure
- ? Factory::true_value()
- : Factory::false_value()));
+ ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value()));
__ CallRuntime(Runtime::kNewClosure, 3);
}
context()->Plug(eax);
@@ -1054,8 +1080,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
__ j(not_equal, slow);
}
// Load next context in chain.
- __ mov(temp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering esi.
context = temp;
}
@@ -1069,21 +1094,20 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
if (s != NULL && s->is_eval_scope()) {
// Loop up the context chain. There is no frame effect so it is
// safe to use raw labels here.
- NearLabel next, fast;
+ Label next, fast;
if (!context.is(temp)) {
__ mov(temp, context);
}
__ bind(&next);
// Terminate at global context.
__ cmp(FieldOperand(temp, HeapObject::kMapOffset),
- Immediate(Factory::global_context_map()));
- __ j(equal, &fast);
+ Immediate(isolate()->factory()->global_context_map()));
+ __ j(equal, &fast, Label::kNear);
// Check that extension is NULL.
__ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// Load next context in chain.
- __ mov(temp, ContextOperand(temp, Context::CLOSURE_INDEX));
- __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ mov(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
__ jmp(&next);
__ bind(&fast);
}
@@ -1092,11 +1116,11 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
// load IC call.
__ mov(eax, GlobalObjectOperand());
__ mov(ecx, slot->var()->name());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- EmitCallIC(ic, mode);
+ EmitCallIC(ic, mode, AstNode::kNoNumber);
}
@@ -1115,8 +1139,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
Immediate(0));
__ j(not_equal, slow);
}
- __ mov(temp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering esi.
context = temp;
}
@@ -1153,9 +1176,9 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
__ mov(eax,
ContextSlotOperandCheckExtensions(potential_slot, slow));
if (potential_slot->var()->mode() == Variable::CONST) {
- __ cmp(eax, Factory::the_hole_value());
+ __ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, done);
- __ mov(eax, Factory::undefined_value());
+ __ mov(eax, isolate()->factory()->undefined_value());
}
__ jmp(done);
} else if (rewrite != NULL) {
@@ -1174,9 +1197,10 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
__ mov(edx,
ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
slow));
- __ mov(eax, Immediate(key_literal->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ __ SafeSet(eax, Immediate(key_literal->handle()));
+ Handle<Code> ic =
+ isolate()->builtins()->KeyedLoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ jmp(done);
}
}
@@ -1186,23 +1210,22 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
void FullCodeGenerator::EmitVariableLoad(Variable* var) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
+ // Three cases: non-this global variables, lookup slots, and all other
+ // types of slots.
Slot* slot = var->AsSlot();
- Property* property = var->AsProperty();
+ ASSERT((var->is_global() && !var->is_this()) == (slot == NULL));
- if (var->is_global() && !var->is_this()) {
+ if (slot == NULL) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object on the stack.
__ mov(eax, GlobalObjectOperand());
__ mov(ecx, var->name());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
context()->Plug(eax);
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ } else if (slot->type() == Slot::LOOKUP) {
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -1218,61 +1241,31 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) {
context()->Plug(eax);
- } else if (slot != NULL) {
+ } else {
Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
? "Context slot"
: "Stack slot");
if (var->mode() == Variable::CONST) {
// Constants may be the hole value if they have not been initialized.
// Unhole them.
- NearLabel done;
+ Label done;
MemOperand slot_operand = EmitSlotSearch(slot, eax);
__ mov(eax, slot_operand);
- __ cmp(eax, Factory::the_hole_value());
- __ j(not_equal, &done);
- __ mov(eax, Factory::undefined_value());
+ __ cmp(eax, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &done, Label::kNear);
+ __ mov(eax, isolate()->factory()->undefined_value());
__ bind(&done);
context()->Plug(eax);
} else {
context()->Plug(slot);
}
-
- } else {
- Comment cmnt(masm_, "Rewritten parameter");
- ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
-
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->AsSlot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- MemOperand object_loc = EmitSlotSearch(object_slot, eax);
- __ mov(edx, object_loc);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ mov(eax, Immediate(key_literal->handle()));
-
- // Do a keyed property load.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
-
- // Drop key and object left on the stack by IC.
- context()->Plug(eax);
}
}
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- NearLabel materialized;
+ Label materialized;
// Registers will be used as follows:
// edi = JS function.
// ecx = literals array.
@@ -1283,8 +1276,8 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
int literal_offset =
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
__ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, Factory::undefined_value());
- __ j(not_equal, &materialized);
+ __ cmp(ebx, isolate()->factory()->undefined_value());
+ __ j(not_equal, &materialized, Label::kNear);
// Create regexp literal using runtime function
// Result will be in eax.
@@ -1330,7 +1323,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(expr->constant_properties()));
- __ push(Immediate(Smi::FromInt(expr->fast_elements() ? 1 : 0)));
+ int flags = expr->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ push(Immediate(Smi::FromInt(flags)));
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
@@ -1366,10 +1365,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(ecx, Immediate(key->handle()));
__ mov(edx, Operand(esp, 0));
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1402,6 +1401,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
+ if (expr->has_function()) {
+ ASSERT(result_saved);
+ __ push(Operand(esp, 0));
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1420,12 +1425,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(expr->constant_elements()));
- if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ if (expr->constant_elements()->map() ==
+ isolate()->heap()->fixed_cow_array_map()) {
ASSERT(expr->depth() == 1);
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
__ CallStub(&stub);
- __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
+ __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
} else if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
@@ -1485,7 +1491,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
@@ -1511,55 +1517,38 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case KEYED_PROPERTY: {
if (expr->is_compound()) {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
- __ push(slot_operand);
- __ mov(eax, Immediate(property->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
} else {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
- __ push(slot_operand);
- __ push(Immediate(property->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
}
break;
}
}
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
if (expr->is_compound()) {
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
break;
}
}
- // For property compound assignments we need another deoptimization
- // point after the property load.
- if (property != NULL) {
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
- }
-
Token::Value op = expr->binary_op();
__ push(eax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
@@ -1570,13 +1559,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr,
+ EmitInlineSmiBinaryOp(expr->binary_operation(),
op,
mode,
expr->target(),
expr->value());
} else {
- EmitBinaryOp(op, mode);
+ EmitBinaryOp(expr->binary_operation(), op, mode);
}
// Deoptimization point in case the binary operation may have side effects.
@@ -1609,38 +1598,39 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
+ ASSERT(!key->handle()->IsSmi());
__ mov(ecx, Immediate(key->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode,
Expression* left,
Expression* right) {
// Do combined smi check of the operands. Left operand is on the
// stack. Right operand is in eax.
- NearLabel done, smi_case, stub_call;
+ Label smi_case, done, stub_call;
__ pop(edx);
__ mov(ecx, eax);
__ or_(eax, Operand(edx));
JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(eax, &smi_case);
+ patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
__ bind(&stub_call);
__ mov(eax, ecx);
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site);
- __ jmp(&done);
+ BinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+ __ jmp(&done, Label::kNear);
// Smi case.
__ bind(&smi_case);
@@ -1693,7 +1683,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
__ imul(eax, Operand(ecx));
__ j(overflow, &stub_call);
__ test(eax, Operand(eax));
- __ j(not_zero, &done, taken);
+ __ j(not_zero, &done, Label::kNear);
__ mov(ebx, edx);
__ or_(ebx, Operand(ecx));
__ j(negative, &stub_call);
@@ -1717,11 +1707,13 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
}
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+ Token::Value op,
OverwriteMode mode) {
__ pop(edx);
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), NULL); // NULL signals no inlined smi code.
+ BinaryOpStub stub(op, mode);
+ // NULL signals no inlined smi code.
+ EmitCallIC(stub.GetCode(), NULL, expr->id());
context()->Plug(eax);
}
@@ -1735,7 +1727,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
@@ -1758,10 +1750,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ mov(edx, eax);
__ pop(eax); // Restore value.
__ mov(ecx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
break;
}
case KEYED_PROPERTY: {
@@ -1773,7 +1765,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
__ mov(edx, eax);
- __ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));
+ __ SafeSet(ecx, Immediate(prop->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
@@ -1781,10 +1773,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ pop(edx);
}
__ pop(eax); // Restore value.
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
break;
}
}
@@ -1795,8 +1787,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
- // Left-hand sides that rewrite to explicit property accesses do not reach
- // here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->AsSlot() != NULL);
@@ -1807,10 +1797,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// ecx, and the global object on the stack.
__ mov(ecx, var->name());
__ mov(edx, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -1826,21 +1816,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
break;
case Slot::LOCAL:
__ mov(edx, Operand(ebp, SlotOffset(slot)));
- __ cmp(edx, Factory::the_hole_value());
+ __ cmp(edx, isolate()->factory()->the_hole_value());
__ j(not_equal, &skip);
__ mov(Operand(ebp, SlotOffset(slot)), eax);
break;
- case Slot::CONTEXT: {
- __ mov(ecx, ContextOperand(esi, Context::FCONTEXT_INDEX));
- __ mov(edx, ContextOperand(ecx, slot->index()));
- __ cmp(edx, Factory::the_hole_value());
- __ j(not_equal, &skip);
- __ mov(ContextOperand(ecx, slot->index()), eax);
- int offset = Context::SlotOffset(slot->index());
- __ mov(edx, eax); // Preserve the stored value in eax.
- __ RecordWrite(ecx, offset, edx, ebx);
- break;
- }
+ case Slot::CONTEXT:
case Slot::LOOKUP:
__ push(eax);
__ push(esi);
@@ -1910,10 +1890,10 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
} else {
__ pop(edx);
}
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1950,10 +1930,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2002,8 +1982,9 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Record source position of the IC call.
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
- EmitCallIC(ic, mode);
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
+ EmitCallIC(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2012,8 +1993,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key,
- RelocInfo::Mode mode) {
+ Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
@@ -2034,9 +2014,10 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
// Record source position of the IC call.
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(
+ arg_count, in_loop);
__ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, mode);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2044,7 +2025,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2056,7 +2037,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Record source position for debugger.
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+ CallFunctionStub stub(arg_count, in_loop, flags);
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2071,7 +2052,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
if (arg_count > 0) {
__ push(Operand(esp, arg_count * kPointerSize));
} else {
- __ push(Immediate(Factory::undefined_value()));
+ __ push(Immediate(isolate()->factory()->undefined_value()));
}
// Push the receiver of the enclosing function.
@@ -2107,7 +2088,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
{ PreservePositionScope pos_scope(masm()->positions_recorder());
VisitForStackValue(fun);
// Reserved receiver slot.
- __ push(Immediate(Factory::undefined_value()));
+ __ push(Immediate(isolate()->factory()->undefined_value()));
// Push the arguments.
for (int i = 0; i < arg_count; i++) {
@@ -2147,7 +2128,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source position for debugger.
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+ CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_IMPLICIT);
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2189,13 +2170,16 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ bind(&done);
// Push function.
__ push(eax);
- // Push global receiver.
- __ mov(ebx, GlobalObjectOperand());
- __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing the hole to the call function stub.
+ __ push(Immediate(isolate()->factory()->the_hole_value()));
__ bind(&call);
}
- EmitCallWithStub(expr);
+ // The receiver is either the global receiver or an object found
+ // by LoadContextSlot. That object could be the hole if the
+ // receiver is implicitly the global object.
+ EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
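// A hedged sketch of the callee side of this handshake, roughly as the
// call stub might spell it (identifiers are illustrative, not the exact
// CallFunctionStub code):
//
//   if ((flags & RECEIVER_MIGHT_BE_IMPLICIT) != 0 &&
//       receiver == isolate->heap()->the_hole_value()) {
//     receiver = global_object->global_receiver();  // implicit receiver
//   }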
} else if (fun->AsProperty() != NULL) {
// Call to an object property.
Property* prop = fun->AsProperty();
@@ -2226,31 +2210,22 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source code position for IC call.
SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
// Push result (function).
__ push(eax);
// Push Global receiver.
__ mov(ecx, GlobalObjectOperand());
__ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
- EmitCallWithStub(expr);
+ EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
} else {
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(prop->obj());
}
- EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
+ EmitKeyedCallWithIC(expr, prop->key());
}
}
} else {
- // Call to some other expression. If the expression is an anonymous
- // function literal not called in a loop, mark it as one that should
- // also use the full code generator.
- FunctionLiteral* lit = fun->AsFunctionLiteral();
- if (lit != NULL &&
- lit->name()->Equals(Heap::empty_string()) &&
- loop_depth() == 0) {
- lit->set_try_full_codegen(true);
- }
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(fun);
}
@@ -2258,7 +2233,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ mov(ebx, GlobalObjectOperand());
__ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
// Emit function call.
- EmitCallWithStub(expr);
+ EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
}
#ifdef DEBUG
@@ -2291,10 +2266,11 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
SetSourcePosition(expr->position());
// Load function and argument count into edi and eax.
- __ Set(eax, Immediate(arg_count));
+ __ SafeSet(eax, Immediate(arg_count));
__ mov(edi, Operand(esp, arg_count * kPointerSize));
- Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> construct_builtin =
+ isolate()->builtins()->JSConstructCall();
__ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
context()->Plug(eax);
}
@@ -2352,9 +2328,8 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
- __ cmp(eax, Factory::null_value());
+ __ JumpIfSmi(eax, if_false);
+ __ cmp(eax, isolate()->factory()->null_value());
__ j(equal, if_true);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
// Undetectable objects behave like undefined when tested with typeof.
@@ -2362,9 +2337,9 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ test(ecx, Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, if_false);
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ j(below, if_false);
- __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+ __ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
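// The two unsigned compares above implement an inclusive range test on
// the instance type. A hedged C++ equivalent of the final decision (the
// smi, null, and undetectable cases are handled separately above):
//
//   bool IsNonCallableSpecObject(InstanceType type) {
//     return type >= FIRST_NONCALLABLE_SPEC_OBJECT_TYPE &&
//            type <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE;
//   }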
@@ -2384,9 +2359,8 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(equal, if_false);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx);
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
@@ -2406,8 +2380,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
+ __ JumpIfSmi(eax, if_false);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsUndetectable));
@@ -2431,10 +2404,72 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- // TODO(3110205): Implement this.
- // Currently unimplemented. Emit false, a safe choice.
+ if (FLAG_debug_code) __ AbortIfSmi(eax);
+
+ // Check whether this map has already been checked to be safe for default
+ // valueOf.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
+ 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ __ j(not_zero, if_true);
+
+ // Check for fast case object. Return false for slow case objects.
+ __ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
+ __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ cmp(ecx, FACTORY->hash_table_map());
+ __ j(equal, if_false);
+
+ // Look for the valueOf symbol in the descriptor array, and indicate false
+ // if found. The type is not checked, so a transition named valueOf also
+ // counts, giving at worst a false negative.
+ __ LoadInstanceDescriptors(ebx, ebx);
+ __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+ // ebx: descriptor array
+ // ecx: length of descriptor array
+ // Calculate the end of the descriptor array.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kPointerSize == 4);
+ __ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
+ // Calculate location of the first key name.
+ __ add(Operand(ebx),
+ Immediate(FixedArray::kHeaderSize +
+ DescriptorArray::kFirstIndex * kPointerSize));
+ // Loop through all the keys in the descriptor array. If one of these is
+ // the symbol valueOf, the result is false.
+ Label entry, loop;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(edx, FieldOperand(ebx, 0));
+ __ cmp(edx, FACTORY->value_of_symbol());
+ __ j(equal, if_false);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmp(ebx, Operand(ecx));
+ __ j(not_equal, &loop);
+
+ // Reload map as register ebx was used as temporary above.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+
+ // If a valueOf property is not found on the object, check that its
+ // prototype is the unmodified String prototype. If not, the result is false.
+ __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
+ __ JumpIfSmi(ecx, if_false);
+ __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(edx,
+ FieldOperand(edx, GlobalObject::kGlobalContextOffset));
+ __ cmp(ecx,
+ ContextOperand(edx,
+ Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ j(not_equal, if_false);
+ // Set the bit in the map to indicate that it has been checked safe for
+ // default valueOf, and set the result to true.
+ __ or_(FieldOperand(ebx, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ jmp(if_true);
+
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ jmp(if_false);
context()->Plug(if_true, if_false);
}
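// In plain C++ terms, the code emitted above amounts to the following
// hedged sketch (illustrative names, not the V8 API): scan the map's
// descriptor keys for "valueOf", then verify the prototype is the
// untouched String prototype, caching a positive answer in the map. The
// slow-case (hash table properties) bailout is omitted here.
//
//   bool SafeForDefaultValueOf(Map* map) {
//     if (map->string_wrapper_safe_bit()) return true;       // cached
//     for (String* key : map->descriptor_keys())
//       if (key == value_of_symbol) return false;            // may be a
//                                                            // transition
//     if (map->prototype()->map() != string_prototype_map) return false;
//     map->set_string_wrapper_safe_bit();                    // cache it
//     return true;
//   }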
@@ -2451,8 +2486,7 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
+ __ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -2473,8 +2507,7 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(equal, if_false);
+ __ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -2495,8 +2528,7 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(equal, if_false);
+ __ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -2567,7 +2599,7 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
// parameter count in eax.
VisitForAccumulatorValue(args->at(0));
__ mov(edx, eax);
- __ mov(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
+ __ SafeSet(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(eax);
@@ -2579,7 +2611,7 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
Label exit;
// Get the number of formal parameters.
- __ Set(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
+ __ SafeSet(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2604,21 +2636,22 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
VisitForAccumulatorValue(args->at(0));
// If the object is a smi, we return null.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &null);
+ __ JumpIfSmi(eax, &null);
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, eax); // Map is now in eax.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
+ // Map is now in eax.
__ j(below, &null);
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
- __ j(equal, &function);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+ __ CmpInstanceType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above_equal, &function);
// Check if the constructor in the map is a function.
__ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
@@ -2633,17 +2666,17 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Functions have class 'Function'.
__ bind(&function);
- __ mov(eax, Factory::function_class_symbol());
+ __ mov(eax, isolate()->factory()->function_class_symbol());
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ mov(eax, Factory::Object_symbol());
+ __ mov(eax, isolate()->factory()->Object_symbol());
__ jmp(&done);
// Non-JS objects have class null.
__ bind(&null);
- __ mov(eax, Factory::null_value());
+ __ mov(eax, isolate()->factory()->null_value());
// All done.
__ bind(&done);
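// Because the callable instance types sit at the top of the type enum
// (the STATIC_ASSERTs above pin this down), a single unsigned compare
// classifies the object. Hedged C++ equivalent:
//
//   bool HasFunctionClass(InstanceType type) {
//     return type >= FIRST_CALLABLE_SPEC_OBJECT_TYPE;  // includes LAST_TYPE
//   }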
@@ -2669,7 +2702,7 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
}
#endif
// Finally, we're expected to leave a value on the top of the stack.
- __ mov(eax, Factory::undefined_value());
+ __ mov(eax, isolate()->factory()->undefined_value());
context()->Plug(eax);
}
@@ -2690,8 +2723,10 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
__ bind(&heapnumber_allocated);
- __ PrepareCallCFunction(0, ebx);
- __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+ __ PrepareCallCFunction(1, ebx);
+ __ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()),
+ 1);
// Convert 32 random bits in eax to 0.(32 random bits) in a double
// by computing:
@@ -2703,7 +2738,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
__ movd(xmm1, Operand(ebx));
__ movd(xmm0, Operand(eax));
__ cvtss2sd(xmm1, xmm1);
- __ pxor(xmm0, xmm1);
+ __ xorps(xmm0, xmm1);
__ subsd(xmm0, xmm1);
__ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
} else {
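// A hedged scalar equivalent of the bit trick above, assuming xmm1 holds
// the double 2^20 (bit pattern 0x4130000000000000): xor-ing the 32 random
// bits into the low mantissa half yields 2^20 + bits * 2^-32, and the
// subtraction leaves bits * 2^-32, i.e. 0.(32 random bits):
//
//   double RandomBitsToUnitInterval(uint32_t bits) {
//     uint64_t pattern = 0x4130000000000000ull | bits;
//     double d;
//     std::memcpy(&d, &pattern, sizeof d);  // needs <cstring>, <cstdint>
//     return d - 1048576.0;                 // subtract 2^20
//   }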
@@ -2752,13 +2787,12 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- NearLabel done;
+ Label done;
// If the object is a smi return the object.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done);
+ __ JumpIfSmi(eax, &done, Label::kNear);
// If the object is not a value type, return the object.
__ CmpObjectType(eax, JS_VALUE_TYPE, ebx);
- __ j(not_equal, &done);
+ __ j(not_equal, &done, Label::kNear);
__ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
__ bind(&done);
@@ -2772,8 +2806,12 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- MathPowStub stub;
- __ CallStub(&stub);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ MathPowStub stub;
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kMath_pow, 2);
+ }
context()->Plug(eax);
}
@@ -2785,14 +2823,13 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
VisitForAccumulatorValue(args->at(1)); // Load the value.
__ pop(ebx); // eax = value. ebx = object.
- NearLabel done;
+ Label done;
// If the object is a smi, return the value.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &done);
+ __ JumpIfSmi(ebx, &done, Label::kNear);
// If the object is not a value type, return the value.
__ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
- __ j(not_equal, &done);
+ __ j(not_equal, &done, Label::kNear);
// Store the value.
__ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
@@ -2866,13 +2903,13 @@ void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// NaN.
- __ Set(result, Immediate(Factory::nan_value()));
+ __ Set(result, Immediate(isolate()->factory()->nan_value()));
__ jmp(&done);
__ bind(&need_conversion);
// Move the undefined value into the result register, which will
// trigger conversion.
- __ Set(result, Immediate(Factory::undefined_value()));
+ __ Set(result, Immediate(isolate()->factory()->undefined_value()));
__ jmp(&done);
NopRuntimeCallHelper call_helper;
@@ -2915,7 +2952,7 @@ void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
- __ Set(result, Immediate(Factory::empty_string()));
+ __ Set(result, Immediate(isolate()->factory()->empty_string()));
__ jmp(&done);
__ bind(&need_conversion);
@@ -3001,17 +3038,17 @@ void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() >= 2);
- int arg_count = args->length() - 2; // For receiver and function.
- VisitForStackValue(args->at(0)); // Receiver.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i + 1));
+ int arg_count = args->length() - 2; // 2 ~ receiver and function.
+ for (int i = 0; i < arg_count + 1; ++i) {
+ VisitForStackValue(args->at(i));
}
- VisitForAccumulatorValue(args->at(arg_count + 1)); // Function.
+ VisitForAccumulatorValue(args->last()); // Function.
- // InvokeFunction requires function in edi. Move it in there.
- if (!result_register().is(edi)) __ mov(edi, result_register());
+ // InvokeFunction requires the function in edi. Move it in there.
+ __ mov(edi, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(edi, count, CALL_FUNCTION);
+ __ InvokeFunction(edi, count, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
context()->Plug(eax);
}
@@ -3045,8 +3082,8 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
// Fetch the map and check if array is in fast case.
// Check that object doesn't require security checks and
// has no indexed interceptor.
- __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, temp);
- __ j(below, &slow_case);
+ __ CmpObjectType(object, JS_ARRAY_TYPE, temp);
+ __ j(not_equal, &slow_case);
__ test_b(FieldOperand(temp, Map::kBitFieldOffset),
KeyedLoadIC::kSlowCaseBitFieldMask);
__ j(not_zero, &slow_case);
@@ -3054,7 +3091,7 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
// Check the object's elements are in fast case and writable.
__ mov(elements, FieldOperand(object, JSObject::kElementsOffset));
__ cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(isolate()->factory()->fixed_array_map()));
__ j(not_equal, &slow_case);
// Check that both indices are smis.
@@ -3062,8 +3099,7 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ mov(index_2, Operand(esp, 0));
__ mov(temp, index_1);
__ or_(temp, Operand(index_2));
- __ test(temp, Immediate(kSmiTagMask));
- __ j(not_zero, &slow_case);
+ __ JumpIfNotSmi(temp, &slow_case);
// Check that both indices are valid.
__ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
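// The or/JumpIfNotSmi pair above is the standard both-are-smis test: the
// smi tag bit is 0, so the OR of two tagged values has a clear tag bit
// only when both inputs do. Hedged C++ equivalent:
//
//   bool BothSmis(intptr_t a, intptr_t b) {
//     return ((a | b) & kSmiTagMask) == 0;  // kSmiTag == 0
//   }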
@@ -3092,7 +3128,7 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ bind(&new_space);
// We are done. Drop elements from the stack, and return undefined.
__ add(Operand(esp), Immediate(3 * kPointerSize));
- __ mov(eax, Factory::undefined_value());
+ __ mov(eax, isolate()->factory()->undefined_value());
__ jmp(&done);
__ bind(&slow_case);
@@ -3110,10 +3146,10 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
+ isolate()->global_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
- __ mov(eax, Factory::undefined_value());
+ __ mov(eax, isolate()->factory()->undefined_value());
context()->Plug(eax);
return;
}
@@ -3168,9 +3204,9 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
// Fail if either is a non-HeapObject.
__ mov(tmp, left);
__ and_(Operand(tmp), right);
- __ test(Operand(tmp), Immediate(kSmiTagMask));
- __ j(zero, &fail);
- __ CmpObjectType(left, JS_REGEXP_TYPE, tmp);
+ __ JumpIfSmi(tmp, &fail);
+ __ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
+ __ CmpInstanceType(tmp, JS_REGEXP_TYPE);
__ j(not_equal, &fail);
__ cmp(tmp, FieldOperand(right, HeapObject::kMapOffset));
__ j(not_equal, &fail);
@@ -3178,10 +3214,10 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
__ cmp(tmp, FieldOperand(right, JSRegExp::kDataOffset));
__ j(equal, &ok);
__ bind(&fail);
- __ mov(eax, Immediate(Factory::false_value()));
+ __ mov(eax, Immediate(isolate()->factory()->false_value()));
__ jmp(&done);
__ bind(&ok);
- __ mov(eax, Immediate(Factory::true_value()));
+ __ mov(eax, Immediate(isolate()->factory()->true_value()));
__ bind(&done);
context()->Plug(eax);
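// The and/JumpIfSmi pair above is the dual trick: heap-object pointers
// carry tag bit 1, so the AND of two tagged values keeps the bit only
// when both are heap objects. Hedged C++ equivalent:
//
//   bool BothHeapObjects(intptr_t a, intptr_t b) {
//     return ((a & b) & kSmiTagMask) != 0;  // fails if either is a smi
//   }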
@@ -3260,21 +3296,18 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ sub(Operand(esp), Immediate(2 * kPointerSize));
__ cld();
// Check that the array is a JSArray
- __ test(array, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
+ __ JumpIfSmi(array, &bailout);
__ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
__ j(not_equal, &bailout);
// Check that the array has fast elements.
- __ test_b(FieldOperand(scratch, Map::kBitField2Offset),
- 1 << Map::kHasFastElements);
- __ j(zero, &bailout);
+ __ CheckFastElements(scratch, &bailout);
// If the array has length zero, return the empty string.
__ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
__ SmiUntag(array_length);
__ j(not_zero, &non_trivial_array);
- __ mov(result_operand, Factory::empty_string());
+ __ mov(result_operand, isolate()->factory()->empty_string());
__ jmp(&done);
// Save the array length.
@@ -3304,8 +3337,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
index,
times_pointer_size,
FixedArray::kHeaderSize));
- __ test(string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
+ __ JumpIfSmi(string, &bailout);
__ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
@@ -3338,8 +3370,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
// Check that the separator is a flat ASCII string.
__ mov(string, separator_operand);
- __ test(string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
+ __ JumpIfSmi(string, &bailout);
__ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
@@ -3485,7 +3516,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ bind(&bailout);
- __ mov(result_operand, Factory::undefined_value());
+ __ mov(result_operand, isolate()->factory()->undefined_value());
__ bind(&done);
__ mov(eax, result_operand);
// Drop temp values from the stack, and restore context register.
@@ -3523,8 +3554,10 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Call the JS runtime function via a call IC.
__ Set(ecx, Immediate(expr->name()));
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+ Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
+ arg_count, in_loop, mode);
+ EmitCallIC(ic, mode, expr->id());
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
} else {
@@ -3589,7 +3622,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
VisitForEffect(expr->expression());
- context()->Plug(Factory::undefined_value());
+ context()->Plug(isolate()->factory()->undefined_value());
break;
}
@@ -3629,8 +3662,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Comment cmt(masm_, "[ UnaryOperation (ADD)");
VisitForAccumulatorValue(expr->expression());
Label no_conversion;
- __ test(result_register(), Immediate(kSmiTagMask));
- __ j(zero, &no_conversion);
+ __ JumpIfSmi(result_register(), &no_conversion);
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);
@@ -3638,48 +3670,13 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
- case Token::SUB: {
- Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register eax.
- VisitForAccumulatorValue(expr->expression());
- __ CallStub(&stub);
- context()->Plug(eax);
+ case Token::SUB:
+ EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
break;
- }
- case Token::BIT_NOT: {
- Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- // The generic unary operation stub expects the argument to be
- // in the accumulator register eax.
- VisitForAccumulatorValue(expr->expression());
- Label done;
- bool inline_smi_case = ShouldInlineSmiCase(expr->op());
- if (inline_smi_case) {
- NearLabel call_stub;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &call_stub);
- __ lea(eax, Operand(eax, kSmiTagMask));
- __ not_(eax);
- __ jmp(&done);
- __ bind(&call_stub);
- }
- bool overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode mode =
- overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpFlags flags = inline_smi_case
- ? NO_UNARY_SMI_CODE_IN_STUB
- : NO_UNARY_FLAGS;
- GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
- __ CallStub(&stub);
- __ bind(&done);
- context()->Plug(eax);
+ case Token::BIT_NOT:
+ EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
break;
- }
default:
UNREACHABLE();
@@ -3687,6 +3684,23 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+ const char* comment) {
+ // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+ Comment cmt(masm_, comment);
+ bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOverwriteMode overwrite =
+ can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ UnaryOpStub stub(expr->op(), overwrite);
+ // UnaryOpStub expects the argument to be in the
+ // accumulator register eax.
+ VisitForAccumulatorValue(expr->expression());
+ SetSourcePosition(expr->position());
+ EmitCallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
@@ -3699,7 +3713,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->expression()->AsProperty();
@@ -3726,16 +3740,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ push(eax);
EmitNamedPropertyLoad(prop);
} else {
- if (prop->is_arguments_access()) {
- VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
- __ push(slot_operand);
- __ mov(eax, Immediate(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
EmitKeyedPropertyLoad(prop);
@@ -3744,13 +3750,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// We need a second deoptimization point after loading the value
// in case evaluating the property load may have a side effect.
- PrepareForBailout(expr->increment(), TOS_REG);
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ }
// Call ToNumber only if operand is not a smi.
- NearLabel no_conversion;
+ Label no_conversion;
if (ShouldInlineSmiCase(expr->op())) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &no_conversion);
+ __ JumpIfSmi(eax, &no_conversion, Label::kNear);
}
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
@@ -3777,7 +3786,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Inline smi case if we are in a loop.
- NearLabel stub_call, done;
+ Label done, stub_call;
JumpPatchSite patch_site(masm_);
if (ShouldInlineSmiCase(expr->op())) {
@@ -3786,10 +3795,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
__ sub(Operand(eax), Immediate(Smi::FromInt(1)));
}
- __ j(overflow, &stub_call);
+ __ j(overflow, &stub_call, Label::kNear);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(eax, &done);
+ patch_site.EmitJumpIfSmi(eax, &done, Label::kNear);
__ bind(&stub_call);
// Call stub. Undo operation first.
@@ -3806,8 +3815,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call stub for +1/-1.
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
- TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- EmitCallIC(stub.GetCode(), &patch_site);
+ BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
+ EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
__ bind(&done);
// Store the value returned in eax.
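// The inline increment above operates directly on the tagged smi: with a
// zero tag in the low bit, a tagged value is 2 * n, so adding
// Smi::FromInt(1) (== 2) gives 2 * (n + 1) with no untagging, and the
// overflow flag still catches out-of-range results. Hedged equivalent:
//
//   intptr_t IncrementSmi(intptr_t tagged) {  // tagged == 2 * value
//     return tagged + (1 << kSmiTagSize);     // == Smi::FromInt(1)
//   }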
@@ -3837,10 +3846,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(ecx, prop->key()->AsLiteral()->handle());
__ pop(edx);
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3854,10 +3863,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(ecx);
__ pop(edx);
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -3882,10 +3891,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Comment cmnt(masm_, "Global variable");
__ mov(eax, GlobalObjectOperand());
__ mov(ecx, Immediate(proxy->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
PrepareForBailout(expr, TOS_REG);
context()->Plug(eax);
} else if (proxy != NULL &&
@@ -3908,95 +3917,79 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
context()->Plug(eax);
} else {
// This expression cannot throw a reference error at the top level.
- context()->HandleExpression(expr);
+ VisitInCurrentContext(expr);
}
}
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (op != Token::EQ && op != Token::EQ_STRICT) return false;
-
- // Check for the pattern: typeof <expression> == <string literal>.
- Literal* right_literal = right->AsLiteral();
- if (right_literal == NULL) return false;
- Handle<Object> right_literal_value = right_literal->handle();
- if (!right_literal_value->IsString()) return false;
- UnaryOperation* left_unary = left->AsUnaryOperation();
- if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
- Handle<String> check = Handle<String>::cast(right_literal_value);
-
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(left_unary->expression());
+ VisitForTypeofValue(expr);
}
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- if (check->Equals(Heap::number_symbol())) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_true);
+ if (check->Equals(isolate()->heap()->number_symbol())) {
+ __ JumpIfSmi(eax, if_true);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ isolate()->factory()->heap_number_map());
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::string_symbol())) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
+ } else if (check->Equals(isolate()->heap()->string_symbol())) {
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
+ __ j(above_equal, if_false);
// Check for undetectable objects => false.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
- __ test(ecx, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
- __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
- Split(below, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::boolean_symbol())) {
- __ cmp(eax, Factory::true_value());
+ __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ Split(zero, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+ __ cmp(eax, isolate()->factory()->true_value());
__ j(equal, if_true);
- __ cmp(eax, Factory::false_value());
+ __ cmp(eax, isolate()->factory()->false_value());
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::undefined_symbol())) {
- __ cmp(eax, Factory::undefined_value());
+ } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+ __ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, if_true);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
+ __ JumpIfSmi(eax, if_false);
// Check for undetectable objects => true.
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
__ test(ecx, Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::function_symbol())) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
- __ j(equal, if_true);
- // Regular expressions => 'function' (they are callable).
- __ CmpInstanceType(edx, JS_REGEXP_TYPE);
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::object_symbol())) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
- __ cmp(eax, Factory::null_value());
+ } else if (check->Equals(isolate()->heap()->function_symbol())) {
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, edx);
+ Split(above_equal, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->object_symbol())) {
+ __ JumpIfSmi(eax, if_false);
+ __ cmp(eax, isolate()->factory()->null_value());
__ j(equal, if_true);
- // Regular expressions => 'function', not 'object'.
- __ CmpObjectType(eax, JS_REGEXP_TYPE, edx);
- __ j(equal, if_false);
+ __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx);
+ __ j(below, if_false);
+ __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above, if_false);
// Check for undetectable objects => false.
- __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
- __ test(ecx, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
- // Check for JS objects => true.
- __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(less, if_false);
- __ cmp(ecx, LAST_JS_OBJECT_TYPE);
- Split(less_equal, if_true, if_false, fall_through);
+ __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ Split(zero, if_true, if_false, fall_through);
} else {
if (if_false != fall_through) __ jmp(if_false);
}
+}
+
- return true;
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ VisitForAccumulatorValue(expr);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+ __ cmp(eax, isolate()->factory()->undefined_value());
+ Split(equal, if_true, if_false, fall_through);
}
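// Read together, the branches above give typeof's classification. A
// hedged summary (V8's actual checks are the assembly sequences above):
//
//   "number"    : smi, or map == heap_number_map
//   "string"    : instance type < FIRST_NONSTRING_TYPE, not undetectable
//   "boolean"   : exactly true_value or false_value
//   "undefined" : undefined_value, or any undetectable heap object
//   "function"  : instance type >= FIRST_CALLABLE_SPEC_OBJECT_TYPE
//   "object"    : null, or a non-callable spec object that is not
//                 undetectable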
@@ -4016,21 +4009,19 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
- if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
context()->Plug(if_true, if_false);
return;
}
+ Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (expr->op()) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- __ cmp(eax, Factory::true_value());
+ __ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
break;
@@ -4086,10 +4077,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
- NearLabel slow_case;
+ Label slow_case;
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
- patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
+ patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
__ cmp(edx, Operand(eax));
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
@@ -4098,7 +4089,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site);
+ EmitCallIC(ic, &patch_site, expr->id());
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ test(eax, Operand(eax));
@@ -4123,15 +4114,14 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
VisitForAccumulatorValue(expr->expression());
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ cmp(eax, Factory::null_value());
+ __ cmp(eax, isolate()->factory()->null_value());
if (expr->is_strict()) {
Split(equal, if_true, if_false, fall_through);
} else {
__ j(equal, if_true);
- __ cmp(eax, Factory::undefined_value());
+ __ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, if_true);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
+ __ JumpIfSmi(eax, if_false);
// It can be an undetectable object.
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(edx, FieldOperand(edx, Map::kBitFieldOffset));
@@ -4158,69 +4148,50 @@ Register FullCodeGenerator::context_register() {
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+ RelocInfo::Mode mode,
+ unsigned ast_id) {
ASSERT(mode == RelocInfo::CODE_TARGET ||
mode == RelocInfo::CODE_TARGET_CONTEXT);
switch (ic->kind()) {
case Code::LOAD_IC:
- __ IncrementCounter(&Counters::named_load_full, 1);
+ __ IncrementCounter(isolate()->counters()->named_load_full(), 1);
break;
case Code::KEYED_LOAD_IC:
- __ IncrementCounter(&Counters::keyed_load_full, 1);
+ __ IncrementCounter(isolate()->counters()->keyed_load_full(), 1);
break;
case Code::STORE_IC:
- __ IncrementCounter(&Counters::named_store_full, 1);
+ __ IncrementCounter(isolate()->counters()->named_store_full(), 1);
break;
case Code::KEYED_STORE_IC:
- __ IncrementCounter(&Counters::keyed_store_full, 1);
+ __ IncrementCounter(isolate()->counters()->keyed_store_full(), 1);
default:
break;
}
-
- __ call(ic, mode);
-
- // Crankshaft doesn't need patching of inlined loads and stores.
- // When compiling the snapshot we need to produce code that works
- // with and without Crankshaft.
- if (V8::UseCrankshaft() && !Serializer::enabled()) {
- return;
- }
-
- // If we're calling a (keyed) load or store stub, we have to mark
- // the call as containing no inlined code so we will not attempt to
- // patch it.
- switch (ic->kind()) {
- case Code::LOAD_IC:
- case Code::KEYED_LOAD_IC:
- case Code::STORE_IC:
- case Code::KEYED_STORE_IC:
- __ nop(); // Signals no inlined code.
- break;
- default:
- // Do nothing.
- break;
- }
+ __ call(ic, mode, ast_id);
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+ JumpPatchSite* patch_site,
+ unsigned ast_id) {
+ Counters* counters = isolate()->counters();
switch (ic->kind()) {
case Code::LOAD_IC:
- __ IncrementCounter(&Counters::named_load_full, 1);
+ __ IncrementCounter(counters->named_load_full(), 1);
break;
case Code::KEYED_LOAD_IC:
- __ IncrementCounter(&Counters::keyed_load_full, 1);
+ __ IncrementCounter(counters->keyed_load_full(), 1);
break;
case Code::STORE_IC:
- __ IncrementCounter(&Counters::named_store_full, 1);
+ __ IncrementCounter(counters->named_store_full(), 1);
break;
case Code::KEYED_STORE_IC:
- __ IncrementCounter(&Counters::keyed_store_full, 1);
+ __ IncrementCounter(counters->keyed_store_full(), 1);
default:
break;
}
-
- __ call(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET, ast_id);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
} else {
@@ -4240,6 +4211,25 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
}
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ if (scope()->is_global_scope()) {
+ // Contexts nested in the global context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ __ push(Immediate(Smi::FromInt(0)));
+ } else if (scope()->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ push(ContextOperand(esi, Context::CLOSURE_INDEX));
+ } else {
+ ASSERT(scope()->is_function_scope());
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+}
+
+
// ----------------------------------------------------------------------------
// Non-local control flow support.
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 6b9e74962..0f5820254 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,7 +29,7 @@
#if defined(V8_TARGET_ARCH_IA32)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
@@ -50,11 +50,11 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// Register usage:
// type: holds the receiver instance type on entry.
__ cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, global_object, not_taken);
+ __ j(equal, global_object);
__ cmp(type, JS_BUILTINS_OBJECT_TYPE);
- __ j(equal, global_object, not_taken);
+ __ j(equal, global_object);
__ cmp(type, JS_GLOBAL_PROXY_TYPE);
- __ j(equal, global_object, not_taken);
+ __ j(equal, global_object);
}
@@ -72,17 +72,16 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
// r1: used to hold receivers map.
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ JumpIfSmi(receiver, miss);
// Check that the receiver is a valid JS object.
__ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
__ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
- __ cmp(r0, FIRST_JS_OBJECT_TYPE);
- __ j(below, miss, not_taken);
+ __ cmp(r0, FIRST_SPEC_OBJECT_TYPE);
+ __ j(below, miss);
// If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
GenerateGlobalInstanceTypeCheck(masm, r0, miss);
@@ -90,68 +89,13 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
__ test_b(FieldOperand(r1, Map::kBitFieldOffset),
(1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasNamedInterceptor));
- __ j(not_zero, miss, not_taken);
+ __ j(not_zero, miss);
__ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ CheckMap(r0, Factory::hash_table_map(), miss, true);
-}
-
-
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found leaving the
-// index into the dictionary in |r0|. Jump to the |miss| label
-// otherwise.
-static void GenerateStringDictionaryProbes(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1) {
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
-
- // Compute the capacity mask.
- const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
- __ mov(r1, FieldOperand(elements, kCapacityOffset));
- __ shr(r1, kSmiTagSize); // convert smi to int
- __ dec(r1);
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- static const int kProbes = 4;
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- for (int i = 0; i < kProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
- __ shr(r0, String::kHashShift);
- if (i > 0) {
- __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
- }
- __ and_(r0, Operand(r1));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
-
- // Check if the key is identical to the name.
- __ cmp(name, Operand(elements, r0, times_4,
- kElementsStartOffset - kHeapObjectTag));
- if (i != kProbes - 1) {
- __ j(equal, done, taken);
- } else {
- __ j(not_equal, miss, not_taken);
- }
- }
+ __ CheckMap(r0, FACTORY->hash_table_map(), miss, DONT_DO_SMI_CHECK);
}
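// The removed probe loop (now shared through
// StringDictionaryLookupStub::GeneratePositiveLookup) walks the
// open-addressed string dictionary. A hedged C++ equivalent of one probe,
// with GetProbeOffset(i) the quadratic offset for probe i and the
// capacity a power of two:
//
//   uint32_t ProbeIndex(uint32_t hash, int i, uint32_t capacity_mask) {
//     uint32_t index = (hash + GetProbeOffset(i)) & capacity_mask;
//     return index * StringDictionary::kEntrySize;  // kEntrySize == 3
//   }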
-
// Helper function used to load a property from a dictionary backing
// storage. This function may fail to load a property even though it is
// in the dictionary, so code at miss_label must always call a backup
@@ -183,13 +127,13 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- GenerateStringDictionaryProbes(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
// If probing finds an entry in the dictionary, r0 contains the
// index into the dictionary. Check that the value is a normal
@@ -201,7 +145,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
- __ j(not_zero, miss_label, not_taken);
+ __ j(not_zero, miss_label);
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
@@ -238,13 +182,13 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Probe the dictionary.
- GenerateStringDictionaryProbes(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
// If probing finds an entry in the dictionary, r0 contains the
// index into the dictionary. Check that the value is a normal
@@ -259,7 +203,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
__ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
Immediate(kTypeAndReadOnlyMask));
- __ j(not_zero, miss_label, not_taken);
+ __ j(not_zero, miss_label);
// Store the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
@@ -349,9 +293,9 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
times_pointer_size,
NumberDictionary::kElementsStartOffset));
if (i != (kProbes - 1)) {
- __ j(equal, &done, taken);
+ __ j(equal, &done);
} else {
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
}
}
@@ -371,12 +315,6 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
}
-// The offset from the inlined patch site to the start of the
-// inlined load instruction. It is 7 bytes (test eax, imm) plus
-// 6 bytes (jne slow_label).
-const int LoadIC::kOffsetToLoadInstruction = 13;
-
-
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : receiver
@@ -434,8 +372,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// map - used to hold the map of the receiver.
// Check that the object isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, slow, not_taken);
+ __ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
__ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
@@ -443,7 +380,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Check bit field.
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
(1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
- __ j(not_zero, slow, not_taken);
+ __ j(not_zero, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing
// into string objects works as intended.
@@ -451,7 +388,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ CmpInstanceType(map, JS_OBJECT_TYPE);
- __ j(below, slow, not_taken);
+ __ j(below, slow);
}
@@ -475,7 +412,10 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
__ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
if (not_fast_array != NULL) {
// Check that the object is in fast mode and writable.
- __ CheckMap(scratch, Factory::fixed_array_map(), not_fast_array, true);
+ __ CheckMap(scratch,
+ FACTORY->fixed_array_map(),
+ not_fast_array,
+ DONT_DO_SMI_CHECK);
} else {
__ AssertFastElements(scratch);
}
@@ -485,7 +425,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Fast case: Do the load.
ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
- __ cmp(Operand(scratch), Immediate(Factory::the_hole_value()));
+ __ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, out_of_range);
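// The hole comparison above separates "element absent" from a stored
// undefined: fast elements use the_hole as an in-band sentinel, and a hit
// must take the slow path so the prototype chain is still consulted.
// Hedged sketch (LookupThroughPrototypes is an illustrative stand-in for
// the GetProperty fallback):
//
//   Object* element = elements->get(index);
//   if (element->IsTheHole())
//     return LookupThroughPrototypes(receiver, index);
//   return element;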
@@ -514,12 +454,87 @@ static void GenerateKeyStringCheck(MacroAssembler* masm,
// Is the string an array index, with cached numeric value?
__ mov(hash, FieldOperand(key, String::kHashFieldOffset));
__ test(hash, Immediate(String::kContainsCachedArrayIndexMask));
- __ j(zero, index_string, not_taken);
+ __ j(zero, index_string);
// Is the string a symbol?
ASSERT(kSymbolTag != 0);
__ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsSymbolMask);
- __ j(zero, not_symbol, not_taken);
+ __ j(zero, not_symbol);
+}
+
+
+static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register scratch1,
+ Register scratch2,
+ Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+ Factory* factory = masm->isolate()->factory();
+
+ // Check that the receiver is a JSObject. Because of the elements
+ // map check later, we do not need to check for interceptors or
+ // whether it requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
+ __ j(below, slow_case);
+
+ // Check that the key is a positive smi.
+ __ test(key, Immediate(0x80000001));
+ __ j(not_zero, slow_case);
+
+ // Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+ __ sub(Operand(scratch2), Immediate(Smi::FromInt(2)));
+ __ cmp(key, Operand(scratch2));
+ __ j(greater_equal, unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
+ __ mov(scratch2, FieldOperand(scratch1,
+ key,
+ times_half_pointer_size,
+ kHeaderSize));
+ __ cmp(scratch2, factory->the_hole_value());
+ __ j(equal, unmapped_case);
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ const int kContextOffset = FixedArray::kHeaderSize;
+ __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
+ return FieldOperand(scratch1,
+ scratch2,
+ times_half_pointer_size,
+ Context::kHeaderSize);
+}
+
+
+static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmp(key, Operand(scratch));
+ __ j(greater_equal, slow_case);
+ return FieldOperand(backing_store,
+ key,
+ times_half_pointer_size,
+ FixedArray::kHeaderSize);
}
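// Together the two helpers above implement the two-level lookup for
// non-strict (aliased) arguments objects. A hedged sketch approximating
// V8's internal accessors; the layout assumed: parameter_map[0] is the
// context, parameter_map[1] the backing store, parameter_map[2..] hold
// per-parameter context slot indices or the hole:
//
//   Object* LookupAliasedArgument(FixedArray* parameter_map, uint32_t key) {
//     if (key + 2 < parameter_map->length()) {
//       Object* probe = parameter_map->get(key + 2);
//       if (!probe->IsTheHole()) {
//         Context* context = Context::cast(parameter_map->get(0));
//         return context->get(Smi::cast(probe)->value());  // mapped param
//       }
//     }
//     FixedArray* backing = FixedArray::cast(parameter_map->get(1));
//     return backing->get(key);  // unmapped; caller bound-checks the key
//   }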
@@ -530,11 +545,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- esp[0] : return address
// -----------------------------------
Label slow, check_string, index_smi, index_string, property_array_property;
- Label check_pixel_array, probe_dictionary, check_number_dictionary;
+ Label probe_dictionary, check_number_dictionary;
// Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &check_string, not_taken);
+ __ JumpIfNotSmi(eax, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from
// where a numeric string is converted to a smi.
@@ -542,11 +556,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyedLoadReceiverCheck(
masm, edx, ecx, Map::kHasIndexedInterceptor, &slow);
- // Check the "has fast elements" bit in the receiver's map which is
- // now in ecx.
- __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
- 1 << Map::kHasFastElements);
- __ j(zero, &check_pixel_array, not_taken);
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(ecx, &check_number_dictionary);
GenerateFastArrayLoad(masm,
edx,
@@ -555,27 +566,25 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
eax,
NULL,
&slow);
- __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+ Isolate* isolate = masm->isolate();
+ Counters* counters = isolate->counters();
+ __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
__ ret(0);
- __ bind(&check_pixel_array);
- GenerateFastPixelArrayLoad(masm,
- edx,
- eax,
- ecx,
- ebx,
- eax,
- &check_number_dictionary,
- NULL,
- &slow);
-
__ bind(&check_number_dictionary);
+ __ mov(ebx, eax);
+ __ SmiUntag(ebx);
+ __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+
// Check whether the elements object is a number dictionary.
// edx: receiver
// ebx: untagged index
// eax: key
// ecx: elements
- __ CheckMap(ecx, Factory::hash_table_map(), &slow, true);
+ __ CheckMap(ecx,
+ isolate->factory()->hash_table_map(),
+ &slow,
+ DONT_DO_SMI_CHECK);
Label slow_pop_receiver;
// Push receiver on the stack to free up a register for the dictionary
// probing.
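
The SmiUntag above strips V8's ia32 smi encoding: a small integer is stored shifted left by one with tag bit 0 equal to zero, which is also why smi keys can be scaled with times_half_pointer_size instead of being untagged first. A sketch of the encoding (assuming arithmetic right shift for negative values):

    // ia32 smi encoding: payload << 1, tag bit 0 clear.
    inline int SmiTag(int value) { return value << 1; }
    inline int SmiUntag(int tagged) { return tagged >> 1; }
    inline bool IsSmi(int tagged) { return (tagged & 1) == 0; }
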
@@ -600,7 +609,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Slow case: jump to runtime.
// edx: receiver
// eax: key
- __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
+ __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
@@ -613,7 +622,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// cache. Otherwise probe the dictionary.
__ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
+ Immediate(isolate->factory()->hash_table_map()));
__ j(equal, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
@@ -628,8 +637,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load the key (consisting of map and symbol) from the cache and
// check for match.
- ExternalReference cache_keys
- = ExternalReference::keyed_lookup_cache_keys();
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(masm->isolate());
__ mov(edi, ecx);
__ shl(edi, kPointerSizeLog2 + 1);
__ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
@@ -643,8 +652,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ebx : receiver's map
// eax : key
// ecx : lookup cache index
- ExternalReference cache_field_offsets
- = ExternalReference::keyed_lookup_cache_field_offsets();
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
__ mov(edi,
Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
@@ -655,7 +664,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ add(ecx, Operand(edi));
__ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
- __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
+ __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
// Load property array property.
@@ -663,7 +672,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(eax, FieldOperand(edx, JSObject::kPropertiesOffset));
__ mov(eax, FieldOperand(eax, edi, times_pointer_size,
FixedArray::kHeaderSize));
- __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
+ __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
@@ -675,7 +684,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateGlobalInstanceTypeCheck(masm, ecx, &slow);
GenerateDictionaryLoad(masm, &slow, ebx, eax, ecx, edi, eax);
- __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
+ __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
__ ret(0);
__ bind(&index_string);
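
The probe sequence above hashes the receiver's map together with the symbol and then checks a single entry in two parallel external tables: cache_keys holds (map, symbol) pairs and cache_field_offsets the matching in-object field offsets. A rough one-probe model; the table size and hash mix below are stand-ins, not V8's exact constants:

    #include <cstdint>

    struct KeyedLookupCacheModel {
      static const int kCapacity = 64;  // assumed; a power of two
      struct Key { const void* map; const void* symbol; };
      Key keys[kCapacity];
      int field_offsets[kCapacity];

      static int Hash(const void* map, const void* symbol) {
        // The generated code mixes shifted map bits with the symbol's hash
        // field; xor of the two shifted pointers stands in for that mix.
        std::uintptr_t h = (reinterpret_cast<std::uintptr_t>(map) >> 3) ^
                           (reinterpret_cast<std::uintptr_t>(symbol) >> 3);
        return static_cast<int>(h & (kCapacity - 1));
      }

      // Returns true and the field offset on a hit; a miss falls through
      // to the dictionary probe, as in the stub above.
      bool Lookup(const void* map, const void* symbol, int* offset) const {
        int i = Hash(map, symbol);
        if (keys[i].map == map && keys[i].symbol == symbol) {
          *offset = field_offsets[i];
          return true;
        }
        return false;
      }
    };
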
@@ -715,7 +724,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm);
+ GenerateMiss(masm, false);
}
@@ -728,12 +737,11 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
Label slow;
// Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
+ __ JumpIfSmi(edx, &slow);
// Check that the key is an array index, that is Uint32.
__ test(eax, Immediate(kSmiTagMask | kSmiSignMask));
- __ j(not_zero, &slow, not_taken);
+ __ j(not_zero, &slow);
// Get the map of the receiver.
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
@@ -743,7 +751,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
__ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
__ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
- __ j(not_zero, &slow, not_taken);
+ __ j(not_zero, &slow);
// Everything is fine, call runtime.
__ pop(ecx);
@@ -752,12 +760,67 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ push(ecx); // return address
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(
- IC_Utility(kKeyedLoadPropertyWithInterceptor));
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
+ masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
__ bind(&slow);
- GenerateMiss(masm);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, notin;
+ Factory* factory = masm->isolate()->factory();
+ Operand mapped_location =
+ GenerateMappedArgumentsLookup(masm, edx, eax, ebx, ecx, &notin, &slow);
+ __ mov(eax, mapped_location);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in ebx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, eax, ebx, ecx, &slow);
+ __ cmp(unmapped_location, factory->the_hole_value());
+ __ j(equal, &slow);
+ __ mov(eax, unmapped_location);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, notin;
+ Operand mapped_location =
+ GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, edi, &notin, &slow);
+ __ mov(mapped_location, eax);
+ __ lea(ecx, mapped_location);
+ __ mov(edx, eax);
+ __ RecordWrite(ebx, ecx, edx);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in ebx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, ecx, ebx, edi, &slow);
+ __ mov(unmapped_location, eax);
+ __ lea(edi, unmapped_location);
+ __ mov(edx, eax);
+ __ RecordWrite(ebx, edi, edx);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
}
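
Continuing the toy model from the load case above: a store through a mapped slot lands in the context, which is why this stub records the write barrier against the parameter map held in ebx rather than against ordinary elements. A usage sketch with hypothetical values:

    void StoreAliasingExample() {
      int context[4] = {0, 0, 7, 0};
      int backing[2] = {40, 41};
      int slots[2] = {2, -1};       // argument 1 has been unmapped
      ArgumentsModel m{context, backing, 2, slots, 2};
      *ArgumentsLookup(m, 0) = 99;  // writes context[2]: parameter aliased
      *ArgumentsLookup(m, 1) = 50;  // writes backing[1]: ordinary element
    }
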
@@ -769,26 +832,28 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label slow, fast, array, extra, check_pixel_array;
+ Label slow, fast, array, extra;
// Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
+ __ JumpIfSmi(edx, &slow);
// Get the map from the receiver.
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
- __ j(not_zero, &slow, not_taken);
+ __ j(not_zero, &slow);
// Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
+ __ JumpIfNotSmi(ecx, &slow);
__ CmpInstanceType(edi, JS_ARRAY_TYPE);
__ j(equal, &array);
- // Check that the object is some kind of JS object.
- __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
- __ j(below, &slow, not_taken);
+ // Check that the object is some kind of JSObject.
+ __ CmpInstanceType(edi, FIRST_JS_RECEIVER_TYPE);
+ __ j(below, &slow);
+ __ CmpInstanceType(edi, JS_PROXY_TYPE);
+ __ j(equal, &slow);
+ __ CmpInstanceType(edi, JS_FUNCTION_PROXY_TYPE);
+ __ j(equal, &slow);
// Object case: Check key against length in the elements array.
// eax: value
@@ -796,32 +861,14 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// ecx: key (a smi)
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode and writable.
- __ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true);
+ __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
- __ j(below, &fast, taken);
+ __ j(below, &fast);
// Slow case: call runtime.
__ bind(&slow);
GenerateRuntimeSetProperty(masm, strict_mode);
- // Check whether the elements is a pixel array.
- __ bind(&check_pixel_array);
- // eax: value
- // ecx: key (a smi)
- // edx: receiver
- // edi: elements array
- GenerateFastPixelArrayStore(masm,
- edx,
- ecx,
- eax,
- edi,
- ebx,
- false,
- NULL,
- &slow,
- &slow,
- &slow);
-
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
// element to the array by writing to array[array.length].
@@ -831,9 +878,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// ecx: key, a smi.
// edi: receiver->elements, a FixedArray
// flags: compare (ecx, edx.length())
- __ j(not_equal, &slow, not_taken); // do not leave holes in the array
+ // Do not leave holes in the array:
+ __ j(not_equal, &slow);
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
- __ j(above_equal, &slow, not_taken);
+ __ j(above_equal, &slow);
// Add 1 to receiver->length, and go to fast array write.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
@@ -847,12 +895,12 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// edx: receiver, a JSArray
// ecx: key, a smi.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true);
+ __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
__ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
- __ j(above_equal, &extra, not_taken);
+ __ j(above_equal, &extra);
// Fast case: Do the store.
__ bind(&fast);
@@ -872,7 +920,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
- Code::Kind kind) {
+ Code::Kind kind,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- edx : receiver
@@ -883,10 +932,11 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(kind,
NOT_IN_LOOP,
MONOMORPHIC,
- Code::kNoExtraICState,
+ extra_ic_state,
NORMAL,
argc);
- StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, eax);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
+ eax);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@@ -894,10 +944,9 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// to probe.
//
// Check for number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &number, not_taken);
+ __ JumpIfSmi(edx, &number);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ebx);
- __ j(not_equal, &non_number, taken);
+ __ j(not_equal, &non_number);
__ bind(&number);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::NUMBER_FUNCTION_INDEX, edx);
@@ -906,24 +955,25 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Check for string.
__ bind(&non_number);
__ CmpInstanceType(ebx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &non_string, taken);
+ __ j(above_equal, &non_string);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::STRING_FUNCTION_INDEX, edx);
__ jmp(&probe);
// Check for boolean.
__ bind(&non_string);
- __ cmp(edx, Factory::true_value());
- __ j(equal, &boolean, not_taken);
- __ cmp(edx, Factory::false_value());
- __ j(not_equal, &miss, taken);
+ __ cmp(edx, FACTORY->true_value());
+ __ j(equal, &boolean);
+ __ cmp(edx, FACTORY->false_value());
+ __ j(not_equal, &miss);
__ bind(&boolean);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
// Probe the stub cache for the value object.
__ bind(&probe);
- StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
+ no_reg);
__ bind(&miss);
}
@@ -941,16 +991,16 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
// -----------------------------------
// Check that the result is not a smi.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ JumpIfSmi(edi, miss);
// Check that the value is a JavaScript function, fetching its map into eax.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
// Invoke the function.
ParameterCount actual(argc);
- __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+ __ InvokeFunction(edi, actual, JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
// The generated code falls through if the call should be handled by runtime.
@@ -980,7 +1030,8 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
static void GenerateCallMiss(MacroAssembler* masm,
int argc,
- IC::UtilityId id) {
+ IC::UtilityId id,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -989,10 +1040,11 @@ static void GenerateCallMiss(MacroAssembler* masm,
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
+ Counters* counters = masm->isolate()->counters();
if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(&Counters::call_miss, 1);
+ __ IncrementCounter(counters->call_miss(), 1);
} else {
- __ IncrementCounter(&Counters::keyed_call_miss, 1);
+ __ IncrementCounter(counters->keyed_call_miss(), 1);
}
// Get the receiver of the function from the stack; 1 ~ return address.
@@ -1008,7 +1060,7 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Call the entry.
CEntryStub stub(1);
__ mov(eax, Immediate(2));
- __ mov(ebx, Immediate(ExternalReference(IC_Utility(id))));
+ __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
__ CallStub(&stub);
// Move result to edi and exit the internal frame.
@@ -1020,14 +1072,13 @@ static void GenerateCallMiss(MacroAssembler* masm,
if (id == IC::kCallIC_Miss) {
Label invoke, global;
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &invoke, not_taken);
+ __ JumpIfSmi(edx, &invoke, Label::kNear);
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, &global);
+ __ j(equal, &global, Label::kNear);
__ cmp(ebx, JS_BUILTINS_OBJECT_TYPE);
- __ j(not_equal, &invoke);
+ __ j(not_equal, &invoke, Label::kNear);
// Patch the receiver on the stack.
__ bind(&global);
@@ -1037,12 +1088,21 @@ static void GenerateCallMiss(MacroAssembler* masm,
}
// Invoke the function.
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
ParameterCount actual(argc);
- __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+ __ InvokeFunction(edi,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ call_kind);
}
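
The Contextual::decode call above unpacks a bit from the code object's extra IC state to pick the call kind. A minimal sketch of such a decode; the single-bit layout here is an assumption for illustration:

    #include <cstdint>

    typedef std::uint32_t ExtraICState;
    const ExtraICState kContextualBit = 1u << 0;  // assumed bit position

    // true  -> CALL_AS_FUNCTION (receiver implicitly provided)
    // false -> CALL_AS_METHOD   (receiver came from a property load)
    inline bool IsContextualCall(ExtraICState state) {
      return (state & kContextualBit) != 0;
    }
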
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMegamorphic(MacroAssembler* masm,
+ int argc,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1053,8 +1113,9 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
- GenerateMiss(masm, argc);
+ GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+
+ GenerateMiss(masm, argc, extra_ic_state);
}
@@ -1068,11 +1129,13 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -----------------------------------
GenerateCallNormal(masm, argc);
- GenerateMiss(masm, argc);
+ GenerateMiss(masm, argc, Code::kNoExtraICState);
}
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMiss(MacroAssembler* masm,
+ int argc,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1081,7 +1144,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
+ GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
}
@@ -1102,8 +1165,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
Label index_smi, index_string;
// Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &check_string, not_taken);
+ __ JumpIfNotSmi(ecx, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from
@@ -1114,7 +1176,9 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateFastArrayLoad(
masm, edx, ecx, eax, edi, &check_number_dictionary, &slow_load);
- __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1);
+ Isolate* isolate = masm->isolate();
+ Counters* counters = isolate->counters();
+ __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1);
__ bind(&do_call);
// receiver in edx is not used after this point.
@@ -1126,14 +1190,17 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// eax: elements
// ecx: smi key
// Check whether the elements object is a number dictionary.
- __ CheckMap(eax, Factory::hash_table_map(), &slow_load, true);
+ __ CheckMap(eax,
+ isolate->factory()->hash_table_map(),
+ &slow_load,
+ DONT_DO_SMI_CHECK);
__ mov(ebx, ecx);
__ SmiUntag(ebx);
// ebx: untagged index
// Receiver in edx will be clobbered, need to reload it on miss.
GenerateNumberDictionaryLoad(
masm, &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
- __ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1);
+ __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
__ jmp(&do_call);
__ bind(&slow_reload_receiver);
@@ -1142,7 +1209,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&slow_load);
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
- __ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1);
+ __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
__ EnterInternalFrame();
__ push(ecx); // save the key
__ push(edx); // pass the receiver
@@ -1164,15 +1231,21 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
masm, edx, eax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
__ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
- __ CheckMap(ebx, Factory::hash_table_map(), &lookup_monomorphic_cache, true);
+ __ CheckMap(ebx,
+ isolate->factory()->hash_table_map(),
+ &lookup_monomorphic_cache,
+ DONT_DO_SMI_CHECK);
GenerateDictionaryLoad(masm, &slow_load, ebx, ecx, eax, edi, edi);
- __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
+ __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1);
__ jmp(&do_call);
__ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1);
- GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
+ __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
+ GenerateMonomorphicCacheProbe(masm,
+ argc,
+ Code::KEYED_CALL_IC,
+ Code::kNoExtraICState);
// Fall through on miss.
__ bind(&slow_call);
@@ -1182,7 +1255,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
- __ IncrementCounter(&Counters::keyed_call_generic_slow, 1);
+ __ IncrementCounter(counters->keyed_call_generic_slow(), 1);
GenerateMiss(masm, argc);
__ bind(&index_string);
@@ -1192,6 +1265,35 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
}
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+ int argc) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+ Label slow, notin;
+ Factory* factory = masm->isolate()->factory();
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ Operand mapped_location =
+ GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
+ __ mov(edi, mapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow);
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in ebx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
+ __ cmp(unmapped_location, factory->the_hole_value());
+ __ j(equal, &slow);
+ __ mov(edi, unmapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow);
+ __ bind(&slow);
+ GenerateMiss(masm, argc);
+}
+
+
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- ecx : name
@@ -1203,8 +1305,7 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// Check if the name is a string.
Label miss;
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(ecx, &miss);
Condition cond = masm->IsObjectStringType(ecx, eax, eax);
__ j(NegateCondition(cond), &miss);
GenerateCallNormal(masm, argc);
@@ -1222,7 +1323,7 @@ void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
+ GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
}
@@ -1237,7 +1338,8 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
MONOMORPHIC);
- StubCache::GenerateProbe(masm, flags, eax, ecx, ebx, edx);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, eax, ecx, ebx,
+ edx);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1272,7 +1374,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// -- esp[0] : return address
// -----------------------------------
- __ IncrementCounter(&Counters::load_miss, 1);
+ __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
__ pop(ebx);
__ push(eax); // receiver
@@ -1280,185 +1382,20 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ push(ebx); // return address
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
}
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
- // If the instruction following the call is not a test eax, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- Address delta_address = test_instruction_address + 1;
- // The delta to the start of the map check instruction.
- int delta = *reinterpret_cast<int*>(delta_address);
-
- // The map address is the last 4 bytes of the 7-byte
- // operand-immediate compare instruction, so we add 3 to get the
- // offset to the last 4 bytes.
- Address map_address = test_instruction_address + delta + 3;
- *(reinterpret_cast<Object**>(map_address)) = map;
-
- // The offset is in the last 4 bytes of a six byte
- // memory-to-register move instruction, so we add 2 to get the
- // offset to the last 4 bytes.
- Address offset_address =
- test_instruction_address + delta + kOffsetToLoadInstruction + 2;
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
- return true;
-}
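
The deleted patcher rewrote 32-bit immediates inside already-emitted instructions; the "+ 3" and "+ 2" adjustments point at the imm32/disp32 field at the tail of the 7-byte cmp and 6-byte mov encodings. A small model of such an in-place patch over a raw code buffer:

    #include <cstring>

    // Overwrite the trailing 32-bit field of the instruction at `site`.
    // memcpy keeps the write well-defined on a byte-aligned code buffer,
    // matching the raw pointer stores in the code above.
    inline void PatchImm32(unsigned char* site, int field_offset, int value) {
      std::memcpy(site + field_offset, &value, sizeof(value));
    }

    // E.g. for the 7-byte cmp [reg+disp], imm32: PatchImm32(site, 3, map_word);
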
-
-
-// One byte opcode for mov ecx,0xXXXXXXXX.
-// Marks inlined contextual loads using all kinds of cells. Generated
-// code has the hole check:
-// mov reg, <cell>
-// mov reg, (<cell>, value offset)
-// cmp reg, <the hole>
-// je slow
-// ;; use reg
-static const byte kMovEcxByte = 0xB9;
-
-// One byte opcode for mov edx,0xXXXXXXXX.
-// Marks inlined contextual loads using only "don't delete"
-// cells. Generated code doesn't have the hole check:
-// mov reg, <cell>
-// mov reg, (<cell>, value offset)
-// ;; use reg
-static const byte kMovEdxByte = 0xBA;
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
- Object* map,
- Object* cell,
- bool is_dont_delete) {
- if (V8::UseCrankshaft()) return false;
-
- // The address of the instruction following the call.
- Address mov_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
- // If the instruction following the call is not a mov ecx/edx,
- // nothing was inlined.
- byte b = *mov_instruction_address;
- if (b != kMovEcxByte && b != kMovEdxByte) return false;
- // If we don't have the hole check generated, we can only support
- // "don't delete" cells.
- if (b == kMovEdxByte && !is_dont_delete) return false;
-
- Address delta_address = mov_instruction_address + 1;
- // The delta to the start of the map check instruction.
- int delta = *reinterpret_cast<int*>(delta_address);
-
- // The map address is the last 4 bytes of the 7-byte
- // operand-immediate compare instruction, so we add 3 to get the
- // offset to the last 4 bytes.
- Address map_address = mov_instruction_address + delta + 3;
- *(reinterpret_cast<Object**>(map_address)) = map;
-
- // The cell is in the last 4 bytes of a five byte mov reg, imm32
- // instruction, so we add 1 to get the offset to the last 4 bytes.
- Address offset_address =
- mov_instruction_address + delta + kOffsetToLoadInstruction + 1;
- *reinterpret_cast<Object**>(offset_address) = cell;
- return true;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test eax, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- // Extract the encoded deltas from the test eax instruction.
- Address encoded_offsets_address = test_instruction_address + 1;
- int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
- int delta_to_map_check = -(encoded_offsets & 0xFFFF);
- int delta_to_record_write = encoded_offsets >> 16;
-
- // Patch the map to check. The map address is the last 4 bytes of
- // the 7-byte operand-immediate compare instruction.
- Address map_check_address = test_instruction_address + delta_to_map_check;
- Address map_address = map_check_address + 3;
- *(reinterpret_cast<Object**>(map_address)) = map;
-
- // Patch the offset in the store instruction. The offset is in the
- // last 4 bytes of a six byte register-to-memory move instruction.
- Address offset_address =
- map_check_address + StoreIC::kOffsetToStoreInstruction + 2;
- // The offset should have initial value (kMaxInt - 1), cleared value
- // (-1) or we should be clearing the inlined version.
- ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
- *reinterpret_cast<int*>(offset_address) == -1 ||
- (offset == 0 && map == Heap::null_value()));
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
- // Patch the offset in the write-barrier code. The offset is the
- // last 4 bytes of a six byte lea instruction.
- offset_address = map_check_address + delta_to_record_write + 2;
- // The offset should have initial value (kMaxInt), cleared value
- // (-1) or we should be clearing the inlined version.
- ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
- *reinterpret_cast<int*>(offset_address) == -1 ||
- (offset == 0 && map == Heap::null_value()));
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
- return true;
-}
-
-
-static bool PatchInlinedMapCheck(Address address, Object* map) {
- if (V8::UseCrankshaft()) return false;
-
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
- // The keyed load has a fast inlined case if the IC call instruction
- // is immediately followed by a test instruction.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- // Fetch the offset from the test instruction to the map cmp
- // instruction. This offset is stored in the last 4 bytes of the 5
- // byte test instruction.
- Address delta_address = test_instruction_address + 1;
- int delta = *reinterpret_cast<int*>(delta_address);
- // Compute the map address. The map address is in the last 4 bytes
- // of the 7-byte operand-immediate compare instruction, so we add 3
- // to the offset to get the map address.
- Address map_address = test_instruction_address + delta + 3;
- // Patch the map check.
- *(reinterpret_cast<Object**>(map_address)) = map;
- return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
- return PatchInlinedMapCheck(address, map);
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
- return PatchInlinedMapCheck(address, map);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- __ IncrementCounter(&Counters::keyed_load_miss, 1);
+ __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
__ pop(ebx);
__ push(edx); // receiver
@@ -1466,7 +1403,10 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ push(ebx); // return address
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
+ ExternalReference ref = force_generic
+ ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
+ masm->isolate())
+ : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
}
@@ -1501,7 +1441,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
NOT_IN_LOOP,
MONOMORPHIC,
strict_mode);
- StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
+ no_reg);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1523,17 +1464,12 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
__ push(ebx);
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss));
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
-// The offset from the inlined patch site to the start of the inlined
-// store instruction. It is 7 bytes (test reg, imm) plus 6 bytes (jne
-// slow_label).
-const int StoreIC::kOffsetToStoreInstruction = 13;
-
-
void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
@@ -1543,9 +1479,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
// -----------------------------------
//
// This accepts as a receiver anything JSObject::SetElementsLength accepts
- // (currently anything except for external and pixel arrays which means
- // anything with elements of FixedArray type.), but currently is restricted
- // to JSArray.
+ // (currently anything except for external arrays, which means anything
+ // with elements of FixedArray type), but is currently restricted to
+ // JSArray.
// Value must be a number, but only smis are accepted as the most common case.
Label miss;
@@ -1555,23 +1490,21 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
Register scratch = ebx;
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ JumpIfSmi(receiver, &miss);
// Check that the object is a JS array.
__ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Check that elements are FixedArray.
// We rely on StoreIC_ArrayLength below to deal with all types of
// fast elements (including COW).
__ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
__ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Check that value is a smi.
- __ test(value, Immediate(kSmiTagMask));
- __ j(not_zero, &miss, not_taken);
+ __ JumpIfNotSmi(value, &miss);
// Prepare tail call to StoreIC_ArrayLength.
__ pop(scratch);
@@ -1579,7 +1512,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
__ push(value);
__ push(scratch); // return address
- ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
__ bind(&miss);
@@ -1606,14 +1540,15 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
__ push(edx);
GenerateDictionaryStore(masm, &restore_miss, ebx, ecx, eax, edx, edi);
__ Drop(1);
- __ IncrementCounter(&Counters::store_normal_hit, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1);
__ ret(0);
__ bind(&restore_miss);
__ pop(edx);
__ bind(&miss);
- __ IncrementCounter(&Counters::store_normal_miss, 1);
+ __ IncrementCounter(counters->store_normal_miss(), 1);
GenerateMiss(masm);
}
@@ -1661,7 +1596,7 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -1676,7 +1611,30 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ push(ebx);
// Do tail-call to runtime routine.
- ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
+ ExternalReference ref = force_generic
+ ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
+ masm->isolate())
+ : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx);
+ __ push(ecx);
+ __ push(eax);
+ __ push(ebx); // return address
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
diff --git a/deps/v8/src/ia32/jump-target-ia32.cc b/deps/v8/src/ia32/jump-target-ia32.cc
deleted file mode 100644
index 76c0d02d4..000000000
--- a/deps/v8/src/ia32/jump-target-ia32.cc
+++ /dev/null
@@ -1,437 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
- ASSERT(cgen()->has_valid_frame());
- // Live non-frame registers are not allowed at unconditional jumps
- // because we have no way of invalidating the corresponding results
- // which are still live in the C++ code.
- ASSERT(cgen()->HasValidEntryRegisters());
-
- if (is_bound()) {
- // Backward jump. There is an expected frame to merge to.
- ASSERT(direction_ == BIDIRECTIONAL);
- cgen()->frame()->PrepareMergeTo(entry_frame_);
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- } else if (entry_frame_ != NULL) {
- // Forward jump with a preconfigured entry frame. Assert the
- // current frame matches the expected one and jump to the block.
- ASSERT(cgen()->frame()->Equals(entry_frame_));
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- } else {
- // Forward jump. Remember the current frame and emit a jump to
- // its merge code.
- AddReachingFrame(cgen()->frame());
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- __ jmp(&merge_labels_.last());
- }
-}
-
-
-void JumpTarget::DoBranch(Condition cc, Hint hint) {
- ASSERT(cgen() != NULL);
- ASSERT(cgen()->has_valid_frame());
-
- if (is_bound()) {
- ASSERT(direction_ == BIDIRECTIONAL);
- // Backward branch. We have an expected frame to merge to on the
- // backward edge.
-
- // Swap the current frame for a copy (we do the swapping to get
- // the off-frame registers off the fall through) to use for the
- // branch.
- VirtualFrame* fall_through_frame = cgen()->frame();
- VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
- RegisterFile non_frame_registers;
- cgen()->SetFrame(branch_frame, &non_frame_registers);
-
- // Check if we can avoid merge code.
- cgen()->frame()->PrepareMergeTo(entry_frame_);
- if (cgen()->frame()->Equals(entry_frame_)) {
- // Branch right in to the block.
- cgen()->DeleteFrame();
- __ j(cc, &entry_label_, hint);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- return;
- }
-
- // Check if we can reuse existing merge code.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (reaching_frames_[i] != NULL &&
- cgen()->frame()->Equals(reaching_frames_[i])) {
- // Branch to the merge code.
- cgen()->DeleteFrame();
- __ j(cc, &merge_labels_[i], hint);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- return;
- }
- }
-
- // To emit the merge code here, we negate the condition and branch
- // around the merge code on the fall through path.
- Label original_fall_through;
- __ j(NegateCondition(cc), &original_fall_through, NegateHint(hint));
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- __ bind(&original_fall_through);
-
- } else if (entry_frame_ != NULL) {
- // Forward branch with a preconfigured entry frame. Assert the
- // current frame matches the expected one and branch to the block.
- ASSERT(cgen()->frame()->Equals(entry_frame_));
- // Explicitly use the macro assembler instead of __ as forward
- // branches are expected to be a fixed size (no inserted
- // coverage-checking instructions please). This is used in
- // Reference::GetValue.
- cgen()->masm()->j(cc, &entry_label_, hint);
-
- } else {
- // Forward branch. A copy of the current frame is remembered and
- // a branch to the merge code is emitted. Explicitly use the
- // macro assembler instead of __ as forward branches are expected
- // to be a fixed size (no inserted coverage-checking instructions
- // please). This is used in Reference::GetValue.
- AddReachingFrame(new VirtualFrame(cgen()->frame()));
- cgen()->masm()->j(cc, &merge_labels_.last(), hint);
- }
-}
-
-
-void JumpTarget::Call() {
- // Call is used to push the address of the catch block on the stack as
- // a return address when compiling try/catch and try/finally. We
- // fully spill the frame before making the call. The expected frame
- // at the label (which should be the only one) is the spilled current
- // frame plus an in-memory return address. The "fall-through" frame
- // at the return site is the spilled current frame.
- ASSERT(cgen() != NULL);
- ASSERT(cgen()->has_valid_frame());
- // There are no non-frame references across the call.
- ASSERT(cgen()->HasValidEntryRegisters());
- ASSERT(!is_linked());
-
- cgen()->frame()->SpillAll();
- VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
- target_frame->Adjust(1);
- // We do not expect a call with a preconfigured entry frame.
- ASSERT(entry_frame_ == NULL);
- AddReachingFrame(target_frame);
- __ call(&merge_labels_.last());
-}
-
-
-void JumpTarget::DoBind() {
- ASSERT(cgen() != NULL);
- ASSERT(!is_bound());
-
- // Live non-frame registers are not allowed at the start of a basic
- // block.
- ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
- // Fast case: the jump target was manually configured with an entry
- // frame to use.
- if (entry_frame_ != NULL) {
- // Assert no reaching frames to deal with.
- ASSERT(reaching_frames_.is_empty());
- ASSERT(!cgen()->has_valid_frame());
-
- RegisterFile empty;
- if (direction_ == BIDIRECTIONAL) {
- // Copy the entry frame so the original can be used for a
- // possible backward jump.
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- } else {
- // Take ownership of the entry frame.
- cgen()->SetFrame(entry_frame_, &empty);
- entry_frame_ = NULL;
- }
- __ bind(&entry_label_);
- return;
- }
-
- if (!is_linked()) {
- ASSERT(cgen()->has_valid_frame());
- if (direction_ == FORWARD_ONLY) {
- // Fast case: no forward jumps and no possible backward jumps.
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- VirtualFrame* frame = cgen()->frame();
- int difference = frame->stack_pointer_ - (frame->element_count() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ add(Operand(esp), Immediate(difference * kPointerSize));
- }
- } else {
- ASSERT(direction_ == BIDIRECTIONAL);
- // Fast case: no forward jumps, possible backward ones. Remove
- // constants and copies above the watermark on the fall-through
- // frame and use it as the entry frame.
- cgen()->frame()->MakeMergable();
- entry_frame_ = new VirtualFrame(cgen()->frame());
- }
- __ bind(&entry_label_);
- return;
- }
-
- if (direction_ == FORWARD_ONLY &&
- !cgen()->has_valid_frame() &&
- reaching_frames_.length() == 1) {
- // Fast case: no fall-through, a single forward jump, and no
- // possible backward jumps. Pick up the only reaching frame, take
- // ownership of it, and use it for the block about to be emitted.
- VirtualFrame* frame = reaching_frames_[0];
- RegisterFile empty;
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[0] = NULL;
- __ bind(&merge_labels_[0]);
-
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- int difference = frame->stack_pointer_ - (frame->element_count() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ add(Operand(esp), Immediate(difference * kPointerSize));
- }
-
- __ bind(&entry_label_);
- return;
- }
-
- // If there is a current frame, record it as the fall-through. It
- // is owned by the reaching frames for now.
- bool had_fall_through = false;
- if (cgen()->has_valid_frame()) {
- had_fall_through = true;
- AddReachingFrame(cgen()->frame()); // Return value ignored.
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- }
-
- // Compute the frame to use for entry to the block.
- ComputeEntryFrame();
-
- // Some moves required to merge to an expected frame require purely
- // frame state changes, and do not require any code generation.
- // Perform those first to increase the possibility of finding equal
- // frames below.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (reaching_frames_[i] != NULL) {
- reaching_frames_[i]->PrepareMergeTo(entry_frame_);
- }
- }
-
- if (is_linked()) {
- // There were forward jumps. Handle merging the reaching frames
- // to the entry frame.
-
- // Loop over the (non-null) reaching frames and process any that
- // need merge code. Iterate backwards through the list to handle
- // the fall-through frame first. Set frames that will be
- // processed after 'i' to NULL if we want to avoid processing
- // them.
- for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
- VirtualFrame* frame = reaching_frames_[i];
-
- if (frame != NULL) {
- // Does the frame (probably) need merge code?
- if (!frame->Equals(entry_frame_)) {
- // We could have a valid frame as the fall through to the
- // binding site or as the fall through from a previous merge
- // code block. Jump around the code we are about to
- // generate.
- if (cgen()->has_valid_frame()) {
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- }
- // Pick up the frame for this block. Assume ownership if
- // there cannot be backward jumps.
- RegisterFile empty;
- if (direction_ == BIDIRECTIONAL) {
- cgen()->SetFrame(new VirtualFrame(frame), &empty);
- } else {
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[i] = NULL;
- }
- __ bind(&merge_labels_[i]);
-
- // Loop over the remaining (non-null) reaching frames,
- // looking for any that can share merge code with this one.
- for (int j = 0; j < i; j++) {
- VirtualFrame* other = reaching_frames_[j];
- if (other != NULL && other->Equals(cgen()->frame())) {
- // Set the reaching frame element to null to avoid
- // processing it later, and then bind its entry label.
- reaching_frames_[j] = NULL;
- __ bind(&merge_labels_[j]);
- }
- }
-
- // Emit the merge code.
- cgen()->frame()->MergeTo(entry_frame_);
- } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
- // If this is the fall through frame, and it didn't need
- // merge code, we need to pick up the frame so we can jump
- // around subsequent merge blocks if necessary.
- RegisterFile empty;
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[i] = NULL;
- }
- }
- }
-
- // The code generator may not have a current frame if there was no
- // fall through and none of the reaching frames needed merging.
- // In that case, clone the entry frame as the current frame.
- if (!cgen()->has_valid_frame()) {
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- }
-
- // There may be unprocessed reaching frames that did not need
- // merge code. They will have unbound merge labels. Bind their
- // merge labels to be the same as the entry label and deallocate
- // them.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (!merge_labels_[i].is_bound()) {
- reaching_frames_[i] = NULL;
- __ bind(&merge_labels_[i]);
- }
- }
-
- // There are non-NULL reaching frames with bound labels for each
- // merge block, but only on backward targets.
- } else {
- // There were no forward jumps. There must be a current frame and
- // this must be a bidirectional target.
- ASSERT(reaching_frames_.length() == 1);
- ASSERT(reaching_frames_[0] != NULL);
- ASSERT(direction_ == BIDIRECTIONAL);
-
- // Use a copy of the reaching frame so the original can be saved
- // for possible reuse as a backward merge block.
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
- __ bind(&merge_labels_[0]);
- cgen()->frame()->MergeTo(entry_frame_);
- }
-
- __ bind(&entry_label_);
-}
-
-
-void BreakTarget::Jump() {
- // Drop leftover statement state from the frame before merging, without
- // emitting code.
- ASSERT(cgen()->has_valid_frame());
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
- // Drop leftover statement state from the frame before merging, without
- // emitting code.
- ASSERT(cgen()->has_valid_frame());
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- cgen()->frame()->Push(arg);
- DoJump();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even on
- // the fall through. This is so we can bind the return target with state
- // on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- }
- DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_ + 1);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even on
- // the fall through. This is so we can bind the return target with state
- // on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- cgen()->frame()->Push(arg);
- }
- DoBind();
- *arg = cgen()->frame()->Pop();
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
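
The deleted file implemented jump targets for the old virtual-frame code generator: each label carries one expected frame, forward jumps record the frames that reach it, and bind time emits merge code for each of them. A toy model of that bookkeeping, with a frame reduced to its height:

    #include <vector>

    struct ToyJumpTarget {
      int entry_height = -1;              // expected frame; -1 until known
      std::vector<int> reaching_heights;  // frames recorded at forward jumps

      void Jump(int current_height) {
        if (entry_height >= 0) {
          // Backward jump: the current frame must merge to entry_height here.
        } else {
          reaching_heights.push_back(current_height);  // merge later, at Bind
        }
      }

      void Bind(int fall_through_height) {
        if (entry_height < 0) entry_height = fall_through_height;
        // The real code emitted a merge block for each recorded frame.
      }
    };
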
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index a1be11d83..7f35310f7 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -40,7 +40,7 @@ namespace internal {
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public PostCallGenerator {
+class SafepointGenerator : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -50,7 +50,9 @@ class SafepointGenerator : public PostCallGenerator {
deoptimization_index_(deoptimization_index) {}
virtual ~SafepointGenerator() { }
- virtual void Generate() {
+ virtual void BeforeCall(int call_size) const {}
+
+ virtual void AfterCall() const {
codegen_->RecordSafepoint(pointers_, deoptimization_index_);
}
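
The CallWrapper interface adopted here brackets an emitted call with hooks, so AfterCall runs logically at the call's return address and the safepoint is recorded at the right pc. A sketch of the contract and how an assembler would drive it:

    // Minimal model of the CallWrapper contract (trimmed to these hooks).
    class CallWrapperSketch {
     public:
      virtual ~CallWrapperSketch() {}
      virtual void BeforeCall(int call_size) const = 0;  // before emitting
      virtual void AfterCall() const = 0;                // at return address
    };

    // How an assembler would drive it (the call itself is elided):
    void EmitWrappedCall(const CallWrapperSketch& wrapper) {
      wrapper.BeforeCall(5);  // e.g. a 5-byte near call on ia32
      // ... emit the call instruction here ...
      wrapper.AfterCall();    // safepoint recorded for the pc after it
    }
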
@@ -77,7 +79,7 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
- code->set_stack_slots(StackSlotCount());
+ code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -86,8 +88,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
void LCodeGen::Abort(const char* format, ...) {
if (FLAG_trace_bailout) {
- SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
- PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
+ SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+ PrintF("Aborting LCodeGen in @\"%s\": ", *name);
va_list arguments;
va_start(arguments, format);
OS::VPrint(format, arguments);
@@ -116,16 +118,6 @@ void LCodeGen::Comment(const char* format, ...) {
}
-bool LCodeGen::GenerateRelocPadding() {
- int reloc_size = masm()->relocation_writer_size();
- while (reloc_size < deoptimization_reloc_size.min_size) {
- __ RecordComment(RelocInfo::kFillerCommentString, true);
- reloc_size += RelocInfo::kMinRelocCommentSize;
- }
- return !is_aborted();
-}
-
-
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
@@ -136,13 +128,28 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). ecx is zero for method calls and non-zero for
+ // function calls.
+ if (info_->is_strict_mode() || info_->is_native()) {
+ Label ok;
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &ok, Label::kNear);
+ // +1 for return address.
+ int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ __ mov(Operand(esp, receiver_offset),
+ Immediate(isolate()->factory()->undefined_value()));
+ __ bind(&ok);
+ }
+
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
__ push(edi); // Callee's JS function.
// Reserve space for the stack slots needed by the code.
- int slots = StackSlotCount();
+ int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
__ mov(Operand(eax), Immediate(slots));
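
The receiver_offset computed in the prologue above skips the parameters plus the return address to reach the receiver slot. The ia32 stack math as a one-liner:

    const int kPointerSizeModel = 4;  // ia32 word size

    // Offset from esp to the receiver slot on function entry: the
    // parameters sit above the return address, the receiver above them.
    inline int ReceiverOffset(int num_parameters) {
      return (num_parameters + 1) * kPointerSizeModel;  // +1: return address
    }
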
@@ -177,7 +184,7 @@ bool LCodeGen::GeneratePrologue() {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
// Context is returned in both eax and esi. It replaces the context
@@ -264,7 +271,7 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
- safepoints_.Emit(masm(), StackSlotCount());
+ safepoints_.Emit(masm(), GetStackSlotCount());
return !is_aborted();
}
@@ -396,7 +403,7 @@ void LCodeGen::AddToTranslation(Translation* translation,
translation->StoreDoubleStackSlot(op->index());
} else if (op->IsArgument()) {
ASSERT(is_tagged);
- int src_index = StackSlotCount() + op->index();
+ int src_index = GetStackSlotCount() + op->index();
translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
@@ -436,7 +443,7 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
- if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
+ if (code->kind() == Code::BINARY_OP_IC ||
code->kind() == Code::COMPARE_IC) {
__ nop();
}
@@ -451,7 +458,7 @@ void LCodeGen::CallCode(Handle<Code> code,
}
-void LCodeGen::CallRuntime(Runtime::Function* fun,
+void LCodeGen::CallRuntime(const Runtime::Function* fun,
int argc,
LInstruction* instr,
ContextMode context_mode) {
@@ -553,7 +560,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
__ mov(ebx, shared);
__ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
__ sub(Operand(eax), Immediate(Smi::FromInt(1)));
- __ j(not_zero, &no_deopt);
+ __ j(not_zero, &no_deopt, Label::kNear);
if (FLAG_trap_on_deopt) __ int3();
__ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
__ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
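The counter decremented here is stored as a Smi, which is why the code subtracts Smi::FromInt(1) rather than a raw 1. A sketch of why that works, assuming the classic 32-bit Smi encoding (value shifted left by one, tag bit zero); the helper names are hypothetical:

    #include <cassert>
    #include <cstdint>

    // Assumed 32-bit Smi layout: integer value << 1, low tag bit 0.
    inline int32_t SmiFromInt(int32_t value) { return value << 1; }
    inline int32_t SmiToInt(int32_t smi) { return smi >> 1; }

    int main() {
      int32_t counter = SmiFromInt(5);
      counter -= SmiFromInt(1);  // tag bits cancel, so raw subtraction is safe
      assert(SmiToInt(counter) == 4);
    }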
@@ -574,13 +581,13 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
} else {
if (FLAG_trap_on_deopt) {
- NearLabel done;
- __ j(NegateCondition(cc), &done);
+ Label done;
+ __ j(NegateCondition(cc), &done, Label::kNear);
__ int3();
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&done);
} else {
- __ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
+ __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
}
}
}
@@ -591,14 +598,14 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
if (length == 0) return;
ASSERT(FLAG_deopt);
Handle<DeoptimizationInputData> data =
- Factory::NewDeoptimizationInputData(length, TENURED);
+ factory()->NewDeoptimizationInputData(length, TENURED);
Handle<ByteArray> translations = translations_.CreateByteArray();
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals =
- Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
@@ -686,7 +693,7 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
void LCodeGen::RecordPosition(int position) {
- if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+ if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
}
@@ -699,7 +706,7 @@ void LCodeGen::DoLabel(LLabel* label) {
}
__ bind(label->label());
current_block_ = label->block_id();
- LCodeGen::DoGap(label);
+ DoGap(label);
}
@@ -725,6 +732,11 @@ void LCodeGen::DoGap(LGap* gap) {
}
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+ DoGap(instr);
+}
+
+
void LCodeGen::DoParameter(LParameter* instr) {
// Nothing to do.
}
@@ -749,16 +761,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
- case CodeStub::StringCharAt: {
- StringCharAtStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
- break;
- }
- case CodeStub::MathPow: {
- MathPowStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
- break;
- }
case CodeStub::NumberToString: {
NumberToStringStub stub;
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
@@ -792,41 +794,113 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
- LOperand* right = instr->InputAt(1);
- ASSERT(ToRegister(instr->result()).is(edx));
- ASSERT(ToRegister(instr->InputAt(0)).is(eax));
- ASSERT(!ToRegister(instr->InputAt(1)).is(eax));
- ASSERT(!ToRegister(instr->InputAt(1)).is(edx));
+ if (instr->hydrogen()->HasPowerOf2Divisor()) {
+ Register dividend = ToRegister(instr->InputAt(0));
- Register right_reg = ToRegister(right);
+ int32_t divisor =
+ HConstant::cast(instr->hydrogen()->right())->Integer32Value();
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(right_reg, ToOperand(right));
- DeoptimizeIf(zero, instr->environment());
- }
+ if (divisor < 0) divisor = -divisor;
- // Sign extend to edx.
- __ cdq();
-
- // Check for (0 % -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- NearLabel positive_left;
- NearLabel done;
- __ test(eax, Operand(eax));
- __ j(not_sign, &positive_left);
- __ idiv(right_reg);
-
- // Test the remainder for 0, because then the result would be -0.
- __ test(edx, Operand(edx));
- __ j(not_zero, &done);
-
- DeoptimizeIf(no_condition, instr->environment());
- __ bind(&positive_left);
- __ idiv(right_reg);
+ Label positive_dividend, done;
+ __ test(dividend, Operand(dividend));
+ __ j(not_sign, &positive_dividend, Label::kNear);
+ __ neg(dividend);
+ __ and_(dividend, divisor - 1);
+ __ neg(dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ j(not_zero, &done, Label::kNear);
+ DeoptimizeIf(no_condition, instr->environment());
+ } else {
+ __ jmp(&done, Label::kNear);
+ }
+ __ bind(&positive_dividend);
+ __ and_(dividend, divisor - 1);
__ bind(&done);
} else {
- __ idiv(right_reg);
+ Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
+ Register left_reg = ToRegister(instr->InputAt(0));
+ Register right_reg = ToRegister(instr->InputAt(1));
+ Register result_reg = ToRegister(instr->result());
+
+ ASSERT(left_reg.is(eax));
+ ASSERT(result_reg.is(edx));
+ ASSERT(!right_reg.is(eax));
+ ASSERT(!right_reg.is(edx));
+
+ // Check for x % 0.
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ test(right_reg, Operand(right_reg));
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ __ test(left_reg, Operand(left_reg));
+ __ j(zero, &remainder_eq_dividend, Label::kNear);
+ __ j(sign, &slow, Label::kNear);
+
+ __ test(right_reg, Operand(right_reg));
+ __ j(not_sign, &both_positive, Label::kNear);
+ // The sign of the divisor doesn't matter.
+ __ neg(right_reg);
+
+ __ bind(&both_positive);
+ // If the dividend is smaller than the nonnegative
+ // divisor, the dividend is the result.
+ __ cmp(left_reg, Operand(right_reg));
+ __ j(less, &remainder_eq_dividend, Label::kNear);
+
+    // Check if the divisor is a power-of-two integer.
+ Register scratch = ToRegister(instr->TempAt(0));
+ __ mov(scratch, right_reg);
+ __ sub(Operand(scratch), Immediate(1));
+ __ test(scratch, Operand(right_reg));
+ __ j(not_zero, &do_subtraction, Label::kNear);
+ __ and_(left_reg, Operand(scratch));
+ __ jmp(&remainder_eq_dividend, Label::kNear);
+
+ __ bind(&do_subtraction);
+ const int kUnfolds = 3;
+ // Try a few subtractions of the dividend.
+ __ mov(scratch, left_reg);
+ for (int i = 0; i < kUnfolds; i++) {
+ // Reduce the dividend by the divisor.
+ __ sub(left_reg, Operand(right_reg));
+ // Check if the dividend is less than the divisor.
+ __ cmp(left_reg, Operand(right_reg));
+ __ j(less, &remainder_eq_dividend, Label::kNear);
+ }
+ __ mov(left_reg, scratch);
+
+ // Slow case, using idiv instruction.
+ __ bind(&slow);
+ // Sign extend to edx.
+ __ cdq();
+
+ // Check for (0 % -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label positive_left;
+ Label done;
+ __ test(left_reg, Operand(left_reg));
+ __ j(not_sign, &positive_left, Label::kNear);
+ __ idiv(right_reg);
+
+ // Test the remainder for 0, because then the result would be -0.
+ __ test(result_reg, Operand(result_reg));
+ __ j(not_zero, &done, Label::kNear);
+
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&positive_left);
+ __ idiv(right_reg);
+ __ bind(&done);
+ } else {
+ __ idiv(right_reg);
+ }
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&remainder_eq_dividend);
+ __ mov(result_reg, left_reg);
+
+ __ bind(&done);
}
}
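The new power-of-two fast path replaces idiv with a mask, negating a negative dividend before and after the and so the remainder keeps the dividend's sign. A sketch of the arithmetic it relies on (hypothetical helper; d is a positive power of two):

    #include <cassert>

    // For d = 2^k:  x >= 0 gives x % d ==  (x & (d - 1));
    //               x <  0 gives x % d == -((-x) & (d - 1))  (neg, and, neg).
    int ModPowerOfTwo(int x, int d) {
      if (x < 0) return -(-x & (d - 1));
      return x & (d - 1);
    }

    int main() {
      assert(ModPowerOfTwo(7, 4) == 7 % 4);    //  3
      assert(ModPowerOfTwo(-7, 4) == -7 % 4);  // -3
      // -8 % 4 is 0 here but -0 in JS, which is why the generated code
      // deoptimizes on a zero result when kBailoutOnMinusZero is set.
      assert(ModPowerOfTwo(-8, 4) == 0);
    }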
@@ -849,9 +923,9 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for (0 / -x) that will produce negative zero.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- NearLabel left_not_zero;
+ Label left_not_zero;
__ test(left_reg, Operand(left_reg));
- __ j(not_zero, &left_not_zero);
+ __ j(not_zero, &left_not_zero, Label::kNear);
__ test(right_reg, ToOperand(right));
DeoptimizeIf(sign, instr->environment());
__ bind(&left_not_zero);
@@ -859,9 +933,9 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for (-kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- NearLabel left_not_min_int;
+ Label left_not_min_int;
__ cmp(left_reg, kMinInt);
- __ j(not_zero, &left_not_min_int);
+ __ j(not_zero, &left_not_min_int, Label::kNear);
__ cmp(right_reg, -1);
DeoptimizeIf(zero, instr->environment());
__ bind(&left_not_min_int);
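These two guards cover the inputs idiv cannot express: 0 / -x, whose JavaScript result is -0 and thus not an int32, and kMinInt / -1, whose quotient 2^31 overflows (idiv would fault). A sketch of the predicate the deopt checks implement; the function name is hypothetical:

    #include <climits>
    #include <cstdio>

    bool DivNeedsBailout(int left, int right) {
      if (left == 0 && right < 0) return true;          // JS: 0 / -x == -0
      if (left == INT_MIN && right == -1) return true;  // quotient 2^31 overflows
      return false;
    }

    int main() {
      std::printf("%d %d\n", DivNeedsBailout(0, -5), DivNeedsBailout(INT_MIN, -1));
    }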
@@ -886,7 +960,49 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (right->IsConstantOperand()) {
- __ imul(left, left, ToInteger32(LConstantOperand::cast(right)));
+ // Try strength reductions on the multiplication.
+ // All replacement instructions are at most as long as the imul
+ // and have better latency.
+ int constant = ToInteger32(LConstantOperand::cast(right));
+ if (constant == -1) {
+ __ neg(left);
+ } else if (constant == 0) {
+ __ xor_(left, Operand(left));
+ } else if (constant == 2) {
+ __ add(left, Operand(left));
+ } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // If we know that the multiplication can't overflow, it's safe to
+ // use instructions that don't set the overflow flag for the
+ // multiplication.
+ switch (constant) {
+ case 1:
+ // Do nothing.
+ break;
+ case 3:
+ __ lea(left, Operand(left, left, times_2, 0));
+ break;
+ case 4:
+ __ shl(left, 2);
+ break;
+ case 5:
+ __ lea(left, Operand(left, left, times_4, 0));
+ break;
+ case 8:
+ __ shl(left, 3);
+ break;
+ case 9:
+ __ lea(left, Operand(left, left, times_8, 0));
+ break;
+ case 16:
+ __ shl(left, 4);
+ break;
+ default:
+ __ imul(left, left, constant);
+ break;
+ }
+ } else {
+ __ imul(left, left, constant);
+ }
} else {
__ imul(left, ToOperand(right));
}
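The strength reductions above swap imul for neg/xor/add/lea/shl. The lea forms compute base + index * scale in one instruction, and none of the replacements set the overflow flag, hence the kCanOverflow guard. A sketch checking the identities (x kept non-negative so the shifts stay within defined C++ behavior; the hardware wraps identically for negatives):

    #include <cassert>

    int main() {
      for (int x = 0; x <= 1000; ++x) {
        assert(x + (x << 1) == x * 3);   // lea(left, left, times_2)
        assert((x << 2) == x * 4);       // shl(left, 2)
        assert(x + (x << 2) == x * 5);   // lea(left, left, times_4)
        assert(x + (x << 3) == x * 9);   // lea(left, left, times_8)
        assert((x << 4) == x * 16);      // shl(left, 4)
      }
    }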
@@ -897,9 +1013,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Bail out if the result is supposed to be negative zero.
- NearLabel done;
+ Label done;
__ test(left, Operand(left));
- __ j(not_zero, &done);
+ __ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
DeoptimizeIf(no_condition, instr->environment());
@@ -1040,7 +1156,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
// Use xor to produce +0.0 in a fast and compact way, but avoid
// doing so if the constant is -0.0.
if (BitCast<uint64_t, double>(v) == 0) {
- __ xorpd(res, res);
+ __ xorps(res, res);
} else {
Register temp = ToRegister(instr->TempAt(0));
uint64_t int_val = BitCast<uint64_t, double>(v);
@@ -1054,7 +1170,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
__ Set(temp, Immediate(upper));
__ pinsrd(res, Operand(temp), 1);
} else {
- __ xorpd(res, res);
+ __ xorps(res, res);
__ Set(temp, Immediate(upper));
__ pinsrd(res, Operand(temp), 1);
}
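Two things happen in this hunk: xorpd becomes xorps (both zero the register identically, and the xorps encoding is presumably preferred for being one byte shorter), and non-zero double constants are built from their two 32-bit halves, with pinsrd filling the upper lane. A sketch of the split, using memcpy in place of BitCast; note that -0.0 fails the == 0 bit test and so takes the two-half path:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    int main() {
      double v = 1.5;
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);                 // BitCast<uint64_t, double>
      uint32_t lower = static_cast<uint32_t>(bits);        // low lane
      uint32_t upper = static_cast<uint32_t>(bits >> 32);  // pinsrd lane 1
      std::printf("lower=%08x upper=%08x\n", lower, upper);
    }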
@@ -1092,10 +1208,25 @@ void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
}
-void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
+void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->InputAt(0));
- __ mov(result, FieldOperand(array, PixelArray::kLengthOffset));
+ __ mov(result, FieldOperand(array, ExternalArray::kLengthOffset));
+}
+
+
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+
+ // Load map into |result|.
+ __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
+ // Load the map's "bit field 2" into |result|. We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ mov(result, FieldOperand(result, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(result, Map::kElementsKindMask);
+ __ shr(result, Map::kElementsKindShift);
}
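DoElementsKind decodes a small bit field out of the map: mask, then shift. A sketch with illustrative constants (the real Map::kElementsKindMask/Shift values live in the V8 headers and are not restated here):

    #include <cassert>

    const int kElementsKindShift = 5;                         // illustrative
    const int kElementsKindMask = 0x7 << kElementsKindShift;  // illustrative

    int ElementsKindOf(int bit_field2) {
      return (bit_field2 & kElementsKindMask) >> kElementsKindShift;
    }

    int main() {
      int bit_field2 = (3 << kElementsKindShift) | 0x1f;  // kind 3 + other flags
      assert(ElementsKindOf(bit_field2) == 3);
    }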
@@ -1104,14 +1235,13 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
Register result = ToRegister(instr->result());
Register map = ToRegister(instr->TempAt(0));
ASSERT(input.is(result));
- NearLabel done;
+ Label done;
// If the object is a smi return the object.
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, &done);
+ __ JumpIfSmi(input, &done, Label::kNear);
// If the object is not a value type, return the object.
__ CmpObjectType(input, JS_VALUE_TYPE, map);
- __ j(not_equal, &done);
+ __ j(not_equal, &done, Label::kNear);
__ mov(result, FieldOperand(input, JSValue::kValueOffset));
__ bind(&done);
@@ -1154,35 +1284,38 @@ void LCodeGen::DoAddI(LAddI* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ XMMRegister left = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister right = ToDoubleRegister(instr->InputAt(1));
+ XMMRegister result = ToDoubleRegister(instr->result());
// Modulo uses a fixed result register.
- ASSERT(instr->op() == Token::MOD || left->Equals(instr->result()));
+ ASSERT(instr->op() == Token::MOD || left.is(result));
switch (instr->op()) {
case Token::ADD:
- __ addsd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ addsd(left, right);
break;
case Token::SUB:
- __ subsd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ subsd(left, right);
break;
case Token::MUL:
- __ mulsd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ mulsd(left, right);
break;
case Token::DIV:
- __ divsd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ divsd(left, right);
break;
case Token::MOD: {
// Pass two doubles as arguments on the stack.
__ PrepareCallCFunction(4, eax);
- __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
- __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
- __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
+ __ movdbl(Operand(esp, 0 * kDoubleSize), left);
+ __ movdbl(Operand(esp, 1 * kDoubleSize), right);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ 4);
// Return value is in st(0) on ia32.
// Store it into the (fixed) result register.
__ sub(Operand(esp), Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
- __ movdbl(ToDoubleRegister(instr->result()), Operand(esp, 0));
+ __ movdbl(result, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
break;
}
@@ -1198,7 +1331,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->InputAt(1)).is(eax));
ASSERT(ToRegister(instr->result()).is(eax));
- TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ BinaryOpStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
}
@@ -1241,35 +1374,34 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(true_block, false_block, not_zero);
} else if (r.IsDouble()) {
XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
- __ xorpd(xmm0, xmm0);
+ __ xorps(xmm0, xmm0);
__ ucomisd(reg, xmm0);
EmitBranch(true_block, false_block, not_equal);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->InputAt(0));
if (instr->hydrogen()->type().IsBoolean()) {
- __ cmp(reg, Factory::true_value());
+ __ cmp(reg, factory()->true_value());
EmitBranch(true_block, false_block, equal);
} else {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ cmp(reg, Factory::undefined_value());
+ __ cmp(reg, factory()->undefined_value());
__ j(equal, false_label);
- __ cmp(reg, Factory::true_value());
+ __ cmp(reg, factory()->true_value());
__ j(equal, true_label);
- __ cmp(reg, Factory::false_value());
+ __ cmp(reg, factory()->false_value());
__ j(equal, false_label);
__ test(reg, Operand(reg));
__ j(equal, false_label);
- __ test(reg, Immediate(kSmiTagMask));
- __ j(zero, true_label);
+ __ JumpIfSmi(reg, true_label);
// Test for double values. Zero is false.
- NearLabel call_stub;
+ Label call_stub;
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(not_equal, &call_stub);
+ factory()->heap_number_map());
+ __ j(not_equal, &call_stub, Label::kNear);
__ fldz();
__ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
__ FCmp();
@@ -1279,7 +1411,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
// The conversion stub doesn't cause garbage collections so it's
// safe to not record a safepoint after the call.
__ bind(&call_stub);
- ToBooleanStub stub;
+ ToBooleanStub stub(eax);
__ pushad();
__ push(reg);
__ CallStub(&stub);
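For a tagged value, DoBranch works through the cheap truthiness checks in order and only falls back to ToBooleanStub for anything it cannot classify inline. A sketch of that decision order under a simplified value model (Value and its tags are hypothetical stand-ins for tagged pointers):

    #include <cassert>
    #include <cmath>

    struct Value {
      enum Tag { kUndefined, kTrue, kFalse, kSmi, kHeapNumber, kOther } tag;
      int smi;
      double number;
    };

    bool ToBooleanFastPath(const Value& v, bool* needs_stub) {
      *needs_stub = false;
      switch (v.tag) {
        case Value::kUndefined: return false;       // cmp undefined_value
        case Value::kTrue:      return true;        // cmp true_value
        case Value::kFalse:     return false;       // cmp false_value
        case Value::kSmi:       return v.smi != 0;  // test reg,reg / JumpIfSmi
        case Value::kHeapNumber:                    // fldz / fld_d / FCmp path
          return v.number == v.number && v.number != 0;  // NaN and 0 are false
        default:
          *needs_stub = true;  // strings etc. go to ToBooleanStub
          return false;
      }
    }

    int main() {
      bool stub;
      Value nan = {Value::kHeapNumber, 0, std::nan("")};
      assert(!ToBooleanFastPath(nan, &stub) && !stub);
    }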
@@ -1291,45 +1423,17 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
-void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+void LCodeGen::EmitGoto(int block) {
block = chunk_->LookupDestination(block);
int next_block = GetNextEmittedBlock(current_block_);
if (block != next_block) {
- // Perform stack overflow check if this goto needs it before jumping.
- if (deferred_stack_check != NULL) {
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit();
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, chunk_->GetAssemblyLabel(block));
- __ jmp(deferred_stack_check->entry());
- deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
- } else {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
+ __ jmp(chunk_->GetAssemblyLabel(block));
}
}
-void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- PushSafepointRegistersScope scope(this);
- CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
-}
-
void LCodeGen::DoGoto(LGoto* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- private:
- LGoto* instr_;
- };
-
- DeferredStackCheck* deferred = NULL;
- if (instr->include_stack_check()) {
- deferred = new DeferredStackCheck(this, instr);
- }
- EmitGoto(instr->block_id(), deferred);
+ EmitGoto(instr->block_id());
}
@@ -1375,23 +1479,23 @@ void LCodeGen::DoCmpID(LCmpID* instr) {
LOperand* right = instr->InputAt(1);
LOperand* result = instr->result();
- NearLabel unordered;
+ Label unordered;
if (instr->is_double()) {
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the unordered case, which produces a false value.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, &unordered, not_taken);
+ __ j(parity_even, &unordered, Label::kNear);
} else {
EmitCmpI(left, right);
}
- NearLabel done;
+ Label done;
Condition cc = TokenToCondition(instr->op(), instr->is_double());
- __ mov(ToRegister(result), Factory::true_value());
- __ j(cc, &done);
+ __ mov(ToRegister(result), factory()->true_value());
+ __ j(cc, &done, Label::kNear);
__ bind(&unordered);
- __ mov(ToRegister(result), Factory::false_value());
+ __ mov(ToRegister(result), factory()->false_value());
__ bind(&done);
}
@@ -1416,21 +1520,21 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
}
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+void LCodeGen::DoCmpObjectEq(LCmpObjectEq* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
__ cmp(left, Operand(right));
- __ mov(result, Factory::true_value());
- NearLabel done;
- __ j(equal, &done);
- __ mov(result, Factory::false_value());
+ __ mov(result, factory()->true_value());
+ Label done;
+ __ j(equal, &done, Label::kNear);
+ __ mov(result, factory()->false_value());
__ bind(&done);
}
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1441,6 +1545,29 @@ void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
}
+void LCodeGen::DoCmpConstantEq(LCmpConstantEq* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ Label done;
+ __ cmp(left, instr->hydrogen()->right());
+ __ mov(result, factory()->true_value());
+ __ j(equal, &done, Label::kNear);
+ __ mov(result, factory()->false_value());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ cmp(left, instr->hydrogen()->right());
+ EmitBranch(true_block, false_block, equal);
+}
+
+
void LCodeGen::DoIsNull(LIsNull* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -1448,32 +1575,31 @@ void LCodeGen::DoIsNull(LIsNull* instr) {
// TODO(fsc): If the expression is known to be a smi, then it's
// definitely not null. Materialize false.
- __ cmp(reg, Factory::null_value());
+ __ cmp(reg, factory()->null_value());
if (instr->is_strict()) {
- __ mov(result, Factory::true_value());
- NearLabel done;
- __ j(equal, &done);
- __ mov(result, Factory::false_value());
+ __ mov(result, factory()->true_value());
+ Label done;
+ __ j(equal, &done, Label::kNear);
+ __ mov(result, factory()->false_value());
__ bind(&done);
} else {
- NearLabel true_value, false_value, done;
- __ j(equal, &true_value);
- __ cmp(reg, Factory::undefined_value());
- __ j(equal, &true_value);
- __ test(reg, Immediate(kSmiTagMask));
- __ j(zero, &false_value);
+ Label true_value, false_value, done;
+ __ j(equal, &true_value, Label::kNear);
+ __ cmp(reg, factory()->undefined_value());
+ __ j(equal, &true_value, Label::kNear);
+ __ JumpIfSmi(reg, &false_value, Label::kNear);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
Register scratch = result;
__ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
__ test(scratch, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &true_value);
+ __ j(not_zero, &true_value, Label::kNear);
__ bind(&false_value);
- __ mov(result, Factory::false_value());
- __ jmp(&done);
+ __ mov(result, factory()->false_value());
+ __ jmp(&done, Label::kNear);
__ bind(&true_value);
- __ mov(result, Factory::true_value());
+ __ mov(result, factory()->true_value());
__ bind(&done);
}
}
@@ -1488,17 +1614,16 @@ void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- __ cmp(reg, Factory::null_value());
+ __ cmp(reg, factory()->null_value());
if (instr->is_strict()) {
EmitBranch(true_block, false_block, equal);
} else {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ j(equal, true_label);
- __ cmp(reg, Factory::undefined_value());
+ __ cmp(reg, factory()->undefined_value());
__ j(equal, true_label);
- __ test(reg, Immediate(kSmiTagMask));
- __ j(zero, false_label);
+ __ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
Register scratch = ToRegister(instr->TempAt(0));
@@ -1512,29 +1637,23 @@ void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object) {
- ASSERT(!input.is(temp1));
- ASSERT(!input.is(temp2));
- ASSERT(!temp1.is(temp2));
-
- __ test(input, Immediate(kSmiTagMask));
- __ j(equal, is_not_object);
+ __ JumpIfSmi(input, is_not_object);
- __ cmp(input, Factory::null_value());
+ __ cmp(input, isolate()->factory()->null_value());
__ j(equal, is_object);
__ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
// Undetectable objects behave like undefined.
- __ movzx_b(temp2, FieldOperand(temp1, Map::kBitFieldOffset));
- __ test(temp2, Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
__ j(not_zero, is_not_object);
- __ movzx_b(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
- __ cmp(temp2, FIRST_JS_OBJECT_TYPE);
+ __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
+ __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ j(below, is_not_object);
- __ cmp(temp2, LAST_JS_OBJECT_TYPE);
+ __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
return below_equal;
}
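EmitIsObject now needs only one temp because the bit-field test reads memory directly via test_b. The predicate it computes: null is an object; otherwise the value must be a non-smi, non-undetectable heap object whose instance type lies in the non-callable spec-object range. A sketch with placeholder type bounds (the real enum values are in the V8 headers):

    #include <cassert>

    const int kFirstNoncallableSpecObjectType = 100;  // placeholder
    const int kLastNoncallableSpecObjectType = 120;   // placeholder

    bool IsObject(bool is_smi, bool is_null, bool is_undetectable,
                  int instance_type) {
      if (is_smi) return false;
      if (is_null) return true;           // null counts as an object here
      if (is_undetectable) return false;  // behaves like undefined
      return instance_type >= kFirstNoncallableSpecObjectType &&
             instance_type <= kLastNoncallableSpecObjectType;
    }

    int main() {
      assert(IsObject(false, true, false, 0));   // null
      assert(!IsObject(true, false, false, 0));  // smi
    }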
@@ -1542,18 +1661,17 @@ Condition LCodeGen::EmitIsObject(Register input,
void LCodeGen::DoIsObject(LIsObject* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->TempAt(0));
Label is_false, is_true, done;
- Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
+ Condition true_cond = EmitIsObject(reg, result, &is_false, &is_true);
__ j(true_cond, &is_true);
__ bind(&is_false);
- __ mov(result, Factory::false_value());
+ __ mov(result, factory()->false_value());
__ jmp(&done);
__ bind(&is_true);
- __ mov(result, Factory::true_value());
+ __ mov(result, factory()->true_value());
__ bind(&done);
}
@@ -1562,14 +1680,13 @@ void LCodeGen::DoIsObject(LIsObject* instr) {
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
+ Condition true_cond = EmitIsObject(reg, temp, false_label, true_label);
EmitBranch(true_block, false_block, true_cond);
}
@@ -1580,11 +1697,10 @@ void LCodeGen::DoIsSmi(LIsSmi* instr) {
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ test(input, Immediate(kSmiTagMask));
- __ mov(result, Factory::true_value());
- NearLabel done;
- __ j(zero, &done);
- __ mov(result, Factory::false_value());
+ Label done;
+ __ mov(result, factory()->true_value());
+ __ JumpIfSmi(input, &done, Label::kNear);
+ __ mov(result, factory()->false_value());
__ bind(&done);
}
@@ -1600,6 +1716,42 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
}
+void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+ Label false_label, done;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(input, &false_label, Label::kNear);
+ __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(result, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &false_label, Label::kNear);
+ __ mov(result, factory()->true_value());
+ __ jmp(&done);
+ __ bind(&false_label);
+ __ mov(result, factory()->false_value());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ EmitBranch(true_block, false_block, not_zero);
+}
+
+
static InstanceType TestType(HHasInstanceType* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
@@ -1625,15 +1777,15 @@ void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ test(input, Immediate(kSmiTagMask));
- NearLabel done, is_false;
- __ j(zero, &is_false);
+ Label done, is_false;
+ __ JumpIfSmi(input, &is_false, Label::kNear);
__ CmpObjectType(input, TestType(instr->hydrogen()), result);
- __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
- __ mov(result, Factory::true_value());
- __ jmp(&done);
+ __ j(NegateCondition(BranchCondition(instr->hydrogen())),
+ &is_false, Label::kNear);
+ __ mov(result, factory()->true_value());
+ __ jmp(&done, Label::kNear);
__ bind(&is_false);
- __ mov(result, Factory::false_value());
+ __ mov(result, factory()->false_value());
__ bind(&done);
}
@@ -1647,25 +1799,37 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, false_label);
+ __ JumpIfSmi(input, false_label);
__ CmpObjectType(input, TestType(instr->hydrogen()), temp);
EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(input);
+ }
+
+ __ mov(result, FieldOperand(input, String::kHashFieldOffset));
+ __ IndexFromHash(result, result);
+}
+
+
void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ mov(result, Factory::true_value());
+ __ mov(result, factory()->true_value());
__ test(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
- NearLabel done;
- __ j(not_zero, &done);
- __ mov(result, Factory::false_value());
+ Label done;
+ __ j(zero, &done, Label::kNear);
+ __ mov(result, factory()->false_value());
__ bind(&done);
}
@@ -1679,7 +1843,7 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch(
__ test(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, not_equal);
+ EmitBranch(true_block, false_block, equal);
}
@@ -1693,28 +1857,28 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
Register temp2) {
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, is_false);
- __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
+ __ JumpIfSmi(input, is_false);
+ __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
__ j(below, is_false);
// Map is now in temp.
// Functions have class 'Function'.
- __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
+ __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ j(equal, is_true);
+ __ j(above_equal, is_true);
} else {
- __ j(equal, is_false);
+ __ j(above_equal, is_false);
}
// Check if the constructor in the map is a function.
__ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
@@ -1746,19 +1910,19 @@ void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
ASSERT(input.is(result));
Register temp = ToRegister(instr->TempAt(0));
Handle<String> class_name = instr->hydrogen()->class_name();
- NearLabel done;
+ Label done;
Label is_true, is_false;
EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);
- __ j(not_equal, &is_false);
+ __ j(not_equal, &is_false, Label::kNear);
__ bind(&is_true);
- __ mov(result, Factory::true_value());
- __ jmp(&done);
+ __ mov(result, factory()->true_value());
+ __ jmp(&done, Label::kNear);
__ bind(&is_false);
- __ mov(result, Factory::false_value());
+ __ mov(result, factory()->false_value());
__ bind(&done);
}
@@ -1803,29 +1967,17 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
- NearLabel true_value, done;
+ Label true_value, done;
__ test(eax, Operand(eax));
- __ j(zero, &true_value);
- __ mov(ToRegister(instr->result()), Factory::false_value());
- __ jmp(&done);
+ __ j(zero, &true_value, Label::kNear);
+ __ mov(ToRegister(instr->result()), factory()->false_value());
+ __ jmp(&done, Label::kNear);
__ bind(&true_value);
- __ mov(ToRegister(instr->result()), Factory::true_value());
+ __ mov(ToRegister(instr->result()), factory()->true_value());
__ bind(&done);
}
-void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
- __ test(eax, Operand(eax));
- EmitBranch(true_block, false_block, zero);
-}
-
-
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal: public LDeferredCode {
public:
@@ -1851,26 +2003,25 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register temp = ToRegister(instr->TempAt(0));
// A Smi is not an instance of anything.
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, &false_result, not_taken);
+ __ JumpIfSmi(object, &false_result);
// This is the inlined call site instanceof cache. The two occurrences of the
// hole value will be patched to the last map/result pair generated by the
// instanceof stub.
- NearLabel cache_miss;
+ Label cache_miss;
Register map = ToRegister(instr->TempAt(0));
__ mov(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
- __ cmp(map, Factory::the_hole_value()); // Patched to cached map.
- __ j(not_equal, &cache_miss, not_taken);
- __ mov(eax, Factory::the_hole_value()); // Patched to either true or false.
+ __ cmp(map, factory()->the_hole_value()); // Patched to cached map.
+ __ j(not_equal, &cache_miss, Label::kNear);
+ __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
__ jmp(&done);
// The inlined call site cache did not match. Check for null and string
// before calling the deferred code.
__ bind(&cache_miss);
// Null is not an instance of anything.
- __ cmp(object, Factory::null_value());
+ __ cmp(object, factory()->null_value());
__ j(equal, &false_result);
// String values are not instances of anything.
@@ -1881,7 +2032,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ jmp(deferred->entry());
__ bind(&false_result);
- __ mov(ToRegister(instr->result()), Factory::false_value());
+ __ mov(ToRegister(instr->result()), factory()->false_value());
// Here result holds either the true or the false object. The deferred
// code also produces a true or false object.
@@ -1912,8 +2063,6 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ mov(InstanceofStub::right(), Immediate(instr->function()));
static const int kAdditionalDelta = 16;
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
- Label before_push_delta;
- __ bind(&before_push_delta);
__ mov(temp, Immediate(delta));
__ StoreToSafepointRegisterSlot(temp, temp);
CallCodeGeneric(stub.GetCode(),
@@ -1956,36 +2105,17 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
if (op == Token::GT || op == Token::LTE) {
condition = ReverseCondition(condition);
}
- NearLabel true_value, done;
+ Label true_value, done;
__ test(eax, Operand(eax));
- __ j(condition, &true_value);
- __ mov(ToRegister(instr->result()), Factory::false_value());
- __ jmp(&done);
+ __ j(condition, &true_value, Label::kNear);
+ __ mov(ToRegister(instr->result()), factory()->false_value());
+ __ jmp(&done, Label::kNear);
__ bind(&true_value);
- __ mov(ToRegister(instr->result()), Factory::true_value());
+ __ mov(ToRegister(instr->result()), factory()->true_value());
__ bind(&done);
}
-void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
-
- // The compare stub expects compare condition and the input operands
- // reversed for GT and LTE.
- Condition condition = ComputeCompareCondition(op);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
- __ test(eax, Operand(eax));
- EmitBranch(true_block, false_block, condition);
-}
-
-
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace) {
// Preserve the return value on the stack and rely on the runtime call
@@ -1998,21 +2128,34 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ mov(esp, ebp);
__ pop(ebp);
- __ Ret((ParameterCount() + 1) * kPointerSize, ecx);
+ __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(result, Operand::Cell(instr->hydrogen()->cell()));
if (instr->hydrogen()->check_hole_value()) {
- __ cmp(result, Factory::the_hole_value());
+ __ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
}
-void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->global_object()).is(eax));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ __ mov(ecx, instr->name());
+ RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
+ RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->InputAt(0));
Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
@@ -2021,7 +2164,7 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->check_hole_value()) {
- __ cmp(cell_operand, Factory::the_hole_value());
+ __ cmp(cell_operand, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
@@ -2030,6 +2173,19 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
}
+void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->global_object()).is(edx));
+ ASSERT(ToRegister(instr->value()).is(eax));
+
+ __ mov(ecx, instr->name());
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED);
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2050,7 +2206,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
__ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
@@ -2061,13 +2217,82 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
+void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name) {
+ LookupResult lookup;
+ type->LookupInDescriptors(NULL, *name, &lookup);
+ ASSERT(lookup.IsProperty() &&
+ (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
+ if (lookup.type() == FIELD) {
+ int index = lookup.GetLocalFieldIndexFromMap(*type);
+ int offset = index * kPointerSize;
+ if (index < 0) {
+ // Negative property indices are in-object properties, indexed
+ // from the end of the fixed part of the object.
+ __ mov(result, FieldOperand(object, offset + type->instance_size()));
+ } else {
+ // Non-negative property indices are in the properties array.
+ __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
+ }
+ } else {
+ Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
+ LoadHeapObject(result, Handle<HeapObject>::cast(function));
+ }
+}
+
+
+void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
+ Register object = ToRegister(instr->object());
+ Register result = ToRegister(instr->result());
+
+ int map_count = instr->hydrogen()->types()->length();
+ Handle<String> name = instr->hydrogen()->name();
+ if (map_count == 0) {
+ ASSERT(instr->hydrogen()->need_generic());
+ __ mov(ecx, name);
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ } else {
+ Label done;
+ for (int i = 0; i < map_count - 1; ++i) {
+ Handle<Map> map = instr->hydrogen()->types()->at(i);
+ Label next;
+ __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+ __ j(not_equal, &next, Label::kNear);
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ jmp(&done, Label::kNear);
+ __ bind(&next);
+ }
+ Handle<Map> map = instr->hydrogen()->types()->last();
+ __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+ if (instr->hydrogen()->need_generic()) {
+ Label generic;
+ __ j(not_equal, &generic, Label::kNear);
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ jmp(&done, Label::kNear);
+ __ bind(&generic);
+ __ mov(ecx, name);
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ } else {
+ DeoptimizeIf(not_equal, instr->environment());
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ }
+ __ bind(&done);
+ }
+}
+
+
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(eax));
ASSERT(ToRegister(instr->result()).is(eax));
__ mov(ecx, instr->name());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
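The new polymorphic load emits map_count - 1 guarded fast paths, a final map check, and then either a generic-IC tail or a deoptimization depending on need_generic; EmitLoadFieldOrConstantFunction supplies each fast path, indexing in-object fields by negative index and the properties array otherwise. A control-flow sketch (names and the Outcome enum are hypothetical):

    #include <vector>

    enum Outcome { FAST, GENERIC, DEOPT };

    Outcome DispatchNamedLoad(const void* map,
                              const std::vector<const void*>& expected_maps,
                              bool need_generic, int* case_index) {
      int last = static_cast<int>(expected_maps.size()) - 1;
      for (int i = 0; i < last; ++i) {
        if (map == expected_maps[i]) { *case_index = i; return FAST; }
      }
      if (map == expected_maps[last]) { *case_index = last; return FAST; }
      return need_generic ? GENERIC : DEOPT;  // LoadIC call vs DeoptimizeIf
    }

    int main() {
      int idx = -1;
      int m0, m1;
      std::vector<const void*> maps = {&m0, &m1};
      return DispatchNamedLoad(&m1, maps, false, &idx) == FAST ? 0 : 1;
    }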
@@ -2082,27 +2307,27 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
DeoptimizeIf(not_equal, instr->environment());
// Check whether the function has an instance prototype.
- NearLabel non_instance;
+ Label non_instance;
__ test_b(FieldOperand(result, Map::kBitFieldOffset),
1 << Map::kHasNonInstancePrototype);
- __ j(not_zero, &non_instance);
+ __ j(not_zero, &non_instance, Label::kNear);
// Get the prototype or initial map from the function.
__ mov(result,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
- __ cmp(Operand(result), Immediate(Factory::the_hole_value()));
+ __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
DeoptimizeIf(equal, instr->environment());
// If the function does not have an initial map, we're done.
- NearLabel done;
+ Label done;
__ CmpObjectType(result, MAP_TYPE, temp);
- __ j(not_equal, &done);
+ __ j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
__ mov(result, FieldOperand(result, Map::kPrototypeOffset));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// Non-instance prototype: Fetch prototype from constructor field
// in the function's map.
@@ -2119,26 +2344,40 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
Register input = ToRegister(instr->InputAt(0));
__ mov(result, FieldOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
- NearLabel done;
+ Label done, ok, fail;
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- __ j(equal, &done);
+ Immediate(factory()->fixed_array_map()));
+ __ j(equal, &done, Label::kNear);
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::pixel_array_map()));
- __ j(equal, &done);
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::fixed_cow_array_map()));
- __ Check(equal, "Check for fast elements or pixel array failed.");
+ Immediate(factory()->fixed_cow_array_map()));
+ __ j(equal, &done, Label::kNear);
+ Register temp((result.is(eax)) ? ebx : eax);
+ __ push(temp);
+ __ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
+ __ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
+ __ and_(temp, Map::kElementsKindMask);
+ __ shr(temp, Map::kElementsKindShift);
+ __ cmp(temp, JSObject::FAST_ELEMENTS);
+ __ j(equal, &ok, Label::kNear);
+ __ cmp(temp, JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ __ j(less, &fail, Label::kNear);
+ __ cmp(temp, JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ __ j(less_equal, &ok, Label::kNear);
+ __ bind(&fail);
+ __ Abort("Check for fast or external elements failed.");
+ __ bind(&ok);
+ __ pop(temp);
__ bind(&done);
}
}
-void LCodeGen::DoLoadPixelArrayExternalPointer(
- LLoadPixelArrayExternalPointer* instr) {
+void LCodeGen::DoLoadExternalArrayPointer(
+ LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->InputAt(0));
- __ mov(result, FieldOperand(input, PixelArray::kExternalPointerOffset));
+ __ mov(result, FieldOperand(input,
+ ExternalArray::kExternalPointerOffset));
}
@@ -2170,19 +2409,80 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
FixedArray::kHeaderSize));
// Check for the hole value.
- __ cmp(result, Factory::the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ cmp(result, factory()->the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
}
-void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
- Register external_pointer = ToRegister(instr->external_pointer());
- Register key = ToRegister(instr->key());
- Register result = ToRegister(instr->result());
- ASSERT(result.is(external_pointer));
+Operand LCodeGen::BuildExternalArrayOperand(
+ LOperand* external_pointer,
+ LOperand* key,
+ JSObject::ElementsKind elements_kind) {
+ Register external_pointer_reg = ToRegister(external_pointer);
+ int shift_size = ElementsKindToShiftSize(elements_kind);
+ if (key->IsConstantOperand()) {
+ int constant_value = ToInteger32(LConstantOperand::cast(key));
+ if (constant_value & 0xF0000000) {
+ Abort("array index constant value too big");
+ }
+ return Operand(external_pointer_reg, constant_value * (1 << shift_size));
+ } else {
+ ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
+ return Operand(external_pointer_reg, ToRegister(key), scale_factor, 0);
+ }
+}
- // Load the result.
- __ movzx_b(result, Operand(external_pointer, key, times_1, 0));
+
+void LCodeGen::DoLoadKeyedSpecializedArrayElement(
+ LLoadKeyedSpecializedArrayElement* instr) {
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
+ instr->key(), elements_kind));
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ XMMRegister result(ToDoubleRegister(instr->result()));
+ __ movss(result, operand);
+ __ cvtss2sd(result, result);
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ __ movdbl(ToDoubleRegister(instr->result()), operand);
+ } else {
+ Register result(ToRegister(instr->result()));
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ __ movsx_b(result, operand);
+ break;
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ movzx_b(result, operand);
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ __ movsx_w(result, operand);
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ movzx_w(result, operand);
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ __ mov(result, operand);
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ mov(result, operand);
+ __ test(result, Operand(result));
+ // TODO(danno): we could be more clever here, perhaps having a special
+ // version of the stub that detects if the overflow case actually
+        // happens, and generates code that returns a double rather than an int.
+ DeoptimizeIf(negative, instr->environment());
+ break;
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
}
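BuildExternalArrayOperand scales the key by 1 << shift_size, where shift_size is log2 of the element width for the array's elements kind, and the load then sign- or zero-extends as the kind requires (with unsigned int32 values deoptimizing when they do not fit in an int32). A sketch of the width table (the kinds here are illustrative labels, not V8's enum):

    #include <cassert>

    enum ElementWidth { k8Bit, k16Bit, k32Bit, k64Bit };

    int ShiftSizeFor(ElementWidth w) {
      switch (w) {
        case k8Bit:  return 0;  // times_1: byte / pixel arrays
        case k16Bit: return 1;  // times_2: short arrays
        case k32Bit: return 2;  // times_4: int / float arrays
        case k64Bit: return 3;  // times_8: double arrays
      }
      return 0;
    }

    int main() {
      int key = 5;
      assert((key << ShiftSizeFor(k32Bit)) == 20);  // byte offset of element 5
    }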
@@ -2191,7 +2491,7 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(edx));
ASSERT(ToRegister(instr->key()).is(eax));
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
@@ -2200,16 +2500,16 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
// Check for an arguments adaptor frame.
- NearLabel done, adapted;
+ Label done, adapted;
__ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
__ cmp(Operand(result),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adapted);
+ __ j(equal, &adapted, Label::kNear);
// No arguments adaptor frame.
__ mov(result, Operand(ebp));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// Arguments adaptor frame present.
__ bind(&adapted);
@@ -2225,12 +2525,12 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
Operand elem = ToOperand(instr->InputAt(0));
Register result = ToRegister(instr->result());
- NearLabel done;
+ Label done;
// If there is no arguments adaptor frame, the number of arguments is fixed.
__ cmp(ebp, elem);
__ mov(result, Immediate(scope()->num_parameters()));
- __ j(equal, &done);
+ __ j(equal, &done, Label::kNear);
// Arguments adaptor frame present. Get argument length from there.
__ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2253,20 +2553,36 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
ASSERT(function.is(edi)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(eax));
- // If the receiver is null or undefined, we have to pass the global object
- // as a receiver.
- NearLabel global_object, receiver_ok;
- __ cmp(receiver, Factory::null_value());
- __ j(equal, &global_object);
- __ cmp(receiver, Factory::undefined_value());
- __ j(equal, &global_object);
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
+ Label global_object, receiver_ok;
+
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ mov(scratch,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, &receiver_ok, Label::kNear);
+
+ // Do not transform the receiver to object for builtins.
+ __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, &receiver_ok, Label::kNear);
+
+ // Normal function. Replace undefined or null with global receiver.
+ __ cmp(receiver, factory()->null_value());
+ __ j(equal, &global_object, Label::kNear);
+ __ cmp(receiver, factory()->undefined_value());
+ __ j(equal, &global_object, Label::kNear);
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
DeoptimizeIf(equal, instr->environment());
- __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, scratch);
+ __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
DeoptimizeIf(below, instr->environment());
- __ jmp(&receiver_ok);
+ __ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
// TODO(kmillikin): We have a hydrogen value for the global object. See
@@ -2274,6 +2590,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// here.
__ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
__ mov(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
+ __ mov(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
// Copy the arguments to this function possibly from the
@@ -2287,10 +2605,10 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Loop through the arguments pushing them onto the execution
// stack.
- NearLabel invoke, loop;
+ Label invoke, loop;
// length is a small non-negative integer, due to the test above.
__ test(length, Operand(length));
- __ j(zero, &invoke);
+ __ j(zero, &invoke, Label::kNear);
__ bind(&loop);
__ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
__ dec(length);
@@ -2306,8 +2624,9 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
SafepointGenerator safepoint_generator(this,
pointers,
env->deoptimization_index());
- v8::internal::ParameterCount actual(eax);
- __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
+ ParameterCount actual(eax);
+ __ InvokeFunction(function, actual, CALL_FUNCTION,
+ safepoint_generator, CALL_AS_METHOD);
}
@@ -2321,6 +2640,12 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
}
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
__ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2330,8 +2655,8 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoOuterContext(LOuterContext* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ mov(result, Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ mov(result, FieldOperand(result, JSFunction::kContextOffset));
+ __ mov(result,
+ Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
@@ -2351,10 +2676,11 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int arity,
- LInstruction* instr) {
+ LInstruction* instr,
+ CallKind call_kind) {
// Change context if needed.
bool change_context =
- (graph()->info()->closure()->context() != function->context()) ||
+ (info()->closure()->context() != function->context()) ||
scope()->contains_with() ||
(scope()->num_heap_slots() > 0);
if (change_context) {
@@ -2373,7 +2699,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
RecordPosition(pointers->position());
// Invoke function.
- if (*function == *graph()->info()->closure()) {
+ __ SetCallKind(ecx, call_kind);
+ if (*function == *info()->closure()) {
__ CallSelf();
} else {
__ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
@@ -2387,14 +2714,17 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
__ mov(edi, instr->function());
- CallKnownFunction(instr->function(), instr->arity(), instr);
+ CallKnownFunction(instr->function(),
+ instr->arity(),
+ instr,
+ CALL_AS_METHOD);
}
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
Register input_reg = ToRegister(instr->InputAt(0));
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ factory()->heap_number_map());
DeoptimizeIf(not_equal, instr->environment());
Label done;
@@ -2475,7 +2805,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
if (r.IsDouble()) {
XMMRegister scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- __ pxor(scratch, scratch);
+ __ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
__ pand(input_reg, scratch);
} else if (r.IsInteger32()) {
@@ -2485,8 +2815,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
new DeferredMathAbsTaggedHeapNumber(this, instr);
Register input_reg = ToRegister(instr->InputAt(0));
// Smi check.
- __ test(input_reg, Immediate(kSmiTagMask));
- __ j(not_zero, deferred->entry());
+ __ JumpIfNotSmi(input_reg, deferred->entry());
EmitIntegerMathAbs(instr);
__ bind(deferred->exit());
}
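
The double case of DoMathAbs a few lines up computes abs without a branch: 0.0 - x differs from x only in the sign bit for nonzero x, so ANDing the two bit patterns clears the sign while keeping exponent and mantissa; both zeros come out as +0.0 either way. A minimal C++ sketch of the xorps/subsd/pand sequence (illustrative only, not V8 code):

#include <cstdint>
#include <cstring>

double BitwiseAbs(double x) {
  double neg = 0.0 - x;               // subsd: scratch = 0.0 - x
  uint64_t xb, nb;
  std::memcpy(&xb, &x, sizeof xb);
  std::memcpy(&nb, &neg, sizeof nb);
  uint64_t bits = xb & nb;            // pand: sign bit cleared
  std::memcpy(&x, &bits, sizeof x);
  return x;                           // |x|; -0.0 maps to +0.0
}
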
@@ -2497,7 +2826,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
+ __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -2520,25 +2849,16 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ Label below_half, done;
// xmm_scratch = 0.5
ExternalReference one_half = ExternalReference::address_of_one_half();
__ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
+ __ ucomisd(xmm_scratch, input_reg);
+ __ j(above, &below_half);
// input = input + 0.5
__ addsd(input_reg, xmm_scratch);
- // We need to return -0 for the input range [-0.5, 0[, otherwise
- // compute Math.floor(value + 0.5).
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(below_equal, instr->environment());
- } else {
- // If we don't need to bailout on -0, we check only bailout
- // on negative inputs.
- __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(below, instr->environment());
- }
// Compute Math.floor(value + 0.5).
// Use truncating instruction (OK because input is positive).
@@ -2547,6 +2867,27 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
// Overflow is signalled with minint.
__ cmp(output_reg, 0x80000000u);
DeoptimizeIf(equal, instr->environment());
+ __ jmp(&done);
+
+ __ bind(&below_half);
+
+ // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
+ // we can ignore the difference between a result of -0 and +0.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // If the sign is positive, we return +0.
+ __ movmskpd(output_reg, input_reg);
+ __ test(output_reg, Immediate(1));
+ DeoptimizeIf(not_zero, instr->environment());
+ } else {
+ // If the input is >= -0.5, we return +0.
+ __ mov(output_reg, Immediate(0xBF000000));
+ __ movd(xmm_scratch, Operand(output_reg));
+ __ cvtss2sd(xmm_scratch, xmm_scratch);
+ __ ucomisd(input_reg, xmm_scratch);
+ DeoptimizeIf(below, instr->environment());
+ }
+ __ Set(output_reg, Immediate(0));
+ __ bind(&done);
}
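
The rewritten round sequence splits on 0.5 first: anything not below 0.5 (including NaN) takes the add-and-truncate path, while everything else produces zero, deoptimizing only where a -0 result would be observable. The 0xBF000000 immediate is the single-precision bit pattern of -0.5, widened to double with cvtss2sd. A scalar sketch of the semantics, assuming cvttsd2si's convention of producing 0x80000000 for unrepresentable inputs:

#include <cmath>
#include <cstdint>
#include <cstring>

// Returns false where the generated code would deoptimize.
bool MathRound(double input, bool bailout_on_minus_zero, int32_t* out) {
  if (!(input < 0.5)) {  // j(above, &below_half) not taken; NaN lands here
    double sum = input + 0.5;
    if (std::isnan(sum) || sum >= 2147483648.0) {
      return false;                    // cvttsd2si gives 0x80000000 => deopt
    }
    *out = static_cast<int32_t>(sum);  // truncation == floor, sum >= 1 here
    return true;
  }
  if (bailout_on_minus_zero) {
    uint64_t bits;
    std::memcpy(&bits, &input, sizeof bits);
    if (bits >> 63) return false;      // movmskpd: sign bit set => deopt
  } else if (input < -0.5) {
    return false;                      // ucomisd against -0.5 => deopt
  }
  *out = 0;
  return true;
}
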
@@ -2561,7 +2902,7 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
- __ xorpd(xmm_scratch, xmm_scratch);
+ __ xorps(xmm_scratch, xmm_scratch);
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
__ sqrtsd(input_reg, input_reg);
}
@@ -2572,13 +2913,15 @@ void LCodeGen::DoPower(LPower* instr) {
LOperand* right = instr->InputAt(1);
DoubleRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
+
if (exponent_type.IsDouble()) {
// It is safe to use ebx directly since the instruction is marked
// as a call.
__ PrepareCallCFunction(4, ebx);
__ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
__ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
- __ CallCFunction(ExternalReference::power_double_double_function(), 4);
+ __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+ 4);
} else if (exponent_type.IsInteger32()) {
// It is safe to use ebx directly since the instruction is marked
// as a call.
@@ -2586,15 +2929,15 @@ void LCodeGen::DoPower(LPower* instr) {
__ PrepareCallCFunction(4, ebx);
__ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
__ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
- __ CallCFunction(ExternalReference::power_double_int_function(), 4);
+ __ CallCFunction(ExternalReference::power_double_int_function(isolate()),
+ 4);
} else {
ASSERT(exponent_type.IsTagged());
CpuFeatures::Scope scope(SSE2);
Register right_reg = ToRegister(right);
Label non_smi, call;
- __ test(right_reg, Immediate(kSmiTagMask));
- __ j(not_zero, &non_smi);
+ __ JumpIfNotSmi(right_reg, &non_smi);
__ SmiUntag(right_reg);
__ cvtsi2sd(result_reg, Operand(right_reg));
__ jmp(&call);
@@ -2611,7 +2954,8 @@ void LCodeGen::DoPower(LPower* instr) {
__ PrepareCallCFunction(4, ebx);
__ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
__ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
- __ CallCFunction(ExternalReference::power_double_double_function(), 4);
+ __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+ 4);
}
// Return value is in st(0) on ia32.
@@ -2624,10 +2968,32 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ Label positive, done, zero;
+ __ xorps(xmm0, xmm0);
+ __ ucomisd(input_reg, xmm0);
+ __ j(above, &positive, Label::kNear);
+ __ j(equal, &zero, Label::kNear);
+ ExternalReference nan = ExternalReference::address_of_nan();
+ __ movdbl(input_reg, Operand::StaticVariable(nan));
+ __ jmp(&done, Label::kNear);
+ __ bind(&zero);
+ __ push(Immediate(0xFFF00000));
+ __ push(Immediate(0));
+ __ movdbl(input_reg, Operand(esp, 0));
+ __ add(Operand(esp), Immediate(kDoubleSize));
+ __ jmp(&done, Label::kNear);
+ __ bind(&positive);
+ __ fldln2();
+ __ sub(Operand(esp), Immediate(kDoubleSize));
+ __ movdbl(Operand(esp, 0), input_reg);
+ __ fld_d(Operand(esp, 0));
+ __ fyl2x();
+ __ fstp_d(Operand(esp, 0));
+ __ movdbl(input_reg, Operand(esp, 0));
+ __ add(Operand(esp), Immediate(kDoubleSize));
+ __ bind(&done);
}
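
The log fast path replaces the transcendental cache stub with inline x87 code built on the identity ln(x) = ln(2) * log2(x): fldln2 pushes ln(2) and fyl2x computes ST(1) * log2(ST(0)). The two early-outs load canonical constants: the nan external reference for negative (or NaN) inputs, and the bit pattern 0xFFF00000'00000000, which is IEEE-754 -Infinity, for +-0. A scalar sketch:

#include <cmath>
#include <limits>

double MathLog(double x) {
  if (x > 0.0) {
    return std::log(2.0) * std::log2(x);              // fldln2; fyl2x
  }
  if (x == 0.0) {
    return -std::numeric_limits<double>::infinity();  // 0xFFF00000 00000000
  }
  return std::numeric_limits<double>::quiet_NaN();    // x < 0 or x is NaN
}
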
@@ -2680,13 +3046,29 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
}
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->function()).is(edi));
+ ASSERT(instr->HasPointerMap());
+ ASSERT(instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ SafepointGenerator generator(this, pointers, env->deoptimization_index());
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+}
+
+
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->key()).is(ecx));
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
+ Handle<Code> ic = isolate()->stub_cache()->
+ ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
@@ -2696,9 +3078,11 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
__ mov(ecx, instr->name());
- CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
}
@@ -2707,7 +3091,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+ CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_IMPLICIT);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
__ Drop(1);
}
@@ -2718,16 +3102,18 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
__ mov(ecx, instr->name());
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
}
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
__ mov(edi, instr->target());
- CallKnownFunction(instr->target(), instr->arity(), instr);
+ CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@@ -2736,7 +3122,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
- Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
__ Set(eax, Immediate(instr->arity()));
CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr, CONTEXT_ADJUSTED);
}
@@ -2783,9 +3169,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(eax));
__ mov(ecx, instr->name());
- Handle<Code> ic(Builtins::builtin(
- info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
@@ -2796,22 +3182,42 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
-void LCodeGen::DoStorePixelArrayElement(LStorePixelArrayElement* instr) {
- Register external_pointer = ToRegister(instr->external_pointer());
- Register key = ToRegister(instr->key());
- Register value = ToRegister(instr->value());
- ASSERT(ToRegister(instr->TempAt(0)).is(eax));
-
- __ mov(eax, value);
- { // Clamp the value to [0..255].
- NearLabel done;
- __ test(eax, Immediate(0xFFFFFF00));
- __ j(zero, &done);
- __ setcc(negative, eax); // 1 if negative, 0 if positive.
- __ dec_b(eax); // 0 if negative, 255 if positive.
- __ bind(&done);
+void LCodeGen::DoStoreKeyedSpecializedArrayElement(
+ LStoreKeyedSpecializedArrayElement* instr) {
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
+ instr->key(), elements_kind));
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
+ __ movss(operand, xmm0);
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ __ movdbl(operand, ToDoubleRegister(instr->value()));
+ } else {
+ Register value = ToRegister(instr->value());
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ __ mov_b(operand, value);
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ mov_w(operand, value);
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ mov(operand, value);
+ break;
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
}
- __ mov_b(Operand(external_pointer, key, times_1, 0), eax);
}
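
The new specialized-array store dispatches on the elements kind: one-, two-, and four-byte integer kinds store the low bits of the untagged value with mov_b/mov_w/mov, EXTERNAL_FLOAT_ELEMENTS narrows the double through cvtsd2ss before a movss, and EXTERNAL_DOUBLE_ELEMENTS stores the double directly. Pixel stores no longer clamp inline because clamping moved into the dedicated Clamp*ToUint8 instructions further down. A sketch of the width selection (the kind enum here is illustrative, not the JSObject one):

#include <cstddef>
#include <cstdint>
#include <cstring>

enum StoreKind { kByte, kWord, kDword, kFloat32, kFloat64 };

void StoreTypedElement(uint8_t* base, size_t index, StoreKind kind,
                       int32_t value, double dvalue) {
  switch (kind) {
    case kByte:                                        // mov_b
      base[index] = static_cast<uint8_t>(value);
      break;
    case kWord: {                                      // mov_w
      uint16_t v = static_cast<uint16_t>(value);
      std::memcpy(base + 2 * index, &v, sizeof v);
      break;
    }
    case kDword:                                       // mov
      std::memcpy(base + 4 * index, &value, sizeof value);
      break;
    case kFloat32: {                                   // cvtsd2ss + movss
      float f = static_cast<float>(dvalue);
      std::memcpy(base + 4 * index, &f, sizeof f);
      break;
    }
    case kFloat64:                                     // movdbl
      std::memcpy(base + 8 * index, &dvalue, sizeof dvalue);
      break;
  }
}
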
@@ -2853,9 +3259,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(ecx));
ASSERT(ToRegister(instr->value()).is(eax));
- Handle<Code> ic(Builtins::builtin(
- info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
@@ -2894,7 +3300,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
DeferredStringCharCodeAt* deferred =
new DeferredStringCharCodeAt(this, instr);
- NearLabel flat_string, ascii_string, done;
+ Label flat_string, ascii_string, done;
// Fetch the instance type of the receiver into result register.
__ mov(result, FieldOperand(string, HeapObject::kMapOffset));
@@ -2903,7 +3309,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
// We need special handling for non-flat strings.
STATIC_ASSERT(kSeqStringTag == 0);
__ test(result, Immediate(kStringRepresentationMask));
- __ j(zero, &flat_string);
+ __ j(zero, &flat_string, Label::kNear);
// Handle non-flat strings.
__ test(result, Immediate(kIsConsStringMask));
@@ -2915,7 +3321,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
// the case we would rather go to the runtime system now to flatten
// the string.
__ cmp(FieldOperand(string, ConsString::kSecondOffset),
- Immediate(Factory::empty_string()));
+ Immediate(factory()->empty_string()));
__ j(not_equal, deferred->entry());
// Get the first of the two strings and load its instance type.
__ mov(string, FieldOperand(string, ConsString::kFirstOffset));
@@ -2930,7 +3336,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
__ bind(&flat_string);
STATIC_ASSERT(kAsciiStringTag != 0);
__ test(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
+ __ j(not_zero, &ascii_string, Label::kNear);
// Two-byte string.
// Load the two-byte character code into the result register.
@@ -2946,7 +3352,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
times_2,
SeqTwoByteString::kHeaderSize));
}
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// ASCII string.
// Load the byte into the result register.
@@ -2996,6 +3402,53 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
}
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode: public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new DeferredStringCharFromCode(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+ ASSERT(!char_code.is(result));
+
+ __ cmp(char_code, String::kMaxAsciiCharCode);
+ __ j(above, deferred->entry());
+ __ Set(result, Immediate(factory()->single_character_string_cache()));
+ __ mov(result, FieldOperand(result,
+ char_code, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(result, factory()->undefined_value());
+ __ j(equal, deferred->entry());
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Set(result, Immediate(0));
+
+ PushSafepointRegistersScope scope(this);
+ __ SmiTag(char_code);
+ __ push(char_code);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ __ StoreToSafepointRegisterSlot(result, eax);
+}
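
StringCharFromCode serves one-byte char codes from the single-character string cache and falls back to the deferred Runtime::kCharFromCode call for two-byte codes or cache misses (an undefined cache slot). A control-flow sketch, with std::string standing in for V8 string handles and the runtime call reduced to a placeholder that only models the one-byte case:

#include <cstdint>
#include <string>

// Placeholder for the deferred Runtime::kCharFromCode call; the real
// runtime also builds two-byte strings for codes above 0x7F.
static std::string RuntimeCharFromCode(uint16_t code) {
  return std::string(1, static_cast<char>(code));
}

std::string CharFromCode(uint16_t code, const std::string cache[128]) {
  if (code <= 0x7F && !cache[code].empty()) {  // String::kMaxAsciiCharCode
    return cache[code];                        // fast path: cache hit
  }
  return RuntimeCharFromCode(code);            // deferred->entry()
}
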
+
+
void LCodeGen::DoStringLength(LStringLength* instr) {
Register string = ToRegister(instr->string());
Register result = ToRegister(instr->result());
@@ -3003,6 +3456,22 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
}
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ if (instr->left()->IsConstantOperand()) {
+ __ push(ToImmediate(instr->left()));
+ } else {
+ __ push(ToOperand(instr->left()));
+ }
+ if (instr->right()->IsConstantOperand()) {
+ __ push(ToImmediate(instr->right()));
+ } else {
+ __ push(ToOperand(instr->right()));
+ }
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+}
+
+
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() || input->IsStackSlot());
@@ -3044,13 +3513,13 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
// the value in there. If that fails, call the runtime system.
- NearLabel done;
+ Label done;
__ SmiUntag(reg);
__ xor_(reg, 0x80000000);
__ cvtsi2sd(xmm0, Operand(reg));
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, no_reg, &slow);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
}
// Slow case: Call the runtime system to do the number allocation.
@@ -3131,32 +3600,35 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
+ bool deoptimize_on_undefined,
LEnvironment* env) {
- NearLabel load_smi, heap_number, done;
+ Label load_smi, done;
// Smi check.
- __ test(input_reg, Immediate(kSmiTagMask));
- __ j(zero, &load_smi, not_taken);
+ __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(equal, &heap_number);
+ factory()->heap_number_map());
+ if (deoptimize_on_undefined) {
+ DeoptimizeIf(not_equal, env);
+ } else {
+ Label heap_number;
+ __ j(equal, &heap_number, Label::kNear);
- __ cmp(input_reg, Factory::undefined_value());
- DeoptimizeIf(not_equal, env);
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, env);
- // Convert undefined to NaN.
- __ push(input_reg);
- __ mov(input_reg, Factory::nan_value());
- __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ pop(input_reg);
- __ jmp(&done);
+ // Convert undefined to NaN.
+ ExternalReference nan = ExternalReference::address_of_nan();
+ __ movdbl(result_reg, Operand::StaticVariable(nan));
+ __ jmp(&done, Label::kNear);
+ __ bind(&heap_number);
+ }
// Heap number to XMM conversion.
- __ bind(&heap_number);
__ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// Smi to XMM conversion
__ bind(&load_smi);
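
EmitNumberUntagD now takes a deoptimize_on_undefined flag: callers that have proven the value cannot be undefined deopt straight off the heap-number map check, while the general path converts undefined to NaN by loading the canonical value from the nan external reference instead of the old push/mov/pop dance through the heap object. The cases, sketched with a toy tagged representation (not V8's actual encoding):

#include <cmath>
#include <cstdint>

struct Tagged {
  bool is_smi, is_heap_number, is_undefined;
  int32_t smi_value;
  double number_value;
};

// Returns false where the generated code would deoptimize.
bool NumberUntagD(const Tagged& v, bool deoptimize_on_undefined,
                  double* result) {
  if (v.is_smi) {                      // JumpIfSmi -> cvtsi2sd
    *result = v.smi_value;
    return true;
  }
  if (v.is_heap_number) {              // map check -> movdbl
    *result = v.number_value;
    return true;
  }
  if (!deoptimize_on_undefined && v.is_undefined) {
    *result = std::nan("");            // load from address_of_nan
    return true;
  }
  return false;                        // DeoptimizeIf(not_equal, env)
}
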
@@ -3178,26 +3650,26 @@ class DeferredTaggedToI: public LDeferredCode {
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- NearLabel done, heap_number;
+ Label done, heap_number;
Register input_reg = ToRegister(instr->InputAt(0));
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ factory()->heap_number_map());
if (instr->truncating()) {
- __ j(equal, &heap_number);
+ __ j(equal, &heap_number, Label::kNear);
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
- __ cmp(input_reg, Factory::undefined_value());
+ __ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr->environment());
__ mov(input_reg, 0);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&heap_number);
if (CpuFeatures::IsSupported(SSE3)) {
CpuFeatures::Scope scope(SSE3);
- NearLabel convert;
+ Label convert;
      // Use a more powerful conversion when SSE3 is available.
// Load x87 register with heap number.
__ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
@@ -3207,7 +3679,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
const uint32_t kTooBigExponent =
(HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
__ cmp(Operand(input_reg), Immediate(kTooBigExponent));
- __ j(less, &convert);
+ __ j(less, &convert, Label::kNear);
// Pop FPU stack before deoptimizing.
__ ffree(0);
__ fincstp();
@@ -3221,7 +3693,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
__ add(Operand(esp), Immediate(kDoubleSize));
} else {
- NearLabel deopt;
XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
__ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, Operand(xmm0));
@@ -3268,8 +3739,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
// Smi check.
- __ test(input_reg, Immediate(kSmiTagMask));
- __ j(not_zero, deferred->entry());
+ __ JumpIfNotSmi(input_reg, deferred->entry());
// Smi to int32 conversion
__ SmiUntag(input_reg); // Untag smi.
@@ -3287,7 +3757,9 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input_reg = ToRegister(input);
XMMRegister result_reg = ToDoubleRegister(result);
- EmitNumberUntagD(input_reg, result_reg, instr->environment());
+ EmitNumberUntagD(input_reg, result_reg,
+ instr->hydrogen()->deoptimize_on_undefined(),
+ instr->environment());
}
@@ -3308,8 +3780,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
if (CpuFeatures::IsSupported(SSE3)) {
      // This will deoptimize if the exponent of the input is out of range.
CpuFeatures::Scope scope(SSE3);
- NearLabel convert, done;
- __ j(not_equal, &done);
+ Label convert, done;
+ __ j(not_equal, &done, Label::kNear);
__ sub(Operand(esp), Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), input_reg);
// Get exponent alone and check for too-big exponent.
@@ -3318,7 +3790,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
const uint32_t kTooBigExponent =
(HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
__ cmp(Operand(result_reg), Immediate(kTooBigExponent));
- __ j(less, &convert);
+ __ j(less, &convert, Label::kNear);
__ add(Operand(esp), Immediate(kDoubleSize));
DeoptimizeIf(no_condition, instr->environment());
__ bind(&convert);
@@ -3329,13 +3801,13 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ add(Operand(esp), Immediate(kDoubleSize));
__ bind(&done);
} else {
- NearLabel done;
+ Label done;
Register temp_reg = ToRegister(instr->TempAt(0));
XMMRegister xmm_scratch = xmm0;
// If cvttsd2si succeeded, we're done. Otherwise, we attempt
// manual conversion.
- __ j(not_equal, &done);
+ __ j(not_equal, &done, Label::kNear);
// Get high 32 bits of the input in result_reg and temp_reg.
__ pshufd(xmm_scratch, input_reg, 1);
@@ -3385,7 +3857,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ bind(&done);
}
} else {
- NearLabel done;
+ Label done;
__ cvttsd2si(result_reg, Operand(input_reg));
__ cvtsi2sd(xmm0, Operand(result_reg));
__ ucomisd(xmm0, input_reg);
@@ -3395,7 +3867,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
// The integer converted back is equal to the original. We
// only have to test if we got -0 as an input.
__ test(result_reg, Operand(result_reg));
- __ j(not_zero, &done);
+ __ j(not_zero, &done, Label::kNear);
__ movmskpd(result_reg, input_reg);
// Bit 0 contains the sign of the double in input_reg.
// If input was positive, we are ok and return 0, otherwise
@@ -3410,38 +3882,58 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
__ test(ToRegister(input), Immediate(kSmiTagMask));
- DeoptimizeIf(instr->condition(), instr->environment());
+ DeoptimizeIf(not_zero, instr->environment());
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ LOperand* input = instr->InputAt(0);
+ __ test(ToRegister(input), Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
}
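
Both checks compile to one test against kSmiTagMask: an ia32 smi is the 31-bit integer shifted left by one, so the low bit is zero exactly for smis. DoCheckSmi deopts when the bit is set, DoCheckNonSmi when it is clear. In miniature:

#include <cstdint>

// ia32 smi encoding: value << 1, tag bit 0 (kSmiTag == 0, kSmiTagMask == 1).
inline bool IsSmi(uint32_t tagged) { return (tagged & 1u) == 0; }

inline int32_t SmiUntag(uint32_t tagged) {
  return static_cast<int32_t>(tagged) >> 1;  // arithmetic shift, like sar
}

inline uint32_t SmiTag(int32_t value) {
  return static_cast<uint32_t>(value) << 1;
}
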
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
- InstanceType first = instr->hydrogen()->first();
- InstanceType last = instr->hydrogen()->last();
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
- // If there is only one type in the interval check for equality.
- if (first == last) {
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(first));
- DeoptimizeIf(not_equal, instr->environment());
- } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
- // String has a dedicated bit in instance type.
- __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), kIsNotStringMask);
- DeoptimizeIf(not_zero, instr->environment());
- } else {
+ if (instr->hydrogen()->is_interval_check()) {
+ InstanceType first;
+ InstanceType last;
+ instr->hydrogen()->GetCheckInterval(&first, &last);
+
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
static_cast<int8_t>(first));
- DeoptimizeIf(below, instr->environment());
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(last));
- DeoptimizeIf(above, instr->environment());
+
+ // If there is only one type in the interval check for equality.
+ if (first == last) {
+ DeoptimizeIf(not_equal, instr->environment());
+ } else {
+ DeoptimizeIf(below, instr->environment());
+ // Omit check for the last type.
+ if (last != LAST_TYPE) {
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+ static_cast<int8_t>(last));
+ DeoptimizeIf(above, instr->environment());
+ }
+ }
+ } else {
+ uint8_t mask;
+ uint8_t tag;
+ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+ if (IsPowerOf2(mask)) {
+ ASSERT(tag == 0 || IsPowerOf2(tag));
+ __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+ } else {
+ __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ and_(temp, mask);
+ __ cmpb(Operand(temp), tag);
+ DeoptimizeIf(not_equal, instr->environment());
}
}
}
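
Instead of always comparing against a [first, last] interval, the hydrogen instruction now reports either an interval check or a (mask, tag) check over the instance-type byte. Power-of-two masks need only a single test_b, with the tag deciding which outcome passes; general masks take the movzx_b/and/cmpb path. Both shapes reduce to:

#include <cstdint>

// Returns true when the instance type passes the check (no deopt).
bool CheckInterval(uint8_t type, uint8_t first, uint8_t last) {
  if (first == last) return type == first;  // single cmpb, equality
  // Interval form: cmpb for the lower bound; the upper-bound cmpb is
  // omitted when last == LAST_TYPE, since it would be vacuous.
  return type >= first && type <= last;
}

bool CheckMaskAndTag(uint8_t type, uint8_t mask, uint8_t tag) {
  // For a power-of-two mask, (type & mask) is 0 or mask, so one test_b
  // plus the right deopt condition covers both tag values.
  return (type & mask) == tag;
}
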
@@ -3465,10 +3957,58 @@ void LCodeGen::DoCheckMap(LCheckMap* instr) {
}
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ ASSERT(instr->unclamped()->Equals(instr->result()));
+ Register value_reg = ToRegister(instr->result());
+ __ ClampUint8(value_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ ASSERT(instr->unclamped()->Equals(instr->result()));
+ Register input_reg = ToRegister(instr->unclamped());
+ Label is_smi, done, heap_number;
+
+ __ JumpIfSmi(input_reg, &is_smi);
+
+ // Check for heap number
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(equal, &heap_number, Label::kNear);
+
+ // Check for undefined. Undefined is converted to zero for clamping
+ // conversions.
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, instr->environment());
+ __ mov(input_reg, 0);
+ __ jmp(&done, Label::kNear);
+
+ // Heap number
+ __ bind(&heap_number);
+ __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
+ __ jmp(&done, Label::kNear);
+
+  // Smi
+ __ bind(&is_smi);
+ __ SmiUntag(input_reg);
+ __ ClampUint8(input_reg);
+
+ __ bind(&done);
+}
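
DoClampTToUint8 folds three value shapes into a saturating byte: smis untag and clamp as integers, heap numbers clamp as doubles, and undefined becomes zero; any other tagged value deopts. Roughly, assuming the default round-to-nearest-even MXCSR mode that cvtsd2si uses:

#include <cmath>
#include <cstdint>

uint8_t ClampDoubleToUint8(double d) {
  if (!(d > 0.0)) return 0;     // NaN, -0.0 and negatives clamp to 0
  if (d >= 255.0) return 255;
  // cvtsd2si rounds to nearest, ties to even, under default MXCSR.
  return static_cast<uint8_t>(std::lrint(d));
}

uint8_t ClampIntToUint8(int32_t v) {
  if (v < 0) return 0;
  if (v > 255) return 255;
  return static_cast<uint8_t>(v);
}
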
+
+
void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
- if (Heap::InNewSpace(*object)) {
+ if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
- Factory::NewJSGlobalPropertyCell(object);
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
__ mov(result, Operand::Cell(cell));
} else {
__ mov(result, object);
@@ -3538,7 +4078,13 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(instr->hydrogen()->constant_properties()));
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
+ int flags = instr->hydrogen()->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= instr->hydrogen()->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ push(Immediate(Smi::FromInt(flags)));
// Pick the right runtime function to call.
if (instr->hydrogen()->depth() > 1) {
@@ -3552,8 +4098,15 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
}
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+ ASSERT(ToRegister(instr->InputAt(0)).is(eax));
+ __ push(eax);
+ CallRuntime(Runtime::kToFastProperties, 1, instr, CONTEXT_ADJUSTED);
+}
+
+
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- NearLabel materialized;
+ Label materialized;
// Registers will be used as follows:
// edi = JS function.
// ecx = literals array.
@@ -3564,8 +4117,8 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
int literal_offset = FixedArray::kHeaderSize +
instr->hydrogen()->literal_index() * kPointerSize;
__ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, Factory::undefined_value());
- __ j(not_equal, &materialized);
+ __ cmp(ebx, factory()->undefined_value());
+ __ j(not_equal, &materialized, Label::kNear);
// Create regexp literal using runtime function
// Result will be in eax.
@@ -3609,16 +4162,17 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
- if (shared_info->num_literals() == 0 && !pretenure) {
- FastNewClosureStub stub;
+ if (!pretenure && shared_info->num_literals() == 0) {
+ FastNewClosureStub stub(
+ shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
__ push(Immediate(shared_info));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
} else {
__ push(Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(shared_info));
__ push(Immediate(pretenure
- ? Factory::true_value()
- : Factory::false_value()));
+ ? factory()->true_value()
+ : factory()->false_value()));
CallRuntime(Runtime::kNewClosure, 3, instr, RESTORE_CONTEXT);
}
}
@@ -3640,19 +4194,19 @@ void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
Register result = ToRegister(instr->result());
Label true_label;
Label false_label;
- NearLabel done;
+ Label done;
Condition final_branch_condition = EmitTypeofIs(&true_label,
&false_label,
input,
instr->type_literal());
- __ j(final_branch_condition, &true_label);
+ __ j(final_branch_condition, &true_label, Label::kNear);
__ bind(&false_label);
- __ mov(result, Factory::false_value());
- __ jmp(&done);
+ __ mov(result, factory()->false_value());
+ __ jmp(&done, Label::kNear);
__ bind(&true_label);
- __ mov(result, Factory::true_value());
+ __ mov(result, factory()->true_value());
__ bind(&done);
}
@@ -3679,66 +4233,54 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Register input,
Handle<String> type_name) {
Condition final_branch_condition = no_condition;
- if (type_name->Equals(Heap::number_symbol())) {
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, true_label);
+ if (type_name->Equals(heap()->number_symbol())) {
+ __ JumpIfSmi(input, true_label);
__ cmp(FieldOperand(input, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ factory()->heap_number_map());
final_branch_condition = equal;
- } else if (type_name->Equals(Heap::string_symbol())) {
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, false_label);
- __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
+ } else if (type_name->Equals(heap()->string_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
+ __ j(above_equal, false_label);
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
- __ j(not_zero, false_label);
- __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
- final_branch_condition = below;
+ final_branch_condition = zero;
- } else if (type_name->Equals(Heap::boolean_symbol())) {
- __ cmp(input, Factory::true_value());
+ } else if (type_name->Equals(heap()->boolean_symbol())) {
+ __ cmp(input, factory()->true_value());
__ j(equal, true_label);
- __ cmp(input, Factory::false_value());
+ __ cmp(input, factory()->false_value());
final_branch_condition = equal;
- } else if (type_name->Equals(Heap::undefined_symbol())) {
- __ cmp(input, Factory::undefined_value());
+ } else if (type_name->Equals(heap()->undefined_symbol())) {
+ __ cmp(input, factory()->undefined_value());
__ j(equal, true_label);
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, false_label);
+ __ JumpIfSmi(input, false_label);
// Check for undetectable objects => true.
__ mov(input, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
final_branch_condition = not_zero;
- } else if (type_name->Equals(Heap::function_symbol())) {
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, false_label);
- __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label);
- // Regular expressions => 'function' (they are callable).
- __ CmpInstanceType(input, JS_REGEXP_TYPE);
- final_branch_condition = equal;
+ } else if (type_name->Equals(heap()->function_symbol())) {
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ __ JumpIfSmi(input, false_label);
+ __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
+ final_branch_condition = above_equal;
- } else if (type_name->Equals(Heap::object_symbol())) {
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, false_label);
- __ cmp(input, Factory::null_value());
+ } else if (type_name->Equals(heap()->object_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ cmp(input, factory()->null_value());
__ j(equal, true_label);
- // Regular expressions => 'function', not 'object'.
- __ CmpObjectType(input, JS_REGEXP_TYPE, input);
- __ j(equal, false_label);
+ __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
+ __ j(below, false_label);
+ __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above, false_label);
// Check for undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
- __ j(not_zero, false_label);
- // Check for JS objects => true.
- __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
- __ j(below, false_label);
- __ CmpInstanceType(input, LAST_JS_OBJECT_TYPE);
- final_branch_condition = below_equal;
+ final_branch_condition = zero;
} else {
final_branch_condition = not_equal;
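
The typeof rewrite leans on the reordered instance-type space: callable spec objects (including regexps, which are callable) now occupy the top of the range, so 'function' becomes a single bound check, and 'object' is the non-callable range plus null, minus undetectables. A sketch with stand-in bounds; only their ordering matters, the real FIRST/LAST_*_SPEC_OBJECT_TYPE constants live in objects.h:

#include <cstdint>

constexpr uint8_t kFirstNoncallable = 0x80;  // stand-in value
constexpr uint8_t kLastNoncallable = 0xBF;   // stand-in value
constexpr uint8_t kFirstCallable = 0xC0;     // LAST_TYPE closes this range

const char* TypeofSpecObject(uint8_t instance_type, bool undetectable,
                             bool is_null) {
  if (instance_type >= kFirstCallable) return "function";  // incl. regexps
  if (is_null) return "object";
  if (instance_type >= kFirstNoncallable &&
      instance_type <= kLastNoncallable && !undetectable) {
    return "object";
  }
  return "neither";  // earlier branches handle string/number/undefined
}
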
@@ -3752,18 +4294,17 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
Register result = ToRegister(instr->result());
- NearLabel true_label;
- NearLabel false_label;
- NearLabel done;
+ Label true_label;
+ Label done;
EmitIsConstructCall(result);
- __ j(equal, &true_label);
+ __ j(equal, &true_label, Label::kNear);
- __ mov(result, Factory::false_value());
- __ jmp(&done);
+ __ mov(result, factory()->false_value());
+ __ jmp(&done, Label::kNear);
__ bind(&true_label);
- __ mov(result, Factory::true_value());
+ __ mov(result, factory()->true_value());
__ bind(&done);
}
@@ -3784,10 +4325,10 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
__ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
- NearLabel check_frame_marker;
+ Label check_frame_marker;
__ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker);
+ __ j(not_equal, &check_frame_marker, Label::kNear);
__ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
@@ -3830,20 +4371,49 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
env->deoptimization_index());
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ PushSafepointRegistersScope scope(this);
+ CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
}
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- // Perform stack overflow check.
- NearLabel done;
- ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &done);
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ private:
+ LStackCheck* instr_;
+ };
- StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
- __ bind(&done);
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &done, Label::kNear);
+
+ StackCheckStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ __ bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new DeferredStackCheck(this, instr);
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(below, deferred_stack_check->entry());
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ }
}
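
Stack checks now come in two shapes. At function entry the limit compare stays inline and branches over a StackCheckStub call; at backward branches the slow path moves into deferred (out-of-line) code, so a loop back edge costs only a cmp against the isolate's stack limit plus a normally-not-taken jump. Schematically, with the limit as a plain global standing in for the external reference:

#include <cstdint>

uintptr_t stack_limit;  // stands in for address_of_stack_limit(isolate)

// Function entry: the slow call sits inline, jumped *around* on the
// common path (j above_equal, &done).
void FunctionEntryCheck(uintptr_t sp, void (*stack_check_stub)()) {
  if (sp < stack_limit) stack_check_stub();
}

// Back edge: identical logic, but the call lives in deferred code at
// the end of the function, keeping the loop body fall-through only.
void BackEdgeCheck(uintptr_t sp, void (*deferred_stack_guard)()) {
  if (sp < stack_limit) deferred_stack_guard();  // Runtime::kStackGuard
}
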
@@ -3864,6 +4434,35 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
}
+void LCodeGen::DoIn(LIn* instr) {
+ LOperand* obj = instr->object();
+ LOperand* key = instr->key();
+ if (key->IsConstantOperand()) {
+ __ push(ToImmediate(key));
+ } else {
+ __ push(ToOperand(key));
+ }
+ if (obj->IsConstantOperand()) {
+ __ push(ToImmediate(obj));
+ } else {
+ __ push(ToOperand(obj));
+ }
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+  // Create a safepoint generator that will also ensure enough space in
+  // the reloc info for patching in deoptimization (since this is
+  // invoking a builtin).
+ SafepointGenerator safepoint_generator(this,
+ pointers,
+ env->deoptimization_index());
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 681ea77e5..defbcab0a 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -56,7 +56,7 @@ class LCodeGen BASE_EMBEDDED {
deoptimizations_(4),
deoptimization_literals_(8),
inlined_function_count_(0),
- scope_(chunk->graph()->info()->scope()),
+ scope_(info->scope()),
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
@@ -68,6 +68,10 @@ class LCodeGen BASE_EMBEDDED {
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
+ CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info_->isolate(); }
+ Factory* factory() const { return isolate()->factory(); }
+ Heap* heap() const { return isolate()->heap(); }
// Support for converting LOperands to assembler types.
Operand ToOperand(LOperand* op) const;
@@ -93,13 +97,15 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LGoto* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
+ void DoGap(LGap* instr);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -125,7 +131,7 @@ class LCodeGen BASE_EMBEDDED {
bool is_aborted() const { return status_ == ABORTED; }
int strict_mode_flag() const {
- return info_->is_strict() ? kStrictMode : kNonStrictMode;
+ return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
}
LChunk* chunk() const { return chunk_; }
@@ -142,8 +148,8 @@ class LCodeGen BASE_EMBEDDED {
Register temporary,
Register temporary2);
- int StackSlotCount() const { return chunk()->spill_slot_count(); }
- int ParameterCount() const { return scope()->num_parameters(); }
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+ int GetParameterCount() const { return scope()->num_parameters(); }
void Abort(const char* format, ...);
void Comment(const char* format, ...);
@@ -181,7 +187,7 @@ class LCodeGen BASE_EMBEDDED {
ContextMode context_mode,
SafepointMode safepoint_mode);
- void CallRuntime(Runtime::Function* fun,
+ void CallRuntime(const Runtime::Function* fun,
int argc,
LInstruction* instr,
ContextMode context_mode);
@@ -190,7 +196,7 @@ class LCodeGen BASE_EMBEDDED {
int argc,
LInstruction* instr,
ContextMode context_mode) {
- Runtime::Function* function = Runtime::FunctionForId(id);
+ const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, argc, instr, context_mode);
}
@@ -202,7 +208,8 @@ class LCodeGen BASE_EMBEDDED {
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
int arity,
- LInstruction* instr);
+ LInstruction* instr,
+ CallKind call_kind);
void LoadHeapObject(Register result, Handle<HeapObject> object);
@@ -223,6 +230,9 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
int ToInteger32(LConstantOperand* op) const;
+ Operand BuildExternalArrayOperand(LOperand* external_pointer,
+ LOperand* key,
+ JSObject::ElementsKind elements_kind);
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@@ -248,10 +258,13 @@ class LCodeGen BASE_EMBEDDED {
void RecordPosition(int position);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+ void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitCmpI(LOperand* left, LOperand* right);
- void EmitNumberUntagD(Register input, XMMRegister result, LEnvironment* env);
+ void EmitNumberUntagD(Register input,
+ XMMRegister result,
+ bool deoptimize_on_undefined,
+ LEnvironment* env);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -264,7 +277,6 @@ class LCodeGen BASE_EMBEDDED {
// true and false label should be made, to optimize fallthrough.
Condition EmitIsObject(Register input,
Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object);
@@ -272,6 +284,10 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
+ void EmitLoadFieldOrConstantFunction(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name);
LChunk* const chunk_;
MacroAssembler* const masm_;
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index 45c790f3f..9d91c6184 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -25,6 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "ia32/lithium-gap-resolver-ia32.h"
#include "ia32/lithium-codegen-ia32.h"
@@ -305,12 +309,15 @@ void LGapResolver::EmitMove(int index) {
__ mov(dst, src);
} else if (source->IsDoubleRegister()) {
- ASSERT(destination->IsDoubleRegister() ||
- destination->IsDoubleStackSlot());
XMMRegister src = cgen_->ToDoubleRegister(source);
- Operand dst = cgen_->ToOperand(destination);
- __ movdbl(dst, src);
-
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ __ movaps(dst, src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ __ movdbl(dst, src);
+ }
} else if (source->IsDoubleStackSlot()) {
ASSERT(destination->IsDoubleRegister() ||
destination->IsDoubleStackSlot());
@@ -387,13 +394,19 @@ void LGapResolver::EmitSwap(int index) {
__ mov(dst, tmp1);
__ mov(src, tmp0);
}
+ } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ // XMM register-register swap. We rely on having xmm0
+ // available as a fixed scratch register.
+ XMMRegister src = cgen_->ToDoubleRegister(source);
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ __ movaps(xmm0, src);
+ __ movaps(src, dst);
+ __ movaps(dst, xmm0);
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
- // XMM register-register or register-memory. We rely on having xmm0
+ // XMM register-memory swap. We rely on having xmm0
// available as a fixed scratch register.
- ASSERT(source->IsDoubleRegister() || source->IsDoubleStackSlot());
- ASSERT(destination->IsDoubleRegister() ||
- destination->IsDoubleStackSlot());
+ ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
? source
: destination);
@@ -458,3 +471,5 @@ void LGapResolver::EmitSwap(int index) {
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
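
The gap-resolver change above splits XMM swaps: register-to-register pairs now use three movaps through xmm0, which is reserved as a fixed scratch, while register-to-memory keeps the previous path. The classic scratch-register swap, for reference:

// Swap two XMM-like values through a dedicated scratch slot, mirroring
// the movaps/movaps/movaps sequence in EmitSwap.
struct XmmValue { double lo, hi; };

void SwapViaScratch(XmmValue* src, XmmValue* dst) {
  XmmValue xmm0 = *src;  // movaps xmm0, src
  *src = *dst;           // movaps src, dst
  *dst = xmm0;           // movaps dst, xmm0
}
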
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index ece0ab3d6..40e4badbe 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -71,22 +71,21 @@ void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
#ifdef DEBUG
void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as
- // temporaries and outputs because all registers
- // are blocked by the calling convention.
- // Inputs must use a fixed register.
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or
+  // a non-register policy.
ASSERT(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ ASSERT(operand->HasFixedPolicy() ||
+ operand->IsUsedAtStart());
}
- for (TempIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
}
}
#endif
@@ -114,21 +113,18 @@ void LInstruction::PrintTo(StringStream* stream) {
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- inputs_.PrintOperandsTo(stream);
+ for (int i = 0; i < inputs_.length(); i++) {
+ if (i > 0) stream->Add(" ");
+ inputs_[i]->PrintTo(stream);
+ }
}
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
- results_.PrintOperandsTo(stream);
-}
-
-
-template<typename T, int N>
-void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
- for (int i = 0; i < N; i++) {
+ for (int i = 0; i < results_.length(); i++) {
if (i > 0) stream->Add(" ");
- elems_[i]->PrintTo(stream);
+ results_[i]->PrintTo(stream);
}
}
@@ -240,6 +236,13 @@ void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
}
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
InputAt(0)->PrintTo(stream);
@@ -303,6 +306,15 @@ void LStoreContextSlot::PrintDataTo(StringStream* stream) {
}
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" ");
+ InputAt(1)->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
void LCallKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[ecx] #%d / ", arity());
}
@@ -379,8 +391,7 @@ void LChunk::MarkEmptyBlocks() {
LLabel* label = LLabel::cast(first_instr);
if (last_instr->IsGoto()) {
LGoto* goto_instr = LGoto::cast(last_instr);
- if (!goto_instr->include_stack_check() &&
- label->IsRedundant() &&
+ if (label->IsRedundant() &&
!label->is_loop_header()) {
bool can_eliminate = true;
for (int i = first + 1; i < last && can_eliminate; ++i) {
@@ -441,7 +452,7 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LGap* gap = new LGap(block);
+ LInstructionGap* gap = new LInstructionGap(block);
int index = -1;
if (instr->IsControl()) {
instructions_.Add(gap);
@@ -469,7 +480,7 @@ int LChunk::GetParameterStackSlot(int index) const {
// shift all parameter indexes down by the number of parameters, and
// make sure they end up negative so they are distinguishable from
// spill slots.
- int result = index - graph()->info()->scope()->num_parameters() - 1;
+ int result = index - info()->scope()->num_parameters() - 1;
ASSERT(result < 0);
return result;
}
@@ -477,7 +488,7 @@ int LChunk::GetParameterStackSlot(int index) const {
// A parameter relative to ebp in the arguments stub.
int LChunk::ParameterAt(int index) {
ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + graph()->info()->scope()->num_parameters() - index) *
+ return (1 + info()->scope()->num_parameters() - index) *
kPointerSize;
}
@@ -516,7 +527,7 @@ Representation LChunk::LookupLiteralRepresentation(
LChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new LChunk(graph());
+ chunk_ = new LChunk(info(), graph());
HPhase phase("Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@@ -533,8 +544,8 @@ LChunk* LChunkBuilder::Build() {
void LChunkBuilder::Abort(const char* format, ...) {
if (FLAG_trace_bailout) {
- SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
- PrintF("Aborting LChunk building in @\"%s\": ", *debug_name);
+ SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+ PrintF("Aborting LChunk building in @\"%s\": ", *name);
va_list arguments;
va_start(arguments, format);
OS::VPrint(format, arguments);
@@ -789,6 +800,11 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
}
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+ return AssignEnvironment(new LDeoptimize);
+}
+
+
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
return AssignEnvironment(new LDeoptimize);
}
@@ -844,24 +860,22 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
right = UseFixed(right_value, ecx);
}
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- bool can_deopt = (op == Token::SHR && constant_value == 0);
- if (can_deopt) {
- bool can_truncate = true;
- for (int i = 0; i < instr->uses()->length(); i++) {
- if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
- can_truncate = false;
+ // Shift operations can only deoptimize if we do a logical shift by 0 and
+ // the result cannot be truncated to int32.
+ bool may_deopt = (op == Token::SHR && constant_value == 0);
+ bool does_deopt = false;
+ if (may_deopt) {
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+ does_deopt = true;
break;
}
}
- can_deopt = !can_truncate;
}
- LShiftI* result = new LShiftI(op, left, right, can_deopt);
- return can_deopt
- ? AssignEnvironment(DefineSameAsFirst(result))
- : DefineSameAsFirst(result);
+ LInstruction* result =
+ DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
}
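
The deopt condition is specific to SHR by a constant zero: the shift leaves the bits untouched but the result is unsigned, so an input with the sign bit set becomes a value above kMaxInt that no int32 register representation can hold. If every use truncates back to int32 the difference is unobservable and no environment is needed; the rewrite also walks uses through HUseIterator instead of the old uses() list. Concretely:

#include <cstdint>

// x >>> 0 keeps the bit pattern but reinterprets it as unsigned.
bool ShrByZeroFitsInInt32(int32_t x) {
  uint32_t shifted = static_cast<uint32_t>(x) >> 0;  // identity on bits
  return shifted <= 0x7FFFFFFFu;  // false when x < 0 => must deopt
}
// Example: x = -1 gives shifted == 0xFFFFFFFF (4294967295), which only
// a truncating-to-int32 use can consume without deoptimizing.
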
@@ -870,18 +884,11 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- if (op == Token::MOD) {
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = UseFixedDouble(instr->right(), xmm1);
- LArithmeticD* result = new LArithmeticD(op, left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-
- } else {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
- }
+ ASSERT(op != Token::MOD);
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
}
@@ -1011,6 +1018,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
outer);
int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
+ if (hydrogen_env->is_special_index(i)) continue;
+
HValue* value = hydrogen_env->values()->at(i);
LOperand* op = NULL;
if (value->IsArgumentsObject()) {
@@ -1028,116 +1037,88 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- LGoto* result = new LGoto(instr->FirstSuccessor()->block_id(),
- instr->include_stack_check());
- return (instr->include_stack_check())
- ? AssignPointerMap(result)
- : result;
+ return new LGoto(instr->FirstSuccessor()->block_id());
}
LInstruction* LChunkBuilder::DoTest(HTest* instr) {
HValue* v = instr->value();
- if (v->EmitAtUses()) {
- if (v->IsClassOfTest()) {
- HClassOfTest* compare = HClassOfTest::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister(),
- TempRegister());
- } else if (v->IsCompare()) {
- HCompare* compare = HCompare::cast(v);
- Token::Value op = compare->token();
- HValue* left = compare->left();
- HValue* right = compare->right();
- Representation r = compare->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
-
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseOrConstantAtStart(right));
- } else if (r.IsDouble()) {
- ASSERT(left->representation().IsDouble());
- ASSERT(right->representation().IsDouble());
-
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else {
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- bool reversed = op == Token::GT || op == Token::LTE;
- LOperand* left_operand = UseFixed(left, reversed ? eax : edx);
- LOperand* right_operand = UseFixed(right, reversed ? edx : eax);
- LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
- right_operand);
- return MarkAsCall(result, instr);
- }
- } else if (v->IsIsSmi()) {
- HIsSmi* compare = HIsSmi::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LIsSmiAndBranch(Use(compare->value()));
- } else if (v->IsHasInstanceType()) {
- HHasInstanceType* compare = HHasInstanceType::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
- TempRegister());
- } else if (v->IsHasCachedArrayIndex()) {
- HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsIsNull()) {
- HIsNull* compare = HIsNull::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- // We only need a temp register for non-strict compare.
- LOperand* temp = compare->is_strict() ? NULL : TempRegister();
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
- temp);
- } else if (v->IsIsObject()) {
- HIsObject* compare = HIsObject::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
- temp1,
- temp2);
- } else if (v->IsCompareJSObjectEq()) {
- HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
- return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsInstanceOf()) {
- HInstanceOf* instance_of = HInstanceOf::cast(v);
- LOperand* left = UseFixed(instance_of->left(), InstanceofStub::left());
- LOperand* right = UseFixed(instance_of->right(), InstanceofStub::right());
- LOperand* context = UseFixed(instance_of->context(), esi);
- LInstanceOfAndBranch* result =
- new LInstanceOfAndBranch(context, left, right);
- return MarkAsCall(result, instr);
- } else if (v->IsTypeofIs()) {
- HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
- } else if (v->IsIsConstructCall()) {
- return new LIsConstructCallAndBranch(TempRegister());
+ if (!v->EmitAtUses()) return new LBranch(UseRegisterAtStart(v));
+ ASSERT(!v->HasSideEffects());
+ if (v->IsClassOfTest()) {
+ HClassOfTest* compare = HClassOfTest::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
+ TempRegister(),
+ TempRegister());
+ } else if (v->IsCompare()) {
+ HCompare* compare = HCompare::cast(v);
+ HValue* left = compare->left();
+ HValue* right = compare->right();
+ Representation r = compare->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(left->representation().IsInteger32());
+ ASSERT(right->representation().IsInteger32());
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
+ UseOrConstantAtStart(right));
} else {
- if (v->IsConstant()) {
- if (HConstant::cast(v)->handle()->IsTrue()) {
- return new LGoto(instr->FirstSuccessor()->block_id());
- } else if (HConstant::cast(v)->handle()->IsFalse()) {
- return new LGoto(instr->SecondSuccessor()->block_id());
- }
- }
- Abort("Undefined compare before branch");
- return NULL;
+ ASSERT(r.IsDouble());
+ ASSERT(left->representation().IsDouble());
+ ASSERT(right->representation().IsDouble());
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
+ UseRegisterAtStart(right));
}
+ } else if (v->IsIsSmi()) {
+ HIsSmi* compare = HIsSmi::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LIsSmiAndBranch(Use(compare->value()));
+ } else if (v->IsIsUndetectable()) {
+ HIsUndetectable* compare = HIsUndetectable::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
+ TempRegister());
+ } else if (v->IsHasInstanceType()) {
+ HHasInstanceType* compare = HHasInstanceType::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
+ TempRegister());
+ } else if (v->IsHasCachedArrayIndex()) {
+ HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(compare->value()));
+ } else if (v->IsIsNull()) {
+ HIsNull* compare = HIsNull::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ // We only need a temp register for non-strict compare.
+ LOperand* temp = compare->is_strict() ? NULL : TempRegister();
+ return new LIsNullAndBranch(UseRegisterAtStart(compare->value()), temp);
+ } else if (v->IsIsObject()) {
+ HIsObject* compare = HIsObject::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ LOperand* temp = TempRegister();
+ return new LIsObjectAndBranch(UseRegister(compare->value()), temp);
+ } else if (v->IsCompareObjectEq()) {
+ HCompareObjectEq* compare = HCompareObjectEq::cast(v);
+ return new LCmpObjectEqAndBranch(UseRegisterAtStart(compare->left()),
+ UseRegisterAtStart(compare->right()));
+ } else if (v->IsCompareConstantEq()) {
+ HCompareConstantEq* compare = HCompareConstantEq::cast(v);
+ return new LCmpConstantEqAndBranch(UseRegisterAtStart(compare->value()));
+ } else if (v->IsTypeofIs()) {
+ HTypeofIs* typeof_is = HTypeofIs::cast(v);
+ return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
+ } else if (v->IsIsConstructCall()) {
+ return new LIsConstructCallAndBranch(TempRegister());
+ } else if (v->IsConstant()) {
+ HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+ ? instr->FirstSuccessor()
+ : instr->SecondSuccessor();
+ return new LGoto(successor->block_id());
+ } else {
+ Abort("Undefined compare before branch");
+ return NULL;
}
- return new LBranch(UseRegisterAtStart(v));
}
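// NOTE: the EmitAtUses() path above fuses a compare whose only use is the
// branch into a single compare-and-branch instruction. Roughly, for hydrogen
// of the shape
//
//   t = HCompare(a, b)  // int32 inputs, used only by the HTest
//   HTest(t, then_block, else_block)
//
// the builder emits one LCmpIDAndBranch(a, b) instead of materializing a
// boolean in a register and testing it with a separate LBranch.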
@@ -1194,13 +1175,18 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
++argument_count_;
- LOperand* argument = UseOrConstant(instr->argument());
+ LOperand* argument = UseAny(instr->argument());
return new LPushArgument(argument);
}
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+}
+
+
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return DefineAsRegister(new LContext);
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
}
@@ -1229,9 +1215,24 @@ LInstruction* LChunkBuilder::DoCallConstantFunction(
}
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* function = UseFixed(instr->function(), edi);
+ argument_count_ -= instr->argument_count();
+ LInvokeFunction* result = new LInvokeFunction(context, function);
+ return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos) {
+ if (op == kMathLog) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LUnaryMathOperation* result = new LUnaryMathOperation(input);
+ return DefineSameAsFirst(result);
+ } else if (op == kMathSin || op == kMathCos) {
LOperand* input = UseFixedDouble(instr->value(), xmm1);
LUnaryMathOperation* result = new LUnaryMathOperation(input);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
@@ -1373,13 +1374,23 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- // The temporary operand is necessary to ensure that right is not allocated
- // into edx.
- LOperand* temp = FixedTemp(edx);
- LOperand* value = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LModI* mod = new LModI(value, divisor, temp);
- LInstruction* result = DefineFixed(mod, edx);
+
+ LInstruction* result;
+ if (instr->HasPowerOf2Divisor()) {
+ ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
+ LOperand* value = UseRegisterAtStart(instr->left());
+ LModI* mod = new LModI(value, UseOrConstant(instr->right()), NULL);
+ result = DefineSameAsFirst(mod);
+ } else {
+ // The temporary operand is necessary to ensure that right is
+ // not allocated into edx.
+ LOperand* temp = FixedTemp(edx);
+ LOperand* value = UseFixed(instr->left(), eax);
+ LOperand* divisor = UseRegister(instr->right());
+ LModI* mod = new LModI(value, divisor, temp);
+ result = DefineFixed(mod, edx);
+ }
+
return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
instr->CheckFlag(HValue::kCanBeDivByZero))
? AssignEnvironment(result)
@@ -1391,8 +1402,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
// We call a C function for double modulo. It can't trigger a GC.
// We need to use a fixed result register for the call.
// TODO(fschneider): Allow arbitrary registers as inputs.
- LOperand* left = UseFixedDouble(instr->left(), xmm1);
- LOperand* right = UseFixedDouble(instr->right(), xmm2);
+ LOperand* left = UseFixedDouble(instr->left(), xmm2);
+ LOperand* right = UseFixedDouble(instr->right(), xmm1);
LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
}
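// NOTE: the power-of-2 fast path above avoids idiv because, for a
// non-negative dividend, the remainder is just the low bits, e.g.
//
//   x % 8 == x & 7  // for x >= 0
//
// Negative dividends (and a -0 result) still need a fix-up, which is why
// kBailoutOnMinusZero keeps forcing an environment on the result.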
@@ -1505,15 +1516,21 @@ LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
}
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
- HCompareJSObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEq(HCompareObjectEq* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
+ LCmpObjectEq* result = new LCmpObjectEq(left, right);
return DefineAsRegister(result);
}
+LInstruction* LChunkBuilder::DoCompareConstantEq(
+ HCompareConstantEq* instr) {
+ LOperand* left = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LCmpConstantEq(left));
+}
+
+
LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1526,7 +1543,7 @@ LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegister(instr->value());
- return DefineAsRegister(new LIsObject(value, TempRegister()));
+ return DefineAsRegister(new LIsObject(value));
}
@@ -1538,6 +1555,14 @@ LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
}
+LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LIsUndetectable(value));
+}
+
+
LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1548,8 +1573,10 @@ LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
HGetCachedArrayIndex* instr) {
- Abort("Unimplemented: %s", "DoGetCachedArrayIndex");
- return NULL;
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LGetCachedArrayIndex(value));
}
@@ -1582,9 +1609,16 @@ LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
}
-LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) {
+LInstruction* LChunkBuilder::DoExternalArrayLength(
+ HExternalArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LPixelArrayLength(array));
+ return DefineAsRegister(new LExternalArrayLength(array));
+}
+
+
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+ LOperand* object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LElementsKind(object));
}
@@ -1614,6 +1648,19 @@ LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
}
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
@@ -1627,8 +1674,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = UseRegister(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
if (needs_check) {
+ bool truncating = instr->CanTruncateToInt32();
LOperand* xmm_temp =
- (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
+ (truncating && CpuFeatures::IsSupported(SSE3))
? NULL
: FixedTemp(xmm1);
LTaggedToI* res = new LTaggedToI(value, xmm_temp);
@@ -1648,8 +1696,8 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignPointerMap(Define(result, result_temp));
} else {
ASSERT(to.IsInteger32());
- bool needs_temp = instr->CanTruncateToInt32() &&
- !CpuFeatures::IsSupported(SSE3);
+ bool truncating = instr->CanTruncateToInt32();
+ bool needs_temp = truncating && !CpuFeatures::IsSupported(SSE3);
LOperand* value = needs_temp ?
UseTempRegister(instr->value()) : UseRegister(instr->value());
LOperand* temp = needs_temp ? TempRegister() : NULL;
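// NOTE: the SSE3 special-casing above is presumably about fisttp: with SSE3,
// a truncating double-to-int32 conversion can go through the x87 fisttp
// instruction, which needs neither the scratch xmm register nor the extra
// general-purpose temp that the non-SSE3 cvttsd2si fallback reserves.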
@@ -1677,7 +1725,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value, zero));
+ return AssignEnvironment(new LCheckNonSmi(value));
}
@@ -1698,7 +1746,7 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value, not_zero));
+ return AssignEnvironment(new LCheckSmi(value));
}
@@ -1715,6 +1763,55 @@ LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
}
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ if (input_rep.IsDouble()) {
+ LOperand* reg = UseRegister(value);
+ return DefineAsRegister(new LClampDToUint8(reg));
+ } else if (input_rep.IsInteger32()) {
+ LOperand* reg = UseFixed(value, eax);
+ return DefineFixed(new LClampIToUint8(reg), eax);
+ } else {
+ ASSERT(input_rep.IsTagged());
+ LOperand* reg = UseFixed(value, eax);
+ // Register allocator doesn't (yet) support allocation of double
+ // temps. Reserve xmm1 explicitly.
+ LOperand* temp = FixedTemp(xmm1);
+ LClampTToUint8* result = new LClampTToUint8(reg, temp);
+ return AssignEnvironment(DefineFixed(result, eax));
+ }
+}
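// NOTE: the integer and tagged paths above pin the value to eax, presumably
// because the clamp is done with byte instructions (see ClampUint8 in
// macro-assembler-ia32.cc further down), and on ia32 only eax, ebx, ecx and
// edx have byte-addressable low registers.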
+
+
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+
+ LInstruction* result;
+ if (input_rep.IsDouble()) {
+ LOperand* reg = UseRegister(value);
+ LOperand* temp_reg =
+ CpuFeatures::IsSupported(SSE3) ? NULL : TempRegister();
+ result = DefineAsRegister(new LDoubleToI(reg, temp_reg));
+ } else if (input_rep.IsInteger32()) {
+ // Canonicalization should already have removed the hydrogen instruction in
+ // this case, since it is a noop.
+ UNREACHABLE();
+ return NULL;
+ } else {
+ ASSERT(input_rep.IsTagged());
+ LOperand* reg = UseRegister(value);
+ // Register allocator doesn't (yet) support allocation of double
+ // temps. Reserve xmm1 explicitly.
+ LOperand* xmm_temp =
+ CpuFeatures::IsSupported(SSE3) ? NULL : FixedTemp(xmm1);
+ result = DefineSameAsFirst(new LTaggedToI(reg, xmm_temp));
+ }
+ return AssignEnvironment(result);
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
return new LReturn(UseFixed(instr->value(), eax));
}
@@ -1739,20 +1836,39 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
- LLoadGlobal* result = new LLoadGlobal;
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new LLoadGlobalCell;
return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
-LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
- LStoreGlobal* result = new LStoreGlobal(UseRegisterAtStart(instr->value()));
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* global_object = UseFixed(instr->global_object(), eax);
+ LLoadGlobalGeneric* result = new LLoadGlobalGeneric(context, global_object);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+ LStoreGlobalCell* result =
+ new LStoreGlobalCell(UseRegisterAtStart(instr->value()));
return instr->check_hole_value() ? AssignEnvironment(result) : result;
}
+LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* global_object = UseFixed(instr->global_object(), edx);
+ LOperand* value = UseFixed(instr->value(), eax);
+ LStoreGlobalGeneric* result =
+ new LStoreGlobalGeneric(context, global_object, value);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LLoadContextSlot(context));
@@ -1783,6 +1899,21 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
+LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
+ HLoadNamedFieldPolymorphic* instr) {
+ ASSERT(instr->representation().IsTagged());
+ if (instr->need_generic()) {
+ LOperand* obj = UseFixed(instr->object(), eax);
+ LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+ } else {
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+}
+
+
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->object(), eax);
@@ -1805,10 +1936,10 @@ LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
}
-LInstruction* LChunkBuilder::DoLoadPixelArrayExternalPointer(
- HLoadPixelArrayExternalPointer* instr) {
+LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
+ HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadPixelArrayExternalPointer(input));
+ return DefineAsRegister(new LLoadExternalArrayPointer(input));
}
@@ -1823,16 +1954,29 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
}
-LInstruction* LChunkBuilder::DoLoadPixelArrayElement(
- HLoadPixelArrayElement* instr) {
- ASSERT(instr->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
+ HLoadKeyedSpecializedArrayElement* instr) {
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ Representation representation(instr->representation());
+ ASSERT(
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
- LOperand* external_pointer =
- UseRegisterAtStart(instr->external_pointer());
- LOperand* key = UseRegisterAtStart(instr->key());
- LLoadPixelArrayElement* result =
- new LLoadPixelArrayElement(external_pointer, key);
- return DefineSameAsFirst(result);
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LLoadKeyedSpecializedArrayElement* result =
+ new LLoadKeyedSpecializedArrayElement(external_pointer,
+ key);
+ LInstruction* load_instr = DefineAsRegister(result);
+  // An unsigned int array load might overflow and cause a deopt, so make
+  // sure it has an environment.
+ return (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS)
+ ? AssignEnvironment(load_instr)
+ : load_instr;
}
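// NOTE: the overflow mentioned above: an external unsigned int array holds
// values up to 0xFFFFFFFF (4294967295), but the load produces a signed int32,
// which tops out at 0x7FFFFFFF. Reading a larger element therefore has to
// deoptimize and box the value, hence the environment.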
@@ -1865,20 +2009,35 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
}
-LInstruction* LChunkBuilder::DoStorePixelArrayElement(
- HStorePixelArrayElement* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
+ HStoreKeyedSpecializedArrayElement* instr) {
+ Representation representation(instr->value()->representation());
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* val = UseRegister(instr->value());
- LOperand* key = UseRegister(instr->key());
- // The generated code requires that the clamped value is in a byte
- // register. eax is an arbitrary choice to satisfy this requirement.
- LOperand* clamped = FixedTemp(eax);
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LOperand* val = NULL;
+ if (elements_kind == JSObject::EXTERNAL_BYTE_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS) {
+ // We need a byte register in this case for the value.
+ val = UseFixed(instr->value(), eax);
+ } else {
+ val = UseRegister(instr->value());
+ }
- return new LStorePixelArrayElement(external_pointer, key, val, clamped);
+ return new LStoreKeyedSpecializedArrayElement(external_pointer,
+ key,
+ val);
}
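// NOTE: as with the clamp above, the byte-sized element kinds pin the value
// to eax because an ia32 byte store can only take its source from the low
// byte of eax, ebx, ecx or edx; eax is simply a fixed choice that always
// satisfies the constraint.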
@@ -1929,6 +2088,13 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* left = UseOrConstantAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ return MarkAsCall(DefineFixed(new LStringAdd(left, right), eax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegisterOrConstant(instr->index());
@@ -1937,6 +2103,13 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
}
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+ LOperand* char_code = UseRegister(instr->value());
+ LStringCharFromCode* result = new LStringCharFromCode(char_code);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LOperand* string = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LStringLength(string));
@@ -1966,7 +2139,8 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LDeleteProperty* result =
- new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
+ new LDeleteProperty(UseAtStart(instr->object()),
+ UseOrConstantAtStart(instr->key()));
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2020,6 +2194,13 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+ LOperand* object = UseFixed(instr->value(), eax);
+ LToFastProperties* result = new LToFastProperties(object);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LTypeof* result = new LTypeof(UseAtStart(instr->value()));
return MarkAsCall(DefineFixed(result, eax), instr);
@@ -2051,7 +2232,6 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->Push(value);
}
}
- ASSERT(env->length() == instr->environment_length());
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
@@ -2070,7 +2250,12 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- return MarkAsCall(new LStackCheck, instr);
+ if (instr->is_function_entry()) {
+ return MarkAsCall(new LStackCheck, instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ return AssignEnvironment(AssignPointerMap(new LStackCheck));
+ }
}
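// NOTE: rough intent of the split above: a function-entry stack check behaves
// like a call (MarkAsCall), while a back-edge check inside a loop only needs
// a pointer map and an environment so the interrupt can be serviced in place.
// This pairs with LGoto losing its include_stack_check flag further down:
// back-edge stack checks are now explicit HStackCheck instructions.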
@@ -2079,8 +2264,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->function(),
- false,
- undefined);
+ undefined,
+ instr->call_kind());
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
@@ -2094,6 +2279,14 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
}
+LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+ LOperand* key = UseOrConstantAtStart(instr->key());
+ LOperand* object = UseOrConstantAtStart(instr->object());
+ LIn* result = new LIn(key, object);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index ad0b0ca05..c5667864c 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -32,6 +32,7 @@
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
+#include "utils.h"
namespace v8 {
namespace internal {
@@ -39,12 +40,6 @@ namespace internal {
// Forward declarations.
class LCodeGen;
-#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(ControlInstruction) \
- V(Call) \
- LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
@@ -70,17 +65,22 @@ class LCodeGen;
V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMap) \
+ V(CheckNonSmi) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8) \
V(ClassOfTest) \
V(ClassOfTestAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
- V(CmpJSObjectEq) \
- V(CmpJSObjectEqAndBranch) \
+ V(CmpObjectEq) \
+ V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
- V(CmpTAndBranch) \
+ V(CmpConstantEq) \
+ V(CmpConstantEqAndBranch) \
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
@@ -89,9 +89,11 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(ElementsKind) \
+ V(ExternalArrayLength) \
V(FixedArrayLength) \
V(FunctionLiteral) \
- V(Gap) \
+ V(GetCachedArrayIndex) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
@@ -99,31 +101,37 @@ class LCodeGen;
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
+ V(In) \
V(InstanceOf) \
- V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsConstructCall) \
+ V(IsConstructCallAndBranch) \
V(IsNull) \
V(IsNullAndBranch) \
V(IsObject) \
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
- V(IsConstructCall) \
- V(IsConstructCallAndBranch) \
+ V(IsUndetectable) \
+ V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
+ V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
- V(LoadGlobal) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
+ V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
+ V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
- V(LoadPixelArrayElement) \
- V(LoadPixelArrayExternalPointer) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -133,7 +141,6 @@ class LCodeGen;
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
- V(PixelArrayLength) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
@@ -143,17 +150,22 @@ class LCodeGen;
V(SmiUntag) \
V(StackCheck) \
V(StoreContextSlot) \
- V(StoreGlobal) \
+ V(StoreGlobalCell) \
+ V(StoreGlobalGeneric) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
+ V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
- V(StorePixelArrayElement) \
+ V(StringAdd) \
V(StringCharCodeAt) \
+ V(StringCharFromCode) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
+ V(ThisFunction) \
V(Throw) \
+ V(ToFastProperties) \
V(Typeof) \
V(TypeofIs) \
V(TypeofIsAndBranch) \
@@ -162,20 +174,16 @@ class LCodeGen;
V(ValueOf)
-#define DECLARE_INSTRUCTION(type) \
- virtual bool Is##type() const { return true; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const { return LInstruction::k##type; } \
+ virtual void CompileToNative(LCodeGen* generator); \
+ virtual const char* Mnemonic() const { return mnemonic; } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- DECLARE_INSTRUCTION(type)
-
-
#define DECLARE_HYDROGEN_ACCESSOR(type) \
H##type* hydrogen() const { \
return H##type::cast(hydrogen_value()); \
@@ -197,10 +205,25 @@ class LInstruction: public ZoneObject {
virtual void PrintDataTo(StringStream* stream) = 0;
virtual void PrintOutputOperandTo(StringStream* stream) = 0;
- // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
- LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
+ enum Opcode {
+ // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+ kNumberOfInstructions
+#undef DECLARE_OPCODE
+ };
+
+ virtual Opcode opcode() const = 0;
+
+ // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+ // Declare virtual predicates for instructions that don't have
+ // an opcode.
+ virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
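// NOTE: for a concrete instruction such as LAddI, the scheme above expands
// roughly to
//
//   enum Opcode { ..., kAddI, ..., kNumberOfInstructions };
//   bool IsAddI() const { return opcode() == kAddI; }
//
// replacing one virtual Is##type() test per class with a single virtual
// opcode() plus a non-virtual comparison.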
@@ -259,37 +282,6 @@ class LInstruction: public ZoneObject {
};
-template<typename ElementType, int NumElements>
-class OperandContainer {
- public:
- OperandContainer() {
- for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
- }
- int length() { return NumElements; }
- ElementType& operator[](int i) {
- ASSERT(i < length());
- return elems_[i];
- }
- void PrintOperandsTo(StringStream* stream);
-
- private:
- ElementType elems_[NumElements];
-};
-
-
-template<typename ElementType>
-class OperandContainer<ElementType, 0> {
- public:
- int length() { return 0; }
- void PrintOperandsTo(StringStream* stream) { }
- ElementType& operator[](int i) {
- UNREACHABLE();
- static ElementType t = 0;
- return t;
- }
-};
-
-
// R = number of result operands (0 or 1).
// I = number of input operands.
// T = number of temporary operands.
@@ -312,24 +304,28 @@ class LTemplateInstruction: public LInstruction {
virtual void PrintOutputOperandTo(StringStream* stream);
protected:
- OperandContainer<LOperand*, R> results_;
- OperandContainer<LOperand*, I> inputs_;
- OperandContainer<LOperand*, T> temps_;
+ EmbeddedContainer<LOperand*, R> results_;
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
};
class LGap: public LTemplateInstruction<0, 0, 0> {
public:
- explicit LGap(HBasicBlock* block)
- : block_(block) {
+ explicit LGap(HBasicBlock* block) : block_(block) {
parallel_moves_[BEFORE] = NULL;
parallel_moves_[START] = NULL;
parallel_moves_[END] = NULL;
parallel_moves_[AFTER] = NULL;
}
- DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
+  // Can't use the DECLARE_CONCRETE_INSTRUCTION macro here because of the
+  // sub-classes.
+ virtual bool IsGap() const { return true; }
virtual void PrintDataTo(StringStream* stream);
+ static LGap* cast(LInstruction* instr) {
+ ASSERT(instr->IsGap());
+ return reinterpret_cast<LGap*>(instr);
+ }
bool IsRedundant() const;
@@ -359,21 +355,26 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
+class LInstructionGap: public LGap {
+ public:
+ explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
class LGoto: public LTemplateInstruction<0, 0, 0> {
public:
- LGoto(int block_id, bool include_stack_check = false)
- : block_id_(block_id), include_stack_check_(include_stack_check) { }
+ explicit LGoto(int block_id) : block_id_(block_id) { }
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int block_id() const { return block_id_; }
- bool include_stack_check() const { return include_stack_check_; }
private:
int block_id_;
- bool include_stack_check_;
};
@@ -453,7 +454,6 @@ class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
- DECLARE_INSTRUCTION(ControlInstruction)
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
@@ -617,26 +617,49 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
+class LCmpObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
- LCmpJSObjectEq(LOperand* left, LOperand* right) {
+ LCmpObjectEq(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEq, "cmp-object-eq")
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
- "cmp-jsobject-eq-and-branch")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+ "cmp-object-eq-and-branch")
+};
+
+
+class LCmpConstantEq: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCmpConstantEq(LOperand* left) {
+ inputs_[0] = left;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpConstantEq, "cmp-constant-eq")
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
+};
+
+
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpConstantEqAndBranch(LOperand* left) {
+ inputs_[0] = left;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+ "cmp-constant-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
};
@@ -669,23 +692,21 @@ class LIsNullAndBranch: public LControlInstruction<1, 1> {
};
-class LIsObject: public LTemplateInstruction<1, 1, 1> {
+class LIsObject: public LTemplateInstruction<1, 1, 0> {
public:
- LIsObject(LOperand* value, LOperand* temp) {
+ explicit LIsObject(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
};
-class LIsObjectAndBranch: public LControlInstruction<1, 2> {
+class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
- temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
@@ -717,6 +738,31 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
};
+class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LIsUndetectable(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
+};
+
+
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasInstanceType(LOperand* value) {
@@ -743,6 +789,17 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
};
+class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasCachedArrayIndex(LOperand* value) {
@@ -828,20 +885,6 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
};
-class LCmpTAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpTAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
class LInstanceOf: public LTemplateInstruction<1, 3, 0> {
public:
LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
@@ -856,20 +899,6 @@ class LInstanceOf: public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOfAndBranch: public LControlInstruction<3, 0> {
- public:
- LInstanceOfAndBranch(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-
- LOperand* context() { return inputs_[0]; }
-};
-
-
class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
@@ -1024,14 +1053,14 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
};
-class LPixelArrayLength: public LTemplateInstruction<1, 1, 0> {
+class LExternalArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LPixelArrayLength(LOperand* value) {
+ explicit LExternalArrayLength(LOperand* value) {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel-array-length")
- DECLARE_HYDROGEN_ACCESSOR(PixelArrayLength)
+ DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength, "external-array-length")
+ DECLARE_HYDROGEN_ACCESSOR(ExternalArrayLength)
};
@@ -1046,6 +1075,17 @@ class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
};
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LElementsKind(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
+};
+
+
class LValueOf: public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
@@ -1112,6 +1152,7 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
Token::Value op() const { return op_; }
+ virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
@@ -1128,6 +1169,7 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
@@ -1156,6 +1198,21 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+
+ LOperand* object() { return inputs_[0]; }
+};
+
+
+class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedFieldPolymorphic(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
+                               "load-named-field-polymorphic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
+
+ LOperand* object() { return inputs_[0]; }
};
@@ -1199,14 +1256,14 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadPixelArrayExternalPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LLoadPixelArrayExternalPointer(LOperand* object) {
+ explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
- "load-pixel-array-external-pointer")
+ DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
+ "load-external-array-pointer")
};
@@ -1225,19 +1282,23 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadPixelArrayElement: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadPixelArrayElement(LOperand* external_pointer, LOperand* key) {
+ LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
- "load-pixel-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadPixelArrayElement)
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
+ "load-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
};
@@ -1257,21 +1318,59 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> {
};
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
};
-class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
public:
- explicit LStoreGlobal(LOperand* value) {
+ explicit LStoreGlobalCell(LOperand* value) {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreGlobalGeneric(LOperand* context,
+                      LOperand* global_object,
+                      LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ inputs_[2] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
+
+ LOperand* context() { return InputAt(0); }
+ LOperand* global_object() { return InputAt(1); }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ LOperand* value() { return InputAt(2); }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -1321,6 +1420,11 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+};
+
+
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
@@ -1375,6 +1479,25 @@ class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
};
+class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
public:
LCallKeyed(LOperand* context, LOperand* key) {
@@ -1477,7 +1600,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- Runtime::Function* function() const { return hydrogen()->function(); }
+ const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
};
@@ -1522,7 +1645,7 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1537,7 +1660,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
}
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1560,6 +1683,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -1620,6 +1744,7 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
Handle<Object> name() const { return hydrogen()->name(); }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -1643,25 +1768,26 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
};
-class LStorePixelArrayElement: public LTemplateInstruction<0, 3, 1> {
+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStorePixelArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* val,
- LOperand* clamped) {
+ LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key,
+ LOperand* val) {
inputs_[0] = external_pointer;
inputs_[1] = key;
inputs_[2] = val;
- temps_[0] = clamped;
}
- DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement,
- "store-pixel-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StorePixelArrayElement)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
+ "store-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
};
@@ -1678,6 +1804,7 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
virtual void PrintDataTo(StringStream* stream);
@@ -1685,6 +1812,22 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringAdd(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
};
@@ -1703,6 +1846,19 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
};
+class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LStringCharFromCode(LOperand* char_code) {
+ inputs_[0] = char_code;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+
+ LOperand* char_code() { return inputs_[0]; }
+};
+
+
class LStringLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LStringLength(LOperand* string) {
@@ -1766,20 +1922,58 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
public:
- LCheckSmi(LOperand* value, Condition condition)
- : condition_(condition) {
+ explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
}
- Condition condition() const { return condition_; }
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const {
- return (condition_ == zero) ? "check-non-smi" : "check-smi";
+
+class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampDToUint8(LOperand* value) {
+ inputs_[0] = value;
}
- private:
- Condition condition_;
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampIToUint8(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LClampTToUint8(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
};
@@ -1819,6 +2013,17 @@ class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
};
+class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LToFastProperties(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+ DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
class LTypeof: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
@@ -1899,14 +2104,35 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
class LStackCheck: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
+};
+
+
+class LIn: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LIn(LOperand* key, LOperand* object) {
+ inputs_[0] = key;
+ inputs_[1] = object;
+ }
+
+ LOperand* key() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(In, "in")
};
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
- explicit LChunk(HGraph* graph)
+ explicit LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
+ info_(info),
graph_(graph),
instructions_(32),
pointer_maps_(8),
@@ -1923,6 +2149,7 @@ class LChunk: public ZoneObject {
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
+ CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
void AddGapMove(int index, LOperand* from, LOperand* to);
@@ -1959,6 +2186,7 @@ class LChunk: public ZoneObject {
private:
int spill_slot_count_;
+ CompilationInfo* info_;
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
@@ -1968,8 +2196,9 @@ class LChunk: public ZoneObject {
class LChunkBuilder BASE_EMBEDDED {
public:
- LChunkBuilder(HGraph* graph, LAllocator* allocator)
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
+ info_(info),
graph_(graph),
status_(UNUSED),
current_instruction_(NULL),
@@ -1998,6 +2227,7 @@ class LChunkBuilder BASE_EMBEDDED {
};
LChunk* chunk() const { return chunk_; }
+ CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
bool is_unused() const { return status_ == UNUSED; }
@@ -2104,6 +2334,7 @@ class LChunkBuilder BASE_EMBEDDED {
HArithmeticBinaryOperation* instr);
LChunk* chunk_;
+ CompilationInfo* info_;
HGraph* const graph_;
Status status_;
HInstruction* current_instruction_;
@@ -2119,7 +2350,6 @@ class LChunkBuilder BASE_EMBEDDED {
};
#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_INSTRUCTION
#undef DECLARE_CONCRETE_INSTRUCTION
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 91b6651fe..a80821f82 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
#include "runtime.h"
#include "serialize.h"
@@ -41,18 +41,21 @@ namespace internal {
// -------------------------------------------------------------------------
// MacroAssembler implementation.
-MacroAssembler::MacroAssembler(void* buffer, int size)
- : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+ : Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
- code_object_(Heap::undefined_value()) {
+ allow_stub_calls_(true) {
+ if (isolate() != NULL) {
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
}
void MacroAssembler::RecordWriteHelper(Register object,
Register addr,
Register scratch) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check that the object is not in new space.
Label not_in_new_space;
InNewSpace(object, scratch, not_equal, &not_in_new_space);
@@ -70,7 +73,69 @@ void MacroAssembler::RecordWriteHelper(Register object,
shr(addr, Page::kRegionSizeLog2);
// Set dirty mark for region.
- bts(Operand(object, Page::kDirtyFlagOffset), addr);
+  // Bit tests with a memory operand should be avoided on Intel processors,
+  // as they usually have long latency and decode to multiple uops. We load
+  // the bit base operand into a register first and store it back afterwards.
+ mov(scratch, Operand(object, Page::kDirtyFlagOffset));
+ bts(Operand(scratch), addr);
+ mov(Operand(object, Page::kDirtyFlagOffset), scratch);
+}
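// NOTE: the load/bts/store sequence above replaces the single instruction
//
//   bts(Operand(object, Page::kDirtyFlagOffset), addr);
//
// removed in this hunk; a bit-test-and-set on a memory operand is microcoded
// on many Intel parts, so splitting it trades one scratch register for fewer,
// cheaper uops.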
+
+
+void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
+ XMMRegister scratch_reg,
+ Register result_reg) {
+ Label done;
+ ExternalReference zero_ref = ExternalReference::address_of_zero();
+ movdbl(scratch_reg, Operand::StaticVariable(zero_ref));
+ Set(result_reg, Immediate(0));
+ ucomisd(input_reg, scratch_reg);
+ j(below, &done, Label::kNear);
+ ExternalReference half_ref = ExternalReference::address_of_one_half();
+ movdbl(scratch_reg, Operand::StaticVariable(half_ref));
+ addsd(scratch_reg, input_reg);
+ cvttsd2si(result_reg, Operand(scratch_reg));
+ test(result_reg, Immediate(0xFFFFFF00));
+ j(zero, &done, Label::kNear);
+ Set(result_reg, Immediate(255));
+ bind(&done);
+}
+
+
+void MacroAssembler::ClampUint8(Register reg) {
+ Label done;
+ test(reg, Immediate(0xFFFFFF00));
+ j(zero, &done, Label::kNear);
+ setcc(negative, reg); // 1 if negative, 0 if positive.
+ dec_b(reg); // 0 if negative, 255 if positive.
+ bind(&done);
+}
+
+
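Both clamps reduce to the following scalar logic; this is a hedged C++ restatement of the code paths above, not the V8 API:

#include <cstdint>

// ClampDoubleToUint8: NaN and non-positive inputs go to 0 (the ucomisd plus
// j(below) pair); otherwise value + 0.5 is truncated, and results outside
// 8 bits saturate to 255 (the test against 0xFFFFFF00).
inline uint8_t ClampDoubleToUint8(double value) {
  if (!(value > 0.0)) return 0;       // Covers NaN and values <= 0.
  if (value >= 254.5) return 255;     // Saturate before the cast can overflow.
  return static_cast<uint8_t>(value + 0.5);  // addsd half, then cvttsd2si.
}

// ClampUint8: when the value is already in [0, 255] nothing happens; the
// setcc/dec_b pair then maps negative -> 0 and too-large -> 255 branch-free.
inline uint8_t ClampUint8(int32_t value) {
  if ((value & ~0xFF) == 0) return static_cast<uint8_t>(value);
  return value < 0 ? 0 : 255;
}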
+void MacroAssembler::InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* branch,
+ Label::Distance branch_near) {
+ ASSERT(cc == equal || cc == not_equal);
+ if (Serializer::enabled()) {
+ // Can't do arithmetic on external references if it might get serialized.
+ mov(scratch, Operand(object));
+ // The mask isn't really an address. We load it as an external reference in
+ // case the size of the new space is different between the snapshot maker
+ // and the running system.
+ and_(Operand(scratch),
+ Immediate(ExternalReference::new_space_mask(isolate())));
+ cmp(Operand(scratch),
+ Immediate(ExternalReference::new_space_start(isolate())));
+ j(cc, branch, branch_near);
+ } else {
+ int32_t new_space_start = reinterpret_cast<int32_t>(
+ ExternalReference::new_space_start(isolate()).address());
+ lea(scratch, Operand(object, -new_space_start));
+ and_(scratch, isolate()->heap()->NewSpaceMask());
+ j(cc, branch, branch_near);
+ }
}
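The non-Serializer path relies on new space being a single aligned region, so one subtract-and-mask classifies a pointer. A sketch in plain integer arithmetic (mask stands in for Heap::NewSpaceMask()):

#include <cstdint>

inline bool PointerInNewSpace(uintptr_t object, uintptr_t new_space_start,
                              uintptr_t mask) {
  // lea(scratch, Operand(object, -new_space_start)); and_(scratch, mask);
  // the zero flag after the and_ answers the question for j(equal, ...).
  return ((object - new_space_start) & mask) == 0;
}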
@@ -80,14 +145,13 @@ void MacroAssembler::RecordWrite(Register object,
Register scratch) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
- NearLabel done;
+ Label done;
// Skip barrier if writing a smi.
ASSERT_EQ(0, kSmiTag);
- test(value, Immediate(kSmiTagMask));
- j(zero, &done);
+ JumpIfSmi(value, &done, Label::kNear);
- InNewSpace(object, value, equal, &done);
+ InNewSpace(object, value, equal, &done, Label::kNear);
// The offset is relative to a tagged or untagged HeapObject pointer,
// so either offset or offset + kHeapObjectTag must be a
@@ -113,7 +177,7 @@ void MacroAssembler::RecordWrite(Register object,
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(object, Immediate(BitCast<int32_t>(kZapValue)));
mov(value, Immediate(BitCast<int32_t>(kZapValue)));
mov(scratch, Immediate(BitCast<int32_t>(kZapValue)));
@@ -130,8 +194,7 @@ void MacroAssembler::RecordWrite(Register object,
// Skip barrier if writing a smi.
ASSERT_EQ(0, kSmiTag);
- test(value, Immediate(kSmiTagMask));
- j(zero, &done);
+ JumpIfSmi(value, &done, Label::kNear);
InNewSpace(object, value, equal, &done);
@@ -141,7 +204,7 @@ void MacroAssembler::RecordWrite(Register object,
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(object, Immediate(BitCast<int32_t>(kZapValue)));
mov(address, Immediate(BitCast<int32_t>(kZapValue)));
mov(value, Immediate(BitCast<int32_t>(kZapValue)));
@@ -152,7 +215,7 @@ void MacroAssembler::RecordWrite(Register object,
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
Set(eax, Immediate(0));
- mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak)));
+ mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
@@ -161,7 +224,7 @@ void MacroAssembler::DebugBreak() {
void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) {
- xor_(dst, Operand(dst)); // shorter than mov
+ xor_(dst, Operand(dst)); // Shorter than mov.
} else {
mov(dst, x);
}
@@ -173,6 +236,33 @@ void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
}
+bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
+ static const int kMaxImmediateBits = 17;
+ if (x.rmode_ != RelocInfo::NONE) return false;
+ return !is_intn(x.x_, kMaxImmediateBits);
+}
+
+
+void MacroAssembler::SafeSet(Register dst, const Immediate& x) {
+ if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
+ Set(dst, Immediate(x.x_ ^ jit_cookie()));
+ xor_(dst, jit_cookie());
+ } else {
+ Set(dst, x);
+ }
+}
+
+
+void MacroAssembler::SafePush(const Immediate& x) {
+ if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
+ push(Immediate(x.x_ ^ jit_cookie()));
+ xor_(Operand(esp, 0), Immediate(jit_cookie()));
+ } else {
+ push(x);
+ }
+}
+
+
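The cookie transform is an involution, which is why the instruction pair is safe to emit: the attacker-influenced constant never appears verbatim in the code stream, yet the destination ends up holding it. A scalar sketch:

#include <cstdint>

inline uint32_t SplitConstant(uint32_t x, uint32_t cookie, uint32_t* embedded) {
  *embedded = x ^ cookie;    // What Set()/push() place in the instruction.
  return *embedded ^ cookie; // What the emitted xor_ reconstructs: x again.
}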
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
@@ -187,19 +277,43 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
}
+void MacroAssembler::CheckFastElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Map::kMaximumBitField2FastElementValue);
+ j(above, fail, distance);
+}
+
+
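The single unsigned compare works because fast element kinds are assumed to occupy the low values of the bit_field2 byte, so one j(above) rejects every slow kind. Schematically:

#include <cstdint>

// kMaxFast mirrors Map::kMaximumBitField2FastElementValue.
inline bool HasFastElements(uint8_t bit_field2, uint8_t kMaxFast) {
  return bit_field2 <= kMaxFast;  // cmpb + j(above, fail) is the negation.
}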
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- bool is_heap_object) {
- if (!is_heap_object) {
- test(obj, Immediate(kSmiTagMask));
- j(zero, fail);
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
}
cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
j(not_equal, fail);
}
+void MacroAssembler::DispatchMap(Register obj,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
+ Label fail;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, &fail);
+ }
+ cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
+ j(equal, success);
+
+ bind(&fail);
+}
+
+
Condition MacroAssembler::IsObjectStringType(Register heap_object,
Register map,
Register instance_type) {
@@ -224,8 +338,9 @@ void MacroAssembler::IsInstanceJSObjectType(Register map,
Register scratch,
Label* fail) {
movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
- sub(Operand(scratch), Immediate(FIRST_JS_OBJECT_TYPE));
- cmp(scratch, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
+ sub(Operand(scratch), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ cmp(scratch,
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
j(above, fail);
}
@@ -247,10 +362,9 @@ void MacroAssembler::FCmp() {
void MacroAssembler::AbortIfNotNumber(Register object) {
Label ok;
- test(object, Immediate(kSmiTagMask));
- j(zero, &ok);
+ JumpIfSmi(object, &ok);
cmp(FieldOperand(object, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ isolate()->factory()->heap_number_map());
Assert(equal, "Operand not a number");
bind(&ok);
}
@@ -285,15 +399,15 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(esi);
push(Immediate(Smi::FromInt(type)));
push(Immediate(CodeObject()));
- if (FLAG_debug_code) {
- cmp(Operand(esp, 0), Immediate(Factory::undefined_value()));
+ if (emit_debug_code()) {
+ cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
Check(not_equal, "code object not properly patched");
}
}
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
Immediate(Smi::FromInt(type)));
Check(equal, "stack frame types must match");
@@ -316,8 +430,10 @@ void MacroAssembler::EnterExitFramePrologue() {
push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
- ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
- ExternalReference context_address(Top::k_context_address);
+ ExternalReference c_entry_fp_address(Isolate::k_c_entry_fp_address,
+ isolate());
+ ExternalReference context_address(Isolate::k_context_address,
+ isolate());
mov(Operand::StaticVariable(c_entry_fp_address), ebp);
mov(Operand::StaticVariable(context_address), esi);
}
@@ -339,7 +455,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
}
// Get the required frame alignment for the OS.
- static const int kFrameAlignment = OS::ActivationFrameAlignment();
+ const int kFrameAlignment = OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
ASSERT(IsPowerOf2(kFrameAlignment));
and_(esp, -kFrameAlignment);
@@ -358,7 +474,8 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) {
mov(edi, Operand(eax));
lea(esi, Operand(ebp, eax, times_4, offset));
- EnterExitFrameEpilogue(2, save_doubles);
+ // Reserve space for argc, argv and isolate.
+ EnterExitFrameEpilogue(3, save_doubles);
}
@@ -394,14 +511,15 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
void MacroAssembler::LeaveExitFrameEpilogue() {
// Restore current context from top and clear it in debug mode.
- ExternalReference context_address(Top::k_context_address);
+ ExternalReference context_address(Isolate::k_context_address, isolate());
mov(esi, Operand::StaticVariable(context_address));
#ifdef DEBUG
mov(Operand::StaticVariable(context_address), Immediate(0));
#endif
// Clear the top frame.
- ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
+ ExternalReference c_entry_fp_address(Isolate::k_c_entry_fp_address,
+ isolate());
mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
}
@@ -435,15 +553,19 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
push(Immediate(0)); // NULL frame pointer.
}
// Save the current handler as the next handler.
- push(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+ push(Operand::StaticVariable(ExternalReference(Isolate::k_handler_address,
+ isolate())));
// Link this handler as the new current one.
- mov(Operand::StaticVariable(ExternalReference(Top::k_handler_address)), esp);
+ mov(Operand::StaticVariable(ExternalReference(Isolate::k_handler_address,
+ isolate())),
+ esp);
}
void MacroAssembler::PopTryHandler() {
ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
- pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+ pop(Operand::StaticVariable(ExternalReference(Isolate::k_handler_address,
+ isolate())));
add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
}
@@ -458,7 +580,8 @@ void MacroAssembler::Throw(Register value) {
}
// Drop the sp to the top of the handler.
- ExternalReference handler_address(Top::k_handler_address);
+ ExternalReference handler_address(Isolate::k_handler_address,
+ isolate());
mov(esp, Operand::StaticVariable(handler_address));
// Restore next handler and frame pointer, discard handler state.
@@ -472,9 +595,9 @@ void MacroAssembler::Throw(Register value) {
// not NULL. The frame pointer is NULL in the exception handler of
// a JS entry frame.
Set(esi, Immediate(0)); // Tentatively set context pointer to NULL.
- NearLabel skip;
+ Label skip;
cmp(ebp, 0);
- j(equal, &skip, not_taken);
+ j(equal, &skip, Label::kNear);
mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
bind(&skip);
@@ -494,16 +617,17 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
}
// Drop sp to the top stack handler.
- ExternalReference handler_address(Top::k_handler_address);
+ ExternalReference handler_address(Isolate::k_handler_address,
+ isolate());
mov(esp, Operand::StaticVariable(handler_address));
// Unwind the handlers until the ENTRY handler is found.
- NearLabel loop, done;
+ Label loop, done;
bind(&loop);
// Load the type of the current stack handler.
const int kStateOffset = StackHandlerConstants::kStateOffset;
cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
- j(equal, &done);
+ j(equal, &done, Label::kNear);
// Fetch the next handler in the list.
const int kNextOffset = StackHandlerConstants::kNextOffset;
mov(esp, Operand(esp, kNextOffset));
@@ -516,12 +640,15 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
+ ExternalReference external_caught(
+ Isolate::k_external_caught_exception_address,
+ isolate());
mov(eax, false);
mov(Operand::StaticVariable(external_caught), eax);
// Set pending exception and eax to out of memory exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
+ ExternalReference pending_exception(Isolate::k_pending_exception_address,
+ isolate());
mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
mov(Operand::StaticVariable(pending_exception), eax);
}
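The unwind loop above walks a singly linked list of try-handlers threaded through the stack itself; esp is repointed link by link until an ENTRY handler is reached. A compact model (field layout hypothetical):

struct StackHandler {
  StackHandler* next;  // StackHandlerConstants::kNextOffset (asserted to be 0).
  int state;           // StackHandlerConstants::kStateOffset.
};

inline StackHandler* UnwindToEntry(StackHandler* top, int entry_state) {
  while (top->state != entry_state) {
    top = top->next;   // mov(esp, Operand(esp, kNextOffset))
  }
  return top;
}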
@@ -550,7 +677,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
mov(scratch, Operand(ebp, StandardFrameConstants::kContextOffset));
// When generating debug code, make sure the lexical context is set.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
cmp(Operand(scratch), Immediate(0));
Check(not_equal, "we should not have an empty lexical context");
}
@@ -560,18 +687,18 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check the context is a global context.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
push(scratch);
// Read the first word and compare to global_context_map.
mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- cmp(scratch, Factory::global_context_map());
+ cmp(scratch, isolate()->factory()->global_context_map());
Check(equal, "JSGlobalObject::global_context should be a global context.");
pop(scratch);
}
// Check if both contexts are the same.
cmp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
- j(equal, &same_contexts, taken);
+ j(equal, &same_contexts);
// Compare security tokens, save holder_reg on the stack so we can use it
// as a temporary register.
@@ -584,14 +711,14 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
mov(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
// Check the context is a global context.
- if (FLAG_debug_code) {
- cmp(holder_reg, Factory::null_value());
+ if (emit_debug_code()) {
+ cmp(holder_reg, isolate()->factory()->null_value());
Check(not_equal, "JSGlobalProxy::context() should not be null.");
push(holder_reg);
    // Read the first word and compare to global_context_map().
mov(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- cmp(holder_reg, Factory::global_context_map());
+ cmp(holder_reg, isolate()->factory()->global_context_map());
Check(equal, "JSGlobalObject::global_context should be a global context.");
pop(holder_reg);
}
@@ -601,7 +728,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
mov(scratch, FieldOperand(scratch, token_offset));
cmp(scratch, FieldOperand(holder_reg, token_offset));
pop(holder_reg);
- j(not_equal, miss, not_taken);
+ j(not_equal, miss);
bind(&same_contexts);
}
@@ -611,7 +738,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
// Just return if allocation top is already known.
if ((flags & RESULT_CONTAINS_TOP) != 0) {
@@ -637,13 +764,13 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
test(result_end, Immediate(kObjectAlignmentMask));
Check(zero, "Unaligned allocation in new space");
}
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
// Update new top. Use scratch if available.
if (scratch.is(no_reg)) {
@@ -661,7 +788,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
mov(result, Immediate(0x7091));
if (result_end.is_valid()) {
@@ -683,15 +810,15 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
if (!top_reg.is(result)) {
mov(top_reg, result);
}
add(Operand(top_reg), Immediate(object_size));
- j(carry, gc_required, not_taken);
+ j(carry, gc_required);
cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
- j(above, gc_required, not_taken);
+ j(above, gc_required);
// Update allocation top.
UpdateAllocationTopHelper(top_reg, scratch);
@@ -718,7 +845,7 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
mov(result, Immediate(0x7091));
mov(result_end, Immediate(0x7191));
@@ -737,7 +864,7 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
// We assume that element_count*element_size + header_size does not
// overflow.
@@ -764,7 +891,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
mov(result, Immediate(0x7091));
mov(result_end, Immediate(0x7191));
@@ -783,14 +910,14 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
if (!object_size.is(result_end)) {
mov(result_end, object_size);
}
add(result_end, Operand(result));
- j(carry, gc_required, not_taken);
+ j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
- j(above, gc_required, not_taken);
+ j(above, gc_required);
// Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -804,7 +931,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
void MacroAssembler::UndoAllocationInNewSpace(Register object) {
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
and_(Operand(object), Immediate(~kHeapObjectTagMask));
@@ -830,7 +957,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
// Set the map.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
+ Immediate(isolate()->factory()->heap_number_map()));
}
@@ -860,7 +987,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::string_map()));
+ Immediate(isolate()->factory()->string_map()));
mov(scratch1, length);
SmiTag(scratch1);
mov(FieldOperand(result, String::kLengthOffset), scratch1);
@@ -895,7 +1022,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::ascii_string_map()));
+ Immediate(isolate()->factory()->ascii_string_map()));
mov(scratch1, length);
SmiTag(scratch1);
mov(FieldOperand(result, String::kLengthOffset), scratch1);
@@ -921,7 +1048,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::ascii_string_map()));
+ Immediate(isolate()->factory()->ascii_string_map()));
mov(FieldOperand(result, String::kLengthOffset),
Immediate(Smi::FromInt(length)));
mov(FieldOperand(result, String::kHashFieldOffset),
@@ -943,7 +1070,7 @@ void MacroAssembler::AllocateConsString(Register result,
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::cons_string_map()));
+ Immediate(isolate()->factory()->cons_string_map()));
}
@@ -961,7 +1088,7 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::cons_ascii_string_map()));
+ Immediate(isolate()->factory()->cons_ascii_string_map()));
}
@@ -1014,27 +1141,14 @@ void MacroAssembler::CopyBytes(Register source,
}
-void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
- Register result,
- Register op,
- JumpTarget* then_target) {
- JumpTarget ok;
- test(result, Operand(result));
- ok.Branch(not_zero, taken);
- test(op, Operand(op));
- then_target->Branch(sign, not_taken);
- ok.Bind();
-}
-
-
void MacroAssembler::NegativeZeroTest(Register result,
Register op,
Label* then_label) {
Label ok;
test(result, Operand(result));
- j(not_zero, &ok, taken);
+ j(not_zero, &ok);
test(op, Operand(op));
- j(sign, then_label, not_taken);
+ j(sign, then_label);
bind(&ok);
}
@@ -1046,10 +1160,10 @@ void MacroAssembler::NegativeZeroTest(Register result,
Label* then_label) {
Label ok;
test(result, Operand(result));
- j(not_zero, &ok, taken);
+ j(not_zero, &ok);
mov(scratch, Operand(op1));
or_(scratch, Operand(op2));
- j(sign, then_label, not_taken);
+ j(sign, then_label);
bind(&ok);
}
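What both overloads guard against: an integer multiply that produced 0 from a negative operand really stands for JavaScript -0, which an int32 cannot represent. In scalar form:

#include <cstdint>

// Matches the two-operand overload: or-ing the operands makes the sign bit
// of the combined value the "some operand was negative" flag that j(sign, ...)
// consumes.
inline bool LosesNegativeZero(int32_t result, int32_t op1, int32_t op2) {
  return result == 0 && (op1 | op2) < 0;
}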
@@ -1059,18 +1173,17 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
Register scratch,
Label* miss) {
// Check that the receiver isn't a smi.
- test(function, Immediate(kSmiTagMask));
- j(zero, miss, not_taken);
+ JumpIfSmi(function, miss);
// Check that the function really is a function.
CmpObjectType(function, JS_FUNCTION_TYPE, result);
- j(not_equal, miss, not_taken);
+ j(not_equal, miss);
// Make sure that the function has an instance prototype.
Label non_instance;
movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance, not_taken);
+ j(not_zero, &non_instance);
// Get the prototype or initial map from the function.
mov(result,
@@ -1079,8 +1192,8 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// If the prototype or initial map is the hole, don't return it and
// simply miss the cache instead. This will allow us to allocate a
// prototype object on-demand in the runtime system.
- cmp(Operand(result), Immediate(Factory::the_hole_value()));
- j(equal, miss, not_taken);
+ cmp(Operand(result), Immediate(isolate()->factory()->the_hole_value()));
+ j(equal, miss);
// If the function does not have an initial map, we're done.
Label done;
@@ -1101,9 +1214,9 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
}
-void MacroAssembler::CallStub(CodeStub* stub) {
+void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- call(stub->GetCode(), RelocInfo::CODE_TARGET);
+ call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
@@ -1145,7 +1258,7 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
add(Operand(esp), Immediate(num_arguments * kPointerSize));
}
- mov(eax, Immediate(Factory::undefined_value()));
+ mov(eax, Immediate(isolate()->factory()->undefined_value()));
}
@@ -1174,9 +1287,9 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- Runtime::Function* function = Runtime::FunctionForId(id);
+ const Runtime::Function* function = Runtime::FunctionForId(id);
Set(eax, Immediate(function->nargs));
- mov(ebx, Immediate(ExternalReference(function)));
+ mov(ebx, Immediate(ExternalReference(function, isolate())));
CEntryStub ces(1);
ces.SaveDoubles();
CallStub(&ces);
@@ -1189,7 +1302,8 @@ MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
}
-void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
@@ -1203,19 +1317,19 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// should remove this need and make the runtime routine entry code
// smarter.
Set(eax, Immediate(num_arguments));
- mov(ebx, Immediate(ExternalReference(f)));
+ mov(ebx, Immediate(ExternalReference(f, isolate())));
CEntryStub ces(1);
CallStub(&ces);
}
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::Function* f,
+MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
int num_arguments) {
if (f->nargs >= 0 && f->nargs != num_arguments) {
IllegalOperation(num_arguments);
// Since we did not call the stub, there was no allocation failure.
// Return some non-failure object.
- return Heap::undefined_value();
+ return isolate()->heap()->undefined_value();
}
// TODO(1236192): Most runtime routines don't need the number of
@@ -1223,7 +1337,7 @@ MaybeObject* MacroAssembler::TryCallRuntime(Runtime::Function* f,
// should remove this need and make the runtime routine entry code
// smarter.
Set(eax, Immediate(num_arguments));
- mov(ebx, Immediate(ExternalReference(f)));
+ mov(ebx, Immediate(ExternalReference(f, isolate())));
CEntryStub ces(1);
return TryCallStub(&ces);
}
@@ -1265,7 +1379,9 @@ MaybeObject* MacroAssembler::TryTailCallExternalReference(
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
- TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
+ TailCallExternalReference(ExternalReference(fid, isolate()),
+ num_arguments,
+ result_size);
}
@@ -1273,7 +1389,7 @@ MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
return TryTailCallExternalReference(
- ExternalReference(fid), num_arguments, result_size);
+ ExternalReference(fid, isolate()), num_arguments, result_size);
}
@@ -1320,7 +1436,7 @@ void MacroAssembler::PrepareCallApiFunction(int argc, Register scratch) {
// pointer to out cell.
lea(scratch, Operand(esp, (argc + 1) * kPointerSize));
mov(Operand(esp, 0 * kPointerSize), scratch); // output.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(Operand(esp, (argc + 1) * kPointerSize), Immediate(0)); // out cell.
}
}
@@ -1358,7 +1474,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
// Check if the result handle holds 0.
test(eax, Operand(eax));
- j(zero, &empty_handle, not_taken);
+ j(zero, &empty_handle);
// It was non-zero. Dereference to get the result value.
mov(eax, Operand(eax, 0));
bind(&prologue);
@@ -1368,15 +1484,15 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
sub(Operand::StaticVariable(level_address), Immediate(1));
Assert(above_equal, "Invalid HandleScope level");
cmp(edi, Operand::StaticVariable(limit_address));
- j(not_equal, &delete_allocated_handles, not_taken);
+ j(not_equal, &delete_allocated_handles);
bind(&leave_exit_frame);
// Check if the function scheduled an exception.
ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address();
+ ExternalReference::scheduled_exception_address(isolate());
cmp(Operand::StaticVariable(scheduled_exception_address),
- Immediate(Factory::the_hole_value()));
- j(not_equal, &promote_scheduled_exception, not_taken);
+ Immediate(isolate()->factory()->the_hole_value()));
+ j(not_equal, &promote_scheduled_exception);
LeaveApiExitFrame();
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
@@ -1387,14 +1503,17 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
}
bind(&empty_handle);
// It was zero; the result is undefined.
- mov(eax, Factory::undefined_value());
+ mov(eax, isolate()->factory()->undefined_value());
jmp(&prologue);
// HandleScope limit has changed. Delete allocated extensions.
+ ExternalReference delete_extensions =
+ ExternalReference::delete_handle_scope_extensions(isolate());
bind(&delete_allocated_handles);
mov(Operand::StaticVariable(limit_address), edi);
mov(edi, eax);
- mov(eax, Immediate(ExternalReference::delete_handle_scope_extensions()));
+ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
+ mov(eax, Immediate(delete_extensions));
call(Operand(eax));
mov(eax, edi);
jmp(&leave_exit_frame);
@@ -1420,13 +1539,32 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference(
}
+void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
+ // This macro takes the dst register to make the code more readable
+ // at the call sites. However, the dst register has to be ecx to
+ // follow the calling convention which requires the call type to be
+ // in ecx.
+ ASSERT(dst.is(ecx));
+ if (call_kind == CALL_AS_FUNCTION) {
+ // Set to some non-zero smi by updating the least significant
+ // byte.
+ mov_b(Operand(dst), 1 << kSmiTagSize);
+ } else {
+ // Set to smi zero by clearing the register.
+ xor_(dst, Operand(dst));
+ }
+}
+
+
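The marker placed in ecx is itself a smi, so downstream code can treat it like any tagged small integer. Assuming kSmiTagSize == 1, the two raw encodings are:

#include <cstdint>

inline int32_t CallKindMarker(bool call_as_function) {
  return call_as_function ? (1 << 1)  // smi 1: mov_b(dst, 1 << kSmiTagSize).
                          : 0;        // smi 0: xor_(dst, dst).
}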
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
const Operand& code_operand,
Label* done,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ Label::Distance done_near,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
bool definitely_matches = false;
Label invoke;
if (expected.is_immediate()) {
@@ -1467,7 +1605,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (!definitely_matches) {
Handle<Code> adaptor =
- Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (!code_constant.is_null()) {
mov(edx, Immediate(code_constant));
add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -1476,10 +1614,13 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+ SetCallKind(ecx, call_kind);
call(adaptor, RelocInfo::CODE_TARGET);
- if (post_call_generator != NULL) post_call_generator->Generate();
- jmp(done);
+ call_wrapper.AfterCall();
+ jmp(done, done_near);
} else {
+ SetCallKind(ecx, call_kind);
jmp(adaptor, RelocInfo::CODE_TARGET);
}
bind(&invoke);
@@ -1491,15 +1632,20 @@ void MacroAssembler::InvokeCode(const Operand& code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, flag, post_call_generator);
+ &done, flag, Label::kNear, call_wrapper,
+ call_kind);
if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ SetCallKind(ecx, call_kind);
call(code);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(ecx, call_kind);
jmp(code);
}
bind(&done);
@@ -1511,16 +1657,20 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
Label done;
Operand dummy(eax);
- InvokePrologue(expected, actual, code, dummy, &done,
- flag, post_call_generator);
+ InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear,
+ call_wrapper, call_kind);
if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code, rmode));
+ SetCallKind(ecx, call_kind);
call(code, rmode);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(ecx, call_kind);
jmp(code, rmode);
}
bind(&done);
@@ -1530,7 +1680,8 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
ASSERT(fun.is(edi));
mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -1539,14 +1690,15 @@ void MacroAssembler::InvokeFunction(Register fun,
ParameterCount expected(ebx);
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, post_call_generator);
+ expected, actual, flag, call_wrapper, call_kind);
}
void MacroAssembler::InvokeFunction(JSFunction* function,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
ASSERT(function->is_compiled());
// Get the function and setup the context.
mov(edi, Immediate(Handle<JSFunction>(function)));
@@ -1558,18 +1710,18 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
// code field in the function to allow recompilation to take effect
// without changing any of the call sites.
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, post_call_generator);
+ expected, actual, flag, call_wrapper, call_kind);
} else {
Handle<Code> code(function->code());
InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET,
- flag, post_call_generator);
+ flag, call_wrapper, call_kind);
}
}
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ const CallWrapper& call_wrapper) {
// Calls are not allowed in some stubs.
ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
@@ -1579,7 +1731,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
ParameterCount expected(0);
GetBuiltinFunction(edi, id);
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, expected, flag, post_call_generator);
+ expected, expected, flag, call_wrapper, CALL_AS_METHOD);
}
void MacroAssembler::GetBuiltinFunction(Register target,
@@ -1603,12 +1755,9 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- mov(dst, Operand(esi, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- mov(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
- mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
} else {
// Slot is in the current function context. Move it into the
@@ -1617,14 +1766,17 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
mov(dst, esi);
}
- // We should not have found a 'with' context by walking the context chain
- // (i.e., the static scope chain and runtime context chain do not agree).
- // A variable occurring in such a scope should have slot type LOOKUP and
- // not CONTEXT.
- if (FLAG_debug_code) {
- cmp(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- Check(equal, "Yo dawg, I heard you liked function contexts "
- "so I put function contexts in all your contexts");
+ // We should not have found a with or catch context by walking the context
+ // chain (i.e., the static scope chain and runtime context chain do not
+ // agree). A variable occurring in such a scope should have slot type
+ // LOOKUP and not CONTEXT.
+ if (emit_debug_code()) {
+ cmp(FieldOperand(dst, HeapObject::kMapOffset),
+ isolate()->factory()->with_context_map());
+ Check(not_equal, "Variable resolved to with context.");
+ cmp(FieldOperand(dst, HeapObject::kMapOffset),
+        isolate()->factory()->catch_context_map());
+ Check(not_equal, "Variable resolved to catch context.");
}
}
@@ -1643,9 +1795,9 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map) {
// Load the initial map. The global functions all have initial maps.
mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok, fail;
- CheckMap(map, Factory::meta_map(), &fail, false);
+ CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
jmp(&ok);
bind(&fail);
Abort("Global functions must have initial map");
@@ -1787,18 +1939,19 @@ void MacroAssembler::DecrementCounter(Condition cc,
void MacroAssembler::Assert(Condition cc, const char* msg) {
- if (FLAG_debug_code) Check(cc, msg);
+ if (emit_debug_code()) Check(cc, msg);
}
void MacroAssembler::AssertFastElements(Register elements) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
+ Factory* factory = isolate()->factory();
Label ok;
cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(factory->fixed_array_map()));
j(equal, &ok);
cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(Factory::fixed_cow_array_map()));
+ Immediate(factory->fixed_cow_array_map()));
j(equal, &ok);
Abort("JSObject with fast elements map has slow elements");
bind(&ok);
@@ -1808,7 +1961,7 @@ void MacroAssembler::AssertFastElements(Register elements) {
void MacroAssembler::Check(Condition cc, const char* msg) {
Label L;
- j(cc, &L, taken);
+ j(cc, &L);
Abort(msg);
  // Control will not return here.
bind(&L);
@@ -1857,56 +2010,14 @@ void MacroAssembler::Abort(const char* msg) {
}
-void MacroAssembler::JumpIfNotNumber(Register reg,
- TypeInfo info,
- Label* on_not_number) {
- if (FLAG_debug_code) AbortIfSmi(reg);
- if (!info.IsNumber()) {
- cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
- j(not_equal, on_not_number);
- }
-}
-
-
-void MacroAssembler::ConvertToInt32(Register dst,
- Register source,
- Register scratch,
- TypeInfo info,
- Label* on_not_int32) {
- if (FLAG_debug_code) {
- AbortIfSmi(source);
- AbortIfNotNumber(source);
- }
- if (info.IsInteger32()) {
- cvttsd2si(dst, FieldOperand(source, HeapNumber::kValueOffset));
- } else {
- Label done;
- bool push_pop = (scratch.is(no_reg) && dst.is(source));
- ASSERT(!scratch.is(source));
- if (push_pop) {
- push(dst);
- scratch = dst;
- }
- if (scratch.is(no_reg)) scratch = dst;
- cvttsd2si(scratch, FieldOperand(source, HeapNumber::kValueOffset));
- cmp(scratch, 0x80000000u);
- if (push_pop) {
- j(not_equal, &done);
- pop(dst);
- jmp(on_not_int32);
- } else {
- j(equal, on_not_int32);
- }
-
- bind(&done);
- if (push_pop) {
- add(Operand(esp), Immediate(kPointerSize)); // Pop.
- }
- if (!scratch.is(dst)) {
- mov(dst, scratch);
- }
- }
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+ Register descriptors) {
+ mov(descriptors,
+ FieldOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
+ Label not_smi;
+ JumpIfNotSmi(descriptors, &not_smi);
+ mov(descriptors, isolate()->factory()->empty_descriptor_array());
+ bind(&not_smi);
}
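The smi check disambiguates the overloaded map slot: it holds either a pointer to a descriptor array or, when the map has none, bit field 3 stored as a smi. A sketch of the dispatch (helper name hypothetical):

#include <cstdint>

inline void* InstanceDescriptors(intptr_t slot_word,
                                 void* empty_descriptor_array) {
  bool is_smi = (slot_word & 1) == 0;  // kSmiTag == 0, tag lives in bit 0.
  return is_smi ? empty_descriptor_array  // No descriptors: canonical empty.
                : reinterpret_cast<void*>(slot_word);  // Already a pointer.
}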
@@ -1944,8 +2055,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
ASSERT_EQ(0, kSmiTag);
mov(scratch1, Operand(object1));
and_(scratch1, Operand(object2));
- test(scratch1, Immediate(kSmiTagMask));
- j(zero, failure);
+ JumpIfSmi(scratch1, failure);
// Load instance type for both strings.
mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
@@ -1968,14 +2078,14 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
- int frameAlignment = OS::ActivationFrameAlignment();
- if (frameAlignment != 0) {
+ int frame_alignment = OS::ActivationFrameAlignment();
+ if (frame_alignment != 0) {
// Make stack end at alignment and make room for num_arguments words
// and the original value of esp.
mov(scratch, esp);
sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
- ASSERT(IsPowerOf2(frameAlignment));
- and_(esp, -frameAlignment);
+ ASSERT(IsPowerOf2(frame_alignment));
+ and_(esp, -frame_alignment);
mov(Operand(esp, num_arguments * kPointerSize), scratch);
} else {
sub(Operand(esp), Immediate(num_arguments * kPointerSize));
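The alignment recipe in pointer arithmetic: lower esp past the argument area plus one save slot, round down to the alignment boundary (the and_ with a negative power of two), and keep the old esp in the slot above the arguments so it can be restored after the call. A sketch:

#include <cstdint>

// frame_alignment must be a power of two, as the ASSERT above enforces.
inline uintptr_t AlignStackForCall(uintptr_t sp, int num_arguments,
                                   uintptr_t frame_alignment) {
  uintptr_t lowered = sp - (num_arguments + 1) * sizeof(void*);
  return lowered & ~(frame_alignment - 1);  // Same bits as and_(esp, -align).
}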
@@ -1994,7 +2104,7 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function,
int num_arguments) {
// Check stack alignment.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
CheckStackAlignment();
}
@@ -2002,13 +2112,15 @@ void MacroAssembler::CallCFunction(Register function,
if (OS::ActivationFrameAlignment() != 0) {
mov(esp, Operand(esp, num_arguments * kPointerSize));
} else {
- add(Operand(esp), Immediate(num_arguments * sizeof(int32_t)));
+ add(Operand(esp), Immediate(num_arguments * kPointerSize));
}
}
CodePatcher::CodePatcher(byte* address, int size)
- : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+ : address_(address),
+ size_(size),
+ masm_(Isolate::Current(), address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 580aad1a5..a638517da 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,7 +29,7 @@
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
#include "assembler.h"
-#include "type-info.h"
+#include "v8globals.h"
namespace v8 {
namespace internal {
@@ -45,18 +45,19 @@ enum AllocationFlags {
RESULT_CONTAINS_TOP = 1 << 1
};
+
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
-// Forward declaration.
-class JumpTarget;
-class PostCallGenerator;
-
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- MacroAssembler(void* buffer, int size);
+ // The isolate parameter can be NULL if the macro assembler should
+ // not use isolate-dependent functionality. In this case, it's the
+  // responsibility of the caller to never invoke such a function on the
+ // macro assembler.
+ MacroAssembler(Isolate* isolate, void* buffer, int size);
// ---------------------------------------------------------------------------
// GC Support
@@ -70,11 +71,11 @@ class MacroAssembler: public Assembler {
// Check if object is in new space.
// scratch can be object itself, but it will be clobbered.
- template <typename LabelType>
void InNewSpace(Register object,
Register scratch,
Condition cc, // equal for new space, not_equal otherwise.
- LabelType* branch);
+ Label* branch,
+ Label::Distance branch_near = Label::kFar);
// For page containing |object| mark region covering [object+offset]
// dirty. |object| is the object being stored into, |value| is the
@@ -152,37 +153,46 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// JavaScript invokes
+  // Set up call kind marking in ecx. The method takes ecx as an
+ // explicit first parameter to make the code more readable at the
+ // call sites.
+ void SetCallKind(Register dst, CallKind kind);
+
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(const Operand& code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
void InvokeFunction(JSFunction* function,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper = NullCallWrapper());
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
@@ -194,6 +204,11 @@ class MacroAssembler: public Assembler {
void Set(Register dst, const Immediate& x);
void Set(const Operand& dst, const Immediate& x);
+ // Support for constant splitting.
+ bool IsUnsafeImmediate(const Immediate& x);
+ void SafeSet(Register dst, const Immediate& x);
+ void SafePush(const Immediate& x);
+
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
@@ -201,13 +216,27 @@ class MacroAssembler: public Assembler {
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
- // Check if the map of an object is equal to a specified map and
- // branch to label if not. Skip the smi check if not required
- // (object is known to be a heap object)
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
+ // Check if the map of an object is equal to a specified map and branch to
+ // label if not. Skip the smi check if not required (object is known to be a
+ // heap object)
void CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- bool is_heap_object);
+ SmiCheckType smi_check_type);
+
+ // Check if the map of an object is equal to a specified map and branch to a
+ // specified target if equal. Skip the smi check if not required (object is
+ // known to be a heap object)
+ void DispatchMap(Register obj,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type);
// Check if the object in register heap_object is a string. Afterwards the
// register map contains the object map and the register instance_type
@@ -234,6 +263,13 @@ class MacroAssembler: public Assembler {
  // jcc instructions (je, ja, jae, jb, jbe, and jz).
void FCmp();
+ void ClampUint8(Register reg);
+
+ void ClampDoubleToUint8(XMMRegister input_reg,
+ XMMRegister scratch_reg,
+ Register result_reg);
+
+
// Smi tagging support.
void SmiTag(Register reg) {
ASSERT(kSmiTag == 0);
@@ -245,16 +281,6 @@ class MacroAssembler: public Assembler {
}
// Modifies the register even if it does not contain a Smi!
- void SmiUntag(Register reg, TypeInfo info, Label* non_smi) {
- ASSERT(kSmiTagSize == 1);
- sar(reg, kSmiTagSize);
- if (info.IsSmi()) {
- ASSERT(kSmiTag == 0);
- j(carry, non_smi);
- }
- }
-
- // Modifies the register even if it does not contain a Smi!
void SmiUntag(Register reg, Label* is_smi) {
ASSERT(kSmiTagSize == 1);
sar(reg, kSmiTagSize);
@@ -263,26 +289,28 @@ class MacroAssembler: public Assembler {
}
  // Jump if the register contains a smi.
- inline void JumpIfSmi(Register value, Label* smi_label) {
+ inline void JumpIfSmi(Register value,
+ Label* smi_label,
+ Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
- j(zero, smi_label, not_taken);
+ j(zero, smi_label, distance);
+ }
+ // Jump if the operand is a smi.
+ inline void JumpIfSmi(Operand value,
+ Label* smi_label,
+ Label::Distance distance = Label::kFar) {
+ test(value, Immediate(kSmiTagMask));
+ j(zero, smi_label, distance);
}
  // Jump if the register contains a non-smi.
- inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
+ inline void JumpIfNotSmi(Register value,
+ Label* not_smi_label,
+ Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
- j(not_zero, not_smi_label, not_taken);
+ j(not_zero, not_smi_label, distance);
}
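For reference, the encoding these helpers assume (kSmiTag == 0, kSmiTagSize == 1 on ia32): a smi is the integer shifted left once, leaving bit 0 clear as the tag. In C++ (arithmetic right shift assumed, matching sar):

#include <cstdint>

inline int32_t SmiTagValue(int32_t value) { return value << 1; }  // add(reg, reg)
inline int32_t SmiUntagValue(int32_t smi) { return smi >> 1; }    // sar(reg, 1)
inline bool IsSmiWord(int32_t word) { return (word & 1) == 0; }   // kSmiTagMask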
- // Assumes input is a heap object.
- void JumpIfNotNumber(Register reg, TypeInfo info, Label* on_not_number);
-
- // Assumes input is a heap number. Jumps on things out of range. Also jumps
- // on the min negative int32. Ignores frational parts.
- void ConvertToInt32(Register dst,
- Register src, // Can be the same as dst.
- Register scratch, // Can be no_reg or dst, but not src.
- TypeInfo info,
- Label* on_not_int32);
+ void LoadInstanceDescriptors(Register map, Register descriptors);
void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
@@ -420,12 +448,6 @@ class MacroAssembler: public Assembler {
// Check if result is zero and op is negative.
void NegativeZeroTest(Register result, Register op, Label* then_label);
- // Check if result is zero and op is negative in code using jump targets.
- void NegativeZeroTest(CodeGenerator* cgen,
- Register result,
- Register op,
- JumpTarget* then_target);
-
// Check if result is zero and any of op1 and op2 are negative.
// Register scratch is destroyed, and it must be different from op2.
void NegativeZeroTest(Register result, Register op1, Register op2,
@@ -455,7 +477,7 @@ class MacroAssembler: public Assembler {
// Runtime calls
// Call a code stub. Generate the code if necessary.
- void CallStub(CodeStub* stub);
+ void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
// Call a code stub and return the code object called. Try to generate
// the code if necessary. Do not perform a GC but instead return a retry
@@ -474,13 +496,13 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(Runtime::Function* f, int num_arguments);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
// Call a runtime function, returning the CodeStub object called.
// Try to generate the stub code if necessary. Do not perform a GC
// but instead return a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::Function* f,
+ MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
@@ -580,7 +602,13 @@ class MacroAssembler: public Assembler {
void Move(Register target, Handle<Object> value);
- Handle<Object> CodeObject() { return code_object_; }
+ // Push a handle value.
+ void Push(Handle<Object> handle) { push(handle); }
+
+ Handle<Object> CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+ }
// ---------------------------------------------------------------------------
@@ -652,7 +680,9 @@ class MacroAssembler: public Assembler {
const Operand& code_operand,
Label* done,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ Label::Distance done_near = Label::kFar,
+ const CallWrapper& call_wrapper = NullCallWrapper(),
+ CallKind call_kind = CALL_AS_METHOD);
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -687,31 +717,6 @@ class MacroAssembler: public Assembler {
};
-template <typename LabelType>
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cc,
- LabelType* branch) {
- ASSERT(cc == equal || cc == not_equal);
- if (Serializer::enabled()) {
- // Can't do arithmetic on external references if it might get serialized.
- mov(scratch, Operand(object));
- // The mask isn't really an address. We load it as an external reference in
- // case the size of the new space is different between the snapshot maker
- // and the running system.
- and_(Operand(scratch), Immediate(ExternalReference::new_space_mask()));
- cmp(Operand(scratch), Immediate(ExternalReference::new_space_start()));
- j(cc, branch);
- } else {
- int32_t new_space_start = reinterpret_cast<int32_t>(
- ExternalReference::new_space_start().address());
- lea(scratch, Operand(object, -new_space_start));
- and_(scratch, Heap::NewSpaceMask());
- j(cc, branch);
- }
-}
-
-
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
@@ -732,17 +737,6 @@ class CodePatcher {
};
-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class PostCallGenerator {
- public:
- PostCallGenerator() { }
- virtual ~PostCallGenerator() { }
- virtual void Generate() = 0;
-};
-
-
// -----------------------------------------------------------------------------
// Static helper functions.
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 121344884..8db2e9b16 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -56,6 +56,7 @@ namespace internal {
*
* Each call to a public method should retain this convention.
* The stack will have the following structure:
+ * - Isolate* isolate (Address of the current isolate)
* - direct_call (if 1, direct call from JavaScript code, if 0
* call through the runtime system)
* - stack_area_base (High end of the memory area to use as
@@ -98,7 +99,7 @@ namespace internal {
RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
Mode mode,
int registers_to_save)
- : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+ : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -304,7 +305,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// The length of a capture should not be negative. This can only happen
// if the end of the capture is unrecorded, or at a point earlier than
// the start of the capture.
- BranchOrBacktrack(less, on_no_match, not_taken);
+ BranchOrBacktrack(less, on_no_match);
// If length is zero, either the capture is empty or it is completely
// uncaptured. In either case succeed immediately.
@@ -347,7 +348,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ add(Operand(edi), Immediate(1));
// Compare to end of match, and loop if not done.
__ cmp(edi, Operand(ebx));
- __ j(below, &loop, taken);
+ __ j(below, &loop);
__ jmp(&success);
__ bind(&fail);
@@ -371,14 +372,18 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ push(backtrack_stackpointer());
__ push(ebx);
- static const int argument_count = 3;
+ static const int argument_count = 4;
__ PrepareCallCFunction(argument_count, ecx);
// Put arguments into allocated stack area, last argument highest on stack.
// Parameters are
// Address byte_offset1 - Address captured substring's start.
// Address byte_offset2 - Address of current character position.
// size_t byte_length - length of capture in bytes(!)
+ // Isolate* isolate
+ // Set isolate.
+ __ mov(Operand(esp, 3 * kPointerSize),
+ Immediate(ExternalReference::isolate_address()));
// Set byte_length.
__ mov(Operand(esp, 2 * kPointerSize), ebx);
// Set byte_offset2.
@@ -392,7 +397,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ mov(Operand(esp, 0 * kPointerSize), edx);
ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16();
+ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
__ CallCFunction(compare, argument_count);
// Pop original values before reacting on result value.
__ pop(ebx);
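
For reference, the C callee that receives these four stack slots has the shape below. This is a simplified, self-contained stand-in, not the V8 implementation: the real comparator behind re_case_insensitive_compare_uc16 uses the isolate's unicode tables for case folding, while this sketch ignores the isolate argument and uses towlower().

    #include <cstddef>
    #include <stdint.h>
    #include <wctype.h>

    // Simplified stand-in for the compare helper. Argument order matches the
    // stack slots set up above: two buffer addresses, a byte length, and the
    // new trailing Isolate* (opaque and unused in this sketch).
    extern "C" int CaseInsensitiveCompareUC16(const uint16_t* byte_offset1,
                                              const uint16_t* byte_offset2,
                                              size_t byte_length,
                                              void* isolate /* unused here */) {
      size_t length = byte_length / sizeof(uint16_t);
      for (size_t i = 0; i < length; i++) {
        // Case-fold each 16-bit code unit; real V8 consults per-isolate tables.
        if (towlower(byte_offset1[i]) != towlower(byte_offset2[i])) {
          return 0;  // mismatch: the generated code will backtrack
        }
      }
      return 1;  // all units matched
    }
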
@@ -657,7 +662,7 @@ void RegExpMacroAssemblerIA32::Fail() {
}
-Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
+Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Finalize code - write the entry point code now that we know how many
// registers we need.
@@ -678,15 +683,15 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
Label stack_ok;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit();
+ ExternalReference::address_of_stack_limit(masm_->isolate());
__ mov(ecx, esp);
__ sub(ecx, Operand::StaticVariable(stack_limit));
// Handle it if the stack pointer is already below the stack limit.
- __ j(below_equal, &stack_limit_hit, not_taken);
+ __ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
__ cmp(ecx, num_registers_ * kPointerSize);
- __ j(above_equal, &stack_ok, taken);
+ __ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ mov(eax, EXCEPTION);
@@ -837,12 +842,15 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(edi);
// Call GrowStack(backtrack_stackpointer())
- static const int num_arguments = 2;
+ static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments, ebx);
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address()));
__ lea(eax, Operand(ebp, kStackHighEnd));
__ mov(Operand(esp, 1 * kPointerSize), eax);
__ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
- ExternalReference grow_stack = ExternalReference::re_grow_stack();
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(masm_->isolate());
__ CallCFunction(grow_stack, num_arguments);
// If it returns NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
@@ -866,11 +874,12 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(&code_desc);
- Handle<Code> code = Factory::NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- PROFILE(RegExpCodeCreateEvent(*code, *source));
- return Handle<Object>::cast(code);
+ Handle<Code> code =
+ masm_->isolate()->factory()->NewCode(code_desc,
+ Code::ComputeFlags(Code::REGEXP),
+ masm_->CodeObject());
+ PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+ return Handle<HeapObject>::cast(code);
}
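
The Factory and PROFILE changes here are the signature pattern of this whole upgrade: services that used to be process-wide statics (Factory, Counters, Logger, StubCache) become members reached through an Isolate, so several VMs can coexist in one process. A toy model of the migration, with invented minimal types rather than the real V8 classes:

    #include <cassert>

    // Toy model: per-VM services hang off an Isolate instead of being
    // globals, so two isolates never share state.
    struct Counters { int negative_lookups; Counters() : negative_lookups(0) {} };
    struct Factory  { /* would own handle and object creation for one heap */ };

    class Isolate {
     public:
      Counters* counters() { return &counters_; }
      Factory* factory() { return &factory_; }
     private:
      Counters counters_;
      Factory factory_;
    };

    int main() {
      Isolate a, b;                                 // two independent VMs
      a.counters()->negative_lookups++;             // touch one isolate...
      assert(b.counters()->negative_lookups == 0);  // ...the other is unaffected
      return 0;
    }
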
@@ -962,9 +971,9 @@ void RegExpMacroAssemblerIA32::ReadStackPointerFromRegister(int reg) {
}
void RegExpMacroAssemblerIA32::SetCurrentPositionFromEnd(int by) {
- NearLabel after_position;
+ Label after_position;
__ cmp(edi, -by * char_size());
- __ j(greater_equal, &after_position);
+ __ j(greater_equal, &after_position, Label::kNear);
__ mov(edi, -by * char_size());
// On RegExp code entry (where this operation is used), the character before
// the current position is expected to be already loaded.
@@ -1024,7 +1033,7 @@ void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
__ lea(eax, Operand(esp, -kPointerSize));
__ mov(Operand(esp, 0 * kPointerSize), eax);
ExternalReference check_stack_guard =
- ExternalReference::re_check_stack_guard_state();
+ ExternalReference::re_check_stack_guard_state(masm_->isolate());
__ CallCFunction(check_stack_guard, num_arguments);
}
@@ -1039,8 +1048,10 @@ static T& frame_entry(Address re_frame, int frame_offset) {
int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
- if (StackGuard::IsStackOverflow()) {
- Top::StackOverflow();
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ ASSERT(isolate == Isolate::Current());
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ isolate->StackOverflow();
return EXCEPTION;
}
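
CheckStackGuardState now recovers the isolate from the saved register frame (the kIsolate slot defined in the header below) instead of from global state. A plausible reading of the frame_entry helper visible in the hunk header, reduced to standalone C++ (the real definition sits just above this function in the source):

    #include <stdint.h>

    typedef uint8_t* Address;

    // Reinterpret the word at re_frame + frame_offset as a T lvalue. This is
    // how frame_entry<Isolate*>(re_frame, kIsolate) pulls the isolate back
    // out of the frame that the generated code saved on entry.
    template <typename T>
    static T& frame_entry(Address re_frame, int frame_offset) {
      return *reinterpret_cast<T*>(re_frame + frame_offset);
    }
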
@@ -1131,8 +1142,7 @@ void RegExpMacroAssemblerIA32::CheckPosition(int cp_offset,
void RegExpMacroAssemblerIA32::BranchOrBacktrack(Condition condition,
- Label* to,
- Hint hint) {
+ Label* to) {
if (condition < 0) { // No condition
if (to == NULL) {
Backtrack();
@@ -1142,10 +1152,10 @@ void RegExpMacroAssemblerIA32::BranchOrBacktrack(Condition condition,
return;
}
if (to == NULL) {
- __ j(condition, &backtrack_label_, hint);
+ __ j(condition, &backtrack_label_);
return;
}
- __ j(condition, to, hint);
+ __ j(condition, to);
}
@@ -1196,9 +1206,9 @@ void RegExpMacroAssemblerIA32::CheckPreemption() {
// Check for preemption.
Label no_preempt;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit();
+ ExternalReference::address_of_stack_limit(masm_->isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above, &no_preempt, taken);
+ __ j(above, &no_preempt);
SafeCall(&check_preempt_label_);
@@ -1209,7 +1219,7 @@ void RegExpMacroAssemblerIA32::CheckPreemption() {
void RegExpMacroAssemblerIA32::CheckStackLimit() {
Label no_stack_overflow;
ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit();
+ ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
__ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
__ j(above, &no_stack_overflow);
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
index 51e2cb01a..21c86d050 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -80,7 +80,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
virtual void Fail();
- virtual Handle<Object> GetCode(Handle<String> source);
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
@@ -126,6 +126,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kRegisterOutput = kInputEnd + kPointerSize;
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer - local stack variables.
// When adding local variables remember to push space for them in
// the frame in GetCode.
@@ -167,7 +168,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
// Equivalent to a conditional branch to the label, unless the label
// is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Condition condition, Label* to, Hint hint = no_hint);
+ void BranchOrBacktrack(Condition condition, Label* to);
// Call and return internally in the generated code in a way that
// is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
diff --git a/deps/v8/src/ia32/register-allocator-ia32-inl.h b/deps/v8/src/ia32/register-allocator-ia32-inl.h
deleted file mode 100644
index 99ae6ebcb..000000000
--- a/deps/v8/src/ia32/register-allocator-ia32-inl.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
-#define V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
- // The code for this test relies on the order of register codes.
- return reg.code() >= esp.code() && reg.code() <= esi.code();
-}
-
-
-// The register allocator uses small integers to represent the
-// non-reserved assembler registers. The mapping is:
-
-// eax <-> 0, ebx <-> 1, ecx <-> 2, edx <-> 3, edi <-> 4.
-
-int RegisterAllocator::ToNumber(Register reg) {
- ASSERT(reg.is_valid() && !IsReserved(reg));
- const int kNumbers[] = {
- 0, // eax
- 2, // ecx
- 3, // edx
- 1, // ebx
- -1, // esp
- -1, // ebp
- -1, // esi
- 4 // edi
- };
- return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
- ASSERT(num >= 0 && num < kNumRegisters);
- const Register kRegisters[] = { eax, ebx, ecx, edx, edi };
- return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
- // The non-reserved edi register is live on JS function entry.
- Use(edi); // JS function.
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
diff --git a/deps/v8/src/ia32/register-allocator-ia32.cc b/deps/v8/src/ia32/register-allocator-ia32.cc
deleted file mode 100644
index d840c0cc5..000000000
--- a/deps/v8/src/ia32/register-allocator-ia32.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
- ASSERT(is_valid());
- if (is_constant()) {
- Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- if (is_untagged_int32()) {
- fresh.set_untagged_int32(true);
- if (handle()->IsSmi()) {
- CodeGeneratorScope::Current()->masm()->Set(
- fresh.reg(),
- Immediate(Smi::cast(*handle())->value()));
- } else if (handle()->IsHeapNumber()) {
- double double_value = HeapNumber::cast(*handle())->value();
- int32_t value = DoubleToInt32(double_value);
- if (double_value == 0 && signbit(double_value)) {
- // Negative zero must not be converted to an int32 unless
- // the context allows it.
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
- } else if (double_value == value) {
- CodeGeneratorScope::Current()->masm()->Set(
- fresh.reg(), Immediate(value));
- } else {
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
- }
- } else {
- // Constant is not a number. This was not predicted by AST analysis.
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
- }
- } else if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
- CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle());
- } else {
- CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
- Immediate(handle()));
- }
- // This result becomes a copy of the fresh one.
- fresh.set_type_info(type_info());
- *this = fresh;
- }
- ASSERT(is_register());
-}
-
-
-void Result::ToRegister(Register target) {
- ASSERT(is_valid());
- if (!is_register() || !reg().is(target)) {
- Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate(target);
- ASSERT(fresh.is_valid());
- if (is_register()) {
- CodeGeneratorScope::Current()->masm()->mov(fresh.reg(), reg());
- } else {
- ASSERT(is_constant());
- if (is_untagged_int32()) {
- if (handle()->IsSmi()) {
- CodeGeneratorScope::Current()->masm()->Set(
- fresh.reg(),
- Immediate(Smi::cast(*handle())->value()));
- } else {
- ASSERT(handle()->IsHeapNumber());
- double double_value = HeapNumber::cast(*handle())->value();
- int32_t value = DoubleToInt32(double_value);
- if (double_value == 0 && signbit(double_value)) {
- // Negative zero must not be converted to an int32 unless
- // the context allows it.
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
- } else if (double_value == value) {
- CodeGeneratorScope::Current()->masm()->Set(
- fresh.reg(), Immediate(value));
- } else {
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
- CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
- }
- }
- } else {
- if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
- CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle());
- } else {
- CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
- Immediate(handle()));
- }
- }
- }
- fresh.set_type_info(type_info());
- fresh.set_untagged_int32(is_untagged_int32());
- *this = fresh;
- } else if (is_register() && reg().is(target)) {
- ASSERT(CodeGeneratorScope::Current()->has_valid_frame());
- CodeGeneratorScope::Current()->frame()->Spill(target);
- ASSERT(CodeGeneratorScope::Current()->allocator()->count(target) == 1);
- }
- ASSERT(is_register());
- ASSERT(reg().is(target));
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
- Result result = AllocateWithoutSpilling();
- // Check that the register is a byte register. If not, unuse the
- // register if valid and return an invalid result.
- if (result.is_valid() && !result.reg().is_byte_register()) {
- result.Unuse();
- return Result();
- }
- return result;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/simulator-ia32.h b/deps/v8/src/ia32/simulator-ia32.h
index 43b7ea3b0..13ddf35ca 100644
--- a/deps/v8/src/ia32/simulator-ia32.h
+++ b/deps/v8/src/ia32/simulator-ia32.h
@@ -40,12 +40,12 @@ namespace internal {
typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, Address, int);
+ const byte*, int*, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address should
-// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6))
+// expect eight int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
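
On ia32 there is no simulator, so the macro is a plain function-pointer call and the eighth parameter is the new trailing Isolate*. A self-contained sketch of a call site follows; the matcher here is a fake that only echoes its arguments, standing in for generated regexp code:

    #include <cstdio>

    typedef unsigned char byte;
    typedef byte* Address;
    struct String;   // opaque stand-ins for the V8 types in the typedef
    struct Isolate;

    typedef int (*regexp_matcher)(String*, int, const byte*, const byte*,
                                  int*, Address, int, Isolate*);

    #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
      (reinterpret_cast<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))

    // Fake "generated code" so the sketch runs: reports how it was called.
    static int FakeMatcher(String*, int start, const byte*, const byte*,
                           int*, Address, int direct_call, Isolate* isolate) {
      std::printf("start=%d direct=%d isolate=%p\n", start, direct_call,
                  static_cast<const void*>(isolate));
      return 0;  // the real SUCCESS/EXCEPTION/RETRY codes are elided
    }

    int main() {
      int output_registers[2] = {0, 0};
      return CALL_GENERATED_REGEXP_CODE(
          reinterpret_cast<void*>(&FakeMatcher),
          static_cast<String*>(0), 0,              // subject, start index
          static_cast<const byte*>(0),             // input start
          static_cast<const byte*>(0),             // input end
          output_registers,
          static_cast<Address>(0),                 // stack area base
          1,                                       // direct_call
          static_cast<Isolate*>(0));               // the new eighth argument
    }
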
@@ -56,7 +56,9 @@ typedef int (*regexp_matcher)(String*, int, const byte*,
// just use the C stack limit.
class SimulatorStack : public v8::internal::AllStatic {
public:
- static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+ uintptr_t c_limit) {
+ USE(isolate);
return c_limit;
}
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 633097af6..3bce00076 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "stub-cache.h"
namespace v8 {
@@ -39,14 +39,15 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void ProbeTable(MacroAssembler* masm,
+static void ProbeTable(Isolate* isolate,
+ MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
Register name,
Register offset,
Register extra) {
- ExternalReference key_offset(SCTableReference::keyReference(table));
- ExternalReference value_offset(SCTableReference::valueReference(table));
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
Label miss;
@@ -56,7 +57,7 @@ static void ProbeTable(MacroAssembler* masm,
// Check that the key in the entry matches the name.
__ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Check that the flags match what we're looking for.
__ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
@@ -75,7 +76,7 @@ static void ProbeTable(MacroAssembler* masm,
// Check that the key in the entry matches the name.
__ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Get the code entry from the cache.
__ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
@@ -106,17 +107,17 @@ static void ProbeTable(MacroAssembler* masm,
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register r0,
- Register r1) {
+static MaybeObject* GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ String* name,
+ Register r0,
+ Register r1) {
ASSERT(name->IsSymbol());
- __ IncrementCounter(&Counters::negative_lookups, 1);
- __ IncrementCounter(&Counters::negative_lookups_miss, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1);
- Label done;
__ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
const int kInterceptorOrAccessCheckNeededMask =
@@ -125,11 +126,11 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
// Bail out if the receiver has a named interceptor or requires access checks.
__ test_b(FieldOperand(r0, Map::kBitFieldOffset),
kInterceptorOrAccessCheckNeededMask);
- __ j(not_zero, miss_label, not_taken);
+ __ j(not_zero, miss_label);
// Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
- __ j(below, miss_label, not_taken);
+ __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
+ __ j(below, miss_label);
// Load properties array.
Register properties = r0;
@@ -137,67 +138,23 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
// Check that the properties array is a dictionary.
__ cmp(FieldOperand(properties, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
+ Immediate(masm->isolate()->factory()->hash_table_map()));
__ j(not_equal, miss_label);
- // Compute the capacity mask.
- const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
-
- // Generate an unrolled loop that performs a few probes before
- // giving up.
- static const int kProbes = 4;
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
-
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the null value).
- for (int i = 0; i < kProbes; i++) {
- // r0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = r1;
- // Capacity is smi 2^n.
- __ mov(index, FieldOperand(properties, kCapacityOffset));
- __ dec(index);
- __ and_(Operand(index),
- Immediate(Smi::FromInt(name->Hash() +
- StringDictionary::GetProbeOffset(i))));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
-
- Register entity_name = r1;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ cmp(entity_name, Factory::undefined_value());
- if (i != kProbes - 1) {
- __ j(equal, &done, taken);
-
- // Stop if found the property.
- __ cmp(entity_name, Handle<String>(name));
- __ j(equal, miss_label, not_taken);
-
- // Check if the entry name is not a symbol.
- __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- kIsSymbolMask);
- __ j(zero, miss_label, not_taken);
- } else {
- // Give up probing if still not found the undefined value.
- __ j(not_equal, miss_label, not_taken);
- }
- }
+ Label done;
+ MaybeObject* result =
+ StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ properties,
+ name,
+ r1);
+ if (result->IsFailure()) return result;
__ bind(&done);
- __ DecrementCounter(&Counters::negative_lookups_miss, 1);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1);
+
+ return result;
}
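
The hand-unrolled probe loop deleted above moves behind StringDictionaryLookupStub::GenerateNegativeLookup, but the scheme is unchanged: up to kProbes quadratic probes over 3-word entries in a power-of-two table, where hitting an undefined slot proves the property is absent. The same logic in plain C++ (arrays of C strings stand in for the Smi-tagged dictionary layout; the probe-offset formula is assumed from HashTable::GetProbeOffset):

    #include <cstring>
    #include <stdint.h>

    static const int kProbes = 4;     // same unrolled probe count as above
    static const int kEntrySize = 3;  // StringDictionary::kEntrySize

    // Assumed quadratic sequence: 0, 1, 3, 6, ...
    static uint32_t ProbeOffset(uint32_t i) { return (i + i * i) >> 1; }

    // Returns true if `name` is provably absent: within kProbes probes we hit
    // an empty (undefined) slot before finding the name. Returns false both
    // when the name is found and when the result is inconclusive; the stub
    // handles those cases via the miss label.
    bool ProvablyAbsent(const char* const* keys,   // keys[slot * kEntrySize]
                        uint32_t capacity,         // power of two
                        uint32_t hash,
                        const char* name) {
      uint32_t mask = capacity - 1;
      for (int i = 0; i < kProbes; i++) {
        uint32_t slot = (hash + ProbeOffset(i)) & mask;
        const char* entry = keys[slot * kEntrySize];
        if (entry == 0) return true;                      // undefined: absent
        if (std::strcmp(entry, name) == 0) return false;  // present: miss path
      }
      return false;  // inconclusive after kProbes probes
    }
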
@@ -208,6 +165,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register scratch,
Register extra,
Register extra2) {
+ Isolate* isolate = Isolate::Current();
Label miss;
USE(extra2); // The register extra2 is not used on the ia32 platform.
@@ -230,8 +188,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ASSERT(extra2.is(no_reg));
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
__ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
@@ -240,7 +197,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
// Probe the primary table.
- ProbeTable(masm, flags, kPrimary, name, scratch, extra);
+ ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra);
// Primary miss: Compute hash for secondary probe.
__ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
@@ -252,7 +209,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
// Probe the secondary table.
- ProbeTable(masm, flags, kSecondary, name, scratch, extra);
+ ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
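
GenerateProbe tries the primary table, rehashes, and tries the secondary table before falling through to the runtime; both tables are now per-isolate. A compact model of that two-level lookup, where the table sizes and hash mixing are simplified stand-ins (the exact shifts and masks are in the assembly above):

    #include <stdint.h>

    static const int kPrimaryTableSize = 2048;   // illustrative, not V8's values
    static const int kSecondaryTableSize = 512;

    struct Entry { uint32_t key; void* code; };

    // Two-level probe: a miss in the primary table derives a secondary index
    // from the primary hash, and a miss there means entering the runtime.
    struct StubCacheModel {
      Entry primary[kPrimaryTableSize];
      Entry secondary[kSecondaryTableSize];

      void* Probe(uint32_t name_hash, uint32_t flags) const {
        uint32_t p = (name_hash + flags) & (kPrimaryTableSize - 1);
        if (primary[p].key == name_hash) return primary[p].code;
        uint32_t s = (p - name_hash + flags) & (kSecondaryTableSize - 1);
        if (secondary[s].key == name_hash) return secondary[s].code;
        return 0;  // cache miss: caller falls back to the runtime system
      }
    };
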
@@ -274,10 +231,11 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype, Label* miss) {
// Check we're still in the same context.
__ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)),
- Top::global());
+ masm->isolate()->global());
__ j(not_equal, miss);
// Get the global function with the given index.
- JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
+ JSFunction* function =
+ JSFunction::cast(masm->isolate()->global_context()->get(index));
// Load its initial map. The global functions all have initial maps.
__ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
// Load the prototype from the initial map.
@@ -290,12 +248,11 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
Register scratch,
Label* miss_label) {
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss_label, not_taken);
+ __ JumpIfSmi(receiver, miss_label);
// Check that the object is a JS array.
__ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, miss_label, not_taken);
+ __ j(not_equal, miss_label);
// Load length directly from the JS array.
__ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset));
@@ -311,15 +268,14 @@ static void GenerateStringCheck(MacroAssembler* masm,
Label* smi,
Label* non_string_object) {
// Check that the object isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, smi, not_taken);
+ __ JumpIfSmi(receiver, smi);
// Check that the object is a string.
__ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
ASSERT(kNotStringTag != 0);
__ test(scratch, Immediate(kNotStringTag));
- __ j(not_zero, non_string_object, not_taken);
+ __ j(not_zero, non_string_object);
}
@@ -344,7 +300,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
// Check if the object is a JSValue wrapper.
__ bind(&check_wrapper);
__ cmp(scratch1, JS_VALUE_TYPE);
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
// Check if the wrapped value is a string and load the length
// directly if it is.
@@ -395,7 +351,7 @@ static void PushInterceptorArguments(MacroAssembler* masm,
JSObject* holder_obj) {
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!Heap::InNewSpace(interceptor));
+ ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
Register scratch = name;
__ mov(scratch, Immediate(Handle<Object>(interceptor)));
__ push(scratch);
@@ -412,8 +368,9 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
JSObject* holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly)),
- 5);
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
+ masm->isolate()),
+ 5);
}
@@ -480,7 +437,7 @@ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
__ mov(Operand(esp, 2 * kPointerSize), edi);
Object* call_data = optimization.api_call_info()->data();
Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (Heap::InNewSpace(call_data)) {
+ if (masm->isolate()->heap()->InNewSpace(call_data)) {
__ mov(ecx, api_call_info_handle);
__ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
__ mov(Operand(esp, 3 * kPointerSize), ebx);
@@ -528,10 +485,12 @@ class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
const ParameterCount& arguments,
- Register name)
+ Register name,
+ Code::ExtraICState extra_ic_state)
: stub_compiler_(stub_compiler),
arguments_(arguments),
- name_(name) {}
+ name_(name),
+ extra_ic_state_(extra_ic_state) {}
MaybeObject* Compile(MacroAssembler* masm,
JSObject* object,
@@ -547,8 +506,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ JumpIfSmi(receiver, miss);
CallOptimization optimization(lookup);
@@ -574,7 +532,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name,
holder,
miss);
- return Heap::undefined_value(); // Success.
+ return masm->isolate()->heap()->undefined_value(); // Success.
}
}
@@ -610,10 +568,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
(depth2 != kInvalidProtoDepth);
}
- __ IncrementCounter(&Counters::call_const_interceptor, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->call_const_interceptor(), 1);
if (can_do_fast_api_call) {
- __ IncrementCounter(&Counters::call_const_interceptor_fast_api, 1);
+ __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1);
ReserveSpaceForFastApiCall(masm, scratch1);
}
@@ -655,8 +614,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
GenerateFastApiCall(masm, optimization, arguments_.immediate());
if (result->IsFailure()) return result;
} else {
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
__ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION);
+ JUMP_FUNCTION, NullCallWrapper(), call_kind);
}
// Deferred code for fast API call case---clean preallocated space.
@@ -672,7 +634,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
FreeSpaceForFastApiCall(masm, scratch1);
}
- return Heap::undefined_value(); // Success.
+ return masm->isolate()->heap()->undefined_value(); // Success.
}
void CompileRegular(MacroAssembler* masm,
@@ -700,9 +662,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
interceptor_holder);
__ CallExternalReference(
- ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
- 5);
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
+ masm->isolate()),
+ 5);
// Restore the name_ register.
__ pop(name_);
@@ -728,13 +690,14 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ pop(receiver); // Restore the holder.
__ LeaveInternalFrame();
- __ cmp(eax, Factory::no_interceptor_result_sentinel());
+ __ cmp(eax, masm->isolate()->factory()->no_interceptor_result_sentinel());
__ j(not_equal, interceptor_succeeded);
}
StubCompiler* stub_compiler_;
const ParameterCount& arguments_;
Register name_;
+ Code::ExtraICState extra_ic_state_;
};
@@ -742,9 +705,9 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
Code* code = NULL;
if (kind == Code::LOAD_IC) {
- code = Builtins::builtin(Builtins::LoadIC_Miss);
+ code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
} else {
- code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+ code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
}
Handle<Code> ic(code);
@@ -752,6 +715,14 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
}
+void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
+ Code* code = masm->isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_MissForceGeneric);
+ Handle<Code> ic(code);
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+}
+
+
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
@@ -763,13 +734,12 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Register scratch,
Label* miss_label) {
// Check that the object isn't a smi.
- __ test(receiver_reg, Immediate(kSmiTagMask));
- __ j(zero, miss_label, not_taken);
+ __ JumpIfSmi(receiver_reg, miss_label);
// Check that the map of the object hasn't changed.
__ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
Immediate(Handle<Map>(object->map())));
- __ j(not_equal, miss_label, not_taken);
+ __ j(not_equal, miss_label);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -790,7 +760,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ push(eax);
__ push(scratch);
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)), 3, 1);
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ masm->isolate()),
+ 3,
+ 1);
return;
}
@@ -851,12 +824,12 @@ MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
if (Serializer::enabled()) {
__ mov(scratch, Immediate(Handle<Object>(cell)));
__ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- Immediate(Factory::the_hole_value()));
+ Immediate(masm->isolate()->factory()->the_hole_value()));
} else {
__ cmp(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
- Immediate(Factory::the_hole_value()));
+ Immediate(masm->isolate()->factory()->the_hole_value()));
}
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
return cell;
}
@@ -906,6 +879,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
&& !scratch2.is(scratch1));
+
// Keep track of the current object in register reg.
Register reg = object_reg;
JSObject* current = object;
@@ -930,7 +904,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
- MaybeObject* maybe_lookup_result = Heap::LookupSymbol(name);
+ MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
Object* lookup_result = NULL; // Initialization to please compiler.
if (!maybe_lookup_result->ToObject(&lookup_result)) {
set_failure(Failure::cast(maybe_lookup_result));
@@ -941,21 +915,26 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
ASSERT(current->property_dictionary()->FindEntry(name) ==
StringDictionary::kNotFound);
- GenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
+ MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+ miss,
+ reg,
+ name,
+ scratch1,
+ scratch2);
+ if (negative_lookup->IsFailure()) {
+ set_failure(Failure::cast(negative_lookup));
+ return reg;
+ }
+
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // from now the object is in holder_reg
__ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else if (Heap::InNewSpace(prototype)) {
+ } else if (heap()->InNewSpace(prototype)) {
// Get the map of the current object.
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
__ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map())));
// Branch on the result of the map check.
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
// Check access rights to the global object. This has to happen
// after the map check so that we know that the object is
// actually a global object.
@@ -975,7 +954,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
Immediate(Handle<Map>(current->map())));
// Branch on the result of the map check.
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
// Check access rights to the global object. This has to happen
// after the map check so that we know that the object is
// actually a global object.
@@ -997,12 +976,12 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
ASSERT(current == holder);
// Log the check depth.
- LOG(IntEvent("check-maps-depth", depth + 1));
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
// Check the holder map.
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
Immediate(Handle<Map>(holder->map())));
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -1036,8 +1015,7 @@ void StubCompiler::GenerateLoadField(JSObject* object,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ JumpIfSmi(receiver, miss);
// Check the prototype chain.
Register reg =
@@ -1061,8 +1039,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
@@ -1080,7 +1057,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
ASSERT(!scratch2.is(reg));
__ push(reg); // holder
// Push data from AccessorInfo.
- if (Heap::InNewSpace(callback_handle->data())) {
+ if (isolate()->heap()->InNewSpace(callback_handle->data())) {
__ mov(scratch1, Immediate(callback_handle));
__ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));
} else {
@@ -1128,8 +1105,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
CheckPrototypes(object, receiver, holder,
@@ -1155,8 +1131,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ JumpIfSmi(receiver, miss);
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
@@ -1204,7 +1179,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
- __ cmp(eax, Factory::no_interceptor_result_sentinel());
+ __ cmp(eax, factory()->no_interceptor_result_sentinel());
__ j(equal, &interceptor_failed);
__ LeaveInternalFrame();
__ ret(0);
@@ -1259,7 +1234,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
__ push(scratch2); // restore return address
ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
+ masm()->isolate());
__ TailCallExternalReference(ref, 5, 1);
}
} else { // !compile_followup_inline
@@ -1273,8 +1249,9 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
name_reg, interceptor_holder);
__ push(scratch2); // restore old return address
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
+ isolate());
__ TailCallExternalReference(ref, 5, 1);
}
}
@@ -1283,7 +1260,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
__ cmp(Operand(ecx), Immediate(Handle<String>(name)));
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
}
}
@@ -1304,8 +1281,7 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (object != holder) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ JumpIfSmi(edx, miss);
}
// Check that the maps haven't changed.
@@ -1325,31 +1301,32 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
}
// Check that the cell contains the same function.
- if (Heap::InNewSpace(function)) {
+ if (isolate()->heap()->InNewSpace(function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ JumpIfSmi(edi, miss);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
// Check the shared function info. Make sure it hasn't changed.
__ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
Immediate(Handle<SharedFunctionInfo>(function->shared())));
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
} else {
__ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
}
}
MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj = StubCache::ComputeCallMiss(arguments().immediate(),
- kind_);
+ MaybeObject* maybe_obj =
+ isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
+ kind_,
+ extra_ic_state_);
Object* obj;
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
__ jmp(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
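
The repeated rewrite to `if (maybe_result->IsFailure()) return maybe_result;` throughout the hunks below is the MaybeObject protocol: any compilation or allocation may return a Failure (for example, retry-after-GC) instead of a real object, and callers must check before use. A reduced model of the protocol, with two static sentinels replacing real heap values:

    struct MaybeObject {
      bool failed;
      bool IsFailure() const { return failed; }
    };

    static MaybeObject kFailureSentinel = { true };
    static MaybeObject kRealObject = { false };

    // Stand-in for an allocating operation that may fail.
    MaybeObject* Allocate(bool out_of_memory) {
      return out_of_memory ? &kFailureSentinel : &kRealObject;
    }

    // The calling convention used throughout this file: test IsFailure()
    // and propagate, never touch the value on the failure path.
    MaybeObject* CompileSomething(bool out_of_memory) {
      MaybeObject* maybe_result = Allocate(out_of_memory);
      if (maybe_result->IsFailure()) return maybe_result;
      return maybe_result;  // safe to use as a real object from here on
    }
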
@@ -1378,8 +1355,7 @@ MUST_USE_RESULT MaybeObject* CallStubCompiler::CompileCallField(
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ JumpIfSmi(edx, &miss);
// Do the right check and compute the holder register.
Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
@@ -1388,10 +1364,9 @@ MUST_USE_RESULT MaybeObject* CallStubCompiler::CompileCallField(
GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
// Check that the function really is a function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ JumpIfSmi(edi, &miss);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -1401,14 +1376,16 @@ MUST_USE_RESULT MaybeObject* CallStubCompiler::CompileCallField(
}
// Invoke the function.
- __ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
// Handle call cache miss.
__ bind(&miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(FIELD, name);
@@ -1429,7 +1406,9 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsJSArray() || cell != NULL) {
+ return isolate()->heap()->undefined_value();
+ }
Label miss;
@@ -1440,8 +1419,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
@@ -1459,7 +1437,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check that the elements are in fast mode and writable.
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(factory()->fixed_array_map()));
__ j(not_equal, &call_builtin);
if (argc == 1) { // Otherwise fall through to call builtin.
@@ -1489,8 +1467,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ mov(Operand(edx, 0), ecx);
// Check if value is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &with_write_barrier);
+ __ JumpIfNotSmi(ecx, &with_write_barrier);
__ bind(&exit);
__ ret((argc + 1) * kPointerSize);
@@ -1508,9 +1485,9 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
}
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
const int kAllocationDelta = 4;
// Load top.
@@ -1535,7 +1512,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// ... and fill the rest with holes.
for (int i = 1; i < kAllocationDelta; i++) {
__ mov(Operand(edx, i * kPointerSize),
- Immediate(Factory::the_hole_value()));
+ Immediate(factory()->the_hole_value()));
}
// Restore receiver to edx as finish sequence assumes it's here.
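
This fast path grows the array's backing store in place: it only works when the elements end exactly at the new-space allocation top, in which case the stub bumps the top by kAllocationDelta words (filling the new slots with the hole, as in the loop above) instead of copying. The check, modeled with plain pointers standing in for the top/limit external references:

    #include <stdint.h>

    static const int kAllocationDelta = 4;  // same growth step as the stub

    // In-place growth fast path: the backing store must end exactly at the
    // new-space allocation top, and the bumped top must stay under the limit.
    // On success the caller fills slots [1, kAllocationDelta) with the hole.
    bool TryGrowInPlace(uintptr_t elements_end, uintptr_t* top, uintptr_t limit) {
      if (*top != elements_end) return false;  // not the newest allocation
      uintptr_t new_top = *top + kAllocationDelta * sizeof(uintptr_t);
      if (new_top > limit) return false;       // would run past new space
      *top = new_top;                          // bump-allocate the extra slots
      return true;
    }
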
@@ -1551,16 +1528,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
}
__ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
- argc + 1,
- 1);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate()),
+ argc + 1,
+ 1);
}
__ bind(&miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -1581,7 +1557,9 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsJSArray() || cell != NULL) {
+ return heap()->undefined_value();
+ }
Label miss, return_undefined, call_builtin;
@@ -1592,8 +1570,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
eax, edi, name, &miss);
@@ -1603,7 +1580,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// Check that the elements are in fast mode and writable.
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
+ Immediate(factory()->fixed_array_map()));
__ j(not_equal, &call_builtin);
// Get the array's length into ecx and calculate new length.
@@ -1617,7 +1594,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
__ mov(eax, FieldOperand(ebx,
ecx, times_half_pointer_size,
FixedArray::kHeaderSize));
- __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
+ __ cmp(Operand(eax), Immediate(factory()->the_hole_value()));
__ j(equal, &call_builtin);
// Set the array's length.
@@ -1627,23 +1604,22 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
__ mov(FieldOperand(ebx,
ecx, times_half_pointer_size,
FixedArray::kHeaderSize),
- Immediate(Factory::the_hole_value()));
+ Immediate(factory()->the_hole_value()));
__ ret((argc + 1) * kPointerSize);
__ bind(&return_undefined);
- __ mov(eax, Immediate(Factory::undefined_value()));
+ __ mov(eax, Immediate(factory()->undefined_value()));
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
- argc + 1,
- 1);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPop, isolate()),
+ argc + 1,
+ 1);
__ bind(&miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -1665,7 +1641,9 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsString() || cell != NULL) {
+ return isolate()->heap()->undefined_value();
+ }
const int argc = arguments().immediate();
@@ -1674,7 +1652,9 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+ if (kind_ == Code::CALL_IC &&
+ (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
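
This condition rewrite (and the CallICBase::Contextual::decode calls earlier) exists because extra_ic_state_ is no longer a single enum value: it is a word with several flags packed via V8's BitField template, so each consumer decodes its own field. The pattern in miniature, with an invented bit layout (the real shifts live in CallICBase):

    #include <cassert>
    #include <stdint.h>

    // Miniature of the BitField template: each field owns `size` bits
    // starting at `shift` inside one integer state word.
    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t state) {
        return static_cast<T>((state & kMask) >> shift);
      }
    };

    enum StringStubCheck { DEFAULT_STRING_STUB, STRING_INDEX_OUT_OF_BOUNDS };
    enum ContextualFlag { NOT_CONTEXTUAL, CONTEXTUAL };
    // Invented layout for illustration only:
    typedef BitField<StringStubCheck, 0, 1> StringStubState;
    typedef BitField<ContextualFlag, 1, 1> Contextual;

    int main() {
      uint32_t state = StringStubState::encode(DEFAULT_STRING_STUB) |
                       Contextual::encode(CONTEXTUAL);
      assert(StringStubState::decode(state) == DEFAULT_STRING_STUB);
      assert(Contextual::decode(state) == CONTEXTUAL);
      return 0;
    }
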
@@ -1697,7 +1677,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
if (argc > 0) {
__ mov(index, Operand(esp, (argc - 0) * kPointerSize));
} else {
- __ Set(index, Immediate(Factory::undefined_value()));
+ __ Set(index, Immediate(factory()->undefined_value()));
}
StringCharCodeAtGenerator char_code_at_generator(receiver,
@@ -1716,7 +1696,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
- __ Set(eax, Immediate(Factory::nan_value()));
+ __ Set(eax, Immediate(factory()->nan_value()));
__ ret((argc + 1) * kPointerSize);
}
@@ -1724,10 +1704,8 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// Restore function name in ecx.
__ Set(ecx, Immediate(Handle<String>(name)));
__ bind(&name_miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -1749,7 +1727,9 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsString() || cell != NULL) {
+ return heap()->undefined_value();
+ }
const int argc = arguments().immediate();
@@ -1758,7 +1738,9 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+ if (kind_ == Code::CALL_IC &&
+ (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
@@ -1782,7 +1764,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
if (argc > 0) {
__ mov(index, Operand(esp, (argc - 0) * kPointerSize));
} else {
- __ Set(index, Immediate(Factory::undefined_value()));
+ __ Set(index, Immediate(factory()->undefined_value()));
}
StringCharAtGenerator char_at_generator(receiver,
@@ -1802,7 +1784,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
- __ Set(eax, Immediate(Factory::empty_string()));
+ __ Set(eax, Immediate(factory()->empty_string()));
__ ret((argc + 1) * kPointerSize);
}
@@ -1810,10 +1792,8 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// Restore function name in ecx.
__ Set(ecx, Immediate(Handle<String>(name)));
__ bind(&name_miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -1838,7 +1818,9 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) {
+ return isolate()->heap()->undefined_value();
+ }
Label miss;
GenerateNameCheck(name, &miss);
@@ -1847,8 +1829,7 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
__ mov(edx, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
&miss);
@@ -1865,8 +1846,7 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// Check the code is a smi.
Label slow;
STATIC_ASSERT(kSmiTag == 0);
- __ test(code, Immediate(kSmiTagMask));
- __ j(not_zero, &slow);
+ __ JumpIfNotSmi(code, &slow);
// Convert the smi code to uint16.
__ and_(code, Immediate(Smi::FromInt(0xffff)));
@@ -1881,14 +1861,16 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
__ bind(&miss);
// ecx: function name.
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
@@ -1908,14 +1890,19 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- if (!CpuFeatures::IsSupported(SSE2)) return Heap::undefined_value();
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ return isolate()->heap()->undefined_value();
+ }
+
CpuFeatures::Scope use_sse2(SSE2);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) {
+ return isolate()->heap()->undefined_value();
+ }
Label miss;
GenerateNameCheck(name, &miss);
@@ -1924,8 +1911,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ mov(edx, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
&miss);
@@ -1941,12 +1927,11 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// Check if the argument is a smi.
Label smi;
STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &smi);
+ __ JumpIfSmi(eax, &smi);
// Check if the argument is a heap number and load its value into xmm0.
Label slow;
- __ CheckMap(eax, Factory::heap_number_map(), &slow, true);
+ __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
__ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
// Check if the argument is strictly positive. Note this also
@@ -2008,14 +1993,13 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// ecx: function name.
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
@@ -2039,7 +2023,9 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) {
+ return isolate()->heap()->undefined_value();
+ }
Label miss;
GenerateNameCheck(name, &miss);
@@ -2048,8 +2034,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
__ mov(edx, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
&miss);
@@ -2065,8 +2050,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// Check if the argument is a smi.
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smi);
+ __ JumpIfNotSmi(eax, &not_smi);
// Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
// otherwise.
@@ -2090,7 +2074,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// Check if the argument is a heap number and load its exponent and
// sign into ebx.
__ bind(&not_smi);
- __ CheckMap(eax, Factory::heap_number_map(), &slow, true);
+ __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
__ mov(ebx, FieldOperand(eax, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive,
@@ -2113,25 +2097,86 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// ecx: function name.
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
}
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
+MaybeObject* CallStubCompiler::CompileFastApiCall(
+ const CallOptimization& optimization,
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ ASSERT(optimization.is_simple_api_call());
+  // Bail out if the object is a global object, as we don't want to
+  // repatch it to the global receiver.
+ if (object->IsGlobalObject()) return heap()->undefined_value();
+ if (cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSObject()) return heap()->undefined_value();
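+  // The fast path additionally requires a known, bounded prototype chain
+  // depth between the receiver's expected type and the holder.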
+ int depth = optimization.GetPrototypeDepthOfExpectedType(
+ JSObject::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+
+ Label miss, miss_before_stack_reserved;
+
+ GenerateNameCheck(name, &miss_before_stack_reserved);
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(edx, &miss_before_stack_reserved);
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->call_const(), 1);
+ __ IncrementCounter(counters->call_const_fast_api(), 1);
+
+ // Allocate space for v8::Arguments implicit values. Must be initialized
+ // before calling any runtime function.
+ __ sub(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
+
+  // Check that the maps haven't changed and find the holder as a side
+  // effect.
+ CheckPrototypes(JSObject::cast(object), edx, holder,
+ ebx, eax, edi, name, depth, &miss);
+
+ // Move the return address on top of the stack.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(Operand(esp, 0 * kPointerSize), eax);
+
+  // esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains a
+  // duplicate of the return address and will be overwritten.
+ MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
+ if (result->IsFailure()) return result;
+
+ __ bind(&miss);
+ __ add(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
+
+ __ bind(&miss_before_stack_reserved);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
+}
+
+
+MaybeObject* CallStubCompiler::CompileCallConstant(
+ Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2140,20 +2185,18 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- SharedFunctionInfo* function_info = function->shared();
- if (function_info->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = function_info->builtin_function_id();
+ if (HasCustomCallGenerator(function)) {
MaybeObject* maybe_result = CompileCustomCall(
- id, object, holder, NULL, function, name);
+ object, holder, NULL, function, name);
Object* result;
if (!maybe_result->ToObject(&result)) return maybe_result;
// undefined means bail out to regular compiler.
if (!result->IsUndefined()) return result;
}
- Label miss_in_smi_check;
+ Label miss;
- GenerateNameCheck(name, &miss_in_smi_check);
+ GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -2161,43 +2204,25 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss_in_smi_check, not_taken);
+ __ JumpIfSmi(edx, &miss);
}
  // Make sure that it's okay not to patch the on-stack receiver
// unless we're doing a receiver map check.
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- CallOptimization optimization(function);
- int depth = kInvalidProtoDepth;
- Label miss;
-
+ SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
- __ IncrementCounter(&Counters::call_const, 1);
-
- if (optimization.is_simple_api_call() && !object->IsGlobalObject()) {
- depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- }
-
- if (depth != kInvalidProtoDepth) {
- __ IncrementCounter(&Counters::call_const_fast_api, 1);
-
- // Allocate space for v8::Arguments implicit values. Must be initialized
- // before to call any runtime function.
- __ sub(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
- }
+ __ IncrementCounter(isolate()->counters()->call_const(), 1);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, edi, name, depth, &miss);
+ ebx, eax, edi, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
if (object->IsGlobalObject()) {
- ASSERT(depth == kInvalidProtoDepth);
__ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
__ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
}
@@ -2211,7 +2236,7 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
} else {
// Check that the object is a string or a symbol.
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
- __ j(above_equal, &miss, not_taken);
+ __ j(above_equal, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
@@ -2228,10 +2253,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
} else {
Label fast;
// Check that the object is a smi or a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &fast, taken);
+ __ JumpIfSmi(edx, &fast);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
@@ -2250,10 +2274,10 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
} else {
Label fast;
// Check that the object is a boolean.
- __ cmp(edx, Factory::true_value());
- __ j(equal, &fast, taken);
- __ cmp(edx, Factory::false_value());
- __ j(not_equal, &miss, not_taken);
+ __ cmp(edx, factory()->true_value());
+ __ j(equal, &fast);
+ __ cmp(edx, factory()->false_value());
+ __ j(not_equal, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
@@ -2268,29 +2292,16 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
UNREACHABLE();
}
- if (depth != kInvalidProtoDepth) {
- // Move the return address on top of the stack.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), eax);
-
- // esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
- // duplicate of return address and will be overwritten.
- MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
- } else {
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
- }
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
// Handle call cache miss.
__ bind(&miss);
- if (depth != kInvalidProtoDepth) {
- __ add(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
- }
- __ bind(&miss_in_smi_check);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -2320,7 +2331,7 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Get the receiver from the stack.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- CallInterceptorCompiler compiler(this, arguments(), ecx);
+ CallInterceptorCompiler compiler(this, arguments(), ecx, extra_ic_state_);
MaybeObject* result = compiler.Compile(masm(),
object,
holder,
@@ -2337,10 +2348,9 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the function really is a function.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ JumpIfSmi(eax, &miss);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -2351,25 +2361,28 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Invoke the function.
__ mov(edi, eax);
- __ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
  // Handle call cache miss.
__ bind(&miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(INTERCEPTOR, name);
}
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
+MaybeObject* CallStubCompiler::CompileCallGlobal(
+ JSObject* object,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2378,11 +2391,9 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- SharedFunctionInfo* function_info = function->shared();
- if (function_info->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = function_info->builtin_function_id();
+ if (HasCustomCallGenerator(function)) {
MaybeObject* maybe_result = CompileCustomCall(
- id, object, holder, cell, function, name);
+ object, holder, cell, function, name);
Object* result;
if (!maybe_result->ToObject(&result)) return maybe_result;
// undefined means bail out to regular compiler.
@@ -2410,28 +2421,32 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
- __ IncrementCounter(&Counters::call_global_inline, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->call_global_inline(), 1);
ASSERT(function->is_compiled());
ParameterCount expected(function->shared()->formal_parameter_count());
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
if (V8::UseCrankshaft()) {
// TODO(kasperl): For now, we always call indirectly through the
// code field in the function to allow recompilation to take effect
// without changing any of the call sites.
__ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, arguments(), JUMP_FUNCTION);
+ expected, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
} else {
Handle<Code> code(function->code());
__ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
}
// Handle call cache miss.
__ bind(&miss);
- __ IncrementCounter(&Counters::call_global_inline_miss, 1);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ __ IncrementCounter(counters->call_global_inline_miss(), 1);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(NORMAL, name);
@@ -2461,7 +2476,7 @@ MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
// Handle store cache miss.
__ bind(&miss);
__ mov(ecx, Immediate(Handle<String>(name))); // restore name
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2481,13 +2496,12 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
Label miss;
// Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ JumpIfSmi(edx, &miss);
// Check that the map of the object hasn't changed.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Handle<Map>(object->map())));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -2507,12 +2521,12 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
__ TailCallExternalReference(store_callback_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2531,13 +2545,12 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
Label miss;
// Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ JumpIfSmi(edx, &miss);
// Check that the map of the object hasn't changed.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Handle<Map>(receiver->map())));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {
@@ -2557,12 +2570,12 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2584,7 +2597,7 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// Check that the map of the global has not changed.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Handle<Map>(object->map())));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Compute the cell operand to use.
@@ -2598,20 +2611,21 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// cell could have been deleted and reintroducing the global needs
// to update the property details in the property dictionary of the
// global object. We bail out to the runtime system to do that.
- __ cmp(cell_operand, Factory::the_hole_value());
+ __ cmp(cell_operand, factory()->the_hole_value());
__ j(equal, &miss);
// Store the value in the cell.
__ mov(cell_operand, eax);
// Return the value (register eax).
- __ IncrementCounter(&Counters::named_store_global_inline, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_store_global_inline(), 1);
__ ret(0);
// Handle store cache miss.
__ bind(&miss);
- __ IncrementCounter(&Counters::named_store_global_inline_miss, 1);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ IncrementCounter(counters->named_store_global_inline_miss(), 1);
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2631,11 +2645,12 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_store_field, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_store_field(), 1);
// Check that the name has not changed.
__ cmp(Operand(ecx), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Generate store field code. Trashes the name register.
GenerateStoreField(masm(),
@@ -2647,8 +2662,8 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
// Handle store cache miss.
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_store_field, 1);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ __ DecrementCounter(counters->keyed_store_field(), 1);
+ Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2656,56 +2671,22 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
- JSObject* receiver) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label miss;
-
- // Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
-
- // Check that the map matches.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Handle<Map>(receiver->map())));
- __ j(not_equal, &miss, not_taken);
-
- // Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &miss, not_taken);
-
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- __ j(not_equal, &miss, not_taken);
-
- // Check that the key is within bounds.
- if (receiver->IsJSArray()) {
- __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
- __ j(above_equal, &miss, not_taken);
- } else {
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // Compare smis.
- __ j(above_equal, &miss, not_taken);
- }
-
- // Do the store and update the write barrier. Make sure to preserve
- // the value in register eax.
- __ mov(edx, Operand(eax));
- __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
- __ RecordWrite(edi, 0, edx, ecx);
-
- // Done.
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ Code* stub;
+ MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map);
+ if (!maybe_stub->To(&stub)) return maybe_stub;
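+  // DispatchMap performs the smi check and tail-jumps to the shared element
+  // stub when the receiver's map matches; on a mismatch it falls through to
+  // the miss handler below.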
+ __ DispatchMap(edx,
+ Handle<Map>(receiver_map),
+ Handle<Code>(stub),
+ DO_SMI_CHECK);
+
+ Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2713,8 +2694,9 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
}
-MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
- JSObject* receiver) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+ MapList* receiver_maps,
+ CodeList* handler_ics) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -2722,30 +2704,22 @@ MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
// -- esp[0] : return address
// -----------------------------------
Label miss;
+ __ JumpIfSmi(edx, &miss);
- // Check that the map matches.
- __ CheckMap(edx, Handle<Map>(receiver->map()), &miss, false);
-
- // Do the load.
- GenerateFastPixelArrayStore(masm(),
- edx,
- ecx,
- eax,
- edi,
- ebx,
- true,
- &miss,
- &miss,
- NULL,
- &miss);
-
- // Handle store cache miss.
+ Register map_reg = ebx;
+ __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
+ int receiver_count = receiver_maps->length();
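+  // Polymorphic dispatch: tail-jump to the handler IC whose map matches the
+  // receiver's map, falling through to the miss handler if none match.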
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<Map> map(receiver_maps->at(current));
+ __ cmp(map_reg, map);
+ __ j(equal, Handle<Code>(handler_ics->at(current)));
+ }
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
+ __ jmp(miss_ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, NULL, MEGAMORPHIC);
}
@@ -2760,8 +2734,7 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
Label miss;
// Check that the receiver isn't a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ JumpIfSmi(eax, &miss);
ASSERT(last->IsGlobalObject() || last->HasFastProperties());
@@ -2786,14 +2759,14 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
- __ mov(eax, Factory::undefined_value());
+ __ mov(eax, isolate()->factory()->undefined_value());
__ ret(0);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, Heap::empty_string());
+ return GetCode(NONEXISTENT, isolate()->heap()->empty_string());
}
@@ -2913,8 +2886,7 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// object which can only happen for contextual loads. In this case,
// the receiver cannot be a smi.
if (object != holder) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ JumpIfSmi(eax, &miss);
}
// Check that the maps haven't changed.
@@ -2930,19 +2902,20 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
- __ cmp(ebx, Factory::the_hole_value());
- __ j(equal, &miss, not_taken);
+ __ cmp(ebx, factory()->the_hole_value());
+ __ j(equal, &miss);
} else if (FLAG_debug_code) {
- __ cmp(ebx, Factory::the_hole_value());
+ __ cmp(ebx, factory()->the_hole_value());
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
- __ IncrementCounter(&Counters::named_load_global_stub, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1);
__ mov(eax, ebx);
__ ret(0);
__ bind(&miss);
- __ IncrementCounter(&Counters::named_load_global_stub_miss, 1);
+ __ IncrementCounter(counters->named_load_global_stub_miss(), 1);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
@@ -2961,16 +2934,17 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_field, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_field(), 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_field, 1);
+ __ DecrementCounter(counters->keyed_load_field(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -2990,11 +2964,12 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_callback, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_callback(), 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
ecx, edi, callback, name, &miss);
@@ -3005,7 +2980,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_callback, 1);
+ __ DecrementCounter(counters->keyed_load_callback(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -3024,16 +2999,17 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_constant_function, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_constant_function(), 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi,
value, name, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_constant_function, 1);
+ __ DecrementCounter(counters->keyed_load_constant_function(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -3051,11 +3027,12 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_interceptor, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_interceptor(), 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
LookupResult lookup;
LookupPostInterceptor(holder, name, &lookup);
@@ -3070,7 +3047,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
name,
&miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_interceptor, 1);
+ __ DecrementCounter(counters->keyed_load_interceptor(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -3086,15 +3063,16 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_array_length, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_array_length(), 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
GenerateLoadArrayLength(masm(), edx, ecx, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_array_length, 1);
+ __ DecrementCounter(counters->keyed_load_array_length(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -3110,15 +3088,16 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_string_length, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_string_length(), 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_string_length, 1);
+ __ DecrementCounter(counters->keyed_load_string_length(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -3134,15 +3113,16 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
+ __ DecrementCounter(counters->keyed_load_function_prototype(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -3150,44 +3130,20 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label miss;
+ Code* stub;
+ MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map);
+ if (!maybe_stub->To(&stub)) return maybe_stub;
+ __ DispatchMap(edx,
+ Handle<Map>(receiver_map),
+ Handle<Code>(stub),
+ DO_SMI_CHECK);
- // Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
-
- // Check that the map matches.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Handle<Map>(receiver->map())));
- __ j(not_equal, &miss, not_taken);
-
- // Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &miss, not_taken);
-
- // Get the elements array.
- __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(ecx);
-
- // Check that the key is within bounds.
- __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ j(above_equal, &miss, not_taken);
-
- // Load the result and make sure it's not the hole.
- __ mov(ebx, Operand(ecx, eax, times_2,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ cmp(ebx, Factory::the_hole_value());
- __ j(equal, &miss, not_taken);
- __ mov(eax, ebx);
- __ ret(0);
-
- __ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -3195,34 +3151,31 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+ MapList* receiver_maps,
+ CodeList* handler_ics) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss;
+ __ JumpIfSmi(edx, &miss);
- // Check that the map matches.
- __ CheckMap(edx, Handle<Map>(receiver->map()), &miss, false);
-
- GenerateFastPixelArrayLoad(masm(),
- edx,
- eax,
- ecx,
- ebx,
- eax,
- &miss,
- &miss,
- &miss);
+ Register map_reg = ebx;
+ __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
+ int receiver_count = receiver_maps->length();
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<Map> map(receiver_maps->at(current));
+ __ cmp(map_reg, map);
+ __ j(equal, Handle<Code>(handler_ics->at(current)));
+ }
- // Handle load cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Miss));
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, NULL, MEGAMORPHIC);
}
@@ -3242,15 +3195,14 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// code for the function thereby hitting the break points.
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kDebugInfoOffset));
- __ cmp(ebx, Factory::undefined_value());
- __ j(not_equal, &generic_stub_call, not_taken);
+ __ cmp(ebx, factory()->undefined_value());
+ __ j(not_equal, &generic_stub_call);
#endif
// Load the initial map and verify that it is in fact a map.
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
  // A NULL value looks like a smi, so the smi check below catches both.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &generic_stub_call);
+ __ JumpIfSmi(ebx, &generic_stub_call);
__ CmpObjectType(ebx, MAP_TYPE, ecx);
__ j(not_equal, &generic_stub_call);
@@ -3279,7 +3231,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// ebx: initial map
// edx: JSObject (untagged)
__ mov(Operand(edx, JSObject::kMapOffset), ebx);
- __ mov(ebx, Factory::empty_fixed_array());
+ __ mov(ebx, factory()->empty_fixed_array());
__ mov(Operand(edx, JSObject::kPropertiesOffset), ebx);
__ mov(Operand(edx, JSObject::kElementsOffset), ebx);
@@ -3296,7 +3248,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
__ lea(ecx, Operand(esp, eax, times_4, 1 * kPointerSize));
// Use edi for holding undefined which is used in several places below.
- __ mov(edi, Factory::undefined_value());
+ __ mov(edi, factory()->undefined_value());
// eax: argc
// ecx: first argument
@@ -3348,15 +3300,16 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
__ pop(ecx);
__ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
__ push(ecx);
- __ IncrementCounter(&Counters::constructed_objects, 1);
- __ IncrementCounter(&Counters::constructed_objects_stub, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->constructed_objects(), 1);
+ __ IncrementCounter(counters->constructed_objects_stub(), 1);
__ ret(0);
// Jump to the generic stub in case the specialized code cannot handle the
// construction.
__ bind(&generic_stub_call);
- Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
- Handle<Code> generic_construct_stub(code);
+ Handle<Code> generic_construct_stub =
+ isolate()->builtins()->JSConstructStubGeneric();
__ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -3364,75 +3317,59 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
}
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
- ExternalArrayType array_type, Code::Flags flags) {
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+ MacroAssembler* masm,
+ JSObject::ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label slow, failed_allocation;
+ Label miss_force_generic, failed_allocation, slow;
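+  // Branching to miss_force_generic tells the IC system to switch to the
+  // generic stub instead of compiling another specialized handler.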
- // Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
// Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
-
- // Get the map of the receiver.
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
- __ j(not_zero, &slow, not_taken);
-
- __ CmpInstanceType(ecx, JS_OBJECT_TYPE);
- __ j(not_equal, &slow, not_taken);
-
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- Handle<Map> map(Heap::MapForExternalArrayType(array_type));
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(map));
- __ j(not_equal, &slow, not_taken);
+ __ JumpIfNotSmi(eax, &miss_force_generic);
- // eax: key, known to be a smi.
- // edx: receiver, known to be a JSObject.
- // ebx: elements object, known to be an external array.
// Check that the index is in range.
__ mov(ecx, eax);
__ SmiUntag(ecx); // Untag the index.
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
+ __ j(above_equal, &miss_force_generic);
__ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
// ebx: base pointer of external storage
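+  // Byte and short values always fit in a smi and are loaded straight into
+  // eax; 32-bit values go to ecx first so their smi range can be checked
+  // below, and floating-point values are loaded onto the x87 stack.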
- switch (array_type) {
- case kExternalByteArray:
- __ movsx_b(ecx, Operand(ebx, ecx, times_1, 0));
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ __ movsx_b(eax, Operand(ebx, ecx, times_1, 0));
break;
- case kExternalUnsignedByteArray:
- __ movzx_b(ecx, Operand(ebx, ecx, times_1, 0));
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ __ movzx_b(eax, Operand(ebx, ecx, times_1, 0));
break;
- case kExternalShortArray:
- __ movsx_w(ecx, Operand(ebx, ecx, times_2, 0));
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ __ movsx_w(eax, Operand(ebx, ecx, times_2, 0));
break;
- case kExternalUnsignedShortArray:
- __ movzx_w(ecx, Operand(ebx, ecx, times_2, 0));
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ movzx_w(eax, Operand(ebx, ecx, times_2, 0));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
__ mov(ecx, Operand(ebx, ecx, times_4, 0));
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
__ fld_s(Operand(ebx, ecx, times_4, 0));
break;
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ __ fld_d(Operand(ebx, ecx, times_8, 0));
+ break;
default:
UNREACHABLE();
break;
@@ -3443,17 +3380,17 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
// For floating-point array type:
// FP(0): value
- if (array_type == kExternalIntArray ||
- array_type == kExternalUnsignedIntArray) {
+ if (elements_kind == JSObject::EXTERNAL_INT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
- if (array_type == kExternalIntArray) {
+ if (elements_kind == JSObject::EXTERNAL_INT_ELEMENTS) {
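+    // cmp computes ecx - 0xC0000000, i.e. ecx + 2^30; the sign flag ends
+    // up set exactly when ecx lies outside the 31-bit smi range.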
__ cmp(ecx, 0xC0000000);
__ j(sign, &box_int);
} else {
- ASSERT_EQ(array_type, kExternalUnsignedIntArray);
+ ASSERT_EQ(JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
@@ -3469,12 +3406,12 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
- if (array_type == kExternalIntArray) {
+ if (elements_kind == JSObject::EXTERNAL_INT_ELEMENTS) {
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(ecx);
} else {
- ASSERT(array_type == kExternalUnsignedIntArray);
+ ASSERT_EQ(JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
// Need to zero-extend the value.
// There's no fild variant for unsigned values, so zero-extend
// to a 64-bit int manually.
@@ -3490,7 +3427,8 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
__ mov(eax, ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
- } else if (array_type == kExternalFloatArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
@@ -3499,7 +3437,6 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else {
- __ mov(eax, ecx);
__ SmiTag(eax);
__ ret(0);
}
@@ -3513,63 +3450,50 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
// Slow case: Jump to runtime.
__ bind(&slow);
- __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
+
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- __ pop(ebx);
- __ push(edx); // receiver
- __ push(eax); // name
- __ push(ebx); // return address
+ Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ jmp(ic, RelocInfo::CODE_TARGET);
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+ // ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
- // Return the generated code.
- return GetCode(flags);
+  // Miss case: jump to the force-generic miss handler.
+ __ bind(&miss_force_generic);
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ jmp(miss_ic, RelocInfo::CODE_TARGET);
}
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
- ExternalArrayType array_type, Code::Flags flags) {
+void KeyedStoreStubCompiler::GenerateStoreExternalArray(
+ MacroAssembler* masm,
+ JSObject::ElementsKind elements_kind) {
// ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
+  //  -- eax    : value
+  //  -- ecx    : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label slow, check_heap_number;
+ Label miss_force_generic, slow, check_heap_number;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
- // Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow);
- // Get the map from the receiver.
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
- __ j(not_zero, &slow);
// Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow);
- // Get the instance type from the map of the receiver.
- __ CmpInstanceType(edi, JS_OBJECT_TYPE);
- __ j(not_equal, &slow);
-
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- // eax: value
- // edx: receiver, a JSObject
- // ecx: key, a smi
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ CheckMap(edi, Handle<Map>(Heap::MapForExternalArrayType(array_type)),
- &slow, true);
+ __ JumpIfNotSmi(ecx, &miss_force_generic);
// Check that the index is in range.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ mov(ebx, ecx);
__ SmiUntag(ebx);
__ cmp(ebx, FieldOperand(edi, ExternalArray::kLengthOffset));
@@ -3583,32 +3507,52 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// ecx: key
// edi: elements array
// ebx: untagged index
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_equal, &check_heap_number);
+ if (elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS) {
+ __ JumpIfNotSmi(eax, &slow);
+ } else {
+ __ JumpIfNotSmi(eax, &check_heap_number);
+ }
+
// smi case
__ mov(ecx, eax); // Preserve the value in eax. Key is no longer needed.
__ SmiUntag(ecx);
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// ecx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ { // Clamp the value to [0..255].
+ Label done;
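+        // 0xFFFFFF00 covers every bit above the low byte, so ZF is set iff
+        // the value already fits in [0..255]. Roughly, in C:
+        //   if (v & ~0xFF) v = (v < 0) ? 0 : 255;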
+ __ test(ecx, Immediate(0xFFFFFF00));
+ __ j(zero, &done, Label::kNear);
+ __ setcc(negative, ecx); // 1 if negative, 0 if positive.
+ __ dec_b(ecx); // 0 if negative, 255 if positive.
+ __ bind(&done);
+ }
+ __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
+ break;
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ mov(Operand(edi, ebx, times_4, 0), ecx);
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
// Need to perform int-to-float conversion.
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(ecx);
- __ fstp_s(Operand(edi, ebx, times_4, 0));
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ __ fstp_s(Operand(edi, ebx, times_4, 0));
+ } else { // elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS.
+ __ fstp_d(Operand(edi, ebx, times_8, 0));
+ }
break;
default:
UNREACHABLE();
@@ -3616,92 +3560,113 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
}
__ ret(0); // Return the original value.
- __ bind(&check_heap_number);
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- // ebx: untagged index
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- __ j(not_equal, &slow);
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // ebx: untagged index
- // edi: base pointer of external storage
- if (array_type == kExternalFloatArray) {
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ fstp_s(Operand(edi, ebx, times_4, 0));
- __ ret(0);
- } else {
- // Perform float-to-int conversion with truncation (round-to-zero)
- // behavior.
-
- // For the moment we make the slow call to the runtime on
- // processors that don't support SSE2. The code in IntegerConvert
- // (code-stubs-ia32.cc) is roughly what is needed here though the
- // conversion failure case does not need to be handled.
- if (CpuFeatures::IsSupported(SSE2)) {
- if (array_type != kExternalIntArray &&
- array_type != kExternalUnsignedIntArray) {
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatures::Scope scope(SSE2);
- __ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset));
- // ecx: untagged integer value
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- if (CpuFeatures::IsSupported(SSE3)) {
- CpuFeatures::Scope scope(SSE3);
- // fisttp stores values as signed integers. To represent the
- // entire range of int and unsigned int arrays, store as a
- // 64-bit int and discard the high 32 bits.
- // If the value is NaN or +/-infinity, the result is 0x80000000,
- // which is automatically zero when taken mod 2^n, n < 32.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
- __ fisttp_d(Operand(esp, 0));
- __ pop(ecx);
- __ add(Operand(esp), Immediate(kPointerSize));
- } else {
+ // TODO(danno): handle heap number -> pixel array conversion
+ if (elements_kind != JSObject::EXTERNAL_PIXEL_ELEMENTS) {
+ __ bind(&check_heap_number);
+ // eax: value
+ // edx: receiver
+ // ecx: key
+ // edi: elements array
+ // ebx: untagged index
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->heap_number_map()));
+ __ j(not_equal, &slow);
+
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays basically undefined. For more
+ // reproducible behavior, convert these to zero.
+ __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
+ // ebx: untagged index
+ // edi: base pointer of external storage
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ fstp_s(Operand(edi, ebx, times_4, 0));
+ __ ret(0);
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ fstp_d(Operand(edi, ebx, times_8, 0));
+ __ ret(0);
+ } else {
+ // Perform float-to-int conversion with truncation (round-to-zero)
+ // behavior.
+
+ // For the moment we make the slow call to the runtime on
+ // processors that don't support SSE2. The code in IntegerConvert
+ // (code-stubs-ia32.cc) is roughly what is needed here though the
+ // conversion failure case does not need to be handled.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ if (elements_kind != JSObject::EXTERNAL_INT_ELEMENTS &&
+ elements_kind != JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) {
ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope scope(SSE2);
- // We can easily implement the correct rounding behavior for the
- // range [0, 2^31-1]. For the time being, to keep this code simple,
- // make the slow runtime call for values outside this range.
- // Note: we could do better for signed int arrays.
- __ movd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
- // We will need the key if we have to make the slow runtime call.
- __ push(ecx);
- __ LoadPowerOf2(xmm1, ecx, 31);
- __ pop(ecx);
- __ ucomisd(xmm1, xmm0);
- __ j(above_equal, &slow);
- __ cvttsd2si(ecx, Operand(xmm0));
+ __ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset));
+ // ecx: untagged integer value
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ { // Clamp the value to [0..255].
+ Label done;
+ __ test(ecx, Immediate(0xFFFFFF00));
+ __ j(zero, &done, Label::kNear);
+ __ setcc(negative, ecx); // 1 if negative, 0 if positive.
+ __ dec_b(ecx); // 0 if negative, 255 if positive.
+ __ bind(&done);
+ }
+ __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
+ break;
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ if (CpuFeatures::IsSupported(SSE3)) {
+ CpuFeatures::Scope scope(SSE3);
+ // fisttp stores values as signed integers. To represent the
+ // entire range of int and unsigned int arrays, store as a
+ // 64-bit int and discard the high 32 bits.
+ // If the value is NaN or +/-infinity, the result is 0x80000000,
+ // which is automatically zero when taken mod 2^n, n < 32.
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ fisttp_d(Operand(esp, 0));
+ __ pop(ecx);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ } else {
+ ASSERT(CpuFeatures::IsSupported(SSE2));
+ CpuFeatures::Scope scope(SSE2);
+ // We can easily implement the correct rounding behavior for the
+ // range [0, 2^31-1]. For the time being, to keep this code simple,
+ // make the slow runtime call for values outside this range.
+ // Note: we could do better for signed int arrays.
+ __ movd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+ // We will need the key if we have to make the slow runtime call.
+ __ push(ecx);
+ __ LoadPowerOf2(xmm1, ecx, 31);
+ __ pop(ecx);
+ __ ucomisd(xmm1, xmm0);
+ __ j(above_equal, &slow);
+ __ cvttsd2si(ecx, Operand(xmm0));
+ }
+ // ecx: untagged integer value
+ __ mov(Operand(edi, ebx, times_4, 0), ecx);
}
- // ecx: untagged integer value
- __ mov(Operand(edi, ebx, times_4, 0), ecx);
+ __ ret(0); // Return original value.
}
- __ ret(0); // Return original value.
}
}
// Slow case: call runtime.
__ bind(&slow);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->keyed_store_external_array_slow(), 1);
+
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -3709,19 +3674,105 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// -- esp[0] : return address
// -----------------------------------
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
- __ push(Immediate(Smi::FromInt(
- Code::ExtractExtraICStateFromFlags(flags) & kStrictMode)));
- __ push(ebx); // return address
+ Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ bind(&miss_force_generic);
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss_force_generic;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(eax, &miss_force_generic);
+
+ // Get the elements array.
+ __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ AssertFastElements(ecx);
+
+ // Check that the key is within bounds.
+ __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ j(above_equal, &miss_force_generic);
+
+ // Load the result and make sure it's not the hole.
+ __ mov(ebx, Operand(ecx, eax, times_2,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
+ __ j(equal, &miss_force_generic);
+ __ mov(eax, ebx);
+ __ ret(0);
+
+ __ bind(&miss_force_generic);
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
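
The smi check and the times_2 index scaling above both fall out of V8's ia32 tagging scheme: a smi stores its 31-bit payload shifted left by one with a 0 tag bit, while heap-object pointers carry a 1 tag bit (which FieldOperand subtracts back out via kHeapObjectTag). A self-contained sketch of that arithmetic, with plain integers standing in for tagged words (not V8 API):

#include <cassert>
#include <cstdint>

const intptr_t kSmiTagMask = 1;  // low bit 0 => smi, 1 => heap object
const int kSmiTagSize = 1;

bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == 0; }

int main() {
  intptr_t key = 7 << kSmiTagSize;  // the smi 7
  assert(IsSmi(key));
  // Scaling a smi key by times_2 yields value * 4, i.e. the byte offset
  // of a pointer-sized FixedArray element, so no explicit untag is needed.
  assert(key * 2 == 7 * 4);
  return 0;
}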
+
+
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+ bool is_js_array) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss_force_generic;
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller not to be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(ecx, &miss_force_generic);
+
+ // Get the elements array and make sure it is a fast element array,
+ // not a copy-on-write ('cow') fixed array.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->fixed_array_map()));
+ __ j(not_equal, &miss_force_generic);
+
+ if (is_js_array) {
+ // Check that the key is within bounds.
+ __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
+ __ j(above_equal, &miss_force_generic);
+ } else {
+ // Check that the key is within bounds.
+ __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
+ __ j(above_equal, &miss_force_generic);
+ }
+
+ // Do the store and update the write barrier. Make sure to preserve
+ // the value in register eax.
+ __ mov(edx, Operand(eax));
+ __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
+ __ RecordWrite(edi, 0, edx, ecx);
+
+ // Done.
+ __ ret(0);
- return GetCode(flags);
+ // Handle store cache miss, replacing the ic with the generic stub.
+ __ bind(&miss_force_generic);
+ Handle<Code> ic_force_generic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
}
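
The RecordWrite call above is the generational write barrier: a store of a new-space object into an old-space elements array must be recorded, or the next scavenge could miss the only reference to the young object. A conceptual C++ sketch under standard generational-GC assumptions (the Heap type, the address-range test, and the remembered set are illustrative, not V8's actual interface):

#include <cstdint>
#include <unordered_set>

struct Object { int field; };

struct Heap {
  uintptr_t new_space_lo, new_space_hi;     // fake new-space address range
  std::unordered_set<Object**> remembered;  // slots to rescan at scavenge
  bool InNewSpace(const void* p) const {
    uintptr_t a = reinterpret_cast<uintptr_t>(p);
    return a >= new_space_lo && a < new_space_hi;
  }
};

void StoreWithBarrier(Heap& heap, Object** slot, Object* value) {
  *slot = value;
  // Only old->new pointers need recording; everything else is reached
  // by normal old-space marking or the scavenger's roots.
  if (heap.InNewSpace(value) && !heap.InNewSpace(slot)) {
    heap.remembered.insert(slot);
  }
}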
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc
deleted file mode 100644
index 93d711e93..000000000
--- a/deps/v8/src/ia32/virtual-frame-ia32.cc
+++ /dev/null
@@ -1,1360 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::SyncElementBelowStackPointer(int index) {
- // Emit code to write elements below the stack pointer to their
- // (already allocated) stack address.
- ASSERT(index <= stack_pointer_);
- FrameElement element = elements_[index];
- ASSERT(!element.is_synced());
- switch (element.type()) {
- case FrameElement::INVALID:
- break;
-
- case FrameElement::MEMORY:
- // This function should not be called with synced elements.
- // (memory elements are always synced).
- UNREACHABLE();
- break;
-
- case FrameElement::REGISTER:
- __ mov(Operand(ebp, fp_relative(index)), element.reg());
- break;
-
- case FrameElement::CONSTANT:
- if (cgen()->IsUnsafeSmi(element.handle())) {
- cgen()->StoreUnsafeSmiToLocal(fp_relative(index), element.handle());
- } else {
- __ Set(Operand(ebp, fp_relative(index)),
- Immediate(element.handle()));
- }
- break;
-
- case FrameElement::COPY: {
- int backing_index = element.index();
- FrameElement backing_element = elements_[backing_index];
- if (backing_element.is_memory()) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
- __ mov(Operand(ebp, fp_relative(index)), temp.reg());
- } else {
- ASSERT(backing_element.is_register());
- __ mov(Operand(ebp, fp_relative(index)), backing_element.reg());
- }
- break;
- }
- }
- elements_[index].set_sync();
-}
-
-
-void VirtualFrame::SyncElementByPushing(int index) {
- // Sync an element of the frame that is just above the stack pointer
- // by pushing it.
- ASSERT(index == stack_pointer_ + 1);
- stack_pointer_++;
- FrameElement element = elements_[index];
-
- switch (element.type()) {
- case FrameElement::INVALID:
- __ push(Immediate(Smi::FromInt(0)));
- break;
-
- case FrameElement::MEMORY:
- // No memory elements exist above the stack pointer.
- UNREACHABLE();
- break;
-
- case FrameElement::REGISTER:
- __ push(element.reg());
- break;
-
- case FrameElement::CONSTANT:
- if (cgen()->IsUnsafeSmi(element.handle())) {
- cgen()->PushUnsafeSmi(element.handle());
- } else {
- __ push(Immediate(element.handle()));
- }
- break;
-
- case FrameElement::COPY: {
- int backing_index = element.index();
- FrameElement backing = elements_[backing_index];
- ASSERT(backing.is_memory() || backing.is_register());
- if (backing.is_memory()) {
- __ push(Operand(ebp, fp_relative(backing_index)));
- } else {
- __ push(backing.reg());
- }
- break;
- }
- }
- elements_[index].set_sync();
-}
-
-
-// Clear the dirty bits for the range of elements in
-// [min(stack_pointer_ + 1, begin), end].
-void VirtualFrame::SyncRange(int begin, int end) {
- ASSERT(begin >= 0);
- ASSERT(end < element_count());
- // Sync elements below the range if they have not been materialized
- // on the stack.
- int start = Min(begin, stack_pointer_ + 1);
-
- // Emit normal push instructions for elements above stack pointer
- // and use mov instructions if we are below stack pointer.
- for (int i = start; i <= end; i++) {
- if (!elements_[i].is_synced()) {
- if (i <= stack_pointer_) {
- SyncElementBelowStackPointer(i);
- } else {
- SyncElementByPushing(i);
- }
- }
- }
-}
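
A compact model of SyncRange's dispatch above, useful for seeing the invariant: slots at or below the stack pointer already have physical homes and are written with mov, while slots above are materialized with push, which also advances the pointer. Plain counters stand in for emitted instructions (this is a sketch, not V8 code):

#include <cassert>

int main() {
  int stack_pointer = 2;
  int movs = 0, pushes = 0;
  for (int i = 1; i <= 5; i++) {         // sync the range [1, 5]
    if (i <= stack_pointer) movs++;      // mov [ebp + fp_relative(i)], ...
    else { pushes++; stack_pointer++; }  // push ...
  }
  assert(movs == 2 && pushes == 3 && stack_pointer == 5);
  return 0;
}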
-
-
-void VirtualFrame::MakeMergable() {
- for (int i = 0; i < element_count(); i++) {
- FrameElement element = elements_[i];
-
- // All number type information is reset to unknown for a mergable frame
- // because of incoming back edges.
- if (element.is_constant() || element.is_copy()) {
- if (element.is_synced()) {
- // Just spill.
- elements_[i] = FrameElement::MemoryElement(TypeInfo::Unknown());
- } else {
- // Allocate to a register.
- FrameElement backing_element; // Invalid if not a copy.
- if (element.is_copy()) {
- backing_element = elements_[element.index()];
- }
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid()); // A register was spilled if all were in use.
- elements_[i] =
- FrameElement::RegisterElement(fresh.reg(),
- FrameElement::NOT_SYNCED,
- TypeInfo::Unknown());
- Use(fresh.reg(), i);
-
- // Emit a move.
- if (element.is_constant()) {
- if (cgen()->IsUnsafeSmi(element.handle())) {
- cgen()->MoveUnsafeSmi(fresh.reg(), element.handle());
- } else {
- __ Set(fresh.reg(), Immediate(element.handle()));
- }
- } else {
- ASSERT(element.is_copy());
- // Copies are only backed by register or memory locations.
- if (backing_element.is_register()) {
- // The backing store may have been spilled by allocating,
- // but that's OK. If it was, the value is right where we
- // want it.
- if (!fresh.reg().is(backing_element.reg())) {
- __ mov(fresh.reg(), backing_element.reg());
- }
- } else {
- ASSERT(backing_element.is_memory());
- __ mov(fresh.reg(), Operand(ebp, fp_relative(element.index())));
- }
- }
- }
- // No need to set the copied flag --- there are no copies.
- } else {
- // Clear the copy flag of non-constant, non-copy elements.
- // They cannot be copied because copies are not allowed.
- // The copy flag is not relied on before the end of this loop,
- // including when registers are spilled.
- elements_[i].clear_copied();
- elements_[i].set_type_info(TypeInfo::Unknown());
- }
- }
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
- Comment cmnt(masm(), "[ Merge frame");
- // We should always be merging the code generator's current frame to an
- // expected frame.
- ASSERT(cgen()->frame() == this);
-
- // Adjust the stack pointer upward (toward the top of the virtual
- // frame) if necessary.
- if (stack_pointer_ < expected->stack_pointer_) {
- int difference = expected->stack_pointer_ - stack_pointer_;
- stack_pointer_ = expected->stack_pointer_;
- __ sub(Operand(esp), Immediate(difference * kPointerSize));
- }
-
- MergeMoveRegistersToMemory(expected);
- MergeMoveRegistersToRegisters(expected);
- MergeMoveMemoryToRegisters(expected);
-
- // Adjust the stack pointer downward if necessary.
- if (stack_pointer_ > expected->stack_pointer_) {
- int difference = stack_pointer_ - expected->stack_pointer_;
- stack_pointer_ = expected->stack_pointer_;
- __ add(Operand(esp), Immediate(difference * kPointerSize));
- }
-
- // At this point, the frames should be identical.
- ASSERT(Equals(expected));
-}
-
-
-void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
- ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
- // Move registers, constants, and copies to memory. Perform moves
- // from the top downward in the frame in order to leave the backing
- // stores of copies in registers.
- //
- // Moving memory-backed copies to memory requires a spare register
- // for the memory-to-memory moves. Since we are performing a merge,
- // we use esi (which is already saved in the frame). We keep track
- // of the index of the frame element esi is caching or kIllegalIndex
- // if esi has not been disturbed.
- int esi_caches = kIllegalIndex;
- for (int i = element_count() - 1; i >= 0; i--) {
- FrameElement target = expected->elements_[i];
- if (target.is_register()) continue; // Handle registers later.
- if (target.is_memory()) {
- FrameElement source = elements_[i];
- switch (source.type()) {
- case FrameElement::INVALID:
- // Not a legal merge move.
- UNREACHABLE();
- break;
-
- case FrameElement::MEMORY:
- // Already in place.
- break;
-
- case FrameElement::REGISTER:
- Unuse(source.reg());
- if (!source.is_synced()) {
- __ mov(Operand(ebp, fp_relative(i)), source.reg());
- }
- break;
-
- case FrameElement::CONSTANT:
- if (!source.is_synced()) {
- if (cgen()->IsUnsafeSmi(source.handle())) {
- esi_caches = i;
- cgen()->MoveUnsafeSmi(esi, source.handle());
- __ mov(Operand(ebp, fp_relative(i)), esi);
- } else {
- __ Set(Operand(ebp, fp_relative(i)), Immediate(source.handle()));
- }
- }
- break;
-
- case FrameElement::COPY:
- if (!source.is_synced()) {
- int backing_index = source.index();
- FrameElement backing_element = elements_[backing_index];
- if (backing_element.is_memory()) {
- // If we have to spill a register, we spill esi.
- if (esi_caches != backing_index) {
- esi_caches = backing_index;
- __ mov(esi, Operand(ebp, fp_relative(backing_index)));
- }
- __ mov(Operand(ebp, fp_relative(i)), esi);
- } else {
- ASSERT(backing_element.is_register());
- __ mov(Operand(ebp, fp_relative(i)), backing_element.reg());
- }
- }
- break;
- }
- }
- elements_[i] = target;
- }
-
- if (esi_caches != kIllegalIndex) {
- __ mov(esi, Operand(ebp, fp_relative(context_index())));
- }
-}
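
The esi-caching above is worth isolating: a memory-to-memory move needs a scratch register, and by remembering which backing slot esi currently holds, consecutive copies of the same backing slot cost one load instead of two. A runnable model of that bookkeeping (counters stand in for emitted instructions; a simplification, not the full top-down merge loop):

#include <cassert>

int main() {
  const int kIllegalIndex = -1;
  int esi_caches = kIllegalIndex;
  int loads = 0, stores = 0;
  int backing_of[3] = {5, 5, 7};  // three copies to sync; two share slot 5
  for (int i = 0; i < 3; i++) {
    if (esi_caches != backing_of[i]) {
      esi_caches = backing_of[i];
      loads++;   // mov esi, [ebp + fp_relative(backing)]
    }
    stores++;    // mov [ebp + fp_relative(i)], esi
  }
  assert(loads == 2 && stores == 3);  // the repeated slot 5 loads only once
  return 0;
}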
-
-
-void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
- // We have already done X-to-memory moves.
- ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- // Move the right value into register i if it is currently in a register.
- int index = expected->register_location(i);
- int use_index = register_location(i);
- // Skip if register i is unused in the target or else if source is
- // not a register (this is not a register-to-register move).
- if (index == kIllegalIndex || !elements_[index].is_register()) continue;
-
- Register target = RegisterAllocator::ToRegister(i);
- Register source = elements_[index].reg();
- if (index != use_index) {
- if (use_index == kIllegalIndex) { // Target is currently unused.
- // Copy the contents of the source register to the target.
- // Set frame element register to target.
- Use(target, index);
- Unuse(source);
- __ mov(target, source);
- } else {
- // Exchange contents of registers source and target.
- // Nothing except the register backing use_index has changed.
- elements_[use_index].set_reg(source);
- set_register_location(target, index);
- set_register_location(source, use_index);
- __ xchg(source, target);
- }
- }
-
- if (!elements_[index].is_synced() &&
- expected->elements_[index].is_synced()) {
- __ mov(Operand(ebp, fp_relative(index)), target);
- }
- elements_[index] = expected->elements_[index];
- }
-}
-
-
-void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
- // Move memory, constants, and copies to registers. This is the
- // final step and since it is not done from the bottom up, but in
- // register code order, we have special code to ensure that the backing
- // elements of copies are in their correct locations when we
- // encounter the copies.
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- int index = expected->register_location(i);
- if (index != kIllegalIndex) {
- FrameElement source = elements_[index];
- FrameElement target = expected->elements_[index];
- Register target_reg = RegisterAllocator::ToRegister(i);
- ASSERT(target.reg().is(target_reg));
- switch (source.type()) {
- case FrameElement::INVALID: // Fall through.
- UNREACHABLE();
- break;
- case FrameElement::REGISTER:
- ASSERT(source.Equals(target));
- // Go to next iteration. Skips Use(target_reg) and syncing
- // below. It is safe to skip syncing because a target
- // register frame element would only be synced if all source
- // elements were.
- continue;
- break;
- case FrameElement::MEMORY:
- ASSERT(index <= stack_pointer_);
- __ mov(target_reg, Operand(ebp, fp_relative(index)));
- break;
-
- case FrameElement::CONSTANT:
- if (cgen()->IsUnsafeSmi(source.handle())) {
- cgen()->MoveUnsafeSmi(target_reg, source.handle());
- } else {
- __ Set(target_reg, Immediate(source.handle()));
- }
- break;
-
- case FrameElement::COPY: {
- int backing_index = source.index();
- FrameElement backing = elements_[backing_index];
- ASSERT(backing.is_memory() || backing.is_register());
- if (backing.is_memory()) {
- ASSERT(backing_index <= stack_pointer_);
- // As an optimization, if the backing store should also move to a
- // register, move it to its new register first.
- if (expected->elements_[backing_index].is_register()) {
- FrameElement new_backing = expected->elements_[backing_index];
- Register new_backing_reg = new_backing.reg();
- ASSERT(!is_used(new_backing_reg));
- elements_[backing_index] = new_backing;
- Use(new_backing_reg, backing_index);
- __ mov(new_backing_reg,
- Operand(ebp, fp_relative(backing_index)));
- __ mov(target_reg, new_backing_reg);
- } else {
- __ mov(target_reg, Operand(ebp, fp_relative(backing_index)));
- }
- } else {
- __ mov(target_reg, backing.reg());
- }
- }
- }
- // Ensure the proper sync state.
- if (target.is_synced() && !source.is_synced()) {
- __ mov(Operand(ebp, fp_relative(index)), target_reg);
- }
- Use(target_reg, index);
- elements_[index] = target;
- }
- }
-}
-
-
-void VirtualFrame::Enter() {
- // Registers live on entry: esp, ebp, esi, edi.
- Comment cmnt(masm(), "[ Enter JS frame");
-
-#ifdef DEBUG
- if (FLAG_debug_code) {
- // Verify that edi contains a JS function. The following code
- // relies on eax being available for use.
- __ test(edi, Immediate(kSmiTagMask));
- __ Check(not_zero,
- "VirtualFrame::Enter - edi is not a function (smi check).");
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
- __ Check(equal,
- "VirtualFrame::Enter - edi is not a function (map check).");
- }
-#endif
-
- EmitPush(ebp);
-
- __ mov(ebp, Operand(esp));
-
- // Store the context in the frame. The context is kept in esi and a
- // copy is stored in the frame. The external reference to esi
- // remains.
- EmitPush(esi);
-
- // Store the function in the frame. The frame owns the register
- // reference now (ie, it can keep it in edi or spill it later).
- Push(edi);
- SyncElementAt(element_count() - 1);
- cgen()->allocator()->Unuse(edi);
-}
-
-
-void VirtualFrame::Exit() {
- Comment cmnt(masm(), "[ Exit JS frame");
- // Record the location of the JS exit code for patching when setting
- // break point.
- __ RecordJSReturn();
-
- // Avoid using the leave instruction here, because it is too
- // short. We need the return sequence to be a least the size of a
- // call instruction to support patching the exit code in the
- // debugger. See VisitReturnStatement for the full return sequence.
- __ mov(esp, Operand(ebp));
- stack_pointer_ = frame_pointer();
- for (int i = element_count() - 1; i > stack_pointer_; i--) {
- FrameElement last = elements_.RemoveLast();
- if (last.is_register()) {
- Unuse(last.reg());
- }
- }
-
- EmitPop(ebp);
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
- int count = local_count();
- if (count > 0) {
- Comment cmnt(masm(), "[ Allocate space for locals");
- // The locals are initialized to a constant (the undefined value), but
- // we sync them with the actual frame to allocate space for spilling
- // them later. First sync everything above the stack pointer so we can
- // use pushes to allocate and initialize the locals.
- SyncRange(stack_pointer_ + 1, element_count() - 1);
- Handle<Object> undefined = Factory::undefined_value();
- FrameElement initial_value =
- FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
- if (count == 1) {
- __ push(Immediate(undefined));
- } else if (count < kLocalVarBound) {
- // For fewer locals the unrolled loop is more compact.
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ Set(temp.reg(), Immediate(undefined));
- for (int i = 0; i < count; i++) {
- __ push(temp.reg());
- }
- } else {
- // For more locals a loop in generated code is more compact.
- Label alloc_locals_loop;
- Result cnt = cgen()->allocator()->Allocate();
- Result tmp = cgen()->allocator()->Allocate();
- ASSERT(cnt.is_valid());
- ASSERT(tmp.is_valid());
- __ mov(cnt.reg(), Immediate(count));
- __ mov(tmp.reg(), Immediate(undefined));
- __ bind(&alloc_locals_loop);
- __ push(tmp.reg());
- __ dec(cnt.reg());
- __ j(not_zero, &alloc_locals_loop);
- }
- for (int i = 0; i < count; i++) {
- elements_.Add(initial_value);
- stack_pointer_++;
- }
- }
-}
-
-
-void VirtualFrame::SaveContextRegister() {
- ASSERT(elements_[context_index()].is_memory());
- __ mov(Operand(ebp, fp_relative(context_index())), esi);
-}
-
-
-void VirtualFrame::RestoreContextRegister() {
- ASSERT(elements_[context_index()].is_memory());
- __ mov(esi, Operand(ebp, fp_relative(context_index())));
-}
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ lea(temp.reg(), ParameterAt(-1));
- Push(&temp);
-}
-
-
-int VirtualFrame::InvalidateFrameSlotAt(int index) {
- FrameElement original = elements_[index];
-
- // Is this element the backing store of any copies?
- int new_backing_index = kIllegalIndex;
- if (original.is_copied()) {
- // Verify it is copied, and find first copy.
- for (int i = index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == index) {
- new_backing_index = i;
- break;
- }
- }
- }
-
- if (new_backing_index == kIllegalIndex) {
- // No copies found, return kIllegalIndex.
- if (original.is_register()) {
- Unuse(original.reg());
- }
- elements_[index] = FrameElement::InvalidElement();
- return kIllegalIndex;
- }
-
- // This is the backing store of copies.
- Register backing_reg;
- if (original.is_memory()) {
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- Use(fresh.reg(), new_backing_index);
- backing_reg = fresh.reg();
- __ mov(backing_reg, Operand(ebp, fp_relative(index)));
- } else {
- // The original was in a register.
- backing_reg = original.reg();
- set_register_location(backing_reg, new_backing_index);
- }
- // Invalidate the element at index.
- elements_[index] = FrameElement::InvalidElement();
- // Set the new backing element.
- if (elements_[new_backing_index].is_synced()) {
- elements_[new_backing_index] =
- FrameElement::RegisterElement(backing_reg,
- FrameElement::SYNCED,
- original.type_info());
- } else {
- elements_[new_backing_index] =
- FrameElement::RegisterElement(backing_reg,
- FrameElement::NOT_SYNCED,
- original.type_info());
- }
- // Update the other copies.
- for (int i = new_backing_index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == index) {
- elements_[i].set_index(new_backing_index);
- elements_[new_backing_index].set_copied();
- }
- }
- return new_backing_index;
-}
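
The copy re-parenting above is easiest to see in miniature: invalidating a slot that backs copies promotes the first copy to be the new backing element and repoints the rest. A simplified, runnable model (plain structs stand in for FrameElement; the register/memory distinction is dropped):

#include <cassert>
#include <vector>

struct Elem { bool is_copy; int backing; };  // backing < 0: not a copy

int Invalidate(std::vector<Elem>& f, int index) {
  int new_backing = -1;  // kIllegalIndex
  for (int i = index + 1; i < static_cast<int>(f.size()); i++) {
    if (f[i].is_copy && f[i].backing == index) {
      if (new_backing < 0) {
        new_backing = i;
        f[i] = Elem{false, -1};      // first copy becomes the backing store
      } else {
        f[i].backing = new_backing;  // later copies are repointed
      }
    }
  }
  f[index] = Elem{false, -1};        // the doomed slot is invalidated
  return new_backing;
}

int main() {
  std::vector<Elem> f = {{false, -1}, {true, 0}, {true, 0}};
  assert(Invalidate(f, 0) == 1);
  assert(!f[1].is_copy && f[2].backing == 1);
  return 0;
}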
-
-
-void VirtualFrame::TakeFrameSlotAt(int index) {
- ASSERT(index >= 0);
- ASSERT(index <= element_count());
- FrameElement original = elements_[index];
- int new_backing_store_index = InvalidateFrameSlotAt(index);
- if (new_backing_store_index != kIllegalIndex) {
- elements_.Add(CopyElementAt(new_backing_store_index));
- return;
- }
-
- switch (original.type()) {
- case FrameElement::MEMORY: {
- // Emit code to load the original element's data into a register.
- // Push that register as a FrameElement on top of the frame.
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- FrameElement new_element =
- FrameElement::RegisterElement(fresh.reg(),
- FrameElement::NOT_SYNCED,
- original.type_info());
- Use(fresh.reg(), element_count());
- elements_.Add(new_element);
- __ mov(fresh.reg(), Operand(ebp, fp_relative(index)));
- break;
- }
- case FrameElement::REGISTER:
- Use(original.reg(), element_count());
- // Fall through.
- case FrameElement::CONSTANT:
- case FrameElement::COPY:
- original.clear_sync();
- elements_.Add(original);
- break;
- case FrameElement::INVALID:
- UNREACHABLE();
- break;
- }
-}
-
-
-void VirtualFrame::StoreToFrameSlotAt(int index) {
- // Store the value on top of the frame to the virtual frame slot at
- // a given index. The value on top of the frame is left in place.
- // This is a duplicating operation, so it can create copies.
- ASSERT(index >= 0);
- ASSERT(index < element_count());
-
- int top_index = element_count() - 1;
- FrameElement top = elements_[top_index];
- FrameElement original = elements_[index];
- if (top.is_copy() && top.index() == index) return;
- ASSERT(top.is_valid());
-
- InvalidateFrameSlotAt(index);
-
- // InvalidateFrameSlotAt can potentially change any frame element, due
- // to spilling registers to allocate temporaries in order to preserve
- // the copy-on-write semantics of aliased elements. Reload top from
- // the frame.
- top = elements_[top_index];
-
- if (top.is_copy()) {
- // There are two cases based on the relative positions of the
- // stored-to slot and the backing slot of the top element.
- int backing_index = top.index();
- ASSERT(backing_index != index);
- if (backing_index < index) {
- // 1. The top element is a copy of a slot below the stored-to
- // slot. The stored-to slot becomes an unsynced copy of that
- // same backing slot.
- elements_[index] = CopyElementAt(backing_index);
- } else {
- // 2. The top element is a copy of a slot above the stored-to
- // slot. The stored-to slot becomes the new (unsynced) backing
- // slot and both the top element and the element at the former
- // backing slot become copies of it. The sync state of the top
- // and former backing elements is preserved.
- FrameElement backing_element = elements_[backing_index];
- ASSERT(backing_element.is_memory() || backing_element.is_register());
- if (backing_element.is_memory()) {
- // Because sets of copies are canonicalized to be backed by
- // their lowest frame element, and because memory frame
- // elements are backed by the corresponding stack address, we
- // have to move the actual value down in the stack.
- //
- // TODO(209): consider allocating the stored-to slot to the
- // temp register. Alternatively, allow copies to appear in
- // any order in the frame and lazily move the value down to
- // the slot.
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
- __ mov(Operand(ebp, fp_relative(index)), temp.reg());
- } else {
- set_register_location(backing_element.reg(), index);
- if (backing_element.is_synced()) {
- // If the element is a register, we will not actually move
- // anything on the stack but only update the virtual frame
- // element.
- backing_element.clear_sync();
- }
- }
- elements_[index] = backing_element;
-
- // The old backing element becomes a copy of the new backing
- // element.
- FrameElement new_element = CopyElementAt(index);
- elements_[backing_index] = new_element;
- if (backing_element.is_synced()) {
- elements_[backing_index].set_sync();
- }
-
- // All the copies of the old backing element (including the top
- // element) become copies of the new backing element.
- for (int i = backing_index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
- elements_[i].set_index(index);
- }
- }
- }
- return;
- }
-
- // Move the top element to the stored-to slot and replace it (the
- // top element) with a copy.
- elements_[index] = top;
- if (top.is_memory()) {
- // TODO(209): consider allocating the stored-to slot to the temp
- // register. Alternatively, allow copies to appear in any order
- // in the frame and lazily move the value down to the slot.
- FrameElement new_top = CopyElementAt(index);
- new_top.set_sync();
- elements_[top_index] = new_top;
-
- // The sync state of the former top element is correct (synced).
- // Emit code to move the value down in the frame.
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), Operand(esp, 0));
- __ mov(Operand(ebp, fp_relative(index)), temp.reg());
- } else if (top.is_register()) {
- set_register_location(top.reg(), index);
- // The stored-to slot has the (unsynced) register reference and
- // the top element becomes a copy. The sync state of the top is
- // preserved.
- FrameElement new_top = CopyElementAt(index);
- if (top.is_synced()) {
- new_top.set_sync();
- elements_[index].clear_sync();
- }
- elements_[top_index] = new_top;
- } else {
- // The stored-to slot holds the same value as the top but
- // unsynced. (We do not have copies of constants yet.)
- ASSERT(top.is_constant());
- elements_[index].clear_sync();
- }
-}
-
-
-void VirtualFrame::UntaggedPushFrameSlotAt(int index) {
- ASSERT(index >= 0);
- ASSERT(index <= element_count());
- FrameElement original = elements_[index];
- if (original.is_copy()) {
- original = elements_[original.index()];
- index = original.index();
- }
-
- switch (original.type()) {
- case FrameElement::MEMORY:
- case FrameElement::REGISTER: {
- Label done;
- // Emit code to load the original element's data into a register.
- // Push that register as a FrameElement on top of the frame.
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- Register fresh_reg = fresh.reg();
- FrameElement new_element =
- FrameElement::RegisterElement(fresh_reg,
- FrameElement::NOT_SYNCED,
- original.type_info());
- new_element.set_untagged_int32(true);
- Use(fresh_reg, element_count());
- fresh.Unuse(); // BreakTarget does not handle a live Result well.
- elements_.Add(new_element);
- if (original.is_register()) {
- __ mov(fresh_reg, original.reg());
- } else {
- ASSERT(original.is_memory());
- __ mov(fresh_reg, Operand(ebp, fp_relative(index)));
- }
- // Now convert the value to int32, or bail out.
- if (original.type_info().IsSmi()) {
- __ SmiUntag(fresh_reg);
- // Pushing the element is completely done.
- } else {
- __ test(fresh_reg, Immediate(kSmiTagMask));
- Label not_smi;
- __ j(not_zero, &not_smi);
- __ SmiUntag(fresh_reg);
- __ jmp(&done);
-
- __ bind(&not_smi);
- if (!original.type_info().IsNumber()) {
- __ cmp(FieldOperand(fresh_reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
- cgen()->unsafe_bailout_->Branch(not_equal);
- }
-
- if (!CpuFeatures::IsSupported(SSE2)) {
- UNREACHABLE();
- } else {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(xmm0, FieldOperand(fresh_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(fresh_reg, Operand(xmm0));
- __ cvtsi2sd(xmm1, Operand(fresh_reg));
- __ ucomisd(xmm0, xmm1);
- cgen()->unsafe_bailout_->Branch(not_equal);
- cgen()->unsafe_bailout_->Branch(parity_even); // NaN.
- // Test for negative zero.
- __ test(fresh_reg, Operand(fresh_reg));
- __ j(not_zero, &done);
- __ movmskpd(fresh_reg, xmm0);
- __ and_(fresh_reg, 0x1);
- cgen()->unsafe_bailout_->Branch(not_equal);
- }
- __ bind(&done);
- }
- break;
- }
- case FrameElement::CONSTANT:
- elements_.Add(CopyElementAt(index));
- elements_[element_count() - 1].set_untagged_int32(true);
- break;
- case FrameElement::COPY:
- case FrameElement::INVALID:
- UNREACHABLE();
- break;
- }
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
- ASSERT(cgen()->HasValidEntryRegisters());
- // Grow the expression stack by handler size less one (the return
- // address is already pushed by a call instruction).
- Adjust(kHandlerSize - 1);
- __ PushTryHandler(IN_JAVASCRIPT, type);
-}
-
-
-Result VirtualFrame::RawCallStub(CodeStub* stub) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallStub(stub);
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
- PrepareForCall(0, 0);
- arg->ToRegister(eax);
- arg->Unuse();
- return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
- PrepareForCall(0, 0);
-
- if (arg0->is_register() && arg0->reg().is(eax)) {
- if (arg1->is_register() && arg1->reg().is(edx)) {
- // Wrong registers.
- __ xchg(eax, edx);
- } else {
- // Register edx is free for arg0, which frees eax for arg1.
- arg0->ToRegister(edx);
- arg1->ToRegister(eax);
- }
- } else {
- // Register eax is free for arg1, which guarantees edx is free for
- // arg0.
- arg1->ToRegister(eax);
- arg0->ToRegister(edx);
- }
-
- arg0->Unuse();
- arg1->Unuse();
- return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallJSFunction(int arg_count) {
- Result function = Pop();
-
- // InvokeFunction requires function in edi. Move it in there.
- function.ToRegister(edi);
- function.Unuse();
-
- // +1 for receiver.
- PrepareForCall(arg_count + 1, arg_count + 1);
- ASSERT(cgen()->HasValidEntryRegisters());
- ParameterCount count(arg_count);
- __ InvokeFunction(edi, count, CALL_FUNCTION);
- RestoreContextRegister();
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(f, arg_count);
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(id, arg_count);
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
- PrepareForCall(0, 0);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ DebugBreak();
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
-}
-#endif
-
-
-Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ InvokeBuiltin(id, flag);
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ call(code, rmode);
- Result result = cgen()->allocator()->Allocate(eax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-// This function assumes that the only results that could be in a_reg or b_reg
-// are a and b. Other results can be live, but must not be in a_reg or b_reg.
-void VirtualFrame::MoveResultsToRegisters(Result* a,
- Result* b,
- Register a_reg,
- Register b_reg) {
- if (a->is_register() && a->reg().is(a_reg)) {
- b->ToRegister(b_reg);
- } else if (!cgen()->allocator()->is_used(a_reg)) {
- a->ToRegister(a_reg);
- b->ToRegister(b_reg);
- } else if (cgen()->allocator()->is_used(b_reg)) {
- // a must be in b_reg, b in a_reg.
- __ xchg(a_reg, b_reg);
- // Results a and b will be invalidated, so it is ok if they are switched.
- } else {
- b->ToRegister(b_reg);
- a->ToRegister(a_reg);
- }
- a->Unuse();
- b->Unuse();
-}
-
-
-Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
- // Name and receiver are on the top of the frame. The IC expects
- // name in ecx and receiver in eax.
- Result name = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0); // No stack arguments.
- MoveResultsToRegisters(&name, &receiver, ecx, eax);
-
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
- // Key and receiver are on top of the frame. Put them in eax and edx.
- Result key = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0);
- MoveResultsToRegisters(&key, &receiver, eax, edx);
-
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallStoreIC(Handle<String> name,
- bool is_contextual,
- StrictModeFlag strict_mode) {
- // Value and (if not contextual) receiver are on top of the frame.
- // The IC expects name in ecx, value in eax, and receiver in edx.
- Handle<Code> ic(Builtins::builtin(
- (strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
-
- Result value = Pop();
- RelocInfo::Mode mode;
- if (is_contextual) {
- PrepareForCall(0, 0);
- value.ToRegister(eax);
- __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- value.Unuse();
- mode = RelocInfo::CODE_TARGET_CONTEXT;
- } else {
- Result receiver = Pop();
- PrepareForCall(0, 0);
- MoveResultsToRegisters(&value, &receiver, eax, edx);
- mode = RelocInfo::CODE_TARGET;
- }
- __ mov(ecx, name);
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
- // Value, key, and receiver are on the top of the frame. The IC
- // expects value in eax, key in ecx, and receiver in edx.
- Result value = Pop();
- Result key = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0);
- if (!cgen()->allocator()->is_used(eax) ||
- (value.is_register() && value.reg().is(eax))) {
- if (!cgen()->allocator()->is_used(eax)) {
- value.ToRegister(eax);
- }
- MoveResultsToRegisters(&key, &receiver, ecx, edx);
- value.Unuse();
- } else if (!cgen()->allocator()->is_used(ecx) ||
- (key.is_register() && key.reg().is(ecx))) {
- if (!cgen()->allocator()->is_used(ecx)) {
- key.ToRegister(ecx);
- }
- MoveResultsToRegisters(&value, &receiver, eax, edx);
- key.Unuse();
- } else if (!cgen()->allocator()->is_used(edx) ||
- (receiver.is_register() && receiver.reg().is(edx))) {
- if (!cgen()->allocator()->is_used(edx)) {
- receiver.ToRegister(edx);
- }
- MoveResultsToRegisters(&key, &value, ecx, eax);
- receiver.Unuse();
- } else {
- // All three registers are used, and no value is in the correct place.
- // We have one of the two circular permutations of eax, ecx, edx.
- ASSERT(value.is_register());
- if (value.reg().is(ecx)) {
- __ xchg(eax, edx);
- __ xchg(eax, ecx);
- } else {
- __ xchg(eax, ecx);
- __ xchg(eax, edx);
- }
- value.Unuse();
- key.Unuse();
- receiver.Unuse();
- }
-
- Handle<Code> ic(Builtins::builtin(
- (strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
-}
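
The final case above rests on a small permutation fact: if eax, ecx, and edx each hold one of {value, key, receiver} but none holds the right one, the arrangement is one of the two 3-cycles, and either is undone by exactly two exchanges. A self-contained check with ints standing in for the registers (0 = value, 1 = key, 2 = receiver; targets are value->eax, key->ecx, receiver->edx):

#include <cassert>
#include <utility>

int main() {
  // Cycle with value in ecx: eax=receiver, ecx=value, edx=key.
  int eax = 2, ecx = 0, edx = 1;
  std::swap(eax, edx);  // __ xchg(eax, edx)
  std::swap(eax, ecx);  // __ xchg(eax, ecx)
  assert(eax == 0 && ecx == 1 && edx == 2);

  // The other cycle, with value in edx, uses the opposite xchg order.
  eax = 1; ecx = 2; edx = 0;
  std::swap(eax, ecx);  // __ xchg(eax, ecx)
  std::swap(eax, edx);  // __ xchg(eax, edx)
  assert(eax == 0 && ecx == 1 && edx == 2);
  return 0;
}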
-
-
-Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
- int arg_count,
- int loop_nesting) {
- // Function name, arguments, and receiver are on top of the frame.
- // The IC expects the name in ecx and the rest on the stack and
- // drops them all.
- InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
- // Spill args, receiver, and function. The call will drop args and
- // receiver.
- Result name = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Arguments + receiver.
- name.ToRegister(ecx);
- name.Unuse();
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
- int arg_count,
- int loop_nesting) {
- // Function name, arguments, and receiver are on top of the frame.
- // The IC expects the name in ecx and the rest on the stack and
- // drops them all.
- InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
- // Spill args, receiver, and function. The call will drop args and
- // receiver.
- Result name = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Arguments + receiver.
- name.ToRegister(ecx);
- name.Unuse();
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallConstructor(int arg_count) {
- // Arguments, receiver, and function are on top of the frame. The
- // IC expects arg count in eax, function in edi, and the arguments
- // and receiver on the stack.
- Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
- // Duplicate the function before preparing the frame.
- PushElementAt(arg_count);
- Result function = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Spill function and args.
- function.ToRegister(edi);
-
- // Constructors are called with the number of arguments in register
- // eax for now. Another option would be to have a separate construct
- // call trampoline for each argument count encountered.
- Result num_args = cgen()->allocator()->Allocate(eax);
- ASSERT(num_args.is_valid());
- __ Set(num_args.reg(), Immediate(arg_count));
-
- function.Unuse();
- num_args.Unuse();
- return RawCallCodeObject(ic, RelocInfo::CONSTRUCT_CALL);
-}
-
-
-void VirtualFrame::Drop(int count) {
- ASSERT(count >= 0);
- ASSERT(height() >= count);
- int num_virtual_elements = (element_count() - 1) - stack_pointer_;
-
- // Emit code to lower the stack pointer if necessary.
- if (num_virtual_elements < count) {
- int num_dropped = count - num_virtual_elements;
- stack_pointer_ -= num_dropped;
- __ add(Operand(esp), Immediate(num_dropped * kPointerSize));
- }
-
- // Discard elements from the virtual frame and free any registers.
- for (int i = 0; i < count; i++) {
- FrameElement dropped = elements_.RemoveLast();
- if (dropped.is_register()) {
- Unuse(dropped.reg());
- }
- }
-}
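
Drop's arithmetic separates purely virtual elements (those above stack_pointer_, which exist only in the frame's bookkeeping) from elements that occupy real stack slots and so require an esp adjustment. A tiny runnable model of that accounting (plain ints; no code emission):

#include <cassert>

int main() {
  int element_count = 10, stack_pointer = 6, count = 5;
  int num_virtual = (element_count - 1) - stack_pointer;  // 3 virtual
  int num_dropped = count > num_virtual ? count - num_virtual : 0;
  assert(num_dropped == 2);  // would emit: add esp, 2 * kPointerSize
  return 0;
}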
-
-
-Result VirtualFrame::Pop() {
- FrameElement element = elements_.RemoveLast();
- int index = element_count();
- ASSERT(element.is_valid());
- ASSERT(element.is_untagged_int32() == cgen()->in_safe_int32_mode());
-
- // Get number type information of the result.
- TypeInfo info;
- if (!element.is_copy()) {
- info = element.type_info();
- } else {
- info = elements_[element.index()].type_info();
- }
-
- bool pop_needed = (stack_pointer_ == index);
- if (pop_needed) {
- stack_pointer_--;
- if (element.is_memory()) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ pop(temp.reg());
- temp.set_type_info(info);
- temp.set_untagged_int32(element.is_untagged_int32());
- return temp;
- }
-
- __ add(Operand(esp), Immediate(kPointerSize));
- }
- ASSERT(!element.is_memory());
-
- // The top element is a register, constant, or a copy. Unuse
- // registers and follow copies to their backing store.
- if (element.is_register()) {
- Unuse(element.reg());
- } else if (element.is_copy()) {
- ASSERT(!element.is_untagged_int32());
- ASSERT(element.index() < index);
- index = element.index();
- element = elements_[index];
- }
- ASSERT(!element.is_copy());
-
- // The element is memory, a register, or a constant.
- if (element.is_memory()) {
- // Memory elements could only be the backing store of a copy.
- // Allocate the original to a register.
- ASSERT(index <= stack_pointer_);
- ASSERT(!element.is_untagged_int32());
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- Use(temp.reg(), index);
- FrameElement new_element =
- FrameElement::RegisterElement(temp.reg(),
- FrameElement::SYNCED,
- element.type_info());
- // Preserve the copy flag on the element.
- if (element.is_copied()) new_element.set_copied();
- elements_[index] = new_element;
- __ mov(temp.reg(), Operand(ebp, fp_relative(index)));
- return Result(temp.reg(), info);
- } else if (element.is_register()) {
- Result return_value(element.reg(), info);
- return_value.set_untagged_int32(element.is_untagged_int32());
- return return_value;
- } else {
- ASSERT(element.is_constant());
- Result return_value(element.handle());
- return_value.set_untagged_int32(element.is_untagged_int32());
- return return_value;
- }
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_--;
- elements_.RemoveLast();
- __ pop(reg);
-}
-
-
-void VirtualFrame::EmitPop(Operand operand) {
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_--;
- elements_.RemoveLast();
- __ pop(operand);
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(reg);
-}
-
-
-void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(operand);
-}
-
-
-void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(immediate);
-}
-
-
-void VirtualFrame::PushUntaggedElement(Handle<Object> value) {
- ASSERT(!ConstantPoolOverflowed());
- elements_.Add(FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED));
- elements_[element_count() - 1].set_untagged_int32(true);
-}
-
-
-void VirtualFrame::Push(Expression* expr) {
- ASSERT(expr->IsTrivial());
-
- Literal* lit = expr->AsLiteral();
- if (lit != NULL) {
- Push(lit->handle());
- return;
- }
-
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL) {
- Slot* slot = proxy->var()->AsSlot();
- if (slot->type() == Slot::LOCAL) {
- PushLocalAt(slot->index());
- return;
- }
- if (slot->type() == Slot::PARAMETER) {
- PushParameterAt(slot->index());
- return;
- }
- }
- UNREACHABLE();
-}
-
-
-void VirtualFrame::Push(Handle<Object> value) {
- if (ConstantPoolOverflowed()) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ Set(temp.reg(), Immediate(value));
- Push(&temp);
- } else {
- FrameElement element =
- FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
- elements_.Add(element);
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.h b/deps/v8/src/ia32/virtual-frame-ia32.h
deleted file mode 100644
index 51874309d..000000000
--- a/deps/v8/src/ia32/virtual-frame-ia32.h
+++ /dev/null
@@ -1,646 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
-#define V8_IA32_VIRTUAL_FRAME_IA32_H_
-
-#include "codegen.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "type-info.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame. It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame: public ZoneObject {
- public:
- // A utility class to introduce a scope where the virtual frame is
- // expected to remain spilled. The constructor spills the code
- // generator's current frame, but no attempt is made to require it
- // to stay spilled. It is intended as documentation while the code
- // generator is being transformed.
- class SpilledScope BASE_EMBEDDED {
- public:
- SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
- ASSERT(cgen()->has_valid_frame());
- cgen()->frame()->SpillAll();
- cgen()->set_in_spilled_code(true);
- }
-
- ~SpilledScope() {
- cgen()->set_in_spilled_code(previous_state_);
- }
-
- private:
- bool previous_state_;
-
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
- };
-
- // An illegal index into the virtual frame.
- static const int kIllegalIndex = -1;
-
- // Construct an initial virtual frame on entry to a JS function.
- inline VirtualFrame();
-
- // Construct a virtual frame as a clone of an existing one.
- explicit inline VirtualFrame(VirtualFrame* original);
-
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
-
- MacroAssembler* masm() { return cgen()->masm(); }
-
- // Create a duplicate of an existing valid frame element.
- FrameElement CopyElementAt(int index,
- TypeInfo info = TypeInfo::Uninitialized());
-
- // The number of elements on the virtual frame.
- int element_count() { return elements_.length(); }
-
- // The height of the virtual expression stack.
- int height() { return element_count() - expression_base_index(); }
-
- int register_location(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num];
- }
-
- inline int register_location(Register reg);
-
- inline void set_register_location(Register reg, int index);
-
- bool is_used(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num] != kIllegalIndex;
- }
-
- inline bool is_used(Register reg);
-
- // Add extra in-memory elements to the top of the frame to match an actual
- // frame (eg, the frame after an exception handler is pushed). No code is
- // emitted.
- void Adjust(int count);
-
- // Forget count elements from the top of the frame all in-memory
- // (including synced) and adjust the stack pointer downward, to
- // match an external frame effect (examples include a call removing
- // its arguments, and exiting a try/catch removing an exception
- // handler). No code will be emitted.
- void Forget(int count) {
- ASSERT(count >= 0);
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_ -= count;
- ForgetElements(count);
- }
-
- // Forget count elements from the top of the frame without adjusting
- // the stack pointer downward. This is used, for example, before
- // merging frames at break, continue, and return targets.
- void ForgetElements(int count);
-
- // Spill all values from the frame to memory.
- inline void SpillAll();
-
- // Spill all occurrences of a specific register from the frame.
- void Spill(Register reg) {
- if (is_used(reg)) SpillElementAt(register_location(reg));
- }
-
- // Make the two registers distinct and spill them. Returns the second
- // register. If the registers were not distinct then it returns the new
- // second register.
- Result MakeDistinctAndSpilled(Result* left, Result* right) {
- Spill(left->reg());
- Spill(right->reg());
- if (left->reg().is(right->reg())) {
- RegisterAllocator* allocator = cgen()->allocator();
- Result fresh = allocator->Allocate();
- ASSERT(fresh.is_valid());
- masm()->mov(fresh.reg(), right->reg());
- return fresh;
- }
- return *right;
- }
-
- // Spill all occurrences of an arbitrary register if possible. Return the
- // register spilled or no_reg if it was not possible to free any register
- // (ie, they all have frame-external references).
- Register SpillAnyRegister();
-
- // Spill the top element of the frame.
- void SpillTop() { SpillElementAt(element_count() - 1); }
-
- // Sync the range of elements in [begin, end] with memory.
- void SyncRange(int begin, int end);
-
- // Make this frame mergable, so that an arbitrary frame of the same
- // height can be merged to it. Copies and constants are removed from
- // the frame.
- void MakeMergable();
-
- // Prepare this virtual frame for merging to an expected frame by
- // performing some state changes that do not require generating
- // code. It is guaranteed that no code will be generated.
- void PrepareMergeTo(VirtualFrame* expected);
-
- // Make this virtual frame have a state identical to an expected virtual
- // frame. As a side effect, code may be emitted to make this frame match
- // the expected one.
- void MergeTo(VirtualFrame* expected);
-
- // Detach a frame from its code generator, perhaps temporarily. This
- // tells the register allocator that it is free to use frame-internal
- // registers. Used when the code generator's frame is switched from this
- // one to NULL by an unconditional jump.
- void DetachFromCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Unuse(i);
- }
- }
-
- // (Re)attach a frame to its code generator. This informs the register
- // allocator that the frame-internal register references are active again.
- // Used when a code generator's frame is switched from NULL to this one by
- // binding a label.
- void AttachToCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Use(i);
- }
- }
-
- // Emit code for the physical JS entry and exit frame sequences. After
- // calling Enter, the virtual frame is ready for use; and after calling
- // Exit it should not be used. Note that Enter does not allocate space in
- // the physical frame for storing frame-allocated locals.
- void Enter();
- void Exit();
-
- // Prepare for returning from the frame by spilling locals. This
- // avoids generating unnecessary merge code when jumping to the
- // shared return site. Emits code for spills.
- inline void PrepareForReturn();
-
- // Number of local variables at or above which a loop is used for allocating.
- static const int kLocalVarBound = 10;
-
- // Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots();
-
- // An element of the expression stack as an assembly operand.
- Operand ElementAt(int index) const {
- return Operand(esp, index * kPointerSize);
- }
-
- // Random-access store to a frame-top relative frame element. The result
- // becomes owned by the frame and is invalidated.
- void SetElementAt(int index, Result* value);
-
- // Set a frame element to a constant. The index is frame-top relative.
- inline void SetElementAt(int index, Handle<Object> value);
-
- void PushElementAt(int index) {
- PushFrameSlotAt(element_count() - index - 1);
- }
-
- void StoreToElementAt(int index) {
- StoreToFrameSlotAt(element_count() - index - 1);
- }
-
- // A frame-allocated local as an assembly operand.
- Operand LocalAt(int index) {
- ASSERT(0 <= index);
- ASSERT(index < local_count());
- return Operand(ebp, kLocal0Offset - index * kPointerSize);
- }
-
- // Push a copy of the value of a local frame slot on top of the frame.
- void PushLocalAt(int index) {
- PushFrameSlotAt(local0_index() + index);
- }
-
- // Push an untagged int32 copy of the value of a local frame slot on
- // top of the frame.
- void UntaggedPushLocalAt(int index) {
- UntaggedPushFrameSlotAt(local0_index() + index);
- }
-
- // Push the value of a local frame slot on top of the frame and invalidate
- // the local slot. The slot should be written to before trying to read
- // from it again.
- void TakeLocalAt(int index) {
- TakeFrameSlotAt(local0_index() + index);
- }
-
- // Store the top value on the virtual frame into a local frame slot. The
- // value is left in place on top of the frame.
- void StoreToLocalAt(int index) {
- StoreToFrameSlotAt(local0_index() + index);
- }
-
- // Push the address of the receiver slot on the frame.
- void PushReceiverSlotAddress();
-
- // Push the function on top of the frame.
- void PushFunction() {
- PushFrameSlotAt(function_index());
- }
-
- // Save the value of the esi register to the context frame slot.
- void SaveContextRegister();
-
- // Restore the esi register from the value of the context frame
- // slot.
- void RestoreContextRegister();
-
- // A parameter as an assembly operand.
- Operand ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index < parameter_count());
- return Operand(ebp, (1 + parameter_count() - index) * kPointerSize);
- }
-
- // Push a copy of the value of a parameter frame slot on top of the frame.
- void PushParameterAt(int index) {
- PushFrameSlotAt(param0_index() + index);
- }
-
- // Push an untagged int32 copy of the value of a parameter frame slot
- // on top of the frame.
- void UntaggedPushParameterAt(int index) {
- UntaggedPushFrameSlotAt(param0_index() + index);
- }
-
- // Push the value of a parameter frame slot on top of the frame and
- // invalidate the parameter slot. The slot should be written to before
- // trying to read from it again.
- void TakeParameterAt(int index) {
- TakeFrameSlotAt(param0_index() + index);
- }
-
- // Store the top value on the virtual frame into a parameter frame slot.
- // The value is left in place on top of the frame.
- void StoreToParameterAt(int index) {
- StoreToFrameSlotAt(param0_index() + index);
- }
-
- // The receiver frame slot.
- Operand Receiver() {
- return ParameterAt(-1);
- }
-
- // Push a try-catch or try-finally handler on top of the virtual frame.
- void PushTryHandler(HandlerType type);
-
- // Call stub given the number of arguments it expects on (and
- // removes from) the stack.
- inline Result CallStub(CodeStub* stub, int arg_count);
-
- // Call stub that takes a single argument passed in eax. The
- // argument is given as a result which does not have to be eax or
- // even a register. The argument is consumed by the call.
- Result CallStub(CodeStub* stub, Result* arg);
-
- // Call stub that takes a pair of arguments passed in edx (arg0) and
- // eax (arg1). The arguments are given as results which do not have
- // to be in the proper registers or even in registers. The
- // arguments are consumed by the call.
- Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
- // Call JS function from top of the stack with arguments
- // taken from the stack.
- Result CallJSFunction(int arg_count);
-
- // Call runtime given the number of arguments expected on (and
- // removed from) the stack.
- Result CallRuntime(Runtime::Function* f, int arg_count);
- Result CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- void DebugBreak();
-#endif
-
- // Invoke builtin given the number of arguments it expects on (and
- // removes from) the stack.
- Result InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, int arg_count);
-
- // Call load IC. Name and receiver are found on top of the frame.
- // Both are dropped.
- Result CallLoadIC(RelocInfo::Mode mode);
-
- // Call keyed load IC. Key and receiver are found on top of the
- // frame. Both are dropped.
- Result CallKeyedLoadIC(RelocInfo::Mode mode);
-
- // Call store IC. If the store is contextual, value is found on top of the
- // frame. If not, value and receiver are on the frame. Both are dropped.
- Result CallStoreIC(Handle<String> name, bool is_contextual,
- StrictModeFlag strict_mode);
-
- // Call keyed store IC. Value, key, and receiver are found on top
- // of the frame. All three are dropped.
- Result CallKeyedStoreIC(StrictModeFlag strict_mode);
-
- // Call call IC. Function name, arguments, and receiver are found on top
- // of the frame and dropped by the call. The argument count does not
- // include the receiver.
- Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
- // Call keyed call IC. Same calling convention as CallCallIC.
- Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
- // Allocate and call JS function as constructor. Arguments,
- // receiver (global object), and function are found on top of the
- // frame. Function is not dropped. The argument count does not
- // include the receiver.
- Result CallConstructor(int arg_count);
-
- // Drop a number of elements from the top of the expression stack. May
- // emit code to affect the physical frame. Does not clobber any registers
- // excepting possibly the stack pointer.
- void Drop(int count);
-
- // Drop one element.
- void Drop() {
- Drop(1);
- }
-
- // Duplicate the top element of the frame.
- void Dup() {
- PushFrameSlotAt(element_count() - 1);
- }
-
- // Pop an element from the top of the expression stack. Returns a
- // Result, which may be a constant or a register.
- Result Pop();
-
- // Pop and save an element from the top of the expression stack and
- // emit a corresponding pop instruction.
- void EmitPop(Register reg);
- void EmitPop(Operand operand);
-
- // Push an element on top of the expression stack and emit a
- // corresponding push instruction.
- void EmitPush(Register reg,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(Operand operand,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(Immediate immediate,
- TypeInfo info = TypeInfo::Unknown());
-
- inline bool ConstantPoolOverflowed();
-
- // Push an element on the virtual frame.
- void Push(Handle<Object> value);
- inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
- inline void Push(Smi* value);
-
- void PushUntaggedElement(Handle<Object> value);
-
- // Pushing a result invalidates it (its contents become owned by the
- // frame).
- void Push(Result* result) {
- // This assert will trigger if you try to push the same value twice.
- ASSERT(result->is_valid());
- if (result->is_register()) {
- Push(result->reg(), result->type_info());
- } else {
- ASSERT(result->is_constant());
- Push(result->handle());
- }
- if (cgen()->in_safe_int32_mode()) {
- ASSERT(result->is_untagged_int32());
- elements_[element_count() - 1].set_untagged_int32(true);
- }
- result->Unuse();
- }
-
- // Pushing an expression expects that the expression is trivial (according
- // to Expression::IsTrivial).
- void Push(Expression* expr);
-
- // Nip removes zero or more elements from immediately below the top
- // of the frame, leaving the previous top-of-frame value on top of
- // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
- inline void Nip(int num_dropped);
-
- // Check that the frame has no elements holding untagged int32 values.
- bool HasNoUntaggedInt32Elements() {
- for (int i = 0; i < element_count(); ++i) {
- if (elements_[i].is_untagged_int32()) return false;
- }
- return true;
- }
-
- // Update the type information of a variable frame element directly.
- inline void SetTypeForLocalAt(int index, TypeInfo info);
- inline void SetTypeForParamAt(int index, TypeInfo info);
-
- private:
- static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
- static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
- static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
- static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
-
- ZoneList<FrameElement> elements_;
-
- // The index of the element that is at the processor's stack pointer
- // (the esp register).
- int stack_pointer_;
-
- // The index of the register frame element using each register, or
- // kIllegalIndex if a register is not on the frame.
- int register_locations_[RegisterAllocator::kNumRegisters];
-
- // The number of frame-allocated locals and parameters respectively.
- inline int parameter_count();
-
- inline int local_count();
-
- // The index of the element that is at the processor's frame pointer
- // (the ebp register). The parameters, receiver, and return address
- // are below the frame pointer.
- int frame_pointer() {
- return parameter_count() + 2;
- }
-
- // The index of the first parameter. The receiver lies below the first
- // parameter.
- int param0_index() {
- return 1;
- }
-
- // The index of the context slot in the frame. It is immediately
- // above the frame pointer.
- int context_index() {
- return frame_pointer() + 1;
- }
-
- // The index of the function slot in the frame. It is above the frame
- // pointer and the context slot.
- int function_index() {
- return frame_pointer() + 2;
- }
-
- // The index of the first local. Between the frame pointer and the
- // locals lie the context and the function.
- int local0_index() {
- return frame_pointer() + 3;
- }
-
- // The index of the base of the expression stack.
- int expression_base_index() {
- return local0_index() + local_count();
- }
-
- // Convert a frame index into a frame pointer relative offset into the
- // actual stack.
- int fp_relative(int index) {
- ASSERT(index < element_count());
- ASSERT(frame_pointer() < element_count()); // FP is on the frame.
- return (frame_pointer() - index) * kPointerSize;
- }
-
- // Record an occurrence of a register in the virtual frame. This has the
- // effect of incrementing the register's external reference count and
- // of updating the index of the register's location in the frame.
- void Use(Register reg, int index) {
- ASSERT(!is_used(reg));
- set_register_location(reg, index);
- cgen()->allocator()->Use(reg);
- }
-
- // Record that a register reference has been dropped from the frame. This
- // decrements the register's external reference count and invalidates the
- // index of the register's location in the frame.
- void Unuse(Register reg) {
- ASSERT(is_used(reg));
- set_register_location(reg, kIllegalIndex);
- cgen()->allocator()->Unuse(reg);
- }
-
- // Spill the element at a particular index---write it to memory if
- // necessary, free any associated register, and forget its value if
- // constant.
- void SpillElementAt(int index);
-
- // Sync the element at a particular index. If it is a register or
- // constant that disagrees with the value on the stack, write it to memory.
- // Keep the element type as register or constant, and clear the dirty bit.
- void SyncElementAt(int index);
-
- // Sync a single unsynced element that lies beneath or at the stack pointer.
- void SyncElementBelowStackPointer(int index);
-
- // Sync a single unsynced element that lies just above the stack pointer.
- void SyncElementByPushing(int index);
-
- // Push a copy of a frame slot (typically a local or parameter) on top of
- // the frame.
- inline void PushFrameSlotAt(int index);
-
- // Push a copy of a frame slot (typically a local or parameter) on top of
- // the frame, as an untagged int32 value. Bails out if the value is not
- // an int32.
- void UntaggedPushFrameSlotAt(int index);
-
- // Push the value of a frame slot (typically a local or parameter) on
- // top of the frame and invalidate the slot.
- void TakeFrameSlotAt(int index);
-
- // Store the value on top of the frame to a frame slot (typically a local
- // or parameter).
- void StoreToFrameSlotAt(int index);
-
- // Spill all elements in registers. Spill the top spilled_args elements
- // on the frame. Sync all other frame elements.
- // Then drop dropped_args elements from the virtual frame, to match
- // the effect of an upcoming call that will drop them from the stack.
- void PrepareForCall(int spilled_args, int dropped_args);
-
- // Move frame elements currently in registers or constants, that
- // should be in memory in the expected frame, to memory.
- void MergeMoveRegistersToMemory(VirtualFrame* expected);
-
- // Make the register-to-register moves necessary to
- // merge this frame with the expected frame.
- // Register to memory moves must already have been made,
- // and memory to register moves must follow this call.
- // This is because some new memory-to-register moves are
- // created in order to break cycles of register moves.
- // Used in the implementation of MergeTo().
- void MergeMoveRegistersToRegisters(VirtualFrame* expected);
-
- // Make the memory-to-register and constant-to-register moves
- // needed to make this frame equal the expected frame.
- // Called after all register-to-memory and register-to-register
- // moves have been made. After this function returns, the frames
- // should be equal.
- void MergeMoveMemoryToRegisters(VirtualFrame* expected);
-
- // Invalidates a frame slot (puts an invalid frame element in it).
- // Copies on the frame are correctly handled, and if this slot was
- // the backing store of copies, the index of the new backing store
- // is returned. Otherwise, returns kIllegalIndex.
- // Register counts are correctly updated.
- int InvalidateFrameSlotAt(int index);
-
- // This function assumes that a and b are the only results that could be in
- // the registers a_reg or b_reg. Other results can be live, but must not
- // be in the registers a_reg or b_reg. The results a and b are invalidated.
- void MoveResultsToRegisters(Result* a,
- Result* b,
- Register a_reg,
- Register b_reg);
-
- // Call a code stub that has already been prepared for calling (via
- // PrepareForCall).
- Result RawCallStub(CodeStub* stub);
-
- // Calls a code object which has already been prepared for calling
- // (via PrepareForCall).
- Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
- inline bool Equals(VirtualFrame* other);
-
- // Classes that need raw access to the elements_ array.
- friend class FrameRegisterState;
- friend class JumpTarget;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_VIRTUAL_FRAME_IA32_H_
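The deletion above retires the ia32 virtual frame, the bookkeeping structure behind the pre-Crankshaft code generator. Its heart is index arithmetic over a conceptual frame: element 0 is the receiver, parameters start at index 1, and the frame pointer sits two slots above the last parameter (the return address lies between them). A minimal compilable sketch of that layout math, derived from the accessors in the removed header; the struct, kPointerSize constant, and example counts are stand-ins, not V8 code:

    #include <cassert>
    #include <cstdio>

    namespace sketch {

    const int kPointerSize = 4;  // ia32

    struct FrameLayout {
      int parameter_count;
      int local_count;

      int frame_pointer() const { return parameter_count + 2; }
      int param0_index() const { return 1; }
      int local0_index() const { return frame_pointer() + 3; }

      // ebp-relative byte offset of a frame element (cf. fp_relative above).
      int fp_relative(int index) const {
        return (frame_pointer() - index) * kPointerSize;
      }
    };

    }  // namespace sketch

    int main() {
      sketch::FrameLayout f = {2, 3};  // 2 parameters, 3 locals
      // Parameter 0 sits at ebp + (1 + parameter_count) * kPointerSize,
      // matching ParameterAt(0) in the removed header.
      assert(f.fp_relative(f.param0_index()) ==
             (1 + f.parameter_count) * sketch::kPointerSize);
      // Locals sit below ebp (negative offsets), consistent with LocalAt.
      std::printf("local 0 at ebp%+d\n", f.fp_relative(f.local0_index()));
      return 0;
    }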
diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h
index 9d358eddb..b4f789cb4 100644
--- a/deps/v8/src/ic-inl.h
+++ b/deps/v8/src/ic-inl.h
@@ -41,13 +41,14 @@ Address IC::address() {
Address result = pc() - Assembler::kCallTargetAddressOffset;
#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debug* debug = Isolate::Current()->debug();
// First check if any break points are active; if not, just return the address
// of the call.
- if (!Debug::has_break_points()) return result;
+ if (!debug->has_break_points()) return result;
// At least one break point is active; perform an additional test to ensure that
// break point locations are updated correctly.
- if (Debug::IsDebugBreak(Assembler::target_address_at(result))) {
+ if (debug->IsDebugBreak(Assembler::target_address_at(result))) {
// If the call site is a call to debug break then return the address in
// the original code instead of the address in the running code. This will
// cause the original code to be updated and keeps the breakpoint active in
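This ic-inl.h hunk is one instance of the upgrade's broad isolates refactor: state that used to live in static class members (here, the debugger's break-point bookkeeping behind Debug::has_break_points()) now hangs off the current Isolate and is fetched once per operation. A minimal sketch of the shape of that change, with stand-in declarations rather than V8's real ones:

    #include <cstdio>

    // Illustrative stand-ins only; real V8 declarations differ.
    class Debug {
     public:
      bool has_break_points() const { return break_point_count_ > 0; }
     private:
      int break_point_count_ = 0;
    };

    class Isolate {
     public:
      static Isolate* Current() {  // stand-in; real V8 uses thread-local storage
        static Isolate isolate;
        return &isolate;
      }
      Debug* debug() { return &debug_; }
     private:
      Debug debug_;
    };

    int main() {
      // Call sites hoist the lookup once, as the patched hunk does.
      Debug* debug = Isolate::Current()->debug();
      std::printf("break points active: %d\n", debug->has_break_points());
      return 0;
    }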
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index f7d4fb120..542466d1e 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -65,9 +65,35 @@ void IC::TraceIC(const char* type,
const char* extra_info) {
if (FLAG_trace_ic) {
State new_state = StateFrom(new_target,
- Heap::undefined_value(),
- Heap::undefined_value());
- PrintF("[%s (%c->%c)%s", type,
+ HEAP->undefined_value(),
+ HEAP->undefined_value());
+ PrintF("[%s in ", type);
+ StackFrameIterator it;
+ while (it.frame()->fp() != this->fp()) it.Advance();
+ StackFrame* raw_frame = it.frame();
+ if (raw_frame->is_internal()) {
+ Isolate* isolate = new_target->GetIsolate();
+ Code* apply_builtin = isolate->builtins()->builtin(
+ Builtins::kFunctionApply);
+ if (raw_frame->unchecked_code() == apply_builtin) {
+ PrintF("apply from ");
+ it.Advance();
+ raw_frame = it.frame();
+ }
+ }
+ if (raw_frame->is_java_script()) {
+ JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
+ Code* js_code = frame->unchecked_code();
+ // Find the function on the stack and both the active code for the
+ // function and the original code.
+ JSFunction* function = JSFunction::cast(frame->function());
+ function->PrintName();
+ int code_offset = address() - js_code->instruction_start();
+ PrintF("+%d", code_offset);
+ } else {
+ PrintF("<unknown>");
+ }
+ PrintF(" (%c->%c)%s",
TransitionMarkFromState(old_state),
TransitionMarkFromState(new_state),
extra_info);
@@ -78,11 +104,13 @@ void IC::TraceIC(const char* type,
#endif
-IC::IC(FrameDepth depth) {
+IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
+ ASSERT(isolate == Isolate::Current());
// To improve the performance of the (much used) IC code, we unfold
// a few levels of the stack frame iteration code. This yields a
// ~35% speedup when running DeltaBlue with the '--nouse-ic' flag.
- const Address entry = Top::c_entry_fp(Top::GetCurrentThread());
+ const Address entry =
+ Isolate::c_entry_fp(isolate->thread_local_top());
Address* pc_address =
reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
@@ -136,9 +164,11 @@ Address IC::OriginalCodeAddress() {
#endif
-static bool HasNormalObjectsInPrototypeChain(LookupResult* lookup,
+static bool HasNormalObjectsInPrototypeChain(Isolate* isolate,
+ LookupResult* lookup,
Object* receiver) {
- Object* end = lookup->IsProperty() ? lookup->holder() : Heap::null_value();
+ Object* end = lookup->IsProperty()
+ ? lookup->holder() : isolate->heap()->null_value();
for (Object* current = receiver;
current != end;
current = current->GetPrototype()) {
@@ -231,7 +261,7 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
RelocInfo::Mode IC::ComputeMode() {
Address addr = address();
- Code* code = Code::cast(Heap::FindCodeObject(addr));
+ Code* code = Code::cast(isolate()->heap()->FindCodeObject(addr));
for (RelocIterator it(code, RelocInfo::kCodeTargetMask);
!it.done(); it.next()) {
RelocInfo* info = it.rinfo();
@@ -245,18 +275,19 @@ RelocInfo::Mode IC::ComputeMode() {
Failure* IC::TypeError(const char* type,
Handle<Object> object,
Handle<Object> key) {
- HandleScope scope;
+ HandleScope scope(isolate());
Handle<Object> args[2] = { key, object };
- Handle<Object> error = Factory::NewTypeError(type, HandleVector(args, 2));
- return Top::Throw(*error);
+ Handle<Object> error = isolate()->factory()->NewTypeError(
+ type, HandleVector(args, 2));
+ return isolate()->Throw(*error);
}
Failure* IC::ReferenceError(const char* type, Handle<String> name) {
- HandleScope scope;
- Handle<Object> error =
- Factory::NewReferenceError(type, HandleVector(&name, 1));
- return Top::Throw(*error);
+ HandleScope scope(isolate());
+ Handle<Object> error = isolate()->factory()->NewReferenceError(
+ type, HandleVector(&name, 1));
+ return isolate()->Throw(*error);
}
@@ -268,13 +299,15 @@ void IC::Clear(Address address) {
switch (target->kind()) {
case Code::LOAD_IC: return LoadIC::Clear(address, target);
- case Code::KEYED_LOAD_IC: return KeyedLoadIC::Clear(address, target);
+ case Code::KEYED_LOAD_IC:
+ return KeyedLoadIC::Clear(address, target);
case Code::STORE_IC: return StoreIC::Clear(address, target);
- case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target);
+ case Code::KEYED_STORE_IC:
+ return KeyedStoreIC::Clear(address, target);
case Code::CALL_IC: return CallIC::Clear(address, target);
case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
+ case Code::UNARY_OP_IC:
case Code::BINARY_OP_IC:
- case Code::TYPE_RECORDING_BINARY_OP_IC:
case Code::COMPARE_IC:
// Clearing these is tricky and does not
// make any performance difference.
@@ -285,63 +318,36 @@ void IC::Clear(Address address) {
void CallICBase::Clear(Address address, Code* target) {
+ bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
State state = target->ic_state();
if (state == UNINITIALIZED) return;
Code* code =
- StubCache::FindCallInitialize(target->arguments_count(),
- target->ic_in_loop(),
- target->kind());
+ Isolate::Current()->stub_cache()->FindCallInitialize(
+ target->arguments_count(),
+ target->ic_in_loop(),
+ contextual ? RelocInfo::CODE_TARGET_CONTEXT : RelocInfo::CODE_TARGET,
+ target->kind());
SetTargetAtAddress(address, code);
}
-void KeyedLoadIC::ClearInlinedVersion(Address address) {
- // Insert null as the map to check for to make sure the map check fails
- // sending control flow to the IC instead of the inlined version.
- PatchInlinedLoad(address, Heap::null_value());
-}
-
-
void KeyedLoadIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
// Make sure to also clear the map used in inline fast cases. If we
// do not clear these maps, cached code can keep objects alive
// through the embedded maps.
- ClearInlinedVersion(address);
SetTargetAtAddress(address, initialize_stub());
}
-void LoadIC::ClearInlinedVersion(Address address) {
- // Reset the map check of the inlined inobject property load (if
- // present) to guarantee failure by holding an invalid map (the null
- // value). The offset can be patched to anything.
- PatchInlinedLoad(address, Heap::null_value(), 0);
- PatchInlinedContextualLoad(address,
- Heap::null_value(),
- Heap::null_value(),
- true);
-}
-
-
void LoadIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
- ClearInlinedVersion(address);
SetTargetAtAddress(address, initialize_stub());
}
-void StoreIC::ClearInlinedVersion(Address address) {
- // Reset the map check of the inlined inobject property store (if
- // present) to guarantee failure by holding an invalid map (the null
- // value). The offset can be patched to anything.
- PatchInlinedStore(address, Heap::null_value(), 0);
-}
-
-
void StoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
- ClearInlinedVersion(address);
SetTargetAtAddress(address,
(target->extra_ic_state() == kStrictMode)
? initialize_stub_strict()
@@ -349,21 +355,6 @@ void StoreIC::Clear(Address address, Code* target) {
}
-void KeyedStoreIC::ClearInlinedVersion(Address address) {
- // Insert null as the elements map to check for. This will make
- // sure that the elements fast-case map check fails so that control
- // flows to the IC instead of the inlined version.
- PatchInlinedStore(address, Heap::null_value());
-}
-
-
-void KeyedStoreIC::RestoreInlinedVersion(Address address) {
- // Restore the fast-case elements map check so that the inlined
- // version can be used again.
- PatchInlinedStore(address, Heap::fixed_array_map());
-}
-
-
void KeyedStoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address,
@@ -419,8 +410,8 @@ static void LookupForRead(Object* object,
Object* CallICBase::TryCallAsFunction(Object* object) {
- HandleScope scope;
- Handle<Object> target(object);
+ HandleScope scope(isolate());
+ Handle<Object> target(object, isolate());
Handle<Object> delegate = Execution::GetFunctionDelegate(target);
if (delegate->IsJSFunction()) {
@@ -455,7 +446,7 @@ void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
StackFrameLocator locator;
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
int index = frame->ComputeExpressionsCount() - (argc + 1);
- frame->SetExpression(index, *Factory::ToObject(object));
+ frame->SetExpression(index, *isolate()->factory()->ToObject(object));
}
}
@@ -527,7 +518,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
ASSERT(!result->IsTheHole());
- HandleScope scope;
+ HandleScope scope(isolate());
// Wrap result in a handle because ReceiverToObjectIfRequired may allocate
// new object and cause GC.
Handle<Object> result_handle(result);
@@ -539,11 +530,12 @@ MaybeObject* CallICBase::LoadFunction(State state,
if (result_handle->IsJSFunction()) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Handle stepping into a function if step into is active.
- if (Debug::StepInActive()) {
+ Debug* debug = isolate()->debug();
+ if (debug->StepInActive()) {
// Protect the result in a handle as the debugger can allocate and might
// cause GC.
- Handle<JSFunction> function(JSFunction::cast(*result_handle));
- Debug::HandleStepIn(function, object, fp(), false);
+ Handle<JSFunction> function(JSFunction::cast(*result_handle), isolate());
+ debug->HandleStepIn(function, object, fp(), false);
return *function;
}
#endif
@@ -569,7 +561,7 @@ bool CallICBase::TryUpdateExtraICState(LookupResult* lookup,
// Fetch the arguments passed to the called function.
const int argc = target()->arguments_count();
- Address entry = Top::c_entry_fp(Top::GetCurrentThread());
+ Address entry = isolate()->c_entry_fp(isolate()->thread_local_top());
Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
Arguments args(argc + 1,
&Memory::Object_at(fp +
@@ -584,17 +576,13 @@ bool CallICBase::TryUpdateExtraICState(LookupResult* lookup,
ASSERT(string == args[0] || string == JSValue::cast(args[0])->value());
// If we're in the default (fastest) state and the index is
// out of bounds, update the state to record this fact.
- if (*extra_ic_state == DEFAULT_STRING_STUB &&
+ if (StringStubState::decode(*extra_ic_state) == DEFAULT_STRING_STUB &&
argc >= 1 && args[1]->IsNumber()) {
- double index;
- if (args[1]->IsSmi()) {
- index = Smi::cast(args[1])->value();
- } else {
- ASSERT(args[1]->IsHeapNumber());
- index = DoubleToInteger(HeapNumber::cast(args[1])->value());
- }
+ double index = DoubleToInteger(args.number_at(1));
if (index < 0 || index >= string->length()) {
- *extra_ic_state = STRING_INDEX_OUT_OF_BOUNDS;
+ *extra_ic_state =
+ StringStubState::update(*extra_ic_state,
+ STRING_INDEX_OUT_OF_BOUNDS);
return true;
}
}
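The DEFAULT_STRING_STUB comparison above changes from testing the raw extra_ic_state word to decoding a packed field, and the out-of-bounds update goes through StringStubState::update() so neighboring fields in the word are preserved. V8 packs such fields with a BitField<Type, shift, size> template; the sketch below is an illustrative stand-in with the same encode/decode/update contract:

    #include <cassert>
    #include <cstdint>

    // Stand-in for v8::internal::BitField.
    template <typename T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> shift);
      }
      // Overwrites only this field, leaving the rest of the word intact.
      static uint32_t update(uint32_t word, T value) {
        return (word & ~kMask) | encode(value);
      }
    };

    enum StringStubFeedback {
      DEFAULT_STRING_STUB = 0,
      STRING_INDEX_OUT_OF_BOUNDS = 1
    };
    typedef BitField<StringStubFeedback, 0, 1> StringStubState;

    int main() {
      uint32_t extra_ic_state = 0;
      assert(StringStubState::decode(extra_ic_state) == DEFAULT_STRING_STUB);
      extra_ic_state =
          StringStubState::update(extra_ic_state, STRING_INDEX_OUT_OF_BOUNDS);
      assert(StringStubState::decode(extra_ic_state) ==
             STRING_INDEX_OUT_OF_BOUNDS);
      return 0;
    }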
@@ -619,13 +607,14 @@ MaybeObject* CallICBase::ComputeMonomorphicStub(
switch (lookup->type()) {
case FIELD: {
int index = lookup->GetFieldIndex();
- maybe_code = StubCache::ComputeCallField(argc,
- in_loop,
- kind_,
- *name,
- *object,
- lookup->holder(),
- index);
+ maybe_code = isolate()->stub_cache()->ComputeCallField(argc,
+ in_loop,
+ kind_,
+ extra_ic_state,
+ *name,
+ *object,
+ lookup->holder(),
+ index);
break;
}
case CONSTANT_FUNCTION: {
@@ -633,14 +622,15 @@ MaybeObject* CallICBase::ComputeMonomorphicStub(
// call; used for rewriting to monomorphic state and making sure
// that the code stub is in the stub cache.
JSFunction* function = lookup->GetConstantFunction();
- maybe_code = StubCache::ComputeCallConstant(argc,
- in_loop,
- kind_,
- extra_ic_state,
- *name,
- *object,
- lookup->holder(),
- function);
+ maybe_code =
+ isolate()->stub_cache()->ComputeCallConstant(argc,
+ in_loop,
+ kind_,
+ extra_ic_state,
+ *name,
+ *object,
+ lookup->holder(),
+ function);
break;
}
case NORMAL: {
@@ -653,35 +643,39 @@ MaybeObject* CallICBase::ComputeMonomorphicStub(
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
if (!cell->value()->IsJSFunction()) return NULL;
JSFunction* function = JSFunction::cast(cell->value());
- maybe_code = StubCache::ComputeCallGlobal(argc,
- in_loop,
- kind_,
- *name,
- *receiver,
- global,
- cell,
- function);
+ maybe_code = isolate()->stub_cache()->ComputeCallGlobal(argc,
+ in_loop,
+ kind_,
+ extra_ic_state,
+ *name,
+ *receiver,
+ global,
+ cell,
+ function);
} else {
// There is only one shared stub for calling normalized
// properties. It does not traverse the prototype chain, so the
// property must be found in the receiver for the stub to be
// applicable.
if (lookup->holder() != *receiver) return NULL;
- maybe_code = StubCache::ComputeCallNormal(argc,
- in_loop,
- kind_,
- *name,
- *receiver);
+ maybe_code = isolate()->stub_cache()->ComputeCallNormal(argc,
+ in_loop,
+ kind_,
+ extra_ic_state,
+ *name,
+ *receiver);
}
break;
}
case INTERCEPTOR: {
ASSERT(HasInterceptorGetter(lookup->holder()));
- maybe_code = StubCache::ComputeCallInterceptor(argc,
- kind_,
- *name,
- *object,
- lookup->holder());
+ maybe_code = isolate()->stub_cache()->ComputeCallInterceptor(
+ argc,
+ kind_,
+ extra_ic_state,
+ *name,
+ *object,
+ lookup->holder());
break;
}
default:
@@ -701,7 +695,8 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
if (lookup->holder() != *object &&
- HasNormalObjectsInPrototypeChain(lookup, object->GetPrototype())) {
+ HasNormalObjectsInPrototypeChain(
+ isolate(), lookup, object->GetPrototype())) {
// Suppress optimization for prototype chains with slow properties objects
// in the middle.
return;
@@ -716,7 +711,11 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
// This is the first time we execute this inline cache.
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
- maybe_code = StubCache::ComputeCallPreMonomorphic(argc, in_loop, kind_);
+ maybe_code =
+ isolate()->stub_cache()->ComputeCallPreMonomorphic(argc,
+ in_loop,
+ kind_,
+ extra_ic_state);
} else if (state == MONOMORPHIC) {
if (kind_ == Code::CALL_IC &&
TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
@@ -736,7 +735,11 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
object,
name);
} else {
- maybe_code = StubCache::ComputeCallMegamorphic(argc, in_loop, kind_);
+ maybe_code =
+ isolate()->stub_cache()->ComputeCallMegamorphic(argc,
+ in_loop,
+ kind_,
+ extra_ic_state);
}
} else {
maybe_code = ComputeMonomorphicStub(lookup,
@@ -764,7 +767,7 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
object->GetPrototype())->map();
// Update the stub cache.
- StubCache::Set(*name, map, Code::cast(code));
+ isolate()->stub_cache()->Set(*name, map, Code::cast(code));
}
USE(had_proto_failure);
@@ -790,24 +793,41 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
return TypeError("non_object_property_call", object, key);
}
- if (FLAG_use_ic && state != MEGAMORPHIC && !object->IsAccessCheckNeeded()) {
+ if (FLAG_use_ic && state != MEGAMORPHIC && object->IsHeapObject()) {
int argc = target()->arguments_count();
InLoopFlag in_loop = target()->ic_in_loop();
- MaybeObject* maybe_code = StubCache::ComputeCallMegamorphic(
- argc, in_loop, Code::KEYED_CALL_IC);
- Object* code;
- if (maybe_code->ToObject(&code)) {
- set_target(Code::cast(code));
+ Heap* heap = Handle<HeapObject>::cast(object)->GetHeap();
+ Map* map = heap->non_strict_arguments_elements_map();
+ if (object->IsJSObject() &&
+ Handle<JSObject>::cast(object)->elements()->map() == map) {
+ MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallArguments(
+ argc, in_loop, Code::KEYED_CALL_IC);
+ Object* code;
+ if (maybe_code->ToObject(&code)) {
+ set_target(Code::cast(code));
+#ifdef DEBUG
+ TraceIC(
+ "KeyedCallIC", key, state, target(), in_loop ? " (in-loop)" : "");
+#endif
+ }
+ } else if (FLAG_use_ic && state != MEGAMORPHIC &&
+ !object->IsAccessCheckNeeded()) {
+ MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(
+ argc, in_loop, Code::KEYED_CALL_IC, Code::kNoExtraICState);
+ Object* code;
+ if (maybe_code->ToObject(&code)) {
+ set_target(Code::cast(code));
#ifdef DEBUG
- TraceIC(
- "KeyedCallIC", key, state, target(), in_loop ? " (in-loop)" : "");
+ TraceIC(
+ "KeyedCallIC", key, state, target(), in_loop ? " (in-loop)" : "");
#endif
+ }
}
}
- HandleScope scope;
+ HandleScope scope(isolate());
Handle<Object> result = GetProperty(object, key);
- RETURN_IF_EMPTY_HANDLE(result);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
// Make receiver an object if the callee requires it. Strict mode or builtin
// functions do not wrap the receiver, non-strict functions and objects
@@ -840,66 +860,86 @@ MaybeObject* LoadIC::Load(State state,
}
if (FLAG_use_ic) {
- Code* non_monomorphic_stub =
- (state == UNINITIALIZED) ? pre_monomorphic_stub() : megamorphic_stub();
-
// Use specialized code for getting the length of strings and
// string wrapper objects. The length property of string wrapper
// objects is read-only and therefore always returns the length of
// the underlying string value. See ECMA-262 15.5.5.1.
if ((object->IsString() || object->IsStringWrapper()) &&
- name->Equals(Heap::length_symbol())) {
- HandleScope scope;
-#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
-#endif
- if (state == PREMONOMORPHIC) {
+ name->Equals(isolate()->heap()->length_symbol())) {
+ AssertNoAllocation no_allocation;
+ Code* stub = NULL;
+ if (state == UNINITIALIZED) {
+ stub = pre_monomorphic_stub();
+ } else if (state == PREMONOMORPHIC) {
if (object->IsString()) {
- Map* map = HeapObject::cast(*object)->map();
- const int offset = String::kLengthOffset;
- PatchInlinedLoad(address(), map, offset);
- set_target(Builtins::builtin(Builtins::LoadIC_StringLength));
+ stub = isolate()->builtins()->builtin(
+ Builtins::kLoadIC_StringLength);
} else {
- set_target(Builtins::builtin(Builtins::LoadIC_StringWrapperLength));
+ stub = isolate()->builtins()->builtin(
+ Builtins::kLoadIC_StringWrapperLength);
}
} else if (state == MONOMORPHIC && object->IsStringWrapper()) {
- set_target(Builtins::builtin(Builtins::LoadIC_StringWrapperLength));
- } else {
- set_target(non_monomorphic_stub);
+ stub = isolate()->builtins()->builtin(
+ Builtins::kLoadIC_StringWrapperLength);
+ } else if (state != MEGAMORPHIC) {
+ stub = megamorphic_stub();
+ }
+ if (stub != NULL) {
+ set_target(stub);
+#ifdef DEBUG
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
+#endif
}
// Get the string if we have a string wrapper object.
if (object->IsJSValue()) {
- object = Handle<Object>(Handle<JSValue>::cast(object)->value());
+ return Smi::FromInt(
+ String::cast(Handle<JSValue>::cast(object)->value())->length());
}
return Smi::FromInt(String::cast(*object)->length());
}
// Use specialized code for getting the length of arrays.
- if (object->IsJSArray() && name->Equals(Heap::length_symbol())) {
+ if (object->IsJSArray() &&
+ name->Equals(isolate()->heap()->length_symbol())) {
+ AssertNoAllocation no_allocation;
+ Code* stub = NULL;
+ if (state == UNINITIALIZED) {
+ stub = pre_monomorphic_stub();
+ } else if (state == PREMONOMORPHIC) {
+ stub = isolate()->builtins()->builtin(
+ Builtins::kLoadIC_ArrayLength);
+ } else if (state != MEGAMORPHIC) {
+ stub = megamorphic_stub();
+ }
+ if (stub != NULL) {
+ set_target(stub);
#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
#endif
- if (state == PREMONOMORPHIC) {
- Map* map = HeapObject::cast(*object)->map();
- const int offset = JSArray::kLengthOffset;
- PatchInlinedLoad(address(), map, offset);
- set_target(Builtins::builtin(Builtins::LoadIC_ArrayLength));
- } else {
- set_target(non_monomorphic_stub);
}
return JSArray::cast(*object)->length();
}
// Use specialized code for getting prototype of functions.
- if (object->IsJSFunction() && name->Equals(Heap::prototype_symbol()) &&
+ if (object->IsJSFunction() &&
+ name->Equals(isolate()->heap()->prototype_symbol()) &&
JSFunction::cast(*object)->should_have_prototype()) {
+ { AssertNoAllocation no_allocation;
+ Code* stub = NULL;
+ if (state == UNINITIALIZED) {
+ stub = pre_monomorphic_stub();
+ } else if (state == PREMONOMORPHIC) {
+ stub = isolate()->builtins()->builtin(
+ Builtins::kLoadIC_FunctionPrototype);
+ } else if (state != MEGAMORPHIC) {
+ stub = megamorphic_stub();
+ }
+ if (stub != NULL) {
+ set_target(stub);
#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
#endif
- if (state == PREMONOMORPHIC) {
- set_target(Builtins::builtin(Builtins::LoadIC_FunctionPrototype));
- } else {
- set_target(non_monomorphic_stub);
+ }
}
return Accessors::FunctionGetPrototype(*object, 0);
}
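The three fast paths in this hunk (string length, array length, function prototype) now share one shape: choose a stub from the current IC state under AssertNoAllocation, install it only if non-NULL, and compute the result directly instead of patching inlined loads. A condensed sketch of that state-to-stub policy; the names are descriptive stand-ins and the string path's extra MONOMORPHIC wrapper case is elided:

    #include <cstdio>

    enum State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC, MEGAMORPHIC };

    // Which stub each special-case load path installs; NULL means leave
    // the call site untouched.
    static const char* SelectStub(State state) {
      if (state == UNINITIALIZED) return "pre_monomorphic_stub";
      if (state == PREMONOMORPHIC) return "specialized builtin stub";
      if (state != MEGAMORPHIC) return "megamorphic_stub";
      return 0;  // already MEGAMORPHIC: nothing to do
    }

    int main() {
      std::printf("%s\n", SelectStub(PREMONOMORPHIC));
      return 0;
    }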
@@ -919,64 +959,7 @@ MaybeObject* LoadIC::Load(State state,
if (FLAG_strict || IsContextual(object)) {
return ReferenceError("not_defined", name);
}
- LOG(SuspectReadEvent(*name, *object));
- }
-
- bool can_be_inlined_precheck =
- FLAG_use_ic &&
- lookup.IsProperty() &&
- lookup.IsCacheable() &&
- lookup.holder() == *object &&
- !object->IsAccessCheckNeeded();
-
- bool can_be_inlined =
- can_be_inlined_precheck &&
- state == PREMONOMORPHIC &&
- lookup.type() == FIELD;
-
- bool can_be_inlined_contextual =
- can_be_inlined_precheck &&
- state == UNINITIALIZED &&
- lookup.holder()->IsGlobalObject() &&
- lookup.type() == NORMAL;
-
- if (can_be_inlined) {
- Map* map = lookup.holder()->map();
- // Property's index in the properties array. If negative we have
- // an inobject property.
- int index = lookup.GetFieldIndex() - map->inobject_properties();
- if (index < 0) {
- // Index is an offset from the end of the object.
- int offset = map->instance_size() + (index * kPointerSize);
- if (PatchInlinedLoad(address(), map, offset)) {
- set_target(megamorphic_stub());
- TRACE_IC_NAMED("[LoadIC : inline patch %s]\n", name);
- return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
- } else {
- TRACE_IC_NAMED("[LoadIC : no inline patch %s (patching failed)]\n",
- name);
- }
- } else {
- TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inobject)]\n", name);
- }
- } else if (can_be_inlined_contextual) {
- Map* map = lookup.holder()->map();
- JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
- lookup.holder()->property_dictionary()->ValueAt(
- lookup.GetDictionaryEntry()));
- if (PatchInlinedContextualLoad(address(),
- map,
- cell,
- lookup.IsDontDelete())) {
- set_target(megamorphic_stub());
- TRACE_IC_NAMED("[LoadIC : inline contextual patch %s]\n", name);
- ASSERT(cell->value() != Heap::the_hole_value());
- return cell->value();
- }
- } else {
- if (FLAG_use_ic && state == PREMONOMORPHIC) {
- TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inlinable)]\n", name);
- }
+ LOG(isolate(), SuspectReadEvent(*name, *object));
}
// Update inline cache and stub cache.
@@ -985,7 +968,8 @@ MaybeObject* LoadIC::Load(State state,
}
PropertyAttributes attr;
- if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
+ if (lookup.IsProperty() &&
+ (lookup.type() == INTERCEPTOR || lookup.type() == HANDLER)) {
// Get the property.
Object* result;
{ MaybeObject* maybe_result =
@@ -1017,7 +1001,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
if (!object->IsJSObject()) return;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
+ if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
// Compute the code stub for this load.
MaybeObject* maybe_code = NULL;
@@ -1029,20 +1013,23 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
maybe_code = pre_monomorphic_stub();
} else if (!lookup->IsProperty()) {
// Nonexistent property. The result is undefined.
- maybe_code = StubCache::ComputeLoadNonexistent(*name, *receiver);
+ maybe_code = isolate()->stub_cache()->ComputeLoadNonexistent(*name,
+ *receiver);
} else {
// Compute monomorphic stub.
switch (lookup->type()) {
case FIELD: {
- maybe_code = StubCache::ComputeLoadField(*name, *receiver,
- lookup->holder(),
- lookup->GetFieldIndex());
+ maybe_code = isolate()->stub_cache()->ComputeLoadField(
+ *name,
+ *receiver,
+ lookup->holder(),
+ lookup->GetFieldIndex());
break;
}
case CONSTANT_FUNCTION: {
Object* constant = lookup->GetConstantFunction();
- maybe_code = StubCache::ComputeLoadConstant(*name, *receiver,
- lookup->holder(), constant);
+ maybe_code = isolate()->stub_cache()->ComputeLoadConstant(
+ *name, *receiver, lookup->holder(), constant);
break;
}
case NORMAL: {
@@ -1050,7 +1037,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
GlobalObject* global = GlobalObject::cast(lookup->holder());
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
- maybe_code = StubCache::ComputeLoadGlobal(*name,
+ maybe_code = isolate()->stub_cache()->ComputeLoadGlobal(*name,
*receiver,
global,
cell,
@@ -1061,7 +1048,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
// property must be found in the receiver for the stub to be
// applicable.
if (lookup->holder() != *receiver) return;
- maybe_code = StubCache::ComputeLoadNormal();
+ maybe_code = isolate()->stub_cache()->ComputeLoadNormal();
}
break;
}
@@ -1070,14 +1057,14 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
AccessorInfo* callback =
AccessorInfo::cast(lookup->GetCallbackObject());
if (v8::ToCData<Address>(callback->getter()) == 0) return;
- maybe_code = StubCache::ComputeLoadCallback(*name, *receiver,
- lookup->holder(), callback);
+ maybe_code = isolate()->stub_cache()->ComputeLoadCallback(
+ *name, *receiver, lookup->holder(), callback);
break;
}
case INTERCEPTOR: {
ASSERT(HasInterceptorGetter(lookup->holder()));
- maybe_code = StubCache::ComputeLoadInterceptor(*name, *receiver,
- lookup->holder());
+ maybe_code = isolate()->stub_cache()->ComputeLoadInterceptor(
+ *name, *receiver, lookup->holder());
break;
}
default:
@@ -1101,7 +1088,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
Map* map = JSObject::cast(object->IsJSObject() ? *object :
object->GetPrototype())->map();
- StubCache::Set(*name, map, Code::cast(code));
+ isolate()->stub_cache()->Set(*name, map, Code::cast(code));
}
#ifdef DEBUG
@@ -1110,17 +1097,57 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
}
+String* KeyedLoadIC::GetStubNameForCache(IC::State ic_state) {
+ if (ic_state == MONOMORPHIC) {
+ return isolate()->heap()->KeyedLoadElementMonomorphic_symbol();
+ } else {
+ ASSERT(ic_state == MEGAMORPHIC);
+ return isolate()->heap()->KeyedLoadElementPolymorphic_symbol();
+ }
+}
+
+
+MaybeObject* KeyedLoadIC::GetFastElementStubWithoutMapCheck(
+ bool is_js_array) {
+ return KeyedLoadFastElementStub().TryGetCode();
+}
+
+
+MaybeObject* KeyedLoadIC::GetExternalArrayStubWithoutMapCheck(
+ JSObject::ElementsKind elements_kind) {
+ return KeyedLoadExternalArrayStub(elements_kind).TryGetCode();
+}
+
+
+MaybeObject* KeyedLoadIC::ConstructMegamorphicStub(
+ MapList* receiver_maps,
+ CodeList* targets,
+ StrictModeFlag strict_mode) {
+ Object* object;
+ KeyedLoadStubCompiler compiler;
+ MaybeObject* maybe_code = compiler.CompileLoadMegamorphic(receiver_maps,
+ targets);
+ if (!maybe_code->ToObject(&object)) return maybe_code;
+ isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
+ PROFILE(isolate(), CodeCreateEvent(
+ Logger::KEYED_LOAD_MEGAMORPHIC_IC_TAG,
+ Code::cast(object), 0));
+ return object;
+}
+
+
MaybeObject* KeyedLoadIC::Load(State state,
Handle<Object> object,
- Handle<Object> key) {
+ Handle<Object> key,
+ bool force_generic_stub) {
// Check for values that can be converted into a symbol.
// TODO(1295): Remove this code.
- HandleScope scope;
+ HandleScope scope(isolate());
if (key->IsHeapNumber() &&
isnan(HeapNumber::cast(*key)->value())) {
- key = Factory::nan_symbol();
+ key = isolate()->factory()->nan_symbol();
} else if (key->IsUndefined()) {
- key = Factory::undefined_symbol();
+ key = isolate()->factory()->undefined_symbol();
}
if (key->IsSymbol()) {
@@ -1136,11 +1163,13 @@ MaybeObject* KeyedLoadIC::Load(State state,
// TODO(1073): don't ignore the current stub state.
// Use specialized code for getting the length of strings.
- if (object->IsString() && name->Equals(Heap::length_symbol())) {
+ if (object->IsString() &&
+ name->Equals(isolate()->heap()->length_symbol())) {
Handle<String> string = Handle<String>::cast(object);
Object* code = NULL;
{ MaybeObject* maybe_code =
- StubCache::ComputeKeyedLoadStringLength(*name, *string);
+ isolate()->stub_cache()->ComputeKeyedLoadStringLength(*name,
+ *string);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
set_target(Code::cast(code));
@@ -1151,11 +1180,13 @@ MaybeObject* KeyedLoadIC::Load(State state,
}
// Use specialized code for getting the length of arrays.
- if (object->IsJSArray() && name->Equals(Heap::length_symbol())) {
+ if (object->IsJSArray() &&
+ name->Equals(isolate()->heap()->length_symbol())) {
Handle<JSArray> array = Handle<JSArray>::cast(object);
Object* code;
{ MaybeObject* maybe_code =
- StubCache::ComputeKeyedLoadArrayLength(*name, *array);
+ isolate()->stub_cache()->ComputeKeyedLoadArrayLength(*name,
+ *array);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
set_target(Code::cast(code));
@@ -1166,12 +1197,14 @@ MaybeObject* KeyedLoadIC::Load(State state,
}
// Use specialized code for getting prototype of functions.
- if (object->IsJSFunction() && name->Equals(Heap::prototype_symbol()) &&
+ if (object->IsJSFunction() &&
+ name->Equals(isolate()->heap()->prototype_symbol()) &&
JSFunction::cast(*object)->should_have_prototype()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(object);
Object* code;
{ MaybeObject* maybe_code =
- StubCache::ComputeKeyedLoadFunctionPrototype(*name, *function);
+ isolate()->stub_cache()->ComputeKeyedLoadFunctionPrototype(
+ *name, *function);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
set_target(Code::cast(code));
@@ -1186,10 +1219,10 @@ MaybeObject* KeyedLoadIC::Load(State state,
// the element or char if so.
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
- HandleScope scope;
+ HandleScope scope(isolate());
// Rewrite to the generic keyed load stub.
if (FLAG_use_ic) set_target(generic_stub());
- return Runtime::GetElementOrCharAt(object, index);
+ return Runtime::GetElementOrCharAt(isolate(), object, index);
}
// Named lookup.
@@ -1232,55 +1265,38 @@ MaybeObject* KeyedLoadIC::Load(State state,
if (use_ic) {
Code* stub = generic_stub();
- if (state == UNINITIALIZED) {
+ if (!force_generic_stub) {
if (object->IsString() && key->IsNumber()) {
- stub = string_stub();
+ if (state == UNINITIALIZED) {
+ stub = string_stub();
+ }
} else if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->HasExternalArrayElements()) {
- MaybeObject* probe =
- StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver,
- false,
- kNonStrictMode);
- stub = probe->IsFailure() ?
- NULL : Code::cast(probe->ToObjectUnchecked());
+ JSObject* receiver = JSObject::cast(*object);
+ Heap* heap = Handle<JSObject>::cast(object)->GetHeap();
+ Map* elements_map = Handle<JSObject>::cast(object)->elements()->map();
+ if (elements_map == heap->non_strict_arguments_elements_map()) {
+ stub = non_strict_arguments_stub();
} else if (receiver->HasIndexedInterceptor()) {
stub = indexed_interceptor_stub();
- } else if (receiver->HasPixelElements()) {
- MaybeObject* probe =
- StubCache::ComputeKeyedLoadPixelArray(*receiver);
- stub = probe->IsFailure() ?
- NULL : Code::cast(probe->ToObjectUnchecked());
- } else if (key->IsSmi() &&
- receiver->map()->has_fast_elements()) {
- MaybeObject* probe =
- StubCache::ComputeKeyedLoadSpecialized(*receiver);
- stub = probe->IsFailure() ?
- NULL : Code::cast(probe->ToObjectUnchecked());
+ } else if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
+ MaybeObject* maybe_stub = ComputeStub(receiver,
+ false,
+ kNonStrictMode,
+ stub);
+ stub = maybe_stub->IsFailure() ?
+ NULL : Code::cast(maybe_stub->ToObjectUnchecked());
}
}
}
if (stub != NULL) set_target(stub);
+ }
#ifdef DEBUG
- TraceIC("KeyedLoadIC", key, state, target());
+ TraceIC("KeyedLoadIC", key, state, target());
#endif // DEBUG
- // For JSObjects with fast elements that are not value wrappers
- // and that do not have indexed interceptors, we initialize the
- // inlined fast case (if present) by patching the inlined map
- // check.
- if (object->IsJSObject() &&
- !object->IsJSValue() &&
- !JSObject::cast(*object)->HasIndexedInterceptor() &&
- JSObject::cast(*object)->HasFastElements()) {
- Map* map = JSObject::cast(*object)->map();
- PatchInlinedLoad(address(), map);
- }
- }
-
// Get the property.
- return Runtime::GetObjectProperty(object, key);
+ return Runtime::GetObjectProperty(isolate(), object, key);
}
@@ -1292,7 +1308,7 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
if (!object->IsJSObject()) return;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
+ if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
// Compute the code stub for this load.
MaybeObject* maybe_code = NULL;
@@ -1307,17 +1323,14 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
// Compute a monomorphic stub.
switch (lookup->type()) {
case FIELD: {
- maybe_code = StubCache::ComputeKeyedLoadField(*name, *receiver,
- lookup->holder(),
- lookup->GetFieldIndex());
+ maybe_code = isolate()->stub_cache()->ComputeKeyedLoadField(
+ *name, *receiver, lookup->holder(), lookup->GetFieldIndex());
break;
}
case CONSTANT_FUNCTION: {
Object* constant = lookup->GetConstantFunction();
- maybe_code = StubCache::ComputeKeyedLoadConstant(*name,
- *receiver,
- lookup->holder(),
- constant);
+ maybe_code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
+ *name, *receiver, lookup->holder(), constant);
break;
}
case CALLBACKS: {
@@ -1325,16 +1338,14 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
AccessorInfo* callback =
AccessorInfo::cast(lookup->GetCallbackObject());
if (v8::ToCData<Address>(callback->getter()) == 0) return;
- maybe_code = StubCache::ComputeKeyedLoadCallback(*name,
- *receiver,
- lookup->holder(),
- callback);
+ maybe_code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
+ *name, *receiver, lookup->holder(), callback);
break;
}
case INTERCEPTOR: {
ASSERT(HasInterceptorGetter(lookup->holder()));
- maybe_code = StubCache::ComputeKeyedLoadInterceptor(*name, *receiver,
- lookup->holder());
+ maybe_code = isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
+ *name, *receiver, lookup->holder());
break;
}
default: {
@@ -1377,15 +1388,16 @@ static bool StoreICableLookup(LookupResult* lookup) {
}
-static bool LookupForWrite(JSObject* object,
+static bool LookupForWrite(JSReceiver* receiver,
String* name,
LookupResult* lookup) {
- object->LocalLookup(name, lookup);
+ receiver->LocalLookup(name, lookup);
if (!StoreICableLookup(lookup)) {
return false;
}
if (lookup->type() == INTERCEPTOR) {
+ JSObject* object = JSObject::cast(receiver);
if (object->GetNamedInterceptor()->setter()->IsUndefined()) {
object->LocalLookupRealNamedProperty(name, lookup);
return StoreICableLookup(lookup);
@@ -1407,30 +1419,44 @@ MaybeObject* StoreIC::Store(State state,
return TypeError("non_object_property_store", object, name);
}
- // Ignore stores where the receiver is not a JSObject.
- if (!object->IsJSObject()) return *value;
+ if (!object->IsJSReceiver()) {
+ // The length property of string values is read-only. Throw in strict mode.
+ if (strict_mode == kStrictMode && object->IsString() &&
+ name->Equals(isolate()->heap()->length_symbol())) {
+ return TypeError("strict_read_only_property", object, name);
+ }
+ // Ignore stores where the receiver is not a JSObject.
+ return *value;
+ }
+
+ // Handle proxies.
+ if (object->IsJSProxy()) {
+ return JSReceiver::cast(*object)->
+ SetProperty(*name, *value, NONE, strict_mode);
+ }
+
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- HandleScope scope;
- Handle<Object> result = SetElement(receiver, index, value);
+ HandleScope scope(isolate());
+ Handle<Object> result = SetElement(receiver, index, value, strict_mode);
if (result.is_null()) return Failure::Exception();
return *value;
}
// Use specialized code for setting the length of arrays.
if (receiver->IsJSArray()
- && name->Equals(Heap::length_symbol())
- && receiver->AllowsSetElementsLength()) {
+ && name->Equals(isolate()->heap()->length_symbol())
+ && JSArray::cast(*receiver)->AllowsSetElementsLength()) {
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
#endif
Builtins::Name target = (strict_mode == kStrictMode)
- ? Builtins::StoreIC_ArrayLength_Strict
- : Builtins::StoreIC_ArrayLength;
- set_target(Builtins::builtin(target));
+ ? Builtins::kStoreIC_ArrayLength_Strict
+ : Builtins::kStoreIC_ArrayLength;
+ set_target(isolate()->builtins()->builtin(target));
return receiver->SetProperty(*name, *value, NONE, strict_mode);
}
@@ -1439,57 +1465,7 @@ MaybeObject* StoreIC::Store(State state,
LookupResult lookup;
if (LookupForWrite(*receiver, *name, &lookup)) {
- bool can_be_inlined =
- state == UNINITIALIZED &&
- lookup.IsProperty() &&
- lookup.holder() == *receiver &&
- lookup.type() == FIELD &&
- !receiver->IsAccessCheckNeeded();
-
- if (can_be_inlined) {
- Map* map = lookup.holder()->map();
- // Property's index in the properties array. If negative we have
- // an inobject property.
- int index = lookup.GetFieldIndex() - map->inobject_properties();
- if (index < 0) {
- // Index is an offset from the end of the object.
- int offset = map->instance_size() + (index * kPointerSize);
- if (PatchInlinedStore(address(), map, offset)) {
- set_target((strict_mode == kStrictMode)
- ? megamorphic_stub_strict()
- : megamorphic_stub());
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[StoreIC : inline patch %s]\n", *name->ToCString());
- }
-#endif
- return receiver->SetProperty(*name, *value, NONE, strict_mode);
-#ifdef DEBUG
-
- } else {
- if (FLAG_trace_ic) {
- PrintF("[StoreIC : no inline patch %s (patching failed)]\n",
- *name->ToCString());
- }
- }
- } else {
- if (FLAG_trace_ic) {
- PrintF("[StoreIC : no inline patch %s (not inobject)]\n",
- *name->ToCString());
- }
- }
- } else {
- if (state == PREMONOMORPHIC) {
- if (FLAG_trace_ic) {
- PrintF("[StoreIC : no inline patch %s (not inlinable)]\n",
- *name->ToCString());
-#endif
- }
- }
- }
-
- // If no inlined store ic was patched, generate a stub for this
- // store.
+ // Generate a stub for this store.
UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
} else {
// Strict mode doesn't allow setting non-existent global property
@@ -1546,17 +1522,17 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
Object* code = NULL;
switch (type) {
case FIELD: {
- maybe_code = StubCache::ComputeStoreField(
+ maybe_code = isolate()->stub_cache()->ComputeStoreField(
*name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
break;
}
case MAP_TRANSITION: {
if (lookup->GetAttributes() != NONE) return;
- HandleScope scope;
+ HandleScope scope(isolate());
ASSERT(type == MAP_TRANSITION);
Handle<Map> transition(lookup->GetTransitionMap());
int index = transition->PropertyIndexFor(*name);
- maybe_code = StubCache::ComputeStoreField(
+ maybe_code = isolate()->stub_cache()->ComputeStoreField(
*name, *receiver, index, *transition, strict_mode);
break;
}
@@ -1568,11 +1544,11 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
- maybe_code = StubCache::ComputeStoreGlobal(
+ maybe_code = isolate()->stub_cache()->ComputeStoreGlobal(
*name, *global, cell, strict_mode);
} else {
if (lookup->holder() != *receiver) return;
- maybe_code = StubCache::ComputeStoreNormal(strict_mode);
+ maybe_code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
}
break;
}
@@ -1580,13 +1556,13 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
if (v8::ToCData<Address>(callback->setter()) == 0) return;
- maybe_code = StubCache::ComputeStoreCallback(
+ maybe_code = isolate()->stub_cache()->ComputeStoreCallback(
*name, *receiver, callback, strict_mode);
break;
}
case INTERCEPTOR: {
ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
- maybe_code = StubCache::ComputeStoreInterceptor(
+ maybe_code = isolate()->stub_cache()->ComputeStoreInterceptor(
*name, *receiver, strict_mode);
break;
}
@@ -1610,7 +1586,9 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
}
} else if (state == MEGAMORPHIC) {
// Update the stub cache.
- StubCache::Set(*name, receiver->map(), Code::cast(code));
+ isolate()->stub_cache()->Set(*name,
+ receiver->map(),
+ Code::cast(code));
}
#ifdef DEBUG
@@ -1619,11 +1597,213 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
}
+static bool AddOneReceiverMapIfMissing(MapList* receiver_maps,
+ Map* new_receiver_map) {
+ for (int current = 0; current < receiver_maps->length(); ++current) {
+ if (receiver_maps->at(current) == new_receiver_map) {
+ return false;
+ }
+ }
+ receiver_maps->Add(new_receiver_map);
+ return true;
+}
+
+
+void KeyedIC::GetReceiverMapsForStub(Code* stub, MapList* result) {
+ ASSERT(stub->is_inline_cache_stub());
+ if (stub == string_stub()) {
+ return result->Add(isolate()->heap()->string_map());
+ } else if (stub->is_keyed_load_stub() || stub->is_keyed_store_stub()) {
+ if (stub->ic_state() == MONOMORPHIC) {
+ result->Add(Map::cast(stub->FindFirstMap()));
+ } else {
+ ASSERT(stub->ic_state() == MEGAMORPHIC);
+ AssertNoAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(stub, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ Object* object = info->target_object();
+ ASSERT(object->IsMap());
+ result->Add(Map::cast(object));
+ }
+ }
+ }
+}
+
+
+MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
+ bool is_store,
+ StrictModeFlag strict_mode,
+ Code* generic_stub) {
+ State ic_state = target()->ic_state();
+ Code* monomorphic_stub;
+ // Always compute the MONOMORPHIC stub, even if the MEGAMORPHIC stub ends up
+ // being used. This is necessary because the megamorphic stub needs to have
+ // access to more information than what is stored in the receiver map in some
+ // cases (external arrays need the array type from the MONOMORPHIC stub).
+ MaybeObject* maybe_stub = ComputeMonomorphicStub(receiver,
+ is_store,
+ strict_mode,
+ generic_stub);
+ if (!maybe_stub->To(&monomorphic_stub)) return maybe_stub;
+
+ if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+ return monomorphic_stub;
+ }
+ ASSERT(target() != generic_stub);
+
+ // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
+ // via megamorphic stubs, since they don't have a map in their relocation info
+ // and so the stubs can't be harvested for the object needed for a map check.
+ if (target()->type() != NORMAL) {
+ return generic_stub;
+ }
+
+ // Determine the list of receiver maps that this call site has seen,
+ // adding the map that was just encountered.
+ MapList target_receiver_maps;
+ GetReceiverMapsForStub(target(), &target_receiver_maps);
+ if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver->map())) {
+ // If the miss wasn't due to an unseen map, a MEGAMORPHIC stub
+ // won't help; use the generic stub.
+ return generic_stub;
+ }
+
+ // If the maximum number of receiver maps has been exceeded, use the generic
+ // version of the IC.
+ if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
+ return generic_stub;
+ }
+
+ PolymorphicCodeCache* cache = isolate()->heap()->polymorphic_code_cache();
+ Code::Flags flags = Code::ComputeFlags(this->kind(),
+ NOT_IN_LOOP,
+ MEGAMORPHIC,
+ strict_mode);
+ Object* maybe_cached_stub = cache->Lookup(&target_receiver_maps, flags);
+ // If there is a cached stub, use it.
+ if (!maybe_cached_stub->IsUndefined()) {
+ ASSERT(maybe_cached_stub->IsCode());
+ return Code::cast(maybe_cached_stub);
+ }
+ // Collect MONOMORPHIC stubs for all target_receiver_maps.
+ CodeList handler_ics(target_receiver_maps.length());
+ for (int i = 0; i < target_receiver_maps.length(); ++i) {
+ Map* receiver_map(target_receiver_maps.at(i));
+ MaybeObject* maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck(
+ receiver_map, strict_mode, generic_stub);
+ Code* cached_stub;
+ if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub;
+ handler_ics.Add(cached_stub);
+ }
+ // Build the MEGAMORPHIC stub.
+ Code* stub;
+ maybe_stub = ConstructMegamorphicStub(&target_receiver_maps,
+ &handler_ics,
+ strict_mode);
+ if (!maybe_stub->To(&stub)) return maybe_stub;
+ MaybeObject* maybe_update = cache->Update(&target_receiver_maps, flags, stub);
+ if (maybe_update->IsFailure()) return maybe_update;
+ return stub;
+}
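+
+// A minimal, self-contained sketch of the stub-selection policy implemented
+// above. The names SketchIcState and NextKeyedStubState are hypothetical
+// and do not exist in the V8 sources, and the real code additionally
+// consults the polymorphic code cache before building a MEGAMORPHIC stub.
+namespace {
+
+enum SketchIcState { SKETCH_MONOMORPHIC, SKETCH_MEGAMORPHIC, SKETCH_GENERIC };
+
+SketchIcState NextKeyedStubState(bool uninitialized,
+                                 bool map_already_seen,
+                                 int seen_map_count,
+                                 int max_polymorphism) {
+ // First miss: specialize on the single receiver map seen so far.
+ if (uninitialized) return SKETCH_MONOMORPHIC;
+ // A miss on an already-seen map is not cured by tracking more maps.
+ if (map_already_seen) return SKETCH_GENERIC;
+ // Too many distinct maps: give up on specialization entirely.
+ if (seen_map_count > max_polymorphism) return SKETCH_GENERIC;
+ return SKETCH_MEGAMORPHIC;
+}
+
+} // namespace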
+
+
+MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
+ Map* receiver_map,
+ StrictModeFlag strict_mode,
+ Code* generic_stub) {
+ if ((receiver_map->instance_type() & kNotStringTag) == 0) {
+ ASSERT(string_stub() != NULL);
+ return string_stub();
+ } else if (receiver_map->has_external_array_elements()) {
+ // Determine the array type from the already-generated default MONOMORPHIC
+ // stub; there is no other way to recover the type of the external array
+ // directly from the receiver type.
+ Code::Kind kind = this->kind();
+ Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
+ NORMAL,
+ strict_mode);
+ String* monomorphic_name = GetStubNameForCache(MONOMORPHIC);
+ Object* maybe_default_stub = receiver_map->FindInCodeCache(monomorphic_name,
+ flags);
+ if (maybe_default_stub->IsUndefined()) {
+ return generic_stub;
+ }
+ Code* default_stub = Code::cast(maybe_default_stub);
+ Map* first_map = default_stub->FindFirstMap();
+ return GetExternalArrayStubWithoutMapCheck(first_map->elements_kind());
+ } else if (receiver_map->has_fast_elements()) {
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ return GetFastElementStubWithoutMapCheck(is_js_array);
+ } else {
+ return generic_stub;
+ }
+}
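+
+// The tag test at the top relies on V8's instance-type encoding, in which
+// string types, and only string types, have the kNotStringTag bit cleared,
+// making the bitwise AND a branch-free "is string" check. A stand-alone
+// sketch of the same idea; kSketchNotStringTag is a hypothetical stand-in
+// for the real constant:
+namespace {
+
+const int kSketchNotStringTag = 0x80; // Hypothetical tag bit.
+
+bool SketchIsStringType(int instance_type) {
+ return (instance_type & kSketchNotStringTag) == 0;
+}
+
+} // namespace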
+
+
+MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver,
+ bool is_store,
+ StrictModeFlag strict_mode,
+ Code* generic_stub) {
+ Code* result = NULL;
+ if (receiver->HasFastElements() ||
+ receiver->HasExternalArrayElements()) {
+ MaybeObject* maybe_stub =
+ isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
+ receiver, is_store, strict_mode);
+ if (!maybe_stub->To(&result)) return maybe_stub;
+ } else {
+ result = generic_stub;
+ }
+ return result;
+}
+
+
+String* KeyedStoreIC::GetStubNameForCache(IC::State ic_state) {
+ if (ic_state == MONOMORPHIC) {
+ return isolate()->heap()->KeyedStoreElementMonomorphic_symbol();
+ } else {
+ ASSERT(ic_state == MEGAMORPHIC);
+ return isolate()->heap()->KeyedStoreElementPolymorphic_symbol();
+ }
+}
+
+
+MaybeObject* KeyedStoreIC::GetFastElementStubWithoutMapCheck(
+ bool is_js_array) {
+ return KeyedStoreFastElementStub(is_js_array).TryGetCode();
+}
+
+
+MaybeObject* KeyedStoreIC::GetExternalArrayStubWithoutMapCheck(
+ JSObject::ElementsKind elements_kind) {
+ return KeyedStoreExternalArrayStub(elements_kind).TryGetCode();
+}
+
+
+MaybeObject* KeyedStoreIC::ConstructMegamorphicStub(
+ MapList* receiver_maps,
+ CodeList* targets,
+ StrictModeFlag strict_mode) {
+ Object* object;
+ KeyedStoreStubCompiler compiler(strict_mode);
+ MaybeObject* maybe_code = compiler.CompileStoreMegamorphic(receiver_maps,
+ targets);
+ if (!maybe_code->ToObject(&object)) return maybe_code;
+ isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
+ PROFILE(isolate(), CodeCreateEvent(
+ Logger::KEYED_STORE_MEGAMORPHIC_IC_TAG,
+ Code::cast(object), 0));
+ return object;
+}
+
+
MaybeObject* KeyedStoreIC::Store(State state,
StrictModeFlag strict_mode,
Handle<Object> object,
Handle<Object> key,
- Handle<Object> value) {
+ Handle<Object> value,
+ bool force_generic) {
if (key->IsSymbol()) {
Handle<String> name = Handle<String>::cast(key);
@@ -1640,8 +1820,8 @@ MaybeObject* KeyedStoreIC::Store(State state,
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- HandleScope scope;
- Handle<Object> result = SetElement(receiver, index, value);
+ HandleScope scope(isolate());
+ Handle<Object> result = SetElement(receiver, index, value, strict_mode);
if (result.is_null()) return Failure::Exception();
return *value;
}
@@ -1665,35 +1845,36 @@ MaybeObject* KeyedStoreIC::Store(State state,
ASSERT(!(use_ic && object->IsJSGlobalProxy()));
if (use_ic) {
- Code* stub =
- (strict_mode == kStrictMode) ? generic_stub_strict() : generic_stub();
- if (state == UNINITIALIZED) {
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->HasExternalArrayElements()) {
- MaybeObject* probe =
- StubCache::ComputeKeyedLoadOrStoreExternalArray(
- *receiver, true, strict_mode);
- stub = probe->IsFailure() ?
- NULL : Code::cast(probe->ToObjectUnchecked());
- } else if (receiver->HasPixelElements()) {
- MaybeObject* probe =
- StubCache::ComputeKeyedStorePixelArray(*receiver, strict_mode);
- stub = probe->IsFailure() ?
- NULL : Code::cast(probe->ToObjectUnchecked());
- } else if (key->IsSmi() && receiver->map()->has_fast_elements()) {
- MaybeObject* probe =
- StubCache::ComputeKeyedStoreSpecialized(*receiver, strict_mode);
- stub = probe->IsFailure() ?
- NULL : Code::cast(probe->ToObjectUnchecked());
+ Code* stub = (strict_mode == kStrictMode)
+ ? generic_stub_strict()
+ : generic_stub();
+ if (object->IsJSObject()) {
+ JSObject* receiver = JSObject::cast(*object);
+ Heap* heap = Handle<JSObject>::cast(object)->GetHeap();
+ Map* elements_map = Handle<JSObject>::cast(object)->elements()->map();
+ if (elements_map == heap->non_strict_arguments_elements_map()) {
+ stub = non_strict_arguments_stub();
+ } else if (!force_generic) {
+ if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
+ MaybeObject* maybe_stub = ComputeStub(receiver,
+ true,
+ strict_mode,
+ stub);
+ stub = maybe_stub->IsFailure() ?
+ NULL : Code::cast(maybe_stub->ToObjectUnchecked());
}
}
}
if (stub != NULL) set_target(stub);
}
+#ifdef DEBUG
+ TraceIC("KeyedStoreIC", key, state, target());
+#endif
+
// Set the property.
- return Runtime::SetObjectProperty(object, key, value, NONE, strict_mode);
+ return Runtime::SetObjectProperty(
+ isolate(), object, key, value, NONE, strict_mode);
}
@@ -1726,17 +1907,17 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
switch (type) {
case FIELD: {
- maybe_code = StubCache::ComputeKeyedStoreField(
+ maybe_code = isolate()->stub_cache()->ComputeKeyedStoreField(
*name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
break;
}
case MAP_TRANSITION: {
if (lookup->GetAttributes() == NONE) {
- HandleScope scope;
+ HandleScope scope(isolate());
ASSERT(type == MAP_TRANSITION);
Handle<Map> transition(lookup->GetTransitionMap());
int index = transition->PropertyIndexFor(*name);
- maybe_code = StubCache::ComputeKeyedStoreField(
+ maybe_code = isolate()->stub_cache()->ComputeKeyedStoreField(
*name, *receiver, index, *transition, strict_mode);
break;
}
@@ -1777,11 +1958,12 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
// Static IC stub generators.
//
-static JSFunction* CompileFunction(JSFunction* function,
+static JSFunction* CompileFunction(Isolate* isolate,
+ JSFunction* function,
InLoopFlag in_loop) {
// Compile now with optimization.
- HandleScope scope;
- Handle<JSFunction> function_handle(function);
+ HandleScope scope(isolate);
+ Handle<JSFunction> function_handle(function, isolate);
if (in_loop == IN_LOOP) {
CompileLazyInLoop(function_handle, CLEAR_EXCEPTION);
} else {
@@ -1792,10 +1974,10 @@ static JSFunction* CompileFunction(JSFunction* function,
// Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* CallIC_Miss(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
- CallIC ic;
+ CallIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
MaybeObject* maybe_result = ic.LoadFunction(state,
@@ -1815,15 +1997,17 @@ MUST_USE_RESULT MaybeObject* CallIC_Miss(Arguments args) {
if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
return result;
}
- return CompileFunction(JSFunction::cast(result), ic.target()->ic_in_loop());
+ return CompileFunction(isolate,
+ JSFunction::cast(result),
+ ic.target()->ic_in_loop());
}
// Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* KeyedCallIC_Miss(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
- KeyedCallIC ic;
+ KeyedCallIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
Object* result;
{ MaybeObject* maybe_result =
@@ -1834,35 +2018,46 @@ MUST_USE_RESULT MaybeObject* KeyedCallIC_Miss(Arguments args) {
if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
return result;
}
- return CompileFunction(JSFunction::cast(result), ic.target()->ic_in_loop());
+ return CompileFunction(isolate,
+ JSFunction::cast(result),
+ ic.target()->ic_in_loop());
}
// Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* LoadIC_Miss(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
- LoadIC ic;
+ LoadIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
return ic.Load(state, args.at<Object>(0), args.at<String>(1));
}
// Used from ic-<arch>.cc
-MUST_USE_RESULT MaybeObject* KeyedLoadIC_Miss(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
- KeyedLoadIC ic;
+ KeyedLoadIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<Object>(1));
+ return ic.Load(state, args.at<Object>(0), args.at<Object>(1), false);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
+ NoHandleAllocation na;
+ ASSERT(args.length() == 2);
+ KeyedLoadIC ic(isolate);
+ IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
+ return ic.Load(state, args.at<Object>(0), args.at<Object>(1), true);
}
// Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* StoreIC_Miss(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
NoHandleAllocation na;
ASSERT(args.length() == 3);
- StoreIC ic;
+ StoreIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
return ic.Store(state,
@@ -1873,7 +2068,7 @@ MUST_USE_RESULT MaybeObject* StoreIC_Miss(Arguments args) {
}
-MUST_USE_RESULT MaybeObject* StoreIC_ArrayLength(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
NoHandleAllocation nha;
ASSERT(args.length() == 2);
@@ -1894,7 +2089,7 @@ MUST_USE_RESULT MaybeObject* StoreIC_ArrayLength(Arguments args) {
// Extend storage is called in a store inline cache when
// it is necessary to extend the properties array of a
// JSObject.
-MUST_USE_RESULT MaybeObject* SharedStoreIC_ExtendStorage(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
NoHandleAllocation na;
ASSERT(args.length() == 3);
@@ -1928,171 +2123,133 @@ MUST_USE_RESULT MaybeObject* SharedStoreIC_ExtendStorage(Arguments args) {
// Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* KeyedStoreIC_Miss(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
NoHandleAllocation na;
ASSERT(args.length() == 3);
- KeyedStoreIC ic;
+ KeyedStoreIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
return ic.Store(state,
static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
args.at<Object>(0),
args.at<Object>(1),
- args.at<Object>(2));
+ args.at<Object>(2),
+ false);
}
-void BinaryOpIC::patch(Code* code) {
+RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
+ NoHandleAllocation na;
+ ASSERT(args.length() == 3);
+ KeyedStoreIC ic(isolate);
+ Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
+ Handle<Object> object = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ Handle<Object> value = args.at<Object>(2);
+ StrictModeFlag strict_mode =
+ static_cast<StrictModeFlag>(extra_ic_state & kStrictMode);
+ return Runtime::SetObjectProperty(isolate,
+ object,
+ key,
+ value,
+ NONE,
+ strict_mode);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
+ NoHandleAllocation na;
+ ASSERT(args.length() == 3);
+ KeyedStoreIC ic(isolate);
+ IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
+ Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
+ return ic.Store(state,
+ static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
+ args.at<Object>(0),
+ args.at<Object>(1),
+ args.at<Object>(2),
+ true);
+}
+
+
+void UnaryOpIC::patch(Code* code) {
set_target(code);
}
-const char* BinaryOpIC::GetName(TypeInfo type_info) {
+const char* UnaryOpIC::GetName(TypeInfo type_info) {
switch (type_info) {
- case UNINIT_OR_SMI: return "UninitOrSmi";
- case DEFAULT: return "Default";
+ case UNINITIALIZED: return "Uninitialized";
+ case SMI: return "Smi";
+ case HEAP_NUMBER: return "HeapNumber";
case GENERIC: return "Generic";
- case HEAP_NUMBERS: return "HeapNumbers";
- case STRINGS: return "Strings";
default: return "Invalid";
}
}
-BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
+UnaryOpIC::State UnaryOpIC::ToState(TypeInfo type_info) {
switch (type_info) {
- case UNINIT_OR_SMI:
- return UNINITIALIZED;
- case DEFAULT:
- case HEAP_NUMBERS:
- case STRINGS:
+ case UNINITIALIZED:
+ return ::v8::internal::UNINITIALIZED;
+ case SMI:
+ case HEAP_NUMBER:
return MONOMORPHIC;
case GENERIC:
return MEGAMORPHIC;
}
UNREACHABLE();
- return UNINITIALIZED;
+ return ::v8::internal::UNINITIALIZED;
}
-
-BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Object* left,
- Object* right) {
- if (left->IsSmi() && right->IsSmi()) {
- // If we have two smi inputs we can reach here because
- // of an overflow. Enter default state.
- return DEFAULT;
- }
-
- if (left->IsNumber() && right->IsNumber()) {
- return HEAP_NUMBERS;
- }
-
- if (left->IsString() || right->IsString()) {
- // Patching for fast string ADD makes sense even if only one of the
- // arguments is a string.
- return STRINGS;
+UnaryOpIC::TypeInfo UnaryOpIC::GetTypeInfo(Handle<Object> operand) {
+ ::v8::internal::TypeInfo operand_type =
+ ::v8::internal::TypeInfo::TypeFromValue(operand);
+ if (operand_type.IsSmi()) {
+ return SMI;
+ } else if (operand_type.IsNumber()) {
+ return HEAP_NUMBER;
+ } else {
+ return GENERIC;
}
-
- return GENERIC;
}
-// defined in code-stubs-<arch>.cc
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info);
-
-
-MUST_USE_RESULT MaybeObject* BinaryOp_Patch(Arguments args) {
- ASSERT(args.length() == 5);
-
- HandleScope scope;
- Handle<Object> left = args.at<Object>(0);
- Handle<Object> right = args.at<Object>(1);
- int key = Smi::cast(args[2])->value();
- Token::Value op = static_cast<Token::Value>(Smi::cast(args[3])->value());
- BinaryOpIC::TypeInfo previous_type =
- static_cast<BinaryOpIC::TypeInfo>(Smi::cast(args[4])->value());
-
- BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(*left, *right);
- Handle<Code> code = GetBinaryOpStub(key, type);
- if (!code.is_null()) {
- BinaryOpIC ic;
- ic.patch(*code);
- if (FLAG_trace_ic) {
- PrintF("[BinaryOpIC (%s->%s)#%s]\n",
- BinaryOpIC::GetName(previous_type),
- BinaryOpIC::GetName(type),
- Token::Name(op));
- }
- }
-
- Handle<JSBuiltinsObject> builtins = Top::builtins();
- Object* builtin = NULL; // Initialization calms down the compiler.
- switch (op) {
- case Token::ADD:
- builtin = builtins->javascript_builtin(Builtins::ADD);
- break;
- case Token::SUB:
- builtin = builtins->javascript_builtin(Builtins::SUB);
- break;
- case Token::MUL:
- builtin = builtins->javascript_builtin(Builtins::MUL);
- break;
- case Token::DIV:
- builtin = builtins->javascript_builtin(Builtins::DIV);
- break;
- case Token::MOD:
- builtin = builtins->javascript_builtin(Builtins::MOD);
- break;
- case Token::BIT_AND:
- builtin = builtins->javascript_builtin(Builtins::BIT_AND);
- break;
- case Token::BIT_OR:
- builtin = builtins->javascript_builtin(Builtins::BIT_OR);
- break;
- case Token::BIT_XOR:
- builtin = builtins->javascript_builtin(Builtins::BIT_XOR);
- break;
- case Token::SHR:
- builtin = builtins->javascript_builtin(Builtins::SHR);
- break;
- case Token::SAR:
- builtin = builtins->javascript_builtin(Builtins::SAR);
- break;
- case Token::SHL:
- builtin = builtins->javascript_builtin(Builtins::SHL);
- break;
- default:
+UnaryOpIC::TypeInfo UnaryOpIC::ComputeNewType(
+ UnaryOpIC::TypeInfo current_type,
+ UnaryOpIC::TypeInfo previous_type) {
+ switch (previous_type) {
+ case UnaryOpIC::UNINITIALIZED:
+ return current_type;
+ case UnaryOpIC::SMI:
+ return (current_type == UnaryOpIC::GENERIC)
+ ? UnaryOpIC::GENERIC
+ : UnaryOpIC::HEAP_NUMBER;
+ case UnaryOpIC::HEAP_NUMBER:
+ return UnaryOpIC::GENERIC;
+ case UnaryOpIC::GENERIC:
+ // We should never do patching if we are in GENERIC state.
UNREACHABLE();
+ return UnaryOpIC::GENERIC;
}
-
- Handle<JSFunction> builtin_function(JSFunction::cast(builtin));
-
- bool caught_exception;
- Object** builtin_args[] = { right.location() };
- Handle<Object> result = Execution::Call(builtin_function,
- left,
- ARRAY_SIZE(builtin_args),
- builtin_args,
- &caught_exception);
- if (caught_exception) {
- return Failure::Exception();
- }
- return *result;
+ UNREACHABLE();
+ return UnaryOpIC::GENERIC;
}
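+
+// Worked transitions for ComputeNewType above, written as
+// (previous state, newly observed operand type) => patched state:
+// (UNINITIALIZED, SMI) => SMI
+// (SMI, SMI) => HEAP_NUMBER (the SMI stub missed, e.g. on -0
+// or a smi-range overflow)
+// (SMI, GENERIC) => GENERIC
+// (HEAP_NUMBER, <anything>) => GENERIC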
-void TRBinaryOpIC::patch(Code* code) {
+void BinaryOpIC::patch(Code* code) {
set_target(code);
}
-const char* TRBinaryOpIC::GetName(TypeInfo type_info) {
+const char* BinaryOpIC::GetName(TypeInfo type_info) {
switch (type_info) {
case UNINITIALIZED: return "Uninitialized";
case SMI: return "SMI";
case INT32: return "Int32s";
case HEAP_NUMBER: return "HeapNumbers";
case ODDBALL: return "Oddball";
+ case BOTH_STRING: return "BothStrings";
case STRING: return "Strings";
case GENERIC: return "Generic";
default: return "Invalid";
@@ -2100,7 +2257,7 @@ const char* TRBinaryOpIC::GetName(TypeInfo type_info) {
}
-TRBinaryOpIC::State TRBinaryOpIC::ToState(TypeInfo type_info) {
+BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
switch (type_info) {
case UNINITIALIZED:
return ::v8::internal::UNINITIALIZED;
@@ -2108,6 +2265,7 @@ TRBinaryOpIC::State TRBinaryOpIC::ToState(TypeInfo type_info) {
case INT32:
case HEAP_NUMBER:
case ODDBALL:
+ case BOTH_STRING:
case STRING:
return MONOMORPHIC;
case GENERIC:
@@ -2118,18 +2276,23 @@ TRBinaryOpIC::State TRBinaryOpIC::ToState(TypeInfo type_info) {
}
-TRBinaryOpIC::TypeInfo TRBinaryOpIC::JoinTypes(TRBinaryOpIC::TypeInfo x,
- TRBinaryOpIC::TypeInfo y) {
+BinaryOpIC::TypeInfo BinaryOpIC::JoinTypes(BinaryOpIC::TypeInfo x,
+ BinaryOpIC::TypeInfo y) {
if (x == UNINITIALIZED) return y;
if (y == UNINITIALIZED) return x;
- if (x == STRING && y == STRING) return STRING;
- if (x == STRING || y == STRING) return GENERIC;
- if (x >= y) return x;
+ if (x == y) return x;
+ if (x == BOTH_STRING && y == STRING) return STRING;
+ if (x == STRING && y == BOTH_STRING) return STRING;
+ if (x == STRING || x == BOTH_STRING || y == STRING || y == BOTH_STRING) {
+ return GENERIC;
+ }
+ if (x > y) return x;
return y;
}
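+
+// Worked examples of the join above (with the declaration order from ic.h,
+// where later non-string values are more generic):
+// JoinTypes(SMI, INT32) == INT32 (the wider numeric side wins)
+// JoinTypes(BOTH_STRING, STRING) == STRING (weakens to "one side a string")
+// JoinTypes(STRING, SMI) == GENERIC (string/non-string mixes give up)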
-TRBinaryOpIC::TypeInfo TRBinaryOpIC::GetTypeInfo(Handle<Object> left,
- Handle<Object> right) {
+
+BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Handle<Object> left,
+ Handle<Object> right) {
::v8::internal::TypeInfo left_type =
::v8::internal::TypeInfo::TypeFromValue(left);
::v8::internal::TypeInfo right_type =
@@ -2149,9 +2312,11 @@ TRBinaryOpIC::TypeInfo TRBinaryOpIC::GetTypeInfo(Handle<Object> left,
return HEAP_NUMBER;
}
- if (left_type.IsString() || right_type.IsString()) {
- // Patching for fast string ADD makes sense even if only one of the
- // arguments is a string.
+ // Patching for fast string ADD makes sense even if only one of the
+ // arguments is a string.
+ if (left_type.IsString()) {
+ return right_type.IsString() ? BOTH_STRING : STRING;
+ } else if (right_type.IsString()) {
return STRING;
}
@@ -2163,68 +2328,116 @@ TRBinaryOpIC::TypeInfo TRBinaryOpIC::GetTypeInfo(Handle<Object> left,
}
-// defined in code-stubs-<arch>.cc
-// Only needed to remove dependency of ic.cc on code-stubs-<arch>.h.
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
- TRBinaryOpIC::TypeInfo type_info,
- TRBinaryOpIC::TypeInfo result_type);
+RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
+ ASSERT(args.length() == 4);
+
+ HandleScope scope(isolate);
+ Handle<Object> operand = args.at<Object>(0);
+ int key = args.smi_at(1);
+ Token::Value op = static_cast<Token::Value>(args.smi_at(2));
+ UnaryOpIC::TypeInfo previous_type =
+ static_cast<UnaryOpIC::TypeInfo>(args.smi_at(3));
+
+ UnaryOpIC::TypeInfo type = UnaryOpIC::GetTypeInfo(operand);
+ type = UnaryOpIC::ComputeNewType(type, previous_type);
+ UnaryOpStub stub(key, type);
+ Handle<Code> code = stub.GetCode();
+ if (!code.is_null()) {
+ if (FLAG_trace_ic) {
+ PrintF("[UnaryOpIC (%s->%s)#%s]\n",
+ UnaryOpIC::GetName(previous_type),
+ UnaryOpIC::GetName(type),
+ Token::Name(op));
+ }
+ UnaryOpIC ic(isolate);
+ ic.patch(*code);
+ }
-MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) {
+ Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
+ isolate->thread_local_top()->context_->builtins(), isolate);
+ Object* builtin = NULL; // Initialization calms down the compiler.
+ switch (op) {
+ case Token::SUB:
+ builtin = builtins->javascript_builtin(Builtins::UNARY_MINUS);
+ break;
+ case Token::BIT_NOT:
+ builtin = builtins->javascript_builtin(Builtins::BIT_NOT);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
+
+ bool caught_exception;
+ Handle<Object> result = Execution::Call(builtin_function, operand, 0, NULL,
+ &caught_exception);
+ if (caught_exception) {
+ return Failure::Exception();
+ }
+ return *result;
+}
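+
+// Note the two-phase structure above: patching only installs a better stub
+// for future executions of this call site; the operation that just missed
+// is still completed here by invoking the generic JavaScript builtin.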
+
+RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
ASSERT(args.length() == 5);
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<Object> left = args.at<Object>(0);
Handle<Object> right = args.at<Object>(1);
- int key = Smi::cast(args[2])->value();
- Token::Value op = static_cast<Token::Value>(Smi::cast(args[3])->value());
- TRBinaryOpIC::TypeInfo previous_type =
- static_cast<TRBinaryOpIC::TypeInfo>(Smi::cast(args[4])->value());
-
- TRBinaryOpIC::TypeInfo type = TRBinaryOpIC::GetTypeInfo(left, right);
- type = TRBinaryOpIC::JoinTypes(type, previous_type);
- TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED;
- if (type == TRBinaryOpIC::STRING && op != Token::ADD) {
- type = TRBinaryOpIC::GENERIC;
- }
- if (type == TRBinaryOpIC::SMI &&
- previous_type == TRBinaryOpIC::SMI) {
- if (op == Token::DIV || op == Token::MUL || kSmiValueSize == 32) {
+ int key = args.smi_at(2);
+ Token::Value op = static_cast<Token::Value>(args.smi_at(3));
+ BinaryOpIC::TypeInfo previous_type =
+ static_cast<BinaryOpIC::TypeInfo>(args.smi_at(4));
+
+ BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(left, right);
+ type = BinaryOpIC::JoinTypes(type, previous_type);
+ BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED;
+ if ((type == BinaryOpIC::STRING || type == BinaryOpIC::BOTH_STRING) &&
+ op != Token::ADD) {
+ type = BinaryOpIC::GENERIC;
+ }
+ if (type == BinaryOpIC::SMI && previous_type == BinaryOpIC::SMI) {
+ if (op == Token::DIV ||
+ op == Token::MUL ||
+ op == Token::SHR ||
+ kSmiValueSize == 32) {
// Arithmetic on two Smi inputs has yielded a heap number.
// That is the only way to get here from the Smi stub.
// With 32-bit Smis, all overflows give heap numbers, but with
// 31-bit Smis, most operations overflow to int32 results.
- result_type = TRBinaryOpIC::HEAP_NUMBER;
+ result_type = BinaryOpIC::HEAP_NUMBER;
} else {
// Other operations on SMIs that overflow yield int32s.
- result_type = TRBinaryOpIC::INT32;
+ result_type = BinaryOpIC::INT32;
}
}
- if (type == TRBinaryOpIC::INT32 &&
- previous_type == TRBinaryOpIC::INT32) {
+ if (type == BinaryOpIC::INT32 && previous_type == BinaryOpIC::INT32) {
// We must be here because an operation on two INT32 types overflowed.
- result_type = TRBinaryOpIC::HEAP_NUMBER;
+ result_type = BinaryOpIC::HEAP_NUMBER;
}
- Handle<Code> code = GetTypeRecordingBinaryOpStub(key, type, result_type);
+ BinaryOpStub stub(key, type, result_type);
+ Handle<Code> code = stub.GetCode();
if (!code.is_null()) {
if (FLAG_trace_ic) {
- PrintF("[TypeRecordingBinaryOpIC (%s->(%s->%s))#%s]\n",
- TRBinaryOpIC::GetName(previous_type),
- TRBinaryOpIC::GetName(type),
- TRBinaryOpIC::GetName(result_type),
+ PrintF("[BinaryOpIC (%s->(%s->%s))#%s]\n",
+ BinaryOpIC::GetName(previous_type),
+ BinaryOpIC::GetName(type),
+ BinaryOpIC::GetName(result_type),
Token::Name(op));
}
- TRBinaryOpIC ic;
+ BinaryOpIC ic(isolate);
ic.patch(*code);
// Activate inlined smi code.
- if (previous_type == TRBinaryOpIC::UNINITIALIZED) {
+ if (previous_type == BinaryOpIC::UNINITIALIZED) {
PatchInlinedSmiCode(ic.address());
}
}
- Handle<JSBuiltinsObject> builtins = Top::builtins();
+ Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
+ isolate->thread_local_top()->context_->builtins(), isolate);
Object* builtin = NULL; // Initialization calms down the compiler.
switch (op) {
case Token::ADD:
@@ -2264,7 +2477,7 @@ MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) {
UNREACHABLE();
}
- Handle<JSFunction> builtin_function(JSFunction::cast(builtin));
+ Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
bool caught_exception;
Object** builtin_args[] = { right.location() };
@@ -2300,6 +2513,8 @@ const char* CompareIC::GetStateName(State state) {
case SMIS: return "SMIS";
case HEAP_NUMBERS: return "HEAP_NUMBERS";
case OBJECTS: return "OBJECTS";
+ case SYMBOLS: return "SYMBOLS";
+ case STRINGS: return "STRINGS";
case GENERIC: return "GENERIC";
default:
UNREACHABLE();
@@ -2312,28 +2527,34 @@ CompareIC::State CompareIC::TargetState(State state,
bool has_inlined_smi_code,
Handle<Object> x,
Handle<Object> y) {
- if (!has_inlined_smi_code && state != UNINITIALIZED) return GENERIC;
+ if (!has_inlined_smi_code && state != UNINITIALIZED && state != SYMBOLS) {
+ return GENERIC;
+ }
if (state == UNINITIALIZED && x->IsSmi() && y->IsSmi()) return SMIS;
if ((state == UNINITIALIZED || (state == SMIS && has_inlined_smi_code)) &&
x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return GENERIC;
if (state == UNINITIALIZED &&
+ x->IsSymbol() && y->IsSymbol()) return SYMBOLS;
+ if ((state == UNINITIALIZED || state == SYMBOLS) &&
+ x->IsString() && y->IsString()) return STRINGS;
+ if (state == UNINITIALIZED &&
x->IsJSObject() && y->IsJSObject()) return OBJECTS;
return GENERIC;
}
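+
+// Worked transitions for TargetState above, assuming inlined smi code is
+// present and op_ is an equality operator:
+// UNINITIALIZED + (1, 2) => SMIS
+// SMIS + (1, 2.5) => HEAP_NUMBERS
+// UNINITIALIZED + (symbol, symbol) => SYMBOLS
+// SYMBOLS + ("ab", "cd") => STRINGS
+// UNINITIALIZED + (object, object) => OBJECTS
+// anything else => GENERIC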
// Used from ic_<arch>.cc.
-Code* CompareIC_Miss(Arguments args) {
+RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
NoHandleAllocation na;
ASSERT(args.length() == 3);
- CompareIC ic(static_cast<Token::Value>(Smi::cast(args[2])->value()));
+ CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
return ic.target();
}
-static Address IC_utilities[] = {
+static const Address IC_utilities[] = {
#define ADDR(name) FUNCTION_ADDR(name),
IC_UTIL_LIST(ADDR)
NULL
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index fca7fc00b..4b301c5ba 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,6 +29,7 @@
#define V8_IC_H_
#include "macro-assembler.h"
+#include "type-info.h"
namespace v8 {
namespace internal {
@@ -39,12 +40,15 @@ namespace internal {
#define IC_UTIL_LIST(ICU) \
ICU(LoadIC_Miss) \
ICU(KeyedLoadIC_Miss) \
+ ICU(KeyedLoadIC_MissForceGeneric) \
ICU(CallIC_Miss) \
ICU(KeyedCallIC_Miss) \
ICU(StoreIC_Miss) \
ICU(StoreIC_ArrayLength) \
ICU(SharedStoreIC_ExtendStorage) \
ICU(KeyedStoreIC_Miss) \
+ ICU(KeyedStoreIC_MissForceGeneric) \
+ ICU(KeyedStoreIC_Slow) \
/* Utilities for IC stubs. */ \
ICU(LoadCallbackProperty) \
ICU(StoreCallbackProperty) \
@@ -53,8 +57,8 @@ namespace internal {
ICU(LoadPropertyWithInterceptorForCall) \
ICU(KeyedLoadPropertyWithInterceptor) \
ICU(StoreInterceptorProperty) \
+ ICU(UnaryOp_Patch) \
ICU(BinaryOp_Patch) \
- ICU(TypeRecordingBinaryOp_Patch) \
ICU(CompareIC_Miss)
//
// IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
@@ -62,7 +66,6 @@ namespace internal {
//
class IC {
public:
-
// The ids for utility called from the generated code.
enum UtilityId {
#define CONST_NAME(name) k##name,
@@ -86,7 +89,7 @@ class IC {
// Construct the IC structure with the given number of extra
// JavaScript frames on the stack.
- explicit IC(FrameDepth depth);
+ IC(FrameDepth depth, Isolate* isolate);
// Get the call-site target; used for determining the state.
Code* target() { return GetTargetAtAddress(address()); }
@@ -130,6 +133,7 @@ class IC {
protected:
Address fp() const { return fp_; }
Address pc() const { return *pc_address_; }
+ Isolate* isolate() const { return isolate_; }
#ifdef ENABLE_DEBUGGER_SUPPORT
// Computes the address in the original code when the code running is
@@ -141,17 +145,17 @@ class IC {
void set_target(Code* code) { SetTargetAtAddress(address(), code); }
#ifdef DEBUG
- static void TraceIC(const char* type,
- Handle<Object> name,
- State old_state,
- Code* new_target,
- const char* extra_info = "");
+ void TraceIC(const char* type,
+ Handle<Object> name,
+ State old_state,
+ Code* new_target,
+ const char* extra_info = "");
#endif
- static Failure* TypeError(const char* type,
- Handle<Object> object,
- Handle<Object> key);
- static Failure* ReferenceError(const char* type, Handle<String> name);
+ Failure* TypeError(const char* type,
+ Handle<Object> object,
+ Handle<Object> key);
+ Failure* ReferenceError(const char* type, Handle<String> name);
// Access the target code for the given IC address.
static inline Code* GetTargetAtAddress(Address address);
@@ -167,6 +171,8 @@ class IC {
// invoke the garbage collector.
Address* pc_address_;
+ Isolate* isolate_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
};
@@ -188,8 +194,13 @@ class IC_Utility {
class CallICBase: public IC {
+ public:
+ class Contextual: public BitField<bool, 0, 1> {};
+ class StringStubState: public BitField<StringStubFeedback, 1, 1> {};
+
protected:
- explicit CallICBase(Code::Kind kind) : IC(EXTRA_CALL_FRAME), kind_(kind) {}
+ CallICBase(Code::Kind kind, Isolate* isolate)
+ : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
public:
MUST_USE_RESULT MaybeObject* LoadFunction(State state,
@@ -227,27 +238,37 @@ class CallICBase: public IC {
void ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object);
static void Clear(Address address, Code* target);
+
friend class IC;
};
class CallIC: public CallICBase {
public:
- CallIC() : CallICBase(Code::CALL_IC) { ASSERT(target()->is_call_stub()); }
+ explicit CallIC(Isolate* isolate) : CallICBase(Code::CALL_IC, isolate) {
+ ASSERT(target()->is_call_stub());
+ }
// Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm, int argc) {
- GenerateMiss(masm, argc);
+ static void GenerateInitialize(MacroAssembler* masm,
+ int argc,
+ Code::ExtraICState extra_ic_state) {
+ GenerateMiss(masm, argc, extra_ic_state);
}
- static void GenerateMiss(MacroAssembler* masm, int argc);
- static void GenerateMegamorphic(MacroAssembler* masm, int argc);
+ static void GenerateMiss(MacroAssembler* masm,
+ int argc,
+ Code::ExtraICState extra_ic_state);
+ static void GenerateMegamorphic(MacroAssembler* masm,
+ int argc,
+ Code::ExtraICState extra_ic_state);
static void GenerateNormal(MacroAssembler* masm, int argc);
};
class KeyedCallIC: public CallICBase {
public:
- KeyedCallIC() : CallICBase(Code::KEYED_CALL_IC) {
+ explicit KeyedCallIC(Isolate* isolate)
+ : CallICBase(Code::KEYED_CALL_IC, isolate) {
ASSERT(target()->is_keyed_call_stub());
}
@@ -262,12 +283,15 @@ class KeyedCallIC: public CallICBase {
static void GenerateMiss(MacroAssembler* masm, int argc);
static void GenerateMegamorphic(MacroAssembler* masm, int argc);
static void GenerateNormal(MacroAssembler* masm, int argc);
+ static void GenerateNonStrictArguments(MacroAssembler* masm, int argc);
};
class LoadIC: public IC {
public:
- LoadIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_load_stub()); }
+ explicit LoadIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
+ ASSERT(target()->is_load_stub());
+ }
MUST_USE_RESULT MaybeObject* Load(State state,
Handle<Object> object,
@@ -288,14 +312,6 @@ class LoadIC: public IC {
bool support_wrappers);
static void GenerateFunctionPrototype(MacroAssembler* masm);
- // Clear the use of the inlined version.
- static void ClearInlinedVersion(Address address);
-
- // The offset from the inlined patch site to the start of the
- // inlined load instruction. It is architecture-dependent, and not
- // used on ARM.
- static const int kOffsetToLoadInstruction;
-
private:
// Update the inline cache and the global stub cache based on the
// lookup result.
@@ -305,51 +321,94 @@ class LoadIC: public IC {
Handle<String> name);
// Stub accessors.
- static Code* megamorphic_stub() {
- return Builtins::builtin(Builtins::LoadIC_Megamorphic);
+ Code* megamorphic_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::kLoadIC_Megamorphic);
}
static Code* initialize_stub() {
- return Builtins::builtin(Builtins::LoadIC_Initialize);
+ return Isolate::Current()->builtins()->builtin(
+ Builtins::kLoadIC_Initialize);
}
- static Code* pre_monomorphic_stub() {
- return Builtins::builtin(Builtins::LoadIC_PreMonomorphic);
+ Code* pre_monomorphic_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::kLoadIC_PreMonomorphic);
}
static void Clear(Address address, Code* target);
- static bool PatchInlinedLoad(Address address, Object* map, int index);
+ friend class IC;
+};
- static bool PatchInlinedContextualLoad(Address address,
- Object* map,
- Object* cell,
- bool is_dont_delete);
- friend class IC;
+class KeyedIC: public IC {
+ public:
+ explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {}
+ virtual ~KeyedIC() {}
+
+ virtual MaybeObject* GetFastElementStubWithoutMapCheck(
+ bool is_js_array) = 0;
+
+ virtual MaybeObject* GetExternalArrayStubWithoutMapCheck(
+ JSObject::ElementsKind elements_kind) = 0;
+
+ protected:
+ virtual Code* string_stub() {
+ return NULL;
+ }
+
+ virtual Code::Kind kind() const = 0;
+
+ virtual String* GetStubNameForCache(IC::State ic_state) = 0;
+
+ MaybeObject* ComputeStub(JSObject* receiver,
+ bool is_store,
+ StrictModeFlag strict_mode,
+ Code* default_stub);
+
+ virtual MaybeObject* ConstructMegamorphicStub(
+ MapList* receiver_maps,
+ CodeList* targets,
+ StrictModeFlag strict_mode) = 0;
+
+ private:
+ void GetReceiverMapsForStub(Code* stub, MapList* result);
+
+ MaybeObject* ComputeMonomorphicStubWithoutMapCheck(
+ Map* receiver_map,
+ StrictModeFlag strict_mode,
+ Code* generic_stub);
+
+ MaybeObject* ComputeMonomorphicStub(JSObject* receiver,
+ bool is_store,
+ StrictModeFlag strict_mode,
+ Code* default_stub);
};
-class KeyedLoadIC: public IC {
+class KeyedLoadIC: public KeyedIC {
public:
- KeyedLoadIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_keyed_load_stub()); }
+ explicit KeyedLoadIC(Isolate* isolate) : KeyedIC(isolate) {
+ ASSERT(target()->is_keyed_load_stub());
+ }
MUST_USE_RESULT MaybeObject* Load(State state,
Handle<Object> object,
- Handle<Object> key);
+ Handle<Object> key,
+ bool force_generic_stub);
// Code generator routines.
- static void GenerateMiss(MacroAssembler* masm);
+ static void GenerateMiss(MacroAssembler* masm, bool force_generic);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
- static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+ static void GenerateInitialize(MacroAssembler* masm) {
+ GenerateMiss(masm, false);
+ }
static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm);
+ GenerateMiss(masm, false);
}
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateString(MacroAssembler* masm);
-
static void GenerateIndexedInterceptor(MacroAssembler* masm);
-
- // Clear the use of the inlined version.
- static void ClearInlinedVersion(Address address);
+ static void GenerateNonStrictArguments(MacroAssembler* masm);
// Bit mask to be tested against bit field for the cases when
// generic stub should go into slow case.
@@ -358,6 +417,27 @@ class KeyedLoadIC: public IC {
static const int kSlowCaseBitFieldMask =
(1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
+ virtual MaybeObject* GetFastElementStubWithoutMapCheck(
+ bool is_js_array);
+
+ virtual MaybeObject* GetExternalArrayStubWithoutMapCheck(
+ JSObject::ElementsKind elements_kind);
+
+ protected:
+ virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
+
+ virtual String* GetStubNameForCache(IC::State ic_state);
+
+ virtual MaybeObject* ConstructMegamorphicStub(
+ MapList* receiver_maps,
+ CodeList* targets,
+ StrictModeFlag strict_mode);
+
+ virtual Code* string_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_String);
+ }
+
private:
// Update the inline cache.
void UpdateCaches(LookupResult* lookup,
@@ -367,38 +447,41 @@ class KeyedLoadIC: public IC {
// Stub accessors.
static Code* initialize_stub() {
- return Builtins::builtin(Builtins::KeyedLoadIC_Initialize);
+ return Isolate::Current()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_Initialize);
}
- static Code* megamorphic_stub() {
- return Builtins::builtin(Builtins::KeyedLoadIC_Generic);
+ Code* megamorphic_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_Generic);
}
- static Code* generic_stub() {
- return Builtins::builtin(Builtins::KeyedLoadIC_Generic);
+ Code* generic_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_Generic);
}
- static Code* pre_monomorphic_stub() {
- return Builtins::builtin(Builtins::KeyedLoadIC_PreMonomorphic);
+ Code* pre_monomorphic_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_PreMonomorphic);
}
- static Code* string_stub() {
- return Builtins::builtin(Builtins::KeyedLoadIC_String);
+ Code* indexed_interceptor_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_IndexedInterceptor);
}
-
- static Code* indexed_interceptor_stub() {
- return Builtins::builtin(Builtins::KeyedLoadIC_IndexedInterceptor);
+ Code* non_strict_arguments_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_NonStrictArguments);
}
static void Clear(Address address, Code* target);
- // Support for patching the map that is checked in an inlined
- // version of keyed load.
- static bool PatchInlinedLoad(Address address, Object* map);
-
friend class IC;
};
class StoreIC: public IC {
public:
- StoreIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_store_stub()); }
+ explicit StoreIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
+ ASSERT(target()->is_store_stub());
+ }
MUST_USE_RESULT MaybeObject* Store(State state,
StrictModeFlag strict_mode,
@@ -416,13 +499,6 @@ class StoreIC: public IC {
static void GenerateGlobalProxy(MacroAssembler* masm,
StrictModeFlag strict_mode);
- // Clear the use of an inlined version.
- static void ClearInlinedVersion(Address address);
-
- // The offset from the inlined patch site to the start of the
- // inlined store instruction.
- static const int kOffsetToStoreInstruction;
-
private:
// Update the inline cache and the global stub cache based on the
// lookup result.
@@ -441,59 +517,78 @@ class StoreIC: public IC {
}
// Stub accessors.
- static Code* megamorphic_stub() {
- return Builtins::builtin(Builtins::StoreIC_Megamorphic);
+ Code* megamorphic_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::kStoreIC_Megamorphic);
}
- static Code* megamorphic_stub_strict() {
- return Builtins::builtin(Builtins::StoreIC_Megamorphic_Strict);
+ Code* megamorphic_stub_strict() {
+ return isolate()->builtins()->builtin(
+ Builtins::kStoreIC_Megamorphic_Strict);
}
static Code* initialize_stub() {
- return Builtins::builtin(Builtins::StoreIC_Initialize);
+ return Isolate::Current()->builtins()->builtin(
+ Builtins::kStoreIC_Initialize);
}
static Code* initialize_stub_strict() {
- return Builtins::builtin(Builtins::StoreIC_Initialize_Strict);
+ return Isolate::Current()->builtins()->builtin(
+ Builtins::kStoreIC_Initialize_Strict);
}
- static Code* global_proxy_stub() {
- return Builtins::builtin(Builtins::StoreIC_GlobalProxy);
+ Code* global_proxy_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::kStoreIC_GlobalProxy);
}
- static Code* global_proxy_stub_strict() {
- return Builtins::builtin(Builtins::StoreIC_GlobalProxy_Strict);
+ Code* global_proxy_stub_strict() {
+ return isolate()->builtins()->builtin(
+ Builtins::kStoreIC_GlobalProxy_Strict);
}
static void Clear(Address address, Code* target);
- // Support for patching the index and the map that is checked in an
- // inlined version of the named store.
- static bool PatchInlinedStore(Address address, Object* map, int index);
-
friend class IC;
};
-class KeyedStoreIC: public IC {
+class KeyedStoreIC: public KeyedIC {
public:
- KeyedStoreIC() : IC(NO_EXTRA_FRAME) { }
+ explicit KeyedStoreIC(Isolate* isolate) : KeyedIC(isolate) {
+ ASSERT(target()->is_keyed_store_stub());
+ }
MUST_USE_RESULT MaybeObject* Store(State state,
- StrictModeFlag strict_mode,
+ StrictModeFlag strict_mode,
Handle<Object> object,
Handle<Object> name,
- Handle<Object> value);
+ Handle<Object> value,
+ bool force_generic);
// Code generators for stub routines. Only called once at startup.
- static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
- static void GenerateMiss(MacroAssembler* masm);
+ static void GenerateInitialize(MacroAssembler* masm) {
+ GenerateMiss(masm, false);
+ }
+ static void GenerateMiss(MacroAssembler* masm, bool force_generic);
+ static void GenerateSlow(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode);
static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
+ static void GenerateNonStrictArguments(MacroAssembler* masm);
- // Clear the inlined version so the IC is always hit.
- static void ClearInlinedVersion(Address address);
+ virtual MaybeObject* GetFastElementStubWithoutMapCheck(
+ bool is_js_array);
- // Restore the inlined version so the fast case can get hit.
- static void RestoreInlinedVersion(Address address);
+ virtual MaybeObject* GetExternalArrayStubWithoutMapCheck(
+ JSObject::ElementsKind elements_kind);
- private:
+ protected:
+ virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
+
+ virtual String* GetStubNameForCache(IC::State ic_state);
+
+ virtual MaybeObject* ConstructMegamorphicStub(
+ MapList* receiver_maps,
+ CodeList* targets,
+ StrictModeFlag strict_mode);
+
+ private:
// Update the inline cache.
void UpdateCaches(LookupResult* lookup,
State state,
@@ -511,51 +606,52 @@ class KeyedStoreIC: public IC {
// Stub accessors.
static Code* initialize_stub() {
- return Builtins::builtin(Builtins::KeyedStoreIC_Initialize);
+ return Isolate::Current()->builtins()->builtin(
+ Builtins::kKeyedStoreIC_Initialize);
+ }
+ Code* megamorphic_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::kKeyedStoreIC_Generic);
}
static Code* initialize_stub_strict() {
- return Builtins::builtin(Builtins::KeyedStoreIC_Initialize_Strict);
+ return Isolate::Current()->builtins()->builtin(
+ Builtins::kKeyedStoreIC_Initialize_Strict);
}
- static Code* megamorphic_stub() {
- return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
+ Code* megamorphic_stub_strict() {
+ return isolate()->builtins()->builtin(
+ Builtins::kKeyedStoreIC_Generic_Strict);
}
- static Code* megamorphic_stub_strict() {
- return Builtins::builtin(Builtins::KeyedStoreIC_Generic_Strict);
+ Code* generic_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::kKeyedStoreIC_Generic);
}
- static Code* generic_stub() {
- return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
+ Code* generic_stub_strict() {
+ return isolate()->builtins()->builtin(
+ Builtins::kKeyedStoreIC_Generic_Strict);
}
- static Code* generic_stub_strict() {
- return Builtins::builtin(Builtins::KeyedStoreIC_Generic_Strict);
+ Code* non_strict_arguments_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::kKeyedStoreIC_NonStrictArguments);
}
static void Clear(Address address, Code* target);
- // Support for patching the map that is checked in an inlined
- // version of keyed store.
- // The address is the patch point for the IC call
- // (Assembler::kCallTargetAddressOffset before the end of
- // the call/return address).
- // The map is the new map that the inlined code should check against.
- static bool PatchInlinedStore(Address address, Object* map);
-
friend class IC;
};
-class BinaryOpIC: public IC {
+class UnaryOpIC: public IC {
public:
-
+ // Sorted from most specific to most generic (ignoring UNINITIALIZED).
+ // TODO(svenpanne) Using enums+switch is an antipattern, use a class instead.
enum TypeInfo {
- UNINIT_OR_SMI,
- DEFAULT, // Initial state. When first executed, patches to one
- // of the following states depending on the operands types.
- HEAP_NUMBERS, // Both arguments are HeapNumbers.
- STRINGS, // At least one of the arguments is String.
- GENERIC // Non-specialized case (processes any type combination).
+ UNINITIALIZED,
+ SMI,
+ HEAP_NUMBER,
+ GENERIC
};
- BinaryOpIC() : IC(NO_EXTRA_FRAME) { }
+ explicit UnaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
void patch(Code* code);
@@ -563,25 +659,27 @@ class BinaryOpIC: public IC {
static State ToState(TypeInfo type_info);
- static TypeInfo GetTypeInfo(Object* left, Object* right);
+ static TypeInfo GetTypeInfo(Handle<Object> operand);
+
+ static TypeInfo ComputeNewType(TypeInfo type, TypeInfo previous);
};
// Type Recording BinaryOpIC, that records the types of the inputs and outputs.
-class TRBinaryOpIC: public IC {
+class BinaryOpIC: public IC {
public:
-
enum TypeInfo {
UNINITIALIZED,
SMI,
INT32,
HEAP_NUMBER,
ODDBALL,
+ BOTH_STRING, // Only used for addition operation.
STRING, // Only used for addition operation. At least one string operand.
GENERIC
};
- TRBinaryOpIC() : IC(NO_EXTRA_FRAME) { }
+ explicit BinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
void patch(Code* code);
@@ -601,11 +699,14 @@ class CompareIC: public IC {
UNINITIALIZED,
SMIS,
HEAP_NUMBERS,
+ SYMBOLS,
+ STRINGS,
OBJECTS,
GENERIC
};
- explicit CompareIC(Token::Value op) : IC(EXTRA_CALL_FRAME), op_(op) { }
+ CompareIC(Isolate* isolate, Token::Value op)
+ : IC(EXTRA_CALL_FRAME, isolate), op_(op) { }
// Update the inline cache for the given operands.
void UpdateCaches(Handle<Object> x, Handle<Object> y);
@@ -632,7 +733,7 @@ class CompareIC: public IC {
Token::Value op_;
};
-// Helper for TRBinaryOpIC and CompareIC.
+// Helper for BinaryOpIC and CompareIC.
void PatchInlinedSmiCode(Address address);
} } // namespace v8::internal
diff --git a/deps/v8/src/interpreter-irregexp.cc b/deps/v8/src/interpreter-irregexp.cc
index c9c3cc4c0..1c6c52ca8 100644
--- a/deps/v8/src/interpreter-irregexp.cc
+++ b/deps/v8/src/interpreter-irregexp.cc
@@ -40,10 +40,10 @@ namespace v8 {
namespace internal {
-static unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize;
+typedef unibrow::Mapping<unibrow::Ecma262Canonicalize> Canonicalize;
-
-static bool BackRefMatchesNoCase(int from,
+static bool BackRefMatchesNoCase(Canonicalize* interp_canonicalize,
+ int from,
int current,
int len,
Vector<const uc16> subject) {
@@ -53,8 +53,8 @@ static bool BackRefMatchesNoCase(int from,
if (old_char == new_char) continue;
unibrow::uchar old_string[1] = { old_char };
unibrow::uchar new_string[1] = { new_char };
- interp_canonicalize.get(old_char, '\0', old_string);
- interp_canonicalize.get(new_char, '\0', new_string);
+ interp_canonicalize->get(old_char, '\0', old_string);
+ interp_canonicalize->get(new_char, '\0', new_string);
if (old_string[0] != new_string[0]) {
return false;
}
@@ -63,7 +63,8 @@ static bool BackRefMatchesNoCase(int from,
}
-static bool BackRefMatchesNoCase(int from,
+static bool BackRefMatchesNoCase(Canonicalize* interp_canonicalize,
+ int from,
int current,
int len,
Vector<const char> subject) {
@@ -150,11 +151,11 @@ static int32_t Load16Aligned(const byte* pc) {
// matching terminates.
class BacktrackStack {
public:
- explicit BacktrackStack() {
- if (cache_ != NULL) {
+ explicit BacktrackStack(Isolate* isolate) : isolate_(isolate) {
+ if (isolate->irregexp_interpreter_backtrack_stack_cache() != NULL) {
// If the cache is not empty reuse the previously allocated stack.
- data_ = cache_;
- cache_ = NULL;
+ data_ = isolate->irregexp_interpreter_backtrack_stack_cache();
+ isolate->set_irregexp_interpreter_backtrack_stack_cache(NULL);
} else {
// Cache was empty. Allocate a new backtrack stack.
data_ = NewArray<int>(kBacktrackStackSize);
@@ -162,9 +163,9 @@ class BacktrackStack {
}
~BacktrackStack() {
- if (cache_ == NULL) {
+ if (isolate_->irregexp_interpreter_backtrack_stack_cache() == NULL) {
// The cache is empty. Keep this backtrack stack around.
- cache_ = data_;
+ isolate_->set_irregexp_interpreter_backtrack_stack_cache(data_);
} else {
// A backtrack stack was already cached, just release this one.
DeleteArray(data_);
@@ -179,16 +180,15 @@ class BacktrackStack {
static const int kBacktrackStackSize = 10000;
int* data_;
- static int* cache_;
+ Isolate* isolate_;
DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
};
-int* BacktrackStack::cache_ = NULL;
-
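+
+// The cache above is a single-slot, per-isolate free list: at most one
+// backtrack stack survives between matches, so back-to-back regexp calls
+// reuse their allocation without any global state. A stand-alone sketch of
+// the same scheme (all names below are hypothetical, not part of V8):
+namespace {
+
+struct SingleSlotCache {
+ SingleSlotCache() : slot(NULL) {}
+ int* slot; // Holds at most one spare buffer.
+};
+
+int* AcquireBuffer(SingleSlotCache* cache, int size) {
+ if (cache->slot != NULL) {
+ int* buffer = cache->slot; // Reuse the previously parked buffer.
+ cache->slot = NULL;
+ return buffer;
+ }
+ return new int[size]; // Cache empty: allocate a fresh buffer.
+}
+
+void ReleaseBuffer(SingleSlotCache* cache, int* buffer) {
+ if (cache->slot == NULL) {
+ cache->slot = buffer; // Park it for the next match.
+ } else {
+ delete[] buffer; // A spare is already parked; free this one.
+ }
+}
+
+} // namespace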
template <typename Char>
-static bool RawMatch(const byte* code_base,
+static bool RawMatch(Isolate* isolate,
+ const byte* code_base,
Vector<const Char> subject,
int* registers,
int current,
@@ -197,7 +197,7 @@ static bool RawMatch(const byte* code_base,
// BacktrackStack ensures that the memory allocated for the backtracking stack
// is returned to the system or cached if there is no stack being cached at
// the moment.
- BacktrackStack backtrack_stack;
+ BacktrackStack backtrack_stack(isolate);
int* backtrack_stack_base = backtrack_stack.data();
int* backtrack_sp = backtrack_stack_base;
int backtrack_stack_space = backtrack_stack.max_size();
@@ -584,7 +584,8 @@ static bool RawMatch(const byte* code_base,
pc = code_base + Load32Aligned(pc + 4);
break;
} else {
- if (BackRefMatchesNoCase(from, current, len, subject)) {
+ if (BackRefMatchesNoCase(isolate->interp_canonicalize_mapping(),
+ from, current, len, subject)) {
current += len;
pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
} else {
@@ -624,7 +625,8 @@ static bool RawMatch(const byte* code_base,
}
-bool IrregexpInterpreter::Match(Handle<ByteArray> code_array,
+bool IrregexpInterpreter::Match(Isolate* isolate,
+ Handle<ByteArray> code_array,
Handle<String> subject,
int* registers,
int start_position) {
@@ -636,7 +638,8 @@ bool IrregexpInterpreter::Match(Handle<ByteArray> code_array,
if (subject->IsAsciiRepresentation()) {
Vector<const char> subject_vector = subject->ToAsciiVector();
if (start_position != 0) previous_char = subject_vector[start_position - 1];
- return RawMatch(code_base,
+ return RawMatch(isolate,
+ code_base,
subject_vector,
registers,
start_position,
@@ -644,7 +647,8 @@ bool IrregexpInterpreter::Match(Handle<ByteArray> code_array,
} else {
Vector<const uc16> subject_vector = subject->ToUC16Vector();
if (start_position != 0) previous_char = subject_vector[start_position - 1];
- return RawMatch(code_base,
+ return RawMatch(isolate,
+ code_base,
subject_vector,
registers,
start_position,
diff --git a/deps/v8/src/interpreter-irregexp.h b/deps/v8/src/interpreter-irregexp.h
index 0ad8846d7..076f0c508 100644
--- a/deps/v8/src/interpreter-irregexp.h
+++ b/deps/v8/src/interpreter-irregexp.h
@@ -36,7 +36,8 @@ namespace internal {
class IrregexpInterpreter {
public:
- static bool Match(Handle<ByteArray> code,
+ static bool Match(Isolate* isolate,
+ Handle<ByteArray> code,
Handle<String> subject,
int* captures,
int start_position);
diff --git a/deps/v8/src/frame-element.cc b/deps/v8/src/isolate-inl.h
index ee7be95f1..aa6b5372c 100644
--- a/deps/v8/src/frame-element.cc
+++ b/deps/v8/src/isolate-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,18 +25,26 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
+#ifndef V8_ISOLATE_INL_H_
+#define V8_ISOLATE_INL_H_
-#include "frame-element.h"
-#include "zone-inl.h"
+#include "isolate.h"
+
+#include "debug.h"
namespace v8 {
namespace internal {
-FrameElement::ZoneObjectList* FrameElement::ConstantList() {
- static ZoneObjectList list(10);
- return &list;
+
+bool Isolate::DebuggerHasBreakPoints() {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ return debug()->has_break_points();
+#else
+ return false;
+#endif
}
} } // namespace v8::internal
+
+#endif // V8_ISOLATE_INL_H_
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
new file mode 100644
index 000000000..e473b020f
--- /dev/null
+++ b/deps/v8/src/isolate.cc
@@ -0,0 +1,1871 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "ast.h"
+#include "bootstrapper.h"
+#include "codegen.h"
+#include "compilation-cache.h"
+#include "debug.h"
+#include "deoptimizer.h"
+#include "heap-profiler.h"
+#include "hydrogen.h"
+#include "isolate.h"
+#include "lithium-allocator.h"
+#include "log.h"
+#include "messages.h"
+#include "regexp-stack.h"
+#include "runtime-profiler.h"
+#include "scanner.h"
+#include "scopeinfo.h"
+#include "serialize.h"
+#include "simulator.h"
+#include "spaces.h"
+#include "stub-cache.h"
+#include "version.h"
+#include "vm-state-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+Atomic32 ThreadId::highest_thread_id_ = 0;
+
+int ThreadId::AllocateThreadId() {
+ int new_id = NoBarrier_AtomicIncrement(&highest_thread_id_, 1);
+ return new_id;
+}
+
+
+int ThreadId::GetCurrentThreadId() {
+ int thread_id = Thread::GetThreadLocalInt(Isolate::thread_id_key_);
+ if (thread_id == 0) {
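+    // AllocateThreadId increments from 1, so a stored id of 0 reliably
+    // means "no id assigned yet" for the current thread.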
+ thread_id = AllocateThreadId();
+ Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id);
+ }
+ return thread_id;
+}
+
+
+ThreadLocalTop::ThreadLocalTop() {
+ InitializeInternal();
+}
+
+
+void ThreadLocalTop::InitializeInternal() {
+ c_entry_fp_ = 0;
+ handler_ = 0;
+#ifdef USE_SIMULATOR
+ simulator_ = NULL;
+#endif
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ js_entry_sp_ = NULL;
+ external_callback_ = NULL;
+#endif
+#ifdef ENABLE_VMSTATE_TRACKING
+ current_vm_state_ = EXTERNAL;
+#endif
+ try_catch_handler_address_ = NULL;
+ context_ = NULL;
+ thread_id_ = ThreadId::Invalid();
+ external_caught_exception_ = false;
+ failed_access_check_callback_ = NULL;
+ save_context_ = NULL;
+ catcher_ = NULL;
+}
+
+
+void ThreadLocalTop::Initialize() {
+ InitializeInternal();
+#ifdef USE_SIMULATOR
+#ifdef V8_TARGET_ARCH_ARM
+ simulator_ = Simulator::current(isolate_);
+#elif V8_TARGET_ARCH_MIPS
+ simulator_ = Simulator::current(isolate_);
+#endif
+#endif
+ thread_id_ = ThreadId::Current();
+}
+
+
+v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
+ return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
+}
+
+
+// Create a dummy thread that will wait forever on a semaphore. The only
+// purpose of this thread is to have some stack area to save essential data
+// into for use by a stacks-only core dump (aka minidump).
+class PreallocatedMemoryThread: public Thread {
+ public:
+ char* data() {
+ if (data_ready_semaphore_ != NULL) {
+ // Initial access is guarded until the data has been published.
+ data_ready_semaphore_->Wait();
+ delete data_ready_semaphore_;
+ data_ready_semaphore_ = NULL;
+ }
+ return data_;
+ }
+
+ unsigned length() {
+ if (data_ready_semaphore_ != NULL) {
+ // Initial access is guarded until the data has been published.
+ data_ready_semaphore_->Wait();
+ delete data_ready_semaphore_;
+ data_ready_semaphore_ = NULL;
+ }
+ return length_;
+ }
+
+ // Stop the PreallocatedMemoryThread and release its resources.
+ void StopThread() {
+ keep_running_ = false;
+ wait_for_ever_semaphore_->Signal();
+
+ // Wait for the thread to terminate.
+ Join();
+
+ if (data_ready_semaphore_ != NULL) {
+ delete data_ready_semaphore_;
+ data_ready_semaphore_ = NULL;
+ }
+
+ delete wait_for_ever_semaphore_;
+ wait_for_ever_semaphore_ = NULL;
+ }
+
+ protected:
+ // When the thread starts running it will allocate a fixed number of bytes
+ // on the stack and publish the location of this memory for others to use.
+ void Run() {
+ EmbeddedVector<char, 15 * 1024> local_buffer;
+
+ // Initialize the buffer with a known good value.
+ OS::StrNCpy(local_buffer, "Trace data was not generated.\n",
+ local_buffer.length());
+
+ // Publish the local buffer and signal its availability.
+ data_ = local_buffer.start();
+ length_ = local_buffer.length();
+ data_ready_semaphore_->Signal();
+
+ while (keep_running_) {
+ // This thread will wait here until the end of time.
+ wait_for_ever_semaphore_->Wait();
+ }
+
+ // Make sure we access the buffer after the wait to remove all possibility
+ // of it being optimized away.
+ OS::StrNCpy(local_buffer, "PreallocatedMemoryThread shutting down.\n",
+ local_buffer.length());
+ }
+
+
+ private:
+ PreallocatedMemoryThread()
+ : Thread("v8:PreallocMem"),
+ keep_running_(true),
+ wait_for_ever_semaphore_(OS::CreateSemaphore(0)),
+ data_ready_semaphore_(OS::CreateSemaphore(0)),
+ data_(NULL),
+ length_(0) {
+ }
+
+ // Used to make sure that the thread keeps looping even for spurious wakeups.
+ bool keep_running_;
+
+  // This semaphore is used by the PreallocatedMemoryThread to wait forever.
+ Semaphore* wait_for_ever_semaphore_;
+ // Semaphore to signal that the data has been initialized.
+ Semaphore* data_ready_semaphore_;
+
+ // Location and size of the preallocated memory block.
+ char* data_;
+ unsigned length_;
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(PreallocatedMemoryThread);
+};
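+
+// Illustrative lifecycle sketch (hypothetical driver code; the real
+// entry points are Isolate::PreallocatedMemoryThreadStart/Stop below):
+//
+//   PreallocatedMemoryThread* thread = new PreallocatedMemoryThread();
+//   thread->Start();                  // Run() publishes data_ and length_
+//   char* buf = thread->data();       // blocks until the buffer is ready
+//   unsigned len = thread->length();  // no wait: semaphore consumed above
+//   thread->StopThread();             // signal, join, release semaphores
+//   delete thread;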
+
+
+void Isolate::PreallocatedMemoryThreadStart() {
+ if (preallocated_memory_thread_ != NULL) return;
+ preallocated_memory_thread_ = new PreallocatedMemoryThread();
+ preallocated_memory_thread_->Start();
+}
+
+
+void Isolate::PreallocatedMemoryThreadStop() {
+ if (preallocated_memory_thread_ == NULL) return;
+ preallocated_memory_thread_->StopThread();
+ // Done with the thread entirely.
+ delete preallocated_memory_thread_;
+ preallocated_memory_thread_ = NULL;
+}
+
+
+void Isolate::PreallocatedStorageInit(size_t size) {
+ ASSERT(free_list_.next_ == &free_list_);
+ ASSERT(free_list_.previous_ == &free_list_);
+ PreallocatedStorage* free_chunk =
+ reinterpret_cast<PreallocatedStorage*>(new char[size]);
+ free_list_.next_ = free_list_.previous_ = free_chunk;
+ free_chunk->next_ = free_chunk->previous_ = &free_list_;
+ free_chunk->size_ = size - sizeof(PreallocatedStorage);
+ preallocated_storage_preallocated_ = true;
+}
+
+
+void* Isolate::PreallocatedStorageNew(size_t size) {
+ if (!preallocated_storage_preallocated_) {
+ return FreeStoreAllocationPolicy::New(size);
+ }
+ ASSERT(free_list_.next_ != &free_list_);
+ ASSERT(free_list_.previous_ != &free_list_);
+
+ size = (size + kPointerSize - 1) & ~(kPointerSize - 1);
+ // Search for exact fit.
+ for (PreallocatedStorage* storage = free_list_.next_;
+ storage != &free_list_;
+ storage = storage->next_) {
+ if (storage->size_ == size) {
+ storage->Unlink();
+ storage->LinkTo(&in_use_list_);
+ return reinterpret_cast<void*>(storage + 1);
+ }
+ }
+ // Search for first fit.
+ for (PreallocatedStorage* storage = free_list_.next_;
+ storage != &free_list_;
+ storage = storage->next_) {
+ if (storage->size_ >= size + sizeof(PreallocatedStorage)) {
+ storage->Unlink();
+ storage->LinkTo(&in_use_list_);
+ PreallocatedStorage* left_over =
+ reinterpret_cast<PreallocatedStorage*>(
+ reinterpret_cast<char*>(storage + 1) + size);
+ left_over->size_ = storage->size_ - size - sizeof(PreallocatedStorage);
+ ASSERT(size + left_over->size_ + sizeof(PreallocatedStorage) ==
+ storage->size_);
+ storage->size_ = size;
+ left_over->LinkTo(&free_list_);
+ return reinterpret_cast<void*>(storage + 1);
+ }
+ }
+ // Allocation failure.
+ ASSERT(false);
+ return NULL;
+}
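+
+// Illustrative layout of a chunk handed out above: a PreallocatedStorage
+// header sits directly in front of the payload the caller sees.
+//
+//   | next_ | previous_ | size_ | payload of size_ bytes |
+//   ^ storage                   ^ storage + 1 (returned pointer)
+//
+// First fit only splits a chunk when the remainder can hold another header,
+// hence the size_ >= size + sizeof(PreallocatedStorage) test.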
+
+
+// We don't attempt to coalesce.
+void Isolate::PreallocatedStorageDelete(void* p) {
+ if (p == NULL) {
+ return;
+ }
+ if (!preallocated_storage_preallocated_) {
+ FreeStoreAllocationPolicy::Delete(p);
+ return;
+ }
+ PreallocatedStorage* storage = reinterpret_cast<PreallocatedStorage*>(p) - 1;
+ ASSERT(storage->next_->previous_ == storage);
+ ASSERT(storage->previous_->next_ == storage);
+ storage->Unlink();
+ storage->LinkTo(&free_list_);
+}
+
+
+Isolate* Isolate::default_isolate_ = NULL;
+Thread::LocalStorageKey Isolate::isolate_key_;
+Thread::LocalStorageKey Isolate::thread_id_key_;
+Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
+Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
+Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
+
+
+class IsolateInitializer {
+ public:
+ IsolateInitializer() {
+ Isolate::EnsureDefaultIsolate();
+ }
+};
+
+static IsolateInitializer* EnsureDefaultIsolateAllocated() {
+ // TODO(isolates): Use the system threading API to do this once?
+ static IsolateInitializer static_initializer;
+ return &static_initializer;
+}
+
+// This variable is only needed to trigger static initialization.
+static IsolateInitializer* static_initializer = EnsureDefaultIsolateAllocated();
+
+
+Isolate::PerIsolateThreadData* Isolate::AllocatePerIsolateThreadData(
+ ThreadId thread_id) {
+ ASSERT(!thread_id.Equals(ThreadId::Invalid()));
+ PerIsolateThreadData* per_thread = new PerIsolateThreadData(this, thread_id);
+ {
+ ScopedLock lock(process_wide_mutex_);
+ ASSERT(thread_data_table_->Lookup(this, thread_id) == NULL);
+ thread_data_table_->Insert(per_thread);
+ ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
+ }
+ return per_thread;
+}
+
+
+Isolate::PerIsolateThreadData*
+ Isolate::FindOrAllocatePerThreadDataForThisThread() {
+ ThreadId thread_id = ThreadId::Current();
+ PerIsolateThreadData* per_thread = NULL;
+ {
+ ScopedLock lock(process_wide_mutex_);
+ per_thread = thread_data_table_->Lookup(this, thread_id);
+ if (per_thread == NULL) {
+ per_thread = AllocatePerIsolateThreadData(thread_id);
+ }
+ }
+ return per_thread;
+}
+
+
+Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() {
+ ThreadId thread_id = ThreadId::Current();
+ PerIsolateThreadData* per_thread = NULL;
+ {
+ ScopedLock lock(process_wide_mutex_);
+ per_thread = thread_data_table_->Lookup(this, thread_id);
+ }
+ return per_thread;
+}
+
+
+void Isolate::EnsureDefaultIsolate() {
+ ScopedLock lock(process_wide_mutex_);
+ if (default_isolate_ == NULL) {
+ isolate_key_ = Thread::CreateThreadLocalKey();
+ thread_id_key_ = Thread::CreateThreadLocalKey();
+ per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey();
+ thread_data_table_ = new Isolate::ThreadDataTable();
+ default_isolate_ = new Isolate();
+ }
+ // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
+  // because non-null thread data may already be set.
+ if (Thread::GetThreadLocal(isolate_key_) == NULL) {
+ Thread::SetThreadLocal(isolate_key_, default_isolate_);
+ }
+ CHECK(default_isolate_->PreInit());
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+Debugger* Isolate::GetDefaultIsolateDebugger() {
+ EnsureDefaultIsolate();
+ return default_isolate_->debugger();
+}
+#endif
+
+
+StackGuard* Isolate::GetDefaultIsolateStackGuard() {
+ EnsureDefaultIsolate();
+ return default_isolate_->stack_guard();
+}
+
+
+void Isolate::EnterDefaultIsolate() {
+ EnsureDefaultIsolate();
+ ASSERT(default_isolate_ != NULL);
+
+ PerIsolateThreadData* data = CurrentPerIsolateThreadData();
+ // If not yet in default isolate - enter it.
+ if (data == NULL || data->isolate() != default_isolate_) {
+ default_isolate_->Enter();
+ }
+}
+
+
+Isolate* Isolate::GetDefaultIsolateForLocking() {
+ EnsureDefaultIsolate();
+ return default_isolate_;
+}
+
+
+Address Isolate::get_address_from_id(Isolate::AddressId id) {
+ return isolate_addresses_[id];
+}
+
+
+char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) {
+ ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
+ Iterate(v, thread);
+ return thread_storage + sizeof(ThreadLocalTop);
+}
+
+
+void Isolate::IterateThread(ThreadVisitor* v) {
+ v->VisitThread(this, thread_local_top());
+}
+
+
+void Isolate::IterateThread(ThreadVisitor* v, char* t) {
+ ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
+ v->VisitThread(this, thread);
+}
+
+
+void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
+ // Visit the roots from the top for a given thread.
+ Object* pending;
+ // The pending exception can sometimes be a failure. We can't show
+ // that to the GC, which only understands objects.
+ if (thread->pending_exception_->ToObject(&pending)) {
+ v->VisitPointer(&pending);
+ thread->pending_exception_ = pending; // In case GC updated it.
+ }
+ v->VisitPointer(&(thread->pending_message_obj_));
+ v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
+ v->VisitPointer(BitCast<Object**>(&(thread->context_)));
+ Object* scheduled;
+ if (thread->scheduled_exception_->ToObject(&scheduled)) {
+ v->VisitPointer(&scheduled);
+ thread->scheduled_exception_ = scheduled;
+ }
+
+ for (v8::TryCatch* block = thread->TryCatchHandler();
+ block != NULL;
+ block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
+ v->VisitPointer(BitCast<Object**>(&(block->exception_)));
+ v->VisitPointer(BitCast<Object**>(&(block->message_)));
+ }
+
+ // Iterate over pointers on native execution stack.
+ for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
+ it.frame()->Iterate(v);
+ }
+}
+
+
+void Isolate::Iterate(ObjectVisitor* v) {
+ ThreadLocalTop* current_t = thread_local_top();
+ Iterate(v, current_t);
+}
+
+
+void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
+ // The ARM simulator has a separate JS stack. We therefore register
+ // the C++ try catch handler with the simulator and get back an
+ // address that can be used for comparisons with addresses into the
+ // JS stack. When running without the simulator, the address
+ // returned will be the address of the C++ try catch handler itself.
+ Address address = reinterpret_cast<Address>(
+ SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that)));
+ thread_local_top()->set_try_catch_handler_address(address);
+}
+
+
+void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
+ ASSERT(thread_local_top()->TryCatchHandler() == that);
+ thread_local_top()->set_try_catch_handler_address(
+ reinterpret_cast<Address>(that->next_));
+ thread_local_top()->catcher_ = NULL;
+ SimulatorStack::UnregisterCTryCatch();
+}
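+
+// The handlers registered above form a singly linked list threaded through
+// v8::TryCatch::next_ with the thread-local address as its head; since
+// registration pushes and unregistration pops, handlers always unwind in
+// LIFO order.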
+
+
+Handle<String> Isolate::StackTraceString() {
+ if (stack_trace_nesting_level_ == 0) {
+ stack_trace_nesting_level_++;
+ HeapStringAllocator allocator;
+ StringStream::ClearMentionedObjectCache();
+ StringStream accumulator(&allocator);
+ incomplete_message_ = &accumulator;
+ PrintStack(&accumulator);
+ Handle<String> stack_trace = accumulator.ToString();
+ incomplete_message_ = NULL;
+ stack_trace_nesting_level_ = 0;
+ return stack_trace;
+ } else if (stack_trace_nesting_level_ == 1) {
+ stack_trace_nesting_level_++;
+ OS::PrintError(
+ "\n\nAttempt to print stack while printing stack (double fault)\n");
+ OS::PrintError(
+ "If you are lucky you may find a partial stack dump on stdout.\n\n");
+ incomplete_message_->OutputToStdOut();
+ return factory()->empty_symbol();
+ } else {
+ OS::Abort();
+ // Unreachable
+ return factory()->empty_symbol();
+ }
+}
+
+
+Handle<JSArray> Isolate::CaptureCurrentStackTrace(
+ int frame_limit, StackTrace::StackTraceOptions options) {
+ // Ensure no negative values.
+ int limit = Max(frame_limit, 0);
+  Handle<JSArray> stack_trace = factory()->NewJSArray(limit);
+
+ Handle<String> column_key = factory()->LookupAsciiSymbol("column");
+ Handle<String> line_key = factory()->LookupAsciiSymbol("lineNumber");
+ Handle<String> script_key = factory()->LookupAsciiSymbol("scriptName");
+ Handle<String> name_or_source_url_key =
+ factory()->LookupAsciiSymbol("nameOrSourceURL");
+ Handle<String> script_name_or_source_url_key =
+ factory()->LookupAsciiSymbol("scriptNameOrSourceURL");
+ Handle<String> function_key = factory()->LookupAsciiSymbol("functionName");
+ Handle<String> eval_key = factory()->LookupAsciiSymbol("isEval");
+ Handle<String> constructor_key =
+ factory()->LookupAsciiSymbol("isConstructor");
+
+ StackTraceFrameIterator it(this);
+ int frames_seen = 0;
+ while (!it.done() && (frames_seen < limit)) {
+ JavaScriptFrame* frame = it.frame();
+ // Set initial size to the maximum inlining level + 1 for the outermost
+ // function.
+ List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
+ frame->Summarize(&frames);
+ for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
+ // Create a JSObject to hold the information for the StackFrame.
+ Handle<JSObject> stackFrame = factory()->NewJSObject(object_function());
+
+ Handle<JSFunction> fun = frames[i].function();
+ Handle<Script> script(Script::cast(fun->shared()->script()));
+
+ if (options & StackTrace::kLineNumber) {
+ int script_line_offset = script->line_offset()->value();
+ int position = frames[i].code()->SourcePosition(frames[i].pc());
+ int line_number = GetScriptLineNumber(script, position);
+ // line_number is already shifted by the script_line_offset.
+ int relative_line_number = line_number - script_line_offset;
+ if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
+ Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
+ int start = (relative_line_number == 0) ? 0 :
+ Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
+ int column_offset = position - start;
+ if (relative_line_number == 0) {
+ // For the case where the code is on the same line as the script
+ // tag.
+ column_offset += script->column_offset()->value();
+ }
+ SetLocalPropertyNoThrow(stackFrame, column_key,
+ Handle<Smi>(Smi::FromInt(column_offset + 1)));
+ }
+ SetLocalPropertyNoThrow(stackFrame, line_key,
+ Handle<Smi>(Smi::FromInt(line_number + 1)));
+ }
+
+ if (options & StackTrace::kScriptName) {
+ Handle<Object> script_name(script->name(), this);
+ SetLocalPropertyNoThrow(stackFrame, script_key, script_name);
+ }
+
+ if (options & StackTrace::kScriptNameOrSourceURL) {
+ Handle<Object> script_name(script->name(), this);
+ Handle<JSValue> script_wrapper = GetScriptWrapper(script);
+ Handle<Object> property = GetProperty(script_wrapper,
+ name_or_source_url_key);
+ ASSERT(property->IsJSFunction());
+ Handle<JSFunction> method = Handle<JSFunction>::cast(property);
+ bool caught_exception;
+ Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
+ NULL, &caught_exception);
+ if (caught_exception) {
+ result = factory()->undefined_value();
+ }
+ SetLocalPropertyNoThrow(stackFrame, script_name_or_source_url_key,
+ result);
+ }
+
+ if (options & StackTrace::kFunctionName) {
+ Handle<Object> fun_name(fun->shared()->name(), this);
+ if (fun_name->ToBoolean()->IsFalse()) {
+ fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
+ }
+ SetLocalPropertyNoThrow(stackFrame, function_key, fun_name);
+ }
+
+ if (options & StackTrace::kIsEval) {
+ int type = Smi::cast(script->compilation_type())->value();
+ Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
+ factory()->true_value() : factory()->false_value();
+ SetLocalPropertyNoThrow(stackFrame, eval_key, is_eval);
+ }
+
+ if (options & StackTrace::kIsConstructor) {
+ Handle<Object> is_constructor = (frames[i].is_constructor()) ?
+ factory()->true_value() : factory()->false_value();
+ SetLocalPropertyNoThrow(stackFrame, constructor_key, is_constructor);
+ }
+
+ FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
+ frames_seen++;
+ }
+ it.Advance();
+ }
+
+ stack_trace->set_length(Smi::FromInt(frames_seen));
+ return stack_trace;
+}
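+
+// Shape of one captured frame, for illustration only (the field set depends
+// on the requested options; the values here are made up):
+//
+//   { lineNumber: 3, column: 5, scriptName: "file.js",
+//     scriptNameOrSourceURL: "file.js", functionName: "f",
+//     isEval: false, isConstructor: false }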
+
+
+void Isolate::PrintStack() {
+ if (stack_trace_nesting_level_ == 0) {
+ stack_trace_nesting_level_++;
+
+ StringAllocator* allocator;
+ if (preallocated_message_space_ == NULL) {
+ allocator = new HeapStringAllocator();
+ } else {
+ allocator = preallocated_message_space_;
+ }
+
+ StringStream::ClearMentionedObjectCache();
+ StringStream accumulator(allocator);
+ incomplete_message_ = &accumulator;
+ PrintStack(&accumulator);
+ accumulator.OutputToStdOut();
+ accumulator.Log();
+ incomplete_message_ = NULL;
+ stack_trace_nesting_level_ = 0;
+ if (preallocated_message_space_ == NULL) {
+ // Remove the HeapStringAllocator created above.
+ delete allocator;
+ }
+ } else if (stack_trace_nesting_level_ == 1) {
+ stack_trace_nesting_level_++;
+ OS::PrintError(
+ "\n\nAttempt to print stack while printing stack (double fault)\n");
+ OS::PrintError(
+ "If you are lucky you may find a partial stack dump on stdout.\n\n");
+ incomplete_message_->OutputToStdOut();
+ }
+}
+
+
+static void PrintFrames(StringStream* accumulator,
+ StackFrame::PrintMode mode) {
+ StackFrameIterator it;
+ for (int i = 0; !it.done(); it.Advance()) {
+ it.frame()->Print(accumulator, mode, i++);
+ }
+}
+
+
+void Isolate::PrintStack(StringStream* accumulator) {
+ if (!IsInitialized()) {
+ accumulator->Add(
+ "\n==== Stack trace is not available ==========================\n\n");
+ accumulator->Add(
+ "\n==== Isolate for the thread is not initialized =============\n\n");
+ return;
+ }
+ // The MentionedObjectCache is not GC-proof at the moment.
+ AssertNoAllocation nogc;
+ ASSERT(StringStream::IsMentionedObjectCacheClear());
+
+ // Avoid printing anything if there are no frames.
+ if (c_entry_fp(thread_local_top()) == 0) return;
+
+ accumulator->Add(
+ "\n==== Stack trace ============================================\n\n");
+ PrintFrames(accumulator, StackFrame::OVERVIEW);
+
+ accumulator->Add(
+ "\n==== Details ================================================\n\n");
+ PrintFrames(accumulator, StackFrame::DETAILS);
+
+ accumulator->PrintMentionedObjectCache();
+ accumulator->Add("=====================\n\n");
+}
+
+
+void Isolate::SetFailedAccessCheckCallback(
+ v8::FailedAccessCheckCallback callback) {
+ thread_local_top()->failed_access_check_callback_ = callback;
+}
+
+
+void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
+ if (!thread_local_top()->failed_access_check_callback_) return;
+
+ ASSERT(receiver->IsAccessCheckNeeded());
+ ASSERT(context());
+
+ // Get the data object from access check info.
+ JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+ if (!constructor->shared()->IsApiFunction()) return;
+ Object* data_obj =
+ constructor->shared()->get_api_func_data()->access_check_info();
+ if (data_obj == heap_.undefined_value()) return;
+
+ HandleScope scope;
+ Handle<JSObject> receiver_handle(receiver);
+ Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
+ thread_local_top()->failed_access_check_callback_(
+ v8::Utils::ToLocal(receiver_handle),
+ type,
+ v8::Utils::ToLocal(data));
+}
+
+
+enum MayAccessDecision {
+ YES, NO, UNKNOWN
+};
+
+
+static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
+ JSObject* receiver,
+ v8::AccessType type) {
+ // During bootstrapping, callback functions are not enabled yet.
+ if (isolate->bootstrapper()->IsActive()) return YES;
+
+ if (receiver->IsJSGlobalProxy()) {
+ Object* receiver_context = JSGlobalProxy::cast(receiver)->context();
+ if (!receiver_context->IsContext()) return NO;
+
+    // Get the global context of the current top context. Avoid using
+    // Isolate::global_context() here because it uses a Handle.
+ Context* global_context = isolate->context()->global()->global_context();
+ if (receiver_context == global_context) return YES;
+
+ if (Context::cast(receiver_context)->security_token() ==
+ global_context->security_token())
+ return YES;
+ }
+
+ return UNKNOWN;
+}
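+
+// Summary of the pre-check above:
+//   bootstrapping active                    -> YES (no callbacks yet)
+//   global proxy with a detached context    -> NO
+//   same global context or matching token   -> YES
+//   anything else                           -> UNKNOWN (ask the callbacks)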
+
+
+bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
+ v8::AccessType type) {
+ ASSERT(receiver->IsAccessCheckNeeded());
+
+ // The callers of this method are not expecting a GC.
+ AssertNoAllocation no_gc;
+
+ // Skip checks for hidden properties access. Note, we do not
+ // require existence of a context in this case.
+ if (key == heap_.hidden_symbol()) return true;
+
+ // Check for compatibility between the security tokens in the
+ // current lexical context and the accessed object.
+ ASSERT(context());
+
+ MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
+ if (decision != UNKNOWN) return decision == YES;
+
+ // Get named access check callback
+ JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+ if (!constructor->shared()->IsApiFunction()) return false;
+
+ Object* data_obj =
+ constructor->shared()->get_api_func_data()->access_check_info();
+ if (data_obj == heap_.undefined_value()) return false;
+
+ Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
+ v8::NamedSecurityCallback callback =
+ v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
+
+ if (!callback) return false;
+
+ HandleScope scope(this);
+ Handle<JSObject> receiver_handle(receiver, this);
+ Handle<Object> key_handle(key, this);
+ Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
+ LOG(this, ApiNamedSecurityCheck(key));
+ bool result = false;
+ {
+ // Leaving JavaScript.
+ VMState state(this, EXTERNAL);
+ result = callback(v8::Utils::ToLocal(receiver_handle),
+ v8::Utils::ToLocal(key_handle),
+ type,
+ v8::Utils::ToLocal(data));
+ }
+ return result;
+}
+
+
+bool Isolate::MayIndexedAccess(JSObject* receiver,
+ uint32_t index,
+ v8::AccessType type) {
+ ASSERT(receiver->IsAccessCheckNeeded());
+ // Check for compatibility between the security tokens in the
+ // current lexical context and the accessed object.
+ ASSERT(context());
+
+ MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
+ if (decision != UNKNOWN) return decision == YES;
+
+ // Get indexed access check callback
+ JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+ if (!constructor->shared()->IsApiFunction()) return false;
+
+ Object* data_obj =
+ constructor->shared()->get_api_func_data()->access_check_info();
+ if (data_obj == heap_.undefined_value()) return false;
+
+ Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
+ v8::IndexedSecurityCallback callback =
+ v8::ToCData<v8::IndexedSecurityCallback>(fun_obj);
+
+ if (!callback) return false;
+
+ HandleScope scope(this);
+ Handle<JSObject> receiver_handle(receiver, this);
+ Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
+ LOG(this, ApiIndexedSecurityCheck(index));
+ bool result = false;
+ {
+ // Leaving JavaScript.
+ VMState state(this, EXTERNAL);
+ result = callback(v8::Utils::ToLocal(receiver_handle),
+ index,
+ type,
+ v8::Utils::ToLocal(data));
+ }
+ return result;
+}
+
+
+const char* const Isolate::kStackOverflowMessage =
+ "Uncaught RangeError: Maximum call stack size exceeded";
+
+
+Failure* Isolate::StackOverflow() {
+ HandleScope scope;
+ Handle<String> key = factory()->stack_overflow_symbol();
+ Handle<JSObject> boilerplate =
+ Handle<JSObject>::cast(GetProperty(js_builtins_object(), key));
+ Handle<Object> exception = Copy(boilerplate);
+ // TODO(1240995): To avoid having to call JavaScript code to compute
+  // the message for stack overflow exceptions, which is very likely to
+ // double fault with another stack overflow exception, we use a
+ // precomputed message.
+ DoThrow(*exception, NULL);
+ return Failure::Exception();
+}
+
+
+Failure* Isolate::TerminateExecution() {
+ DoThrow(heap_.termination_exception(), NULL);
+ return Failure::Exception();
+}
+
+
+Failure* Isolate::Throw(Object* exception, MessageLocation* location) {
+ DoThrow(exception, location);
+ return Failure::Exception();
+}
+
+
+Failure* Isolate::ReThrow(MaybeObject* exception, MessageLocation* location) {
+ bool can_be_caught_externally = false;
+ bool catchable_by_javascript = is_catchable_by_javascript(exception);
+ ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
+
+ thread_local_top()->catcher_ = can_be_caught_externally ?
+ try_catch_handler() : NULL;
+
+ // Set the exception being re-thrown.
+ set_pending_exception(exception);
+ if (exception->IsFailure()) return exception->ToFailureUnchecked();
+ return Failure::Exception();
+}
+
+
+Failure* Isolate::ThrowIllegalOperation() {
+ return Throw(heap_.illegal_access_symbol());
+}
+
+
+void Isolate::ScheduleThrow(Object* exception) {
+ // When scheduling a throw we first throw the exception to get the
+  // error reporting if it is uncaught, before rescheduling it.
+ Throw(exception);
+ thread_local_top()->scheduled_exception_ = pending_exception();
+ thread_local_top()->external_caught_exception_ = false;
+ clear_pending_exception();
+}
+
+
+Failure* Isolate::PromoteScheduledException() {
+ MaybeObject* thrown = scheduled_exception();
+ clear_scheduled_exception();
+ // Re-throw the exception to avoid getting repeated error reporting.
+ return ReThrow(thrown);
+}
+
+
+void Isolate::PrintCurrentStackTrace(FILE* out) {
+ StackTraceFrameIterator it(this);
+ while (!it.done()) {
+ HandleScope scope;
+ // Find code position if recorded in relocation info.
+ JavaScriptFrame* frame = it.frame();
+ int pos = frame->LookupCode()->SourcePosition(frame->pc());
+ Handle<Object> pos_obj(Smi::FromInt(pos));
+ // Fetch function and receiver.
+ Handle<JSFunction> fun(JSFunction::cast(frame->function()));
+ Handle<Object> recv(frame->receiver());
+ // Advance to the next JavaScript frame and determine if the
+ // current frame is the top-level frame.
+ it.Advance();
+ Handle<Object> is_top_level = it.done()
+ ? factory()->true_value()
+ : factory()->false_value();
+ // Generate and print stack trace line.
+ Handle<String> line =
+ Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
+ if (line->length() > 0) {
+ line->PrintOn(out);
+ fprintf(out, "\n");
+ }
+ }
+}
+
+
+void Isolate::ComputeLocation(MessageLocation* target) {
+ *target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);
+ StackTraceFrameIterator it(this);
+ if (!it.done()) {
+ JavaScriptFrame* frame = it.frame();
+ JSFunction* fun = JSFunction::cast(frame->function());
+ Object* script = fun->shared()->script();
+ if (script->IsScript() &&
+ !(Script::cast(script)->source()->IsUndefined())) {
+ int pos = frame->LookupCode()->SourcePosition(frame->pc());
+ // Compute the location from the function and the reloc info.
+ Handle<Script> casted_script(Script::cast(script));
+ *target = MessageLocation(casted_script, pos, pos + 1);
+ }
+ }
+}
+
+
+bool Isolate::ShouldReportException(bool* can_be_caught_externally,
+ bool catchable_by_javascript) {
+ // Find the top-most try-catch handler.
+ StackHandler* handler =
+ StackHandler::FromAddress(Isolate::handler(thread_local_top()));
+ while (handler != NULL && !handler->is_try_catch()) {
+ handler = handler->next();
+ }
+
+ // Get the address of the external handler so we can compare the address to
+ // determine which one is closer to the top of the stack.
+ Address external_handler_address =
+ thread_local_top()->try_catch_handler_address();
+
+ // The exception has been externally caught if and only if there is
+ // an external handler which is on top of the top-most try-catch
+ // handler.
+ *can_be_caught_externally = external_handler_address != NULL &&
+ (handler == NULL || handler->address() > external_handler_address ||
+ !catchable_by_javascript);
+
+ if (*can_be_caught_externally) {
+ // Only report the exception if the external handler is verbose.
+ return try_catch_handler()->is_verbose_;
+ } else {
+ // Report the exception if it isn't caught by JavaScript code.
+ return handler == NULL;
+ }
+}
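+
+// In short: the exception can be caught externally when a live v8::TryCatch
+// sits above the top-most JS try-catch handler (or when JavaScript cannot
+// catch the exception at all); whether it is reported then depends on the
+// TryCatch being verbose, or otherwise on no JS handler existing.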
+
+
+void Isolate::DoThrow(MaybeObject* exception, MessageLocation* location) {
+ ASSERT(!has_pending_exception());
+
+ HandleScope scope;
+ Object* exception_object = Smi::FromInt(0);
+ bool is_object = exception->ToObject(&exception_object);
+ Handle<Object> exception_handle(exception_object);
+
+ // Determine reporting and whether the exception is caught externally.
+ bool catchable_by_javascript = is_catchable_by_javascript(exception);
+ // Only real objects can be caught by JS.
+ ASSERT(!catchable_by_javascript || is_object);
+ bool can_be_caught_externally = false;
+ bool should_report_exception =
+ ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
+ bool report_exception = catchable_by_javascript && should_report_exception;
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Notify debugger of exception.
+ if (catchable_by_javascript) {
+ debugger_->OnException(exception_handle, report_exception);
+ }
+#endif
+
+ // Generate the message.
+ Handle<Object> message_obj;
+ MessageLocation potential_computed_location;
+ bool try_catch_needs_message =
+ can_be_caught_externally &&
+ try_catch_handler()->capture_message_;
+ if (report_exception || try_catch_needs_message) {
+ if (location == NULL) {
+      // If no location was specified, we use a computed one instead.
+ ComputeLocation(&potential_computed_location);
+ location = &potential_computed_location;
+ }
+ if (!bootstrapper()->IsActive()) {
+ // It's not safe to try to make message objects or collect stack
+ // traces while the bootstrapper is active since the infrastructure
+ // may not have been properly initialized.
+ Handle<String> stack_trace;
+ if (FLAG_trace_exception) stack_trace = StackTraceString();
+ Handle<JSArray> stack_trace_object;
+ if (report_exception && capture_stack_trace_for_uncaught_exceptions_) {
+ stack_trace_object = CaptureCurrentStackTrace(
+ stack_trace_for_uncaught_exceptions_frame_limit_,
+ stack_trace_for_uncaught_exceptions_options_);
+ }
+ ASSERT(is_object); // Can't use the handle unless there's a real object.
+ message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
+ location, HandleVector<Object>(&exception_handle, 1), stack_trace,
+ stack_trace_object);
+ }
+ }
+
+  // Save the message for reporting if the exception remains uncaught.
+ thread_local_top()->has_pending_message_ = report_exception;
+ if (!message_obj.is_null()) {
+ thread_local_top()->pending_message_obj_ = *message_obj;
+ if (location != NULL) {
+ thread_local_top()->pending_message_script_ = *location->script();
+ thread_local_top()->pending_message_start_pos_ = location->start_pos();
+ thread_local_top()->pending_message_end_pos_ = location->end_pos();
+ }
+ }
+
+  // Do not forget to clear catcher_ if the currently thrown exception cannot
+ // be caught. If necessary, ReThrow will update the catcher.
+ thread_local_top()->catcher_ = can_be_caught_externally ?
+ try_catch_handler() : NULL;
+
+ // NOTE: Notifying the debugger or generating the message
+ // may have caused new exceptions. For now, we just ignore
+ // that and set the pending exception to the original one.
+ if (is_object) {
+ set_pending_exception(*exception_handle);
+ } else {
+ // Failures are not on the heap so they neither need nor work with handles.
+ ASSERT(exception_handle->IsFailure());
+ set_pending_exception(exception);
+ }
+}
+
+
+bool Isolate::IsExternallyCaught() {
+ ASSERT(has_pending_exception());
+
+ if ((thread_local_top()->catcher_ == NULL) ||
+ (try_catch_handler() != thread_local_top()->catcher_)) {
+ // When throwing the exception, we found no v8::TryCatch
+ // which should care about this exception.
+ return false;
+ }
+
+ if (!is_catchable_by_javascript(pending_exception())) {
+ return true;
+ }
+
+ // Get the address of the external handler so we can compare the address to
+ // determine which one is closer to the top of the stack.
+ Address external_handler_address =
+ thread_local_top()->try_catch_handler_address();
+ ASSERT(external_handler_address != NULL);
+
+ // The exception has been externally caught if and only if there is
+ // an external handler which is on top of the top-most try-finally
+ // handler.
+ // There should be no try-catch blocks as they would prohibit us from
+ // finding external catcher in the first place (see catcher_ check above).
+ //
+  // Note that a finally clause would rethrow the exception unless it's
+  // aborted by a jump in control flow (return, break, etc.), in which case
+  // we'll have another chance to set a proper v8::TryCatch.
+ StackHandler* handler =
+ StackHandler::FromAddress(Isolate::handler(thread_local_top()));
+ while (handler != NULL && handler->address() < external_handler_address) {
+ ASSERT(!handler->is_try_catch());
+ if (handler->is_try_finally()) return false;
+
+ handler = handler->next();
+ }
+
+ return true;
+}
+
+
+void Isolate::ReportPendingMessages() {
+ ASSERT(has_pending_exception());
+ PropagatePendingExceptionToExternalTryCatch();
+
+  // If the pending exception is OutOfMemoryException, set out_of_memory in
+ // the global context. Note: We have to mark the global context here
+ // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
+ // set it.
+ HandleScope scope;
+ if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) {
+ context()->mark_out_of_memory();
+ } else if (thread_local_top_.pending_exception_ ==
+ heap()->termination_exception()) {
+ // Do nothing: if needed, the exception has been already propagated to
+ // v8::TryCatch.
+ } else {
+ if (thread_local_top_.has_pending_message_) {
+ thread_local_top_.has_pending_message_ = false;
+ if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
+ HandleScope scope;
+ Handle<Object> message_obj(thread_local_top_.pending_message_obj_);
+ if (thread_local_top_.pending_message_script_ != NULL) {
+ Handle<Script> script(thread_local_top_.pending_message_script_);
+ int start_pos = thread_local_top_.pending_message_start_pos_;
+ int end_pos = thread_local_top_.pending_message_end_pos_;
+ MessageLocation location(script, start_pos, end_pos);
+ MessageHandler::ReportMessage(this, &location, message_obj);
+ } else {
+ MessageHandler::ReportMessage(this, NULL, message_obj);
+ }
+ }
+ }
+ }
+ clear_pending_message();
+}
+
+
+void Isolate::TraceException(bool flag) {
+ FLAG_trace_exception = flag; // TODO(isolates): This is an unfortunate use.
+}
+
+
+bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
+ ASSERT(has_pending_exception());
+ PropagatePendingExceptionToExternalTryCatch();
+
+  // Always reschedule out-of-memory exceptions.
+ if (!is_out_of_memory()) {
+ bool is_termination_exception =
+ pending_exception() == heap_.termination_exception();
+
+ // Do not reschedule the exception if this is the bottom call.
+ bool clear_exception = is_bottom_call;
+
+ if (is_termination_exception) {
+ if (is_bottom_call) {
+ thread_local_top()->external_caught_exception_ = false;
+ clear_pending_exception();
+ return false;
+ }
+ } else if (thread_local_top()->external_caught_exception_) {
+ // If the exception is externally caught, clear it if there are no
+ // JavaScript frames on the way to the C++ frame that has the
+ // external handler.
+ ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
+ Address external_handler_address =
+ thread_local_top()->try_catch_handler_address();
+ JavaScriptFrameIterator it;
+ if (it.done() || (it.frame()->sp() > external_handler_address)) {
+ clear_exception = true;
+ }
+ }
+
+ // Clear the exception if needed.
+ if (clear_exception) {
+ thread_local_top()->external_caught_exception_ = false;
+ clear_pending_exception();
+ return false;
+ }
+ }
+
+ // Reschedule the exception.
+ thread_local_top()->scheduled_exception_ = pending_exception();
+ clear_pending_exception();
+ return true;
+}
+
+
+void Isolate::SetCaptureStackTraceForUncaughtExceptions(
+ bool capture,
+ int frame_limit,
+ StackTrace::StackTraceOptions options) {
+ capture_stack_trace_for_uncaught_exceptions_ = capture;
+ stack_trace_for_uncaught_exceptions_frame_limit_ = frame_limit;
+ stack_trace_for_uncaught_exceptions_options_ = options;
+}
+
+
+bool Isolate::is_out_of_memory() {
+ if (has_pending_exception()) {
+ MaybeObject* e = pending_exception();
+ if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
+ return true;
+ }
+ }
+ if (has_scheduled_exception()) {
+ MaybeObject* e = scheduled_exception();
+ if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+Handle<Context> Isolate::global_context() {
+ GlobalObject* global = thread_local_top()->context_->global();
+ return Handle<Context>(global->global_context());
+}
+
+
+Handle<Context> Isolate::GetCallingGlobalContext() {
+ JavaScriptFrameIterator it;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (debug_->InDebugger()) {
+ while (!it.done()) {
+ JavaScriptFrame* frame = it.frame();
+ Context* context = Context::cast(frame->context());
+ if (context->global_context() == *debug_->debug_context()) {
+ it.Advance();
+ } else {
+ break;
+ }
+ }
+ }
+#endif // ENABLE_DEBUGGER_SUPPORT
+ if (it.done()) return Handle<Context>::null();
+ JavaScriptFrame* frame = it.frame();
+ Context* context = Context::cast(frame->context());
+ return Handle<Context>(context->global_context());
+}
+
+
+char* Isolate::ArchiveThread(char* to) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
+ RuntimeProfiler::IsolateExitedJS(this);
+ }
+#endif
+ memcpy(to, reinterpret_cast<char*>(thread_local_top()),
+ sizeof(ThreadLocalTop));
+ InitializeThreadLocal();
+ return to + sizeof(ThreadLocalTop);
+}
+
+
+char* Isolate::RestoreThread(char* from) {
+ memcpy(reinterpret_cast<char*>(thread_local_top()), from,
+ sizeof(ThreadLocalTop));
+  // This might be just paranoia, but it seems to be needed in case
+  // thread_local_top_ is restored on a separate OS thread.
+#ifdef USE_SIMULATOR
+#ifdef V8_TARGET_ARCH_ARM
+ thread_local_top()->simulator_ = Simulator::current(this);
+#elif V8_TARGET_ARCH_MIPS
+ thread_local_top()->simulator_ = Simulator::current(this);
+#endif
+#endif
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
+ RuntimeProfiler::IsolateEnteredJS(this);
+ }
+ ASSERT(context() == NULL || context()->IsContext());
+#endif
+ return from + sizeof(ThreadLocalTop);
+}
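+
+// ArchiveThread and RestoreThread treat the per-thread state as one flat
+// blob of sizeof(ThreadLocalTop) bytes, which is why both simply advance
+// the caller's cursor by that amount.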
+
+
+Isolate::ThreadDataTable::ThreadDataTable()
+ : list_(NULL) {
+}
+
+
+Isolate::PerIsolateThreadData*
+ Isolate::ThreadDataTable::Lookup(Isolate* isolate,
+ ThreadId thread_id) {
+ for (PerIsolateThreadData* data = list_; data != NULL; data = data->next_) {
+ if (data->Matches(isolate, thread_id)) return data;
+ }
+ return NULL;
+}
+
+
+void Isolate::ThreadDataTable::Insert(Isolate::PerIsolateThreadData* data) {
+ if (list_ != NULL) list_->prev_ = data;
+ data->next_ = list_;
+ list_ = data;
+}
+
+
+void Isolate::ThreadDataTable::Remove(PerIsolateThreadData* data) {
+ if (list_ == data) list_ = data->next_;
+ if (data->next_ != NULL) data->next_->prev_ = data->prev_;
+ if (data->prev_ != NULL) data->prev_->next_ = data->next_;
+}
+
+
+void Isolate::ThreadDataTable::Remove(Isolate* isolate,
+ ThreadId thread_id) {
+ PerIsolateThreadData* data = Lookup(isolate, thread_id);
+ if (data != NULL) {
+ Remove(data);
+ }
+}
+
+
+void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
+ PerIsolateThreadData* data = list_;
+ while (data != NULL) {
+ PerIsolateThreadData* next = data->next_;
+ if (data->isolate() == isolate) Remove(data);
+ data = next;
+ }
+}
+
+
+#ifdef DEBUG
+#define TRACE_ISOLATE(tag) \
+ do { \
+ if (FLAG_trace_isolates) { \
+ PrintF("Isolate %p " #tag "\n", reinterpret_cast<void*>(this)); \
+ } \
+ } while (false)
+#else
+#define TRACE_ISOLATE(tag)
+#endif
+
+
+Isolate::Isolate()
+ : state_(UNINITIALIZED),
+ entry_stack_(NULL),
+ stack_trace_nesting_level_(0),
+ incomplete_message_(NULL),
+ preallocated_memory_thread_(NULL),
+ preallocated_message_space_(NULL),
+ bootstrapper_(NULL),
+ runtime_profiler_(NULL),
+ compilation_cache_(NULL),
+ counters_(new Counters()),
+ code_range_(NULL),
+ break_access_(OS::CreateMutex()),
+ logger_(new Logger()),
+ stats_table_(new StatsTable()),
+ stub_cache_(NULL),
+ deoptimizer_data_(NULL),
+ capture_stack_trace_for_uncaught_exceptions_(false),
+ stack_trace_for_uncaught_exceptions_frame_limit_(0),
+ stack_trace_for_uncaught_exceptions_options_(StackTrace::kOverview),
+ transcendental_cache_(NULL),
+ memory_allocator_(NULL),
+ keyed_lookup_cache_(NULL),
+ context_slot_cache_(NULL),
+ descriptor_lookup_cache_(NULL),
+ handle_scope_implementer_(NULL),
+ unicode_cache_(NULL),
+ in_use_list_(0),
+ free_list_(0),
+ preallocated_storage_preallocated_(false),
+ pc_to_code_cache_(NULL),
+ write_input_buffer_(NULL),
+ global_handles_(NULL),
+ context_switcher_(NULL),
+ thread_manager_(NULL),
+ ast_sentinels_(NULL),
+ string_tracker_(NULL),
+ regexp_stack_(NULL),
+ embedder_data_(NULL) {
+ TRACE_ISOLATE(constructor);
+
+ memset(isolate_addresses_, 0,
+ sizeof(isolate_addresses_[0]) * (k_isolate_address_count + 1));
+
+ heap_.isolate_ = this;
+ zone_.isolate_ = this;
+ stack_guard_.isolate_ = this;
+
+ // ThreadManager is initialized early to support locking an isolate
+ // before it is entered.
+ thread_manager_ = new ThreadManager();
+ thread_manager_->isolate_ = this;
+
+#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
+ defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
+ simulator_initialized_ = false;
+ simulator_i_cache_ = NULL;
+ simulator_redirection_ = NULL;
+#endif
+
+#ifdef DEBUG
+ // heap_histograms_ initializes itself.
+ memset(&js_spill_information_, 0, sizeof(js_spill_information_));
+ memset(code_kind_statistics_, 0,
+ sizeof(code_kind_statistics_[0]) * Code::NUMBER_OF_KINDS);
+#endif
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ debug_ = NULL;
+ debugger_ = NULL;
+#endif
+
+ handle_scope_data_.Initialize();
+
+#define ISOLATE_INIT_EXECUTE(type, name, initial_value) \
+ name##_ = (initial_value);
+ ISOLATE_INIT_LIST(ISOLATE_INIT_EXECUTE)
+#undef ISOLATE_INIT_EXECUTE
+
+#define ISOLATE_INIT_ARRAY_EXECUTE(type, name, length) \
+ memset(name##_, 0, sizeof(type) * length);
+ ISOLATE_INIT_ARRAY_LIST(ISOLATE_INIT_ARRAY_EXECUTE)
+#undef ISOLATE_INIT_ARRAY_EXECUTE
+}
+
+void Isolate::TearDown() {
+ TRACE_ISOLATE(tear_down);
+
+ // Temporarily set this isolate as current so that various parts of
+ // the isolate can access it in their destructors without having a
+ // direct pointer. We don't use Enter/Exit here to avoid
+ // initializing the thread data.
+ PerIsolateThreadData* saved_data = CurrentPerIsolateThreadData();
+ Isolate* saved_isolate = UncheckedCurrent();
+ SetIsolateThreadLocals(this, NULL);
+
+ Deinit();
+
+ { ScopedLock lock(process_wide_mutex_);
+ thread_data_table_->RemoveAllThreads(this);
+ }
+
+ if (!IsDefaultIsolate()) {
+ delete this;
+ }
+
+ // Restore the previous current isolate.
+ SetIsolateThreadLocals(saved_isolate, saved_data);
+}
+
+
+void Isolate::Deinit() {
+ if (state_ == INITIALIZED) {
+ TRACE_ISOLATE(deinit);
+
+ if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();
+
+ // We must stop the logger before we tear down other components.
+ logger_->EnsureTickerStopped();
+
+ delete deoptimizer_data_;
+ deoptimizer_data_ = NULL;
+ if (FLAG_preemption) {
+ v8::Locker locker;
+ v8::Locker::StopPreemption();
+ }
+ builtins_.TearDown();
+ bootstrapper_->TearDown();
+
+ // Remove the external reference to the preallocated stack memory.
+ delete preallocated_message_space_;
+ preallocated_message_space_ = NULL;
+ PreallocatedMemoryThreadStop();
+
+ HeapProfiler::TearDown();
+ CpuProfiler::TearDown();
+ if (runtime_profiler_ != NULL) {
+ runtime_profiler_->TearDown();
+ delete runtime_profiler_;
+ runtime_profiler_ = NULL;
+ }
+ heap_.TearDown();
+ logger_->TearDown();
+
+    // The default isolate is re-initializable due to the legacy API.
+ state_ = PREINITIALIZED;
+ }
+}
+
+
+void Isolate::SetIsolateThreadLocals(Isolate* isolate,
+ PerIsolateThreadData* data) {
+ Thread::SetThreadLocal(isolate_key_, isolate);
+ Thread::SetThreadLocal(per_isolate_thread_data_key_, data);
+}
+
+
+Isolate::~Isolate() {
+ TRACE_ISOLATE(destructor);
+
+ delete unicode_cache_;
+ unicode_cache_ = NULL;
+
+ delete regexp_stack_;
+ regexp_stack_ = NULL;
+
+ delete ast_sentinels_;
+ ast_sentinels_ = NULL;
+
+ delete descriptor_lookup_cache_;
+ descriptor_lookup_cache_ = NULL;
+ delete context_slot_cache_;
+ context_slot_cache_ = NULL;
+ delete keyed_lookup_cache_;
+ keyed_lookup_cache_ = NULL;
+
+ delete transcendental_cache_;
+ transcendental_cache_ = NULL;
+ delete stub_cache_;
+ stub_cache_ = NULL;
+ delete stats_table_;
+ stats_table_ = NULL;
+
+ delete logger_;
+ logger_ = NULL;
+
+ delete counters_;
+ counters_ = NULL;
+
+ delete handle_scope_implementer_;
+ handle_scope_implementer_ = NULL;
+ delete break_access_;
+ break_access_ = NULL;
+
+ delete compilation_cache_;
+ compilation_cache_ = NULL;
+ delete bootstrapper_;
+ bootstrapper_ = NULL;
+ delete pc_to_code_cache_;
+ pc_to_code_cache_ = NULL;
+ delete write_input_buffer_;
+ write_input_buffer_ = NULL;
+
+ delete context_switcher_;
+ context_switcher_ = NULL;
+ delete thread_manager_;
+ thread_manager_ = NULL;
+
+ delete string_tracker_;
+ string_tracker_ = NULL;
+
+ delete memory_allocator_;
+ memory_allocator_ = NULL;
+ delete code_range_;
+ code_range_ = NULL;
+ delete global_handles_;
+ global_handles_ = NULL;
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ delete debugger_;
+ debugger_ = NULL;
+ delete debug_;
+ debug_ = NULL;
+#endif
+}
+
+
+bool Isolate::PreInit() {
+ if (state_ != UNINITIALIZED) return true;
+
+ TRACE_ISOLATE(preinit);
+
+ ASSERT(Isolate::Current() == this);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ debug_ = new Debug(this);
+ debugger_ = new Debugger(this);
+#endif
+
+ memory_allocator_ = new MemoryAllocator();
+ memory_allocator_->isolate_ = this;
+ code_range_ = new CodeRange();
+ code_range_->isolate_ = this;
+
+ // Safe after setting Heap::isolate_, initializing StackGuard and
+ // ensuring that Isolate::Current() == this.
+ heap_.SetStackLimits();
+
+#ifdef DEBUG
+ DisallowAllocationFailure disallow_allocation_failure;
+#endif
+
+#define C(name) isolate_addresses_[Isolate::k_##name] = \
+ reinterpret_cast<Address>(name());
+ ISOLATE_ADDRESS_LIST(C)
+ ISOLATE_ADDRESS_LIST_PROF(C)
+#undef C
+
+ string_tracker_ = new StringTracker();
+ string_tracker_->isolate_ = this;
+ compilation_cache_ = new CompilationCache(this);
+ transcendental_cache_ = new TranscendentalCache();
+ keyed_lookup_cache_ = new KeyedLookupCache();
+ context_slot_cache_ = new ContextSlotCache();
+ descriptor_lookup_cache_ = new DescriptorLookupCache();
+ unicode_cache_ = new UnicodeCache();
+ pc_to_code_cache_ = new PcToCodeCache(this);
+ write_input_buffer_ = new StringInputBuffer();
+ global_handles_ = new GlobalHandles(this);
+ bootstrapper_ = new Bootstrapper();
+ handle_scope_implementer_ = new HandleScopeImplementer(this);
+ stub_cache_ = new StubCache(this);
+ ast_sentinels_ = new AstSentinels();
+ regexp_stack_ = new RegExpStack();
+ regexp_stack_->isolate_ = this;
+
+ state_ = PREINITIALIZED;
+ return true;
+}
+
+
+void Isolate::InitializeThreadLocal() {
+ thread_local_top_.isolate_ = this;
+ thread_local_top_.Initialize();
+ clear_pending_exception();
+ clear_pending_message();
+ clear_scheduled_exception();
+}
+
+
+void Isolate::PropagatePendingExceptionToExternalTryCatch() {
+ ASSERT(has_pending_exception());
+
+ bool external_caught = IsExternallyCaught();
+ thread_local_top_.external_caught_exception_ = external_caught;
+
+ if (!external_caught) return;
+
+ if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) {
+    // Do not propagate the OOM exception: we should kill the VM ASAP.
+ } else if (thread_local_top_.pending_exception_ ==
+ heap()->termination_exception()) {
+ try_catch_handler()->can_continue_ = false;
+ try_catch_handler()->exception_ = heap()->null_value();
+ } else {
+ // At this point all non-object (failure) exceptions have
+ // been dealt with so this shouldn't fail.
+ ASSERT(!pending_exception()->IsFailure());
+ try_catch_handler()->can_continue_ = true;
+ try_catch_handler()->exception_ = pending_exception();
+ if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
+ try_catch_handler()->message_ = thread_local_top_.pending_message_obj_;
+ }
+ }
+}
+
+
+bool Isolate::Init(Deserializer* des) {
+ ASSERT(state_ != INITIALIZED);
+
+ TRACE_ISOLATE(init);
+
+ bool create_heap_objects = des == NULL;
+
+#ifdef DEBUG
+ // The initialization process does not handle memory exhaustion.
+ DisallowAllocationFailure disallow_allocation_failure;
+#endif
+
+ if (state_ == UNINITIALIZED && !PreInit()) return false;
+
+ // Enable logging before setting up the heap
+ logger_->Setup();
+
+ CpuProfiler::Setup();
+ HeapProfiler::Setup();
+
+ // Initialize other runtime facilities
+#if defined(USE_SIMULATOR)
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
+ Simulator::Initialize(this);
+#endif
+#endif
+
+ { // NOLINT
+ // Ensure that the thread has a valid stack guard. The v8::Locker object
+ // will ensure this too, but we don't have to use lockers if we are only
+ // using one thread.
+ ExecutionAccess lock(this);
+ stack_guard_.InitThread(lock);
+ }
+
+ // Setup the object heap
+ ASSERT(!heap_.HasBeenSetup());
+ if (!heap_.Setup(create_heap_objects)) {
+ V8::SetFatalError();
+ return false;
+ }
+
+ bootstrapper_->Initialize(create_heap_objects);
+ builtins_.Setup(create_heap_objects);
+
+ InitializeThreadLocal();
+
+ // Only preallocate on the first initialization.
+ if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
+ // Start the thread which will set aside some memory.
+ PreallocatedMemoryThreadStart();
+ preallocated_message_space_ =
+ new NoAllocationStringAllocator(
+ preallocated_memory_thread_->data(),
+ preallocated_memory_thread_->length());
+ PreallocatedStorageInit(preallocated_memory_thread_->length() / 4);
+ }
+
+ if (FLAG_preemption) {
+ v8::Locker locker;
+ v8::Locker::StartPreemption(100);
+ }
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ debug_->Setup(create_heap_objects);
+#endif
+ stub_cache_->Initialize(create_heap_objects);
+
+ // If we are deserializing, read the state into the now-empty heap.
+ if (des != NULL) {
+ des->Deserialize();
+ stub_cache_->Clear();
+ }
+
+ // Deserializing may put strange things in the root array's copy of the
+ // stack guard.
+ heap_.SetStackLimits();
+
+ deoptimizer_data_ = new DeoptimizerData;
+ runtime_profiler_ = new RuntimeProfiler(this);
+ runtime_profiler_->Setup();
+
+ // If we are deserializing, log non-function code objects and compiled
+ // functions found in the snapshot.
+ if (des != NULL && (FLAG_log_code || FLAG_ll_prof)) {
+ HandleScope scope;
+ LOG(this, LogCodeObjects());
+ LOG(this, LogCompiledFunctions());
+ }
+
+ state_ = INITIALIZED;
+ return true;
+}
+
+
+void Isolate::Enter() {
+ Isolate* current_isolate = NULL;
+ PerIsolateThreadData* current_data = CurrentPerIsolateThreadData();
+ if (current_data != NULL) {
+ current_isolate = current_data->isolate_;
+ ASSERT(current_isolate != NULL);
+ if (current_isolate == this) {
+ ASSERT(Current() == this);
+ ASSERT(entry_stack_ != NULL);
+ ASSERT(entry_stack_->previous_thread_data == NULL ||
+ entry_stack_->previous_thread_data->thread_id().Equals(
+ ThreadId::Current()));
+ // Same thread re-enters the isolate, no need to re-init anything.
+ entry_stack_->entry_count++;
+ return;
+ }
+ }
+
+  // Threads can have the default isolate set into TLS as Current but not yet
+  // have PerIsolateThreadData for it, as that requires a more advanced phase
+  // of initialization. For example, a thread might be the one the system used
+  // for static initializers - in this case the default isolate is set in TLS
+  // but the thread has not yet entered the isolate. If PerIsolateThreadData
+  // is not there, use the isolate set in TLS.
+ if (current_isolate == NULL) {
+ current_isolate = Isolate::UncheckedCurrent();
+ }
+
+ PerIsolateThreadData* data = FindOrAllocatePerThreadDataForThisThread();
+ ASSERT(data != NULL);
+ ASSERT(data->isolate_ == this);
+
+ EntryStackItem* item = new EntryStackItem(current_data,
+ current_isolate,
+ entry_stack_);
+ entry_stack_ = item;
+
+ SetIsolateThreadLocals(this, data);
+
+ CHECK(PreInit());
+
+ // In case it's the first time some thread enters the isolate.
+ set_thread_id(data->thread_id());
+}
+
+
+void Isolate::Exit() {
+ ASSERT(entry_stack_ != NULL);
+ ASSERT(entry_stack_->previous_thread_data == NULL ||
+ entry_stack_->previous_thread_data->thread_id().Equals(
+ ThreadId::Current()));
+
+ if (--entry_stack_->entry_count > 0) return;
+
+ ASSERT(CurrentPerIsolateThreadData() != NULL);
+ ASSERT(CurrentPerIsolateThreadData()->isolate_ == this);
+
+ // Pop the stack.
+ EntryStackItem* item = entry_stack_;
+ entry_stack_ = item->previous_item;
+
+ PerIsolateThreadData* previous_thread_data = item->previous_thread_data;
+ Isolate* previous_isolate = item->previous_isolate;
+
+ delete item;
+
+ // Reinit the current thread for the isolate it was running before this one.
+ SetIsolateThreadLocals(previous_isolate, previous_thread_data);
+}
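+
+// Illustrative nesting behaviour (hypothetical sequence on one thread):
+//
+//   isolate->Enter();  // pushes an EntryStackItem, entry_count == 1
+//   isolate->Enter();  // same isolate again: just entry_count == 2
+//   isolate->Exit();   // entry_count == 1, thread stays in the isolate
+//   isolate->Exit();   // pops the item, restores the previous isolate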
+
+
+void Isolate::ResetEagerOptimizingData() {
+ compilation_cache_->ResetEagerOptimizingData();
+}
+
+
+#ifdef DEBUG
+#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
+const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
+ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
+ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
+#undef ISOLATE_FIELD_OFFSET
+#endif
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
new file mode 100644
index 000000000..c623d433b
--- /dev/null
+++ b/deps/v8/src/isolate.h
@@ -0,0 +1,1363 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ISOLATE_H_
+#define V8_ISOLATE_H_
+
+#include "../include/v8-debug.h"
+#include "allocation.h"
+#include "apiutils.h"
+#include "atomicops.h"
+#include "builtins.h"
+#include "contexts.h"
+#include "execution.h"
+#include "frames.h"
+#include "global-handles.h"
+#include "handles.h"
+#include "heap.h"
+#include "regexp-stack.h"
+#include "runtime-profiler.h"
+#include "runtime.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+class AstSentinels;
+class Bootstrapper;
+class CodeGenerator;
+class CodeRange;
+class CompilationCache;
+class ContextSlotCache;
+class ContextSwitcher;
+class Counters;
+class CpuFeatures;
+class CpuProfiler;
+class DeoptimizerData;
+class Deserializer;
+class EmptyStatement;
+class ExternalReferenceTable;
+class Factory;
+class FunctionInfoListener;
+class HandleScopeImplementer;
+class HeapProfiler;
+class InlineRuntimeFunctionsTable;
+class NoAllocationStringAllocator;
+class PcToCodeCache;
+class PreallocatedMemoryThread;
+class RegExpStack;
+class SaveContext;
+class UnicodeCache;
+class StringInputBuffer;
+class StringTracker;
+class StubCache;
+class ThreadManager;
+class ThreadState;
+class ThreadVisitor; // Defined in v8threads.h
+class VMState;
+
+// 'void function pointer', used to roundtrip the
+// ExternalReference::ExternalReferenceRedirector since we cannot include
+// assembler.h, where it is defined, here.
+typedef void* ExternalReferenceRedirectorPointer();
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+class Debug;
+class Debugger;
+class DebuggerAgent;
+#endif
+
+#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
+ !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
+class Redirection;
+class Simulator;
+#endif
+
+
+// Static indirection table for handles to constants. If a frame element or
+// a Result represents a constant, the data contains an index into this
+// table of handles to the actual constants.
+typedef ZoneList<Handle<Object> > ZoneObjectList;
+
+#define RETURN_IF_SCHEDULED_EXCEPTION(isolate) \
+ if (isolate->has_scheduled_exception()) \
+ return isolate->PromoteScheduledException()
+
+#define RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, value) \
+ if (call.is_null()) { \
+ ASSERT(isolate->has_pending_exception()); \
+ return value; \
+ }
+
+#define RETURN_IF_EMPTY_HANDLE(isolate, call) \
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, Failure::Exception())
+
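+// A usage sketch for the macros above (illustrative; `MaybeAllocate` is a
+// hypothetical handle-producing helper):
+//
+//   Handle<Object> result = MaybeAllocate(isolate);
+//   RETURN_IF_EMPTY_HANDLE(isolate, result);  // returns Failure::Exception()
+//                                             // if the handle is empty
+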
+#define ISOLATE_ADDRESS_LIST(C) \
+ C(handler_address) \
+ C(c_entry_fp_address) \
+ C(context_address) \
+ C(pending_exception_address) \
+ C(external_caught_exception_address)
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+#define ISOLATE_ADDRESS_LIST_PROF(C) \
+ C(js_entry_sp_address)
+#else
+#define ISOLATE_ADDRESS_LIST_PROF(C)
+#endif
+
+
+// Platform-independent, reliable thread identifier.
+class ThreadId {
+ public:
+ // Creates an invalid ThreadId.
+ ThreadId() : id_(kInvalidId) {}
+
+ // Returns ThreadId for current thread.
+ static ThreadId Current() { return ThreadId(GetCurrentThreadId()); }
+
+ // Returns invalid ThreadId (guaranteed not to be equal to any thread).
+ static ThreadId Invalid() { return ThreadId(kInvalidId); }
+
+ // Compares ThreadIds for equality.
+ INLINE(bool Equals(const ThreadId& other) const) {
+ return id_ == other.id_;
+ }
+
+ // Checks whether this ThreadId refers to any thread.
+ INLINE(bool IsValid() const) {
+ return id_ != kInvalidId;
+ }
+
+ // Converts ThreadId to an integer representation
+  // (required for public API: v8::V8::GetCurrentThreadId).
+ int ToInteger() const { return id_; }
+
+ // Converts ThreadId to an integer representation
+  // (required for public API: v8::V8::TerminateExecution).
+ static ThreadId FromInteger(int id) { return ThreadId(id); }
+
+ private:
+ static const int kInvalidId = -1;
+
+ explicit ThreadId(int id) : id_(id) {}
+
+ static int AllocateThreadId();
+
+ static int GetCurrentThreadId();
+
+ int id_;
+
+ static Atomic32 highest_thread_id_;
+
+ friend class Isolate;
+};
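+
+
+// A brief usage sketch (illustrative only):
+//
+//   ThreadId id = ThreadId::Current();
+//   if (id.Equals(isolate->thread_id())) {
+//     // this thread is the one currently running the isolate
+//   }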
+
+
+class ThreadLocalTop BASE_EMBEDDED {
+ public:
+ // Does early low-level initialization that does not depend on the
+ // isolate being present.
+ ThreadLocalTop();
+
+ // Initialize the thread data.
+ void Initialize();
+
+ // Get the top C++ try catch handler or NULL if none are registered.
+ //
+  // This method is not guaranteed to return an address that can be
+ // used for comparison with addresses into the JS stack. If such an
+ // address is needed, use try_catch_handler_address.
+ v8::TryCatch* TryCatchHandler();
+
+ // Get the address of the top C++ try catch handler or NULL if
+ // none are registered.
+ //
+ // This method always returns an address that can be compared to
+ // pointers into the JavaScript stack. When running on actual
+ // hardware, try_catch_handler_address and TryCatchHandler return
+ // the same pointer. When running on a simulator with a separate JS
+ // stack, try_catch_handler_address returns a JS stack address that
+ // corresponds to the place on the JS stack where the C++ handler
+ // would have been if the stack were not separate.
+ inline Address try_catch_handler_address() {
+ return try_catch_handler_address_;
+ }
+
+ // Set the address of the top C++ try catch handler.
+ inline void set_try_catch_handler_address(Address address) {
+ try_catch_handler_address_ = address;
+ }
+
+ void Free() {
+ ASSERT(!has_pending_message_);
+ ASSERT(!external_caught_exception_);
+ ASSERT(try_catch_handler_address_ == NULL);
+ }
+
+ Isolate* isolate_;
+  // The context in which the current execution method was created and which
+  // is used for variable lookups.
+ Context* context_;
+ ThreadId thread_id_;
+ MaybeObject* pending_exception_;
+ bool has_pending_message_;
+ Object* pending_message_obj_;
+ Script* pending_message_script_;
+ int pending_message_start_pos_;
+ int pending_message_end_pos_;
+ // Use a separate value for scheduled exceptions to preserve the
+ // invariants that hold about pending_exception. We may want to
+ // unify them later.
+ MaybeObject* scheduled_exception_;
+ bool external_caught_exception_;
+ SaveContext* save_context_;
+ v8::TryCatch* catcher_;
+
+ // Stack.
+ Address c_entry_fp_; // the frame pointer of the top c entry frame
+ Address handler_; // try-blocks are chained through the stack
+
+#ifdef USE_SIMULATOR
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
+ Simulator* simulator_;
+#endif
+#endif // USE_SIMULATOR
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Address js_entry_sp_; // the stack pointer of the bottom js entry frame
+ Address external_callback_; // the external callback we're currently in
+#endif
+
+#ifdef ENABLE_VMSTATE_TRACKING
+ StateTag current_vm_state_;
+#endif
+
+ // Generated code scratch locations.
+ int32_t formal_count_;
+
+ // Call back function to report unsafe JS accesses.
+ v8::FailedAccessCheckCallback failed_access_check_callback_;
+
+ private:
+ void InitializeInternal();
+
+ Address try_catch_handler_address_;
+};
+
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
+
+#define ISOLATE_PLATFORM_INIT_LIST(V) \
+ /* VirtualFrame::SpilledScope state */ \
+ V(bool, is_virtual_frame_in_spilled_scope, false) \
+ /* CodeGenerator::EmitNamedStore state */ \
+ V(int, inlined_write_barrier_size, -1)
+
+#if !defined(__arm__) && !defined(__mips__)
+class HashMap;
+#endif
+
+#else
+
+#define ISOLATE_PLATFORM_INIT_LIST(V)
+
+#endif
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+#define ISOLATE_DEBUGGER_INIT_LIST(V) \
+ V(v8::Debug::EventCallback, debug_event_callback, NULL) \
+ V(DebuggerAgent*, debugger_agent_instance, NULL)
+#else
+
+#define ISOLATE_DEBUGGER_INIT_LIST(V)
+
+#endif
+
+#ifdef DEBUG
+
+#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
+ V(CommentStatistic, paged_space_comments_statistics, \
+ CommentStatistic::kMaxComments + 1)
+#else
+
+#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
+
+#endif
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+#define ISOLATE_LOGGING_INIT_LIST(V) \
+ V(CpuProfiler*, cpu_profiler, NULL) \
+ V(HeapProfiler*, heap_profiler, NULL)
+
+#else
+
+#define ISOLATE_LOGGING_INIT_LIST(V)
+
+#endif
+
+#define ISOLATE_INIT_ARRAY_LIST(V) \
+ /* SerializerDeserializer state. */ \
+ V(Object*, serialize_partial_snapshot_cache, kPartialSnapshotCacheCapacity) \
+ V(int, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
+ V(int, bad_char_shift_table, kUC16AlphabetSize) \
+ V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
+ V(int, suffix_table, (kBMMaxShift + 1)) \
+ ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
+
+typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;
+
+#define ISOLATE_INIT_LIST(V) \
+ /* AssertNoZoneAllocation state. */ \
+ V(bool, zone_allow_allocation, true) \
+ /* SerializerDeserializer state. */ \
+ V(int, serialize_partial_snapshot_cache_length, 0) \
+ /* Assembler state. */ \
+ /* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */ \
+ V(byte*, assembler_spare_buffer, NULL) \
+ V(FatalErrorCallback, exception_behavior, NULL) \
+ V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL) \
+ V(v8::Debug::MessageHandler, message_handler, NULL) \
+ /* To distinguish the function templates, so that we can find them in the */ \
+ /* function cache of the global context. */ \
+ V(int, next_serial_number, 0) \
+ V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL) \
+ V(bool, always_allow_natives_syntax, false) \
+ /* Part of the state of liveedit. */ \
+ V(FunctionInfoListener*, active_function_info_listener, NULL) \
+ /* State for Relocatable. */ \
+ V(Relocatable*, relocatable_top, NULL) \
+ /* State for CodeEntry in profile-generator. */ \
+ V(CodeGenerator*, current_code_generator, NULL) \
+ V(bool, jump_target_compiling_deferred_code, false) \
+ V(DebugObjectCache*, string_stream_debug_object_cache, NULL) \
+ V(Object*, string_stream_current_security_token, NULL) \
+ /* TODO(isolates): Release this on destruction? */ \
+ V(int*, irregexp_interpreter_backtrack_stack_cache, NULL) \
+ /* Serializer state. */ \
+ V(ExternalReferenceTable*, external_reference_table, NULL) \
+ /* AstNode state. */ \
+ V(unsigned, ast_node_id, 0) \
+ V(unsigned, ast_node_count, 0) \
+ /* SafeStackFrameIterator activations count. */ \
+ V(int, safe_stack_iterator_counter, 0) \
+ V(uint64_t, enabled_cpu_features, 0) \
+ ISOLATE_PLATFORM_INIT_LIST(V) \
+ ISOLATE_LOGGING_INIT_LIST(V) \
+ ISOLATE_DEBUGGER_INIT_LIST(V)
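+
+// As an illustration, a single entry such as
+//   V(bool, always_allow_natives_syntax, false)
+// is expanded by the GLOBAL_ACCESSOR/GLOBAL_BACKING_STORE macros further
+// below into (roughly) an accessor pair
+//   bool always_allow_natives_syntax() const;
+//   void set_always_allow_natives_syntax(bool value);
+// plus a `bool always_allow_natives_syntax_;` field on Isolate.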
+
+class Isolate {
+ // These forward declarations are required to make the friend declarations in
+ // PerIsolateThreadData work on some older versions of gcc.
+ class ThreadDataTable;
+ class EntryStackItem;
+ public:
+ ~Isolate();
+
+ // A thread has a PerIsolateThreadData instance for each isolate that it has
+ // entered. That instance is allocated when the isolate is initially entered
+ // and reused on subsequent entries.
+ class PerIsolateThreadData {
+ public:
+ PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
+ : isolate_(isolate),
+ thread_id_(thread_id),
+ stack_limit_(0),
+ thread_state_(NULL),
+#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
+ !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
+ simulator_(NULL),
+#endif
+ next_(NULL),
+ prev_(NULL) { }
+ Isolate* isolate() const { return isolate_; }
+ ThreadId thread_id() const { return thread_id_; }
+ void set_stack_limit(uintptr_t value) { stack_limit_ = value; }
+ uintptr_t stack_limit() const { return stack_limit_; }
+ ThreadState* thread_state() const { return thread_state_; }
+ void set_thread_state(ThreadState* value) { thread_state_ = value; }
+
+#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
+ !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
+ Simulator* simulator() const { return simulator_; }
+ void set_simulator(Simulator* simulator) {
+ simulator_ = simulator;
+ }
+#endif
+
+ bool Matches(Isolate* isolate, ThreadId thread_id) const {
+ return isolate_ == isolate && thread_id_.Equals(thread_id);
+ }
+
+ private:
+ Isolate* isolate_;
+ ThreadId thread_id_;
+ uintptr_t stack_limit_;
+ ThreadState* thread_state_;
+
+#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
+ !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
+ Simulator* simulator_;
+#endif
+
+ PerIsolateThreadData* next_;
+ PerIsolateThreadData* prev_;
+
+ friend class Isolate;
+ friend class ThreadDataTable;
+ friend class EntryStackItem;
+
+ DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
+ };
+
+
+ enum AddressId {
+#define C(name) k_##name,
+ ISOLATE_ADDRESS_LIST(C)
+ ISOLATE_ADDRESS_LIST_PROF(C)
+#undef C
+ k_isolate_address_count
+ };
+
+ // Returns the PerIsolateThreadData for the current thread (or NULL if one is
+ // not currently set).
+ static PerIsolateThreadData* CurrentPerIsolateThreadData() {
+ return reinterpret_cast<PerIsolateThreadData*>(
+ Thread::GetThreadLocal(per_isolate_thread_data_key_));
+ }
+
+ // Returns the isolate inside which the current thread is running.
+ INLINE(static Isolate* Current()) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(
+ Thread::GetExistingThreadLocal(isolate_key_));
+ ASSERT(isolate != NULL);
+ return isolate;
+ }
+
+ INLINE(static Isolate* UncheckedCurrent()) {
+ return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_));
+ }
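+
+  // A sketch contrasting the two accessors above (illustrative):
+  //
+  //   Isolate* isolate = Isolate::Current();         // asserts non-NULL
+  //   Isolate* maybe = Isolate::UncheckedCurrent();  // may return NULL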
+
+ bool Init(Deserializer* des);
+
+ bool IsInitialized() { return state_ == INITIALIZED; }
+
+ // True if at least one thread Enter'ed this isolate.
+ bool IsInUse() { return entry_stack_ != NULL; }
+
+ // Destroys the non-default isolates.
+  // Sets the default isolate into a "has_been_disposed" state rather than
+  // destroying it, for legacy API reasons.
+ void TearDown();
+
+ bool IsDefaultIsolate() const { return this == default_isolate_; }
+
+ // Ensures that process-wide resources and the default isolate have been
+  // allocated. It is only necessary to call this method in rare cases, for
+ // example if you are using V8 from within the body of a static initializer.
+ // Safe to call multiple times.
+ static void EnsureDefaultIsolate();
+
+  // Find the PerThread for this particular (isolate, thread) combination.
+  // If one does not yet exist, return NULL.
+ PerIsolateThreadData* FindPerThreadDataForThisThread();
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Get the debugger from the default isolate. Preinitializes the
+ // default isolate if needed.
+ static Debugger* GetDefaultIsolateDebugger();
+#endif
+
+ // Get the stack guard from the default isolate. Preinitializes the
+ // default isolate if needed.
+ static StackGuard* GetDefaultIsolateStackGuard();
+
+ // Returns the key used to store the pointer to the current isolate.
+ // Used internally for V8 threads that do not execute JavaScript but still
+ // are part of the domain of an isolate (like the context switcher).
+ static Thread::LocalStorageKey isolate_key() {
+ return isolate_key_;
+ }
+
+ // Returns the key used to store process-wide thread IDs.
+ static Thread::LocalStorageKey thread_id_key() {
+ return thread_id_key_;
+ }
+
+ // If a client attempts to create a Locker without specifying an isolate,
+ // we assume that the client is using legacy behavior. Set up the current
+ // thread to be inside the implicit isolate (or fail a check if we have
+ // switched to non-legacy behavior).
+ static void EnterDefaultIsolate();
+
+ // Debug.
+ // Mutex for serializing access to break control structures.
+ Mutex* break_access() { return break_access_; }
+
+ Address get_address_from_id(AddressId id);
+
+ // Access to top context (where the current function object was created).
+ Context* context() { return thread_local_top_.context_; }
+ void set_context(Context* context) {
+ ASSERT(context == NULL || context->IsContext());
+ thread_local_top_.context_ = context;
+ }
+ Context** context_address() { return &thread_local_top_.context_; }
+
+  SaveContext* save_context() { return thread_local_top_.save_context_; }
+ void set_save_context(SaveContext* save) {
+ thread_local_top_.save_context_ = save;
+ }
+
+ // Access to current thread id.
+ ThreadId thread_id() { return thread_local_top_.thread_id_; }
+ void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
+
+ // Interface to pending exception.
+ MaybeObject* pending_exception() {
+ ASSERT(has_pending_exception());
+ return thread_local_top_.pending_exception_;
+ }
+ bool external_caught_exception() {
+ return thread_local_top_.external_caught_exception_;
+ }
+ void set_external_caught_exception(bool value) {
+ thread_local_top_.external_caught_exception_ = value;
+ }
+ void set_pending_exception(MaybeObject* exception) {
+ thread_local_top_.pending_exception_ = exception;
+ }
+ void clear_pending_exception() {
+ thread_local_top_.pending_exception_ = heap_.the_hole_value();
+ }
+ MaybeObject** pending_exception_address() {
+ return &thread_local_top_.pending_exception_;
+ }
+ bool has_pending_exception() {
+ return !thread_local_top_.pending_exception_->IsTheHole();
+ }
+ void clear_pending_message() {
+ thread_local_top_.has_pending_message_ = false;
+ thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
+ thread_local_top_.pending_message_script_ = NULL;
+ }
+ v8::TryCatch* try_catch_handler() {
+ return thread_local_top_.TryCatchHandler();
+ }
+ Address try_catch_handler_address() {
+ return thread_local_top_.try_catch_handler_address();
+ }
+ bool* external_caught_exception_address() {
+ return &thread_local_top_.external_caught_exception_;
+ }
+ v8::TryCatch* catcher() {
+ return thread_local_top_.catcher_;
+ }
+ void set_catcher(v8::TryCatch* catcher) {
+ thread_local_top_.catcher_ = catcher;
+ }
+
+ MaybeObject** scheduled_exception_address() {
+ return &thread_local_top_.scheduled_exception_;
+ }
+ MaybeObject* scheduled_exception() {
+ ASSERT(has_scheduled_exception());
+ return thread_local_top_.scheduled_exception_;
+ }
+ bool has_scheduled_exception() {
+ return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
+ }
+ void clear_scheduled_exception() {
+ thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
+ }
+
+ bool IsExternallyCaught();
+
+ bool is_catchable_by_javascript(MaybeObject* exception) {
+ return (exception != Failure::OutOfMemoryException()) &&
+ (exception != heap()->termination_exception());
+ }
+
+ // JS execution stack (see frames.h).
+ static Address c_entry_fp(ThreadLocalTop* thread) {
+ return thread->c_entry_fp_;
+ }
+ static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
+
+ inline Address* c_entry_fp_address() {
+ return &thread_local_top_.c_entry_fp_;
+ }
+ inline Address* handler_address() { return &thread_local_top_.handler_; }
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Bottom JS entry (see StackTracer::Trace in log.cc).
+ static Address js_entry_sp(ThreadLocalTop* thread) {
+ return thread->js_entry_sp_;
+ }
+ inline Address* js_entry_sp_address() {
+ return &thread_local_top_.js_entry_sp_;
+ }
+#endif
+
+ // Generated code scratch locations.
+ void* formal_count_address() { return &thread_local_top_.formal_count_; }
+
+ // Returns the global object of the current context. It could be
+ // a builtin object, or a js global object.
+ Handle<GlobalObject> global() {
+ return Handle<GlobalObject>(context()->global());
+ }
+
+ // Returns the global proxy object of the current context.
+ Object* global_proxy() {
+ return context()->global_proxy();
+ }
+
+ Handle<JSBuiltinsObject> js_builtins_object() {
+ return Handle<JSBuiltinsObject>(thread_local_top_.context_->builtins());
+ }
+
+ static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
+ void FreeThreadResources() { thread_local_top_.Free(); }
+
+  // This method is called by the API after operations that may throw
+  // exceptions. If an exception was thrown and not handled by an external
+  // handler, the exception is scheduled to be rethrown when we return to
+  // running JavaScript code. Returns true if an exception was scheduled.
+ bool OptionalRescheduleException(bool is_bottom_call);
+
+ class ExceptionScope {
+ public:
+ explicit ExceptionScope(Isolate* isolate) :
+ // Scope currently can only be used for regular exceptions, not
+ // failures like OOM or termination exception.
+ isolate_(isolate),
+ pending_exception_(isolate_->pending_exception()->ToObjectUnchecked()),
+ catcher_(isolate_->catcher())
+ { }
+
+ ~ExceptionScope() {
+ isolate_->set_catcher(catcher_);
+ isolate_->set_pending_exception(*pending_exception_);
+ }
+
+ private:
+ Isolate* isolate_;
+ Handle<Object> pending_exception_;
+ v8::TryCatch* catcher_;
+ };
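+
+  // A hedged usage sketch (illustrative; the scope saves the pending
+  // exception and catcher, and restores both on destruction):
+  //
+  //   { ExceptionScope scope(isolate);
+  //     // ... code that may clobber the pending exception ...
+  //   }  // original pending exception and catcher restored here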
+
+ void SetCaptureStackTraceForUncaughtExceptions(
+ bool capture,
+ int frame_limit,
+ StackTrace::StackTraceOptions options);
+
+ // Tells whether the current context has experienced an out of memory
+ // exception.
+ bool is_out_of_memory();
+
+ void PrintCurrentStackTrace(FILE* out);
+ void PrintStackTrace(FILE* out, char* thread_data);
+ void PrintStack(StringStream* accumulator);
+ void PrintStack();
+ Handle<String> StackTraceString();
+ Handle<JSArray> CaptureCurrentStackTrace(
+ int frame_limit,
+ StackTrace::StackTraceOptions options);
+
+  // Returns whether the top context may access the given global object. If
+ // the result is false, the pending exception is guaranteed to be
+ // set.
+ bool MayNamedAccess(JSObject* receiver,
+ Object* key,
+ v8::AccessType type);
+ bool MayIndexedAccess(JSObject* receiver,
+ uint32_t index,
+ v8::AccessType type);
+
+ void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
+ void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);
+
+ // Exception throwing support. The caller should use the result
+ // of Throw() as its return value.
+ Failure* Throw(Object* exception, MessageLocation* location = NULL);
+ // Re-throw an exception. This involves no error reporting since
+ // error reporting was handled when the exception was thrown
+ // originally.
+ Failure* ReThrow(MaybeObject* exception, MessageLocation* location = NULL);
+ void ScheduleThrow(Object* exception);
+ void ReportPendingMessages();
+ Failure* ThrowIllegalOperation();
+
+ // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
+ Failure* PromoteScheduledException();
+ void DoThrow(MaybeObject* exception, MessageLocation* location);
+  // Checks if the exception should be reported, and finds out whether it is
+  // caught externally.
+ bool ShouldReportException(bool* can_be_caught_externally,
+ bool catchable_by_javascript);
+
+ // Attempts to compute the current source location, storing the
+ // result in the target out parameter.
+ void ComputeLocation(MessageLocation* target);
+
+ // Override command line flag.
+ void TraceException(bool flag);
+
+ // Out of resource exception helpers.
+ Failure* StackOverflow();
+ Failure* TerminateExecution();
+
+ // Administration
+ void Iterate(ObjectVisitor* v);
+ void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
+ char* Iterate(ObjectVisitor* v, char* t);
+ void IterateThread(ThreadVisitor* v);
+ void IterateThread(ThreadVisitor* v, char* t);
+
+
+ // Returns the current global context.
+ Handle<Context> global_context();
+
+ // Returns the global context of the calling JavaScript code. That
+ // is, the global context of the top-most JavaScript frame.
+ Handle<Context> GetCallingGlobalContext();
+
+ void RegisterTryCatchHandler(v8::TryCatch* that);
+ void UnregisterTryCatchHandler(v8::TryCatch* that);
+
+ char* ArchiveThread(char* to);
+ char* RestoreThread(char* from);
+
+ static const char* const kStackOverflowMessage;
+
+ static const int kUC16AlphabetSize = 256; // See StringSearchBase.
+ static const int kBMMaxShift = 250; // See StringSearchBase.
+
+ // Accessors.
+#define GLOBAL_ACCESSOR(type, name, initialvalue) \
+ inline type name() const { \
+ ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
+ return name##_; \
+ } \
+ inline void set_##name(type value) { \
+ ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
+ name##_ = value; \
+ }
+ ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
+#undef GLOBAL_ACCESSOR
+
+#define GLOBAL_ARRAY_ACCESSOR(type, name, length) \
+ inline type* name() { \
+ ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
+ return &(name##_)[0]; \
+ }
+ ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
+#undef GLOBAL_ARRAY_ACCESSOR
+
+#define GLOBAL_CONTEXT_FIELD_ACCESSOR(index, type, name) \
+ Handle<type> name() { \
+ return Handle<type>(context()->global_context()->name()); \
+ }
+ GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSOR)
+#undef GLOBAL_CONTEXT_FIELD_ACCESSOR
+
+ Bootstrapper* bootstrapper() { return bootstrapper_; }
+ Counters* counters() { return counters_; }
+ CodeRange* code_range() { return code_range_; }
+ RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
+ CompilationCache* compilation_cache() { return compilation_cache_; }
+ Logger* logger() { return logger_; }
+ StackGuard* stack_guard() { return &stack_guard_; }
+ Heap* heap() { return &heap_; }
+ StatsTable* stats_table() { return stats_table_; }
+ StubCache* stub_cache() { return stub_cache_; }
+ DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
+ ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
+
+ TranscendentalCache* transcendental_cache() const {
+ return transcendental_cache_;
+ }
+
+ MemoryAllocator* memory_allocator() {
+ return memory_allocator_;
+ }
+
+ KeyedLookupCache* keyed_lookup_cache() {
+ return keyed_lookup_cache_;
+ }
+
+ ContextSlotCache* context_slot_cache() {
+ return context_slot_cache_;
+ }
+
+ DescriptorLookupCache* descriptor_lookup_cache() {
+ return descriptor_lookup_cache_;
+ }
+
+ v8::ImplementationUtilities::HandleScopeData* handle_scope_data() {
+ return &handle_scope_data_;
+ }
+ HandleScopeImplementer* handle_scope_implementer() {
+ ASSERT(handle_scope_implementer_);
+ return handle_scope_implementer_;
+ }
+ Zone* zone() { return &zone_; }
+
+ UnicodeCache* unicode_cache() {
+ return unicode_cache_;
+ }
+
+ PcToCodeCache* pc_to_code_cache() { return pc_to_code_cache_; }
+
+ StringInputBuffer* write_input_buffer() { return write_input_buffer_; }
+
+ GlobalHandles* global_handles() { return global_handles_; }
+
+ ThreadManager* thread_manager() { return thread_manager_; }
+
+ ContextSwitcher* context_switcher() { return context_switcher_; }
+
+ void set_context_switcher(ContextSwitcher* switcher) {
+ context_switcher_ = switcher;
+ }
+
+ StringTracker* string_tracker() { return string_tracker_; }
+
+ unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
+ return &jsregexp_uncanonicalize_;
+ }
+
+ unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
+ return &jsregexp_canonrange_;
+ }
+
+ StringInputBuffer* objects_string_compare_buffer_a() {
+ return &objects_string_compare_buffer_a_;
+ }
+
+ StringInputBuffer* objects_string_compare_buffer_b() {
+ return &objects_string_compare_buffer_b_;
+ }
+
+ StaticResource<StringInputBuffer>* objects_string_input_buffer() {
+ return &objects_string_input_buffer_;
+ }
+
+ AstSentinels* ast_sentinels() { return ast_sentinels_; }
+
+ RuntimeState* runtime_state() { return &runtime_state_; }
+
+ StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
+ return &compiler_safe_string_input_buffer_;
+ }
+
+ Builtins* builtins() { return &builtins_; }
+
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>*
+ regexp_macro_assembler_canonicalize() {
+ return &regexp_macro_assembler_canonicalize_;
+ }
+
+ RegExpStack* regexp_stack() { return regexp_stack_; }
+
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>*
+ interp_canonicalize_mapping() {
+ return &interp_canonicalize_mapping_;
+ }
+
+ void* PreallocatedStorageNew(size_t size);
+ void PreallocatedStorageDelete(void* p);
+ void PreallocatedStorageInit(size_t size);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debugger* debugger() { return debugger_; }
+ Debug* debug() { return debug_; }
+#endif
+
+ inline bool DebuggerHasBreakPoints();
+
+#ifdef DEBUG
+ HistogramInfo* heap_histograms() { return heap_histograms_; }
+
+ JSObject::SpillInformation* js_spill_information() {
+ return &js_spill_information_;
+ }
+
+ int* code_kind_statistics() { return code_kind_statistics_; }
+#endif
+
+#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
+ defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
+ bool simulator_initialized() { return simulator_initialized_; }
+ void set_simulator_initialized(bool initialized) {
+ simulator_initialized_ = initialized;
+ }
+
+ HashMap* simulator_i_cache() { return simulator_i_cache_; }
+ void set_simulator_i_cache(HashMap* hash_map) {
+ simulator_i_cache_ = hash_map;
+ }
+
+ Redirection* simulator_redirection() {
+ return simulator_redirection_;
+ }
+ void set_simulator_redirection(Redirection* redirection) {
+ simulator_redirection_ = redirection;
+ }
+#endif
+
+ Factory* factory() { return reinterpret_cast<Factory*>(this); }
+
+ // SerializerDeserializer state.
+ static const int kPartialSnapshotCacheCapacity = 1400;
+
+ static const int kJSRegexpStaticOffsetsVectorSize = 50;
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Address external_callback() {
+ return thread_local_top_.external_callback_;
+ }
+ void set_external_callback(Address callback) {
+ thread_local_top_.external_callback_ = callback;
+ }
+#endif
+
+#ifdef ENABLE_VMSTATE_TRACKING
+ StateTag current_vm_state() {
+ return thread_local_top_.current_vm_state_;
+ }
+
+ void SetCurrentVMState(StateTag state) {
+ if (RuntimeProfiler::IsEnabled()) {
+ StateTag current_state = thread_local_top_.current_vm_state_;
+ if (current_state != JS && state == JS) {
+ // Non-JS -> JS transition.
+ RuntimeProfiler::IsolateEnteredJS(this);
+ } else if (current_state == JS && state != JS) {
+ // JS -> non-JS transition.
+ ASSERT(RuntimeProfiler::IsSomeIsolateInJS());
+ RuntimeProfiler::IsolateExitedJS(this);
+ } else {
+ // Other types of state transitions are not interesting to the
+ // runtime profiler, because they don't affect whether we're
+ // in JS or not.
+ ASSERT((current_state == JS) == (state == JS));
+ }
+ }
+ thread_local_top_.current_vm_state_ = state;
+ }
+#endif
+
+ void ResetEagerOptimizingData();
+
+ void SetData(void* data) { embedder_data_ = data; }
+ void* GetData() { return embedder_data_; }
+
+ private:
+ Isolate();
+
+ // The per-process lock should be acquired before the ThreadDataTable is
+ // modified.
+ class ThreadDataTable {
+ public:
+ ThreadDataTable();
+ ~ThreadDataTable();
+
+ PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
+ void Insert(PerIsolateThreadData* data);
+ void Remove(Isolate* isolate, ThreadId thread_id);
+ void Remove(PerIsolateThreadData* data);
+ void RemoveAllThreads(Isolate* isolate);
+
+ private:
+ PerIsolateThreadData* list_;
+ };
+
+  // These items form a stack, synchronized with threads Enter'ing and
+  // Exit'ing the Isolate. The top of the stack points to a thread which is
+  // currently running the Isolate. When the stack is empty, the Isolate is
+  // considered not entered by any thread and can be Disposed.
+  // If the same thread enters the Isolate more than once, entry_count is
+  // incremented rather than a new item being pushed onto the stack.
+ class EntryStackItem {
+ public:
+ EntryStackItem(PerIsolateThreadData* previous_thread_data,
+ Isolate* previous_isolate,
+ EntryStackItem* previous_item)
+ : entry_count(1),
+ previous_thread_data(previous_thread_data),
+ previous_isolate(previous_isolate),
+ previous_item(previous_item) { }
+
+ int entry_count;
+ PerIsolateThreadData* previous_thread_data;
+ Isolate* previous_isolate;
+ EntryStackItem* previous_item;
+
+ DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
+ };
+
+ // This mutex protects highest_thread_id_, thread_data_table_ and
+ // default_isolate_.
+ static Mutex* process_wide_mutex_;
+
+ static Thread::LocalStorageKey per_isolate_thread_data_key_;
+ static Thread::LocalStorageKey isolate_key_;
+ static Thread::LocalStorageKey thread_id_key_;
+ static Isolate* default_isolate_;
+ static ThreadDataTable* thread_data_table_;
+
+ bool PreInit();
+
+ void Deinit();
+
+ static void SetIsolateThreadLocals(Isolate* isolate,
+ PerIsolateThreadData* data);
+
+ enum State {
+ UNINITIALIZED, // Some components may not have been allocated.
+ PREINITIALIZED, // Components have been allocated but not initialized.
+ INITIALIZED // All components are fully initialized.
+ };
+
+ State state_;
+ EntryStackItem* entry_stack_;
+
+ // Allocate and insert PerIsolateThreadData into the ThreadDataTable
+ // (regardless of whether such data already exists).
+ PerIsolateThreadData* AllocatePerIsolateThreadData(ThreadId thread_id);
+
+ // Find the PerThread for this particular (isolate, thread) combination.
+ // If one does not yet exist, allocate a new one.
+ PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
+
+  // PreInits and returns a default isolate. Needed when a new thread tries
+ // to create a Locker for the first time (the lock itself is in the isolate).
+ static Isolate* GetDefaultIsolateForLocking();
+
+ // Initializes the current thread to run this Isolate.
+ // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
+ // at the same time, this should be prevented using external locking.
+ void Enter();
+
+  // Exits the current thread. The previously entered Isolate is restored
+ // for the thread.
+ // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
+ // at the same time, this should be prevented using external locking.
+ void Exit();
+
+ void PreallocatedMemoryThreadStart();
+ void PreallocatedMemoryThreadStop();
+ void InitializeThreadLocal();
+
+ void PrintStackTrace(FILE* out, ThreadLocalTop* thread);
+ void MarkCompactPrologue(bool is_compacting,
+ ThreadLocalTop* archived_thread_data);
+ void MarkCompactEpilogue(bool is_compacting,
+ ThreadLocalTop* archived_thread_data);
+
+ void FillCache();
+
+ void PropagatePendingExceptionToExternalTryCatch();
+
+ int stack_trace_nesting_level_;
+ StringStream* incomplete_message_;
+ // The preallocated memory thread singleton.
+ PreallocatedMemoryThread* preallocated_memory_thread_;
+ Address isolate_addresses_[k_isolate_address_count + 1]; // NOLINT
+ NoAllocationStringAllocator* preallocated_message_space_;
+
+ Bootstrapper* bootstrapper_;
+ RuntimeProfiler* runtime_profiler_;
+ CompilationCache* compilation_cache_;
+ Counters* counters_;
+ CodeRange* code_range_;
+ Mutex* break_access_;
+ Heap heap_;
+ Logger* logger_;
+ StackGuard stack_guard_;
+ StatsTable* stats_table_;
+ StubCache* stub_cache_;
+ DeoptimizerData* deoptimizer_data_;
+ ThreadLocalTop thread_local_top_;
+ bool capture_stack_trace_for_uncaught_exceptions_;
+ int stack_trace_for_uncaught_exceptions_frame_limit_;
+ StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
+ TranscendentalCache* transcendental_cache_;
+ MemoryAllocator* memory_allocator_;
+ KeyedLookupCache* keyed_lookup_cache_;
+ ContextSlotCache* context_slot_cache_;
+ DescriptorLookupCache* descriptor_lookup_cache_;
+ v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
+ HandleScopeImplementer* handle_scope_implementer_;
+ UnicodeCache* unicode_cache_;
+ Zone zone_;
+ PreallocatedStorage in_use_list_;
+ PreallocatedStorage free_list_;
+ bool preallocated_storage_preallocated_;
+ PcToCodeCache* pc_to_code_cache_;
+ StringInputBuffer* write_input_buffer_;
+ GlobalHandles* global_handles_;
+ ContextSwitcher* context_switcher_;
+ ThreadManager* thread_manager_;
+ AstSentinels* ast_sentinels_;
+ RuntimeState runtime_state_;
+ StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
+ Builtins builtins_;
+ StringTracker* string_tracker_;
+ unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
+ unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
+ StringInputBuffer objects_string_compare_buffer_a_;
+ StringInputBuffer objects_string_compare_buffer_b_;
+ StaticResource<StringInputBuffer> objects_string_input_buffer_;
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>
+ regexp_macro_assembler_canonicalize_;
+ RegExpStack* regexp_stack_;
+ unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
+ void* embedder_data_;
+
+#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
+ defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
+ bool simulator_initialized_;
+ HashMap* simulator_i_cache_;
+ Redirection* simulator_redirection_;
+#endif
+
+#ifdef DEBUG
+ // A static array of histogram info for each type.
+ HistogramInfo heap_histograms_[LAST_TYPE + 1];
+ JSObject::SpillInformation js_spill_information_;
+ int code_kind_statistics_[Code::NUMBER_OF_KINDS];
+#endif
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debugger* debugger_;
+ Debug* debug_;
+#endif
+
+#define GLOBAL_BACKING_STORE(type, name, initialvalue) \
+ type name##_;
+ ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
+#undef GLOBAL_BACKING_STORE
+
+#define GLOBAL_ARRAY_BACKING_STORE(type, name, length) \
+ type name##_[length];
+ ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
+#undef GLOBAL_ARRAY_BACKING_STORE
+
+#ifdef DEBUG
+ // This class is huge and has a number of fields controlled by
+ // preprocessor defines. Make sure the offsets of these fields agree
+ // between compilation units.
+#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
+ static const intptr_t name##_debug_offset_;
+ ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
+ ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
+#undef ISOLATE_FIELD_OFFSET
+#endif
+
+ friend class ExecutionAccess;
+ friend class IsolateInitializer;
+ friend class ThreadManager;
+ friend class Simulator;
+ friend class StackGuard;
+ friend class ThreadId;
+ friend class v8::Isolate;
+ friend class v8::Locker;
+ friend class v8::Unlocker;
+
+ DISALLOW_COPY_AND_ASSIGN(Isolate);
+};
+
+
+// If the GCC version is 4.1.x or 4.2.x, an additional field is added to the
+// class as a workaround for a bug in the generated code found with these
+// versions of GCC. See V8 issue 122 for details.
+class SaveContext BASE_EMBEDDED {
+ public:
+ explicit SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
+ if (isolate->context() != NULL) {
+ context_ = Handle<Context>(isolate->context());
+#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
+ dummy_ = Handle<Context>(isolate->context());
+#endif
+ }
+ isolate->set_save_context(this);
+
+ // If there is no JS frame under the current C frame, use the value 0.
+ JavaScriptFrameIterator it(isolate);
+ js_sp_ = it.done() ? 0 : it.frame()->sp();
+ }
+
+ ~SaveContext() {
+ if (context_.is_null()) {
+ Isolate* isolate = Isolate::Current();
+ isolate->set_context(NULL);
+ isolate->set_save_context(prev_);
+ } else {
+ Isolate* isolate = context_->GetIsolate();
+ isolate->set_context(*context_);
+ isolate->set_save_context(prev_);
+ }
+ }
+
+ Handle<Context> context() { return context_; }
+ SaveContext* prev() { return prev_; }
+
+ // Returns true if this save context is below a given JavaScript frame.
+ bool below(JavaScriptFrame* frame) {
+ return (js_sp_ == 0) || (frame->sp() < js_sp_);
+ }
+
+ private:
+ Handle<Context> context_;
+#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
+ Handle<Context> dummy_;
+#endif
+ SaveContext* prev_;
+ Address js_sp_; // The top JS frame's sp when saving context.
+};
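+
+
+// A typical use of SaveContext (a sketch; `other_context` is a hypothetical
+// context handle the caller wants to run in temporarily):
+//
+//   { SaveContext save(isolate);
+//     isolate->set_context(*other_context);
+//     // ... run code in other_context ...
+//   }  // destructor restores the saved context and the save_context chain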
+
+
+class AssertNoContextChange BASE_EMBEDDED {
+#ifdef DEBUG
+ public:
+ AssertNoContextChange() :
+ scope_(Isolate::Current()),
+ context_(Isolate::Current()->context(), Isolate::Current()) {
+ }
+
+ ~AssertNoContextChange() {
+ ASSERT(Isolate::Current()->context() == *context_);
+ }
+
+ private:
+ HandleScope scope_;
+ Handle<Context> context_;
+#else
+ public:
+ AssertNoContextChange() { }
+#endif
+};
+
+
+class ExecutionAccess BASE_EMBEDDED {
+ public:
+ explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
+ Lock(isolate);
+ }
+ ~ExecutionAccess() { Unlock(isolate_); }
+
+ static void Lock(Isolate* isolate) { isolate->break_access_->Lock(); }
+ static void Unlock(Isolate* isolate) { isolate->break_access_->Unlock(); }
+
+ static bool TryLock(Isolate* isolate) {
+ return isolate->break_access_->TryLock();
+ }
+
+ private:
+ Isolate* isolate_;
+};
+
+
+// Support for checking for stack-overflows in C++ code.
+class StackLimitCheck BASE_EMBEDDED {
+ public:
+ explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }
+
+ bool HasOverflowed() const {
+ StackGuard* stack_guard = isolate_->stack_guard();
+    // The stack has overflowed in C++ code only if the stack pointer exceeds
+    // the C++ stack guard and the limits are not set to interrupt values.
+    // TODO(214): Stack overflows are ignored if an interrupt is pending. This
+ // code should probably always use the initial C++ limit.
+ return (reinterpret_cast<uintptr_t>(this) < stack_guard->climit()) &&
+ stack_guard->IsStackOverflow();
+ }
+ private:
+ Isolate* isolate_;
+};
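+
+
+// Usage sketch, as in recursive runtime code (illustrative):
+//
+//   StackLimitCheck check(isolate);
+//   if (check.HasOverflowed()) return isolate->StackOverflow();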
+
+
+// Support for temporarily postponing interrupts. When the outermost
+// postpone scope is left the interrupts will be re-enabled and any
+// interrupts that occurred while in the scope will be taken into
+// account.
+class PostponeInterruptsScope BASE_EMBEDDED {
+ public:
+ explicit PostponeInterruptsScope(Isolate* isolate)
+ : stack_guard_(isolate->stack_guard()) {
+ stack_guard_->thread_local_.postpone_interrupts_nesting_++;
+ stack_guard_->DisableInterrupts();
+ }
+
+ ~PostponeInterruptsScope() {
+ if (--stack_guard_->thread_local_.postpone_interrupts_nesting_ == 0) {
+ stack_guard_->EnableInterrupts();
+ }
+ }
+ private:
+ StackGuard* stack_guard_;
+};
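+
+
+// Usage sketch (scopes may nest; only the outermost destructor re-enables
+// interrupts):
+//
+//   { PostponeInterruptsScope postpone(isolate);
+//     // interrupts requested here are recorded but not taken
+//   }  // interrupts re-enabled when the outermost scope exits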
+
+
+// Temporary macros for accessing current isolate and its subobjects.
+// They provide better readability, especially when used a lot in the code.
+#define HEAP (v8::internal::Isolate::Current()->heap())
+#define FACTORY (v8::internal::Isolate::Current()->factory())
+#define ISOLATE (v8::internal::Isolate::Current())
+#define ZONE (v8::internal::Isolate::Current()->zone())
+#define LOGGER (v8::internal::Isolate::Current()->logger())
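+
+// For instance, isolate-bound code can use these as (a sketch; the two calls
+// are only meant to show the macro shape):
+//
+//   HEAP->CollectAllGarbage(false);
+//   Handle<String> s = FACTORY->NewStringFromAscii(CStrVector("x"));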
+
+
+// Tells whether the global context is marked with out of memory.
+inline bool Context::has_out_of_memory() {
+ return global_context()->out_of_memory()->IsTrue();
+}
+
+
+// Mark the global context with out of memory.
+inline void Context::mark_out_of_memory() {
+ global_context()->set_out_of_memory(HEAP->true_value());
+}
+
+
+} } // namespace v8::internal
+
+// TODO(isolates): Get rid of these -inl.h includes and place them only where
+// they're needed.
+#include "allocation-inl.h"
+#include "zone-inl.h"
+#include "frames-inl.h"
+
+#endif // V8_ISOLATE_H_
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
new file mode 100644
index 000000000..1f30170a1
--- /dev/null
+++ b/deps/v8/src/json-parser.h
@@ -0,0 +1,598 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_JSON_PARSER_H_
+#define V8_JSON_PARSER_H_
+
+#include "v8.h"
+
+#include "char-predicates-inl.h"
+#include "conversions.h"
+#include "messages.h"
+#include "spaces-inl.h"
+#include "token.h"
+
+namespace v8 {
+namespace internal {
+
+// A simple json parser.
+template <bool seq_ascii>
+class JsonParser BASE_EMBEDDED {
+ public:
+ static Handle<Object> Parse(Handle<String> source) {
+ return JsonParser().ParseJson(source);
+ }
+
+ static const int kEndOfString = -1;
+
+ private:
+ // Parse a string containing a single JSON value.
+ Handle<Object> ParseJson(Handle<String> source);
+
+ inline void Advance() {
+ position_++;
+ if (position_ >= source_length_) {
+ c0_ = kEndOfString;
+ } else if (seq_ascii) {
+ c0_ = seq_source_->SeqAsciiStringGet(position_);
+ } else {
+ c0_ = source_->Get(position_);
+ }
+ }
+
+ // The JSON lexical grammar is specified in the ECMAScript 5 standard,
+ // section 15.12.1.1. The only allowed whitespace characters between tokens
+ // are tab, carriage-return, newline and space.
+
+ inline void AdvanceSkipWhitespace() {
+ do {
+ Advance();
+ } while (c0_ == '\t' || c0_ == '\r' || c0_ == '\n' || c0_ == ' ');
+ }
+
+ inline void SkipWhitespace() {
+ while (c0_ == '\t' || c0_ == '\r' || c0_ == '\n' || c0_ == ' ') {
+ Advance();
+ }
+ }
+
+ inline uc32 AdvanceGetChar() {
+ Advance();
+ return c0_;
+ }
+
+  // Checks that the current character is c.
+  // If so, consumes c and skips whitespace.
+ inline bool MatchSkipWhiteSpace(uc32 c) {
+ if (c0_ == c) {
+ AdvanceSkipWhitespace();
+ return true;
+ }
+ return false;
+ }
+
+  // A JSON string (production JSONString) is a subset of valid JavaScript
+ // literals. The string must only be double-quoted (not single-quoted), and
+ // the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
+ // four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
+ Handle<String> ParseJsonString() {
+ return ScanJsonString<false>();
+ }
+ Handle<String> ParseJsonSymbol() {
+ return ScanJsonString<true>();
+ }
+ template <bool is_symbol>
+ Handle<String> ScanJsonString();
+ // Creates a new string and copies prefix[start..end] into the beginning
+ // of it. Then scans the rest of the string, adding characters after the
+ // prefix. Called by ScanJsonString when reaching a '\' or non-ASCII char.
+ template <typename StringType, typename SinkChar>
+ Handle<String> SlowScanJsonString(Handle<String> prefix, int start, int end);
+
+ // A JSON number (production JSONNumber) is a subset of the valid JavaScript
+ // decimal number literals.
+ // It includes an optional minus sign, must have at least one
+ // digit before and after a decimal point, may not have prefixed zeros (unless
+ // the integer part is zero), and may include an exponent part (e.g., "e-10").
+ // Hexadecimal and octal numbers are not allowed.
+ Handle<Object> ParseJsonNumber();
+
+ // Parse a single JSON value from input (grammar production JSONValue).
+ // A JSON value is either a (double-quoted) string literal, a number literal,
+ // one of "true", "false", or "null", or an object or array literal.
+ Handle<Object> ParseJsonValue();
+
+ // Parse a JSON object literal (grammar production JSONObject).
+ // An object literal is a squiggly-braced and comma separated sequence
+ // (possibly empty) of key/value pairs, where the key is a JSON string
+ // literal, the value is a JSON value, and the two are separated by a colon.
+  // Unlike a JavaScript object literal, a JSON object doesn't allow numbers
+  // or identifiers as keys.
+ Handle<Object> ParseJsonObject();
+
+ // Parses a JSON array literal (grammar production JSONArray). An array
+ // literal is a square-bracketed and comma separated sequence (possibly empty)
+ // of JSON values.
+ // A JSON array doesn't allow leaving out values from the sequence, nor does
+ // it allow a terminal comma, like a JavaScript array does.
+ Handle<Object> ParseJsonArray();
+
+
+ // Mark that a parsing error has happened at the current token, and
+ // return a null handle. Primarily for readability.
+ inline Handle<Object> ReportUnexpectedCharacter() {
+ return Handle<Object>::null();
+ }
+
+ inline Isolate* isolate() { return isolate_; }
+
+ static const int kInitialSpecialStringLength = 1024;
+
+
+ private:
+ Handle<String> source_;
+ int source_length_;
+ Handle<SeqAsciiString> seq_source_;
+
+ Isolate* isolate_;
+ uc32 c0_;
+ int position_;
+};
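+
+// Entry-point sketch: callers pick the template instantiation based on the
+// string representation (mirrors the seq_ascii fast path; illustrative):
+//
+//   Handle<Object> result = source->IsSeqAsciiString()
+//       ? JsonParser<true>::Parse(source)
+//       : JsonParser<false>::Parse(source);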
+
+template <bool seq_ascii>
+Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source) {
+ isolate_ = source->map()->isolate();
+ source_ = Handle<String>(source->TryFlattenGetString());
+ source_length_ = source_->length();
+
+ // Optimized fast case where we only have ASCII characters.
+ if (seq_ascii) {
+ seq_source_ = Handle<SeqAsciiString>::cast(source_);
+ }
+
+ // Set initial position right before the string.
+ position_ = -1;
+  // Advance to the first character (possibly EOS).
+ AdvanceSkipWhitespace();
+ Handle<Object> result = ParseJsonValue();
+ if (result.is_null() || c0_ != kEndOfString) {
+ // Parse failed. Current character is the unexpected token.
+
+ const char* message;
+ Factory* factory = isolate()->factory();
+ Handle<JSArray> array;
+
+ switch (c0_) {
+ case kEndOfString:
+ message = "unexpected_eos";
+ array = factory->NewJSArray(0);
+ break;
+ case '-':
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ message = "unexpected_token_number";
+ array = factory->NewJSArray(0);
+ break;
+ case '"':
+ message = "unexpected_token_string";
+ array = factory->NewJSArray(0);
+ break;
+ default:
+ message = "unexpected_token";
+ Handle<Object> name = LookupSingleCharacterStringFromCode(c0_);
+ Handle<FixedArray> element = factory->NewFixedArray(1);
+ element->set(0, *name);
+ array = factory->NewJSArrayWithElements(element);
+ break;
+ }
+
+ MessageLocation location(factory->NewScript(source),
+ position_,
+ position_ + 1);
+ Handle<Object> result = factory->NewSyntaxError(message, array);
+ isolate()->Throw(*result, &location);
+ return Handle<Object>::null();
+ }
+ return result;
+}
+
+
+// Parse any JSON value.
+template <bool seq_ascii>
+Handle<Object> JsonParser<seq_ascii>::ParseJsonValue() {
+ switch (c0_) {
+ case '"':
+ return ParseJsonString();
+ case '-':
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ return ParseJsonNumber();
+ case 'f':
+ if (AdvanceGetChar() == 'a' && AdvanceGetChar() == 'l' &&
+ AdvanceGetChar() == 's' && AdvanceGetChar() == 'e') {
+ AdvanceSkipWhitespace();
+ return isolate()->factory()->false_value();
+ } else {
+ return ReportUnexpectedCharacter();
+ }
+ case 't':
+ if (AdvanceGetChar() == 'r' && AdvanceGetChar() == 'u' &&
+ AdvanceGetChar() == 'e') {
+ AdvanceSkipWhitespace();
+ return isolate()->factory()->true_value();
+ } else {
+ return ReportUnexpectedCharacter();
+ }
+ case 'n':
+ if (AdvanceGetChar() == 'u' && AdvanceGetChar() == 'l' &&
+ AdvanceGetChar() == 'l') {
+ AdvanceSkipWhitespace();
+ return isolate()->factory()->null_value();
+ } else {
+ return ReportUnexpectedCharacter();
+ }
+ case '{':
+ return ParseJsonObject();
+ case '[':
+ return ParseJsonArray();
+ default:
+ return ReportUnexpectedCharacter();
+ }
+}
+
+
+// Parse a JSON object. Position must be right at '{'.
+template <bool seq_ascii>
+Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
+ Handle<JSFunction> object_constructor(
+ isolate()->global_context()->object_function());
+ Handle<JSObject> json_object =
+ isolate()->factory()->NewJSObject(object_constructor);
+ ASSERT_EQ(c0_, '{');
+
+ AdvanceSkipWhitespace();
+ if (c0_ != '}') {
+ do {
+ if (c0_ != '"') return ReportUnexpectedCharacter();
+ Handle<String> key = ParseJsonSymbol();
+ if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
+ AdvanceSkipWhitespace();
+ Handle<Object> value = ParseJsonValue();
+ if (value.is_null()) return ReportUnexpectedCharacter();
+
+ uint32_t index;
+ if (key->AsArrayIndex(&index)) {
+ SetOwnElement(json_object, index, value, kNonStrictMode);
+ } else if (key->Equals(isolate()->heap()->Proto_symbol())) {
+ SetPrototype(json_object, value);
+ } else {
+ SetLocalPropertyIgnoreAttributes(json_object, key, value, NONE);
+ }
+ } while (MatchSkipWhiteSpace(','));
+ if (c0_ != '}') {
+ return ReportUnexpectedCharacter();
+ }
+ }
+ AdvanceSkipWhitespace();
+ return json_object;
+}
+
+// Parse a JSON array. Position must be right at '['.
+template <bool seq_ascii>
+Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
+ ZoneScope zone_scope(isolate(), DELETE_ON_EXIT);
+ ZoneList<Handle<Object> > elements(4);
+ ASSERT_EQ(c0_, '[');
+
+ AdvanceSkipWhitespace();
+ if (c0_ != ']') {
+ do {
+ Handle<Object> element = ParseJsonValue();
+ if (element.is_null()) return ReportUnexpectedCharacter();
+ elements.Add(element);
+ } while (MatchSkipWhiteSpace(','));
+ if (c0_ != ']') {
+ return ReportUnexpectedCharacter();
+ }
+ }
+ AdvanceSkipWhitespace();
+ // Allocate a fixed array with all the elements.
+ Handle<FixedArray> fast_elements =
+ isolate()->factory()->NewFixedArray(elements.length());
+ for (int i = 0, n = elements.length(); i < n; i++) {
+ fast_elements->set(i, *elements[i]);
+ }
+ return isolate()->factory()->NewJSArrayWithElements(fast_elements);
+}
+
+
+template <bool seq_ascii>
+Handle<Object> JsonParser<seq_ascii>::ParseJsonNumber() {
+ bool negative = false;
+ int beg_pos = position_;
+ if (c0_ == '-') {
+ Advance();
+ negative = true;
+ }
+ if (c0_ == '0') {
+ Advance();
+ // Prefix zero is only allowed if it's the only digit before
+ // a decimal point or exponent.
+ if ('0' <= c0_ && c0_ <= '9') return ReportUnexpectedCharacter();
+ } else {
+ int i = 0;
+ int digits = 0;
+ if (c0_ < '1' || c0_ > '9') return ReportUnexpectedCharacter();
+ do {
+ i = i * 10 + c0_ - '0';
+ digits++;
+ Advance();
+ } while (c0_ >= '0' && c0_ <= '9');
+ if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
+ SkipWhitespace();
+ return Handle<Smi>(Smi::FromInt((negative ? -i : i)), isolate());
+ }
+ }
+ if (c0_ == '.') {
+ Advance();
+ if (c0_ < '0' || c0_ > '9') return ReportUnexpectedCharacter();
+ do {
+ Advance();
+ } while (c0_ >= '0' && c0_ <= '9');
+ }
+ if (AsciiAlphaToLower(c0_) == 'e') {
+ Advance();
+ if (c0_ == '-' || c0_ == '+') Advance();
+ if (c0_ < '0' || c0_ > '9') return ReportUnexpectedCharacter();
+ do {
+ Advance();
+ } while (c0_ >= '0' && c0_ <= '9');
+ }
+ int length = position_ - beg_pos;
+ double number;
+ if (seq_ascii) {
+ Vector<const char> chars(seq_source_->GetChars() + beg_pos, length);
+ number = StringToDouble(isolate()->unicode_cache(),
+ chars,
+ NO_FLAGS, // Hex, octal or trailing junk.
+ OS::nan_value());
+ } else {
+ Vector<char> buffer = Vector<char>::New(length);
+ String::WriteToFlat(*source_, buffer.start(), beg_pos, position_);
+ Vector<const char> result =
+ Vector<const char>(reinterpret_cast<const char*>(buffer.start()),
+ length);
+ number = StringToDouble(isolate()->unicode_cache(),
+ result,
+ NO_FLAGS, // Hex, octal or trailing junk.
+ 0.0);
+ buffer.Dispose();
+ }
+ SkipWhitespace();
+ return isolate()->factory()->NewNumber(number);
+}
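+
+
+// Illustrative inputs for ParseJsonNumber, per the JSONNumber grammar notes
+// in the class declaration:
+//
+//   "0", "-0.5", "12e-3"   -> accepted ("123" would take the Smi fast path)
+//   "01", "-.5", "1."      -> rejected via ReportUnexpectedCharacter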
+
+
+template <typename StringType>
+inline void SeqStringSet(Handle<StringType> seq_str, int i, uc32 c);
+
+template <>
+inline void SeqStringSet(Handle<SeqTwoByteString> seq_str, int i, uc32 c) {
+ seq_str->SeqTwoByteStringSet(i, c);
+}
+
+template <>
+inline void SeqStringSet(Handle<SeqAsciiString> seq_str, int i, uc32 c) {
+ seq_str->SeqAsciiStringSet(i, c);
+}
+
+template <typename StringType>
+inline Handle<StringType> NewRawString(Factory* factory, int length);
+
+template <>
+inline Handle<SeqTwoByteString> NewRawString(Factory* factory, int length) {
+ return factory->NewRawTwoByteString(length, NOT_TENURED);
+}
+
+template <>
+inline Handle<SeqAsciiString> NewRawString(Factory* factory, int length) {
+ return factory->NewRawAsciiString(length, NOT_TENURED);
+}
+
+
+// Scans the rest of a JSON string starting from position_ and writes
+// prefix[start..end] along with the scanned characters into a
+// sequential string of type StringType.
+template <bool seq_ascii>
+template <typename StringType, typename SinkChar>
+Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
+ Handle<String> prefix, int start, int end) {
+ int count = end - start;
+ int max_length = count + source_length_ - position_;
+ int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
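+  // Guess a capacity: at least twice the prefix (geometric growth on retry),
+  // but never more than the remaining source could possibly contribute.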
+ Handle<StringType> seq_str = NewRawString<StringType>(isolate()->factory(),
+ length);
+ // Copy prefix into seq_str.
+ SinkChar* dest = seq_str->GetChars();
+ String::WriteToFlat(*prefix, dest, start, end);
+
+ while (c0_ != '"') {
+ if (count >= length) {
+ // We need to create a longer sequential string for the result.
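+      // What has been written so far becomes the prefix of the retry, which
+      // allocates a string of twice the current length (again capped by the
+      // remaining source).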
+ return SlowScanJsonString<StringType, SinkChar>(seq_str, 0, count);
+ }
+ // Check for control character (0x00-0x1f) or unterminated string (<0).
+ if (c0_ < 0x20) return Handle<String>::null();
+ if (c0_ != '\\') {
+ // If the sink can contain UC16 characters, or source_ contains only
+ // ASCII characters, there's no need to test whether we can store the
+ // character. Otherwise check whether the UC16 source character can fit
+ // in the ASCII sink.
+ if (sizeof(SinkChar) == kUC16Size ||
+ seq_ascii ||
+ c0_ <= kMaxAsciiCharCode) {
+ SeqStringSet(seq_str, count++, c0_);
+ Advance();
+ } else {
+ // StringType is SeqAsciiString and we just read a non-ASCII char.
+ return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str, 0, count);
+ }
+ } else {
+ Advance(); // Advance past the \.
+ switch (c0_) {
+ case '"':
+ case '\\':
+ case '/':
+ SeqStringSet(seq_str, count++, c0_);
+ break;
+ case 'b':
+ SeqStringSet(seq_str, count++, '\x08');
+ break;
+ case 'f':
+ SeqStringSet(seq_str, count++, '\x0c');
+ break;
+ case 'n':
+ SeqStringSet(seq_str, count++, '\x0a');
+ break;
+ case 'r':
+ SeqStringSet(seq_str, count++, '\x0d');
+ break;
+ case 't':
+ SeqStringSet(seq_str, count++, '\x09');
+ break;
+ case 'u': {
+ uc32 value = 0;
+ for (int i = 0; i < 4; i++) {
+ Advance();
+ int digit = HexValue(c0_);
+ if (digit < 0) {
+ return Handle<String>::null();
+ }
+ value = value * 16 + digit;
+ }
+ if (sizeof(SinkChar) == kUC16Size || value <= kMaxAsciiCharCode) {
+ SeqStringSet(seq_str, count++, value);
+ break;
+ } else {
+ // StringType is SeqAsciiString and we just read a non-ASCII char.
+ position_ -= 6; // Rewind position_ to \ in \uxxxx.
+ Advance();
+ return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str,
+ 0,
+ count);
+ }
+ }
+ default:
+ return Handle<String>::null();
+ }
+ Advance();
+ }
+ }
+  // Shrink seq_str to the number of characters actually written.
+ if (isolate()->heap()->InNewSpace(*seq_str)) {
+ isolate()->heap()->new_space()->
+ template ShrinkStringAtAllocationBoundary<StringType>(
+ *seq_str, count);
+ } else {
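+    // Outside new space the allocation boundary cannot be moved, so pad the
+    // unused tail with a filler object to keep the heap iterable.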
+ int string_size = StringType::SizeFor(count);
+ int allocated_string_size = StringType::SizeFor(length);
+ int delta = allocated_string_size - string_size;
+ Address start_filler_object = seq_str->address() + string_size;
+ seq_str->set_length(count);
+ isolate()->heap()->CreateFillerObjectAt(start_filler_object, delta);
+ }
+ ASSERT_EQ('"', c0_);
+ // Advance past the last '"'.
+ AdvanceSkipWhitespace();
+ return seq_str;
+}
+
+
+template <bool seq_ascii>
+template <bool is_symbol>
+Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
+ ASSERT_EQ('"', c0_);
+ Advance();
+ if (c0_ == '"') {
+ AdvanceSkipWhitespace();
+ return Handle<String>(isolate()->heap()->empty_string());
+ }
+ int beg_pos = position_;
+ // Fast case for ASCII only without escape characters.
+ do {
+ // Check for control character (0x00-0x1f) or unterminated string (<0).
+ if (c0_ < 0x20) return Handle<String>::null();
+ if (c0_ != '\\') {
+ if (seq_ascii || c0_ <= kMaxAsciiCharCode) {
+ Advance();
+ } else {
+ return SlowScanJsonString<SeqTwoByteString, uc16>(source_,
+ beg_pos,
+ position_);
+ }
+ } else {
+ return SlowScanJsonString<SeqAsciiString, char>(source_,
+ beg_pos,
+ position_);
+ }
+ } while (c0_ != '"');
+ int length = position_ - beg_pos;
+ Handle<String> result;
+ if (seq_ascii && is_symbol) {
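+    // Property names are interned straight from the source substring, so
+    // repeated keys share one symbol and no intermediate string is allocated.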
+ result = isolate()->factory()->LookupAsciiSymbol(seq_source_,
+ beg_pos,
+ length);
+ } else {
+ result = isolate()->factory()->NewRawAsciiString(length);
+ char* dest = SeqAsciiString::cast(*result)->GetChars();
+ String::WriteToFlat(*source_, dest, beg_pos, position_);
+ }
+ ASSERT_EQ('"', c0_);
+ // Advance past the last '"'.
+ AdvanceSkipWhitespace();
+ return result;
+}
+
+} } // namespace v8::internal
+
+#endif // V8_JSON_PARSER_H_
diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js
index e6ada51b4..6c984a157 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/json.js
@@ -49,7 +49,7 @@ function Revive(holder, name, reviver) {
}
}
}
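+  // %_CallFunction invokes reviver with 'holder' as the receiver directly,
+  // so user code that replaces Function.prototype.call cannot interfere.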
- return reviver.call(holder, name, val);
+ return %_CallFunction(holder, name, val, reviver);
}
function JSONParse(text, reviver) {
@@ -63,11 +63,11 @@ function JSONParse(text, reviver) {
function SerializeArray(value, replacer, stack, indent, gap) {
if (!%PushIfAbsent(stack, value)) {
- throw MakeTypeError('circular_structure', []);
+ throw MakeTypeError('circular_structure', $Array());
}
var stepback = indent;
indent += gap;
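+  // InternalArray has a prototype that user code cannot reach, so a
+  // monkey-patched Array.prototype (e.g. a redefined push) cannot corrupt
+  // the serializer's bookkeeping.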
- var partial = [];
+ var partial = new InternalArray();
var len = value.length;
for (var i = 0; i < len; i++) {
var strP = JSONSerialize($String(i), value, replacer, stack,
@@ -93,11 +93,11 @@ function SerializeArray(value, replacer, stack, indent, gap) {
function SerializeObject(value, replacer, stack, indent, gap) {
if (!%PushIfAbsent(stack, value)) {
- throw MakeTypeError('circular_structure', []);
+ throw MakeTypeError('circular_structure', $Array());
}
var stepback = indent;
indent += gap;
- var partial = [];
+ var partial = new InternalArray();
if (IS_ARRAY(replacer)) {
var length = replacer.length;
for (var i = 0; i < length; i++) {
@@ -153,7 +153,7 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
if (IS_STRING(value)) {
return %QuoteJSONString(value);
} else if (IS_NUMBER(value)) {
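+    // JSON_NUMBER_TO_STRING (presumably a macro in macros.py) folds the old
+    // NUMBER_IS_FINITE check: finite numbers stringify normally, NaN and
+    // +/-Infinity become "null".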
- return NUMBER_IS_FINITE(value) ? $String(value) : "null";
+ return JSON_NUMBER_TO_STRING(value);
} else if (IS_BOOLEAN(value)) {
return value ? "true" : "false";
} else if (IS_NULL(value)) {
@@ -164,7 +164,7 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
return SerializeArray(value, replacer, stack, indent, gap);
} else if (IS_NUMBER_WRAPPER(value)) {
value = ToNumber(value);
- return NUMBER_IS_FINITE(value) ? ToString(value) : "null";
+ return JSON_NUMBER_TO_STRING(value);
} else if (IS_STRING_WRAPPER(value)) {
return %QuoteJSONString(ToString(value));
} else if (IS_BOOLEAN_WRAPPER(value)) {
@@ -185,37 +185,43 @@ function BasicSerializeArray(value, stack, builder) {
return;
}
if (!%PushIfAbsent(stack, value)) {
- throw MakeTypeError('circular_structure', []);
+ throw MakeTypeError('circular_structure', $Array());
}
builder.push("[");
var val = value[0];
if (IS_STRING(val)) {
// First entry is a string. Remaining entries are likely to be strings too.
- builder.push(%QuoteJSONString(val));
- for (var i = 1; i < len; i++) {
- val = value[i];
- if (IS_STRING(val)) {
- builder.push(%QuoteJSONStringComma(val));
- } else {
- builder.push(",");
- var before = builder.length;
- BasicJSONSerialize(i, value[i], stack, builder);
- if (before == builder.length) builder[before - 1] = ",null";
+ var array_string = %QuoteJSONStringArray(value);
+ if (!IS_UNDEFINED(array_string)) {
+    // array_string also includes the bracket characters, so we are done.
+ builder[builder.length - 1] = array_string;
+ stack.pop();
+ return;
+ } else {
+ builder.push(%QuoteJSONString(val));
+ for (var i = 1; i < len; i++) {
+ val = value[i];
+ if (IS_STRING(val)) {
+ builder.push(%QuoteJSONStringComma(val));
+ } else {
+ builder.push(",");
+ var before = builder.length;
+ BasicJSONSerialize(i, val, stack, builder);
+ if (before == builder.length) builder[before - 1] = ",null";
+ }
}
}
} else if (IS_NUMBER(val)) {
// First entry is a number. Remaining entries are likely to be numbers too.
- builder.push(NUMBER_IS_FINITE(val) ? %_NumberToString(val) : "null");
+ builder.push(JSON_NUMBER_TO_STRING(val));
for (var i = 1; i < len; i++) {
builder.push(",");
val = value[i];
if (IS_NUMBER(val)) {
- builder.push(NUMBER_IS_FINITE(val)
- ? %_NumberToString(val)
- : "null");
+ builder.push(JSON_NUMBER_TO_STRING(val));
} else {
var before = builder.length;
- BasicJSONSerialize(i, value[i], stack, builder);
+ BasicJSONSerialize(i, val, stack, builder);
if (before == builder.length) builder[before - 1] = ",null";
}
}
@@ -226,8 +232,7 @@ function BasicSerializeArray(value, stack, builder) {
for (var i = 1; i < len; i++) {
builder.push(",");
before = builder.length;
- val = value[i];
- BasicJSONSerialize(i, val, stack, builder);
+ BasicJSONSerialize(i, value[i], stack, builder);
if (before == builder.length) builder[before - 1] = ",null";
}
}
@@ -238,7 +243,7 @@ function BasicSerializeArray(value, stack, builder) {
function BasicSerializeObject(value, stack, builder) {
if (!%PushIfAbsent(stack, value)) {
- throw MakeTypeError('circular_structure', []);
+ throw MakeTypeError('circular_structure', $Array());
}
builder.push("{");
var first = true;
@@ -273,9 +278,9 @@ function BasicJSONSerialize(key, value, stack, builder) {
}
}
if (IS_STRING(value)) {
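+    // Checking for "" here avoids a %QuoteJSONString runtime call in the
+    // common empty-string case.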
- builder.push(%QuoteJSONString(value));
+ builder.push(value !== "" ? %QuoteJSONString(value) : '""');
} else if (IS_NUMBER(value)) {
- builder.push(NUMBER_IS_FINITE(value) ? %_NumberToString(value) : "null");
+ builder.push(JSON_NUMBER_TO_STRING(value));
} else if (IS_BOOLEAN(value)) {
builder.push(value ? "true" : "false");
} else if (IS_NULL(value)) {
@@ -285,7 +290,7 @@ function BasicJSONSerialize(key, value, stack, builder) {
// Unwrap value if necessary
if (IS_NUMBER_WRAPPER(value)) {
value = ToNumber(value);
- builder.push(NUMBER_IS_FINITE(value) ? %_NumberToString(value) : "null");
+ builder.push(JSON_NUMBER_TO_STRING(value));
} else if (IS_STRING_WRAPPER(value)) {
builder.push(%QuoteJSONString(ToString(value)));
} else if (IS_BOOLEAN_WRAPPER(value)) {
@@ -301,8 +306,8 @@ function BasicJSONSerialize(key, value, stack, builder) {
function JSONStringify(value, replacer, space) {
if (%_ArgumentsLength() == 1) {
- var builder = [];
- BasicJSONSerialize('', value, [], builder);
+ var builder = new InternalArray();
+ BasicJSONSerialize('', value, new InternalArray(), builder);
if (builder.length == 0) return;
var result = %_FastAsciiArrayJoin(builder, "");
if (!IS_UNDEFINED(result)) return result;
@@ -329,7 +334,7 @@ function JSONStringify(value, replacer, space) {
} else {
gap = "";
}
- return JSONSerialize('', {'': value}, replacer, [], "", gap);
+ return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
}
function SetupJSON() {
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index 8e7c35f58..73dbdb0ce 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -35,7 +35,6 @@
#include "platform.h"
#include "string-search.h"
#include "runtime.h"
-#include "top.h"
#include "compilation-cache.h"
#include "string-stream.h"
#include "parser.h"
@@ -51,6 +50,8 @@
#include "x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/regexp-macro-assembler-mips.h"
#else
#error Unsupported target architecture.
#endif
@@ -62,7 +63,6 @@
namespace v8 {
namespace internal {
-
Handle<Object> RegExpImpl::CreateRegExpLiteral(Handle<JSFunction> constructor,
Handle<String> pattern,
Handle<String> flags,
@@ -97,11 +97,14 @@ static inline void ThrowRegExpException(Handle<JSRegExp> re,
Handle<String> pattern,
Handle<String> error_text,
const char* message) {
- Handle<JSArray> array = Factory::NewJSArray(2);
- SetElement(array, 0, pattern);
- SetElement(array, 1, error_text);
- Handle<Object> regexp_err = Factory::NewSyntaxError(message, array);
- Top::Throw(*regexp_err);
+ Isolate* isolate = re->GetIsolate();
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(2);
+ elements->set(0, *pattern);
+ elements->set(1, *error_text);
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+ Handle<Object> regexp_err = factory->NewSyntaxError(message, array);
+ isolate->Throw(*regexp_err);
}
@@ -111,10 +114,12 @@ static inline void ThrowRegExpException(Handle<JSRegExp> re,
Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
Handle<String> pattern,
Handle<String> flag_str) {
+ Isolate* isolate = re->GetIsolate();
JSRegExp::Flags flags = RegExpFlagsFromString(flag_str);
- Handle<FixedArray> cached = CompilationCache::LookupRegExp(pattern, flags);
+ CompilationCache* compilation_cache = isolate->compilation_cache();
+ Handle<FixedArray> cached = compilation_cache->LookupRegExp(pattern, flags);
bool in_cache = !cached.is_null();
- LOG(RegExpCompileEvent(re, in_cache));
+ LOG(isolate, RegExpCompileEvent(re, in_cache));
Handle<Object> result;
if (in_cache) {
@@ -122,10 +127,10 @@ Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
return re;
}
pattern = FlattenGetString(pattern);
- CompilationZoneScope zone_scope(DELETE_ON_EXIT);
- PostponeInterruptsScope postpone;
+ ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+ PostponeInterruptsScope postpone(isolate);
RegExpCompileData parse_result;
- FlatStringReader reader(pattern);
+ FlatStringReader reader(isolate, pattern);
if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
&parse_result)) {
// Throw an exception if we fail to parse the pattern.
@@ -144,7 +149,8 @@ Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
parse_result.capture_count == 0) {
RegExpAtom* atom = parse_result.tree->AsAtom();
Vector<const uc16> atom_pattern = atom->data();
- Handle<String> atom_string = Factory::NewStringFromTwoByte(atom_pattern);
+ Handle<String> atom_string =
+ isolate->factory()->NewStringFromTwoByte(atom_pattern);
AtomCompile(re, pattern, flags, atom_string);
} else {
IrregexpInitialize(re, pattern, flags, parse_result.capture_count);
@@ -153,7 +159,7 @@ Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
// Compilation succeeded so the data is set on the regexp
// and we can store it in the cache.
Handle<FixedArray> data(FixedArray::cast(re->data()));
- CompilationCache::PutRegExp(pattern, flags, data);
+ compilation_cache->PutRegExp(pattern, flags, data);
return re;
}
@@ -169,7 +175,7 @@ Handle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp,
case JSRegExp::IRREGEXP: {
Handle<Object> result =
IrregexpExec(regexp, subject, index, last_match_info);
- ASSERT(!result.is_null() || Top::has_pending_exception());
+ ASSERT(!result.is_null() || Isolate::Current()->has_pending_exception());
return result;
}
default:
@@ -186,11 +192,11 @@ void RegExpImpl::AtomCompile(Handle<JSRegExp> re,
Handle<String> pattern,
JSRegExp::Flags flags,
Handle<String> match_pattern) {
- Factory::SetRegExpAtomData(re,
- JSRegExp::ATOM,
- pattern,
- flags,
- match_pattern);
+ re->GetIsolate()->factory()->SetRegExpAtomData(re,
+ JSRegExp::ATOM,
+ pattern,
+ flags,
+ match_pattern);
}
@@ -223,6 +229,8 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
Handle<String> subject,
int index,
Handle<JSArray> last_match_info) {
+ Isolate* isolate = re->GetIsolate();
+
ASSERT(0 <= index);
ASSERT(index <= subject->length());
@@ -236,24 +244,30 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
int needle_len = needle->length();
if (needle_len != 0) {
- if (index + needle_len > subject->length()) return Factory::null_value();
+    if (index + needle_len > subject->length()) {
+      return isolate->factory()->null_value();
+    }
+
// dispatch on type of strings
index = (needle->IsAsciiRepresentation()
? (seq_sub->IsAsciiRepresentation()
- ? SearchString(seq_sub->ToAsciiVector(),
+ ? SearchString(isolate,
+ seq_sub->ToAsciiVector(),
needle->ToAsciiVector(),
index)
- : SearchString(seq_sub->ToUC16Vector(),
+ : SearchString(isolate,
+ seq_sub->ToUC16Vector(),
needle->ToAsciiVector(),
index))
: (seq_sub->IsAsciiRepresentation()
- ? SearchString(seq_sub->ToAsciiVector(),
+ ? SearchString(isolate,
+ seq_sub->ToAsciiVector(),
needle->ToUC16Vector(),
index)
- : SearchString(seq_sub->ToUC16Vector(),
+ : SearchString(isolate,
+ seq_sub->ToUC16Vector(),
needle->ToUC16Vector(),
index)));
- if (index == -1) return Factory::null_value();
+    if (index == -1) return isolate->factory()->null_value();
}
ASSERT(last_match_info->HasFastElements());
@@ -287,13 +301,14 @@ bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii) {
bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re, bool is_ascii) {
// Compile the RegExp.
- CompilationZoneScope zone_scope(DELETE_ON_EXIT);
- PostponeInterruptsScope postpone;
+ Isolate* isolate = re->GetIsolate();
+ ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+ PostponeInterruptsScope postpone(isolate);
Object* entry = re->DataAt(JSRegExp::code_index(is_ascii));
if (entry->IsJSObject()) {
// If it's a JSObject, a previous compilation failed and threw this object.
// Re-throw the object without trying again.
- Top::Throw(entry);
+ isolate->Throw(entry);
return false;
}
ASSERT(entry->IsTheHole());
@@ -306,7 +321,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re, bool is_ascii) {
}
RegExpCompileData compile_data;
- FlatStringReader reader(pattern);
+ FlatStringReader reader(isolate, pattern);
if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
&compile_data)) {
// Throw an exception if we fail to parse the pattern.
@@ -325,14 +340,16 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re, bool is_ascii) {
is_ascii);
if (result.error_message != NULL) {
// Unable to compile regexp.
- Handle<JSArray> array = Factory::NewJSArray(2);
- SetElement(array, 0, pattern);
- SetElement(array,
- 1,
- Factory::NewStringFromUtf8(CStrVector(result.error_message)));
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(2);
+ elements->set(0, *pattern);
+ Handle<String> error_message =
+ factory->NewStringFromUtf8(CStrVector(result.error_message));
+ elements->set(1, *error_message);
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
Handle<Object> regexp_err =
- Factory::NewSyntaxError("malformed_regexp", array);
- Top::Throw(*regexp_err);
+ factory->NewSyntaxError("malformed_regexp", array);
+ isolate->Throw(*regexp_err);
re->SetDataAt(JSRegExp::code_index(is_ascii), *regexp_err);
return false;
}
@@ -384,11 +401,11 @@ void RegExpImpl::IrregexpInitialize(Handle<JSRegExp> re,
JSRegExp::Flags flags,
int capture_count) {
// Initialize compiled code entries to null.
- Factory::SetRegExpIrregexpData(re,
- JSRegExp::IRREGEXP,
- pattern,
- flags,
- capture_count);
+ re->GetIsolate()->factory()->SetRegExpIrregexpData(re,
+ JSRegExp::IRREGEXP,
+ pattern,
+ flags,
+ capture_count);
}
@@ -426,7 +443,9 @@ RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
Handle<String> subject,
int index,
Vector<int> output) {
- Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()));
+ Isolate* isolate = regexp->GetIsolate();
+
+ Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate);
ASSERT(index >= 0);
ASSERT(index <= subject->length());
@@ -434,24 +453,24 @@ RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
// A flat ASCII string might have a two-byte first part.
if (subject->IsConsString()) {
- subject = Handle<String>(ConsString::cast(*subject)->first());
+ subject = Handle<String>(ConsString::cast(*subject)->first(), isolate);
}
#ifndef V8_INTERPRETED_REGEXP
- ASSERT(output.length() >=
- (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
+ ASSERT(output.length() >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
do {
bool is_ascii = subject->IsAsciiRepresentation();
- Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii));
+ Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii), isolate);
NativeRegExpMacroAssembler::Result res =
NativeRegExpMacroAssembler::Match(code,
subject,
output.start(),
output.length(),
- index);
+ index,
+ isolate);
if (res != NativeRegExpMacroAssembler::RETRY) {
ASSERT(res != NativeRegExpMacroAssembler::EXCEPTION ||
- Top::has_pending_exception());
+ isolate->has_pending_exception());
STATIC_ASSERT(
static_cast<int>(NativeRegExpMacroAssembler::SUCCESS) == RE_SUCCESS);
STATIC_ASSERT(
@@ -482,9 +501,10 @@ RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
for (int i = number_of_capture_registers - 1; i >= 0; i--) {
register_vector[i] = -1;
}
- Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii));
+ Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii), isolate);
- if (IrregexpInterpreter::Match(byte_codes,
+ if (IrregexpInterpreter::Match(isolate,
+ byte_codes,
subject,
register_vector,
index)) {
@@ -514,7 +534,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
int required_registers = RegExpImpl::IrregexpPrepare(jsregexp, subject);
if (required_registers < 0) {
// Compiling failed with an exception.
- ASSERT(Top::has_pending_exception());
+ ASSERT(Isolate::Current()->has_pending_exception());
return Handle<Object>::null();
}
@@ -540,11 +560,11 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
return last_match_info;
}
if (res == RE_EXCEPTION) {
- ASSERT(Top::has_pending_exception());
+ ASSERT(Isolate::Current()->has_pending_exception());
return Handle<Object>::null();
}
ASSERT(res == RE_FAILURE);
- return Factory::null_value();
+ return Isolate::Current()->factory()->null_value();
}
@@ -790,7 +810,13 @@ class RegExpCompiler {
inline bool ignore_case() { return ignore_case_; }
inline bool ascii() { return ascii_; }
+ int current_expansion_factor() { return current_expansion_factor_; }
+ void set_current_expansion_factor(int value) {
+ current_expansion_factor_ = value;
+ }
+
static const int kNoRegister = -1;
+
private:
EndNode* accept_;
int next_register_;
@@ -800,6 +826,7 @@ class RegExpCompiler {
bool ignore_case_;
bool ascii_;
bool reg_exp_too_big_;
+ int current_expansion_factor_;
};
@@ -827,7 +854,8 @@ RegExpCompiler::RegExpCompiler(int capture_count, bool ignore_case, bool ascii)
recursion_depth_(0),
ignore_case_(ignore_case),
ascii_(ascii),
- reg_exp_too_big_(false) {
+ reg_exp_too_big_(false),
+ current_expansion_factor_(1) {
accept_ = new EndNode(EndNode::ACCEPT);
ASSERT(next_register_ - 1 <= RegExpMacroAssembler::kMaxRegister);
}
@@ -838,12 +866,25 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
RegExpNode* start,
int capture_count,
Handle<String> pattern) {
+ Heap* heap = pattern->GetHeap();
+
+ bool use_slow_safe_regexp_compiler = false;
+ if (heap->total_regexp_code_generated() >
+      RegExpImpl::kRegExpCompiledLimit &&
+ heap->isolate()->memory_allocator()->SizeExecutable() >
+ RegExpImpl::kRegExpExecutableMemoryLimit) {
+ use_slow_safe_regexp_compiler = true;
+ }
+
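+  // Past these limits, switch the macro assembler into its slow-but-safe
+  // mode rather than keep generating fast native regexp code.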
+ macro_assembler->set_slow_safe(use_slow_safe_regexp_compiler);
+
#ifdef DEBUG
if (FLAG_trace_regexp_assembler)
macro_assembler_ = new RegExpMacroAssemblerTracer(macro_assembler);
else
#endif
macro_assembler_ = macro_assembler;
+
List <RegExpNode*> work_list(0);
work_list_ = &work_list;
Label fail;
@@ -857,10 +898,13 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
}
if (reg_exp_too_big_) return IrregexpRegExpTooBig();
- Handle<Object> code = macro_assembler_->GetCode(pattern);
-
+ Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
+ heap->IncreaseTotalRegexpCodeGenerated(code->Size());
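+  // Feeds the running total used by the kRegExpCompiledLimit check above.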
work_list_ = NULL;
#ifdef DEBUG
+ if (FLAG_print_code) {
+ Handle<Code>::cast(code)->Disassemble(*pattern->ToCString());
+ }
if (FLAG_trace_regexp_assembler) {
delete macro_assembler_;
}
@@ -1302,16 +1346,14 @@ void ChoiceNode::GenerateGuard(RegExpMacroAssembler* macro_assembler,
}
-static unibrow::Mapping<unibrow::Ecma262UnCanonicalize> uncanonicalize;
-static unibrow::Mapping<unibrow::CanonicalizationRange> canonrange;
-
-
// Returns the number of characters in the equivalence class, omitting those
// that cannot occur in the source string because it is ASCII.
-static int GetCaseIndependentLetters(uc16 character,
+static int GetCaseIndependentLetters(Isolate* isolate,
+ uc16 character,
bool ascii_subject,
unibrow::uchar* letters) {
- int length = uncanonicalize.get(character, '\0', letters);
+ int length =
+ isolate->jsregexp_uncanonicalize()->get(character, '\0', letters);
// Unibrow returns 0 or 1 for characters where case independence is
// trivial.
if (length == 0) {
@@ -1327,7 +1369,8 @@ static int GetCaseIndependentLetters(uc16 character,
}
-static inline bool EmitSimpleCharacter(RegExpCompiler* compiler,
+static inline bool EmitSimpleCharacter(Isolate* isolate,
+ RegExpCompiler* compiler,
uc16 c,
Label* on_failure,
int cp_offset,
@@ -1349,7 +1392,8 @@ static inline bool EmitSimpleCharacter(RegExpCompiler* compiler,
// Only emits non-letters (things that don't have case). Only used for case
// independent matches.
-static inline bool EmitAtomNonLetter(RegExpCompiler* compiler,
+static inline bool EmitAtomNonLetter(Isolate* isolate,
+ RegExpCompiler* compiler,
uc16 c,
Label* on_failure,
int cp_offset,
@@ -1358,7 +1402,7 @@ static inline bool EmitAtomNonLetter(RegExpCompiler* compiler,
RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
bool ascii = compiler->ascii();
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int length = GetCaseIndependentLetters(c, ascii, chars);
+ int length = GetCaseIndependentLetters(isolate, c, ascii, chars);
if (length < 1) {
// This can't match. Must be an ASCII subject and a non-ASCII character.
// We do not need to do anything since the ASCII pass already handled this.
@@ -1420,7 +1464,8 @@ static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler,
}
-typedef bool EmitCharacterFunction(RegExpCompiler* compiler,
+typedef bool EmitCharacterFunction(Isolate* isolate,
+ RegExpCompiler* compiler,
uc16 c,
Label* on_failure,
int cp_offset,
@@ -1429,7 +1474,8 @@ typedef bool EmitCharacterFunction(RegExpCompiler* compiler,
// Only emits letters (things that have case). Only used for case independent
// matches.
-static inline bool EmitAtomLetter(RegExpCompiler* compiler,
+static inline bool EmitAtomLetter(Isolate* isolate,
+ RegExpCompiler* compiler,
uc16 c,
Label* on_failure,
int cp_offset,
@@ -1438,7 +1484,7 @@ static inline bool EmitAtomLetter(RegExpCompiler* compiler,
RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
bool ascii = compiler->ascii();
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int length = GetCaseIndependentLetters(c, ascii, chars);
+ int length = GetCaseIndependentLetters(isolate, c, ascii, chars);
if (length <= 1) return false;
// We may not need to check against the end of the input string
// if this character lies before a character that matched.
@@ -1876,6 +1922,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start) {
+ Isolate* isolate = Isolate::Current();
ASSERT(characters_filled_in < details->characters());
int characters = details->characters();
int char_mask;
@@ -1906,7 +1953,8 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
}
if (compiler->ignore_case()) {
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int length = GetCaseIndependentLetters(c, compiler->ascii(), chars);
+ int length = GetCaseIndependentLetters(isolate, c, compiler->ascii(),
+ chars);
ASSERT(length != 0); // Can only happen if c > char_mask (see above).
if (length == 1) {
// This letter has no case equivalents, so it's nice and simple
@@ -2406,6 +2454,7 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
Trace* trace,
bool first_element_checked,
int* checked_up_to) {
+ Isolate* isolate = Isolate::Current();
RegExpMacroAssembler* assembler = compiler->macro_assembler();
bool ascii = compiler->ascii();
Label* backtrack = trace->backtrack();
@@ -2441,7 +2490,8 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
break;
}
if (emit_function != NULL) {
- bool bound_checked = emit_function(compiler,
+ bool bound_checked = emit_function(isolate,
+ compiler,
quarks[j],
backtrack,
cp_offset + j,
@@ -2725,6 +2775,7 @@ class AlternativeGenerationList {
AlternativeGeneration* at(int i) {
return alt_gens_[i];
}
+
private:
static const int kAFew = 10;
ZoneList<AlternativeGeneration*> alt_gens_;
@@ -3284,6 +3335,7 @@ class TableEntryHeaderPrinter {
}
stream()->Add("}}");
}
+
private:
bool first_;
StringStream* stream() { return stream_; }
@@ -3685,6 +3737,44 @@ RegExpNode* RegExpQuantifier::ToNode(RegExpCompiler* compiler,
}
+// Scoped object to keep track of how much we unroll quantifier loops in the
+// regexp graph generator.
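+// For example, /a{3,5}/ enters with a factor of min + 1 = 4; nested
+// quantifiers multiply their factors, and unrolling is abandoned once the
+// product exceeds kMaxExpansionFactor.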
+class RegExpExpansionLimiter {
+ public:
+ static const int kMaxExpansionFactor = 6;
+ RegExpExpansionLimiter(RegExpCompiler* compiler, int factor)
+ : compiler_(compiler),
+ saved_expansion_factor_(compiler->current_expansion_factor()),
+ ok_to_expand_(saved_expansion_factor_ <= kMaxExpansionFactor) {
+ ASSERT(factor > 0);
+ if (ok_to_expand_) {
+ if (factor > kMaxExpansionFactor) {
+ // Avoid integer overflow of the current expansion factor.
+ ok_to_expand_ = false;
+ compiler->set_current_expansion_factor(kMaxExpansionFactor + 1);
+ } else {
+ int new_factor = saved_expansion_factor_ * factor;
+ ok_to_expand_ = (new_factor <= kMaxExpansionFactor);
+ compiler->set_current_expansion_factor(new_factor);
+ }
+ }
+ }
+
+ ~RegExpExpansionLimiter() {
+ compiler_->set_current_expansion_factor(saved_expansion_factor_);
+ }
+
+ bool ok_to_expand() { return ok_to_expand_; }
+
+ private:
+ RegExpCompiler* compiler_;
+ int saved_expansion_factor_;
+ bool ok_to_expand_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpExpansionLimiter);
+};
+
+
RegExpNode* RegExpQuantifier::ToNode(int min,
int max,
bool is_greedy,
@@ -3724,38 +3814,46 @@ RegExpNode* RegExpQuantifier::ToNode(int min,
} else if (FLAG_regexp_optimization && !needs_capture_clearing) {
// Only unroll if there are no captures and the body can't be
// empty.
- if (min > 0 && min <= kMaxUnrolledMinMatches) {
- int new_max = (max == kInfinity) ? max : max - min;
- // Recurse once to get the loop or optional matches after the fixed ones.
- RegExpNode* answer = ToNode(
- 0, new_max, is_greedy, body, compiler, on_success, true);
- // Unroll the forced matches from 0 to min. This can cause chains of
- // TextNodes (which the parser does not generate). These should be
- // combined if it turns out they hinder good code generation.
- for (int i = 0; i < min; i++) {
- answer = body->ToNode(compiler, answer);
+ {
+ RegExpExpansionLimiter limiter(
+ compiler, min + ((max != min) ? 1 : 0));
+ if (min > 0 && min <= kMaxUnrolledMinMatches && limiter.ok_to_expand()) {
+ int new_max = (max == kInfinity) ? max : max - min;
+ // Recurse once to get the loop or optional matches after the fixed
+ // ones.
+ RegExpNode* answer = ToNode(
+ 0, new_max, is_greedy, body, compiler, on_success, true);
+ // Unroll the forced matches from 0 to min. This can cause chains of
+ // TextNodes (which the parser does not generate). These should be
+ // combined if it turns out they hinder good code generation.
+ for (int i = 0; i < min; i++) {
+ answer = body->ToNode(compiler, answer);
+ }
+ return answer;
}
- return answer;
}
- if (max <= kMaxUnrolledMaxMatches) {
- ASSERT(min == 0);
- // Unroll the optional matches up to max.
- RegExpNode* answer = on_success;
- for (int i = 0; i < max; i++) {
- ChoiceNode* alternation = new ChoiceNode(2);
- if (is_greedy) {
- alternation->AddAlternative(GuardedAlternative(body->ToNode(compiler,
- answer)));
- alternation->AddAlternative(GuardedAlternative(on_success));
- } else {
- alternation->AddAlternative(GuardedAlternative(on_success));
- alternation->AddAlternative(GuardedAlternative(body->ToNode(compiler,
- answer)));
+ if (max <= kMaxUnrolledMaxMatches && min == 0) {
+ ASSERT(max > 0); // Due to the 'if' above.
+ RegExpExpansionLimiter limiter(compiler, max);
+ if (limiter.ok_to_expand()) {
+ // Unroll the optional matches up to max.
+ RegExpNode* answer = on_success;
+ for (int i = 0; i < max; i++) {
+ ChoiceNode* alternation = new ChoiceNode(2);
+ if (is_greedy) {
+ alternation->AddAlternative(
+ GuardedAlternative(body->ToNode(compiler, answer)));
+ alternation->AddAlternative(GuardedAlternative(on_success));
+ } else {
+ alternation->AddAlternative(GuardedAlternative(on_success));
+ alternation->AddAlternative(
+ GuardedAlternative(body->ToNode(compiler, answer)));
+ }
+ answer = alternation;
+ if (not_at_start) alternation->set_not_at_start();
}
- answer = alternation;
- if (not_at_start) alternation->set_not_at_start();
+ return answer;
}
- return answer;
}
}
bool has_min = min > 0;
@@ -4081,13 +4179,9 @@ void CharacterRange::Split(ZoneList<CharacterRange>* base,
}
-static void AddUncanonicals(ZoneList<CharacterRange>* ranges,
- int bottom,
- int top);
-
-
void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
bool is_ascii) {
+ Isolate* isolate = Isolate::Current();
uc16 bottom = from();
uc16 top = to();
if (is_ascii) {
@@ -4097,7 +4191,7 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
if (top == bottom) {
// If this is a singleton we just expand the one character.
- int length = uncanonicalize.get(bottom, '\0', chars);
+ int length = isolate->jsregexp_uncanonicalize()->get(bottom, '\0', chars);
for (int i = 0; i < length; i++) {
uc32 chr = chars[i];
if (chr != bottom) {
@@ -4126,7 +4220,7 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
unibrow::uchar range[unibrow::Ecma262UnCanonicalize::kMaxWidth];
int pos = bottom;
while (pos < top) {
- int length = canonrange.get(pos, '\0', range);
+ int length = isolate->jsregexp_canonrange()->get(pos, '\0', range);
uc16 block_end;
if (length == 0) {
block_end = pos;
@@ -4135,7 +4229,7 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
block_end = range[0];
}
int end = (block_end > top) ? top : block_end;
- length = uncanonicalize.get(block_end, '\0', range);
+ length = isolate->jsregexp_uncanonicalize()->get(block_end, '\0', range);
for (int i = 0; i < length; i++) {
uc32 c = range[i];
uc16 range_from = c - (block_end - pos);
@@ -4245,99 +4339,6 @@ SetRelation CharacterRange::WordCharacterRelation(
}
-static void AddUncanonicals(ZoneList<CharacterRange>* ranges,
- int bottom,
- int top) {
- unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- // Zones with no case mappings. There is a DEBUG-mode loop to assert that
- // this table is correct.
- // 0x0600 - 0x0fff
- // 0x1100 - 0x1cff
- // 0x2000 - 0x20ff
- // 0x2200 - 0x23ff
- // 0x2500 - 0x2bff
- // 0x2e00 - 0xa5ff
- // 0xa800 - 0xfaff
- // 0xfc00 - 0xfeff
- const int boundary_count = 18;
- int boundaries[] = {
- 0x600, 0x1000, 0x1100, 0x1d00, 0x2000, 0x2100, 0x2200, 0x2400, 0x2500,
- 0x2c00, 0x2e00, 0xa600, 0xa800, 0xfb00, 0xfc00, 0xff00};
-
- // Special ASCII rule from spec can save us some work here.
- if (bottom == 0x80 && top == 0xffff) return;
-
- if (top <= boundaries[0]) {
- CharacterRange range(bottom, top);
- range.AddCaseEquivalents(ranges, false);
- return;
- }
-
- // Split up very large ranges. This helps remove ranges where there are no
- // case mappings.
- for (int i = 0; i < boundary_count; i++) {
- if (bottom < boundaries[i] && top >= boundaries[i]) {
- AddUncanonicals(ranges, bottom, boundaries[i] - 1);
- AddUncanonicals(ranges, boundaries[i], top);
- return;
- }
- }
-
- // If we are completely in a zone with no case mappings then we are done.
- for (int i = 0; i < boundary_count; i += 2) {
- if (bottom >= boundaries[i] && top < boundaries[i + 1]) {
-#ifdef DEBUG
- for (int j = bottom; j <= top; j++) {
- unsigned current_char = j;
- int length = uncanonicalize.get(current_char, '\0', chars);
- for (int k = 0; k < length; k++) {
- ASSERT(chars[k] == current_char);
- }
- }
-#endif
- return;
- }
- }
-
- // Step through the range finding equivalent characters.
- ZoneList<unibrow::uchar> *characters = new ZoneList<unibrow::uchar>(100);
- for (int i = bottom; i <= top; i++) {
- int length = uncanonicalize.get(i, '\0', chars);
- for (int j = 0; j < length; j++) {
- uc32 chr = chars[j];
- if (chr != i && (chr < bottom || chr > top)) {
- characters->Add(chr);
- }
- }
- }
-
- // Step through the equivalent characters finding simple ranges and
- // adding ranges to the character class.
- if (characters->length() > 0) {
- int new_from = characters->at(0);
- int new_to = new_from;
- for (int i = 1; i < characters->length(); i++) {
- int chr = characters->at(i);
- if (chr == new_to + 1) {
- new_to++;
- } else {
- if (new_to == new_from) {
- ranges->Add(CharacterRange::Singleton(new_from));
- } else {
- ranges->Add(CharacterRange(new_from, new_to));
- }
- new_from = new_to = chr;
- }
- }
- if (new_to == new_from) {
- ranges->Add(CharacterRange::Singleton(new_from));
- } else {
- ranges->Add(CharacterRange(new_from, new_to));
- }
- }
-}
-
-
ZoneList<CharacterRange>* CharacterSet::ranges() {
if (ranges_ == NULL) {
ranges_ = new ZoneList<CharacterRange>(2);
@@ -4824,7 +4825,7 @@ OutSet* DispatchTable::Get(uc16 value) {
void Analysis::EnsureAnalyzed(RegExpNode* that) {
- StackLimitCheck check;
+ StackLimitCheck check(Isolate::Current());
if (check.HasOverflowed()) {
fail("Stack overflow");
return;
@@ -5310,6 +5311,8 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
RegExpMacroAssemblerX64 macro_assembler(mode, (data->capture_count + 1) * 2);
#elif V8_TARGET_ARCH_ARM
RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2);
+#elif V8_TARGET_ARCH_MIPS
+ RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2);
#endif
#else // V8_INTERPRETED_REGEXP
@@ -5334,7 +5337,4 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
}
-int OffsetsVector::static_offsets_vector_[
- OffsetsVector::kStaticOffsetsVectorSize];
-
}} // namespace v8::internal
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h
index af28a8722..58958d851 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/jsregexp.h
@@ -28,6 +28,7 @@
#ifndef V8_JSREGEXP_H_
#define V8_JSREGEXP_H_
+#include "allocation.h"
#include "macro-assembler.h"
#include "zone-inl.h"
@@ -175,6 +176,14 @@ class RegExpImpl {
static ByteArray* IrregexpByteCode(FixedArray* re, bool is_ascii);
static Code* IrregexpNativeCode(FixedArray* re, bool is_ascii);
+  // Limit the space regexps take up on the heap. Ideally we would track the
+  // amount of regexp code currently live on the heap, but that is not
+  // recorded. As a conservative approximation we instead track the total
+  // regexp code ever compiled (including code that has since been freed)
+  // together with the total executable memory in use at any point.
+  static const int kRegExpExecutableMemoryLimit = 16 * MB;
+  static const int kRegExpCompiledLimit = 1 * MB;
+
private:
static String* last_ascii_string_;
static String* two_byte_cached_string_;
@@ -1424,7 +1433,7 @@ class RegExpEngine: public AllStatic {
struct CompilationResult {
explicit CompilationResult(const char* error_message)
: error_message(error_message),
- code(Heap::the_hole_value()),
+ code(HEAP->the_hole_value()),
num_registers(0) {}
CompilationResult(Object* code, int registers)
: error_message(NULL),
@@ -1447,16 +1456,16 @@ class RegExpEngine: public AllStatic {
class OffsetsVector {
public:
- inline OffsetsVector(int num_registers)
+ explicit inline OffsetsVector(int num_registers)
: offsets_vector_length_(num_registers) {
- if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
+ if (offsets_vector_length_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
vector_ = NewArray<int>(offsets_vector_length_);
} else {
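+      // Small register counts reuse the per-isolate scratch buffer and avoid
+      // a heap allocation entirely.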
- vector_ = static_offsets_vector_;
+ vector_ = Isolate::Current()->jsregexp_static_offsets_vector();
}
}
inline ~OffsetsVector() {
- if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
+ if (offsets_vector_length_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
DeleteArray(vector_);
vector_ = NULL;
}
@@ -1467,13 +1476,12 @@ class OffsetsVector {
static const int kStaticOffsetsVectorSize = 50;
private:
- static Address static_offsets_vector_address() {
- return reinterpret_cast<Address>(&static_offsets_vector_);
+ static Address static_offsets_vector_address(Isolate* isolate) {
+ return reinterpret_cast<Address>(isolate->jsregexp_static_offsets_vector());
}
int* vector_;
int offsets_vector_length_;
- static int static_offsets_vector_[kStaticOffsetsVectorSize];
friend class ExternalReference;
};
diff --git a/deps/v8/src/jump-target-heavy-inl.h b/deps/v8/src/jump-target-heavy-inl.h
deleted file mode 100644
index 0a2a5691f..000000000
--- a/deps/v8/src/jump-target-heavy-inl.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_HEAVY_INL_H_
-#define V8_JUMP_TARGET_HEAVY_INL_H_
-
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
- FrameElement* element = &entry_frame_->elements_[index];
- element->clear_copied();
- if (target->is_register()) {
- entry_frame_->set_register_location(target->reg(), index);
- } else if (target->is_copy()) {
- entry_frame_->elements_[target->index()].set_copied();
- }
- if (direction_ == BIDIRECTIONAL && !target->is_copy()) {
- element->set_type_info(TypeInfo::Unknown());
- }
-}
-
-} } // namespace v8::internal
-
-#endif // V8_JUMP_TARGET_HEAVY_INL_H_
diff --git a/deps/v8/src/jump-target-heavy.cc b/deps/v8/src/jump-target-heavy.cc
deleted file mode 100644
index c3c22f1ac..000000000
--- a/deps/v8/src/jump-target-heavy.cc
+++ /dev/null
@@ -1,430 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-bool JumpTarget::compiling_deferred_code_ = false;
-
-
-void JumpTarget::Jump(Result* arg) {
- ASSERT(cgen()->has_valid_frame());
-
- cgen()->frame()->Push(arg);
- DoJump();
-}
-
-
-void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
- ASSERT(cgen()->has_valid_frame());
-
- // We want to check that non-frame registers at the call site stay in
- // the same registers on the fall-through branch.
-#ifdef DEBUG
- Result::Type arg_type = arg->type();
- Register arg_reg = arg->is_register() ? arg->reg() : no_reg;
-#endif
-
- cgen()->frame()->Push(arg);
- DoBranch(cc, hint);
- *arg = cgen()->frame()->Pop();
-
- ASSERT(arg->type() == arg_type);
- ASSERT(!arg->is_register() || arg->reg().is(arg_reg));
-}
-
-
-void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
- ASSERT(cgen()->has_valid_frame());
-
- // We want to check that non-frame registers at the call site stay in
- // the same registers on the fall-through branch.
-#ifdef DEBUG
- Result::Type arg0_type = arg0->type();
- Register arg0_reg = arg0->is_register() ? arg0->reg() : no_reg;
- Result::Type arg1_type = arg1->type();
- Register arg1_reg = arg1->is_register() ? arg1->reg() : no_reg;
-#endif
-
- cgen()->frame()->Push(arg0);
- cgen()->frame()->Push(arg1);
- DoBranch(cc, hint);
- *arg1 = cgen()->frame()->Pop();
- *arg0 = cgen()->frame()->Pop();
-
- ASSERT(arg0->type() == arg0_type);
- ASSERT(!arg0->is_register() || arg0->reg().is(arg0_reg));
- ASSERT(arg1->type() == arg1_type);
- ASSERT(!arg1->is_register() || arg1->reg().is(arg1_reg));
-}
-
-
-void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
- ASSERT(cgen()->has_valid_frame());
-
- int count = cgen()->frame()->height() - expected_height_;
- if (count > 0) {
- // We negate and branch here rather than using DoBranch's negate
- // and branch. This gives us a hook to remove statement state
- // from the frame.
- JumpTarget fall_through;
- // Branch to fall through will not negate, because it is a
- // forward-only target.
- fall_through.Branch(NegateCondition(cc), NegateHint(hint));
- Jump(arg); // May emit merge code here.
- fall_through.Bind();
- } else {
-#ifdef DEBUG
- Result::Type arg_type = arg->type();
- Register arg_reg = arg->is_register() ? arg->reg() : no_reg;
-#endif
- cgen()->frame()->Push(arg);
- DoBranch(cc, hint);
- *arg = cgen()->frame()->Pop();
- ASSERT(arg->type() == arg_type);
- ASSERT(!arg->is_register() || arg->reg().is(arg_reg));
- }
-}
-
-
-void JumpTarget::Bind(Result* arg) {
- if (cgen()->has_valid_frame()) {
- cgen()->frame()->Push(arg);
- }
- DoBind();
- *arg = cgen()->frame()->Pop();
-}
-
-
-void JumpTarget::Bind(Result* arg0, Result* arg1) {
- if (cgen()->has_valid_frame()) {
- cgen()->frame()->Push(arg0);
- cgen()->frame()->Push(arg1);
- }
- DoBind();
- *arg1 = cgen()->frame()->Pop();
- *arg0 = cgen()->frame()->Pop();
-}
-
-
-void JumpTarget::ComputeEntryFrame() {
- // Given: a collection of frames reaching by forward CFG edges and
- // the directionality of the block. Compute: an entry frame for the
- // block.
-
- Counters::compute_entry_frame.Increment();
-#ifdef DEBUG
- if (compiling_deferred_code_) {
- ASSERT(reaching_frames_.length() > 1);
- VirtualFrame* frame = reaching_frames_[0];
- bool all_identical = true;
- for (int i = 1; i < reaching_frames_.length(); i++) {
- if (!frame->Equals(reaching_frames_[i])) {
- all_identical = false;
- break;
- }
- }
- ASSERT(!all_identical || all_identical);
- }
-#endif
-
- // Choose an initial frame.
- VirtualFrame* initial_frame = reaching_frames_[0];
-
- // A list of pointers to frame elements in the entry frame. NULL
- // indicates that the element has not yet been determined.
- int length = initial_frame->element_count();
- ZoneList<FrameElement*> elements(length);
-
- // Initially populate the list of elements based on the initial
- // frame.
- for (int i = 0; i < length; i++) {
- FrameElement element = initial_frame->elements_[i];
- // We do not allow copies or constants in bidirectional frames.
- if (direction_ == BIDIRECTIONAL) {
- if (element.is_constant() || element.is_copy()) {
- elements.Add(NULL);
- continue;
- }
- }
- elements.Add(&initial_frame->elements_[i]);
- }
-
- // Compute elements based on the other reaching frames.
- if (reaching_frames_.length() > 1) {
- for (int i = 0; i < length; i++) {
- FrameElement* element = elements[i];
- for (int j = 1; j < reaching_frames_.length(); j++) {
- // Element computation is monotonic: new information will not
- // change our decision about undetermined or invalid elements.
- if (element == NULL || !element->is_valid()) break;
-
- FrameElement* other = &reaching_frames_[j]->elements_[i];
- element = element->Combine(other);
- if (element != NULL && !element->is_copy()) {
- ASSERT(other != NULL);
- // We overwrite the number information of one of the incoming frames.
- // This is safe because we only use the frame for emitting merge code.
- // The number information of incoming frames is not used anymore.
- element->set_type_info(TypeInfo::Combine(element->type_info(),
- other->type_info()));
- }
- }
- elements[i] = element;
- }
- }
-
- // Build the new frame. A freshly allocated frame has memory elements
- // for the parameters and some platform-dependent elements (e.g.,
- // return address). Replace those first.
- entry_frame_ = new VirtualFrame();
- int index = 0;
- for (; index < entry_frame_->element_count(); index++) {
- FrameElement* target = elements[index];
- // If the element is determined, set it now. Count registers. Mark
- // elements as copied exactly when they have a copy. Undetermined
- // elements are initially recorded as if in memory.
- if (target != NULL) {
- entry_frame_->elements_[index] = *target;
- InitializeEntryElement(index, target);
- }
- }
- // Then fill in the rest of the frame with new elements.
- for (; index < length; index++) {
- FrameElement* target = elements[index];
- if (target == NULL) {
- entry_frame_->elements_.Add(
- FrameElement::MemoryElement(TypeInfo::Uninitialized()));
- } else {
- entry_frame_->elements_.Add(*target);
- InitializeEntryElement(index, target);
- }
- }
-
- // Allocate any still-undetermined frame elements to registers or
- // memory, from the top down.
- for (int i = length - 1; i >= 0; i--) {
- if (elements[i] == NULL) {
- // Loop over all the reaching frames to check whether the element
- // is synced on all frames and to count the registers it occupies.
- bool is_synced = true;
- RegisterFile candidate_registers;
- int best_count = kMinInt;
- int best_reg_num = RegisterAllocator::kInvalidRegister;
- TypeInfo info = TypeInfo::Uninitialized();
-
- for (int j = 0; j < reaching_frames_.length(); j++) {
- FrameElement element = reaching_frames_[j]->elements_[i];
- if (direction_ == BIDIRECTIONAL) {
- info = TypeInfo::Unknown();
- } else if (!element.is_copy()) {
- info = TypeInfo::Combine(info, element.type_info());
- } else {
- // New elements will not be copies, so get number information from
- // backing element in the reaching frame.
- info = TypeInfo::Combine(info,
- reaching_frames_[j]->elements_[element.index()].type_info());
- }
- is_synced = is_synced && element.is_synced();
- if (element.is_register() && !entry_frame_->is_used(element.reg())) {
- // Count the register occurrence and remember it if better
- // than the previous best.
- int num = RegisterAllocator::ToNumber(element.reg());
- candidate_registers.Use(num);
- if (candidate_registers.count(num) > best_count) {
- best_count = candidate_registers.count(num);
- best_reg_num = num;
- }
- }
- }
-
- // We must have a number type information now (not for copied elements).
- ASSERT(entry_frame_->elements_[i].is_copy()
- || !info.IsUninitialized());
-
- // If the value is synced on all frames, put it in memory. This
- // costs nothing at the merge code but will incur a
- // memory-to-register move when the value is needed later.
- if (is_synced) {
- // Already recorded as a memory element.
- // Set combined number info.
- entry_frame_->elements_[i].set_type_info(info);
- continue;
- }
-
- // Try to put it in a register. If there was no best choice
- // consider any free register.
- if (best_reg_num == RegisterAllocator::kInvalidRegister) {
- for (int j = 0; j < RegisterAllocator::kNumRegisters; j++) {
- if (!entry_frame_->is_used(j)) {
- best_reg_num = j;
- break;
- }
- }
- }
-
- if (best_reg_num != RegisterAllocator::kInvalidRegister) {
- // If there was a register choice, use it. Preserve the copied
- // flag on the element.
- bool is_copied = entry_frame_->elements_[i].is_copied();
- Register reg = RegisterAllocator::ToRegister(best_reg_num);
- entry_frame_->elements_[i] =
- FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED,
- TypeInfo::Uninitialized());
- if (is_copied) entry_frame_->elements_[i].set_copied();
- entry_frame_->set_register_location(reg, i);
- }
- // Set combined number info.
- entry_frame_->elements_[i].set_type_info(info);
- }
- }
-
- // If we have incoming backward edges assert we forget all number information.
-#ifdef DEBUG
- if (direction_ == BIDIRECTIONAL) {
- for (int i = 0; i < length; ++i) {
- if (!entry_frame_->elements_[i].is_copy()) {
- ASSERT(entry_frame_->elements_[i].type_info().IsUnknown());
- }
- }
- }
-#endif
-
- // The stack pointer is at the highest synced element or the base of
- // the expression stack.
- int stack_pointer = length - 1;
- while (stack_pointer >= entry_frame_->expression_base_index() &&
- !entry_frame_->elements_[stack_pointer].is_synced()) {
- stack_pointer--;
- }
- entry_frame_->stack_pointer_ = stack_pointer;
-}
-
-
-FrameRegisterState::FrameRegisterState(VirtualFrame* frame) {
- // Copy the register locations from the code generator's frame.
- // These are the registers that will be spilled on entry to the
- // deferred code and restored on exit.
- int sp_offset = frame->fp_relative(frame->stack_pointer_);
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- int loc = frame->register_location(i);
- if (loc == VirtualFrame::kIllegalIndex) {
- registers_[i] = kIgnore;
- } else if (frame->elements_[loc].is_synced()) {
- // Needs to be restored on exit but not saved on entry.
- registers_[i] = frame->fp_relative(loc) | kSyncedFlag;
- } else {
- int offset = frame->fp_relative(loc);
- registers_[i] = (offset < sp_offset) ? kPush : offset;
- }
- }
-}
-
-
-void JumpTarget::Unuse() {
- reaching_frames_.Clear();
- merge_labels_.Clear();
- entry_frame_ = NULL;
- entry_label_.Unuse();
-}
-
-
-void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
- ASSERT(reaching_frames_.length() == merge_labels_.length());
- ASSERT(entry_frame_ == NULL);
- Label fresh;
- merge_labels_.Add(fresh);
- reaching_frames_.Add(frame);
-}
-
-
-// -------------------------------------------------------------------------
-// BreakTarget implementation.
-
-void BreakTarget::set_direction(Directionality direction) {
- JumpTarget::set_direction(direction);
- ASSERT(cgen()->has_valid_frame());
- expected_height_ = cgen()->frame()->height();
-}
-
-
-void BreakTarget::CopyTo(BreakTarget* destination) {
- ASSERT(destination != NULL);
- destination->direction_ = direction_;
- destination->reaching_frames_.Rewind(0);
- destination->reaching_frames_.AddAll(reaching_frames_);
- destination->merge_labels_.Rewind(0);
- destination->merge_labels_.AddAll(merge_labels_);
- destination->entry_frame_ = entry_frame_;
- destination->entry_label_ = entry_label_;
- destination->expected_height_ = expected_height_;
-}
-
-
-void BreakTarget::Branch(Condition cc, Hint hint) {
- ASSERT(cgen()->has_valid_frame());
-
- int count = cgen()->frame()->height() - expected_height_;
- if (count > 0) {
- // We negate and branch here rather than using DoBranch's negate
- // and branch. This gives us a hook to remove statement state
- // from the frame.
- JumpTarget fall_through;
- // Branch to fall through will not negate, because it is a
- // forward-only target.
- fall_through.Branch(NegateCondition(cc), NegateHint(hint));
- Jump(); // May emit merge code here.
- fall_through.Bind();
- } else {
- DoBranch(cc, hint);
- }
-}
-
-
-DeferredCode::DeferredCode()
- : masm_(CodeGeneratorScope::Current()->masm()),
- statement_position_(masm_->positions_recorder()->
- current_statement_position()),
- position_(masm_->positions_recorder()->current_position()),
- frame_state_(CodeGeneratorScope::Current()->frame()) {
- ASSERT(statement_position_ != RelocInfo::kNoPosition);
- ASSERT(position_ != RelocInfo::kNoPosition);
-
- CodeGeneratorScope::Current()->AddDeferred(this);
-#ifdef DEBUG
- comment_ = "";
-#endif
-}
-
-} } // namespace v8::internal
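
A note on the deleted FrameRegisterState encoding: each registers_ entry
collapses to one of two sentinels or an fp-relative offset, optionally tagged
with kSyncedFlag. A standalone C++ sketch of decoding such a word; the
concrete constant values below are invented, only the scheme matches:

    #include <cstdio>

    // Invented stand-ins for the sentinels used above.
    const int kIgnore = -1;
    const int kPush = -2;
    const int kSyncedFlag = 1 << 30;

    void Describe(int state) {
      if (state == kIgnore) {
        std::printf("not in the frame; skip\n");
      } else if (state == kPush) {
        std::printf("above the stack pointer; push on entry\n");
      } else if (state & kSyncedFlag) {
        // Already in memory: restored on exit but not saved on entry.
        std::printf("restore from fp%+d on exit\n", state & ~kSyncedFlag);
      } else {
        std::printf("save to fp%+d on entry, reload on exit\n", state);
      }
    }

    int main() {
      Describe(kIgnore);
      Describe(kPush);
      Describe(8 | kSyncedFlag);
      Describe(12);
    }
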
diff --git a/deps/v8/src/jump-target-heavy.h b/deps/v8/src/jump-target-heavy.h
deleted file mode 100644
index 8cec86926..000000000
--- a/deps/v8/src/jump-target-heavy.h
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_HEAVY_H_
-#define V8_JUMP_TARGET_HEAVY_H_
-
-#include "macro-assembler.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class FrameElement;
-class Result;
-class VirtualFrame;
-
-// -------------------------------------------------------------------------
-// Jump targets
-//
-// A jump target is an abstraction of a basic-block entry in generated
-// code. It collects all the virtual frames reaching the block by
-// forward jumps and pairs them with labels for the merge code along
-// all forward-reaching paths. When bound, an expected frame for the
-// block is determined and code is generated to merge to the expected
-// frame. For backward jumps, the merge code is generated at the edge
-// leaving the predecessor block.
-//
-// A jump target must have been reached via control flow (either by
-// jumping, branching, or falling through) at the time it is bound.
-// In particular, this means that at least one of the control-flow
-// graph edges reaching the target must be a forward edge.
-
-class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
- public:
- // Forward-only jump targets can only be reached by forward CFG edges.
- enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
-
- // Construct a jump target used to generate code and to provide
- // access to a current frame.
- explicit JumpTarget(Directionality direction)
- : direction_(direction),
- reaching_frames_(0),
- merge_labels_(0),
- entry_frame_(NULL) {
- }
-
- // Construct a jump target.
- JumpTarget()
- : direction_(FORWARD_ONLY),
- reaching_frames_(0),
- merge_labels_(0),
- entry_frame_(NULL) {
- }
-
- virtual ~JumpTarget() {}
-
- // Set the direction of the jump target.
- virtual void set_direction(Directionality direction) {
- direction_ = direction;
- }
-
- // Treat the jump target as a fresh one. The state is reset.
- void Unuse();
-
- inline CodeGenerator* cgen();
-
- Label* entry_label() { return &entry_label_; }
-
- VirtualFrame* entry_frame() const { return entry_frame_; }
- void set_entry_frame(VirtualFrame* frame) {
- entry_frame_ = frame;
- }
-
- // Predicates testing the state of the encapsulated label.
- bool is_bound() const { return entry_label_.is_bound(); }
- bool is_linked() const {
- return !is_bound() && !reaching_frames_.is_empty();
- }
- bool is_unused() const {
- // This is !is_bound() && !is_linked().
- return !is_bound() && reaching_frames_.is_empty();
- }
-
- // Emit a jump to the target. There must be a current frame at the
- // jump and there will be no current frame after the jump.
- virtual void Jump();
- virtual void Jump(Result* arg);
-
- // Emit a conditional branch to the target. There must be a current
- // frame at the branch. The current frame will fall through to the
- // code after the branch. The arg is a result that is live both at
- // the target and the fall-through.
- virtual void Branch(Condition cc, Hint hint = no_hint);
- virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
- void Branch(Condition cc,
- Result* arg0,
- Result* arg1,
- Hint hint = no_hint);
-
- // Bind a jump target. If there is no current frame at the binding
- // site, there must be at least one frame reaching via a forward
- // jump.
- virtual void Bind();
- virtual void Bind(Result* arg);
- void Bind(Result* arg0, Result* arg1);
-
- // Emit a call to a jump target. There must be a current frame at
- // the call. The frame at the target is the same as the current
- // frame except for an extra return address on top of it. The frame
- // after the call is the same as the frame before the call.
- void Call();
-
- static void set_compiling_deferred_code(bool flag) {
- compiling_deferred_code_ = flag;
- }
-
- protected:
- // Directionality flag set at initialization time.
- Directionality direction_;
-
- // A list of frames reaching this block via forward jumps.
- ZoneList<VirtualFrame*> reaching_frames_;
-
- // A parallel list of labels for merge code.
- ZoneList<Label> merge_labels_;
-
- // The frame used on entry to the block and expected at backward
- // jumps to the block. Set when the jump target is bound, but may
- // or may not be set for forward-only blocks.
- VirtualFrame* entry_frame_;
-
- // The actual entry label of the block.
- Label entry_label_;
-
- // Implementations of Jump, Branch, and Bind with all arguments and
- // return values using the virtual frame.
- void DoJump();
- void DoBranch(Condition cc, Hint hint);
- void DoBind();
-
- private:
- static bool compiling_deferred_code_;
-
- // Add a virtual frame reaching this labeled block via a forward jump,
- // and a corresponding merge code label.
- void AddReachingFrame(VirtualFrame* frame);
-
- // Perform initialization required during entry frame computation
- // after setting the virtual frame element at index in frame to be
- // target.
- inline void InitializeEntryElement(int index, FrameElement* target);
-
- // Compute a frame to use for entry to this block.
- void ComputeEntryFrame();
-
- DISALLOW_COPY_AND_ASSIGN(JumpTarget);
-};
-
-
-// -------------------------------------------------------------------------
-// Break targets
-//
-// A break target is a jump target that can be used to break out of a
-// statement that keeps extra state on the stack (e.g., for/in or
-// try/finally). They know the expected stack height at the target
-// and will drop state from nested statements as part of merging.
-//
-// Break targets are used for return, break, and continue targets.
-
-class BreakTarget : public JumpTarget {
- public:
- // Construct a break target.
- BreakTarget() {}
- explicit BreakTarget(JumpTarget::Directionality direction)
- : JumpTarget(direction) { }
-
- virtual ~BreakTarget() {}
-
- // Set the direction of the break target.
- virtual void set_direction(Directionality direction);
-
- // Copy the state of this break target to the destination. The
- // lists of forward-reaching frames and merge-point labels are
- // copied. All virtual frame pointers are copied, not the
- // pointed-to frames. The previous state of the destination is
- // overwritten, without deallocating pointed-to virtual frames.
- void CopyTo(BreakTarget* destination);
-
- // Emit a jump to the target. There must be a current frame at the
- // jump and there will be no current frame after the jump.
- virtual void Jump();
- virtual void Jump(Result* arg);
-
- // Emit a conditional branch to the target. There must be a current
- // frame at the branch. The current frame will fall through to the
- // code after the branch.
- virtual void Branch(Condition cc, Hint hint = no_hint);
- virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
-
- // Bind a break target. If there is no current frame at the binding
- // site, there must be at least one frame reaching via a forward
- // jump.
- virtual void Bind();
- virtual void Bind(Result* arg);
-
- // Setter for expected height.
- void set_expected_height(int expected) { expected_height_ = expected; }
-
- private:
- // The expected height of the expression stack where the target will
- // be bound, statically known at initialization time.
- int expected_height_;
-
- DISALLOW_COPY_AND_ASSIGN(BreakTarget);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_JUMP_TARGET_HEAVY_H_
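
For reference, the usual codegen pattern against this heavy JumpTarget
interface was a Branch/Bind pair around a diamond in the generated code. A
minimal sketch, assuming a code generator and a flag-setting compare; EmitAbs
and the surrounding details are invented for illustration, and this only ever
compiled inside the old V8 tree:

    // Classic Branch/Bind usage of the interface declared above.
    void EmitAbs(CodeGenerator* cgen, Result* value) {
      JumpTarget done;               // FORWARD_ONLY by default
      // ... emit a compare of *value against zero, setting condition flags ...
      done.Branch(positive, value);  // skip the negation when non-negative
      // ... emit code negating *value on the fall-through path ...
      done.Bind(value);              // merge code for both paths lands here
    }
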
diff --git a/deps/v8/src/jump-target-light-inl.h b/deps/v8/src/jump-target-light-inl.h
deleted file mode 100644
index e8f1a5fd4..000000000
--- a/deps/v8/src/jump-target-light-inl.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_LIGHT_INL_H_
-#define V8_JUMP_TARGET_LIGHT_INL_H_
-
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Construct a jump target.
-JumpTarget::JumpTarget(Directionality direction)
- : entry_frame_set_(false),
- direction_(direction),
- entry_frame_(kInvalidVirtualFrameInitializer) {
-}
-
-JumpTarget::JumpTarget()
- : entry_frame_set_(false),
- direction_(FORWARD_ONLY),
- entry_frame_(kInvalidVirtualFrameInitializer) {
-}
-
-
-BreakTarget::BreakTarget() { }
-BreakTarget::BreakTarget(JumpTarget::Directionality direction)
- : JumpTarget(direction) { }
-
-} } // namespace v8::internal
-
-#endif // V8_JUMP_TARGET_LIGHT_INL_H_
diff --git a/deps/v8/src/jump-target-light.cc b/deps/v8/src/jump-target-light.cc
deleted file mode 100644
index 36dc176bc..000000000
--- a/deps/v8/src/jump-target-light.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-DeferredCode::DeferredCode()
- : masm_(CodeGeneratorScope::Current()->masm()),
- statement_position_(masm_->positions_recorder()->
- current_statement_position()),
- position_(masm_->positions_recorder()->current_position()),
- frame_state_(*CodeGeneratorScope::Current()->frame()) {
- ASSERT(statement_position_ != RelocInfo::kNoPosition);
- ASSERT(position_ != RelocInfo::kNoPosition);
-
- CodeGeneratorScope::Current()->AddDeferred(this);
-
-#ifdef DEBUG
- comment_ = "";
-#endif
-}
-
-
-// -------------------------------------------------------------------------
-// BreakTarget implementation.
-
-
-void BreakTarget::SetExpectedHeight() {
- expected_height_ = cgen()->frame()->height();
-}
-
-
-void BreakTarget::Jump() {
- ASSERT(cgen()->has_valid_frame());
-
- int count = cgen()->frame()->height() - expected_height_;
- if (count > 0) {
- cgen()->frame()->Drop(count);
- }
- DoJump();
-}
-
-
-void BreakTarget::Branch(Condition cc, Hint hint) {
- if (cc == al) {
- Jump();
- return;
- }
-
- ASSERT(cgen()->has_valid_frame());
-
- int count = cgen()->frame()->height() - expected_height_;
- if (count > 0) {
- // We negate and branch here rather than using DoBranch's negate
- // and branch. This gives us a hook to remove statement state
- // from the frame.
- JumpTarget fall_through;
- // Branch to fall through will not negate, because it is a
- // forward-only target.
- fall_through.Branch(NegateCondition(cc), NegateHint(hint));
- // Emit merge code.
- cgen()->frame()->Drop(count);
- DoJump();
- fall_through.Bind();
- } else {
- DoBranch(cc, hint);
- }
-}
-
-
-void BreakTarget::Bind() {
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- if (count > 0) {
- cgen()->frame()->Drop(count);
- }
- }
- DoBind();
-}
-
-} } // namespace v8::internal
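
Unlike the heavy version, this light implementation merges frames purely by
dropping down to the expected stack height. A hedged sketch of the pattern
the methods above support; the nested-statement details are invented:

    BreakTarget exit;
    exit.SetExpectedHeight();  // records cgen()->frame()->height()
    // ... enter a statement that pushes bookkeeping onto the frame ...
    exit.Jump();               // Drop(count) back to that height, then DoJump()
    // ...
    exit.Bind();               // likewise drops the excess before DoBind()
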
diff --git a/deps/v8/src/jump-target-light.h b/deps/v8/src/jump-target-light.h
deleted file mode 100644
index 0d653063b..000000000
--- a/deps/v8/src/jump-target-light.h
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_LIGHT_H_
-#define V8_JUMP_TARGET_LIGHT_H_
-
-#include "macro-assembler.h"
-#include "zone-inl.h"
-#include "virtual-frame.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class FrameElement;
-class Result;
-
-// -------------------------------------------------------------------------
-// Jump targets
-//
-// A jump target is an abstraction of a basic-block entry in generated
-// code. It collects all the virtual frames reaching the block by
-// forward jumps and pairs them with labels for the merge code along
-// all forward-reaching paths. When bound, an expected frame for the
-// block is determined and code is generated to merge to the expected
-// frame. For backward jumps, the merge code is generated at the edge
-// leaving the predecessor block.
-//
-// A jump target must have been reached via control flow (either by
-// jumping, branching, or falling through) at the time it is bound.
-// In particular, this means that at least one of the control-flow
-// graph edges reaching the target must be a forward edge.
-
-class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
- public:
- // Forward-only jump targets can only be reached by forward CFG edges.
- enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
-
- // Construct a jump target.
- explicit inline JumpTarget(Directionality direction);
-
- inline JumpTarget();
-
- virtual ~JumpTarget() {}
-
- void Unuse() {
- entry_frame_set_ = false;
- entry_label_.Unuse();
- }
-
- inline CodeGenerator* cgen();
-
- Label* entry_label() { return &entry_label_; }
-
- const VirtualFrame* entry_frame() const {
- return entry_frame_set_ ? &entry_frame_ : NULL;
- }
-
- void set_entry_frame(VirtualFrame* frame) {
- entry_frame_ = *frame;
- entry_frame_set_ = true;
- }
-
- // Predicates testing the state of the encapsulated label.
- bool is_bound() const { return entry_label_.is_bound(); }
- bool is_linked() const { return entry_label_.is_linked(); }
- bool is_unused() const { return entry_label_.is_unused(); }
-
- // Copy the state of this jump target to the destination.
- inline void CopyTo(JumpTarget* destination) {
- *destination = *this;
- }
-
- // Emit a jump to the target. There must be a current frame at the
- // jump and there will be no current frame after the jump.
- virtual void Jump();
-
- // Emit a conditional branch to the target. There must be a current
- // frame at the branch. The current frame will fall through to the
- // code after the branch.
- virtual void Branch(Condition cc, Hint hint = no_hint);
-
- // Bind a jump target. If there is no current frame at the binding
- // site, there must be at least one frame reaching via a forward
- // jump.
- virtual void Bind();
-
- // Emit a call to a jump target. There must be a current frame at
- // the call. The frame at the target is the same as the current
- // frame except for an extra return address on top of it. The frame
- // after the call is the same as the frame before the call.
- void Call();
-
- protected:
- // Has an entry frame been found?
- bool entry_frame_set_;
-
- // Can we branch backwards to this label?
- Directionality direction_;
-
- // The frame used on entry to the block and expected at backward
- // jumps to the block. Set the first time something branches to this
- // jump target.
- VirtualFrame entry_frame_;
-
- // The actual entry label of the block.
- Label entry_label_;
-
- // Implementations of Jump, Branch, and Bind with all arguments and
- // return values using the virtual frame.
- void DoJump();
- void DoBranch(Condition cc, Hint hint);
- void DoBind();
-};
-
-
-// -------------------------------------------------------------------------
-// Break targets
-//
-// A break target is a jump target that can be used to break out of a
-// statement that keeps extra state on the stack (e.g., for/in or
-// try/finally). They know the expected stack height at the target
-// and will drop state from nested statements as part of merging.
-//
-// Break targets are used for return, break, and continue targets.
-
-class BreakTarget : public JumpTarget {
- public:
- // Construct a break target.
- inline BreakTarget();
-
- inline BreakTarget(JumpTarget::Directionality direction);
-
- virtual ~BreakTarget() {}
-
- // Copy the state of this jump target to the destination.
- inline void CopyTo(BreakTarget* destination) {
- *destination = *this;
- }
-
- // Emit a jump to the target. There must be a current frame at the
- // jump and there will be no current frame after the jump.
- virtual void Jump();
-
- // Emit a conditional branch to the target. There must be a current
- // frame at the branch. The current frame will fall through to the
- // code after the branch.
- virtual void Branch(Condition cc, Hint hint = no_hint);
-
- // Bind a break target. If there is no current frame at the binding
- // site, there must be at least one frame reaching via a forward
- // jump.
- virtual void Bind();
-
- // Setter for expected height.
- void set_expected_height(int expected) { expected_height_ = expected; }
-
- // Uses the current frame to set the expected height.
- void SetExpectedHeight();
-
- private:
- // The expected height of the expression stack where the target will
- // be bound, statically known at initialization time.
- int expected_height_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_JUMP_TARGET_LIGHT_H_
diff --git a/deps/v8/src/jump-target.h b/deps/v8/src/jump-target.h
deleted file mode 100644
index a0d2686b0..000000000
--- a/deps/v8/src/jump-target.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_H_
-#define V8_JUMP_TARGET_H_
-
-#if V8_TARGET_ARCH_IA32
-#include "jump-target-heavy.h"
-#elif V8_TARGET_ARCH_X64
-#include "jump-target-heavy.h"
-#elif V8_TARGET_ARCH_ARM
-#include "jump-target-light.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "jump-target-light.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Shadow break targets
-//
-// A shadow break target represents a break target that is temporarily
-// shadowed by another one (represented by the original during
-// shadowing). They are used to catch jumps to labels in certain
-// contexts, e.g. try blocks. After shadowing ends, the formerly
-// shadowed target is again represented by the original and the
-// ShadowTarget can be used as a jump target in its own right,
-// representing the formerly shadowing target.
-
-class ShadowTarget : public BreakTarget {
- public:
- // Construct a shadow jump target. After construction the shadow
- // target object holds the state of the original target, and the
- // original target is actually a fresh one that intercepts control
- // flow intended for the shadowed one.
- explicit ShadowTarget(BreakTarget* shadowed);
-
- virtual ~ShadowTarget() {}
-
- // End shadowing. After shadowing ends, the original jump target
- // again gives access to the formerly shadowed target and the shadow
- // target object gives access to the formerly shadowing target.
- void StopShadowing();
-
- // During shadowing, the currently shadowing target. After
- // shadowing, the target that was shadowed.
- BreakTarget* other_target() const { return other_target_; }
-
- private:
- // During shadowing, the currently shadowing target. After
- // shadowing, the target that was shadowed.
- BreakTarget* other_target_;
-
-#ifdef DEBUG
- bool is_shadowing_;
-#endif
-
- DISALLOW_COPY_AND_ASSIGN(ShadowTarget);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_JUMP_TARGET_H_
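
The shadowing protocol is easiest to see at a use site such as try/finally
emission. A hedged sketch; function_return and the elided emission steps are
placeholders:

    ShadowTarget shadow(&function_return);  // a fresh target intercepts jumps
    // ... generate the try block; a return inside links to the interceptor ...
    shadow.StopShadowing();                 // the roles swap back
    if (shadow.is_linked()) {
      // The try block jumped to the shadowed return target: emit the
      // finally code here, then forward control to shadow.other_target().
    }
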
diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h
index eeaea65f8..8ef7514f4 100644
--- a/deps/v8/src/list-inl.h
+++ b/deps/v8/src/list-inl.h
@@ -46,10 +46,16 @@ void List<T, P>::Add(const T& element) {
template<typename T, class P>
void List<T, P>::AddAll(const List<T, P>& other) {
- int result_length = length_ + other.length_;
+ AddAll(other.ToVector());
+}
+
+
+template<typename T, class P>
+void List<T, P>::AddAll(const Vector<T>& other) {
+ int result_length = length_ + other.length();
if (capacity_ < result_length) Resize(result_length);
- for (int i = 0; i < other.length_; i++) {
- data_[length_ + i] = other.data_[i];
+ for (int i = 0; i < other.length(); i++) {
+ data_[length_ + i] = other.at(i);
}
length_ = result_length;
}
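
With the const ToVector() and the Vector overload added in list.h below, both
spellings work. An illustrative fragment, not a line from the patch:

    List<int> dst(4);
    List<int> src(4);
    src.Add(1);
    src.Add(2);
    dst.AddAll(src);             // now forwards through src.ToVector()
    dst.AddAll(src.ToVector());  // appending straight from a Vector<int>
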
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index 9a2e69897..ca2b7bce2 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,8 @@
#ifndef V8_LIST_H_
#define V8_LIST_H_
+#include "utils.h"
+
namespace v8 {
namespace internal {
@@ -80,7 +82,7 @@ class List {
INLINE(int length() const) { return length_; }
INLINE(int capacity() const) { return capacity_; }
- Vector<T> ToVector() { return Vector<T>(data_, length_); }
+ Vector<T> ToVector() const { return Vector<T>(data_, length_); }
Vector<const T> ToConstVector() { return Vector<const T>(data_, length_); }
@@ -91,6 +93,9 @@ class List {
// Add all the elements from the argument list to this list.
void AddAll(const List<T, P>& other);
+ // Add all the elements from the vector to this list.
+ void AddAll(const Vector<T>& other);
+
// Inserts the element at the specific index.
void InsertAt(int index, const T& element);
@@ -159,6 +164,11 @@ class List {
DISALLOW_COPY_AND_ASSIGN(List);
};
+class Map;
+class Code;
+typedef List<Map*> MapList;
+typedef List<Code*> CodeList;
+
} } // namespace v8::internal
#endif // V8_LIST_H_
diff --git a/deps/v8/src/lithium-allocator-inl.h b/deps/v8/src/lithium-allocator-inl.h
index 84c5bbdc6..8f660ce0e 100644
--- a/deps/v8/src/lithium-allocator-inl.h
+++ b/deps/v8/src/lithium-allocator-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -36,6 +36,8 @@
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/lithium-mips.h"
#else
#error "Unknown architecture."
#endif
@@ -60,27 +62,27 @@ TempIterator::TempIterator(LInstruction* instr)
: instr_(instr),
limit_(instr->TempCount()),
current_(0) {
- current_ = AdvanceToNext(0);
+ SkipUninteresting();
}
-bool TempIterator::HasNext() { return current_ < limit_; }
+bool TempIterator::Done() { return current_ >= limit_; }
-LOperand* TempIterator::Next() {
- ASSERT(HasNext());
+LOperand* TempIterator::Current() {
+ ASSERT(!Done());
return instr_->TempAt(current_);
}
-int TempIterator::AdvanceToNext(int start) {
- while (start < limit_ && instr_->TempAt(start) == NULL) start++;
- return start;
+void TempIterator::SkipUninteresting() {
+ while (current_ < limit_ && instr_->TempAt(current_) == NULL) ++current_;
}
void TempIterator::Advance() {
- current_ = AdvanceToNext(current_ + 1);
+ ++current_;
+ SkipUninteresting();
}
@@ -88,27 +90,29 @@ InputIterator::InputIterator(LInstruction* instr)
: instr_(instr),
limit_(instr->InputCount()),
current_(0) {
- current_ = AdvanceToNext(0);
+ SkipUninteresting();
}
-bool InputIterator::HasNext() { return current_ < limit_; }
+bool InputIterator::Done() { return current_ >= limit_; }
-LOperand* InputIterator::Next() {
- ASSERT(HasNext());
+LOperand* InputIterator::Current() {
+ ASSERT(!Done());
return instr_->InputAt(current_);
}
void InputIterator::Advance() {
- current_ = AdvanceToNext(current_ + 1);
+ ++current_;
+ SkipUninteresting();
}
-int InputIterator::AdvanceToNext(int start) {
- while (start < limit_ && instr_->InputAt(start)->IsConstantOperand()) start++;
- return start;
+void InputIterator::SkipUninteresting() {
+ while (current_ < limit_ && instr_->InputAt(current_)->IsConstantOperand()) {
+ ++current_;
+ }
}
@@ -116,23 +120,23 @@ UseIterator::UseIterator(LInstruction* instr)
: input_iterator_(instr), env_iterator_(instr->environment()) { }
-bool UseIterator::HasNext() {
- return input_iterator_.HasNext() || env_iterator_.HasNext();
+bool UseIterator::Done() {
+ return input_iterator_.Done() && env_iterator_.Done();
}
-LOperand* UseIterator::Next() {
- ASSERT(HasNext());
- return input_iterator_.HasNext()
- ? input_iterator_.Next()
- : env_iterator_.Next();
+LOperand* UseIterator::Current() {
+ ASSERT(!Done());
+ return input_iterator_.Done()
+ ? env_iterator_.Current()
+ : input_iterator_.Current();
}
void UseIterator::Advance() {
- input_iterator_.HasNext()
- ? input_iterator_.Advance()
- : env_iterator_.Advance();
+ input_iterator_.Done()
+ ? env_iterator_.Advance()
+ : input_iterator_.Advance();
}
} } // namespace v8::internal
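
The protocol change from HasNext()/Next() to Done()/Current()/Advance() makes
Current() idempotent and concentrates the skip logic in SkipUninteresting(),
which always parks the cursor on a valid element. The same shape in a
standalone, compilable sketch:

    #include <cstdio>

    class EvenIterator {
     public:
      EvenIterator(const int* data, int limit)
          : data_(data), limit_(limit), current_(0) {
        SkipUninteresting();
      }
      bool Done() const { return current_ >= limit_; }
      int Current() const { return data_[current_]; }  // safe to call twice
      void Advance() { ++current_; SkipUninteresting(); }

     private:
      // Park current_ on the next even element, or on limit_ when exhausted.
      void SkipUninteresting() {
        while (current_ < limit_ && data_[current_] % 2 != 0) ++current_;
      }
      const int* data_;
      int limit_;
      int current_;
    };

    int main() {
      const int xs[] = {1, 2, 3, 4, 6, 7};
      for (EvenIterator it(xs, 6); !it.Done(); it.Advance())
        std::printf("%d ", it.Current());  // prints: 2 4 6
    }
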
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index a13a18977..dcdc5d9b7 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,6 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
#include "lithium-allocator-inl.h"
#include "hydrogen.h"
@@ -36,6 +37,8 @@
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/lithium-mips.h"
#else
#error "Unknown architecture."
#endif
@@ -44,13 +47,18 @@ namespace v8 {
namespace internal {
-#define DEFINE_OPERAND_CACHE(name, type) \
- name name::cache[name::kNumCachedOperands]; \
- void name::SetupCache() { \
- for (int i = 0; i < kNumCachedOperands; i++) { \
- cache[i].ConvertTo(type, i); \
- } \
- }
+#define DEFINE_OPERAND_CACHE(name, type) \
+ name name::cache[name::kNumCachedOperands]; \
+ void name::SetupCache() { \
+ for (int i = 0; i < kNumCachedOperands; i++) { \
+ cache[i].ConvertTo(type, i); \
+ } \
+ } \
+ static bool name##_initialize() { \
+ name::SetupCache(); \
+ return true; \
+ } \
+ static bool name##_cache_initialized = name##_initialize();
DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND)
DEFINE_OPERAND_CACHE(LStackSlot, STACK_SLOT)
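
The lines appended to the macro register a file-scope static whose dynamic
initializer calls SetupCache() before main() runs, which is what lets the
LAllocator::Setup() hook be deleted further down. The idiom in isolation, as
a standalone sketch:

    #include <cstdio>

    struct SquareCache {
      static int cache[4];
      static void SetupCache() {
        for (int i = 0; i < 4; i++) cache[i] = i * i;
      }
    };
    int SquareCache::cache[4];

    // Static initialization runs SetupCache() once, before main().
    static bool SquareCache_initialize() {
      SquareCache::SetupCache();
      return true;
    }
    static bool SquareCache_cache_initialized = SquareCache_initialize();

    int main() {
      std::printf("%d\n", SquareCache::cache[3]);  // 9; cache was prefilled
    }
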
@@ -295,6 +303,11 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) {
// we need to split use positions in a special way.
bool split_at_start = false;
+ if (current->start().Value() == position.Value()) {
+ // When splitting at start we need to locate the previous use interval.
+ current = first_interval_;
+ }
+
while (current != NULL) {
if (current->Contains(position)) {
current->SplitAt(position);
@@ -344,6 +357,11 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) {
}
result->first_pos_ = use_after;
+ // Discard cached iteration state. It might be pointing
+ // to the use that no longer belongs to this live range.
+ last_processed_use_ = NULL;
+ current_interval_ = NULL;
+
// Link the new live range in the chain before any of the other
// ranges linked from the range before the split.
result->parent_ = (parent_ == NULL) ? this : parent_;
@@ -525,6 +543,24 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
}
+LAllocator::LAllocator(int num_values, HGraph* graph)
+ : chunk_(NULL),
+ live_in_sets_(graph->blocks()->length()),
+ live_ranges_(num_values * 2),
+ fixed_live_ranges_(NULL),
+ fixed_double_live_ranges_(NULL),
+ unhandled_live_ranges_(num_values * 2),
+ active_live_ranges_(8),
+ inactive_live_ranges_(8),
+ reusable_slots_(8),
+ next_virtual_register_(num_values),
+ first_artificial_register_(num_values),
+ mode_(NONE),
+ num_registers_(-1),
+ graph_(graph),
+ has_osr_entry_(false) {}
+
+
void LAllocator::InitializeLivenessAnalysis() {
// Initialize the live_in sets for each block to NULL.
int block_count = graph_->blocks()->length();
@@ -539,10 +575,10 @@ BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
BitVector* live_out = new BitVector(next_virtual_register_);
// Process all successor blocks.
- HBasicBlock* successor = block->end()->FirstSuccessor();
- while (successor != NULL) {
+ for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
// Add values live on entry to the successor. Note the successor's
// live_in will not be computed yet for backwards edges.
+ HBasicBlock* successor = it.Current();
BitVector* live_in = live_in_sets_[successor->block_id()];
if (live_in != NULL) live_out->Union(*live_in);
@@ -556,11 +592,6 @@ BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
live_out->Add(phi->OperandAt(index)->id());
}
}
-
- // Check if we are done with second successor.
- if (successor == block->end()->SecondSuccessor()) break;
-
- successor = block->end()->SecondSuccessor();
}
return live_out;
@@ -618,11 +649,7 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
LiveRange* LAllocator::FixedLiveRangeFor(int index) {
- if (index >= fixed_live_ranges_.length()) {
- fixed_live_ranges_.AddBlock(NULL,
- index - fixed_live_ranges_.length() + 1);
- }
-
+ ASSERT(index < Register::kNumAllocatableRegisters);
LiveRange* result = fixed_live_ranges_[index];
if (result == NULL) {
result = new LiveRange(FixedLiveRangeID(index));
@@ -635,11 +662,7 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) {
LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
- if (index >= fixed_double_live_ranges_.length()) {
- fixed_double_live_ranges_.AddBlock(NULL,
- index - fixed_double_live_ranges_.length() + 1);
- }
-
+ ASSERT(index < DoubleRegister::kNumAllocatableRegisters);
LiveRange* result = fixed_double_live_ranges_[index];
if (result == NULL) {
result = new LiveRange(FixedDoubleLiveRangeID(index));
@@ -650,6 +673,7 @@ LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
return result;
}
+
LiveRange* LAllocator::LiveRangeFor(int index) {
if (index >= live_ranges_.length()) {
live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1);
@@ -771,8 +795,8 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
int gap_index) {
// Handle fixed temporaries.
if (first != NULL) {
- for (TempIterator it(first); it.HasNext(); it.Advance()) {
- LUnallocated* temp = LUnallocated::cast(it.Next());
+ for (TempIterator it(first); !it.Done(); it.Advance()) {
+ LUnallocated* temp = LUnallocated::cast(it.Current());
if (temp->HasFixedPolicy()) {
AllocateFixed(temp, gap_index - 1, false);
}
@@ -813,8 +837,8 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
// Handle fixed input operands of second instruction.
if (second != NULL) {
- for (UseIterator it(second); it.HasNext(); it.Advance()) {
- LUnallocated* cur_input = LUnallocated::cast(it.Next());
+ for (UseIterator it(second); !it.Done(); it.Advance()) {
+ LUnallocated* cur_input = LUnallocated::cast(it.Current());
if (cur_input->HasFixedPolicy()) {
LUnallocated* input_copy = cur_input->CopyUnconstrained();
bool is_tagged = HasTaggedValue(cur_input->VirtualRegister());
@@ -949,8 +973,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
}
}
- for (UseIterator it(instr); it.HasNext(); it.Advance()) {
- LOperand* input = it.Next();
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LOperand* input = it.Current();
LifetimePosition use_pos;
if (input->IsUnallocated() &&
@@ -964,8 +988,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
if (input->IsUnallocated()) live->Add(input->VirtualRegister());
}
- for (TempIterator it(instr); it.HasNext(); it.Advance()) {
- LOperand* temp = it.Next();
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LOperand* temp = it.Current();
if (instr->IsMarkedAsCall()) {
if (temp->IsRegister()) continue;
if (temp->IsUnallocated()) {
@@ -1010,6 +1034,22 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
chunk_->AddGapMove(cur_block->last_instruction_index() - 1,
operand,
phi_operand);
+
+ // We are going to insert a move before the branch instruction.
+ // Some branch instructions (e.g. loops' back edges)
+ // can potentially cause a GC so they have a pointer map.
+ // By inserting a move we essentially create a copy of a
+ // value which is invisible to PopulatePointerMaps(), because we store
+ // it into a location different from the operand of a live range
+ // covering a branch instruction.
+ // Thus we need to manually record a pointer.
+ if (phi->representation().IsTagged()) {
+ LInstruction* branch =
+ InstructionAt(cur_block->last_instruction_index());
+ if (branch->HasPointerMap()) {
+ branch->pointer_map()->RecordPointer(phi_operand);
+ }
+ }
}
LiveRange* live_range = LiveRangeFor(phi->id());
@@ -1097,7 +1137,7 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
// We are going to insert a move before the branch instruction.
// Some branch instructions (e.g. loops' back edges)
// can potentially cause a GC so they have a pointer map.
- // By insterting a move we essentially create a copy of a
+ // By inserting a move we essentially create a copy of a
// value which is invisible to PopulatePointerMaps(), because we store
// it into a location different from the operand of a live range
// covering a branch instruction.
@@ -1274,7 +1314,7 @@ void LAllocator::BuildLiveRanges() {
found = true;
int operand_index = iterator.Current();
PrintF("Function: %s\n",
- *graph_->info()->function()->debug_name()->ToCString());
+ *chunk_->info()->function()->debug_name()->ToCString());
PrintF("Value %d used before first definition!\n", operand_index);
LiveRange* range = LiveRangeFor(operand_index);
PrintF("First use is at %d\n", range->first_pos()->pos().Value());
@@ -1436,7 +1476,7 @@ void LAllocator::AllocateDoubleRegisters() {
void LAllocator::AllocateRegisters() {
ASSERT(mode_ != NONE);
- reusable_slots_.Clear();
+ ASSERT(unhandled_live_ranges_.is_empty());
for (int i = 0; i < live_ranges_.length(); ++i) {
if (live_ranges_[i] != NULL) {
@@ -1448,6 +1488,7 @@ void LAllocator::AllocateRegisters() {
SortUnhandled();
ASSERT(UnhandledIsSorted());
+ ASSERT(reusable_slots_.is_empty());
ASSERT(active_live_ranges_.is_empty());
ASSERT(inactive_live_ranges_.is_empty());
@@ -1532,17 +1573,9 @@ void LAllocator::AllocateRegisters() {
}
}
- active_live_ranges_.Clear();
- inactive_live_ranges_.Clear();
-}
-
-
-void LAllocator::Setup() {
- LConstantOperand::SetupCache();
- LStackSlot::SetupCache();
- LDoubleStackSlot::SetupCache();
- LRegister::SetupCache();
- LDoubleRegister::SetupCache();
+ reusable_slots_.Rewind(0);
+ active_live_ranges_.Rewind(0);
+ inactive_live_ranges_.Rewind(0);
}
@@ -2001,12 +2034,12 @@ LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start,
// We have no choice
if (start_instr == end_instr) return end;
- HBasicBlock* end_block = GetBlock(start);
- HBasicBlock* start_block = GetBlock(end);
+ HBasicBlock* start_block = GetBlock(start);
+ HBasicBlock* end_block = GetBlock(end);
if (end_block == start_block) {
- // The interval is split in the same basic block. Split at latest possible
- // position.
+ // The interval is split in the same basic block. Split at the latest
+ // possible position.
return end;
}
@@ -2017,7 +2050,9 @@ LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start,
block = block->parent_loop_header();
}
- if (block == end_block) return end;
+ // We did not find any suitable outer loop. Split at the latest possible
+ // position unless end_block is a loop header itself.
+ if (block == end_block && !end_block->IsLoopHeader()) return end;
return LifetimePosition::FromInstructionIndex(
block->first_instruction_index());
diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h
index d53ea7871..e4e64974b 100644
--- a/deps/v8/src/lithium-allocator.h
+++ b/deps/v8/src/lithium-allocator.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,7 +30,7 @@
#include "v8.h"
-#include "data-flow.h"
+#include "allocation.h"
#include "lithium.h"
#include "zone.h"
@@ -162,12 +162,12 @@ class LEnvironment;
class TempIterator BASE_EMBEDDED {
public:
inline explicit TempIterator(LInstruction* instr);
- inline bool HasNext();
- inline LOperand* Next();
+ inline bool Done();
+ inline LOperand* Current();
inline void Advance();
private:
- inline int AdvanceToNext(int start);
+ inline void SkipUninteresting();
LInstruction* instr_;
int limit_;
int current_;
@@ -178,12 +178,12 @@ class TempIterator BASE_EMBEDDED {
class InputIterator BASE_EMBEDDED {
public:
inline explicit InputIterator(LInstruction* instr);
- inline bool HasNext();
- inline LOperand* Next();
+ inline bool Done();
+ inline LOperand* Current();
inline void Advance();
private:
- inline int AdvanceToNext(int start);
+ inline void SkipUninteresting();
LInstruction* instr_;
int limit_;
int current_;
@@ -193,8 +193,8 @@ class InputIterator BASE_EMBEDDED {
class UseIterator BASE_EMBEDDED {
public:
inline explicit UseIterator(LInstruction* instr);
- inline bool HasNext();
- inline LOperand* Next();
+ inline bool Done();
+ inline LOperand* Current();
inline void Advance();
private:
@@ -428,24 +428,8 @@ class GrowableBitVector BASE_EMBEDDED {
class LAllocator BASE_EMBEDDED {
public:
- explicit LAllocator(int first_virtual_register, HGraph* graph)
- : chunk_(NULL),
- live_in_sets_(0),
- live_ranges_(16),
- fixed_live_ranges_(8),
- fixed_double_live_ranges_(8),
- unhandled_live_ranges_(8),
- active_live_ranges_(8),
- inactive_live_ranges_(8),
- reusable_slots_(8),
- next_virtual_register_(first_virtual_register),
- first_artificial_register_(first_virtual_register),
- mode_(NONE),
- num_registers_(-1),
- graph_(graph),
- has_osr_entry_(false) {}
-
- static void Setup();
+ LAllocator(int first_virtual_register, HGraph* graph);
+
static void TraceAlloc(const char* msg, ...);
// Lithium translation support.
@@ -468,10 +452,10 @@ class LAllocator BASE_EMBEDDED {
void Allocate(LChunk* chunk);
const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
- const ZoneList<LiveRange*>* fixed_live_ranges() const {
+ const Vector<LiveRange*>* fixed_live_ranges() const {
return &fixed_live_ranges_;
}
- const ZoneList<LiveRange*>* fixed_double_live_ranges() const {
+ const Vector<LiveRange*>* fixed_double_live_ranges() const {
return &fixed_double_live_ranges_;
}
@@ -616,8 +600,10 @@ class LAllocator BASE_EMBEDDED {
ZoneList<LiveRange*> live_ranges_;
// Lists of live ranges
- ZoneList<LiveRange*> fixed_live_ranges_;
- ZoneList<LiveRange*> fixed_double_live_ranges_;
+ EmbeddedVector<LiveRange*, Register::kNumAllocatableRegisters>
+ fixed_live_ranges_;
+ EmbeddedVector<LiveRange*, DoubleRegister::kNumAllocatableRegisters>
+ fixed_double_live_ranges_;
ZoneList<LiveRange*> unhandled_live_ranges_;
ZoneList<LiveRange*> active_live_ranges_;
ZoneList<LiveRange*> inactive_live_ranges_;
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index e829f2f04..64ef469b3 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -25,6 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
#include "lithium.h"
namespace v8 {
@@ -165,4 +166,30 @@ void LPointerMap::PrintTo(StringStream* stream) {
}
+int ElementsKindToShiftSize(JSObject::ElementsKind elements_kind) {
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ return 0;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ return 1;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ return 2;
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ return 3;
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ return kPointerSizeLog2;
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
} } // namespace v8::internal
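
The new helper returns log2 of the element size, so keyed loads and stores
can form byte offsets with a shift. A hypothetical caller, not a line from
this patch:

    // offset = base + (index << ElementsKindToShiftSize(kind))
    int shift = ElementsKindToShiftSize(JSObject::EXTERNAL_INT_ELEMENTS);  // 2
    int byte_offset = 5 << shift;  // element 5 of an int32 array: 20 bytes in
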
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index 280da4724..6010b777e 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -28,6 +28,7 @@
#ifndef V8_LITHIUM_H_
#define V8_LITHIUM_H_
+#include "allocation.h"
#include "hydrogen.h"
#include "safepoint-table.h"
@@ -518,34 +519,34 @@ class ShallowIterator BASE_EMBEDDED {
: env_(env),
limit_(env != NULL ? env->values()->length() : 0),
current_(0) {
- current_ = AdvanceToNext(0);
+ SkipUninteresting();
}
- inline bool HasNext() {
- return env_ != NULL && current_ < limit_;
- }
+ bool Done() { return current_ >= limit_; }
- inline LOperand* Next() {
- ASSERT(HasNext());
+ LOperand* Current() {
+ ASSERT(!Done());
return env_->values()->at(current_);
}
- inline void Advance() {
- current_ = AdvanceToNext(current_ + 1);
+ void Advance() {
+ ASSERT(!Done());
+ ++current_;
+ SkipUninteresting();
}
- inline LEnvironment* env() { return env_; }
+ LEnvironment* env() { return env_; }
private:
- inline bool ShouldSkip(LOperand* op) {
+ bool ShouldSkip(LOperand* op) {
return op == NULL || op->IsConstantOperand() || op->IsArgument();
}
- inline int AdvanceToNext(int start) {
- while (start < limit_ && ShouldSkip(env_->values()->at(start))) {
- start++;
+ // Skip until something interesting, beginning with and including current_.
+ void SkipUninteresting() {
+ while (current_ < limit_ && ShouldSkip(env_->values()->at(current_))) {
+ ++current_;
}
- return start;
}
LEnvironment* env_;
@@ -558,36 +559,36 @@ class ShallowIterator BASE_EMBEDDED {
class DeepIterator BASE_EMBEDDED {
public:
explicit DeepIterator(LEnvironment* env)
- : current_iterator_(env) { }
-
- inline bool HasNext() {
- if (current_iterator_.HasNext()) return true;
- if (current_iterator_.env() == NULL) return false;
- AdvanceToOuter();
- return current_iterator_.HasNext();
+ : current_iterator_(env) {
+ SkipUninteresting();
}
- inline LOperand* Next() {
- ASSERT(current_iterator_.HasNext());
- return current_iterator_.Next();
+ bool Done() { return current_iterator_.Done(); }
+
+ LOperand* Current() {
+ ASSERT(!current_iterator_.Done());
+ return current_iterator_.Current();
}
- inline void Advance() {
- if (current_iterator_.HasNext()) {
- current_iterator_.Advance();
- } else {
- AdvanceToOuter();
- }
+ void Advance() {
+ current_iterator_.Advance();
+ SkipUninteresting();
}
private:
- inline void AdvanceToOuter() {
- current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
+ void SkipUninteresting() {
+ while (current_iterator_.env() != NULL && current_iterator_.Done()) {
+ current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
+ }
}
ShallowIterator current_iterator_;
};
+
+int ElementsKindToShiftSize(JSObject::ElementsKind elements_kind);
+
+
} } // namespace v8::internal
#endif // V8_LITHIUM_H_
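
After this rewrite DeepIterator eagerly advances to the first interesting
operand across the chain of outer environments, so every caller reduces to
one uniform loop. A hedged usage sketch, assuming instr is an LInstruction*
with an environment:

    for (DeepIterator it(instr->environment()); !it.Done(); it.Advance()) {
      LOperand* value = it.Current();
      // ... e.g. record the operand in a pointer map or a use position ...
    }
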
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index a395c5118..0b01e8af1 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,15 +30,15 @@
#include "liveedit.h"
-#include "compiler.h"
#include "compilation-cache.h"
+#include "compiler.h"
#include "debug.h"
#include "deoptimizer.h"
#include "global-handles.h"
-#include "memory.h"
#include "parser.h"
#include "scopeinfo.h"
#include "scopes.h"
+#include "v8memory.h"
namespace v8 {
namespace internal {
@@ -47,6 +47,18 @@ namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
+void SetElementNonStrict(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value) {
+ // Ignore return value from SetElement. It can only be a failure if there
+ // are element setters causing exceptions and the debugger context has none
+ // of these.
+ Handle<Object> no_failure;
+ no_failure = SetElement(object, index, value, kNonStrictMode);
+ ASSERT(!no_failure.is_null());
+ USE(no_failure);
+}
+
// A simple implementation of a dynamic programming algorithm. It solves
// the problem of finding the difference of 2 arrays. It uses a table of results
// of subproblems. Each cell contains a number together with a 2-bit flag
@@ -54,7 +66,7 @@ namespace internal {
class Differencer {
public:
explicit Differencer(Comparator::Input* input)
- : input_(input), len1_(input->getLength1()), len2_(input->getLength2()) {
+ : input_(input), len1_(input->GetLength1()), len2_(input->GetLength2()) {
buffer_ = NewArray<int>(len1_ * len2_);
}
~Differencer() {
@@ -139,7 +151,7 @@ class Differencer {
if (cached_res == kEmptyCellValue) {
Direction dir;
int res;
- if (input_->equals(pos1, pos2)) {
+ if (input_->Equals(pos1, pos2)) {
res = CompareUpToTail(pos1 + 1, pos2 + 1);
dir = EQ;
} else {
@@ -258,15 +270,8 @@ void Comparator::CalculateDifference(Comparator::Input* input,
static bool CompareSubstrings(Handle<String> s1, int pos1,
Handle<String> s2, int pos2, int len) {
- static StringInputBuffer buf1;
- static StringInputBuffer buf2;
- buf1.Reset(*s1);
- buf1.Seek(pos1);
- buf2.Reset(*s2);
- buf2.Seek(pos2);
for (int i = 0; i < len; i++) {
- ASSERT(buf1.has_more() && buf2.has_more());
- if (buf1.GetNext() != buf2.GetNext()) {
+ if (s1->Get(i + pos1) != s2->Get(i + pos2)) {
return false;
}
}
@@ -274,23 +279,91 @@ static bool CompareSubstrings(Handle<String> s1, int pos1,
}
+// An addition to the Input interface that lets us switch the Input range to
+// a subrange. A more elegant way would be to wrap one Input as another Input
+// object and translate positions there, but that would cost us an additional
+// virtual call per comparison.
+class SubrangableInput : public Comparator::Input {
+ public:
+ virtual void SetSubrange1(int offset, int len) = 0;
+ virtual void SetSubrange2(int offset, int len) = 0;
+};
+
+
+class SubrangableOutput : public Comparator::Output {
+ public:
+ virtual void SetSubrange1(int offset, int len) = 0;
+ virtual void SetSubrange2(int offset, int len) = 0;
+};
+
+
+static int min(int a, int b) {
+ return a < b ? a : b;
+}
+
+
+// Finds the common prefix and suffix in the input. These parts shouldn't take
+// space in the dynamic programming table. Enables subranging in input and
+// output.
+static void NarrowDownInput(SubrangableInput* input,
+ SubrangableOutput* output) {
+ const int len1 = input->GetLength1();
+ const int len2 = input->GetLength2();
+
+ int common_prefix_len;
+ int common_suffix_len;
+
+ {
+ common_prefix_len = 0;
+ int prefix_limit = min(len1, len2);
+ while (common_prefix_len < prefix_limit &&
+ input->Equals(common_prefix_len, common_prefix_len)) {
+ common_prefix_len++;
+ }
+
+ common_suffix_len = 0;
+ int suffix_limit = min(len1 - common_prefix_len, len2 - common_prefix_len);
+
+ while (common_suffix_len < suffix_limit &&
+ input->Equals(len1 - common_suffix_len - 1,
+ len2 - common_suffix_len - 1)) {
+ common_suffix_len++;
+ }
+ }
+
+ if (common_prefix_len > 0 || common_suffix_len > 0) {
+ int new_len1 = len1 - common_suffix_len - common_prefix_len;
+ int new_len2 = len2 - common_suffix_len - common_prefix_len;
+
+ input->SetSubrange1(common_prefix_len, new_len1);
+ input->SetSubrange2(common_prefix_len, new_len2);
+
+ output->SetSubrange1(common_prefix_len, new_len1);
+ output->SetSubrange2(common_prefix_len, new_len2);
+ }
+}
+
+
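
Trimming the shared prefix and suffix keeps those regions out of the
O(len1 * len2) table, which matters for live edit where most of a script is
unchanged. The narrowing step in a standalone, compilable form:

    #include <algorithm>
    #include <cstdio>
    #include <string>

    int main() {
      std::string a = "abcXYZdef", b = "abcQdef";
      size_t limit = std::min(a.size(), b.size());

      size_t prefix = 0;
      while (prefix < limit && a[prefix] == b[prefix]) prefix++;

      size_t suffix = 0;
      while (suffix < limit - prefix &&
             a[a.size() - 1 - suffix] == b[b.size() - 1 - suffix]) {
        suffix++;
      }

      // Only a[3..5] = "XYZ" vs b[3..3] = "Q" is left for the quadratic diff.
      std::printf("prefix=%zu suffix=%zu\n", prefix, suffix);  // 3 and 3
    }
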
// A helper class that writes chunk numbers into JSArray.
// Each chunk is stored as 3 array elements: (pos1_begin, pos1_end, pos2_end).
class CompareOutputArrayWriter {
public:
CompareOutputArrayWriter()
- : array_(Factory::NewJSArray(10)), current_size_(0) {}
+ : array_(FACTORY->NewJSArray(10)), current_size_(0) {}
Handle<JSArray> GetResult() {
return array_;
}
void WriteChunk(int char_pos1, int char_pos2, int char_len1, int char_len2) {
- SetElement(array_, current_size_, Handle<Object>(Smi::FromInt(char_pos1)));
- SetElement(array_, current_size_ + 1,
- Handle<Object>(Smi::FromInt(char_pos1 + char_len1)));
- SetElement(array_, current_size_ + 2,
- Handle<Object>(Smi::FromInt(char_pos2 + char_len2)));
+ SetElementNonStrict(array_,
+ current_size_,
+ Handle<Object>(Smi::FromInt(char_pos1)));
+ SetElementNonStrict(array_,
+ current_size_ + 1,
+ Handle<Object>(Smi::FromInt(char_pos1 + char_len1)));
+ SetElementNonStrict(array_,
+ current_size_ + 2,
+ Handle<Object>(Smi::FromInt(char_pos2 + char_len2)));
current_size_ += 3;
}
@@ -310,13 +383,13 @@ class TokensCompareInput : public Comparator::Input {
: s1_(s1), offset1_(offset1), len1_(len1),
s2_(s2), offset2_(offset2), len2_(len2) {
}
- virtual int getLength1() {
+ virtual int GetLength1() {
return len1_;
}
- virtual int getLength2() {
+ virtual int GetLength2() {
return len2_;
}
- bool equals(int index1, int index2) {
+ bool Equals(int index1, int index2) {
return s1_->Get(offset1_ + index1) == s2_->Get(offset2_ + index2);
}
@@ -392,19 +465,26 @@ class LineEndsWrapper {
// Represents 2 strings as 2 arrays of lines.
-class LineArrayCompareInput : public Comparator::Input {
+class LineArrayCompareInput : public SubrangableInput {
public:
LineArrayCompareInput(Handle<String> s1, Handle<String> s2,
LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
- : s1_(s1), s2_(s2), line_ends1_(line_ends1), line_ends2_(line_ends2) {
+ : s1_(s1), s2_(s2), line_ends1_(line_ends1),
+ line_ends2_(line_ends2),
+ subrange_offset1_(0), subrange_offset2_(0),
+ subrange_len1_(line_ends1_.length()),
+ subrange_len2_(line_ends2_.length()) {
}
- int getLength1() {
- return line_ends1_.length();
+ int GetLength1() {
+ return subrange_len1_;
}
- int getLength2() {
- return line_ends2_.length();
+ int GetLength2() {
+ return subrange_len2_;
}
- bool equals(int index1, int index2) {
+ bool Equals(int index1, int index2) {
+ index1 += subrange_offset1_;
+ index2 += subrange_offset2_;
+
int line_start1 = line_ends1_.GetLineStart(index1);
int line_start2 = line_ends2_.GetLineStart(index2);
int line_end1 = line_ends1_.GetLineEnd(index1);
@@ -414,7 +494,16 @@ class LineArrayCompareInput : public Comparator::Input {
if (len1 != len2) {
return false;
}
- return CompareSubstrings(s1_, line_start1, s2_, line_start2, len1);
+ return CompareSubstrings(s1_, line_start1, s2_, line_start2,
+ len1);
+ }
+ void SetSubrange1(int offset, int len) {
+ subrange_offset1_ = offset;
+ subrange_len1_ = len;
+ }
+ void SetSubrange2(int offset, int len) {
+ subrange_offset2_ = offset;
+ subrange_len2_ = len;
}
private:
@@ -422,20 +511,28 @@ class LineArrayCompareInput : public Comparator::Input {
Handle<String> s2_;
LineEndsWrapper line_ends1_;
LineEndsWrapper line_ends2_;
+ int subrange_offset1_;
+ int subrange_offset2_;
+ int subrange_len1_;
+ int subrange_len2_;
};
// Stores the compare result in a JSArray. For each chunk it tries to conduct
// a fine-grained nested diff token-wise.
-class TokenizingLineArrayCompareOutput : public Comparator::Output {
+class TokenizingLineArrayCompareOutput : public SubrangableOutput {
public:
TokenizingLineArrayCompareOutput(LineEndsWrapper line_ends1,
LineEndsWrapper line_ends2,
Handle<String> s1, Handle<String> s2)
- : line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2) {
+ : line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2),
+ subrange_offset1_(0), subrange_offset2_(0) {
}
void AddChunk(int line_pos1, int line_pos2, int line_len1, int line_len2) {
+ line_pos1 += subrange_offset1_;
+ line_pos2 += subrange_offset2_;
+
int char_pos1 = line_ends1_.GetLineStart(line_pos1);
int char_pos2 = line_ends2_.GetLineStart(line_pos2);
int char_len1 = line_ends1_.GetLineStart(line_pos1 + line_len1) - char_pos1;
@@ -455,6 +552,12 @@ class TokenizingLineArrayCompareOutput : public Comparator::Output {
array_writer_.WriteChunk(char_pos1, char_pos2, char_len1, char_len2);
}
}
+ void SetSubrange1(int offset, int len) {
+ subrange_offset1_ = offset;
+ }
+ void SetSubrange2(int offset, int len) {
+ subrange_offset2_ = offset;
+ }
Handle<JSArray> GetResult() {
return array_writer_.GetResult();
@@ -468,38 +571,45 @@ class TokenizingLineArrayCompareOutput : public Comparator::Output {
LineEndsWrapper line_ends2_;
Handle<String> s1_;
Handle<String> s2_;
+ int subrange_offset1_;
+ int subrange_offset2_;
};
Handle<JSArray> LiveEdit::CompareStrings(Handle<String> s1,
Handle<String> s2) {
+ s1 = FlattenGetString(s1);
+ s2 = FlattenGetString(s2);
+
LineEndsWrapper line_ends1(s1);
LineEndsWrapper line_ends2(s2);
LineArrayCompareInput input(s1, s2, line_ends1, line_ends2);
TokenizingLineArrayCompareOutput output(line_ends1, line_ends2, s1, s2);
+ NarrowDownInput(&input, &output);
+
Comparator::CalculateDifference(&input, &output);
return output.GetResult();
}
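The new entry path flattens both strings first, so the per-character Equals calls stay cheap, and trims common outer lines before the quadratic comparison runs. A hedged call-site sketch:

// Illustrative only: line-wise diff of two versions of a script source.
Handle<JSArray> chunks = LiveEdit::CompareStrings(old_source, new_source);
// 'chunks' holds one (pos1_begin, pos1_end, pos2_end) triple per change.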
-static void CompileScriptForTracker(Handle<Script> script) {
+static void CompileScriptForTracker(Isolate* isolate, Handle<Script> script) {
// TODO(635): support extensions.
- PostponeInterruptsScope postpone;
+ PostponeInterruptsScope postpone(isolate);
// Build AST.
CompilationInfo info(script);
info.MarkAsGlobal();
if (ParserApi::Parse(&info)) {
// Compile the code.
- LiveEditFunctionTracker tracker(info.function());
+ LiveEditFunctionTracker tracker(info.isolate(), info.function());
if (Compiler::MakeCodeForLiveEdit(&info)) {
ASSERT(!info.code().is_null());
tracker.RecordRootFunctionInfo(info.code());
} else {
- Top::StackOverflow();
+ info.isolate()->StackOverflow();
}
}
}
@@ -513,11 +623,12 @@ static Handle<Object> UnwrapJSValue(Handle<JSValue> jsValue) {
// Wraps any object into an OpaqueReference that hides the object
// from JavaScript.
-static Handle<JSValue> WrapInJSValue(Object* object) {
- Handle<JSFunction> constructor = Top::opaque_reference_function();
+static Handle<JSValue> WrapInJSValue(Handle<Object> object) {
+ Handle<JSFunction> constructor =
+ Isolate::Current()->opaque_reference_function();
Handle<JSValue> result =
- Handle<JSValue>::cast(Factory::NewJSObject(constructor));
- result->set_value(object);
+ Handle<JSValue>::cast(FACTORY->NewJSObject(constructor));
+ result->set_value(*object);
return result;
}
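Handlifying WrapInJSValue closes a GC hazard: NewJSObject can allocate, and a collection there could have moved the raw Object* argument before set_value ran. A hedged round-trip sketch ('shared' is assumed to be in scope):

Handle<SharedFunctionInfo> info(shared);        // 'shared' assumed in scope
Handle<JSValue> opaque = WrapInJSValue(info);   // invisible to JS code
Handle<SharedFunctionInfo> restored =
    Handle<SharedFunctionInfo>::cast(UnwrapJSValue(opaque));
ASSERT(*restored == *info);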
@@ -529,7 +640,7 @@ template<typename S>
class JSArrayBasedStruct {
public:
static S Create() {
- Handle<JSArray> array = Factory::NewJSArray(S::kSize_);
+ Handle<JSArray> array = FACTORY->NewJSArray(S::kSize_);
return S(array);
}
static S cast(Object* object) {
@@ -545,10 +656,12 @@ class JSArrayBasedStruct {
protected:
void SetField(int field_position, Handle<Object> value) {
- SetElement(array_, field_position, value);
+ SetElementNonStrict(array_, field_position, value);
}
void SetSmiValueField(int field_position, int value) {
- SetElement(array_, field_position, Handle<Smi>(Smi::FromInt(value)));
+ SetElementNonStrict(array_,
+ field_position,
+ Handle<Smi>(Smi::FromInt(value)));
}
Object* GetField(int field_position) {
return array_->GetElementNoExceptionThrown(field_position);
@@ -582,17 +695,17 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
}
void SetFunctionCode(Handle<Code> function_code,
Handle<Object> code_scope_info) {
- Handle<JSValue> code_wrapper = WrapInJSValue(*function_code);
+ Handle<JSValue> code_wrapper = WrapInJSValue(function_code);
this->SetField(kCodeOffset_, code_wrapper);
- Handle<JSValue> scope_wrapper = WrapInJSValue(*code_scope_info);
+ Handle<JSValue> scope_wrapper = WrapInJSValue(code_scope_info);
this->SetField(kCodeScopeInfoOffset_, scope_wrapper);
}
void SetOuterScopeInfo(Handle<Object> scope_info_array) {
this->SetField(kOuterScopeInfoOffset_, scope_info_array);
}
void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) {
- Handle<JSValue> info_holder = WrapInJSValue(*info);
+ Handle<JSValue> info_holder = WrapInJSValue(info);
this->SetField(kSharedFunctionInfoOffset_, info_holder);
}
int GetParentIndex() {
@@ -649,7 +762,7 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
Handle<SharedFunctionInfo> info) {
HandleScope scope;
this->SetField(kFunctionNameOffset_, name);
- Handle<JSValue> info_holder = WrapInJSValue(*info);
+ Handle<JSValue> info_holder = WrapInJSValue(info);
this->SetField(kSharedInfoOffset_, info_holder);
this->SetSmiValueField(kStartPositionOffset_, start_position);
this->SetSmiValueField(kEndPositionOffset_, end_position);
@@ -677,7 +790,7 @@ class FunctionInfoListener {
FunctionInfoListener() {
current_parent_index_ = -1;
len_ = 0;
- result_ = Factory::NewJSArray(10);
+ result_ = FACTORY->NewJSArray(10);
}
void FunctionStarted(FunctionLiteral* fun) {
@@ -687,7 +800,7 @@ class FunctionInfoListener {
fun->end_position(), fun->num_parameters(),
current_parent_index_);
current_parent_index_ = len_;
- SetElement(result_, len_, info.GetJSArray());
+ SetElementNonStrict(result_, len_, info.GetJSArray());
len_++;
}
@@ -705,7 +818,7 @@ class FunctionInfoListener {
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
result_->GetElementNoExceptionThrown(current_parent_index_));
- info.SetFunctionCode(function_code, Handle<Object>(Heap::null_value()));
+ info.SetFunctionCode(function_code, Handle<Object>(HEAP->null_value()));
}
// Saves full information about a function: its code, its scope info
@@ -731,7 +844,7 @@ class FunctionInfoListener {
Object* SerializeFunctionScope(Scope* scope) {
HandleScope handle_scope;
- Handle<JSArray> scope_info_list = Factory::NewJSArray(10);
+ Handle<JSArray> scope_info_list = FACTORY->NewJSArray(10);
int scope_info_length = 0;
// Saves some description of scope. It stores name and indexes of
@@ -739,7 +852,7 @@ class FunctionInfoListener {
// scopes of this chain.
Scope* outer_scope = scope->outer_scope();
if (outer_scope == NULL) {
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
do {
ZoneList<Variable*> list(10);
@@ -767,14 +880,19 @@ class FunctionInfoListener {
list[k] = list[l];
}
for (int i = 0; i < j; i++) {
- SetElement(scope_info_list, scope_info_length, list[i]->name());
+ SetElementNonStrict(scope_info_list,
+ scope_info_length,
+ list[i]->name());
scope_info_length++;
- SetElement(scope_info_list, scope_info_length,
- Handle<Smi>(Smi::FromInt(list[i]->AsSlot()->index())));
+ SetElementNonStrict(
+ scope_info_list,
+ scope_info_length,
+ Handle<Smi>(Smi::FromInt(list[i]->AsSlot()->index())));
scope_info_length++;
}
- SetElement(scope_info_list, scope_info_length,
- Handle<Object>(Heap::null_value()));
+ SetElementNonStrict(scope_info_list,
+ scope_info_length,
+ Handle<Object>(HEAP->null_value()));
scope_info_length++;
outer_scope = outer_scope->outer_scope();
@@ -789,18 +907,17 @@ class FunctionInfoListener {
};
-static FunctionInfoListener* active_function_info_listener = NULL;
-
JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
Handle<String> source) {
- CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+ Isolate* isolate = Isolate::Current();
+ ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
FunctionInfoListener listener;
Handle<Object> original_source = Handle<Object>(script->source());
script->set_source(*source);
- active_function_info_listener = &listener;
- CompileScriptForTracker(script);
- active_function_info_listener = NULL;
+ isolate->set_active_function_info_listener(&listener);
+ CompileScriptForTracker(isolate, script);
+ isolate->set_active_function_info_listener(NULL);
script->set_source(*original_source);
return *(listener.GetResult());
@@ -817,7 +934,7 @@ void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
Handle<String> name_handle(String::cast(info->name()));
info_wrapper.SetProperties(name_handle, info->start_position(),
info->end_position(), info);
- SetElement(array, i, info_wrapper.GetJSArray());
+ SetElementNonStrict(array, i, info_wrapper.GetJSArray());
}
}
@@ -882,12 +999,12 @@ class ReferenceCollectorVisitor : public ObjectVisitor {
// Finds all references to original and replaces them with substitution.
static void ReplaceCodeObject(Code* original, Code* substitution) {
- ASSERT(!Heap::InNewSpace(substitution));
+ ASSERT(!HEAP->InNewSpace(substitution));
AssertNoAllocation no_allocations_please;
// A zone scope for ReferenceCollectorVisitor.
- ZoneScope scope(DELETE_ON_EXIT);
+ ZoneScope scope(Isolate::Current(), DELETE_ON_EXIT);
ReferenceCollectorVisitor visitor(original);
@@ -895,7 +1012,7 @@ static void ReplaceCodeObject(Code* original, Code* substitution) {
// so temporarily replace the pointers with offset numbers
// in prologue/epilogue.
{
- Heap::IterateStrongRoots(&visitor, VISIT_ALL);
+ HEAP->IterateStrongRoots(&visitor, VISIT_ALL);
}
// Now iterate over all pointers of all objects, including code_target
@@ -925,7 +1042,7 @@ static bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
DeoptimizationInputData* data =
DeoptimizationInputData::cast(function->code()->deoptimization_data());
- if (data == Heap::empty_fixed_array()) return false;
+ if (data == HEAP->empty_fixed_array()) return false;
FixedArray* literals = data->LiteralArray();
@@ -977,7 +1094,7 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
HandleScope scope;
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Top::ThrowIllegalOperation();
+ return Isolate::Current()->ThrowIllegalOperation();
}
FunctionInfoWrapper compile_info_wrapper(new_compile_info_array);
@@ -986,8 +1103,8 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
if (IsJSFunctionCode(shared_info->code())) {
- ReplaceCodeObject(shared_info->code(),
- *(compile_info_wrapper.GetFunctionCode()));
+ Handle<Code> code = compile_info_wrapper.GetFunctionCode();
+ ReplaceCodeObject(shared_info->code(), *code);
Handle<Object> code_scope_info = compile_info_wrapper.GetCodeScopeInfo();
if (code_scope_info->IsFixedArray()) {
shared_info->set_scope_info(SerializedScopeInfo::cast(*code_scope_info));
@@ -997,20 +1114,23 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
if (shared_info->debug_info()->IsDebugInfo()) {
Handle<DebugInfo> debug_info(DebugInfo::cast(shared_info->debug_info()));
Handle<Code> new_original_code =
- Factory::CopyCode(compile_info_wrapper.GetFunctionCode());
+ FACTORY->CopyCode(compile_info_wrapper.GetFunctionCode());
debug_info->set_original_code(*new_original_code);
}
- shared_info->set_start_position(compile_info_wrapper.GetStartPosition());
- shared_info->set_end_position(compile_info_wrapper.GetEndPosition());
+ int start_position = compile_info_wrapper.GetStartPosition();
+ int end_position = compile_info_wrapper.GetEndPosition();
+ shared_info->set_start_position(start_position);
+ shared_info->set_end_position(end_position);
shared_info->set_construct_stub(
- Builtins::builtin(Builtins::JSConstructStubGeneric));
+ Isolate::Current()->builtins()->builtin(
+ Builtins::kJSConstructStubGeneric));
DeoptimizeDependentFunctions(*shared_info);
- CompilationCache::Remove(shared_info);
+ Isolate::Current()->compilation_cache()->Remove(shared_info);
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
@@ -1019,16 +1139,16 @@ MaybeObject* LiveEdit::FunctionSourceUpdated(
HandleScope scope;
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Top::ThrowIllegalOperation();
+ return Isolate::Current()->ThrowIllegalOperation();
}
SharedInfoWrapper shared_info_wrapper(shared_info_array);
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
DeoptimizeDependentFunctions(*shared_info);
- CompilationCache::Remove(shared_info);
+ Isolate::Current()->compilation_cache()->Remove(shared_info);
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
@@ -1038,7 +1158,7 @@ void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
Handle<SharedFunctionInfo>::cast(UnwrapJSValue(function_wrapper));
shared_info->set_script(*script_handle);
- CompilationCache::Remove(shared_info);
+ Isolate::Current()->compilation_cache()->Remove(shared_info);
}
@@ -1186,7 +1306,7 @@ static Handle<Code> PatchPositionsInCode(Handle<Code> code,
// The relocation info section now has a different size. We cannot simply
// rewrite it inside the code object. Instead we have to create a new
// code object.
- Handle<Code> result(Factory::CopyCode(code, buffer));
+ Handle<Code> result(FACTORY->CopyCode(code, buffer));
return result;
}
}
@@ -1196,7 +1316,7 @@ MaybeObject* LiveEdit::PatchFunctionPositions(
Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array) {
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Top::ThrowIllegalOperation();
+ return Isolate::Current()->ThrowIllegalOperation();
}
SharedInfoWrapper shared_info_wrapper(shared_info_array);
@@ -1205,13 +1325,14 @@ MaybeObject* LiveEdit::PatchFunctionPositions(
int old_function_start = info->start_position();
int new_function_start = TranslatePosition(old_function_start,
position_change_array);
- info->set_start_position(new_function_start);
- info->set_end_position(TranslatePosition(info->end_position(),
- position_change_array));
+ int new_function_end = TranslatePosition(info->end_position(),
+ position_change_array);
+ int new_function_token_pos =
+ TranslatePosition(info->function_token_position(), position_change_array);
- info->set_function_token_position(
- TranslatePosition(info->function_token_position(),
- position_change_array));
+ info->set_start_position(new_function_start);
+ info->set_end_position(new_function_end);
+ info->set_function_token_position(new_function_token_pos);
if (IsJSFunctionCode(info->code())) {
// Patch relocation info section of the code.
@@ -1227,14 +1348,14 @@ MaybeObject* LiveEdit::PatchFunctionPositions(
}
}
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
static Handle<Script> CreateScriptCopy(Handle<Script> original) {
Handle<String> original_source(String::cast(original->source()));
- Handle<Script> copy = Factory::NewScript(original_source);
+ Handle<Script> copy = FACTORY->NewScript(original_source);
copy->set_name(original->name());
copy->set_line_offset(original->line_offset());
@@ -1259,15 +1380,16 @@ Object* LiveEdit::ChangeScriptSource(Handle<Script> original_script,
Handle<Script> old_script = CreateScriptCopy(original_script);
old_script->set_name(String::cast(*old_script_name));
old_script_object = old_script;
- Debugger::OnAfterCompile(old_script, Debugger::SEND_WHEN_DEBUGGING);
+ Isolate::Current()->debugger()->OnAfterCompile(
+ old_script, Debugger::SEND_WHEN_DEBUGGING);
} else {
- old_script_object = Handle<Object>(Heap::null_value());
+ old_script_object = Handle<Object>(HEAP->null_value());
}
original_script->set_source(*new_source);
// Drop line ends so that they will be recalculated.
- original_script->set_line_ends(Heap::undefined_value());
+ original_script->set_line_ends(HEAP->undefined_value());
return *old_script_object;
}
@@ -1315,7 +1437,7 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
SharedFunctionInfo::cast(wrapper->value()));
if (function->shared() == *shared || IsInlined(*function, *shared)) {
- SetElement(result, i, Handle<Smi>(Smi::FromInt(status)));
+ SetElementNonStrict(result, i, Handle<Smi>(Smi::FromInt(status)));
return true;
}
}
@@ -1328,7 +1450,8 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
static bool FixTryCatchHandler(StackFrame* top_frame,
StackFrame* bottom_frame) {
Address* pointer_address =
- &Memory::Address_at(Top::get_address_from_id(Top::k_handler_address));
+ &Memory::Address_at(Isolate::Current()->get_address_from_id(
+ Isolate::k_handler_address));
while (*pointer_address < top_frame->sp()) {
pointer_address = &Memory::Address_at(*pointer_address);
@@ -1363,19 +1486,26 @@ static const char* DropFrames(Vector<StackFrame*> frames,
ASSERT(bottom_js_frame->is_java_script());
// Check the nature of the top frame.
- if (pre_top_frame->code()->is_inline_cache_stub() &&
- pre_top_frame->code()->ic_state() == DEBUG_BREAK) {
+ Isolate* isolate = Isolate::Current();
+ Code* pre_top_frame_code = pre_top_frame->LookupCode();
+ if (pre_top_frame_code->is_inline_cache_stub() &&
+ pre_top_frame_code->ic_state() == DEBUG_BREAK) {
// OK, we can drop inline cache calls.
*mode = Debug::FRAME_DROPPED_IN_IC_CALL;
- } else if (pre_top_frame->code() == Debug::debug_break_slot()) {
+ } else if (pre_top_frame_code ==
+ isolate->debug()->debug_break_slot()) {
// OK, we can drop debug break slot.
*mode = Debug::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
- } else if (pre_top_frame->code() ==
- Builtins::builtin(Builtins::FrameDropper_LiveEdit)) {
+ } else if (pre_top_frame_code ==
+ isolate->builtins()->builtin(
+ Builtins::kFrameDropper_LiveEdit)) {
// OK, we can drop our own code.
*mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
- } else if (pre_top_frame->code()->kind() == Code::STUB &&
- pre_top_frame->code()->major_key()) {
+ } else if (pre_top_frame_code ==
+ isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
+ *mode = Debug::FRAME_DROPPED_IN_RETURN_CALL;
+ } else if (pre_top_frame_code->kind() == Code::STUB &&
+ pre_top_frame_code->major_key()) {
// Entry from our unit tests; it's fine, we support this case.
*mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
} else {
@@ -1397,7 +1527,7 @@ static const char* DropFrames(Vector<StackFrame*> frames,
// Make sure FixTryCatchHandler is idempotent.
ASSERT(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
- Handle<Code> code(Builtins::builtin(Builtins::FrameDropper_LiveEdit));
+ Handle<Code> code = Isolate::Current()->builtins()->FrameDropper_LiveEdit();
top_frame->set_pc(code->entry());
pre_top_frame->SetCallerFp(bottom_js_frame->fp());
@@ -1424,8 +1554,9 @@ static bool IsDropableFrame(StackFrame* frame) {
// removing all listed functions if possible and if do_drop is true.
static const char* DropActivationsInActiveThread(
Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
-
- ZoneScope scope(DELETE_ON_EXIT);
+ Isolate* isolate = Isolate::Current();
+ Debug* debug = isolate->debug();
+ ZoneScope scope(isolate, DELETE_ON_EXIT);
Vector<StackFrame*> frames = CreateStackMap();
int array_len = Smi::cast(shared_info_array->length())->value();
@@ -1434,7 +1565,7 @@ static const char* DropActivationsInActiveThread(
int frame_index = 0;
for (; frame_index < frames.length(); frame_index++) {
StackFrame* frame = frames[frame_index];
- if (frame->id() == Debug::break_frame_id()) {
+ if (frame->id() == debug->break_frame_id()) {
top_frame_index = frame_index;
break;
}
@@ -1511,7 +1642,7 @@ static const char* DropActivationsInActiveThread(
break;
}
}
- Debug::FramesHaveBeenDropped(new_id, drop_mode,
+ debug->FramesHaveBeenDropped(new_id, drop_mode,
restarter_frame_function_pointer);
// Replace "blocked on active" with "replaced on active" status.
@@ -1520,7 +1651,7 @@ static const char* DropActivationsInActiveThread(
Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
Handle<Object> replaced(
Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK));
- SetElement(result, i, replaced);
+ SetElementNonStrict(result, i, replaced);
}
}
return NULL;
@@ -1534,8 +1665,8 @@ class InactiveThreadActivationsChecker : public ThreadVisitor {
: shared_info_array_(shared_info_array), result_(result),
has_blocked_functions_(false) {
}
- void VisitThread(ThreadLocalTop* top) {
- for (StackFrameIterator it(top); !it.done(); it.Advance()) {
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+ for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
has_blocked_functions_ |= CheckActivation(
shared_info_array_, result_, it.frame(),
LiveEdit::FUNCTION_BLOCKED_ON_OTHER_STACK);
@@ -1556,19 +1687,22 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
Handle<JSArray> shared_info_array, bool do_drop) {
int len = Smi::cast(shared_info_array->length())->value();
- Handle<JSArray> result = Factory::NewJSArray(len);
+ Handle<JSArray> result = FACTORY->NewJSArray(len);
// Fill the default values.
for (int i = 0; i < len; i++) {
- SetElement(result, i,
- Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH)));
+ SetElementNonStrict(
+ result,
+ i,
+ Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH)));
}
// First check inactive threads. Fail if some functions are blocked there.
InactiveThreadActivationsChecker inactive_threads_checker(shared_info_array,
result);
- ThreadManager::IterateArchivedThreads(&inactive_threads_checker);
+ Isolate::Current()->thread_manager()->IterateArchivedThreads(
+ &inactive_threads_checker);
if (inactive_threads_checker.HasBlockedFunctions()) {
return result;
}
@@ -1579,42 +1713,44 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
if (error_message != NULL) {
// Add error message as an array extra element.
Vector<const char> vector_message(error_message, StrLength(error_message));
- Handle<String> str = Factory::NewStringFromAscii(vector_message);
- SetElement(result, len, str);
+ Handle<String> str = FACTORY->NewStringFromAscii(vector_message);
+ SetElementNonStrict(result, len, str);
}
return result;
}
-LiveEditFunctionTracker::LiveEditFunctionTracker(FunctionLiteral* fun) {
- if (active_function_info_listener != NULL) {
- active_function_info_listener->FunctionStarted(fun);
+LiveEditFunctionTracker::LiveEditFunctionTracker(Isolate* isolate,
+ FunctionLiteral* fun)
+ : isolate_(isolate) {
+ if (isolate_->active_function_info_listener() != NULL) {
+ isolate_->active_function_info_listener()->FunctionStarted(fun);
}
}
LiveEditFunctionTracker::~LiveEditFunctionTracker() {
- if (active_function_info_listener != NULL) {
- active_function_info_listener->FunctionDone();
+ if (isolate_->active_function_info_listener() != NULL) {
+ isolate_->active_function_info_listener()->FunctionDone();
}
}
void LiveEditFunctionTracker::RecordFunctionInfo(
Handle<SharedFunctionInfo> info, FunctionLiteral* lit) {
- if (active_function_info_listener != NULL) {
- active_function_info_listener->FunctionInfo(info, lit->scope());
+ if (isolate_->active_function_info_listener() != NULL) {
+ isolate_->active_function_info_listener()->FunctionInfo(info, lit->scope());
}
}
void LiveEditFunctionTracker::RecordRootFunctionInfo(Handle<Code> code) {
- active_function_info_listener->FunctionCode(code);
+ isolate_->active_function_info_listener()->FunctionCode(code);
}
-bool LiveEditFunctionTracker::IsActive() {
- return active_function_info_listener != NULL;
+bool LiveEditFunctionTracker::IsActive(Isolate* isolate) {
+ return isolate->active_function_info_listener() != NULL;
}
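The tracker is pure RAII around the listener, now looked up through the isolate instead of a process-wide static. Usage mirrors CompileScriptForTracker above (sketch; 'info' is a CompilationInfo):

{
  LiveEditFunctionTracker tracker(info.isolate(), info.function());
  if (Compiler::MakeCodeForLiveEdit(&info)) {
    tracker.RecordRootFunctionInfo(info.code());
  }
}  // destructor reports FunctionDone() to the active listener, if any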
@@ -1622,7 +1758,8 @@ bool LiveEditFunctionTracker::IsActive() {
// This ifdef-else-endif section provides a working or a stub implementation of
// LiveEditFunctionTracker.
-LiveEditFunctionTracker::LiveEditFunctionTracker(FunctionLiteral* fun) {
+LiveEditFunctionTracker::LiveEditFunctionTracker(Isolate* isolate,
+ FunctionLiteral* fun) {
}
@@ -1639,7 +1776,7 @@ void LiveEditFunctionTracker::RecordRootFunctionInfo(Handle<Code> code) {
}
-bool LiveEditFunctionTracker::IsActive() {
+bool LiveEditFunctionTracker::IsActive(Isolate* isolate) {
return false;
}
diff --git a/deps/v8/src/liveedit.h b/deps/v8/src/liveedit.h
index 5f2c99c3d..4ee446612 100644
--- a/deps/v8/src/liveedit.h
+++ b/deps/v8/src/liveedit.h
@@ -49,6 +49,7 @@
// instantiate newly compiled functions.
+#include "allocation.h"
#include "compiler.h"
namespace v8 {
@@ -65,13 +66,18 @@ namespace internal {
// also collects compiled function codes.
class LiveEditFunctionTracker {
public:
- explicit LiveEditFunctionTracker(FunctionLiteral* fun);
+ explicit LiveEditFunctionTracker(Isolate* isolate, FunctionLiteral* fun);
~LiveEditFunctionTracker();
void RecordFunctionInfo(Handle<SharedFunctionInfo> info,
FunctionLiteral* lit);
void RecordRootFunctionInfo(Handle<Code> code);
- static bool IsActive();
+ static bool IsActive(Isolate* isolate);
+
+ private:
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ Isolate* isolate_;
+#endif
};
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -137,14 +143,13 @@ class LiveEdit : AllStatic {
// A general-purpose comparator between 2 arrays.
class Comparator {
public:
-
// Holds 2 arrays of some elements, allowing comparison of any element
// from the first array with any element from the second array.
class Input {
public:
- virtual int getLength1() = 0;
- virtual int getLength2() = 0;
- virtual bool equals(int index1, int index2) = 0;
+ virtual int GetLength1() = 0;
+ virtual int GetLength2() = 0;
+ virtual bool Equals(int index1, int index2) = 0;
protected:
virtual ~Input() {}
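With the accessors renamed to V8 method style, implementing the interface stays trivial; a hypothetical adapter over plain int arrays:

class IntArrayInput : public Comparator::Input {
 public:
  IntArrayInput(const int* a, int len_a, const int* b, int len_b)
      : a_(a), len_a_(len_a), b_(b), len_b_(len_b) {}
  virtual int GetLength1() { return len_a_; }
  virtual int GetLength2() { return len_b_; }
  virtual bool Equals(int index1, int index2) {
    return a_[index1] == b_[index2];
  }
 private:
  const int* a_;
  int len_a_;
  const int* b_;
  int len_b_;
};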
diff --git a/deps/v8/src/liveobjectlist.cc b/deps/v8/src/liveobjectlist.cc
index cd6fcf99e..4a9034c48 100644
--- a/deps/v8/src/liveobjectlist.cc
+++ b/deps/v8/src/liveobjectlist.cc
@@ -118,7 +118,7 @@ typedef int (*RawComparer)(const void*, const void*);
v(Code, "meta: Code") \
v(Map, "meta: Map") \
v(Oddball, "Oddball") \
- v(Proxy, "meta: Proxy") \
+ v(Foreign, "meta: Foreign") \
v(SharedFunctionInfo, "meta: SharedFunctionInfo") \
v(Struct, "meta: Struct") \
\
@@ -592,16 +592,22 @@ static bool AddObjDetail(Handle<FixedArray> arr,
return false;
}
- { MaybeObject* maybe_result =
- detail->SetProperty(*id_sym, Smi::FromInt(obj_id), NONE);
+ { MaybeObject* maybe_result = detail->SetProperty(*id_sym,
+ Smi::FromInt(obj_id),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return false;
}
- { MaybeObject* maybe_result =
- detail->SetProperty(*desc_sym, *desc, NONE);
+ { MaybeObject* maybe_result = detail->SetProperty(*desc_sym,
+ *desc,
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return false;
}
- { MaybeObject* maybe_result =
- detail->SetProperty(*size_sym, Smi::FromInt(size), NONE);
+ { MaybeObject* maybe_result = detail->SetProperty(*size_sym,
+ Smi::FromInt(size),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return false;
}
@@ -1140,16 +1146,22 @@ MaybeObject* LiveObjectList::Capture() {
Handle<JSObject> result = Factory::NewJSObject(Top::object_function());
if (result->IsFailure()) return Object::cast(*result);
- { MaybeObject* maybe_result =
- result->SetProperty(*id_sym, Smi::FromInt(lol->id()), NONE);
+ { MaybeObject* maybe_result = result->SetProperty(*id_sym,
+ Smi::FromInt(lol->id()),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
}
- { MaybeObject* maybe_result =
- result->SetProperty(*count_sym, Smi::FromInt(total_count), NONE);
+ { MaybeObject* maybe_result = result->SetProperty(*count_sym,
+ Smi::FromInt(total_count),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
}
- { MaybeObject* maybe_result =
- result->SetProperty(*size_sym, Smi::FromInt(size), NONE);
+ { MaybeObject* maybe_result = result->SetProperty(*size_sym,
+ Smi::FromInt(size),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
}
@@ -1285,19 +1297,28 @@ MaybeObject* LiveObjectList::DumpPrivate(DumpWriter* writer,
// Set the updated body.count.
Handle<String> count_sym = Factory::LookupAsciiSymbol("count");
- maybe_result = body->SetProperty(*count_sym, Smi::FromInt(count), NONE);
+ maybe_result = body->SetProperty(*count_sym,
+ Smi::FromInt(count),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
// Set the updated body.size if appropriate.
if (size >= 0) {
Handle<String> size_sym = Factory::LookupAsciiSymbol("size");
- maybe_result = body->SetProperty(*size_sym, Smi::FromInt(size), NONE);
+ maybe_result = body->SetProperty(*size_sym,
+ Smi::FromInt(size),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
}
// Set body.first_index.
Handle<String> first_sym = Factory::LookupAsciiSymbol("first_index");
- maybe_result = body->SetProperty(*first_sym, Smi::FromInt(start), NONE);
+ maybe_result = body->SetProperty(*first_sym,
+ Smi::FromInt(start),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
// Allocate the JSArray of the elements.
@@ -1307,7 +1328,10 @@ MaybeObject* LiveObjectList::DumpPrivate(DumpWriter* writer,
// Set body.elements.
Handle<String> elements_sym = Factory::LookupAsciiSymbol("elements");
- maybe_result = body->SetProperty(*elements_sym, *elements, NONE);
+ maybe_result = body->SetProperty(*elements_sym,
+ *elements,
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
return *body;
@@ -1399,11 +1423,20 @@ MaybeObject* LiveObjectList::SummarizePrivate(SummaryWriter* writer,
Handle<String> desc = Factory::LookupAsciiSymbol(desc_cstr);
int size = summary.Size(type);
- maybe_result = detail->SetProperty(*desc_sym, *desc, NONE);
+ maybe_result = detail->SetProperty(*desc_sym,
+ *desc,
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = detail->SetProperty(*count_sym, Smi::FromInt(count), NONE);
+ maybe_result = detail->SetProperty(*count_sym,
+ Smi::FromInt(count),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = detail->SetProperty(*size_sym, Smi::FromInt(size), NONE);
+ maybe_result = detail->SetProperty(*size_sym,
+ Smi::FromInt(size),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
summary_arr->set(idx++, *detail);
@@ -1422,11 +1455,16 @@ MaybeObject* LiveObjectList::SummarizePrivate(SummaryWriter* writer,
// Fill out the body object.
int total_count = summary.total_count();
int total_size = summary.total_size();
- maybe_result =
- body->SetProperty(*count_sym, Smi::FromInt(total_count), NONE);
+ maybe_result = body->SetProperty(*count_sym,
+ Smi::FromInt(total_count),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = body->SetProperty(*size_sym, Smi::FromInt(total_size), NONE);
+ maybe_result = body->SetProperty(*size_sym,
+ Smi::FromInt(total_size),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
if (is_tracking_roots) {
@@ -1435,15 +1473,22 @@ MaybeObject* LiveObjectList::SummarizePrivate(SummaryWriter* writer,
Handle<String> root_sym = Factory::LookupAsciiSymbol("found_root");
Handle<String> weak_root_sym =
Factory::LookupAsciiSymbol("found_weak_root");
- maybe_result =
- body->SetProperty(*root_sym, Smi::FromInt(found_root), NONE);
+ maybe_result = body->SetProperty(*root_sym,
+ Smi::FromInt(found_root),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
- maybe_result =
- body->SetProperty(*weak_root_sym, Smi::FromInt(found_weak_root), NONE);
+ maybe_result = body->SetProperty(*weak_root_sym,
+ Smi::FromInt(found_weak_root),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
}
- maybe_result = body->SetProperty(*summary_sym, *summary_obj, NONE);
+ maybe_result = body->SetProperty(*summary_sym,
+ *summary_obj,
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
return *body;
@@ -1501,13 +1546,20 @@ MaybeObject* LiveObjectList::Info(int start_idx, int dump_limit) {
Handle<JSObject> detail = Factory::NewJSObject(Top::object_function());
if (detail->IsFailure()) return Object::cast(*detail);
- maybe_result =
- detail->SetProperty(*id_sym, Smi::FromInt(lol->id()), NONE);
+ maybe_result = detail->SetProperty(*id_sym,
+ Smi::FromInt(lol->id()),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
- maybe_result =
- detail->SetProperty(*count_sym, Smi::FromInt(count), NONE);
+ maybe_result = detail->SetProperty(*count_sym,
+ Smi::FromInt(count),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = detail->SetProperty(*size_sym, Smi::FromInt(size), NONE);
+ maybe_result = detail->SetProperty(*size_sym,
+ Smi::FromInt(size),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
list->set(idx++, *detail);
dump_limit--;
@@ -1522,17 +1574,24 @@ MaybeObject* LiveObjectList::Info(int start_idx, int dump_limit) {
Handle<JSObject> result = Factory::NewJSObject(Top::object_function());
if (result->IsFailure()) return Object::cast(*result);
- maybe_result =
- result->SetProperty(*count_sym, Smi::FromInt(total_count), NONE);
+ maybe_result = result->SetProperty(*count_sym,
+ Smi::FromInt(total_count),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
Handle<String> first_sym = Factory::LookupAsciiSymbol("first_index");
- maybe_result =
- result->SetProperty(*first_sym, Smi::FromInt(start_idx), NONE);
+ maybe_result = result->SetProperty(*first_sym,
+ Smi::FromInt(start_idx),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
Handle<String> lists_sym = Factory::LookupAsciiSymbol("lists");
- maybe_result = result->SetProperty(*lists_sym, *lols, NONE);
+ maybe_result = result->SetProperty(*lists_sym,
+ *lols,
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
return *result;
@@ -1590,7 +1649,6 @@ Object* LiveObjectList::GetObjId(Handle<String> address) {
// Helper class for copying HeapObjects.
class LolVisitor: public ObjectVisitor {
public:
-
LolVisitor(HeapObject* target, Handle<HeapObject> handle_to_skip)
: target_(target), handle_to_skip_(handle_to_skip), found_(false) {}
@@ -1880,7 +1938,10 @@ MaybeObject* LiveObjectList::GetObjRetainers(int obj_id,
// Set body.id.
Handle<JSObject> body = Handle<JSObject>(JSObject::cast(body_obj));
Handle<String> id_sym = Factory::LookupAsciiSymbol("id");
- maybe_result = body->SetProperty(*id_sym, Smi::FromInt(obj_id), NONE);
+ maybe_result = body->SetProperty(*id_sym,
+ Smi::FromInt(obj_id),
+ NONE,
+ kNonStrictMode);
if (maybe_result->IsFailure()) return maybe_result;
return *body;
diff --git a/deps/v8/src/liveobjectlist.h b/deps/v8/src/liveobjectlist.h
index 423f8f0d7..23e418d6d 100644
--- a/deps/v8/src/liveobjectlist.h
+++ b/deps/v8/src/liveobjectlist.h
@@ -273,28 +273,28 @@ class LiveObjectList {
inline static void ProcessNonLive(HeapObject* obj) {}
inline static void UpdateReferencesForScavengeGC() {}
- inline static MaybeObject* Capture() { return Heap::undefined_value(); }
+ inline static MaybeObject* Capture() { return HEAP->undefined_value(); }
inline static bool Delete(int id) { return false; }
inline static MaybeObject* Dump(int id1,
int id2,
int start_idx,
int dump_limit,
Handle<JSObject> filter_obj) {
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
inline static MaybeObject* Info(int start_idx, int dump_limit) {
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
inline static MaybeObject* Summarize(int id1,
int id2,
Handle<JSObject> filter_obj) {
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
inline static void Reset() {}
- inline static Object* GetObj(int obj_id) { return Heap::undefined_value(); }
+ inline static Object* GetObj(int obj_id) { return HEAP->undefined_value(); }
inline static Object* GetObjId(Handle<String> address) {
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
inline static MaybeObject* GetObjRetainers(int obj_id,
Handle<JSObject> instance_filter,
@@ -302,15 +302,15 @@ class LiveObjectList {
int start,
int count,
Handle<JSObject> filter_obj) {
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
inline static Object* GetPath(int obj_id1,
int obj_id2,
Handle<JSObject> instance_filter) {
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
- inline static Object* PrintObj(int obj_id) { return Heap::undefined_value(); }
+ inline static Object* PrintObj(int obj_id) { return HEAP->undefined_value(); }
};
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index 9a498ec0f..1bba7cd54 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "log-utils.h"
+#include "string-stream.h"
namespace v8 {
namespace internal {
@@ -118,50 +119,141 @@ int LogDynamicBuffer::WriteInternal(const char* data, int data_size) {
return data_size;
}
-
-bool Log::is_stopped_ = false;
-Log::WritePtr Log::Write = NULL;
-FILE* Log::output_handle_ = NULL;
-FILE* Log::output_code_handle_ = NULL;
-LogDynamicBuffer* Log::output_buffer_ = NULL;
// Must be the same message as in Logger::PauseProfiler.
-const char* Log::kDynamicBufferSeal = "profiler,\"pause\"\n";
-Mutex* Log::mutex_ = NULL;
-char* Log::message_buffer_ = NULL;
+const char* const Log::kDynamicBufferSeal = "profiler,\"pause\"\n";
+
+Log::Log(Logger* logger)
+ : write_to_file_(false),
+ is_stopped_(false),
+ output_handle_(NULL),
+ ll_output_handle_(NULL),
+ output_buffer_(NULL),
+ mutex_(NULL),
+ message_buffer_(NULL),
+ logger_(logger) {
+}
+
+
+static void AddIsolateIdIfNeeded(StringStream* stream) {
+ Isolate* isolate = Isolate::Current();
+ if (isolate->IsDefaultIsolate()) return;
+ stream->Add("isolate-%p-", isolate);
+}
-void Log::Init() {
+void Log::Initialize() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
mutex_ = OS::CreateMutex();
message_buffer_ = NewArray<char>(kMessageBufferSize);
+
+ // --log-all enables all the log flags.
+ if (FLAG_log_all) {
+ FLAG_log_runtime = true;
+ FLAG_log_api = true;
+ FLAG_log_code = true;
+ FLAG_log_gc = true;
+ FLAG_log_suspect = true;
+ FLAG_log_handles = true;
+ FLAG_log_regexp = true;
+ }
+
+ // --prof implies --log-code.
+ if (FLAG_prof) FLAG_log_code = true;
+
+ // --prof_lazy controls --log-code, implies --noprof_auto.
+ if (FLAG_prof_lazy) {
+ FLAG_log_code = false;
+ FLAG_prof_auto = false;
+ }
+
+ bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
+ || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
+ || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof;
+
+ bool open_log_file = start_logging || FLAG_prof_lazy;
+
+ // If we're logging anything, we need to open the log file.
+ if (open_log_file) {
+ if (strcmp(FLAG_logfile, "-") == 0) {
+ OpenStdout();
+ } else if (strcmp(FLAG_logfile, "*") == 0) {
+ OpenMemoryBuffer();
+ } else {
+ if (strchr(FLAG_logfile, '%') != NULL ||
+ !Isolate::Current()->IsDefaultIsolate()) {
+ // If there's a '%' in the log file name we have to expand
+ // placeholders.
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ AddIsolateIdIfNeeded(&stream);
+ for (const char* p = FLAG_logfile; *p; p++) {
+ if (*p == '%') {
+ p++;
+ switch (*p) {
+ case '\0':
+ // If there's a % at the end of the string we back up
+ // one character so we can escape the loop properly.
+ p--;
+ break;
+ case 't': {
+ // %t expands to the current time in milliseconds.
+ double time = OS::TimeCurrentMillis();
+ stream.Add("%.0f", FmtElm(time));
+ break;
+ }
+ case '%':
+ // %% expands (contracts really) to %.
+ stream.Put('%');
+ break;
+ default:
+ // All other %'s expand to themselves.
+ stream.Put('%');
+ stream.Put(*p);
+ break;
+ }
+ } else {
+ stream.Put(*p);
+ }
+ }
+ SmartPointer<const char> expanded = stream.ToCString();
+ OpenFile(*expanded);
+ } else {
+ OpenFile(FLAG_logfile);
+ }
+ }
+ }
+#endif
}
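The '%' placeholder grammar above is small enough to restate standalone. A simplified sketch of the same rules (std::string instead of StringStream, no isolate prefix):

#include <cstdio>
#include <string>

std::string ExpandLogFileName(const char* pattern, double time_ms) {
  std::string out;
  for (const char* p = pattern; *p; p++) {
    if (*p != '%') { out += *p; continue; }
    p++;
    switch (*p) {
      case '\0':           // trailing '%': drop it and let the loop end
        p--;
        break;
      case 't': {          // %t expands to the current time in ms
        char buf[32];
        snprintf(buf, sizeof(buf), "%.0f", time_ms);
        out += buf;
        break;
      }
      case '%':            // %% contracts to a single %
        out += '%';
        break;
      default:             // any other % sequence is kept verbatim
        out += '%';
        out += *p;
        break;
    }
  }
  return out;
}
// ExpandLogFileName("v8-%t.log", 1309361211000.0) -> "v8-1309361211000.log"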
void Log::OpenStdout() {
ASSERT(!IsEnabled());
output_handle_ = stdout;
- Write = WriteToFile;
- Init();
+ write_to_file_ = true;
}
-static const char kCodeLogExt[] = ".code";
+// Extension added to V8 log file name to get the low-level log name.
+static const char kLowLevelLogExt[] = ".ll";
+
+// File buffer size of the low-level log. We don't use the default to
+// minimize the associated overhead.
+static const int kLowLevelLogBufferSize = 2 * MB;
void Log::OpenFile(const char* name) {
ASSERT(!IsEnabled());
output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
+ write_to_file_ = true;
if (FLAG_ll_prof) {
- // Open a file for logging the contents of code objects so that
- // they can be disassembled later.
- size_t name_len = strlen(name);
- ScopedVector<char> code_name(
- static_cast<int>(name_len + sizeof(kCodeLogExt)));
- memcpy(code_name.start(), name, name_len);
- memcpy(code_name.start() + name_len, kCodeLogExt, sizeof(kCodeLogExt));
- output_code_handle_ = OS::FOpen(code_name.start(), OS::LogFileOpenMode);
+ // Open the low-level log file.
+ size_t len = strlen(name);
+ ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLowLevelLogExt)));
+ memcpy(ll_name.start(), name, len);
+ memcpy(ll_name.start() + len, kLowLevelLogExt, sizeof(kLowLevelLogExt));
+ ll_output_handle_ = OS::FOpen(ll_name.start(), OS::LogFileOpenMode);
+ setvbuf(ll_output_handle_, NULL, _IOFBF, kLowLevelLogBufferSize);
}
- Write = WriteToFile;
- Init();
}
@@ -170,24 +262,20 @@ void Log::OpenMemoryBuffer() {
output_buffer_ = new LogDynamicBuffer(
kDynamicBufferBlockSize, kMaxDynamicBufferSize,
kDynamicBufferSeal, StrLength(kDynamicBufferSeal));
- Write = WriteToMemory;
- Init();
+ write_to_file_ = false;
}
void Log::Close() {
- if (Write == WriteToFile) {
+ if (write_to_file_) {
if (output_handle_ != NULL) fclose(output_handle_);
output_handle_ = NULL;
- if (output_code_handle_ != NULL) fclose(output_code_handle_);
- output_code_handle_ = NULL;
- } else if (Write == WriteToMemory) {
+ if (ll_output_handle_ != NULL) fclose(ll_output_handle_);
+ ll_output_handle_ = NULL;
+ } else {
delete output_buffer_;
output_buffer_ = NULL;
- } else {
- ASSERT(Write == NULL);
}
- Write = NULL;
DeleteArray(message_buffer_);
message_buffer_ = NULL;
@@ -200,7 +288,7 @@ void Log::Close() {
int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
- if (Write != WriteToMemory) return 0;
+ if (write_to_file_) return 0;
ASSERT(output_buffer_ != NULL);
ASSERT(from_pos >= 0);
ASSERT(max_size >= 0);
@@ -220,17 +308,16 @@ int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
}
-LogMessageBuilder::WriteFailureHandler
- LogMessageBuilder::write_failure_handler = NULL;
-
-
-LogMessageBuilder::LogMessageBuilder(): sl(Log::mutex_), pos_(0) {
- ASSERT(Log::message_buffer_ != NULL);
+LogMessageBuilder::LogMessageBuilder(Logger* logger)
+ : log_(logger->log_),
+ sl(log_->mutex_),
+ pos_(0) {
+ ASSERT(log_->message_buffer_ != NULL);
}
void LogMessageBuilder::Append(const char* format, ...) {
- Vector<char> buf(Log::message_buffer_ + pos_,
+ Vector<char> buf(log_->message_buffer_ + pos_,
Log::kMessageBufferSize - pos_);
va_list args;
va_start(args, format);
@@ -241,7 +328,7 @@ void LogMessageBuilder::Append(const char* format, ...) {
void LogMessageBuilder::AppendVA(const char* format, va_list args) {
- Vector<char> buf(Log::message_buffer_ + pos_,
+ Vector<char> buf(log_->message_buffer_ + pos_,
Log::kMessageBufferSize - pos_);
int result = v8::internal::OS::VSNPrintF(buf, format, args);
@@ -257,7 +344,7 @@ void LogMessageBuilder::AppendVA(const char* format, va_list args) {
void LogMessageBuilder::Append(const char c) {
if (pos_ < Log::kMessageBufferSize) {
- Log::message_buffer_[pos_++] = c;
+ log_->message_buffer_[pos_++] = c;
}
ASSERT(pos_ <= Log::kMessageBufferSize);
}
@@ -278,6 +365,7 @@ void LogMessageBuilder::AppendAddress(Address addr) {
void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
+ if (str == NULL) return;
  AssertNoAllocation no_heap_allocation; // Ensure the string stays valid.
int len = str->length();
if (len > 0x1000)
@@ -315,7 +403,7 @@ void LogMessageBuilder::AppendStringPart(const char* str, int len) {
ASSERT(len >= 0);
if (len == 0) return;
}
- Vector<char> buf(Log::message_buffer_ + pos_,
+ Vector<char> buf(log_->message_buffer_ + pos_,
Log::kMessageBufferSize - pos_);
OS::StrNCpy(buf, str, len);
pos_ += len;
@@ -325,12 +413,16 @@ void LogMessageBuilder::AppendStringPart(const char* str, int len) {
void LogMessageBuilder::WriteToLogFile() {
ASSERT(pos_ <= Log::kMessageBufferSize);
- const int written = Log::Write(Log::message_buffer_, pos_);
- if (written != pos_ && write_failure_handler != NULL) {
- write_failure_handler();
+ const int written = log_->write_to_file_ ?
+ log_->WriteToFile(log_->message_buffer_, pos_) :
+ log_->WriteToMemory(log_->message_buffer_, pos_);
+ if (written != pos_) {
+ log_->stop();
+ log_->logger_->LogFailure();
}
}
+
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index 719d37030..81bbf779f 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -28,11 +28,15 @@
#ifndef V8_LOG_UTILS_H_
#define V8_LOG_UTILS_H_
+#include "allocation.h"
+
namespace v8 {
namespace internal {
#ifdef ENABLE_LOGGING_AND_PROFILING
+class Logger;
+
// A memory buffer that increments its size as you write to it. Size
// is incremented in 'block_size' steps, never exceeding 'max_size'.
// During growth, memory contents are never copied. At the end of the
@@ -89,28 +93,23 @@ class LogDynamicBuffer {
// Functions and data for performing output of log messages.
-class Log : public AllStatic {
+class Log {
public:
- // Opens stdout for logging.
- static void OpenStdout();
-
- // Opens file for logging.
- static void OpenFile(const char* name);
- // Opens memory buffer for logging.
- static void OpenMemoryBuffer();
+ // Performs process-wide initialization.
+ void Initialize();
// Disables logging, but preserves acquired resources.
- static void stop() { is_stopped_ = true; }
+ void stop() { is_stopped_ = true; }
- // Frees all resources acquired in Open... functions.
- static void Close();
+ // Frees all resources acquired in Initialize and Open... functions.
+ void Close();
// See description in include/v8.h.
- static int GetLogLines(int from_pos, char* dest_buf, int max_size);
+ int GetLogLines(int from_pos, char* dest_buf, int max_size);
// Returns whether logging is enabled.
- static bool IsEnabled() {
+ bool IsEnabled() {
return !is_stopped_ && (output_handle_ != NULL || output_buffer_ != NULL);
}
@@ -118,16 +117,19 @@ class Log : public AllStatic {
static const int kMessageBufferSize = v8::V8::kMinimumSizeForLogLinesBuffer;
private:
- typedef int (*WritePtr)(const char* msg, int length);
+ explicit Log(Logger* logger);
- // Initialization function called from Open... functions.
- static void Init();
+ // Opens stdout for logging.
+ void OpenStdout();
- // Write functions assume that mutex_ is acquired by the caller.
- static WritePtr Write;
+ // Opens file for logging.
+ void OpenFile(const char* name);
+
+ // Opens memory buffer for logging.
+ void OpenMemoryBuffer();
// Implementation of writing to a log file.
- static int WriteToFile(const char* msg, int length) {
+ int WriteToFile(const char* msg, int length) {
ASSERT(output_handle_ != NULL);
size_t rv = fwrite(msg, 1, length, output_handle_);
ASSERT(static_cast<size_t>(length) == rv);
@@ -137,25 +139,27 @@ class Log : public AllStatic {
}
// Implementation of writing to a memory buffer.
- static int WriteToMemory(const char* msg, int length) {
+ int WriteToMemory(const char* msg, int length) {
ASSERT(output_buffer_ != NULL);
return output_buffer_->Write(msg, length);
}
+ bool write_to_file_;
+
// Whether logging is stopped (e.g. due to insufficient resources).
- static bool is_stopped_;
+ bool is_stopped_;
// When logging is active, either output_handle_ or output_buffer_ is used
// to store a pointer to the log destination. If logging was opened via
// OpenStdout or OpenFile, then output_handle_ is used. If logging was opened
// via OpenMemoryBuffer, then output_buffer_ is used.
// mutex_ should be acquired before using output_handle_ or output_buffer_.
- static FILE* output_handle_;
+ FILE* output_handle_;
- // Used when low-level profiling is active to save code object contents.
- static FILE* output_code_handle_;
+ // Used when low-level profiling is active.
+ FILE* ll_output_handle_;
- static LogDynamicBuffer* output_buffer_;
+ LogDynamicBuffer* output_buffer_;
// Size of dynamic buffer block (and dynamic buffer initial size).
static const int kDynamicBufferBlockSize = 65536;
@@ -164,15 +168,17 @@ class Log : public AllStatic {
static const int kMaxDynamicBufferSize = 50 * 1024 * 1024;
// Message to "seal" dynamic buffer with.
- static const char* kDynamicBufferSeal;
+ static const char* const kDynamicBufferSeal;
// mutex_ is a Mutex used for enforcing exclusive
// access to the formatting buffer and the log file or log memory buffer.
- static Mutex* mutex_;
+ Mutex* mutex_;
// Buffer used for formatting log messages. This is a singleton buffer and
// mutex_ should be acquired before using it.
- static char* message_buffer_;
+ char* message_buffer_;
+
+ Logger* logger_;
friend class Logger;
friend class LogMessageBuilder;
@@ -185,7 +191,7 @@ class LogMessageBuilder BASE_EMBEDDED {
public:
// Create a message builder starting from position 0. This acquires the mutex
// in the log as well.
- explicit LogMessageBuilder();
+ explicit LogMessageBuilder(Logger* logger);
~LogMessageBuilder() { }
// Append string data to the log message.
@@ -211,16 +217,9 @@ class LogMessageBuilder BASE_EMBEDDED {
// Write the log message to the log file currently opened.
void WriteToLogFile();
- // A handler that is called when Log::Write fails.
- typedef void (*WriteFailureHandler)();
-
- static void set_write_failure_handler(WriteFailureHandler handler) {
- write_failure_handler = handler;
- }
-
private:
- static WriteFailureHandler write_failure_handler;
+ Log* log_;
ScopedLock sl;
int pos_;
};
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 16aeadb0b..7c85c8a26 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -52,24 +52,25 @@ namespace internal {
//
class SlidingStateWindow {
public:
- SlidingStateWindow();
+ explicit SlidingStateWindow(Isolate* isolate);
~SlidingStateWindow();
void AddState(StateTag state);
private:
static const int kBufferSize = 256;
+ Counters* counters_;
int current_index_;
bool is_full_;
byte buffer_[kBufferSize];
void IncrementStateCounter(StateTag state) {
- Counters::state_counters[state].Increment();
+ counters_->state_counters(state)->Increment();
}
void DecrementStateCounter(StateTag state) {
- Counters::state_counters[state].Decrement();
+ counters_->state_counters(state)->Decrement();
}
};
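AddState itself is outside this hunk; a hedged reconstruction consistent with the fields above shows how the ring buffer keeps the per-state counters in sync:

void SlidingStateWindow::AddState(StateTag state) {
  if (is_full_) {
    // Evict the oldest state before overwriting its slot.
    DecrementStateCounter(static_cast<StateTag>(buffer_[current_index_]));
  } else if (current_index_ == kBufferSize - 1) {
    is_full_ = true;
  }
  buffer_[current_index_] = static_cast<byte>(state);
  IncrementStateCounter(state);
  current_index_ = (current_index_ + 1) % kBufferSize;
}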
@@ -82,7 +83,7 @@ class SlidingStateWindow {
//
class Profiler: public Thread {
public:
- Profiler();
+ explicit Profiler(Isolate* isolate);
void Engage();
void Disengage();
@@ -113,14 +114,15 @@ class Profiler: public Thread {
void Run();
// Pause and Resume TickSample data collection.
- static bool paused() { return paused_; }
- static void pause() { paused_ = true; }
- static void resume() { paused_ = false; }
+ bool paused() const { return paused_; }
+ void pause() { paused_ = true; }
+ void resume() { paused_ = false; }
private:
// Returns the next index in the cyclic buffer.
int Succ(int index) { return (index + 1) % kBufferSize; }
+ Isolate* isolate_;
// Cyclic buffer for communicating profiling samples
// between the signal handler and the worker thread.
static const int kBufferSize = 128;
@@ -137,43 +139,41 @@ class Profiler: public Thread {
bool running_;
// Tells whether we are currently recording tick samples.
- static bool paused_;
+ bool paused_;
};
-bool Profiler::paused_ = false;
-
//
// StackTracer implementation
//
-void StackTracer::Trace(TickSample* sample) {
- sample->tos = NULL;
- sample->frames_count = 0;
+void StackTracer::Trace(Isolate* isolate, TickSample* sample) {
+ ASSERT(isolate->IsInitialized());
// Avoid collecting traces while doing GC.
if (sample->state == GC) return;
- const Address js_entry_sp = Top::js_entry_sp(Top::GetCurrentThread());
+ const Address js_entry_sp =
+ Isolate::js_entry_sp(isolate->thread_local_top());
if (js_entry_sp == 0) {
// Not executing JS now.
return;
}
- // Sample potential return address value for frameless invocation of
- // stubs (we'll figure out later, if this value makes sense).
- sample->tos = Memory::Address_at(sample->sp);
-
- int i = 0;
- const Address callback = Top::external_callback();
- // Surprisingly, PC can point _exactly_ to callback start, with good
- // probability, and this will result in reporting fake nested
- // callback call.
- if (callback != NULL && callback != sample->pc) {
- sample->stack[i++] = callback;
+ const Address callback = isolate->external_callback();
+ if (callback != NULL) {
+ sample->external_callback = callback;
+ sample->has_external_callback = true;
+ } else {
+ // Sample potential return address value for frameless invocation of
+    // stubs (we'll figure out later whether this value makes sense).
+ sample->tos = Memory::Address_at(sample->sp);
+ sample->has_external_callback = false;
}
- SafeStackTraceFrameIterator it(sample->fp, sample->sp,
+ SafeStackTraceFrameIterator it(isolate,
+ sample->fp, sample->sp,
sample->sp, js_entry_sp);
+ int i = 0;
while (!it.done() && i < TickSample::kMaxFramesCount) {
sample->stack[i++] = it.frame()->pc();
it.Advance();
@@ -188,8 +188,8 @@ void StackTracer::Trace(TickSample* sample) {
//
class Ticker: public Sampler {
public:
- explicit Ticker(int interval) :
- Sampler(interval),
+ Ticker(Isolate* isolate, int interval):
+ Sampler(isolate, interval),
window_(NULL),
profiler_(NULL) {}
@@ -225,7 +225,7 @@ class Ticker: public Sampler {
protected:
virtual void DoSampleStack(TickSample* sample) {
- StackTracer::Trace(sample);
+ StackTracer::Trace(isolate(), sample);
}
private:
@@ -237,16 +237,17 @@ class Ticker: public Sampler {
//
// SlidingStateWindow implementation.
//
-SlidingStateWindow::SlidingStateWindow(): current_index_(0), is_full_(false) {
+SlidingStateWindow::SlidingStateWindow(Isolate* isolate)
+ : counters_(isolate->counters()), current_index_(0), is_full_(false) {
for (int i = 0; i < kBufferSize; i++) {
buffer_[i] = static_cast<byte>(OTHER);
}
- Logger::ticker_->SetWindow(this);
+ isolate->logger()->ticker_->SetWindow(this);
}
SlidingStateWindow::~SlidingStateWindow() {
- Logger::ticker_->ClearWindow();
+ LOGGER->ticker_->ClearWindow();
}
@@ -266,14 +267,16 @@ void SlidingStateWindow::AddState(StateTag state) {
//
// Profiler implementation.
//
-Profiler::Profiler()
+Profiler::Profiler(Isolate* isolate)
: Thread("v8:Profiler"),
+ isolate_(isolate),
head_(0),
tail_(0),
overflow_(false),
buffer_semaphore_(OS::CreateSemaphore(0)),
engaged_(false),
- running_(false) {
+ running_(false),
+ paused_(false) {
}
@@ -292,9 +295,9 @@ void Profiler::Engage() {
Start();
// Register to get ticks.
- Logger::ticker_->SetProfiler(this);
+ LOGGER->ticker_->SetProfiler(this);
- Logger::ProfilerBeginEvent();
+ LOGGER->ProfilerBeginEvent();
}
@@ -302,7 +305,7 @@ void Profiler::Disengage() {
if (!engaged_) return;
// Stop receiving ticks.
- Logger::ticker_->ClearProfiler();
+ LOGGER->ticker_->ClearProfiler();
// Terminate the worker thread by setting running_ to false,
// inserting a fake element in the queue and then waiting for
@@ -314,7 +317,7 @@ void Profiler::Disengage() {
Insert(&sample);
Join();
- LOG(UncheckedStringEvent("profiler", "end"));
+ LOG(ISOLATE, UncheckedStringEvent("profiler", "end"));
}
@@ -322,32 +325,231 @@ void Profiler::Run() {
TickSample sample;
bool overflow = Remove(&sample);
while (running_) {
- LOG(TickEvent(&sample, overflow));
+ LOG(isolate_, TickEvent(&sample, overflow));
overflow = Remove(&sample);
}
}
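
Run() is the consumer side of a fixed-size, single-producer/single-consumer ring buffer that the ticker thread fills. The Insert/Remove pair lives in the Profiler declaration outside this hunk; a minimal sketch of the assumed handoff, taking Succ() to be the circular successor (index + 1) % kBufferSize:

    // Sketch only -- not part of this diff; details reconstructed from the
    // members initialized in the constructor above.
    void Profiler::Insert(TickSample* sample) {
      if (paused_) return;                 // per-isolate pause flag
      if (Succ(head_) == tail_) {
        overflow_ = true;                  // buffer full: drop the sample
      } else {
        buffer_[head_] = *sample;
        head_ = Succ(head_);
        buffer_semaphore_->Signal();       // wakes Remove() in Run()
      }
    }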
+// Low-level profiling event structures.
+
+struct LowLevelCodeCreateStruct {
+ static const char kTag = 'C';
+
+ int32_t name_size;
+ Address code_address;
+ int32_t code_size;
+};
+
+
+struct LowLevelCodeMoveStruct {
+ static const char kTag = 'M';
+
+ Address from_address;
+ Address to_address;
+};
+
+
+struct LowLevelCodeDeleteStruct {
+ static const char kTag = 'D';
+
+ Address address;
+};
+
+
+struct LowLevelSnapshotPositionStruct {
+ static const char kTag = 'P';
+
+ Address address;
+ int32_t position;
+};
+
+
+static const char kCodeMovingGCTag = 'G';
+
+
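
Each ll_prof record on the binary stream is framed by its one-byte kTag followed by the packed struct, as the LowLevelLogWriteStruct/LowLevelLogWriteBytes calls further down imply. A sketch of the assumed writer (the template itself is declared in log.h, not in this hunk):

    // Assumed framing: tag byte first, then the raw struct bytes.
    template <typename T>
    void Logger::LowLevelLogWriteStruct(const T& s) {
      char tag = T::kTag;
      LowLevelLogWriteBytes(reinterpret_cast<const char*>(&tag), sizeof(tag));
      LowLevelLogWriteBytes(reinterpret_cast<const char*>(&s), sizeof(s));
    }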
//
// Logger class implementation.
//
-Ticker* Logger::ticker_ = NULL;
-Profiler* Logger::profiler_ = NULL;
-SlidingStateWindow* Logger::sliding_state_window_ = NULL;
-int Logger::logging_nesting_ = 0;
-int Logger::cpu_profiler_nesting_ = 0;
-int Logger::heap_profiler_nesting_ = 0;
+
+class Logger::NameMap {
+ public:
+ NameMap() : impl_(&PointerEquals) {}
+
+ ~NameMap() {
+ for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
+ DeleteArray(static_cast<const char*>(p->value));
+ }
+ }
+
+ void Insert(Address code_address, const char* name, int name_size) {
+ HashMap::Entry* entry = FindOrCreateEntry(code_address);
+ if (entry->value == NULL) {
+ entry->value = CopyName(name, name_size);
+ }
+ }
+
+ const char* Lookup(Address code_address) {
+ HashMap::Entry* entry = FindEntry(code_address);
+ return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
+ }
+
+ void Remove(Address code_address) {
+ HashMap::Entry* entry = FindEntry(code_address);
+ if (entry != NULL) {
+ DeleteArray(static_cast<char*>(entry->value));
+ RemoveEntry(entry);
+ }
+ }
+
+ void Move(Address from, Address to) {
+ if (from == to) return;
+ HashMap::Entry* from_entry = FindEntry(from);
+ ASSERT(from_entry != NULL);
+ void* value = from_entry->value;
+ RemoveEntry(from_entry);
+ HashMap::Entry* to_entry = FindOrCreateEntry(to);
+ ASSERT(to_entry->value == NULL);
+ to_entry->value = value;
+ }
+
+ private:
+ static bool PointerEquals(void* lhs, void* rhs) {
+ return lhs == rhs;
+ }
+
+ static char* CopyName(const char* name, int name_size) {
+ char* result = NewArray<char>(name_size + 1);
+ for (int i = 0; i < name_size; ++i) {
+ char c = name[i];
+ if (c == '\0') c = ' ';
+ result[i] = c;
+ }
+ result[name_size] = '\0';
+ return result;
+ }
+
+ HashMap::Entry* FindOrCreateEntry(Address code_address) {
+ return impl_.Lookup(code_address, ComputePointerHash(code_address), true);
+ }
+
+ HashMap::Entry* FindEntry(Address code_address) {
+ return impl_.Lookup(code_address, ComputePointerHash(code_address), false);
+ }
+
+ void RemoveEntry(HashMap::Entry* entry) {
+ impl_.Remove(entry->key, entry->hash);
+ }
+
+ HashMap impl_;
+
+ DISALLOW_COPY_AND_ASSIGN(NameMap);
+};
+
+
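NameMap keys saved code names by code-object address so the serializer paths in CodeMoveEvent, CodeDeleteEvent and SnapshotPositionEvent below can track objects across GC. A hypothetical round trip (old_addr/new_addr are illustrative; the class is private to Logger):

    NameMap map;
    map.Insert(old_addr, "LazyCompile:foo", 15);  // copies name, maps NULs to ' '
    map.Move(old_addr, new_addr);                 // GC moved the code object
    map.Lookup(new_addr);                         // -> "LazyCompile:foo"
    map.Remove(new_addr);                         // code object deleted
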
+class Logger::NameBuffer {
+ public:
+ NameBuffer() { Reset(); }
+
+ void Reset() {
+ utf8_pos_ = 0;
+ }
+
+ void AppendString(String* str) {
+ if (str == NULL) return;
+ if (str->HasOnlyAsciiChars()) {
+ int utf8_length = Min(str->length(), kUtf8BufferSize - utf8_pos_);
+ String::WriteToFlat(str, utf8_buffer_ + utf8_pos_, 0, utf8_length);
+ utf8_pos_ += utf8_length;
+ return;
+ }
+ int uc16_length = Min(str->length(), kUc16BufferSize);
+ String::WriteToFlat(str, uc16_buffer_, 0, uc16_length);
+ for (int i = 0; i < uc16_length && utf8_pos_ < kUtf8BufferSize; ++i) {
+ uc16 c = uc16_buffer_[i];
+ if (c <= String::kMaxAsciiCharCodeU) {
+ utf8_buffer_[utf8_pos_++] = static_cast<char>(c);
+ } else {
+ int char_length = unibrow::Utf8::Length(c);
+ if (utf8_pos_ + char_length > kUtf8BufferSize) break;
+ unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c);
+ utf8_pos_ += char_length;
+ }
+ }
+ }
+
+ void AppendBytes(const char* bytes, int size) {
+ size = Min(size, kUtf8BufferSize - utf8_pos_);
+ memcpy(utf8_buffer_ + utf8_pos_, bytes, size);
+ utf8_pos_ += size;
+ }
+
+ void AppendBytes(const char* bytes) {
+ AppendBytes(bytes, StrLength(bytes));
+ }
+
+ void AppendByte(char c) {
+ if (utf8_pos_ >= kUtf8BufferSize) return;
+ utf8_buffer_[utf8_pos_++] = c;
+ }
+
+ void AppendInt(int n) {
+ Vector<char> buffer(utf8_buffer_ + utf8_pos_, kUtf8BufferSize - utf8_pos_);
+ int size = OS::SNPrintF(buffer, "%d", n);
+ if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) {
+ utf8_pos_ += size;
+ }
+ }
+
+ const char* get() { return utf8_buffer_; }
+ int size() const { return utf8_pos_; }
+
+ private:
+ static const int kUtf8BufferSize = 512;
+ static const int kUc16BufferSize = 128;
+
+ int utf8_pos_;
+ char utf8_buffer_[kUtf8BufferSize];
+ uc16 uc16_buffer_[kUc16BufferSize];
+};
+
+
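NameBuffer flattens an event name into at most 512 bytes of UTF-8, silently truncating anything longer; non-ASCII strings are staged through the 128-element uc16 buffer first. Illustratively, after the args_count overload of CodeCreateEvent below runs for a stub with three arguments:

    // Hypothetical state (the class is private to Logger):
    buf.AppendBytes("Stub");   // kLogEventsNames[STUB_TAG]
    buf.AppendByte(':');
    buf.AppendInt(3);          // args_count
    // buf.get() now points at "Stub:3" and buf.size() == 6; consumers use
    // the explicit size rather than a terminator.
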
+Logger::Logger()
+ : ticker_(NULL),
+ profiler_(NULL),
+ sliding_state_window_(NULL),
+ log_events_(NULL),
+ logging_nesting_(0),
+ cpu_profiler_nesting_(0),
+ log_(new Log(this)),
+ name_buffer_(new NameBuffer),
+ address_to_name_map_(NULL),
+ is_initialized_(false),
+ last_address_(NULL),
+ prev_sp_(NULL),
+ prev_function_(NULL),
+ prev_to_(NULL),
+ prev_code_(NULL) {
+}
+
+
+Logger::~Logger() {
+ delete address_to_name_map_;
+ delete name_buffer_;
+ delete log_;
+}
+
#define DECLARE_EVENT(ignore1, name) name,
-const char* kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
+static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
};
#undef DECLARE_EVENT
void Logger::ProfilerBeginEvent() {
- if (!Log::IsEnabled()) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled()) return;
+ LogMessageBuilder msg(this);
msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
msg.WriteToLogFile();
}
@@ -364,8 +566,8 @@ void Logger::StringEvent(const char* name, const char* value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::UncheckedStringEvent(const char* name, const char* value) {
- if (!Log::IsEnabled()) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled()) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,\"%s\"\n", name, value);
msg.WriteToLogFile();
}
@@ -388,8 +590,8 @@ void Logger::IntPtrTEvent(const char* name, intptr_t value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::UncheckedIntEvent(const char* name, int value) {
- if (!Log::IsEnabled()) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled()) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,%d\n", name, value);
msg.WriteToLogFile();
}
@@ -398,8 +600,8 @@ void Logger::UncheckedIntEvent(const char* name, int value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
- if (!Log::IsEnabled()) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled()) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,%" V8_PTR_PREFIX "d\n", name, value);
msg.WriteToLogFile();
}
@@ -408,8 +610,8 @@ void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
void Logger::HandleEvent(const char* name, Object** location) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_handles) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_handles) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,0x%" V8PRIxPTR "\n", name, location);
msg.WriteToLogFile();
#endif
@@ -421,8 +623,8 @@ void Logger::HandleEvent(const char* name, Object** location) {
// caller's responsibility to ensure that log is enabled and that
// FLAG_log_api is true.
void Logger::ApiEvent(const char* format, ...) {
- ASSERT(Log::IsEnabled() && FLAG_log_api);
- LogMessageBuilder msg;
+ ASSERT(log_->IsEnabled() && FLAG_log_api);
+ LogMessageBuilder msg(this);
va_list ap;
va_start(ap, format);
msg.AppendVA(format, ap);
@@ -434,7 +636,7 @@ void Logger::ApiEvent(const char* format, ...) {
void Logger::ApiNamedSecurityCheck(Object* key) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_api) return;
+ if (!log_->IsEnabled() || !FLAG_log_api) return;
if (key->IsString()) {
SmartPointer<char> str =
String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -452,8 +654,8 @@ void Logger::SharedLibraryEvent(const char* library_path,
uintptr_t start,
uintptr_t end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_prof) return;
+ LogMessageBuilder msg(this);
msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
library_path,
start,
@@ -467,8 +669,8 @@ void Logger::SharedLibraryEvent(const wchar_t* library_path,
uintptr_t start,
uintptr_t end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_prof) return;
+ LogMessageBuilder msg(this);
msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
library_path,
start,
@@ -482,7 +684,7 @@ void Logger::SharedLibraryEvent(const wchar_t* library_path,
void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
// Prints "/" + re.source + "/" +
// (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
- LogMessageBuilder msg;
+ LogMessageBuilder msg(this);
Handle<Object> source = GetProperty(regexp, "source");
if (!source->IsString()) {
@@ -524,8 +726,8 @@ void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_regexp) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_regexp) return;
+ LogMessageBuilder msg(this);
msg.Append("regexp-compile,");
LogRegExpSource(regexp);
msg.Append(in_cache ? ",hit\n" : ",miss\n");
@@ -536,9 +738,9 @@ void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
void Logger::LogRuntime(Vector<const char> format, JSArray* args) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_runtime) return;
+ if (!log_->IsEnabled() || !FLAG_log_runtime) return;
HandleScope scope;
- LogMessageBuilder msg;
+ LogMessageBuilder msg(this);
for (int i = 0; i < format.length(); i++) {
char c = format[i];
if (c == '%' && i <= format.length() - 2) {
@@ -582,7 +784,7 @@ void Logger::LogRuntime(Vector<const char> format, JSArray* args) {
void Logger::ApiIndexedSecurityCheck(uint32_t index) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_api) return;
+ if (!log_->IsEnabled() || !FLAG_log_api) return;
ApiEvent("api,check-security,%u\n", index);
#endif
}
@@ -593,13 +795,13 @@ void Logger::ApiNamedPropertyAccess(const char* tag,
Object* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
ASSERT(name->IsString());
- if (!Log::IsEnabled() || !FLAG_log_api) return;
+ if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = holder->class_name();
SmartPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartPointer<char> property_name =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- Logger::ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
+ ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
#endif
}
@@ -607,37 +809,37 @@ void Logger::ApiIndexedPropertyAccess(const char* tag,
JSObject* holder,
uint32_t index) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_api) return;
+ if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = holder->class_name();
SmartPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- Logger::ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
+ ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
#endif
}
void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_api) return;
+ if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = object->class_name();
SmartPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- Logger::ApiEvent("api,%s,\"%s\"\n", tag, *class_name);
+ ApiEvent("api,%s,\"%s\"\n", tag, *class_name);
#endif
}
void Logger::ApiEntryCall(const char* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_api) return;
- Logger::ApiEvent("api,%s\n", name);
+ if (!log_->IsEnabled() || !FLAG_log_api) return;
+ ApiEvent("api,%s\n", name);
#endif
}
void Logger::NewEvent(const char* name, void* object, size_t size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log) return;
+ LogMessageBuilder msg(this);
msg.Append("new,%s,0x%" V8PRIxPTR ",%u\n", name, object,
static_cast<unsigned int>(size));
msg.WriteToLogFile();
@@ -647,19 +849,28 @@ void Logger::NewEvent(const char* name, void* object, size_t size) {
void Logger::DeleteEvent(const char* name, void* object) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log) return;
+ LogMessageBuilder msg(this);
msg.Append("delete,%s,0x%" V8PRIxPTR "\n", name, object);
msg.WriteToLogFile();
#endif
}
+void Logger::NewEventStatic(const char* name, void* object, size_t size) {
+ LOGGER->NewEvent(name, object, size);
+}
+
+
+void Logger::DeleteEventStatic(const char* name, void* object) {
+ LOGGER->DeleteEvent(name, object);
+}
+
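These static forwarders exist for TRACK_MEMORY(TypeName) in globals.h (see the comment in log.h below), whose expansion sites cannot name an isolate. A reconstructed sketch of that hook, not part of this diff:

    #define TRACK_MEMORY(name) \
      void* operator new(size_t size) { \
        void* result = ::operator new(size); \
        Logger::NewEventStatic(name, result, size); \
        return result; \
      } \
      void operator delete(void* object) { \
        Logger::DeleteEventStatic(name, object); \
        ::operator delete(object); \
      }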
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::CallbackEventInternal(const char* prefix, const char* name,
Address entry_point) {
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[CALLBACK_TAG]);
@@ -673,7 +884,7 @@ void Logger::CallbackEventInternal(const char* prefix, const char* name,
void Logger::CallbackEvent(String* name, Address entry_point) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
CallbackEventInternal("", *str, entry_point);
@@ -683,7 +894,7 @@ void Logger::CallbackEvent(String* name, Address entry_point) {
void Logger::GetterCallbackEvent(String* name, Address entry_point) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
CallbackEventInternal("get ", *str, entry_point);
@@ -693,7 +904,7 @@ void Logger::GetterCallbackEvent(String* name, Address entry_point) {
void Logger::SetterCallbackEvent(String* name, Address entry_point) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
CallbackEventInternal("set ", *str, entry_point);
@@ -705,8 +916,21 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
const char* comment) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled()) return;
+ if (FLAG_ll_prof || Serializer::enabled()) {
+ name_buffer_->Reset();
+ name_buffer_->AppendBytes(kLogEventsNames[tag]);
+ name_buffer_->AppendByte(':');
+ name_buffer_->AppendBytes(comment);
+ }
+ if (FLAG_ll_prof) {
+ LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (Serializer::enabled()) {
+ RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!FLAG_log_code) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[tag]);
@@ -719,7 +943,6 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
msg.Append(*p);
}
msg.Append('"');
- LowLevelCodeCreateEvent(code, &msg);
msg.Append('\n');
msg.WriteToLogFile();
#endif
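
Every CodeCreateEvent overload below repeats the same gate; schematically:

    // Shared gating pattern (illustration only):
    //   if (!log_->IsEnabled()) return;
    //   if (FLAG_ll_prof || Serializer::enabled())  ->  build name_buffer_
    //   if (FLAG_ll_prof)                           ->  binary ll_prof record
    //   if (Serializer::enabled())                  ->  remember snapshot name
    //   if (!FLAG_log_code) return;                 ->  else the text log line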
@@ -730,13 +953,30 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
String* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (name != NULL) {
- SmartPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- CodeCreateEvent(tag, code, *str);
- } else {
- CodeCreateEvent(tag, code, "");
+ if (!log_->IsEnabled()) return;
+ if (FLAG_ll_prof || Serializer::enabled()) {
+ name_buffer_->Reset();
+ name_buffer_->AppendBytes(kLogEventsNames[tag]);
+ name_buffer_->AppendByte(':');
+ name_buffer_->AppendString(name);
+ }
+ if (FLAG_ll_prof) {
+ LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
}
+ if (Serializer::enabled()) {
+ RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!FLAG_log_code) return;
+ LogMessageBuilder msg(this);
+ msg.Append("%s,%s,",
+ kLogEventsNames[CODE_CREATION_EVENT],
+ kLogEventsNames[tag]);
+ msg.AppendAddress(code->address());
+ msg.Append(",%d,\"", code->ExecutableSize());
+ msg.AppendDetailed(name, false);
+ msg.Append('"');
+ msg.Append('\n');
+ msg.WriteToLogFile();
#endif
}
@@ -758,9 +998,26 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
SharedFunctionInfo* shared,
String* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- if (code == Builtins::builtin(Builtins::LazyCompile)) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled()) return;
+ if (FLAG_ll_prof || Serializer::enabled()) {
+ name_buffer_->Reset();
+ name_buffer_->AppendBytes(kLogEventsNames[tag]);
+ name_buffer_->AppendByte(':');
+ name_buffer_->AppendBytes(ComputeMarker(code));
+ name_buffer_->AppendString(name);
+ }
+ if (FLAG_ll_prof) {
+ LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (Serializer::enabled()) {
+ RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!FLAG_log_code) return;
+ if (code == Isolate::Current()->builtins()->builtin(
+ Builtins::kLazyCompile))
+ return;
+
+ LogMessageBuilder msg(this);
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("%s,%s,",
@@ -770,7 +1027,6 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
msg.Append(",%d,\"%s\",", code->ExecutableSize(), *str);
msg.AppendAddress(shared->address());
msg.Append(",%s", ComputeMarker(code));
- LowLevelCodeCreateEvent(code, &msg);
msg.Append('\n');
msg.WriteToLogFile();
#endif
@@ -785,8 +1041,26 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
SharedFunctionInfo* shared,
String* source, int line) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled()) return;
+ if (FLAG_ll_prof || Serializer::enabled()) {
+ name_buffer_->Reset();
+ name_buffer_->AppendBytes(kLogEventsNames[tag]);
+ name_buffer_->AppendByte(':');
+ name_buffer_->AppendBytes(ComputeMarker(code));
+ name_buffer_->AppendString(shared->DebugName());
+ name_buffer_->AppendByte(' ');
+ name_buffer_->AppendString(source);
+ name_buffer_->AppendByte(':');
+ name_buffer_->AppendInt(line);
+ }
+ if (FLAG_ll_prof) {
+ LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (Serializer::enabled()) {
+ RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!FLAG_log_code) return;
+ LogMessageBuilder msg(this);
SmartPointer<char> name =
shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartPointer<char> sourcestr =
@@ -802,7 +1076,6 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
line);
msg.AppendAddress(shared->address());
msg.Append(",%s", ComputeMarker(code));
- LowLevelCodeCreateEvent(code, &msg);
msg.Append('\n');
msg.WriteToLogFile();
#endif
@@ -811,14 +1084,26 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled()) return;
+ if (FLAG_ll_prof || Serializer::enabled()) {
+ name_buffer_->Reset();
+ name_buffer_->AppendBytes(kLogEventsNames[tag]);
+ name_buffer_->AppendByte(':');
+ name_buffer_->AppendInt(args_count);
+ }
+ if (FLAG_ll_prof) {
+ LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (Serializer::enabled()) {
+ RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!FLAG_log_code) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
- LowLevelCodeCreateEvent(code, &msg);
msg.Append('\n');
msg.WriteToLogFile();
#endif
@@ -827,10 +1112,8 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
void Logger::CodeMovingGCEvent() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
- LogMessageBuilder msg;
- msg.Append("%s\n", kLogEventsNames[CODE_MOVING_GC]);
- msg.WriteToLogFile();
+ if (!log_->IsEnabled() || !FLAG_ll_prof) return;
+ LowLevelLogWriteBytes(&kCodeMovingGCTag, sizeof(kCodeMovingGCTag));
OS::SignalCodeMovingGC();
#endif
}
@@ -838,8 +1121,21 @@ void Logger::CodeMovingGCEvent() {
void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled()) return;
+ if (FLAG_ll_prof || Serializer::enabled()) {
+ name_buffer_->Reset();
+ name_buffer_->AppendBytes(kLogEventsNames[REG_EXP_TAG]);
+ name_buffer_->AppendByte(':');
+ name_buffer_->AppendString(source);
+ }
+ if (FLAG_ll_prof) {
+ LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (Serializer::enabled()) {
+ RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!FLAG_log_code) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[REG_EXP_TAG]);
@@ -847,7 +1143,6 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
msg.Append(",%d,\"", code->ExecutableSize());
msg.AppendDetailed(source, false);
msg.Append('\"');
- LowLevelCodeCreateEvent(code, &msg);
msg.Append('\n');
msg.WriteToLogFile();
#endif
@@ -856,6 +1151,11 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
void Logger::CodeMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (!log_->IsEnabled()) return;
+ if (FLAG_ll_prof) LowLevelCodeMoveEvent(from, to);
+ if (Serializer::enabled() && address_to_name_map_ != NULL) {
+ address_to_name_map_->Move(from, to);
+ }
MoveEventInternal(CODE_MOVE_EVENT, from, to);
#endif
}
@@ -863,6 +1163,11 @@ void Logger::CodeMoveEvent(Address from, Address to) {
void Logger::CodeDeleteEvent(Address from) {
#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (!log_->IsEnabled()) return;
+ if (FLAG_ll_prof) LowLevelCodeDeleteEvent(from);
+ if (Serializer::enabled() && address_to_name_map_ != NULL) {
+ address_to_name_map_->Remove(from);
+ }
DeleteEventInternal(CODE_DELETE_EVENT, from);
#endif
}
@@ -870,8 +1175,22 @@ void Logger::CodeDeleteEvent(Address from) {
void Logger::SnapshotPositionEvent(Address addr, int pos) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_snapshot_positions) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled()) return;
+ if (FLAG_ll_prof) LowLevelSnapshotPositionEvent(addr, pos);
+ if (Serializer::enabled() && address_to_name_map_ != NULL) {
+ const char* code_name = address_to_name_map_->Lookup(addr);
+ if (code_name == NULL) return; // Not a code object.
+ LogMessageBuilder msg(this);
+ msg.Append("%s,%d,\"", kLogEventsNames[SNAPSHOT_CODE_NAME_EVENT], pos);
+ for (const char* p = code_name; *p != '\0'; ++p) {
+ if (*p == '"') msg.Append('\\');
+ msg.Append(*p);
+ }
+ msg.Append("\"\n");
+ msg.WriteToLogFile();
+ }
+ if (!FLAG_log_snapshot_positions) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,", kLogEventsNames[SNAPSHOT_POSITION_EVENT]);
msg.AppendAddress(addr);
msg.Append(",%d", pos);
@@ -881,9 +1200,9 @@ void Logger::SnapshotPositionEvent(Address addr, int pos) {
}
-void Logger::SFIMoveEvent(Address from, Address to) {
+void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- MoveEventInternal(SFI_MOVE_EVENT, from, to);
+ MoveEventInternal(SHARED_FUNC_MOVE_EVENT, from, to);
#endif
}
@@ -892,8 +1211,8 @@ void Logger::SFIMoveEvent(Address from, Address to) {
void Logger::MoveEventInternal(LogEventsAndTags event,
Address from,
Address to) {
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,", kLogEventsNames[event]);
msg.AppendAddress(from);
msg.Append(',');
@@ -906,8 +1225,8 @@ void Logger::MoveEventInternal(LogEventsAndTags event,
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_code) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,", kLogEventsNames[event]);
msg.AppendAddress(from);
msg.Append('\n');
@@ -918,8 +1237,8 @@ void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
void Logger::ResourceEvent(const char* name, const char* tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,%s,", name, tag);
uint32_t sec, usec;
@@ -936,11 +1255,11 @@ void Logger::ResourceEvent(const char* name, const char* tag) {
void Logger::SuspectReadEvent(String* name, Object* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_suspect) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_suspect) return;
+ LogMessageBuilder msg(this);
String* class_name = obj->IsJSObject()
? JSObject::cast(obj)->class_name()
- : Heap::empty_string();
+ : HEAP->empty_string();
msg.Append("suspect-read,");
msg.Append(class_name);
msg.Append(',');
@@ -955,8 +1274,8 @@ void Logger::SuspectReadEvent(String* name, Object* obj) {
void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_gc) return;
+ LogMessageBuilder msg(this);
// Using non-relative system time in order to be able to synchronize with
// external memory profiling events (e.g. DOM memory size).
msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n",
@@ -966,23 +1285,10 @@ void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
}
-void Logger::HeapSampleStats(const char* space, const char* kind,
- intptr_t capacity, intptr_t used) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg;
- msg.Append("heap-sample-stats,\"%s\",\"%s\","
- "%" V8_PTR_PREFIX "d,%" V8_PTR_PREFIX "d\n",
- space, kind, capacity, used);
- msg.WriteToLogFile();
-#endif
-}
-
-
void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_gc) return;
+ LogMessageBuilder msg(this);
msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind);
msg.WriteToLogFile();
#endif
@@ -991,84 +1297,18 @@ void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log_gc) return;
+ LogMessageBuilder msg(this);
msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes);
msg.WriteToLogFile();
#endif
}
-void Logger::HeapSampleJSConstructorEvent(const char* constructor,
- int number, int bytes) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg;
- msg.Append("heap-js-cons-item,%s,%d,%d\n", constructor, number, bytes);
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::HeapSampleJSRetainersEvent(
- const char* constructor, const char* event) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_gc) return;
- // Event starts with comma, so we don't have it in the format string.
- static const char* event_text = "heap-js-ret-item,%s";
- // We take placeholder strings into account, but it's OK to be conservative.
- static const int event_text_len = StrLength(event_text);
- const int cons_len = StrLength(constructor);
- const int event_len = StrLength(event);
- int pos = 0;
- // Retainer lists can be long. We may need to split them into multiple events.
- do {
- LogMessageBuilder msg;
- msg.Append(event_text, constructor);
- int to_write = event_len - pos;
- if (to_write > Log::kMessageBufferSize - (cons_len + event_text_len)) {
- int cut_pos = pos + Log::kMessageBufferSize - (cons_len + event_text_len);
- ASSERT(cut_pos < event_len);
- while (cut_pos > pos && event[cut_pos] != ',') --cut_pos;
- if (event[cut_pos] != ',') {
- // Crash in debug mode, skip in release mode.
- ASSERT(false);
- return;
- }
- // Append a piece of event that fits, without trailing comma.
- msg.AppendStringPart(event + pos, cut_pos - pos);
- // Start next piece with comma.
- pos = cut_pos;
- } else {
- msg.Append("%s", event + pos);
- pos += event_len;
- }
- msg.Append('\n');
- msg.WriteToLogFile();
- } while (pos < event_len);
-#endif
-}
-
-
-void Logger::HeapSampleJSProducerEvent(const char* constructor,
- Address* stack) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg;
- msg.Append("heap-js-prod-item,%s", constructor);
- while (*stack != NULL) {
- msg.Append(",0x%" V8PRIxPTR, *stack++);
- }
- msg.Append("\n");
- msg.WriteToLogFile();
-#endif
-}
-
-
void Logger::DebugTag(const char* call_site_tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_log) return;
+ LogMessageBuilder msg(this);
msg.Append("debug-tag,%s\n", call_site_tag);
msg.WriteToLogFile();
#endif
@@ -1077,13 +1317,13 @@ void Logger::DebugTag(const char* call_site_tag) {
void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log) return;
+ if (!log_->IsEnabled() || !FLAG_log) return;
StringBuilder s(parameter.length() + 1);
for (int i = 0; i < parameter.length(); ++i) {
s.AddCharacter(static_cast<char>(parameter[i]));
}
char* parameter_string = s.Finalize();
- LogMessageBuilder msg;
+ LogMessageBuilder msg(this);
msg.Append("debug-queue-event,%s,%15.3f,%s\n",
event_type,
OS::TimeCurrentMillis(),
@@ -1096,14 +1336,19 @@ void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::TickEvent(TickSample* sample, bool overflow) {
- if (!Log::IsEnabled() || !FLAG_prof) return;
- LogMessageBuilder msg;
+ if (!log_->IsEnabled() || !FLAG_prof) return;
+ LogMessageBuilder msg(this);
msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
msg.AppendAddress(sample->pc);
msg.Append(',');
msg.AppendAddress(sample->sp);
- msg.Append(',');
- msg.AppendAddress(sample->tos);
+ if (sample->has_external_callback) {
+ msg.Append(",1,");
+ msg.AppendAddress(sample->external_callback);
+ } else {
+ msg.Append(",0,");
+ msg.AppendAddress(sample->tos);
+ }
msg.Append(",%d", static_cast<int>(sample->state));
if (overflow) {
msg.Append(",overflow");
@@ -1122,15 +1367,12 @@ int Logger::GetActiveProfilerModules() {
if (profiler_ != NULL && !profiler_->paused()) {
result |= PROFILER_MODULE_CPU;
}
- if (FLAG_log_gc) {
- result |= PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS;
- }
return result;
}
void Logger::PauseProfiler(int flags, int tag) {
- if (!Log::IsEnabled()) return;
+ if (!log_->IsEnabled()) return;
if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
// It is OK to have negative nesting.
if (--cpu_profiler_nesting_ == 0) {
@@ -1141,18 +1383,11 @@ void Logger::PauseProfiler(int flags, int tag) {
}
FLAG_log_code = false;
// Must be the same message as Log::kDynamicBufferSeal.
- LOG(UncheckedStringEvent("profiler", "pause"));
+ LOG(ISOLATE, UncheckedStringEvent("profiler", "pause"));
}
--logging_nesting_;
}
}
- if (flags &
- (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
- if (--heap_profiler_nesting_ == 0) {
- FLAG_log_gc = false;
- --logging_nesting_;
- }
- }
if (tag != 0) {
UncheckedIntEvent("close-tag", tag);
}
@@ -1160,7 +1395,7 @@ void Logger::PauseProfiler(int flags, int tag) {
void Logger::ResumeProfiler(int flags, int tag) {
- if (!Log::IsEnabled()) return;
+ if (!log_->IsEnabled()) return;
if (tag != 0) {
UncheckedIntEvent("open-tag", tag);
}
@@ -1169,7 +1404,7 @@ void Logger::ResumeProfiler(int flags, int tag) {
++logging_nesting_;
if (FLAG_prof_lazy) {
profiler_->Engage();
- LOG(UncheckedStringEvent("profiler", "resume"));
+ LOG(ISOLATE, UncheckedStringEvent("profiler", "resume"));
FLAG_log_code = true;
LogCompiledFunctions();
LogAccessorCallbacks();
@@ -1180,20 +1415,12 @@ void Logger::ResumeProfiler(int flags, int tag) {
profiler_->resume();
}
}
- if (flags &
- (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
- if (heap_profiler_nesting_++ == 0) {
- ++logging_nesting_;
- FLAG_log_gc = true;
- }
- }
}
// This function can be called when Log's mutex is acquired,
// either from main or Profiler's thread.
-void Logger::StopLoggingAndProfiling() {
- Log::stop();
+void Logger::LogFailure() {
PauseProfiler(PROFILER_MODULE_CPU, 0);
}
@@ -1204,7 +1431,7 @@ bool Logger::IsProfilerSamplerActive() {
int Logger::GetLogLines(int from_pos, char* dest_buf, int max_size) {
- return Log::GetLogLines(from_pos, dest_buf, max_size);
+ return log_->GetLogLines(from_pos, dest_buf, max_size);
}
@@ -1219,8 +1446,12 @@ class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
virtual void LeaveContext(Context* context) {}
virtual void VisitFunction(JSFunction* function) {
+ SharedFunctionInfo* sfi = SharedFunctionInfo::cast(function->shared());
+ Object* maybe_script = sfi->script();
+ if (maybe_script->IsScript()
+ && !Script::cast(maybe_script)->HasValidSource()) return;
if (sfis_ != NULL) {
- sfis_[*count_] = Handle<SharedFunctionInfo>(function->shared());
+ sfis_[*count_] = Handle<SharedFunctionInfo>(sfi);
}
if (code_objects_ != NULL) {
ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
@@ -1271,7 +1502,7 @@ static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
void Logger::LogCodeObject(Object* object) {
- if (FLAG_log_code) {
+ if (FLAG_log_code || FLAG_ll_prof) {
Code* code_object = Code::cast(object);
LogEventsAndTags tag = Logger::STUB_TAG;
const char* description = "Unknown code from the snapshot";
@@ -1279,8 +1510,8 @@ void Logger::LogCodeObject(Object* object) {
case Code::FUNCTION:
case Code::OPTIMIZED_FUNCTION:
return; // We log this later using LogCompiledFunctions.
- case Code::BINARY_OP_IC: // fall through
- case Code::TYPE_RECORDING_BINARY_OP_IC: // fall through
+ case Code::UNARY_OP_IC: // fall through
+ case Code::BINARY_OP_IC: // fall through
case Code::COMPARE_IC: // fall through
case Code::STUB:
description =
@@ -1318,14 +1549,14 @@ void Logger::LogCodeObject(Object* object) {
tag = Logger::KEYED_CALL_IC_TAG;
break;
}
- PROFILE(CodeCreateEvent(tag, code_object, description));
+ PROFILE(ISOLATE, CodeCreateEvent(tag, code_object, description));
}
}
void Logger::LogCodeInfo() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
+ if (!log_->IsEnabled() || !FLAG_ll_prof) return;
#if V8_TARGET_ARCH_IA32
const char arch[] = "ia32";
#elif V8_TARGET_ARCH_X64
@@ -1335,21 +1566,69 @@ void Logger::LogCodeInfo() {
#else
const char arch[] = "unknown";
#endif
- LogMessageBuilder msg;
- msg.Append("code-info,%s,%d\n", arch, Code::kHeaderSize);
- msg.WriteToLogFile();
+ LowLevelLogWriteBytes(arch, sizeof(arch));
#endif // ENABLE_LOGGING_AND_PROFILING
}
-void Logger::LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg) {
- if (!FLAG_ll_prof || Log::output_code_handle_ == NULL) return;
- int pos = static_cast<int>(ftell(Log::output_code_handle_));
- size_t rv = fwrite(code->instruction_start(), 1, code->instruction_size(),
- Log::output_code_handle_);
- ASSERT(static_cast<size_t>(code->instruction_size()) == rv);
+void Logger::RegisterSnapshotCodeName(Code* code,
+ const char* name,
+ int name_size) {
+ ASSERT(Serializer::enabled());
+ if (address_to_name_map_ == NULL) {
+ address_to_name_map_ = new NameMap;
+ }
+ address_to_name_map_->Insert(code->address(), name, name_size);
+}
+
+
+void Logger::LowLevelCodeCreateEvent(Code* code,
+ const char* name,
+ int name_size) {
+ if (log_->ll_output_handle_ == NULL) return;
+ LowLevelCodeCreateStruct event;
+ event.name_size = name_size;
+ event.code_address = code->instruction_start();
+ ASSERT(event.code_address == code->address() + Code::kHeaderSize);
+ event.code_size = code->instruction_size();
+ LowLevelLogWriteStruct(event);
+ LowLevelLogWriteBytes(name, name_size);
+ LowLevelLogWriteBytes(
+ reinterpret_cast<const char*>(code->instruction_start()),
+ code->instruction_size());
+}
+
+
+void Logger::LowLevelCodeMoveEvent(Address from, Address to) {
+ if (log_->ll_output_handle_ == NULL) return;
+ LowLevelCodeMoveStruct event;
+ event.from_address = from + Code::kHeaderSize;
+ event.to_address = to + Code::kHeaderSize;
+ LowLevelLogWriteStruct(event);
+}
+
+
+void Logger::LowLevelCodeDeleteEvent(Address from) {
+ if (log_->ll_output_handle_ == NULL) return;
+ LowLevelCodeDeleteStruct event;
+ event.address = from + Code::kHeaderSize;
+ LowLevelLogWriteStruct(event);
+}
+
+
+void Logger::LowLevelSnapshotPositionEvent(Address addr, int pos) {
+ if (log_->ll_output_handle_ == NULL) return;
+ LowLevelSnapshotPositionStruct event;
+ event.address = addr + Code::kHeaderSize;
+ event.position = pos;
+ LowLevelLogWriteStruct(event);
+}
+
+
+void Logger::LowLevelLogWriteBytes(const char* bytes, int size) {
+ size_t rv = fwrite(bytes, 1, size, log_->ll_output_handle_);
+ ASSERT(static_cast<size_t>(size) == rv);
USE(rv);
- msg->Append(",%d", pos);
}
@@ -1372,7 +1651,9 @@ void Logger::LogCompiledFunctions() {
// During iteration, there can be heap allocation due to
// GetScriptLineNumber call.
for (int i = 0; i < compiled_funcs_count; ++i) {
- if (*code_objects[i] == Builtins::builtin(Builtins::LazyCompile)) continue;
+ if (*code_objects[i] == Isolate::Current()->builtins()->builtin(
+ Builtins::kLazyCompile))
+ continue;
Handle<SharedFunctionInfo> shared = sfis[i];
Handle<String> func_name(shared->DebugName());
if (shared->script()->IsScript()) {
@@ -1381,20 +1662,23 @@ void Logger::LogCompiledFunctions() {
Handle<String> script_name(String::cast(script->name()));
int line_num = GetScriptLineNumber(script, shared->start_position());
if (line_num > 0) {
- PROFILE(CodeCreateEvent(
- Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code_objects[i], *shared,
- *script_name, line_num + 1));
+ PROFILE(ISOLATE,
+ CodeCreateEvent(
+ Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
+ *code_objects[i], *shared,
+ *script_name, line_num + 1));
} else {
// Can't distinguish eval and script here, so always use Script.
- PROFILE(CodeCreateEvent(
- Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *code_objects[i], *shared, *script_name));
+ PROFILE(ISOLATE,
+ CodeCreateEvent(
+ Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
+ *code_objects[i], *shared, *script_name));
}
} else {
- PROFILE(CodeCreateEvent(
- Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code_objects[i], *shared, *func_name));
+ PROFILE(ISOLATE,
+ CodeCreateEvent(
+ Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
+ *code_objects[i], *shared, *func_name));
}
} else if (shared->IsApiFunction()) {
// API function.
@@ -1404,11 +1688,13 @@ void Logger::LogCompiledFunctions() {
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
Object* callback_obj = call_data->callback();
Address entry_point = v8::ToCData<Address>(callback_obj);
- PROFILE(CallbackEvent(*func_name, entry_point));
+ PROFILE(ISOLATE, CallbackEvent(*func_name, entry_point));
}
} else {
- PROFILE(CodeCreateEvent(
- Logger::LAZY_COMPILE_TAG, *code_objects[i], *shared, *func_name));
+ PROFILE(ISOLATE,
+ CodeCreateEvent(
+ Logger::LAZY_COMPILE_TAG, *code_objects[i],
+ *shared, *func_name));
}
}
}
@@ -1417,6 +1703,7 @@ void Logger::LogCompiledFunctions() {
void Logger::LogAccessorCallbacks() {
AssertNoAllocation no_alloc;
HeapIterator iterator;
+ i::Isolate* isolate = ISOLATE;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (!obj->IsAccessorInfo()) continue;
AccessorInfo* ai = AccessorInfo::cast(obj);
@@ -1424,11 +1711,11 @@ void Logger::LogAccessorCallbacks() {
String* name = String::cast(ai->name());
Address getter_entry = v8::ToCData<Address>(ai->getter());
if (getter_entry != 0) {
- PROFILE(GetterCallbackEvent(name, getter_entry));
+ PROFILE(isolate, GetterCallbackEvent(name, getter_entry));
}
Address setter_entry = v8::ToCData<Address>(ai->setter());
if (setter_entry != 0) {
- PROFILE(SetterCallbackEvent(name, setter_entry));
+ PROFILE(isolate, SetterCallbackEvent(name, setter_entry));
}
}
}
@@ -1438,23 +1725,12 @@ void Logger::LogAccessorCallbacks() {
bool Logger::Setup() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- // --log-all enables all the log flags.
- if (FLAG_log_all) {
- FLAG_log_runtime = true;
- FLAG_log_api = true;
- FLAG_log_code = true;
- FLAG_log_gc = true;
- FLAG_log_suspect = true;
- FLAG_log_handles = true;
- FLAG_log_regexp = true;
- }
-
- // --prof implies --log-code.
- if (FLAG_prof) FLAG_log_code = true;
+ // Tests and EnsureInitialize() can call this twice in a row. It's harmless.
+ if (is_initialized_) return true;
+ is_initialized_ = true;
// --ll-prof implies --log-code and --log-snapshot-positions.
if (FLAG_ll_prof) {
- FLAG_log_code = true;
FLAG_log_snapshot_positions = true;
}
@@ -1464,73 +1740,31 @@ bool Logger::Setup() {
FLAG_prof_auto = false;
}
- bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
- || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_log_state_changes;
-
- bool open_log_file = start_logging || FLAG_prof_lazy;
-
- // If we're logging anything, we need to open the log file.
- if (open_log_file) {
- if (strcmp(FLAG_logfile, "-") == 0) {
- Log::OpenStdout();
- } else if (strcmp(FLAG_logfile, "*") == 0) {
- Log::OpenMemoryBuffer();
- } else if (strchr(FLAG_logfile, '%') != NULL) {
- // If there's a '%' in the log file name we have to expand
- // placeholders.
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- for (const char* p = FLAG_logfile; *p; p++) {
- if (*p == '%') {
- p++;
- switch (*p) {
- case '\0':
- // If there's a % at the end of the string we back up
- // one character so we can escape the loop properly.
- p--;
- break;
- case 't': {
- // %t expands to the current time in milliseconds.
- double time = OS::TimeCurrentMillis();
- stream.Add("%.0f", FmtElm(time));
- break;
- }
- case '%':
- // %% expands (contracts really) to %.
- stream.Put('%');
- break;
- default:
- // All other %'s expand to themselves.
- stream.Put('%');
- stream.Put(*p);
- break;
- }
- } else {
- stream.Put(*p);
- }
- }
- SmartPointer<const char> expanded = stream.ToCString();
- Log::OpenFile(*expanded);
- } else {
- Log::OpenFile(FLAG_logfile);
- }
- }
+ // TODO(isolates): this assert introduces a cyclic dependency (logger
+ // -> thread local top -> heap -> logger).
+ // ASSERT(VMState::is_outermost_external());
+
+ log_->Initialize();
if (FLAG_ll_prof) LogCodeInfo();
- ticker_ = new Ticker(kSamplingIntervalMs);
+ Isolate* isolate = Isolate::Current();
+ ticker_ = new Ticker(isolate, kSamplingIntervalMs);
if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
- sliding_state_window_ = new SlidingStateWindow();
+ sliding_state_window_ = new SlidingStateWindow(isolate);
}
+ bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
+ || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
+ || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof;
+
if (start_logging) {
logging_nesting_ = 1;
}
if (FLAG_prof) {
- profiler_ = new Profiler();
+ profiler_ = new Profiler(isolate);
if (!FLAG_prof_auto) {
profiler_->pause();
} else {
@@ -1541,7 +1775,6 @@ bool Logger::Setup() {
}
}
- LogMessageBuilder::set_write_failure_handler(StopLoggingAndProfiling);
return true;
#else
@@ -1550,6 +1783,15 @@ bool Logger::Setup() {
}
+Sampler* Logger::sampler() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ return ticker_;
+#else
+ return NULL;
+#endif
+}
+
+
void Logger::EnsureTickerStarted() {
#ifdef ENABLE_LOGGING_AND_PROFILING
ASSERT(ticker_ != NULL);
@@ -1567,7 +1809,8 @@ void Logger::EnsureTickerStopped() {
void Logger::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- LogMessageBuilder::set_write_failure_handler(NULL);
+ if (!is_initialized_) return;
+ is_initialized_ = false;
// Stop the profiler before closing the file.
if (profiler_ != NULL) {
@@ -1582,7 +1825,7 @@ void Logger::TearDown() {
delete ticker_;
ticker_ = NULL;
- Log::Close();
+ log_->Close();
#endif
}
@@ -1600,9 +1843,63 @@ void Logger::EnableSlidingStateWindow() {
// Otherwise, if the sliding state window computation has not been
// started we do it now.
if (sliding_state_window_ == NULL) {
- sliding_state_window_ = new SlidingStateWindow();
+ sliding_state_window_ = new SlidingStateWindow(Isolate::Current());
+ }
+#endif
+}
+
+
+Mutex* SamplerRegistry::mutex_ = OS::CreateMutex();
+List<Sampler*>* SamplerRegistry::active_samplers_ = NULL;
+
+
+bool SamplerRegistry::IterateActiveSamplers(VisitSampler func, void* param) {
+ ScopedLock lock(mutex_);
+ for (int i = 0;
+ ActiveSamplersExist() && i < active_samplers_->length();
+ ++i) {
+ func(active_samplers_->at(i), param);
}
+ return ActiveSamplersExist();
+}
+
+
+static void ComputeCpuProfiling(Sampler* sampler, void* flag_ptr) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ bool* flag = reinterpret_cast<bool*>(flag_ptr);
+ *flag |= sampler->IsProfiling();
#endif
}
+
+SamplerRegistry::State SamplerRegistry::GetState() {
+ bool flag = false;
+ if (!IterateActiveSamplers(&ComputeCpuProfiling, &flag)) {
+ return HAS_NO_SAMPLERS;
+ }
+ return flag ? HAS_CPU_PROFILING_SAMPLERS : HAS_SAMPLERS;
+}
+
+
+void SamplerRegistry::AddActiveSampler(Sampler* sampler) {
+ ASSERT(sampler->IsActive());
+ ScopedLock lock(mutex_);
+ if (active_samplers_ == NULL) {
+ active_samplers_ = new List<Sampler*>;
+ } else {
+ ASSERT(!active_samplers_->Contains(sampler));
+ }
+ active_samplers_->Add(sampler);
+}
+
+
+void SamplerRegistry::RemoveActiveSampler(Sampler* sampler) {
+ ASSERT(sampler->IsActive());
+ ScopedLock lock(mutex_);
+ ASSERT(active_samplers_ != NULL);
+ bool removed = active_samplers_->RemoveElement(sampler);
+ ASSERT(removed);
+ USE(removed);
+}
+
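SamplerRegistry gives process-wide code (for example, the platform SIGPROF machinery) a mutex-guarded view of every isolate's sampler. A minimal consumer sketch; DoCpuProfile and Sampler::SampleStack are assumptions modeled on ComputeCpuProfiling above:

    static void DoCpuProfile(Sampler* sampler, void* raw_sample) {
      if (!sampler->IsProfiling()) return;
      sampler->SampleStack(reinterpret_cast<TickSample*>(raw_sample));
    }

    // In the per-process tick handler:
    TickSample sample;
    if (SamplerRegistry::GetState() ==
        SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
      SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, &sample);
    }
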
} } // namespace v8::internal
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index a808cd1d4..8a627eb3f 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,7 @@
#ifndef V8_LOG_H_
#define V8_LOG_H_
+#include "allocation.h"
#include "platform.h"
#include "log-utils.h"
@@ -69,68 +70,79 @@ namespace internal {
// tick profiler requires code events, so --prof implies --log-code.
// Forward declarations.
-class Ticker;
+class HashMap;
+class LogMessageBuilder;
class Profiler;
class Semaphore;
class SlidingStateWindow;
-class LogMessageBuilder;
+class Ticker;
#undef LOG
#ifdef ENABLE_LOGGING_AND_PROFILING
-#define LOG(Call) \
- do { \
- if (v8::internal::Logger::is_logging()) \
- v8::internal::Logger::Call; \
+#define LOG(isolate, Call) \
+ do { \
+ v8::internal::Logger* logger = \
+ (isolate)->logger(); \
+ if (logger->is_logging()) \
+ logger->Call; \
} while (false)
#else
-#define LOG(Call) ((void) 0)
+#define LOG(isolate, Call) ((void) 0)
#endif
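
Call sites now pass the isolate explicitly; for reference, two usages from the log.cc hunks above:

    LOG(isolate_, TickEvent(&sample, overflow));
    LOG(ISOLATE, UncheckedStringEvent("profiler", "end"));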
-#define LOG_EVENTS_AND_TAGS_LIST(V) \
- V(CODE_CREATION_EVENT, "code-creation") \
- V(CODE_MOVE_EVENT, "code-move") \
- V(CODE_DELETE_EVENT, "code-delete") \
- V(CODE_MOVING_GC, "code-moving-gc") \
- V(SFI_MOVE_EVENT, "sfi-move") \
- V(SNAPSHOT_POSITION_EVENT, "snapshot-pos") \
- V(TICK_EVENT, "tick") \
- V(REPEAT_META_EVENT, "repeat") \
- V(BUILTIN_TAG, "Builtin") \
- V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak") \
- V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn") \
- V(CALL_IC_TAG, "CallIC") \
- V(CALL_INITIALIZE_TAG, "CallInitialize") \
- V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic") \
- V(CALL_MISS_TAG, "CallMiss") \
- V(CALL_NORMAL_TAG, "CallNormal") \
- V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic") \
- V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak") \
- V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, \
- "KeyedCallDebugPrepareStepIn") \
- V(KEYED_CALL_IC_TAG, "KeyedCallIC") \
- V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize") \
- V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic") \
- V(KEYED_CALL_MISS_TAG, "KeyedCallMiss") \
- V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal") \
- V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic") \
- V(CALLBACK_TAG, "Callback") \
- V(EVAL_TAG, "Eval") \
- V(FUNCTION_TAG, "Function") \
- V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \
- V(KEYED_STORE_IC_TAG, "KeyedStoreIC") \
- V(LAZY_COMPILE_TAG, "LazyCompile") \
- V(LOAD_IC_TAG, "LoadIC") \
- V(REG_EXP_TAG, "RegExp") \
- V(SCRIPT_TAG, "Script") \
- V(STORE_IC_TAG, "StoreIC") \
- V(STUB_TAG, "Stub") \
- V(NATIVE_FUNCTION_TAG, "Function") \
- V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile") \
+#define LOG_EVENTS_AND_TAGS_LIST(V) \
+ V(CODE_CREATION_EVENT, "code-creation") \
+ V(CODE_MOVE_EVENT, "code-move") \
+ V(CODE_DELETE_EVENT, "code-delete") \
+ V(CODE_MOVING_GC, "code-moving-gc") \
+ V(SHARED_FUNC_MOVE_EVENT, "sfi-move") \
+ V(SNAPSHOT_POSITION_EVENT, "snapshot-pos") \
+ V(SNAPSHOT_CODE_NAME_EVENT, "snapshot-code-name") \
+ V(TICK_EVENT, "tick") \
+ V(REPEAT_META_EVENT, "repeat") \
+ V(BUILTIN_TAG, "Builtin") \
+ V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak") \
+ V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn") \
+ V(CALL_IC_TAG, "CallIC") \
+ V(CALL_INITIALIZE_TAG, "CallInitialize") \
+ V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic") \
+ V(CALL_MISS_TAG, "CallMiss") \
+ V(CALL_NORMAL_TAG, "CallNormal") \
+ V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic") \
+ V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak") \
+ V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, \
+ "KeyedCallDebugPrepareStepIn") \
+ V(KEYED_CALL_IC_TAG, "KeyedCallIC") \
+ V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize") \
+ V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic") \
+ V(KEYED_CALL_MISS_TAG, "KeyedCallMiss") \
+ V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal") \
+ V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic") \
+ V(CALLBACK_TAG, "Callback") \
+ V(EVAL_TAG, "Eval") \
+ V(FUNCTION_TAG, "Function") \
+ V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \
+ V(KEYED_LOAD_MEGAMORPHIC_IC_TAG, "KeyedLoadMegamorphicIC") \
+ V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC") \
+ V(KEYED_STORE_IC_TAG, "KeyedStoreIC") \
+ V(KEYED_STORE_MEGAMORPHIC_IC_TAG, "KeyedStoreMegamorphicIC") \
+ V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC") \
+ V(LAZY_COMPILE_TAG, "LazyCompile") \
+ V(LOAD_IC_TAG, "LoadIC") \
+ V(REG_EXP_TAG, "RegExp") \
+ V(SCRIPT_TAG, "Script") \
+ V(STORE_IC_TAG, "StoreIC") \
+ V(STUB_TAG, "Stub") \
+ V(NATIVE_FUNCTION_TAG, "Function") \
+ V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile") \
V(NATIVE_SCRIPT_TAG, "Script")
// Note that 'NATIVE_' cases for functions and scripts are mapped onto
// original tags when writing to the log.
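
The list is an X-macro: log.h expands it with DECLARE_ENUM into the LogEventsAndTags enum below, while log.cc expands it with DECLARE_EVENT into kLogEventsNames, keeping the enum and the strings in lockstep. Expansion sketch for the first two entries:

    // enum LogEventsAndTags:  CODE_CREATION_EVENT, CODE_MOVE_EVENT, ...
    // kLogEventsNames:        "code-creation", "code-move", ...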
+class Sampler;
+
+
class Logger {
public:
#define DECLARE_ENUM(enum_item, ignore) enum_item,
@@ -141,142 +153,147 @@ class Logger {
#undef DECLARE_ENUM
// Acquires resources for logging if the right flags are set.
- static bool Setup();
+ bool Setup();
+
+ void EnsureTickerStarted();
+ void EnsureTickerStopped();
- static void EnsureTickerStarted();
- static void EnsureTickerStopped();
+ Sampler* sampler();
// Frees resources acquired in Setup.
- static void TearDown();
+ void TearDown();
// Enable the computation of a sliding window of states.
- static void EnableSlidingStateWindow();
+ void EnableSlidingStateWindow();
// Emits an event with a string value -> (name, value).
- static void StringEvent(const char* name, const char* value);
+ void StringEvent(const char* name, const char* value);
// Emits an event with an int value -> (name, value).
- static void IntEvent(const char* name, int value);
- static void IntPtrTEvent(const char* name, intptr_t value);
+ void IntEvent(const char* name, int value);
+ void IntPtrTEvent(const char* name, intptr_t value);
// Emits an event with a handle value -> (name, location).
- static void HandleEvent(const char* name, Object** location);
+ void HandleEvent(const char* name, Object** location);
// Emits memory management events for C allocated structures.
- static void NewEvent(const char* name, void* object, size_t size);
- static void DeleteEvent(const char* name, void* object);
+ void NewEvent(const char* name, void* object, size_t size);
+ void DeleteEvent(const char* name, void* object);
+
+ // Static versions of the above; they operate on the current isolate's logger.
+ // Used in TRACK_MEMORY(TypeName), defined in globals.h.
+ static void NewEventStatic(const char* name, void* object, size_t size);
+ static void DeleteEventStatic(const char* name, void* object);
// Emits an event with a tag, and some resource usage information.
// -> (name, tag, <rusage information>).
// Currently, the resource usage information is a process time stamp
// and a real time timestamp.
- static void ResourceEvent(const char* name, const char* tag);
+ void ResourceEvent(const char* name, const char* tag);
// Emits an event that an undefined property was read from an
// object.
- static void SuspectReadEvent(String* name, Object* obj);
+ void SuspectReadEvent(String* name, Object* obj);
// Emits an event when a message is put on or read from a debugging queue.
// DebugTag lets us put a call-site specific label on the event.
- static void DebugTag(const char* call_site_tag);
- static void DebugEvent(const char* event_type, Vector<uint16_t> parameter);
+ void DebugTag(const char* call_site_tag);
+ void DebugEvent(const char* event_type, Vector<uint16_t> parameter);
// ==== Events logged by --log-api. ====
- static void ApiNamedSecurityCheck(Object* key);
- static void ApiIndexedSecurityCheck(uint32_t index);
- static void ApiNamedPropertyAccess(const char* tag,
- JSObject* holder,
- Object* name);
- static void ApiIndexedPropertyAccess(const char* tag,
- JSObject* holder,
- uint32_t index);
- static void ApiObjectAccess(const char* tag, JSObject* obj);
- static void ApiEntryCall(const char* name);
+ void ApiNamedSecurityCheck(Object* key);
+ void ApiIndexedSecurityCheck(uint32_t index);
+ void ApiNamedPropertyAccess(const char* tag, JSObject* holder, Object* name);
+ void ApiIndexedPropertyAccess(const char* tag,
+ JSObject* holder,
+ uint32_t index);
+ void ApiObjectAccess(const char* tag, JSObject* obj);
+ void ApiEntryCall(const char* name);
// ==== Events logged by --log-code. ====
// Emits a code event for a callback function.
- static void CallbackEvent(String* name, Address entry_point);
- static void GetterCallbackEvent(String* name, Address entry_point);
- static void SetterCallbackEvent(String* name, Address entry_point);
+ void CallbackEvent(String* name, Address entry_point);
+ void GetterCallbackEvent(String* name, Address entry_point);
+ void SetterCallbackEvent(String* name, Address entry_point);
// Emits a code create event.
- static void CodeCreateEvent(LogEventsAndTags tag,
- Code* code, const char* source);
- static void CodeCreateEvent(LogEventsAndTags tag,
- Code* code, String* name);
- static void CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* name);
- static void CodeCreateEvent(LogEventsAndTags tag,
- Code* code,
- SharedFunctionInfo* shared,
- String* source, int line);
- static void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
- static void CodeMovingGCEvent();
+ void CodeCreateEvent(LogEventsAndTags tag,
+ Code* code, const char* source);
+ void CodeCreateEvent(LogEventsAndTags tag,
+ Code* code, String* name);
+ void CodeCreateEvent(LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ String* name);
+ void CodeCreateEvent(LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ String* source, int line);
+ void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
+ void CodeMovingGCEvent();
// Emits a code create event for a RegExp.
- static void RegExpCodeCreateEvent(Code* code, String* source);
+ void RegExpCodeCreateEvent(Code* code, String* source);
// Emits a code move event.
- static void CodeMoveEvent(Address from, Address to);
+ void CodeMoveEvent(Address from, Address to);
// Emits a code delete event.
- static void CodeDeleteEvent(Address from);
+ void CodeDeleteEvent(Address from);
- static void SFIMoveEvent(Address from, Address to);
+ void SharedFunctionInfoMoveEvent(Address from, Address to);
- static void SnapshotPositionEvent(Address addr, int pos);
+ void SnapshotPositionEvent(Address addr, int pos);
// ==== Events logged by --log-gc. ====
// Heap sampling events: start, end, and individual types.
- static void HeapSampleBeginEvent(const char* space, const char* kind);
- static void HeapSampleEndEvent(const char* space, const char* kind);
- static void HeapSampleItemEvent(const char* type, int number, int bytes);
- static void HeapSampleJSConstructorEvent(const char* constructor,
- int number, int bytes);
- static void HeapSampleJSRetainersEvent(const char* constructor,
+ void HeapSampleBeginEvent(const char* space, const char* kind);
+ void HeapSampleEndEvent(const char* space, const char* kind);
+ void HeapSampleItemEvent(const char* type, int number, int bytes);
+ void HeapSampleJSConstructorEvent(const char* constructor,
+ int number, int bytes);
+ void HeapSampleJSRetainersEvent(const char* constructor,
const char* event);
- static void HeapSampleJSProducerEvent(const char* constructor,
- Address* stack);
- static void HeapSampleStats(const char* space, const char* kind,
- intptr_t capacity, intptr_t used);
-
- static void SharedLibraryEvent(const char* library_path,
- uintptr_t start,
- uintptr_t end);
- static void SharedLibraryEvent(const wchar_t* library_path,
- uintptr_t start,
- uintptr_t end);
+ void HeapSampleJSProducerEvent(const char* constructor,
+ Address* stack);
+ void HeapSampleStats(const char* space, const char* kind,
+ intptr_t capacity, intptr_t used);
+
+ void SharedLibraryEvent(const char* library_path,
+ uintptr_t start,
+ uintptr_t end);
+ void SharedLibraryEvent(const wchar_t* library_path,
+ uintptr_t start,
+ uintptr_t end);
// ==== Events logged by --log-regexp. ====
// Regexp compilation and execution events.
- static void RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache);
+ void RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache);
// Logs an event reported from generated code.
- static void LogRuntime(Vector<const char> format, JSArray* args);
+ void LogRuntime(Vector<const char> format, JSArray* args);
#ifdef ENABLE_LOGGING_AND_PROFILING
- static bool is_logging() {
+ bool is_logging() {
return logging_nesting_ > 0;
}
// Pause/Resume collection of profiling data.
// When data collection is paused, CPU Tick events are discarded until
// data collection is Resumed.
- static void PauseProfiler(int flags, int tag);
- static void ResumeProfiler(int flags, int tag);
- static int GetActiveProfilerModules();
+ void PauseProfiler(int flags, int tag);
+ void ResumeProfiler(int flags, int tag);
+ int GetActiveProfilerModules();
// If logging is performed into a memory buffer, allows retrieval of
// previously written messages. See v8.h.
- static int GetLogLines(int from_pos, char* dest_buf, int max_size);
+ int GetLogLines(int from_pos, char* dest_buf, int max_size);
// Logs all compiled functions found in the heap.
- static void LogCompiledFunctions();
+ void LogCompiledFunctions();
// Logs all accessor callbacks found in the heap.
- static void LogAccessorCallbacks();
+ void LogAccessorCallbacks();
// Used for logging stubs found in the snapshot.
- static void LogCodeObjects();
+ void LogCodeObjects();
// Converts tag to a corresponding NATIVE_... if the script is native.
INLINE(static LogEventsAndTags ToNativeByScript(LogEventsAndTags, Script*));
@@ -284,70 +301,95 @@ class Logger {
// Profiler's sampling interval (in milliseconds).
static const int kSamplingIntervalMs = 1;
+ // Callback from Log; stops profiling in case of insufficient resources.
+ void LogFailure();
+
private:
+ class NameBuffer;
+ class NameMap;
+
+ Logger();
+ ~Logger();
// Emits the profiler's first message.
- static void ProfilerBeginEvent();
+ void ProfilerBeginEvent();
// Emits callback event messages.
- static void CallbackEventInternal(const char* prefix,
- const char* name,
- Address entry_point);
+ void CallbackEventInternal(const char* prefix,
+ const char* name,
+ Address entry_point);
// Internal configurable move event.
- static void MoveEventInternal(LogEventsAndTags event,
- Address from,
- Address to);
+ void MoveEventInternal(LogEventsAndTags event, Address from, Address to);
// Internal configurable delete event.
- static void DeleteEventInternal(LogEventsAndTags event,
- Address from);
+ void DeleteEventInternal(LogEventsAndTags event, Address from);
// Emits the source code of a regexp. Used by regexp events.
- static void LogRegExpSource(Handle<JSRegExp> regexp);
+ void LogRegExpSource(Handle<JSRegExp> regexp);
// Used for logging stubs found in the snapshot.
- static void LogCodeObject(Object* code_object);
+ void LogCodeObject(Object* code_object);
// Emits general information about generated code.
- static void LogCodeInfo();
+ void LogCodeInfo();
- // Handles code creation when low-level profiling is active.
- static void LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg);
+ void RegisterSnapshotCodeName(Code* code, const char* name, int name_size);
+
+ // Low-level logging support.
+
+ void LowLevelCodeCreateEvent(Code* code, const char* name, int name_size);
+
+ void LowLevelCodeMoveEvent(Address from, Address to);
+
+ void LowLevelCodeDeleteEvent(Address from);
+
+ void LowLevelSnapshotPositionEvent(Address addr, int pos);
+
+ void LowLevelLogWriteBytes(const char* bytes, int size);
+
+ template <typename T>
+ void LowLevelLogWriteStruct(const T& s) {
+ char tag = T::kTag;
+ LowLevelLogWriteBytes(reinterpret_cast<const char*>(&tag), sizeof(tag));
+ LowLevelLogWriteBytes(reinterpret_cast<const char*>(&s), sizeof(s));
+ }
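Any record type that exposes a one-byte T::kTag marker can be serialized through this helper. A hypothetical caller, assuming from and to are Addresses in scope (the struct name and layout are illustrative, not the Logger's real low-level records):

    // Illustrative record type for the low-level binary log.
    struct CodeMoveRecord {
      static const char kTag = 'M';  // type marker, written before the body
      Address from;
      Address to;
    };

    CodeMoveRecord record = { from, to };
    LowLevelLogWriteStruct(record);  // emits 'M', then the raw struct bytes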
// Emits a profiler tick event. Used by the profiler thread.
- static void TickEvent(TickSample* sample, bool overflow);
+ void TickEvent(TickSample* sample, bool overflow);
- static void ApiEvent(const char* name, ...);
+ void ApiEvent(const char* name, ...);
// Logs a StringEvent regardless of whether FLAG_log is true.
- static void UncheckedStringEvent(const char* name, const char* value);
+ void UncheckedStringEvent(const char* name, const char* value);
// Logs an IntEvent regardless of whether FLAG_log is true.
- static void UncheckedIntEvent(const char* name, int value);
- static void UncheckedIntPtrTEvent(const char* name, intptr_t value);
-
- // Stops logging and profiling in case of insufficient resources.
- static void StopLoggingAndProfiling();
+ void UncheckedIntEvent(const char* name, int value);
+ void UncheckedIntPtrTEvent(const char* name, intptr_t value);
// Returns whether the profiler's sampler is active.
- static bool IsProfilerSamplerActive();
+ bool IsProfilerSamplerActive();
// The sampler used by the profiler and the sliding state window.
- static Ticker* ticker_;
+ Ticker* ticker_;
// When the statistical profile is active, profiler_
// points to a Profiler, that handles collection
// of samples.
- static Profiler* profiler_;
+ Profiler* profiler_;
// SlidingStateWindow instance keeping a sliding window of the most
// recent VM states.
- static SlidingStateWindow* sliding_state_window_;
+ SlidingStateWindow* sliding_state_window_;
+
+ // An array of log event names.
+ const char* const* log_events_;
// Internal implementation classes with access to
// private members.
friend class EventLog;
+ friend class Isolate;
+ friend class LogMessageBuilder;
friend class TimeLog;
friend class Profiler;
friend class SlidingStateWindow;
@@ -356,21 +398,75 @@ class Logger {
friend class LoggerTestHelper;
- static int logging_nesting_;
- static int cpu_profiler_nesting_;
- static int heap_profiler_nesting_;
+
+ int logging_nesting_;
+ int cpu_profiler_nesting_;
+
+ Log* log_;
+
+ NameBuffer* name_buffer_;
+
+ NameMap* address_to_name_map_;
+
+ // Guards against multiple calls to TearDown() that can happen in some tests.
+ // 'true' between Setup() and TearDown().
+ bool is_initialized_;
+
+ // Support for 'incremental addresses' in compressed logs:
+ // LogMessageBuilder::AppendAddress(Address addr)
+ Address last_address_;
+ // Logger::TickEvent(...)
+ Address prev_sp_;
+ Address prev_function_;
+ // Logger::MoveEventInternal(...)
+ Address prev_to_;
+ // Logger::FunctionCreateEvent(...)
+ Address prev_code_;
friend class CpuProfiler;
#else
- static bool is_logging() { return false; }
+ bool is_logging() { return false; }
#endif
};
+// Process-wide registry of samplers.
+class SamplerRegistry : public AllStatic {
+ public:
+ enum State {
+ HAS_NO_SAMPLERS,
+ HAS_SAMPLERS,
+ HAS_CPU_PROFILING_SAMPLERS
+ };
+
+ typedef void (*VisitSampler)(Sampler*, void*);
+
+ static State GetState();
+
+ // Iterates over all active samplers while holding the internal lock.
+ // Returns whether there are any active samplers.
+ static bool IterateActiveSamplers(VisitSampler func, void* param);
+
+ // Adds/Removes an active sampler.
+ static void AddActiveSampler(Sampler* sampler);
+ static void RemoveActiveSampler(Sampler* sampler);
+
+ private:
+ static bool ActiveSamplersExist() {
+ return active_samplers_ != NULL && !active_samplers_->is_empty();
+ }
+
+ static Mutex* mutex_; // Protects the state below.
+ static List<Sampler*>* active_samplers_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SamplerRegistry);
+};
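A sketch of the intended call pattern: the registry keeps its mutex held for the whole iteration, so the visitor must not call back into the registry. The visitor below is hypothetical:

    // Hypothetical visitor passed to IterateActiveSamplers.
    static void SampleOne(Sampler* sampler, void* param) {
      TickSample* sample = reinterpret_cast<TickSample*>(param);
      // ... record a tick against this sampler; must not re-enter
      // SamplerRegistry, whose internal mutex is already held ...
    }

    TickSample sample;
    bool any_active =
        SamplerRegistry::IterateActiveSamplers(&SampleOne, &sample);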
+
+
// Class that extracts a stack trace, used for profiling.
class StackTracer : public AllStatic {
public:
- static void Trace(TickSample* sample);
+ static void Trace(Isolate* isolate, TickSample* sample);
};
} } // namespace v8::internal
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 69f36c09c..fc08cb129 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -38,12 +38,13 @@ const GETTER = 0;
const SETTER = 1;
# These definitions must match the index of the properties in objects.h.
-const kApiTagOffset = 0;
-const kApiPropertyListOffset = 1;
-const kApiSerialNumberOffset = 2;
-const kApiConstructorOffset = 2;
-const kApiPrototypeTemplateOffset = 5;
-const kApiParentTemplateOffset = 6;
+const kApiTagOffset = 0;
+const kApiPropertyListOffset = 1;
+const kApiSerialNumberOffset = 2;
+const kApiConstructorOffset = 2;
+const kApiPrototypeTemplateOffset = 5;
+const kApiParentTemplateOffset = 6;
+const kApiPrototypeAttributesOffset = 15;
const NO_HINT = 0;
const NUMBER_HINT = 1;
@@ -127,7 +128,8 @@ macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
macro TO_UINT32(arg) = (arg >>> 0);
macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString(arg));
macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber(arg));
-
+macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg));
+macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
# Macros implemented in Python.
python macro CHAR_CODE(str) = ord(str[1]);
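The `arg - arg == 0` guard in JSON_NUMBER_TO_STRING above is a compact finiteness test: for NaN and ±Infinity the subtraction yields NaN, which compares unequal to everything, so non-finite numbers serialize as "null" exactly as the JSON spec requires. The same trick, demonstrated standalone in C++ (not V8 code):

    #include <cmath>
    #include <cstdio>

    // x - x is 0.0 for every finite double, and NaN for NaN and +/-Infinity;
    // NaN compares unequal to everything, including 0.0.
    static bool IsFiniteNumber(double x) { return x - x == 0.0; }

    int main() {
      std::printf("%d %d %d\n",
                  IsFiniteNumber(1.5),        // prints 1
                  IsFiniteNumber(NAN),        // prints 0
                  IsFiniteNumber(INFINITY));  // prints 0
      return 0;
    }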
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index a4c782c59..fc1ab9270 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,28 +44,27 @@ namespace internal {
// -------------------------------------------------------------------------
// MarkCompactCollector
-bool MarkCompactCollector::force_compaction_ = false;
-bool MarkCompactCollector::compacting_collection_ = false;
-bool MarkCompactCollector::compact_on_next_gc_ = false;
-
-int MarkCompactCollector::previous_marked_count_ = 0;
-GCTracer* MarkCompactCollector::tracer_ = NULL;
-
-
+MarkCompactCollector::MarkCompactCollector() : // NOLINT
#ifdef DEBUG
-MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;
-
-// Counters used for debugging the marking phase of mark-compact or mark-sweep
-// collection.
-int MarkCompactCollector::live_bytes_ = 0;
-int MarkCompactCollector::live_young_objects_size_ = 0;
-int MarkCompactCollector::live_old_data_objects_size_ = 0;
-int MarkCompactCollector::live_old_pointer_objects_size_ = 0;
-int MarkCompactCollector::live_code_objects_size_ = 0;
-int MarkCompactCollector::live_map_objects_size_ = 0;
-int MarkCompactCollector::live_cell_objects_size_ = 0;
-int MarkCompactCollector::live_lo_objects_size_ = 0;
+ state_(IDLE),
#endif
+ force_compaction_(false),
+ compacting_collection_(false),
+ compact_on_next_gc_(false),
+ previous_marked_count_(0),
+ tracer_(NULL),
+#ifdef DEBUG
+ live_young_objects_size_(0),
+ live_old_pointer_objects_size_(0),
+ live_old_data_objects_size_(0),
+ live_code_objects_size_(0),
+ live_map_objects_size_(0),
+ live_cell_objects_size_(0),
+ live_lo_objects_size_(0),
+ live_bytes_(0),
+#endif
+ heap_(NULL),
+ code_flusher_(NULL) { }
void MarkCompactCollector::CollectGarbage() {
@@ -87,15 +86,15 @@ void MarkCompactCollector::CollectGarbage() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
EncodeForwardingAddresses();
- Heap::MarkMapPointersAsEncoded(true);
+ heap()->MarkMapPointersAsEncoded(true);
UpdatePointers();
- Heap::MarkMapPointersAsEncoded(false);
- PcToCodeCache::FlushPcToCodeCache();
+ heap()->MarkMapPointersAsEncoded(false);
+ heap()->isolate()->pc_to_code_cache()->Flush();
RelocateObjects();
} else {
SweepSpaces();
- PcToCodeCache::FlushPcToCodeCache();
+ heap()->isolate()->pc_to_code_cache()->Flush();
}
Finish();
@@ -124,7 +123,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
compact_on_next_gc_ = false;
if (FLAG_never_compact) compacting_collection_ = false;
- if (!Heap::map_space()->MapPointersEncodable())
+ if (!heap()->map_space()->MapPointersEncodable())
compacting_collection_ = false;
if (FLAG_collect_maps) CreateBackPointers();
#ifdef ENABLE_GDB_JIT_INTERFACE
@@ -162,9 +161,9 @@ void MarkCompactCollector::Finish() {
// force lazy re-initialization of it. This must be done after the
// GC, because it relies on the new address of certain old space
// objects (empty string, illegal builtin).
- StubCache::Clear();
+ heap()->isolate()->stub_cache()->Clear();
- ExternalStringTable::CleanUp();
+ heap()->external_string_table_.CleanUp();
// If we've just compacted old space there's no reason to check the
// fragmentation limit. Just return.
@@ -221,17 +220,19 @@ void MarkCompactCollector::Finish() {
// and continue with marking. This process repeats until all reachable
// objects have been marked.
-static MarkingStack marking_stack;
-
-class FlushCode : public AllStatic {
+class CodeFlusher {
public:
- static void AddCandidate(SharedFunctionInfo* shared_info) {
+ explicit CodeFlusher(Isolate* isolate)
+ : isolate_(isolate),
+ jsfunction_candidates_head_(NULL),
+ shared_function_info_candidates_head_(NULL) {}
+
+ void AddCandidate(SharedFunctionInfo* shared_info) {
SetNextCandidate(shared_info, shared_function_info_candidates_head_);
shared_function_info_candidates_head_ = shared_info;
}
-
- static void AddCandidate(JSFunction* function) {
+ void AddCandidate(JSFunction* function) {
ASSERT(function->unchecked_code() ==
function->unchecked_shared()->unchecked_code());
@@ -239,15 +240,14 @@ class FlushCode : public AllStatic {
jsfunction_candidates_head_ = function;
}
-
- static void ProcessCandidates() {
+ void ProcessCandidates() {
ProcessSharedFunctionInfoCandidates();
ProcessJSFunctionCandidates();
}
private:
- static void ProcessJSFunctionCandidates() {
- Code* lazy_compile = Builtins::builtin(Builtins::LazyCompile);
+ void ProcessJSFunctionCandidates() {
+ Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
JSFunction* candidate = jsfunction_candidates_head_;
JSFunction* next_candidate;
@@ -271,8 +271,8 @@ class FlushCode : public AllStatic {
}
- static void ProcessSharedFunctionInfoCandidates() {
- Code* lazy_compile = Builtins::builtin(Builtins::LazyCompile);
+ void ProcessSharedFunctionInfoCandidates() {
+ Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
SharedFunctionInfo* next_candidate;
@@ -291,57 +291,55 @@ class FlushCode : public AllStatic {
shared_function_info_candidates_head_ = NULL;
}
-
static JSFunction** GetNextCandidateField(JSFunction* candidate) {
return reinterpret_cast<JSFunction**>(
candidate->address() + JSFunction::kCodeEntryOffset);
}
-
static JSFunction* GetNextCandidate(JSFunction* candidate) {
return *GetNextCandidateField(candidate);
}
-
static void SetNextCandidate(JSFunction* candidate,
JSFunction* next_candidate) {
*GetNextCandidateField(candidate) = next_candidate;
}
-
- STATIC_ASSERT(kPointerSize <= Code::kHeaderSize - Code::kHeaderPaddingStart);
-
-
static SharedFunctionInfo** GetNextCandidateField(
SharedFunctionInfo* candidate) {
Code* code = candidate->unchecked_code();
return reinterpret_cast<SharedFunctionInfo**>(
- code->address() + Code::kHeaderPaddingStart);
+ code->address() + Code::kNextCodeFlushingCandidateOffset);
}
-
static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
return *GetNextCandidateField(candidate);
}
-
static void SetNextCandidate(SharedFunctionInfo* candidate,
SharedFunctionInfo* next_candidate) {
*GetNextCandidateField(candidate) = next_candidate;
}
- static JSFunction* jsfunction_candidates_head_;
+ Isolate* isolate_;
+ JSFunction* jsfunction_candidates_head_;
+ SharedFunctionInfo* shared_function_info_candidates_head_;
- static SharedFunctionInfo* shared_function_info_candidates_head_;
+ DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
-JSFunction* FlushCode::jsfunction_candidates_head_ = NULL;
-SharedFunctionInfo* FlushCode::shared_function_info_candidates_head_ = NULL;
+MarkCompactCollector::~MarkCompactCollector() {
+ if (code_flusher_ != NULL) {
+ delete code_flusher_;
+ code_flusher_ = NULL;
+ }
+}
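Note how CodeFlusher threads both candidate lists through the objects themselves: a JSFunction's next link reuses its code-entry slot, and a SharedFunctionInfo's link is stashed inside its code object, so enqueuing candidates allocates nothing during GC. The same intrusive-list idiom in miniature, with a hypothetical struct and field:

    // Minimal sketch: reuse an existing field of the object as the link.
    struct Candidate {
      void* reusable_slot;  // normally holds other data; doubles as 'next'
    };

    static Candidate** NextField(Candidate* c) {
      return reinterpret_cast<Candidate**>(&c->reusable_slot);
    }

    static void AddCandidate(Candidate** head, Candidate* c) {
      *NextField(c) = *head;  // the slot temporarily becomes the list link
      *head = c;
    }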
+
static inline HeapObject* ShortCircuitConsString(Object** p) {
// Optimization: If the heap object pointed to by p is a non-symbol
- // cons string whose right substring is Heap::empty_string, update
+ // cons string whose right substring is HEAP->empty_string, update
// it in place to its left substring. Return the updated value.
//
// Here we assume that if we change *p, we replace it with a heap object
@@ -349,7 +347,7 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
//
// The check performed is:
// object->IsConsString() && !object->IsSymbol() &&
- // (ConsString::cast(object)->second() == Heap::empty_string())
+ // (ConsString::cast(object)->second() == HEAP->empty_string())
// except the maps for the object and its possible substrings might be
// marked.
HeapObject* object = HeapObject::cast(*p);
@@ -359,7 +357,8 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
- if (second != Heap::raw_unchecked_empty_string()) {
+ Heap* heap = map_word.ToMap()->heap();
+ if (second != heap->raw_unchecked_empty_string()) {
return object;
}
@@ -367,7 +366,7 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
// page dirty marks. Therefore, we only replace the string with its left
// substring when page dirty marks do not change.
Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
- if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;
+ if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
*p = first;
return HeapObject::cast(first);
@@ -380,19 +379,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
table_.GetVisitor(map)(map, obj);
}
- static void EnableCodeFlushing(bool enabled) {
- if (enabled) {
- table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
- table_.Register(kVisitSharedFunctionInfo,
- &VisitSharedFunctionInfoAndFlushCode);
-
- } else {
- table_.Register(kVisitJSFunction, &VisitJSFunction);
- table_.Register(kVisitSharedFunctionInfo,
- &VisitSharedFunctionInfoGeneric);
- }
- }
-
static void Initialize() {
table_.Register(kVisitShortcutCandidate,
&FixedBodyVisitor<StaticMarkingVisitor,
@@ -410,6 +396,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
FixedArray::BodyDescriptor,
void>::Visit);
+ table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);
+
table_.Register(kVisitGlobalContext,
&FixedBodyVisitor<StaticMarkingVisitor,
Context::MarkCompactBodyDescriptor,
@@ -454,84 +442,91 @@ class StaticMarkingVisitor : public StaticVisitorBase {
kVisitStructGeneric>();
}
- INLINE(static void VisitPointer(Object** p)) {
- MarkObjectByPointer(p);
+ INLINE(static void VisitPointer(Heap* heap, Object** p)) {
+ MarkObjectByPointer(heap, p);
}
- INLINE(static void VisitPointers(Object** start, Object** end)) {
+ INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
// Mark all objects pointed to in [start, end).
const int kMinRangeForMarkingRecursion = 64;
if (end - start >= kMinRangeForMarkingRecursion) {
- if (VisitUnmarkedObjects(start, end)) return;
+ if (VisitUnmarkedObjects(heap, start, end)) return;
// We are close to a stack overflow, so just mark the objects.
}
- for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+ for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
}
- static inline void VisitCodeTarget(RelocInfo* rinfo) {
+ static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
- if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
+ if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) {
IC::Clear(rinfo->pc());
// Please note targets for cleared inline caches do not have to be
- // marked since they are contained in Heap::non_monomorphic_cache().
+ // marked since they are contained in HEAP->non_monomorphic_cache().
} else {
- MarkCompactCollector::MarkObject(code);
+ heap->mark_compact_collector()->MarkObject(code);
}
}
- static void VisitGlobalPropertyCell(RelocInfo* rinfo) {
+ static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
Object* cell = rinfo->target_cell();
Object* old_cell = cell;
- VisitPointer(&cell);
+ VisitPointer(heap, &cell);
if (cell != old_cell) {
rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
}
}
- static inline void VisitDebugTarget(RelocInfo* rinfo) {
+ static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
rinfo->IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
rinfo->IsPatchedDebugBreakSlotSequence()));
HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
- MarkCompactCollector::MarkObject(code);
+ heap->mark_compact_collector()->MarkObject(code);
}
// Mark object pointed to by p.
- INLINE(static void MarkObjectByPointer(Object** p)) {
+ INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
if (!(*p)->IsHeapObject()) return;
HeapObject* object = ShortCircuitConsString(p);
- MarkCompactCollector::MarkObject(object);
+ if (!object->IsMarked()) {
+ heap->mark_compact_collector()->MarkUnmarkedObject(object);
+ }
}
+
// Visit an unmarked object.
- static inline void VisitUnmarkedObject(HeapObject* obj) {
+ INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
+ HeapObject* obj)) {
#ifdef DEBUG
- ASSERT(Heap::Contains(obj));
+ ASSERT(Isolate::Current()->heap()->Contains(obj));
ASSERT(!obj->IsMarked());
#endif
Map* map = obj->map();
- MarkCompactCollector::SetMark(obj);
+ collector->SetMark(obj);
// Mark the map pointer and the body.
- MarkCompactCollector::MarkObject(map);
+ if (!map->IsMarked()) collector->MarkUnmarkedObject(map);
IterateBody(map, obj);
}
// Visit all unmarked objects pointed to by [start, end).
// Returns false if the operation fails (lack of stack space).
- static inline bool VisitUnmarkedObjects(Object** start, Object** end) {
+ static inline bool VisitUnmarkedObjects(Heap* heap,
+ Object** start,
+ Object** end) {
// Return false if we are close to the stack limit.
- StackLimitCheck check;
+ StackLimitCheck check(heap->isolate());
if (check.HasOverflowed()) return false;
+ MarkCompactCollector* collector = heap->mark_compact_collector();
// Visit the unmarked objects.
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
if (obj->IsMarked()) continue;
- VisitUnmarkedObject(obj);
+ VisitUnmarkedObject(collector, obj);
}
return true;
}
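The shape of VisitUnmarkedObjects — mark recursively for large pointer ranges, but return false near the C stack limit so the caller falls back to the explicit marking stack — can be sketched independently of V8's types. Everything below is illustrative:

    #include <vector>

    // Toy object graph with a recursion guard and a worklist fallback.
    struct Obj {
      bool marked = false;
      std::vector<Obj*> slots;
    };

    static bool NearStackLimit(int depth) { return depth > 10000; }  // stand-in

    static bool MarkRecursively(Obj* obj, std::vector<Obj*>* worklist,
                                int depth) {
      if (NearStackLimit(depth)) return false;  // caller reverts to worklist
      for (Obj* child : obj->slots) {
        if (child == NULL || child->marked) continue;
        child->marked = true;
        if (!MarkRecursively(child, worklist, depth + 1)) {
          worklist->push_back(child);  // finish this subgraph iteratively
          return false;
        }
      }
      return true;
    }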
@@ -559,7 +554,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
void> StructObjectVisitor;
static void VisitCode(Map* map, HeapObject* object) {
- reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>();
+ reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
+ map->heap());
}
// Code flushing support.
@@ -568,25 +564,24 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// flushed.
static const int kCodeAgeThreshold = 5;
- inline static bool HasSourceCode(SharedFunctionInfo* info) {
- Object* undefined = Heap::raw_unchecked_undefined_value();
+ inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
+ Object* undefined = heap->raw_unchecked_undefined_value();
return (info->script() != undefined) &&
(reinterpret_cast<Script*>(info->script())->source() != undefined);
}
inline static bool IsCompiled(JSFunction* function) {
- return
- function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
+ return function->unchecked_code() !=
+ function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
}
-
inline static bool IsCompiled(SharedFunctionInfo* function) {
- return
- function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
+ return function->unchecked_code() !=
+ function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
}
- inline static bool IsFlushable(JSFunction* function) {
+ inline static bool IsFlushable(Heap* heap, JSFunction* function) {
SharedFunctionInfo* shared_info = function->unchecked_shared();
// Code is either on stack, in compilation cache or referenced
@@ -601,10 +596,10 @@ class StaticMarkingVisitor : public StaticVisitorBase {
return false;
}
- return IsFlushable(shared_info);
+ return IsFlushable(heap, shared_info);
}
- inline static bool IsFlushable(SharedFunctionInfo* shared_info) {
+ inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
if (shared_info->unchecked_code()->IsMarked()) {
@@ -614,7 +609,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// The function must be compiled and have the source code available,
// to be able to recompile it in case we need the function again.
- if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) {
+ if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
return false;
}
@@ -645,15 +640,15 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
- static bool FlushCodeForFunction(JSFunction* function) {
- if (!IsFlushable(function)) return false;
+ static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
+ if (!IsFlushable(heap, function)) return false;
// This function's code looks flushable. But we have to postpone the
// decision until we see all functions that point to the same
// SharedFunctionInfo because some of them might be optimized.
// That would make the nonoptimized version of the code nonflushable,
// because it is required for bailing out from optimized code.
- FlushCode::AddCandidate(function);
+ heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
return true;
}
@@ -676,9 +671,11 @@ class StaticMarkingVisitor : public StaticVisitorBase {
if (!ctx->IsHeapObject()) return false;
Map* map = SafeMap(ctx);
- if (!(map == Heap::raw_unchecked_context_map() ||
- map == Heap::raw_unchecked_catch_context_map() ||
- map == Heap::raw_unchecked_global_context_map())) {
+ Heap* heap = map->heap();
+ if (!(map == heap->raw_unchecked_function_context_map() ||
+ map == heap->raw_unchecked_catch_context_map() ||
+ map == heap->raw_unchecked_with_context_map() ||
+ map == heap->raw_unchecked_global_context_map())) {
return false;
}
@@ -705,29 +702,37 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static void VisitSharedFunctionInfoAndFlushCode(Map* map,
HeapObject* object) {
+ MarkCompactCollector* collector = map->heap()->mark_compact_collector();
+ if (!collector->is_code_flushing_enabled()) {
+ VisitSharedFunctionInfoGeneric(map, object);
+ return;
+ }
VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
}
static void VisitSharedFunctionInfoAndFlushCodeGeneric(
Map* map, HeapObject* object, bool known_flush_code_candidate) {
+ Heap* heap = map->heap();
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
if (!known_flush_code_candidate) {
- known_flush_code_candidate = IsFlushable(shared);
- if (known_flush_code_candidate) FlushCode::AddCandidate(shared);
+ known_flush_code_candidate = IsFlushable(heap, shared);
+ if (known_flush_code_candidate) {
+ heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
+ }
}
- VisitSharedFunctionInfoFields(object, known_flush_code_candidate);
+ VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
}
- static void VisitCodeEntry(Address entry_address) {
+ static void VisitCodeEntry(Heap* heap, Address entry_address) {
Object* code = Code::GetObjectFromEntryAddress(entry_address);
Object* old_code = code;
- VisitPointer(&code);
+ VisitPointer(heap, &code);
if (code != old_code) {
Memory::Address_at(entry_address) =
reinterpret_cast<Code*>(code)->entry();
@@ -736,16 +741,22 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
+ Heap* heap = map->heap();
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ if (!collector->is_code_flushing_enabled()) {
+ VisitJSFunction(map, object);
+ return;
+ }
+
JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
// The function must have a valid context and not be a builtin.
bool flush_code_candidate = false;
if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
- flush_code_candidate = FlushCodeForFunction(jsfunction);
+ flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
}
if (!flush_code_candidate) {
- MarkCompactCollector::MarkObject(
- jsfunction->unchecked_shared()->unchecked_code());
+ collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code());
if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
// For optimized functions we should retain both non-optimized version
@@ -761,8 +772,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
i < count;
i++) {
JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
- MarkCompactCollector::MarkObject(
- inlined->unchecked_shared()->unchecked_code());
+ collector->MarkObject(inlined->unchecked_shared()->unchecked_code());
}
}
}
@@ -787,11 +797,15 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static inline void VisitJSFunctionFields(Map* map,
JSFunction* object,
bool flush_code_candidate) {
- VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset),
+ Heap* heap = map->heap();
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+
+ VisitPointers(heap,
+ SLOT_ADDR(object, JSFunction::kPropertiesOffset),
SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
if (!flush_code_candidate) {
- VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset);
+ VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
} else {
// Don't visit code object.
@@ -800,15 +814,16 @@ class StaticMarkingVisitor : public StaticVisitorBase {
SharedFunctionInfo* shared_info = object->unchecked_shared();
if (!shared_info->IsMarked()) {
Map* shared_info_map = shared_info->map();
- MarkCompactCollector::SetMark(shared_info);
- MarkCompactCollector::MarkObject(shared_info_map);
+ collector->SetMark(shared_info);
+ collector->MarkObject(shared_info_map);
VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
shared_info,
true);
}
}
- VisitPointers(SLOT_ADDR(object,
+ VisitPointers(heap,
+ SLOT_ADDR(object,
JSFunction::kCodeEntryOffset + kPointerSize),
SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));
@@ -816,15 +831,17 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
- static void VisitSharedFunctionInfoFields(HeapObject* object,
+ static void VisitSharedFunctionInfoFields(Heap* heap,
+ HeapObject* object,
bool flush_code_candidate) {
- VisitPointer(SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
+ VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
if (!flush_code_candidate) {
- VisitPointer(SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
+ VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
}
- VisitPointers(SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
+ VisitPointers(heap,
+ SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
SLOT_ADDR(object, SharedFunctionInfo::kSize));
}
@@ -842,40 +859,42 @@ VisitorDispatchTable<StaticMarkingVisitor::Callback>
class MarkingVisitor : public ObjectVisitor {
public:
+ explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
+
void VisitPointer(Object** p) {
- StaticMarkingVisitor::VisitPointer(p);
+ StaticMarkingVisitor::VisitPointer(heap_, p);
}
void VisitPointers(Object** start, Object** end) {
- StaticMarkingVisitor::VisitPointers(start, end);
+ StaticMarkingVisitor::VisitPointers(heap_, start, end);
}
- void VisitCodeTarget(RelocInfo* rinfo) {
- StaticMarkingVisitor::VisitCodeTarget(rinfo);
- }
-
- void VisitGlobalPropertyCell(RelocInfo* rinfo) {
- StaticMarkingVisitor::VisitGlobalPropertyCell(rinfo);
- }
-
- void VisitDebugTarget(RelocInfo* rinfo) {
- StaticMarkingVisitor::VisitDebugTarget(rinfo);
- }
+ private:
+ Heap* heap_;
};
class CodeMarkingVisitor : public ThreadVisitor {
public:
- void VisitThread(ThreadLocalTop* top) {
- for (StackFrameIterator it(top); !it.done(); it.Advance()) {
- MarkCompactCollector::MarkObject(it.frame()->unchecked_code());
+ explicit CodeMarkingVisitor(MarkCompactCollector* collector)
+ : collector_(collector) {}
+
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+ for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+ collector_->MarkObject(it.frame()->unchecked_code());
}
}
+
+ private:
+ MarkCompactCollector* collector_;
};
class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
public:
+ explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
+ : collector_(collector) {}
+
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) VisitPointer(p);
}
@@ -884,44 +903,52 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
Object* obj = *slot;
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
- MarkCompactCollector::MarkObject(shared->unchecked_code());
- MarkCompactCollector::MarkObject(shared);
+ collector_->MarkObject(shared->unchecked_code());
+ collector_->MarkObject(shared);
}
}
+
+ private:
+ MarkCompactCollector* collector_;
};
void MarkCompactCollector::PrepareForCodeFlushing() {
+ ASSERT(heap() == Isolate::Current()->heap());
+
if (!FLAG_flush_code) {
- StaticMarkingVisitor::EnableCodeFlushing(false);
+ EnableCodeFlushing(false);
return;
}
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (Debug::IsLoaded() || Debug::has_break_points()) {
- StaticMarkingVisitor::EnableCodeFlushing(false);
+ if (heap()->isolate()->debug()->IsLoaded() ||
+ heap()->isolate()->debug()->has_break_points()) {
+ EnableCodeFlushing(false);
return;
}
#endif
- StaticMarkingVisitor::EnableCodeFlushing(true);
+ EnableCodeFlushing(true);
// Ensure that the empty descriptor array is marked. Method MarkDescriptorArray
// relies on it being marked before any other descriptor array.
- MarkObject(Heap::raw_unchecked_empty_descriptor_array());
+ MarkObject(heap()->raw_unchecked_empty_descriptor_array());
// Make sure we are not referencing the code from the stack.
+ ASSERT(this == heap()->mark_compact_collector());
for (StackFrameIterator it; !it.done(); it.Advance()) {
MarkObject(it.frame()->unchecked_code());
}
// Iterate the archived stacks in all threads to check if
// the code is referenced.
- CodeMarkingVisitor code_marking_visitor;
- ThreadManager::IterateArchivedThreads(&code_marking_visitor);
+ CodeMarkingVisitor code_marking_visitor(this);
+ heap()->isolate()->thread_manager()->IterateArchivedThreads(
+ &code_marking_visitor);
- SharedFunctionInfoMarkingVisitor visitor;
- CompilationCache::IterateFunctions(&visitor);
- HandleScopeImplementer::Iterate(&visitor);
+ SharedFunctionInfoMarkingVisitor visitor(this);
+ heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
+ heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
ProcessMarkingStack();
}
@@ -930,6 +957,9 @@ void MarkCompactCollector::PrepareForCodeFlushing() {
// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
public:
+ explicit RootMarkingVisitor(Heap* heap)
+ : collector_(heap->mark_compact_collector()) { }
+
void VisitPointer(Object** p) {
MarkObjectByPointer(p);
}
@@ -948,23 +978,26 @@ class RootMarkingVisitor : public ObjectVisitor {
Map* map = object->map();
// Mark the object.
- MarkCompactCollector::SetMark(object);
+ collector_->SetMark(object);
// Mark the map pointer and body, and push them on the marking stack.
- MarkCompactCollector::MarkObject(map);
+ collector_->MarkObject(map);
StaticMarkingVisitor::IterateBody(map, object);
// Mark all the objects reachable from the map and body. May leave
// overflowed objects in the heap.
- MarkCompactCollector::EmptyMarkingStack();
+ collector_->EmptyMarkingStack();
}
+
+ MarkCompactCollector* collector_;
};
// Helper class for pruning the symbol table.
class SymbolTableCleaner : public ObjectVisitor {
public:
- SymbolTableCleaner() : pointers_removed_(0) { }
+ explicit SymbolTableCleaner(Heap* heap)
+ : heap_(heap), pointers_removed_(0) { }
virtual void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end).
@@ -976,10 +1009,10 @@ class SymbolTableCleaner : public ObjectVisitor {
// Since no objects have yet been moved we can safely access the map of
// the object.
if ((*p)->IsExternalString()) {
- Heap::FinalizeExternalString(String::cast(*p));
+ heap_->FinalizeExternalString(String::cast(*p));
}
// Set the entry to null_value (as deleted).
- *p = Heap::raw_unchecked_null_value();
+ *p = heap_->raw_unchecked_null_value();
pointers_removed_++;
}
}
@@ -988,7 +1021,9 @@ class SymbolTableCleaner : public ObjectVisitor {
int PointersRemoved() {
return pointers_removed_;
}
+
private:
+ Heap* heap_;
int pointers_removed_;
};
@@ -1010,30 +1045,44 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
ASSERT(!object->IsMarked());
- ASSERT(Heap::Contains(object));
+ ASSERT(HEAP->Contains(object));
if (object->IsMap()) {
Map* map = Map::cast(object);
- if (FLAG_cleanup_caches_in_maps_at_gc) {
- map->ClearCodeCache();
+ if (FLAG_cleanup_code_caches_at_gc) {
+ map->ClearCodeCache(heap());
}
SetMark(map);
- if (FLAG_collect_maps &&
- map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
- map->instance_type() <= JS_FUNCTION_TYPE) {
+
+ // When map collection is enabled we have to mark through map's transitions
+ // in a special way to make transition links weak.
+ // Only maps for subclasses of JSReceiver can have transitions.
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
MarkMapContents(map);
} else {
- marking_stack.Push(map);
+ marking_stack_.Push(map);
}
} else {
SetMark(object);
- marking_stack.Push(object);
+ marking_stack_.Push(object);
}
}
void MarkCompactCollector::MarkMapContents(Map* map) {
- MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
- *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));
+ // Mark the prototype transitions array, but don't push it onto the
+ // marking stack. This makes references from it weak; dead prototype
+ // transitions are cleaned up later in ClearNonLiveTransitions.
+ FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
+ if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions);
+
+ Object* raw_descriptor_array =
+ *HeapObject::RawField(map,
+ Map::kInstanceDescriptorsOrBitField3Offset);
+ if (!raw_descriptor_array->IsSmi()) {
+ MarkDescriptorArray(
+ reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
+ }
// Mark the Object* fields of the Map.
// Since the descriptor array has been marked already, it is fine
@@ -1043,7 +1092,7 @@ void MarkCompactCollector::MarkMapContents(Map* map) {
Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
- StaticMarkingVisitor::VisitPointers(start_slot, end_slot);
+ StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot);
}
@@ -1051,7 +1100,7 @@ void MarkCompactCollector::MarkDescriptorArray(
DescriptorArray* descriptors) {
if (descriptors->IsMarked()) return;
// Empty descriptor array is marked as a root before any maps are marked.
- ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
+ ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array());
SetMark(descriptors);
FixedArray* contents = reinterpret_cast<FixedArray*>(
@@ -1061,11 +1110,11 @@ void MarkCompactCollector::MarkDescriptorArray(
ASSERT(contents->IsFixedArray());
ASSERT(contents->length() >= 2);
SetMark(contents);
- // Contents contains (value, details) pairs. If the details say that
- // the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, or
- // NULL_DESCRIPTOR, we don't mark the value as live. Only for
- // MAP_TRANSITION and CONSTANT_TRANSITION is the value an Object* (a
- // Map*).
+ // Contents contains (value, details) pairs. If the details say that the type
+ // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
+ // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
+ // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
+ // CONSTANT_TRANSITION is the value an Object* (a Map*).
for (int i = 0; i < contents->length(); i += 2) {
// If the pair (value, details) at index i, i+1 is not
// a transition or null descriptor, mark the value.
@@ -1074,27 +1123,27 @@ void MarkCompactCollector::MarkDescriptorArray(
HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
if (object->IsHeapObject() && !object->IsMarked()) {
SetMark(object);
- marking_stack.Push(object);
+ marking_stack_.Push(object);
}
}
}
// The DescriptorArray descriptors contains a pointer to its contents array,
// but the contents array is already marked.
- marking_stack.Push(descriptors);
+ marking_stack_.Push(descriptors);
}
void MarkCompactCollector::CreateBackPointers() {
- HeapObjectIterator iterator(Heap::map_space());
+ HeapObjectIterator iterator(heap()->map_space());
for (HeapObject* next_object = iterator.next();
next_object != NULL; next_object = iterator.next()) {
if (next_object->IsMap()) { // Could also be ByteArray on free list.
Map* map = Map::cast(next_object);
- if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
- map->instance_type() <= JS_FUNCTION_TYPE) {
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
map->CreateBackPointers();
} else {
- ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
+ ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
}
}
}
@@ -1111,25 +1160,29 @@ static int OverflowObjectSize(HeapObject* obj) {
}
-// Fill the marking stack with overflowed objects returned by the given
-// iterator. Stop when the marking stack is filled or the end of the space
-// is reached, whichever comes first.
-template<class T>
-static void ScanOverflowedObjects(T* it) {
- // The caller should ensure that the marking stack is initially not full,
- // so that we don't waste effort pointlessly scanning for objects.
- ASSERT(!marking_stack.is_full());
-
- for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
- if (object->IsOverflowed()) {
- object->ClearOverflow();
- ASSERT(object->IsMarked());
- ASSERT(Heap::Contains(object));
- marking_stack.Push(object);
- if (marking_stack.is_full()) return;
+class OverflowedObjectsScanner : public AllStatic {
+ public:
+ // Fill the marking stack with overflowed objects returned by the given
+ // iterator. Stop when the marking stack is filled or the end of the space
+ // is reached, whichever comes first.
+ template<class T>
+ static inline void ScanOverflowedObjects(MarkCompactCollector* collector,
+ T* it) {
+ // The caller should ensure that the marking stack is initially not full,
+ // so that we don't waste effort pointlessly scanning for objects.
+ ASSERT(!collector->marking_stack_.is_full());
+
+ for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
+ if (object->IsOverflowed()) {
+ object->ClearOverflow();
+ ASSERT(object->IsMarked());
+ ASSERT(HEAP->Contains(object));
+ collector->marking_stack_.Push(object);
+ if (collector->marking_stack_.is_full()) return;
+ }
}
}
-}
+};
bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
@@ -1138,11 +1191,11 @@ bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
void MarkCompactCollector::MarkSymbolTable() {
- SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
+ SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
// Mark the symbol table itself.
SetMark(symbol_table);
// Explicitly mark the prefix.
- MarkingVisitor marker;
+ MarkingVisitor marker(heap());
symbol_table->IteratePrefix(&marker);
ProcessMarkingStack();
}
@@ -1151,13 +1204,13 @@ void MarkCompactCollector::MarkSymbolTable() {
void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// Mark the heap roots including global variables, stack variables,
// etc., and all objects reachable from them.
- Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
+ heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
// Handle the symbol table specially.
MarkSymbolTable();
// There may be overflowed objects in the heap. Visit them now.
- while (marking_stack.overflowed()) {
+ while (marking_stack_.overflowed()) {
RefillMarkingStack();
EmptyMarkingStack();
}
@@ -1165,15 +1218,17 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
void MarkCompactCollector::MarkObjectGroups() {
- List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups();
+ List<ObjectGroup*>* object_groups =
+ heap()->isolate()->global_handles()->object_groups();
+ int last = 0;
for (int i = 0; i < object_groups->length(); i++) {
ObjectGroup* entry = object_groups->at(i);
- if (entry == NULL) continue;
+ ASSERT(entry != NULL);
- List<Object**>& objects = entry->objects_;
+ Object*** objects = entry->objects_;
bool group_marked = false;
- for (int j = 0; j < objects.length(); j++) {
+ for (size_t j = 0; j < entry->length_; j++) {
Object* object = *objects[j];
if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
group_marked = true;
@@ -1181,20 +1236,54 @@ void MarkCompactCollector::MarkObjectGroups() {
}
}
- if (!group_marked) continue;
+ if (!group_marked) {
+ (*object_groups)[last++] = entry;
+ continue;
+ }
- // An object in the group is marked, so mark as gray all white heap
- // objects in the group.
- for (int j = 0; j < objects.length(); ++j) {
+ // An object in the group is marked, so mark all heap objects in
+ // the group.
+ for (size_t j = 0; j < entry->length_; ++j) {
if ((*objects[j])->IsHeapObject()) {
MarkObject(HeapObject::cast(*objects[j]));
}
}
- // Once the entire group has been colored gray, set the object group
- // to NULL so it won't be processed again.
- delete object_groups->at(i);
- object_groups->at(i) = NULL;
+
+ // Once the entire group has been marked, dispose it because it's
+ // not needed anymore.
+ entry->Dispose();
+ }
+ object_groups->Rewind(last);
+}
+
+
+void MarkCompactCollector::MarkImplicitRefGroups() {
+ List<ImplicitRefGroup*>* ref_groups =
+ heap()->isolate()->global_handles()->implicit_ref_groups();
+
+ int last = 0;
+ for (int i = 0; i < ref_groups->length(); i++) {
+ ImplicitRefGroup* entry = ref_groups->at(i);
+ ASSERT(entry != NULL);
+
+ if (!(*entry->parent_)->IsMarked()) {
+ (*ref_groups)[last++] = entry;
+ continue;
+ }
+
+ Object*** children = entry->children_;
+ // A parent object is marked, so mark all child heap objects.
+ for (size_t j = 0; j < entry->length_; ++j) {
+ if ((*children[j])->IsHeapObject()) {
+ MarkObject(HeapObject::cast(*children[j]));
+ }
+ }
+
+ // Once the entire group has been marked, dispose it because it's
+ // not needed anymore.
+ entry->Dispose();
}
+ ref_groups->Rewind(last);
}
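Both group-marking loops above use the same in-place retain idiom: survivors are compacted toward the front with a write cursor, processed entries are disposed, and a single Rewind() truncates the tail, so no second list is allocated. The generic form, using std::vector for illustration (Rewind(last) corresponds to resize(last)):

    #include <vector>

    // Keep entries for which should_dispose(entry) is false, preserving order.
    template <typename T, typename Pred>
    static void FilterAndDispose(std::vector<T*>* list, Pred should_dispose) {
      size_t last = 0;
      for (size_t i = 0; i < list->size(); i++) {
        T* entry = (*list)[i];
        if (should_dispose(entry)) {
          entry->Dispose();         // fully processed; release it
        } else {
          (*list)[last++] = entry;  // compact survivor toward the front
        }
      }
      list->resize(last);           // drop the disposed tail in one step
    }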
@@ -1203,10 +1292,10 @@ void MarkCompactCollector::MarkObjectGroups() {
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingStack() {
- while (!marking_stack.is_empty()) {
- HeapObject* object = marking_stack.Pop();
+ while (!marking_stack_.is_empty()) {
+ HeapObject* object = marking_stack_.Pop();
ASSERT(object->IsHeapObject());
- ASSERT(Heap::Contains(object));
+ ASSERT(heap()->Contains(object));
ASSERT(object->IsMarked());
ASSERT(!object->IsOverflowed());
@@ -1228,38 +1317,38 @@ void MarkCompactCollector::EmptyMarkingStack() {
// overflowed objects in the heap, so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingStack() {
- ASSERT(marking_stack.overflowed());
+ ASSERT(marking_stack_.overflowed());
- SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&new_it);
- if (marking_stack.is_full()) return;
+ SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
+ if (marking_stack_.is_full()) return;
- HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
+ HeapObjectIterator old_pointer_it(heap()->old_pointer_space(),
&OverflowObjectSize);
- ScanOverflowedObjects(&old_pointer_it);
- if (marking_stack.is_full()) return;
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
+ if (marking_stack_.is_full()) return;
- HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&old_data_it);
- if (marking_stack.is_full()) return;
+ HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
+ if (marking_stack_.is_full()) return;
- HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&code_it);
- if (marking_stack.is_full()) return;
+ HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
+ if (marking_stack_.is_full()) return;
- HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&map_it);
- if (marking_stack.is_full()) return;
+ HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
+ if (marking_stack_.is_full()) return;
- HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&cell_it);
- if (marking_stack.is_full()) return;
+ HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
+ if (marking_stack_.is_full()) return;
- LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&lo_it);
- if (marking_stack.is_full()) return;
+ LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
+ if (marking_stack_.is_full()) return;
- marking_stack.clear_overflowed();
+ marking_stack_.clear_overflowed();
}
@@ -1269,19 +1358,20 @@ void MarkCompactCollector::RefillMarkingStack() {
// objects in the heap.
void MarkCompactCollector::ProcessMarkingStack() {
EmptyMarkingStack();
- while (marking_stack.overflowed()) {
+ while (marking_stack_.overflowed()) {
RefillMarkingStack();
EmptyMarkingStack();
}
}
-void MarkCompactCollector::ProcessObjectGroups() {
+void MarkCompactCollector::ProcessExternalMarking() {
bool work_to_do = true;
- ASSERT(marking_stack.is_empty());
+ ASSERT(marking_stack_.is_empty());
while (work_to_do) {
MarkObjectGroups();
- work_to_do = !marking_stack.is_empty();
+ MarkImplicitRefGroups();
+ work_to_do = !marking_stack_.is_empty();
ProcessMarkingStack();
}
}
@@ -1292,7 +1382,7 @@ void MarkCompactCollector::MarkLiveObjects() {
// The recursive GC marker detects when it is nearing stack overflow,
// and switches to a different marking system. JS interrupts interfere
// with the C stack limit check.
- PostponeInterruptsScope postpone;
+ PostponeInterruptsScope postpone(heap()->isolate());
#ifdef DEBUG
ASSERT(state_ == PREPARE_GC);
@@ -1300,21 +1390,20 @@ void MarkCompactCollector::MarkLiveObjects() {
#endif
// The to space contains live objects, the from space is used as a marking
// stack.
- marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
- Heap::new_space()->FromSpaceHigh());
+ marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(),
+ heap()->new_space()->FromSpaceHigh());
- ASSERT(!marking_stack.overflowed());
+ ASSERT(!marking_stack_.overflowed());
PrepareForCodeFlushing();
- RootMarkingVisitor root_visitor;
+ RootMarkingVisitor root_visitor(heap());
MarkRoots(&root_visitor);
// The objects reachable from the roots are marked, yet unreachable
- // objects are unmarked. Mark objects reachable from object groups
- // containing at least one marked object, and continue until no new
- // objects are reachable from the object groups.
- ProcessObjectGroups();
+ // objects are unmarked. Mark objects reachable due to host
+ // application specific logic.
+ ProcessExternalMarking();
// The objects reachable from the roots or object groups are marked,
// yet unreachable objects are unmarked. Mark objects reachable
@@ -1322,61 +1411,65 @@ void MarkCompactCollector::MarkLiveObjects() {
//
// First we identify nonlive weak handles and mark them as pending
// destruction.
- GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject);
+ heap()->isolate()->global_handles()->IdentifyWeakHandles(
+ &IsUnmarkedHeapObject);
// Then we mark the objects and process the transitive closure.
- GlobalHandles::IterateWeakRoots(&root_visitor);
- while (marking_stack.overflowed()) {
+ heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
+ while (marking_stack_.overflowed()) {
RefillMarkingStack();
EmptyMarkingStack();
}
- // Repeat the object groups to mark unmarked groups reachable from the
- // weak roots.
- ProcessObjectGroups();
+ // Repeat host application-specific marking to mark unmarked objects
+ // reachable from the weak roots.
+ ProcessExternalMarking();
// Prune the symbol table, removing all symbols pointed to only by the
// symbol table. Cannot use symbol_table() here because the symbol
// table is marked.
- SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
- SymbolTableCleaner v;
+ SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
+ SymbolTableCleaner v(heap());
symbol_table->IterateElements(&v);
symbol_table->ElementsRemoved(v.PointersRemoved());
- ExternalStringTable::Iterate(&v);
- ExternalStringTable::CleanUp();
+ heap()->external_string_table_.Iterate(&v);
+ heap()->external_string_table_.CleanUp();
// Process the weak references.
MarkCompactWeakObjectRetainer mark_compact_object_retainer;
- Heap::ProcessWeakReferences(&mark_compact_object_retainer);
+ heap()->ProcessWeakReferences(&mark_compact_object_retainer);
// Remove object groups after marking phase.
- GlobalHandles::RemoveObjectGroups();
+ heap()->isolate()->global_handles()->RemoveObjectGroups();
+ heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
// Flush code from collected candidates.
- FlushCode::ProcessCandidates();
+ if (is_code_flushing_enabled()) {
+ code_flusher_->ProcessCandidates();
+ }
// Clean up dead objects from the runtime profiler.
- RuntimeProfiler::RemoveDeadSamples();
+ heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
}
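The weak-handle steps above have a visible embedder contract: a Persistent made weak keeps its target alive only while something else marks it, and IdentifyWeakHandles schedules the callback otherwise. A sketch using the v8.h API of this era (signatures assumed; NativeState is hypothetical):

    #include <v8.h>

    struct NativeState {};  // hypothetical per-wrapper native data

    void OnWeak(v8::Persistent<v8::Value> object, void* parameter) {
      // Reached only if marking left the wrapper unmarked.
      delete static_cast<NativeState*>(parameter);
      object.Dispose();
      object.Clear();
    }

    void WrapObject(v8::Handle<v8::Object> wrapper, NativeState* state) {
      v8::Persistent<v8::Object> handle =
          v8::Persistent<v8::Object>::New(wrapper);
      handle.MakeWeak(state, OnWeak);
    }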
#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
live_bytes_ += obj->Size();
- if (Heap::new_space()->Contains(obj)) {
+ if (heap()->new_space()->Contains(obj)) {
live_young_objects_size_ += obj->Size();
- } else if (Heap::map_space()->Contains(obj)) {
+ } else if (heap()->map_space()->Contains(obj)) {
ASSERT(obj->IsMap());
live_map_objects_size_ += obj->Size();
- } else if (Heap::cell_space()->Contains(obj)) {
+ } else if (heap()->cell_space()->Contains(obj)) {
ASSERT(obj->IsJSGlobalPropertyCell());
live_cell_objects_size_ += obj->Size();
- } else if (Heap::old_pointer_space()->Contains(obj)) {
+ } else if (heap()->old_pointer_space()->Contains(obj)) {
live_old_pointer_objects_size_ += obj->Size();
- } else if (Heap::old_data_space()->Contains(obj)) {
+ } else if (heap()->old_data_space()->Contains(obj)) {
live_old_data_objects_size_ += obj->Size();
- } else if (Heap::code_space()->Contains(obj)) {
+ } else if (heap()->code_space()->Contains(obj)) {
live_code_objects_size_ += obj->Size();
- } else if (Heap::lo_space()->Contains(obj)) {
+ } else if (heap()->lo_space()->Contains(obj)) {
live_lo_objects_size_ += obj->Size();
} else {
UNREACHABLE();
@@ -1392,7 +1485,7 @@ void MarkCompactCollector::SweepLargeObjectSpace() {
compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
#endif
// Deallocate unmarked objects and clear marked bits for marked objects.
- Heap::lo_space()->FreeUnmarkedObjects();
+ heap()->lo_space()->FreeUnmarkedObjects();
}
@@ -1405,7 +1498,7 @@ bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
void MarkCompactCollector::ClearNonLiveTransitions() {
- HeapObjectIterator map_iterator(Heap::map_space(), &SizeOfMarkedObject);
+ HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject);
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. At the same time,
// set all the prototype fields of maps back to their original value,
@@ -1423,8 +1516,8 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
ASSERT(SafeIsMap(map));
// Only JSObject and subtypes have map transitions and back pointers.
- if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
- if (map->instance_type() > JS_FUNCTION_TYPE) continue;
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
if (map->IsMarked() && map->attached_to_shared_function_info()) {
// This map is used for inobject slack tracking and has been detached
@@ -1433,6 +1526,48 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
}
+ // Clear dead prototype transitions.
+ int number_of_transitions = map->NumberOfProtoTransitions();
+ FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
+ int new_number_of_transitions = 0;
+ const int header = Map::kProtoTransitionHeaderSize;
+ const int proto_offset =
+ header + Map::kProtoTransitionPrototypeOffset;
+ const int map_offset = header + Map::kProtoTransitionMapOffset;
+ const int step = Map::kProtoTransitionElementsPerEntry;
+ for (int i = 0; i < number_of_transitions; i++) {
+ Object* prototype = prototype_transitions->get(proto_offset + i * step);
+ Object* cached_map = prototype_transitions->get(map_offset + i * step);
+ if (HeapObject::cast(prototype)->IsMarked() &&
+ HeapObject::cast(cached_map)->IsMarked()) {
+ if (new_number_of_transitions != i) {
+ prototype_transitions->set_unchecked(
+ heap_,
+ proto_offset + new_number_of_transitions * step,
+ prototype,
+ UPDATE_WRITE_BARRIER);
+ prototype_transitions->set_unchecked(
+ heap_,
+ map_offset + new_number_of_transitions * step,
+ cached_map,
+ SKIP_WRITE_BARRIER);
+ }
+ new_number_of_transitions++;
+ }
+ }
+
+ // Fill slots that became free with undefined value.
+ Object* undefined = heap()->raw_unchecked_undefined_value();
+ for (int i = new_number_of_transitions * step;
+ i < number_of_transitions * step;
+ i++) {
+ prototype_transitions->set_unchecked(heap_,
+ header + i,
+ undefined,
+ SKIP_WRITE_BARRIER);
+ }
+ map->SetNumberOfProtoTransitions(new_number_of_transitions);
+
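The prototype-transition cache compacted above is a flat FixedArray: a header of Map::kProtoTransitionHeaderSize slots followed by one (prototype, cached map) pair per entry, each entry Map::kProtoTransitionElementsPerEntry slots wide. A worked example with illustrative values, header = 1 and step = 2 (the real constants live in objects.h):

    // Three entries, the middle pair dead (P1 or M1 unmarked):
    //
    //   slot:    0 |  1  2 |  3  4 |  5  6
    //   value:   H | P0 M0 | P1 M1 | P2 M2
    //
    // The loop slides the surviving pair left, and the tail is filled
    // with undefined so no stale map pointers survive the GC:
    //
    //   value:   H | P0 M0 | P2 M2 |  u  u
    //
    // SetNumberOfProtoTransitions(2) then records the new entry count.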
// Follow the chain of back pointers to find the prototype.
Map* current = map;
while (SafeIsMap(current)) {
@@ -1455,7 +1590,7 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// This test will always be false on the first iteration.
if (on_dead_path && current->IsMarked()) {
on_dead_path = false;
- current->ClearNonLiveTransitions(real_prototype);
+ current->ClearNonLiveTransitions(heap(), real_prototype);
}
*HeapObject::RawField(current, Map::kPrototypeOffset) =
real_prototype;
@@ -1517,20 +1652,21 @@ void EncodeFreeRegion(Address free_start, int free_size) {
// Try to promote all objects in new space. Heap numbers and sequential
// strings are promoted to the old data space, large objects to large object
// space, and all others to the old pointer space.
-inline MaybeObject* MCAllocateFromNewSpace(HeapObject* object,
+inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
+ HeapObject* object,
int object_size) {
MaybeObject* forwarded;
- if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
+ if (object_size > heap->MaxObjectSizeInPagedSpace()) {
forwarded = Failure::Exception();
} else {
- OldSpace* target_space = Heap::TargetSpace(object);
- ASSERT(target_space == Heap::old_pointer_space() ||
- target_space == Heap::old_data_space());
+ OldSpace* target_space = heap->TargetSpace(object);
+ ASSERT(target_space == heap->old_pointer_space() ||
+ target_space == heap->old_data_space());
forwarded = target_space->MCAllocateRaw(object_size);
}
Object* result;
if (!forwarded->ToObject(&result)) {
- result = Heap::new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
+ result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
}
return result;
}
@@ -1538,48 +1674,53 @@ inline MaybeObject* MCAllocateFromNewSpace(HeapObject* object,
// Allocation functions for the paged spaces call the space's MCAllocateRaw.
MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
+ Heap* heap,
HeapObject* ignore,
int object_size) {
- return Heap::old_pointer_space()->MCAllocateRaw(object_size);
+ return heap->old_pointer_space()->MCAllocateRaw(object_size);
}
MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
+ Heap* heap,
HeapObject* ignore,
int object_size) {
- return Heap::old_data_space()->MCAllocateRaw(object_size);
+ return heap->old_data_space()->MCAllocateRaw(object_size);
}
MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
+ Heap* heap,
HeapObject* ignore,
int object_size) {
- return Heap::code_space()->MCAllocateRaw(object_size);
+ return heap->code_space()->MCAllocateRaw(object_size);
}
MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
+ Heap* heap,
HeapObject* ignore,
int object_size) {
- return Heap::map_space()->MCAllocateRaw(object_size);
+ return heap->map_space()->MCAllocateRaw(object_size);
}
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(HeapObject* ignore,
- int object_size) {
- return Heap::cell_space()->MCAllocateRaw(object_size);
+MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
+ Heap* heap, HeapObject* ignore, int object_size) {
+ return heap->cell_space()->MCAllocateRaw(object_size);
}
// The forwarding address is encoded at the same offset as the current
// to-space object, but in from space.
-inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
+inline void EncodeForwardingAddressInNewSpace(Heap* heap,
+ HeapObject* old_object,
int object_size,
Object* new_object,
int* ignored) {
int offset =
- Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
- Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
+ heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
+ Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
HeapObject::cast(new_object)->address();
}
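This encoding works because the two semispaces are congruent: an object's offset within to-space selects the slot at the same offset in from-space, which is unused during a mark-compact cycle and doubles as the forwarding table. Both halves of the protocol, spelled out with the accessors used in this diff:

    // Writing a forwarding entry (what the function above does):
    int offset =
        heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
    Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
        HeapObject::cast(new_object)->address();

    // Reading it back (as RelocateNewObject and UpdatingVisitor do later):
    Address new_addr =
        Memory::Address_at(heap->new_space()->FromSpaceLow() + offset);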
@@ -1587,7 +1728,8 @@ inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
// The forwarding address is encoded in the map pointer of the object as an
// offset (in terms of live bytes) from the address of the first live object
// in the page.
-inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
+inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
+ HeapObject* old_object,
int object_size,
Object* new_object,
int* offset) {
@@ -1606,7 +1748,7 @@ inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
// Most non-live objects are ignored.
-inline void IgnoreNonLiveObject(HeapObject* object) {}
+inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
// Function template that, given a range of addresses (e.g., a semispace or a
@@ -1620,7 +1762,8 @@ inline void IgnoreNonLiveObject(HeapObject* object) {}
template<MarkCompactCollector::AllocationFunction Alloc,
MarkCompactCollector::EncodingFunction Encode,
MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
-inline void EncodeForwardingAddressesInRange(Address start,
+inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
+ Address start,
Address end,
int* offset) {
// The start address of the current free region while sweeping the space.
@@ -1640,12 +1783,12 @@ inline void EncodeForwardingAddressesInRange(Address start,
HeapObject* object = HeapObject::FromAddress(current);
if (object->IsMarked()) {
object->ClearMark();
- MarkCompactCollector::tracer()->decrement_marked_count();
+ collector->tracer()->decrement_marked_count();
object_size = object->Size();
- // Allocation cannot fail, because we are compacting the space.
- Object* forwarded = Alloc(object, object_size)->ToObjectUnchecked();
- Encode(object, object_size, forwarded, offset);
+ Object* forwarded =
+ Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
+ Encode(collector->heap(), object, object_size, forwarded, offset);
#ifdef DEBUG
if (FLAG_gc_verbose) {
@@ -1659,7 +1802,7 @@ inline void EncodeForwardingAddressesInRange(Address start,
}
} else { // Non-live object.
object_size = object->Size();
- ProcessNonLive(object);
+ ProcessNonLive(object, collector->heap()->isolate());
if (is_prev_alive) { // Transition from live to non-live.
free_start = current;
is_prev_alive = false;
@@ -1681,8 +1824,9 @@ void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
EncodeForwardingAddressInNewSpace,
IgnoreNonLiveObject>(
- Heap::new_space()->bottom(),
- Heap::new_space()->top(),
+ this,
+ heap()->new_space()->bottom(),
+ heap()->new_space()->top(),
&ignored);
}
@@ -1701,6 +1845,7 @@ void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
EncodeForwardingAddressesInRange<Alloc,
EncodeForwardingAddressInPagedSpace,
ProcessNonLive>(
+ this,
p->ObjectAreaStart(),
p->AllocationTop(),
&offset);
@@ -1718,14 +1863,15 @@ void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
// to encounter pointers to dead objects during traversal of dirty regions we
// should clear them to avoid encountering them during the next dirty regions
// iteration.
-static void MigrateObject(Address dst,
+static void MigrateObject(Heap* heap,
+ Address dst,
Address src,
int size,
bool to_old_space) {
if (to_old_space) {
- Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
+ heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
} else {
- Heap::CopyBlock(dst, src, size);
+ heap->CopyBlock(dst, src, size);
}
Memory::Address_at(src) = dst;
@@ -1735,14 +1881,14 @@ static void MigrateObject(Address dst,
class StaticPointersToNewGenUpdatingVisitor : public
StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
public:
- static inline void VisitPointer(Object** p) {
+ static inline void VisitPointer(Heap* heap, Object** p) {
if (!(*p)->IsHeapObject()) return;
HeapObject* obj = HeapObject::cast(*p);
Address old_addr = obj->address();
- if (Heap::new_space()->Contains(obj)) {
- ASSERT(Heap::InFromSpace(*p));
+ if (heap->new_space()->Contains(obj)) {
+ ASSERT(heap->InFromSpace(*p));
*p = HeapObject::FromAddress(Memory::Address_at(old_addr));
}
}
@@ -1753,13 +1899,15 @@ class StaticPointersToNewGenUpdatingVisitor : public
// It does not expect to encounter pointers to dead objects.
class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
public:
+ explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { }
+
void VisitPointer(Object** p) {
- StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
+ StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
}
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
- StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
+ StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
}
}
@@ -1779,6 +1927,9 @@ class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
VisitPointer(&target);
rinfo->set_call_address(Code::cast(target)->instruction_start());
}
+
+ private:
+ Heap* heap_;
};
@@ -1789,7 +1940,7 @@ static void UpdatePointerToNewGen(HeapObject** p) {
if (!(*p)->IsHeapObject()) return;
Address old_addr = (*p)->address();
- ASSERT(Heap::InFromSpace(*p));
+ ASSERT(HEAP->InFromSpace(*p));
Address new_addr = Memory::Address_at(old_addr);
@@ -1803,39 +1954,42 @@ static void UpdatePointerToNewGen(HeapObject** p) {
}
-static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object **p) {
+static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
+ Object** p) {
Address old_addr = HeapObject::cast(*p)->address();
Address new_addr = Memory::Address_at(old_addr);
return String::cast(HeapObject::FromAddress(new_addr));
}
-static bool TryPromoteObject(HeapObject* object, int object_size) {
+static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) {
Object* result;
- if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
+ if (object_size > heap->MaxObjectSizeInPagedSpace()) {
MaybeObject* maybe_result =
- Heap::lo_space()->AllocateRawFixedArray(object_size);
+ heap->lo_space()->AllocateRawFixedArray(object_size);
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(), object->address(), object_size, true);
- MarkCompactCollector::tracer()->
+ MigrateObject(heap, target->address(), object->address(), object_size,
+ true);
+ heap->mark_compact_collector()->tracer()->
increment_promoted_objects_size(object_size);
return true;
}
} else {
- OldSpace* target_space = Heap::TargetSpace(object);
+ OldSpace* target_space = heap->TargetSpace(object);
- ASSERT(target_space == Heap::old_pointer_space() ||
- target_space == Heap::old_data_space());
+ ASSERT(target_space == heap->old_pointer_space() ||
+ target_space == heap->old_data_space());
MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(),
+ MigrateObject(heap,
+ target->address(),
object->address(),
object_size,
- target_space == Heap::old_pointer_space());
- MarkCompactCollector::tracer()->
+ target_space == heap->old_pointer_space());
+ heap->mark_compact_collector()->tracer()->
increment_promoted_objects_size(object_size);
return true;
}
@@ -1845,8 +1999,8 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
}
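TryPromoteObject encodes the promotion policy for the new-space sweep: objects too large for a regular page go to the large-object space, and everything else goes to the old space chosen by Heap::TargetSpace, old data space for pointer-free objects and old pointer space for the rest. The decision alone, as a sketch (the real code returns false on allocation failure and the object then stays in new space):

    static Space* PromotionTarget(Heap* heap, HeapObject* object, int size) {
      if (size > heap->MaxObjectSizeInPagedSpace()) {
        return heap->lo_space();         // does not fit on a regular page
      }
      return heap->TargetSpace(object);  // old_data_space or old_pointer_space
    }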
-static void SweepNewSpace(NewSpace* space) {
- Heap::CheckNewSpaceExpansionCriteria();
+static void SweepNewSpace(Heap* heap, NewSpace* space) {
+ heap->CheckNewSpaceExpansionCriteria();
Address from_bottom = space->bottom();
Address from_top = space->top();
@@ -1866,13 +2020,13 @@ static void SweepNewSpace(NewSpace* space) {
if (object->IsMarked()) {
object->ClearMark();
- MarkCompactCollector::tracer()->decrement_marked_count();
+ heap->mark_compact_collector()->tracer()->decrement_marked_count();
size = object->Size();
survivors_size += size;
// Aggressively promote young survivors to the old space.
- if (TryPromoteObject(object, size)) {
+ if (TryPromoteObject(heap, object, size)) {
continue;
}
@@ -1880,7 +2034,8 @@ static void SweepNewSpace(NewSpace* space) {
// Allocation cannot fail at this point: semispaces are of equal size.
Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
- MigrateObject(HeapObject::cast(target)->address(),
+ MigrateObject(heap,
+ HeapObject::cast(target)->address(),
current,
size,
false);
@@ -1894,7 +2049,7 @@ static void SweepNewSpace(NewSpace* space) {
}
// Second pass: find pointers to new space and update them.
- PointersToNewGenUpdatingVisitor updating_visitor;
+ PointersToNewGenUpdatingVisitor updating_visitor(heap);
// Update pointers in to space.
Address current = space->bottom();
@@ -1906,19 +2061,19 @@ static void SweepNewSpace(NewSpace* space) {
}
// Update roots.
- Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
+ heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
LiveObjectList::IterateElements(&updating_visitor);
// Update pointers in old spaces.
- Heap::IterateDirtyRegions(Heap::old_pointer_space(),
+ heap->IterateDirtyRegions(heap->old_pointer_space(),
&Heap::IteratePointersInDirtyRegion,
&UpdatePointerToNewGen,
- Heap::WATERMARK_SHOULD_BE_VALID);
+ heap->WATERMARK_SHOULD_BE_VALID);
- Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
+ heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
// Update pointers from cells.
- HeapObjectIterator cell_iterator(Heap::cell_space());
+ HeapObjectIterator cell_iterator(heap->cell_space());
for (HeapObject* cell = cell_iterator.next();
cell != NULL;
cell = cell_iterator.next()) {
@@ -1931,22 +2086,22 @@ static void SweepNewSpace(NewSpace* space) {
}
// Update pointer from the global contexts list.
- updating_visitor.VisitPointer(Heap::global_contexts_list_address());
+ updating_visitor.VisitPointer(heap->global_contexts_list_address());
// Update pointers from external string table.
- Heap::UpdateNewSpaceReferencesInExternalStringTable(
+ heap->UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
// All pointers were updated. Update auxiliary allocation info.
- Heap::IncrementYoungSurvivorsCounter(survivors_size);
+ heap->IncrementYoungSurvivorsCounter(survivors_size);
space->set_age_mark(space->top());
// Update JSFunction pointers from the runtime profiler.
- RuntimeProfiler::UpdateSamplesAfterScavenge();
+ heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
}
-static void SweepSpace(PagedSpace* space) {
+static void SweepSpace(Heap* heap, PagedSpace* space) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
// During sweeping of paged space we are trying to find the longest sequences
@@ -1984,7 +2139,7 @@ static void SweepSpace(PagedSpace* space) {
object = HeapObject::FromAddress(current);
if (object->IsMarked()) {
object->ClearMark();
- MarkCompactCollector::tracer()->decrement_marked_count();
+ heap->mark_compact_collector()->tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
space->DeallocateBlock(free_start,
@@ -1993,7 +2148,8 @@ static void SweepSpace(PagedSpace* space) {
is_previous_alive = true;
}
} else {
- MarkCompactCollector::ReportDeleteIfNeeded(object);
+ heap->mark_compact_collector()->ReportDeleteIfNeeded(
+ object, heap->isolate());
if (is_previous_alive) { // Transition from live to free.
free_start = current;
is_previous_alive = false;
@@ -2093,24 +2249,24 @@ void MarkCompactCollector::EncodeForwardingAddresses() {
// Objects in the active semispace of the young generation may be
// relocated to the inactive semispace (if not promoted). Set the
// relocation info to the beginning of the inactive semispace.
- Heap::new_space()->MCResetRelocationInfo();
+ heap()->new_space()->MCResetRelocationInfo();
// Compute the forwarding pointers in each space.
EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
ReportDeleteIfNeeded>(
- Heap::old_pointer_space());
+ heap()->old_pointer_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
IgnoreNonLiveObject>(
- Heap::old_data_space());
+ heap()->old_data_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
ReportDeleteIfNeeded>(
- Heap::code_space());
+ heap()->code_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
IgnoreNonLiveObject>(
- Heap::cell_space());
+ heap()->cell_space());
// Compute new space next to last after the old and code spaces have been
@@ -2122,25 +2278,26 @@ void MarkCompactCollector::EncodeForwardingAddresses() {
// non-live map pointers to get the sizes of non-live objects.
EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
IgnoreNonLiveObject>(
- Heap::map_space());
+ heap()->map_space());
// Write relocation info to the top page, so we can use it later. This is
// done after promoting objects from the new space so we get the correct
// allocation top.
- Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
- Heap::old_data_space()->MCWriteRelocationInfoToPage();
- Heap::code_space()->MCWriteRelocationInfoToPage();
- Heap::map_space()->MCWriteRelocationInfoToPage();
- Heap::cell_space()->MCWriteRelocationInfoToPage();
+ heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
+ heap()->old_data_space()->MCWriteRelocationInfoToPage();
+ heap()->code_space()->MCWriteRelocationInfoToPage();
+ heap()->map_space()->MCWriteRelocationInfoToPage();
+ heap()->cell_space()->MCWriteRelocationInfoToPage();
}
class MapIterator : public HeapObjectIterator {
public:
- MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }
+ explicit MapIterator(Heap* heap)
+ : HeapObjectIterator(heap->map_space(), &SizeCallback) { }
- explicit MapIterator(Address start)
- : HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }
+ MapIterator(Heap* heap, Address start)
+ : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }
private:
static int SizeCallback(HeapObject* unused) {
@@ -2152,10 +2309,12 @@ class MapIterator : public HeapObjectIterator {
class MapCompact {
public:
- explicit MapCompact(int live_maps)
- : live_maps_(live_maps),
- to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
- map_to_evacuate_it_(to_evacuate_start_),
+ explicit MapCompact(Heap* heap, int live_maps)
+ : heap_(heap),
+ live_maps_(live_maps),
+ to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
+ vacant_map_it_(heap),
+ map_to_evacuate_it_(heap, to_evacuate_start_),
first_map_to_evacuate_(
reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
}
@@ -2175,37 +2334,44 @@ class MapCompact {
}
void UpdateMapPointersInRoots() {
- Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
- GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
- LiveObjectList::IterateElements(&map_updating_visitor_);
+ MapUpdatingVisitor map_updating_visitor;
+ heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
+ heap()->isolate()->global_handles()->IterateWeakRoots(
+ &map_updating_visitor);
+ LiveObjectList::IterateElements(&map_updating_visitor);
}
void UpdateMapPointersInPagedSpace(PagedSpace* space) {
- ASSERT(space != Heap::map_space());
+ ASSERT(space != heap()->map_space());
PageIterator it(space, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
Page* p = it.next();
- UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
+ UpdateMapPointersInRange(heap(),
+ p->ObjectAreaStart(),
+ p->AllocationTop());
}
}
void UpdateMapPointersInNewSpace() {
- NewSpace* space = Heap::new_space();
- UpdateMapPointersInRange(space->bottom(), space->top());
+ NewSpace* space = heap()->new_space();
+ UpdateMapPointersInRange(heap(), space->bottom(), space->top());
}
void UpdateMapPointersInLargeObjectSpace() {
- LargeObjectIterator it(Heap::lo_space());
+ LargeObjectIterator it(heap()->lo_space());
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
- UpdateMapPointersInObject(obj);
+ UpdateMapPointersInObject(heap(), obj);
}
void Finish() {
- Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
+ heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
}
+ inline Heap* heap() const { return heap_; }
+
private:
+ Heap* heap_;
int live_maps_;
Address to_evacuate_start_;
MapIterator vacant_map_it_;
@@ -2215,6 +2381,8 @@ class MapCompact {
// Helper class for updating map pointers in HeapObjects.
class MapUpdatingVisitor: public ObjectVisitor {
public:
+ MapUpdatingVisitor() {}
+
void VisitPointer(Object** p) {
UpdateMapPointer(p);
}
@@ -2237,8 +2405,6 @@ class MapCompact {
}
};
- static MapUpdatingVisitor map_updating_visitor_;
-
static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
while (true) {
HeapObject* next = it->next();
@@ -2272,9 +2438,8 @@ class MapCompact {
ASSERT(Map::kSize % 4 == 0);
- Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
- map_to_evacuate->address(),
- Map::kSize);
+ map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
+ vacant_map->address(), map_to_evacuate->address(), Map::kSize);
ASSERT(vacant_map->IsMap()); // Due to memcpy above.
@@ -2294,15 +2459,15 @@ class MapCompact {
return new_map;
}
- static int UpdateMapPointersInObject(HeapObject* obj) {
+ static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
ASSERT(!obj->IsMarked());
Map* map = obj->map();
- ASSERT(Heap::map_space()->Contains(map));
+ ASSERT(heap->map_space()->Contains(map));
MapWord map_word = map->map_word();
ASSERT(!map_word.IsMarked());
if (map_word.IsOverflowed()) {
Map* new_map = GetForwardedMap(map_word);
- ASSERT(Heap::map_space()->Contains(new_map));
+ ASSERT(heap->map_space()->Contains(new_map));
obj->set_map(new_map);
#ifdef DEBUG
@@ -2316,16 +2481,17 @@ class MapCompact {
}
int size = obj->SizeFromMap(map);
- obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
+ MapUpdatingVisitor map_updating_visitor;
+ obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
return size;
}
- static void UpdateMapPointersInRange(Address start, Address end) {
+ static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
HeapObject* object;
int size;
for (Address current = start; current < end; current += size) {
object = HeapObject::FromAddress(current);
- size = UpdateMapPointersInObject(object);
+ size = UpdateMapPointersInObject(heap, object);
ASSERT(size > 0);
}
}
@@ -2342,8 +2508,6 @@ class MapCompact {
#endif
};
-MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;
-
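Map compaction leans on every Map being exactly Map::kSize bytes: the live-map count is obtained by plain division, and TopAfterCompaction(live_maps) is the space start plus live_maps * Map::kSize. Maps above that boundary are evacuated downward into vacant slots, which is what the vacant_map_it_ / map_to_evacuate_it_ pair walks. A condensed sketch of the driver sequence from SweepSpaces() below, with error paths elided:

    intptr_t live_maps_size = heap()->map_space()->Size();
    int live_maps = static_cast<int>(live_maps_size / Map::kSize);  // exact
    if (heap()->map_space()->NeedsCompaction(live_maps)) {
      MapCompact map_compact(heap(), live_maps);
      map_compact.CompactMaps();               // slide maps below the boundary
      map_compact.UpdateMapPointersInRoots();  // then rewrite map pointers in
      map_compact.UpdateMapPointersInNewSpace();        // every other space
      map_compact.UpdateMapPointersInLargeObjectSpace();
      map_compact.Finish();                    // shrink map space to new top
    }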
void MarkCompactCollector::SweepSpaces() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
@@ -2355,26 +2519,26 @@ void MarkCompactCollector::SweepSpaces() {
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
- SweepSpace(Heap::old_pointer_space());
- SweepSpace(Heap::old_data_space());
- SweepSpace(Heap::code_space());
- SweepSpace(Heap::cell_space());
+ SweepSpace(heap(), heap()->old_pointer_space());
+ SweepSpace(heap(), heap()->old_data_space());
+ SweepSpace(heap(), heap()->code_space());
+ SweepSpace(heap(), heap()->cell_space());
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
- SweepNewSpace(Heap::new_space());
+ SweepNewSpace(heap(), heap()->new_space());
}
- SweepSpace(Heap::map_space());
+ SweepSpace(heap(), heap()->map_space());
- Heap::IterateDirtyRegions(Heap::map_space(),
- &Heap::IteratePointersInDirtyMapsRegion,
- &UpdatePointerToNewGen,
- Heap::WATERMARK_SHOULD_BE_VALID);
+ heap()->IterateDirtyRegions(heap()->map_space(),
+ &heap()->IteratePointersInDirtyMapsRegion,
+ &UpdatePointerToNewGen,
+ heap()->WATERMARK_SHOULD_BE_VALID);
- intptr_t live_maps_size = Heap::map_space()->Size();
+ intptr_t live_maps_size = heap()->map_space()->Size();
int live_maps = static_cast<int>(live_maps_size / Map::kSize);
ASSERT(live_map_objects_size_ == live_maps_size);
- if (Heap::map_space()->NeedsCompaction(live_maps)) {
- MapCompact map_compact(live_maps);
+ if (heap()->map_space()->NeedsCompaction(live_maps)) {
+ MapCompact map_compact(heap(), live_maps);
map_compact.CompactMaps();
map_compact.UpdateMapPointersInRoots();
@@ -2382,7 +2546,7 @@ void MarkCompactCollector::SweepSpaces() {
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL; space = spaces.next()) {
- if (space == Heap::map_space()) continue;
+ if (space == heap()->map_space()) continue;
map_compact.UpdateMapPointersInPagedSpace(space);
}
map_compact.UpdateMapPointersInNewSpace();
@@ -2401,7 +2565,7 @@ void MarkCompactCollector::SweepSpaces() {
int MarkCompactCollector::IterateLiveObjectsInRange(
Address start,
Address end,
- HeapObjectCallback size_func) {
+ LiveObjectCallback size_func) {
int live_objects_size = 0;
Address current = start;
while (current < end) {
@@ -2411,7 +2575,7 @@ int MarkCompactCollector::IterateLiveObjectsInRange(
} else if (encoded_map == kMultiFreeEncoding) {
current += Memory::int_at(current + kIntSize);
} else {
- int size = size_func(HeapObject::FromAddress(current));
+ int size = (this->*size_func)(HeapObject::FromAddress(current));
current += size;
live_objects_size += size;
}
@@ -2420,15 +2584,15 @@ int MarkCompactCollector::IterateLiveObjectsInRange(
}
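The switch from HeapObjectCallback to LiveObjectCallback turns the size and relocate callbacks from free functions into pointer-to-member functions, since they now need the collector's instance state. The ->* invocation syntax above is easy to misread, so here is a self-contained miniature:

    #include <cstdio>

    struct Collector {
      int heap_state;
      int SizeOf(int obj) { return obj + heap_state; }  // needs instance data
    };

    // Same shape as LiveObjectCallback in mark-compact.h below.
    typedef int (Collector::*LiveObjectCallback)(int obj);

    int Iterate(Collector* c, LiveObjectCallback size_func) {
      return (c->*size_func)(40);  // dereference the member pointer on c
    }

    int main() {
      Collector c = { 2 };
      std::printf("%d\n", Iterate(&c, &Collector::SizeOf));  // prints 42
      return 0;
    }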
-int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
- HeapObjectCallback size_f) {
+int MarkCompactCollector::IterateLiveObjects(
+ NewSpace* space, LiveObjectCallback size_f) {
ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
}
-int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
- HeapObjectCallback size_f) {
+int MarkCompactCollector::IterateLiveObjects(
+ PagedSpace* space, LiveObjectCallback size_f) {
ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
int total = 0;
PageIterator it(space, PageIterator::PAGES_IN_USE);
@@ -2448,6 +2612,8 @@ int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
// Helper class for updating pointers in HeapObjects.
class UpdatingVisitor: public ObjectVisitor {
public:
+ explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}
+
void VisitPointer(Object** p) {
UpdatePointer(p);
}
@@ -2476,6 +2642,8 @@ class UpdatingVisitor: public ObjectVisitor {
reinterpret_cast<Code*>(target)->instruction_start());
}
+ inline Heap* heap() const { return heap_; }
+
private:
void UpdatePointer(Object** p) {
if (!(*p)->IsHeapObject()) return;
@@ -2483,27 +2651,27 @@ class UpdatingVisitor: public ObjectVisitor {
HeapObject* obj = HeapObject::cast(*p);
Address old_addr = obj->address();
Address new_addr;
- ASSERT(!Heap::InFromSpace(obj));
+ ASSERT(!heap()->InFromSpace(obj));
- if (Heap::new_space()->Contains(obj)) {
+ if (heap()->new_space()->Contains(obj)) {
Address forwarding_pointer_addr =
- Heap::new_space()->FromSpaceLow() +
- Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
+ heap()->new_space()->FromSpaceLow() +
+ heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
new_addr = Memory::Address_at(forwarding_pointer_addr);
#ifdef DEBUG
- ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
- Heap::old_data_space()->Contains(new_addr) ||
- Heap::new_space()->FromSpaceContains(new_addr) ||
- Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));
-
- if (Heap::new_space()->FromSpaceContains(new_addr)) {
- ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
- Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
+ ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
+ heap()->old_data_space()->Contains(new_addr) ||
+ heap()->new_space()->FromSpaceContains(new_addr) ||
+ heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
+
+ if (heap()->new_space()->FromSpaceContains(new_addr)) {
+ ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
+ heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
}
#endif
- } else if (Heap::lo_space()->Contains(obj)) {
+ } else if (heap()->lo_space()->Contains(obj)) {
// Don't move objects in the large object space.
return;
@@ -2532,6 +2700,8 @@ class UpdatingVisitor: public ObjectVisitor {
}
#endif
}
+
+ Heap* heap_;
};
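UpdatePointer above is the one place all three forwarding schemes meet. Where a forwarded address comes from depends on the space, as this condensed sketch shows (GetForwardingAddressInOldSpace is the collector's helper used elsewhere in this file; it is written as a free function here for brevity):

    static Address Forwarded(Heap* heap, HeapObject* obj) {
      Address old_addr = obj->address();
      if (heap->new_space()->Contains(obj)) {
        // Mirror slot in from-space, written by
        // EncodeForwardingAddressInNewSpace.
        int offset = heap->new_space()->ToSpaceOffsetForAddress(old_addr);
        return Memory::Address_at(heap->new_space()->FromSpaceLow() + offset);
      }
      if (heap->lo_space()->Contains(obj)) {
        return old_addr;  // large objects never move
      }
      // Paged spaces: the map word encodes the offset, in live bytes, from
      // the first live object on the page.
      return GetForwardingAddressInOldSpace(obj);
    }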
@@ -2540,31 +2710,34 @@ void MarkCompactCollector::UpdatePointers() {
ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
state_ = UPDATE_POINTERS;
#endif
- UpdatingVisitor updating_visitor;
- RuntimeProfiler::UpdateSamplesAfterCompact(&updating_visitor);
- Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
- GlobalHandles::IterateWeakRoots(&updating_visitor);
+ UpdatingVisitor updating_visitor(heap());
+ heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
+ &updating_visitor);
+ heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
+ heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
// Update the pointer to the head of the weak list of global contexts.
- updating_visitor.VisitPointer(&Heap::global_contexts_list_);
+ updating_visitor.VisitPointer(&heap()->global_contexts_list_);
LiveObjectList::IterateElements(&updating_visitor);
- int live_maps_size = IterateLiveObjects(Heap::map_space(),
- &UpdatePointersInOldObject);
- int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
- &UpdatePointersInOldObject);
- int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
- &UpdatePointersInOldObject);
- int live_codes_size = IterateLiveObjects(Heap::code_space(),
- &UpdatePointersInOldObject);
- int live_cells_size = IterateLiveObjects(Heap::cell_space(),
- &UpdatePointersInOldObject);
- int live_news_size = IterateLiveObjects(Heap::new_space(),
- &UpdatePointersInNewObject);
+ int live_maps_size = IterateLiveObjects(
+ heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_pointer_olds_size = IterateLiveObjects(
+ heap()->old_pointer_space(),
+ &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_data_olds_size = IterateLiveObjects(
+ heap()->old_data_space(),
+ &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_codes_size = IterateLiveObjects(
+ heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_cells_size = IterateLiveObjects(
+ heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_news_size = IterateLiveObjects(
+ heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
// Large objects do not move, the map word can be updated directly.
- LargeObjectIterator it(Heap::lo_space());
+ LargeObjectIterator it(heap()->lo_space());
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
UpdatePointersInNewObject(obj);
}
@@ -2591,8 +2764,8 @@ int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
Address forwarded = GetForwardingAddressInOldSpace(old_map);
- ASSERT(Heap::map_space()->Contains(old_map));
- ASSERT(Heap::map_space()->Contains(forwarded));
+ ASSERT(heap()->map_space()->Contains(old_map));
+ ASSERT(heap()->map_space()->Contains(forwarded));
#ifdef DEBUG
if (FLAG_gc_verbose) {
PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
@@ -2607,7 +2780,7 @@ int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
int obj_size = obj->SizeFromMap(old_map);
// Update pointers in the object body.
- UpdatingVisitor updating_visitor;
+ UpdatingVisitor updating_visitor(heap());
obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
return obj_size;
}
@@ -2616,8 +2789,8 @@ int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
// Decode the map pointer.
MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
- ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+ Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+ ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
// At this point, the first word of map_addr is also encoded, cannot
// cast it to Map* using Map::cast.
@@ -2638,7 +2811,7 @@ int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
#endif
// Update pointers in the object body.
- UpdatingVisitor updating_visitor;
+ UpdatingVisitor updating_visitor(heap());
obj->IterateBody(type, obj_size, &updating_visitor);
return obj_size;
}
@@ -2694,18 +2867,19 @@ void MarkCompactCollector::RelocateObjects() {
#endif
// Relocates objects, always relocate map objects first. Relocating
// objects in other space relies on map objects to get object size.
- int live_maps_size = IterateLiveObjects(Heap::map_space(),
- &RelocateMapObject);
- int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
- &RelocateOldPointerObject);
- int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
- &RelocateOldDataObject);
- int live_codes_size = IterateLiveObjects(Heap::code_space(),
- &RelocateCodeObject);
- int live_cells_size = IterateLiveObjects(Heap::cell_space(),
- &RelocateCellObject);
- int live_news_size = IterateLiveObjects(Heap::new_space(),
- &RelocateNewObject);
+ int live_maps_size = IterateLiveObjects(
+ heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
+ int live_pointer_olds_size = IterateLiveObjects(
+ heap()->old_pointer_space(),
+ &MarkCompactCollector::RelocateOldPointerObject);
+ int live_data_olds_size = IterateLiveObjects(
+ heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
+ int live_codes_size = IterateLiveObjects(
+ heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
+ int live_cells_size = IterateLiveObjects(
+ heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
+ int live_news_size = IterateLiveObjects(
+ heap()->new_space(), &MarkCompactCollector::RelocateNewObject);
USE(live_maps_size);
USE(live_pointer_olds_size);
@@ -2721,28 +2895,28 @@ void MarkCompactCollector::RelocateObjects() {
ASSERT(live_news_size == live_young_objects_size_);
// Flip from and to spaces
- Heap::new_space()->Flip();
+ heap()->new_space()->Flip();
- Heap::new_space()->MCCommitRelocationInfo();
+ heap()->new_space()->MCCommitRelocationInfo();
// Set age_mark to bottom in to space
- Address mark = Heap::new_space()->bottom();
- Heap::new_space()->set_age_mark(mark);
+ Address mark = heap()->new_space()->bottom();
+ heap()->new_space()->set_age_mark(mark);
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
space->MCCommitRelocationInfo();
- Heap::CheckNewSpaceExpansionCriteria();
- Heap::IncrementYoungSurvivorsCounter(live_news_size);
+ heap()->CheckNewSpaceExpansionCriteria();
+ heap()->IncrementYoungSurvivorsCounter(live_news_size);
}
int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
// Recover map pointer.
MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
- ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+ Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+ ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
// Get forwarding address before resetting map pointer
Address new_addr = GetForwardingAddressInOldSpace(obj);
@@ -2755,9 +2929,9 @@ int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
if (new_addr != old_addr) {
// Move contents.
- Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
- old_addr,
- Map::kSize);
+ heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ Map::kSize);
}
#ifdef DEBUG
@@ -2801,8 +2975,8 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
PagedSpace* space) {
// Recover map pointer.
MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
- ASSERT(Heap::map_space()->Contains(map_addr));
+ Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+ ASSERT(heap()->map_space()->Contains(map_addr));
// Get forwarding address before resetting map pointer.
Address new_addr = GetForwardingAddressInOldSpace(obj);
@@ -2814,12 +2988,12 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
if (new_addr != old_addr) {
// Move contents.
- if (space == Heap::old_data_space()) {
- Heap::MoveBlock(new_addr, old_addr, obj_size);
+ if (space == heap()->old_data_space()) {
+ heap()->MoveBlock(new_addr, old_addr, obj_size);
} else {
- Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
- old_addr,
- obj_size);
+ heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ obj_size);
}
}
@@ -2827,46 +3001,47 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsSharedFunctionInfo()) {
- PROFILE(SFIMoveEvent(old_addr, new_addr));
+ PROFILE(heap()->isolate(),
+ SharedFunctionInfoMoveEvent(old_addr, new_addr));
}
- HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
+ HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
- return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
+ return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
}
int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
- return RelocateOldNonCodeObject(obj, Heap::old_data_space());
+ return RelocateOldNonCodeObject(obj, heap()->old_data_space());
}
int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
- return RelocateOldNonCodeObject(obj, Heap::cell_space());
+ return RelocateOldNonCodeObject(obj, heap()->cell_space());
}
int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// Recover map pointer.
MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
- ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+ Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+ ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
// Get forwarding address before resetting map pointer
Address new_addr = GetForwardingAddressInOldSpace(obj);
// Reset the map pointer.
- int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);
+ int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);
Address old_addr = obj->address();
if (new_addr != old_addr) {
// Move contents.
- Heap::MoveBlock(new_addr, old_addr, obj_size);
+ heap()->MoveBlock(new_addr, old_addr, obj_size);
}
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
@@ -2874,9 +3049,9 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// May also update inline cache target.
Code::cast(copied_to)->Relocate(new_addr - old_addr);
// Notify the logger that compiled code has moved.
- PROFILE(CodeMoveEvent(old_addr, new_addr));
+ PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
}
- HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
+ HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
@@ -2887,28 +3062,28 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
// Get forwarding address
Address old_addr = obj->address();
- int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
+ int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
Address new_addr =
- Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);
+ Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);
#ifdef DEBUG
- if (Heap::new_space()->FromSpaceContains(new_addr)) {
- ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
- Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
+ if (heap()->new_space()->FromSpaceContains(new_addr)) {
+ ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
+ heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
} else {
- ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
- Heap::TargetSpace(obj) == Heap::old_data_space());
+ ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
+ heap()->TargetSpace(obj) == heap()->old_data_space());
}
#endif
// New and old addresses cannot overlap.
- if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
- Heap::CopyBlock(new_addr, old_addr, obj_size);
+ if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
+ heap()->CopyBlock(new_addr, old_addr, obj_size);
} else {
- Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
- old_addr,
- obj_size);
+ heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ obj_size);
}
#ifdef DEBUG
@@ -2919,15 +3094,29 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsSharedFunctionInfo()) {
- PROFILE(SFIMoveEvent(old_addr, new_addr));
+ PROFILE(heap()->isolate(),
+ SharedFunctionInfoMoveEvent(old_addr, new_addr));
}
- HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
+ HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
-void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
+void MarkCompactCollector::EnableCodeFlushing(bool enable) {
+ if (enable) {
+ if (code_flusher_ != NULL) return;
+ code_flusher_ = new CodeFlusher(heap()->isolate());
+ } else {
+ if (code_flusher_ == NULL) return;
+ delete code_flusher_;
+ code_flusher_ = NULL;
+ }
+}
+
+
+void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
+ Isolate* isolate) {
#ifdef ENABLE_GDB_JIT_INTERFACE
if (obj->IsCode()) {
GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
@@ -2935,7 +3124,7 @@ void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {
- PROFILE(CodeDeleteEvent(obj->address()));
+ PROFILE(isolate, CodeDeleteEvent(obj->address()));
}
#endif
}
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index 1b7e60022..04d0ff69a 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -28,6 +28,8 @@
#ifndef V8_MARK_COMPACT_H_
#define V8_MARK_COMPACT_H_
+#include "spaces.h"
+
namespace v8 {
namespace internal {
@@ -37,23 +39,77 @@ namespace internal {
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
// Forward declarations.
-class RootMarkingVisitor;
+class CodeFlusher;
+class GCTracer;
class MarkingVisitor;
+class RootMarkingVisitor;
+
+
+// ----------------------------------------------------------------------------
+// Marking stack for tracing live objects.
+
+class MarkingStack {
+ public:
+ MarkingStack() : low_(NULL), top_(NULL), high_(NULL), overflowed_(false) { }
+
+ void Initialize(Address low, Address high) {
+ top_ = low_ = reinterpret_cast<HeapObject**>(low);
+ high_ = reinterpret_cast<HeapObject**>(high);
+ overflowed_ = false;
+ }
+
+ bool is_full() const { return top_ >= high_; }
+
+ bool is_empty() const { return top_ <= low_; }
+
+ bool overflowed() const { return overflowed_; }
+
+ void clear_overflowed() { overflowed_ = false; }
+
+ // Push the (marked) object on the marking stack if there is room,
+ // otherwise mark the object as overflowed and wait for a rescan of the
+ // heap.
+ void Push(HeapObject* object) {
+ CHECK(object->IsHeapObject());
+ if (is_full()) {
+ object->SetOverflow();
+ overflowed_ = true;
+ } else {
+ *(top_++) = object;
+ }
+ }
+
+ HeapObject* Pop() {
+ ASSERT(!is_empty());
+ HeapObject* object = *(--top_);
+ CHECK(object->IsHeapObject());
+ return object;
+ }
+
+ private:
+ HeapObject** low_;
+ HeapObject** top_;
+ HeapObject** high_;
+ bool overflowed_;
+
+ DISALLOW_COPY_AND_ASSIGN(MarkingStack);
+};
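For reference, a usage sketch of how the collector drives this stack: MarkLiveObjects() initializes it over the from-space, EmptyMarkingStack() pops, and RefillMarkingStack() handles overflow. Names below are placeholders:

    MarkingStack stack;
    stack.Initialize(from_space_low, from_space_high);  // assumed bounds
    stack.Push(root);                // sets overflowed_ instead of growing
    while (!stack.is_empty()) {
      HeapObject* obj = stack.Pop();
      // ... visit obj's body, Push()ing newly marked children ...
    }
    if (stack.overflowed()) {
      stack.clear_overflowed();
      // ... rescan the heap for overflow-bit objects and re-Push them ...
    }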
// -------------------------------------------------------------------------
// Mark-Compact collector
-//
-// All methods are static.
-class MarkCompactCollector: public AllStatic {
+class OverflowedObjectsScanner;
+
+class MarkCompactCollector {
public:
// Type of functions to compute forwarding addresses of objects in
// compacted spaces. Given an object and its size, return a (non-failure)
// Object* that will be the object after forwarding. There is a separate
// allocation function for each (compactable) space based on the location
// of the object before compaction.
- typedef MaybeObject* (*AllocationFunction)(HeapObject* object,
+ typedef MaybeObject* (*AllocationFunction)(Heap* heap,
+ HeapObject* object,
int object_size);
// Type of functions to encode the forwarding address for an object.
@@ -64,17 +120,21 @@ class MarkCompactCollector: public AllStatic {
// page as input, and is updated to contain the offset to be used for the
// next live object in the same page. For spaces using a different
// encoding (i.e., contiguous spaces), the offset parameter is ignored.
- typedef void (*EncodingFunction)(HeapObject* old_object,
+ typedef void (*EncodingFunction)(Heap* heap,
+ HeapObject* old_object,
int object_size,
Object* new_object,
int* offset);
// Type of functions to process non-live objects.
- typedef void (*ProcessNonLiveFunction)(HeapObject* object);
+ typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);
+
+ // Pointer to member function, used in IterateLiveObjects.
+ typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
// Set the force_compaction flag; to take effect it must be called
// before Prepare.
- static void SetForceCompaction(bool value) {
+ void SetForceCompaction(bool value) {
force_compaction_ = value;
}
@@ -83,16 +143,16 @@ class MarkCompactCollector: public AllStatic {
// Prepares for GC by resetting relocation info in old and map spaces and
// choosing spaces to compact.
- static void Prepare(GCTracer* tracer);
+ void Prepare(GCTracer* tracer);
// Performs a global garbage collection.
- static void CollectGarbage();
+ void CollectGarbage();
// True if the last full GC performed heap compaction.
- static bool HasCompacted() { return compacting_collection_; }
+ bool HasCompacted() { return compacting_collection_; }
// True after the Prepare phase if the compaction is taking place.
- static bool IsCompacting() {
+ bool IsCompacting() {
#ifdef DEBUG
// For the purposes of asserts we don't want this to keep returning true
// after the collection is completed.
@@ -104,20 +164,20 @@ class MarkCompactCollector: public AllStatic {
// The count of the number of objects left marked at the end of the last
// completed full GC (expected to be zero).
- static int previous_marked_count() { return previous_marked_count_; }
+ int previous_marked_count() { return previous_marked_count_; }
// During a full GC, there is a stack-allocated GCTracer that is used for
// bookkeeping information. Return a pointer to that tracer.
- static GCTracer* tracer() { return tracer_; }
+ GCTracer* tracer() { return tracer_; }
#ifdef DEBUG
// Checks whether a mark-compact collection is in progress.
- static bool in_use() { return state_ > PREPARE_GC; }
- static bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
+ bool in_use() { return state_ > PREPARE_GC; }
+ bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif
// Determine type of object and emit deletion log event.
- static void ReportDeleteIfNeeded(HeapObject* obj);
+ static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
// Returns size of a possibly marked object.
static int SizeOfMarkedObject(HeapObject* obj);
@@ -127,7 +187,16 @@ class MarkCompactCollector: public AllStatic {
static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1;
+ inline Heap* heap() const { return heap_; }
+
+ CodeFlusher* code_flusher() { return code_flusher_; }
+ inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
+ void EnableCodeFlushing(bool enable);
+
private:
+ MarkCompactCollector();
+ ~MarkCompactCollector();
+
#ifdef DEBUG
enum CollectorState {
IDLE,
@@ -140,28 +209,28 @@ class MarkCompactCollector: public AllStatic {
};
// The current stage of the collector.
- static CollectorState state_;
+ CollectorState state_;
#endif
// Flag that forces a compaction.
- static bool force_compaction_;
+ bool force_compaction_;
// Flag indicating whether spaces were compacted on the last GC.
- static bool compacting_collection_;
+ bool compacting_collection_;
// Flag indicating whether spaces will be compacted on the next GC.
- static bool compact_on_next_gc_;
+ bool compact_on_next_gc_;
// The number of objects left marked at the end of the last completed full
// GC (expected to be zero).
- static int previous_marked_count_;
+ int previous_marked_count_;
// A pointer to the current stack-allocated GC tracer object during a full
// collection (NULL before and after).
- static GCTracer* tracer_;
+ GCTracer* tracer_;
// Finishes GC, performs heap verification if enabled.
- static void Finish();
+ void Finish();
// -----------------------------------------------------------------------
// Phase 1: Marking live objects.
@@ -179,85 +248,82 @@ class MarkCompactCollector: public AllStatic {
friend class CodeMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
- static void PrepareForCodeFlushing();
+ void PrepareForCodeFlushing();
// Marking operations for objects reachable from roots.
- static void MarkLiveObjects();
+ void MarkLiveObjects();
- static void MarkUnmarkedObject(HeapObject* obj);
+ void MarkUnmarkedObject(HeapObject* obj);
- static inline void MarkObject(HeapObject* obj) {
+ inline void MarkObject(HeapObject* obj) {
if (!obj->IsMarked()) MarkUnmarkedObject(obj);
}
- static inline void SetMark(HeapObject* obj) {
- tracer_->increment_marked_count();
-#ifdef DEBUG
- UpdateLiveObjectCount(obj);
-#endif
- obj->SetMark();
- }
+ inline void SetMark(HeapObject* obj);
// Creates back pointers for all map transitions, stores them in
// the prototype field. The original prototype pointers are restored
// in ClearNonLiveTransitions(). All JSObject maps
// connected by map transitions have the same prototype object, which
// is why we can use this field temporarily for back pointers.
- static void CreateBackPointers();
+ void CreateBackPointers();
// Mark a Map and its DescriptorArray together, skipping transitions.
- static void MarkMapContents(Map* map);
- static void MarkDescriptorArray(DescriptorArray* descriptors);
+ void MarkMapContents(Map* map);
+ void MarkDescriptorArray(DescriptorArray* descriptors);
// Mark the heap roots and all objects reachable from them.
- static void MarkRoots(RootMarkingVisitor* visitor);
+ void MarkRoots(RootMarkingVisitor* visitor);
// Mark the symbol table specially. References to symbols from the
// symbol table are weak.
- static void MarkSymbolTable();
+ void MarkSymbolTable();
// Mark objects in object groups that have at least one object in the
// group marked.
- static void MarkObjectGroups();
+ void MarkObjectGroups();
+
+ // Mark objects in implicit references groups if their parent object
+ // is marked.
+ void MarkImplicitRefGroups();
- // Mark all objects in an object group with at least one marked
- // object, then all objects reachable from marked objects in object
- // groups, and repeat.
- static void ProcessObjectGroups();
+ // Mark all objects which are reachable due to host application
+ // logic, such as object groups or implicit reference groups.
+ void ProcessExternalMarking();
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap.
- static void ProcessMarkingStack();
+ void ProcessMarkingStack();
// Mark objects reachable (transitively) from objects in the marking
// stack. This function empties the marking stack, but may leave
// overflowed objects in the heap, in which case the marking stack's
// overflow flag will be set.
- static void EmptyMarkingStack();
+ void EmptyMarkingStack();
// Refill the marking stack with overflowed objects from the heap. This
// function either leaves the marking stack full or clears the overflow
// flag on the marking stack.
- static void RefillMarkingStack();
+ void RefillMarkingStack();
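
A note on the empty/refill protocol above: together the two routines form a
bounded worklist, and the drain loop plausibly looks like this sketch
(assuming a MarkingStack with an overflowed() flag, as declared later in this
class; not the literal implementation):

    void MarkCompactCollector::ProcessMarkingStack() {
      // Drain the stack, then keep refilling from overflowed heap objects
      // until no overflow remains.
      EmptyMarkingStack();
      while (marking_stack_.overflowed()) {
        RefillMarkingStack();
        EmptyMarkingStack();
      }
    }
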
// Callback function for telling whether the object *p is an unmarked
// heap object.
static bool IsUnmarkedHeapObject(Object** p);
#ifdef DEBUG
- static void UpdateLiveObjectCount(HeapObject* obj);
+ void UpdateLiveObjectCount(HeapObject* obj);
#endif
// We sweep the large object space in the same way whether we are
// compacting or not, because the large object space is never compacted.
- static void SweepLargeObjectSpace();
+ void SweepLargeObjectSpace();
// Test whether a (possibly marked) object is a Map.
static inline bool SafeIsMap(HeapObject* object);
// Map transitions from a live map to a dead map must be killed.
// We replace them with a null descriptor, with the same key.
- static void ClearNonLiveTransitions();
+ void ClearNonLiveTransitions();
// -----------------------------------------------------------------------
// Phase 2: Sweeping to clear mark bits and free non-live objects for
@@ -302,32 +368,32 @@ class MarkCompactCollector: public AllStatic {
// Encodes forwarding addresses of objects in compactable parts of the
// heap.
- static void EncodeForwardingAddresses();
+ void EncodeForwardingAddresses();
// Encodes the forwarding addresses of objects in new space.
- static void EncodeForwardingAddressesInNewSpace();
+ void EncodeForwardingAddressesInNewSpace();
// Function template to encode the forwarding addresses of objects in
// paged spaces, parameterized by allocation and non-live processing
// functions.
template<AllocationFunction Alloc, ProcessNonLiveFunction ProcessNonLive>
- static void EncodeForwardingAddressesInPagedSpace(PagedSpace* space);
+ void EncodeForwardingAddressesInPagedSpace(PagedSpace* space);
// Iterates live objects in a space, passes live objects
// to a callback function which returns the heap size of the object.
// Returns the number of live objects iterated.
- static int IterateLiveObjects(NewSpace* space, HeapObjectCallback size_f);
- static int IterateLiveObjects(PagedSpace* space, HeapObjectCallback size_f);
+ int IterateLiveObjects(NewSpace* space, LiveObjectCallback size_f);
+ int IterateLiveObjects(PagedSpace* space, LiveObjectCallback size_f);
// Iterates the live objects between a range of addresses, returning the
// number of live objects.
- static int IterateLiveObjectsInRange(Address start, Address end,
- HeapObjectCallback size_func);
+ int IterateLiveObjectsInRange(Address start, Address end,
+ LiveObjectCallback size_func);
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
// regions to each space's free list.
- static void SweepSpaces();
+ void SweepSpaces();
// -----------------------------------------------------------------------
// Phase 3: Updating pointers in live objects.
@@ -341,15 +407,15 @@ class MarkCompactCollector: public AllStatic {
friend class UpdatingVisitor; // helper for updating visited objects
// Updates pointers in all spaces.
- static void UpdatePointers();
+ void UpdatePointers();
// Updates pointers in an object in new space.
// Returns the heap size of the object.
- static int UpdatePointersInNewObject(HeapObject* obj);
+ int UpdatePointersInNewObject(HeapObject* obj);
// Updates pointers in an object in old spaces.
// Returns the heap size of the object.
- static int UpdatePointersInOldObject(HeapObject* obj);
+ int UpdatePointersInOldObject(HeapObject* obj);
// Calculates the forwarding address of an object in an old space.
static Address GetForwardingAddressInOldSpace(HeapObject* obj);
@@ -363,31 +429,31 @@ class MarkCompactCollector: public AllStatic {
// After: Objects have been moved to their new addresses.
// Relocates objects in all spaces.
- static void RelocateObjects();
+ void RelocateObjects();
// Converts a code object's inline targets to addresses; the conversion
// from address to target happens in the marking phase.
- static int ConvertCodeICTargetToAddress(HeapObject* obj);
+ int ConvertCodeICTargetToAddress(HeapObject* obj);
// Relocate a map object.
- static int RelocateMapObject(HeapObject* obj);
+ int RelocateMapObject(HeapObject* obj);
// Relocates an old object.
- static int RelocateOldPointerObject(HeapObject* obj);
- static int RelocateOldDataObject(HeapObject* obj);
+ int RelocateOldPointerObject(HeapObject* obj);
+ int RelocateOldDataObject(HeapObject* obj);
// Relocate a property cell object.
- static int RelocateCellObject(HeapObject* obj);
+ int RelocateCellObject(HeapObject* obj);
// Helper function.
- static inline int RelocateOldNonCodeObject(HeapObject* obj,
- PagedSpace* space);
+ inline int RelocateOldNonCodeObject(HeapObject* obj,
+ PagedSpace* space);
// Relocates an object in the code space.
- static int RelocateCodeObject(HeapObject* obj);
+ int RelocateCodeObject(HeapObject* obj);
// Copy a new object.
- static int RelocateNewObject(HeapObject* obj);
+ int RelocateNewObject(HeapObject* obj);
#ifdef DEBUG
// -----------------------------------------------------------------------
@@ -396,28 +462,28 @@ class MarkCompactCollector: public AllStatic {
// mark-sweep collection.
// Size of live objects in Heap::to_space_.
- static int live_young_objects_size_;
+ int live_young_objects_size_;
// Size of live objects in Heap::old_pointer_space_.
- static int live_old_pointer_objects_size_;
+ int live_old_pointer_objects_size_;
// Size of live objects in Heap::old_data_space_.
- static int live_old_data_objects_size_;
+ int live_old_data_objects_size_;
// Size of live objects in Heap::code_space_.
- static int live_code_objects_size_;
+ int live_code_objects_size_;
// Size of live objects in Heap::map_space_.
- static int live_map_objects_size_;
+ int live_map_objects_size_;
// Size of live objects in Heap::cell_space_.
- static int live_cell_objects_size_;
+ int live_cell_objects_size_;
// Size of live objects in Heap::lo_space_.
- static int live_lo_objects_size_;
+ int live_lo_objects_size_;
// Number of live bytes in this collection.
- static int live_bytes_;
+ int live_bytes_;
friend class MarkObjectVisitor;
static void VisitObject(HeapObject* obj);
@@ -425,6 +491,13 @@ class MarkCompactCollector: public AllStatic {
friend class UnmarkObjectVisitor;
static void UnmarkObject(HeapObject* obj);
#endif
+
+ Heap* heap_;
+ MarkingStack marking_stack_;
+ CodeFlusher* code_flusher_;
+
+ friend class Heap;
+ friend class OverflowedObjectsScanner;
};
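
With MarkCompactCollector converted from an AllStatic class to a per-heap
instance, call sites now reach the collector through its owning Heap. A
hedged sketch of the resulting usage pattern (TriggerCodeFlush is a
hypothetical helper; the accessor names follow the declarations above):

    void TriggerCodeFlush(Heap* heap) {
      MarkCompactCollector* collector = heap->mark_compact_collector();
      if (!collector->is_code_flushing_enabled()) {
        collector->EnableCodeFlushing(true);
      }
    }
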
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 990000a32..4cbf0af74 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -32,7 +32,6 @@
#include "execution.h"
#include "messages.h"
#include "spaces-inl.h"
-#include "top.h"
namespace v8 {
namespace internal {
@@ -57,29 +56,24 @@ void MessageHandler::DefaultMessageReport(const MessageLocation* loc,
}
-void MessageHandler::ReportMessage(const char* msg) {
- PrintF("%s\n", msg);
-}
-
-
Handle<JSMessageObject> MessageHandler::MakeMessageObject(
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
Handle<String> stack_trace,
Handle<JSArray> stack_frames) {
- Handle<String> type_handle = Factory::LookupAsciiSymbol(type);
+ Handle<String> type_handle = FACTORY->LookupAsciiSymbol(type);
Handle<FixedArray> arguments_elements =
- Factory::NewFixedArray(args.length());
+ FACTORY->NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
arguments_elements->set(i, *args[i]);
}
Handle<JSArray> arguments_handle =
- Factory::NewJSArrayWithElements(arguments_elements);
+ FACTORY->NewJSArrayWithElements(arguments_elements);
int start = 0;
int end = 0;
- Handle<Object> script_handle = Factory::undefined_value();
+ Handle<Object> script_handle = FACTORY->undefined_value();
if (loc) {
start = loc->start_pos();
end = loc->end_pos();
@@ -87,15 +81,15 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
}
Handle<Object> stack_trace_handle = stack_trace.is_null()
- ? Factory::undefined_value()
+ ? FACTORY->undefined_value()
: Handle<Object>::cast(stack_trace);
Handle<Object> stack_frames_handle = stack_frames.is_null()
- ? Factory::undefined_value()
+ ? FACTORY->undefined_value()
: Handle<Object>::cast(stack_frames);
Handle<JSMessageObject> message =
- Factory::NewJSMessageObject(type_handle,
+ FACTORY->NewJSMessageObject(type_handle,
arguments_handle,
start,
end,
@@ -107,42 +101,63 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
}
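
The Factory::* to FACTORY-> rewrites in this function reflect the isolates
refactor. FACTORY is presumably a convenience macro along these lines (an
assumption for orientation; the definition is not part of this patch):

    // Assumed shape of the macro:
    // #define FACTORY (v8::internal::Isolate::Current()->factory())
    // so former static Factory calls become calls on the current
    // isolate's Factory instance.
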
-void MessageHandler::ReportMessage(MessageLocation* loc,
+void MessageHandler::ReportMessage(Isolate* isolate,
+ MessageLocation* loc,
Handle<Object> message) {
+ // We are calling into the embedder's code, which can throw exceptions.
+ // Thus we need to save the current exception state, reset it to a clean
+ // one, and ignore any scheduled exceptions that callbacks may throw.
+ Isolate::ExceptionScope exception_scope(isolate);
+ isolate->clear_pending_exception();
+ isolate->set_external_caught_exception(false);
+
v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
- v8::NeanderArray global_listeners(Factory::message_listeners());
+ v8::NeanderArray global_listeners(FACTORY->message_listeners());
int global_length = global_listeners.length();
if (global_length == 0) {
DefaultMessageReport(loc, message);
+ if (isolate->has_scheduled_exception()) {
+ isolate->clear_scheduled_exception();
+ }
} else {
for (int i = 0; i < global_length; i++) {
HandleScope scope;
if (global_listeners.get(i)->IsUndefined()) continue;
v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
- Handle<Proxy> callback_obj(Proxy::cast(listener.get(0)));
+ Handle<Foreign> callback_obj(Foreign::cast(listener.get(0)));
v8::MessageCallback callback =
- FUNCTION_CAST<v8::MessageCallback>(callback_obj->proxy());
+ FUNCTION_CAST<v8::MessageCallback>(callback_obj->address());
Handle<Object> callback_data(listener.get(1));
- callback(api_message_obj, v8::Utils::ToLocal(callback_data));
+ {
+ // Do not allow exceptions to propagate.
+ v8::TryCatch try_catch;
+ callback(api_message_obj, v8::Utils::ToLocal(callback_data));
+ }
+ if (isolate->has_scheduled_exception()) {
+ isolate->clear_scheduled_exception();
+ }
}
}
}
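
Each listener invocation above is shielded by a v8::TryCatch, and any
scheduled exception is cleared afterwards, so a misbehaving listener cannot
poison the VM's exception state. For context, a minimal embedder-side
listener might look like this (hypothetical fragment, not from the patch;
registered via v8::V8::AddMessageListener):

    static void OnMessage(v8::Handle<v8::Message> message,
                          v8::Handle<v8::Value> data) {
      // If this callback throws through the API, the TryCatch in
      // ReportMessage swallows the exception.
      v8::String::Utf8Value text(message->Get());
      fprintf(stderr, "script message: %s\n", *text ? *text : "<n/a>");
    }
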
Handle<String> MessageHandler::GetMessage(Handle<Object> data) {
- Handle<String> fmt_str = Factory::LookupAsciiSymbol("FormatMessage");
+ Handle<String> fmt_str = FACTORY->LookupAsciiSymbol("FormatMessage");
Handle<JSFunction> fun =
- Handle<JSFunction>(JSFunction::cast(
- Top::builtins()->GetPropertyNoExceptionThrown(*fmt_str)));
+ Handle<JSFunction>(
+ JSFunction::cast(
+ Isolate::Current()->js_builtins_object()->
+ GetPropertyNoExceptionThrown(*fmt_str)));
Object** argv[1] = { data.location() };
bool caught_exception;
Handle<Object> result =
- Execution::TryCall(fun, Top::builtins(), 1, argv, &caught_exception);
+ Execution::TryCall(fun,
+ Isolate::Current()->js_builtins_object(), 1, argv, &caught_exception);
if (caught_exception || !result->IsString()) {
- return Factory::LookupAsciiSymbol("<error>");
+ return FACTORY->LookupAsciiSymbol("<error>");
}
Handle<String> result_string = Handle<String>::cast(result);
// A string that has been obtained from JS code in this way is
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 48f324477..fc2162ded 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -89,9 +89,6 @@ class MessageLocation {
// of message listeners registered in an environment
class MessageHandler {
public:
- // Report a message (w/o JS heap allocation).
- static void ReportMessage(const char* msg);
-
// Returns a message object for the API to use.
static Handle<JSMessageObject> MakeMessageObject(
const char* type,
@@ -101,7 +98,9 @@ class MessageHandler {
Handle<JSArray> stack_frames);
// Report a formatted message (needs JS allocation).
- static void ReportMessage(MessageLocation* loc, Handle<Object> message);
+ static void ReportMessage(Isolate* isolate,
+ MessageLocation* loc,
+ Handle<Object> message);
static void DefaultMessageReport(const MessageLocation* loc,
Handle<Object> message_obj);
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index f39ea9ff6..841c5180a 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -142,11 +142,13 @@ function FormatMessage(message) {
kMessages = {
// Error
cyclic_proto: ["Cyclic __proto__ value"],
+ code_gen_from_strings: ["Code generation from strings disallowed for this context"],
// TypeError
unexpected_token: ["Unexpected token ", "%0"],
unexpected_token_number: ["Unexpected number"],
unexpected_token_string: ["Unexpected string"],
unexpected_token_identifier: ["Unexpected identifier"],
+ unexpected_reserved: ["Unexpected reserved word"],
unexpected_strict_reserved: ["Unexpected strict mode reserved word"],
unexpected_eos: ["Unexpected end of input"],
malformed_regexp: ["Invalid regular expression: /", "%0", "/: ", "%1"],
@@ -189,7 +191,13 @@ function FormatMessage(message) {
proto_object_or_null: ["Object prototype may only be an Object or null"],
property_desc_object: ["Property description must be an object: ", "%0"],
redefine_disallowed: ["Cannot redefine property: ", "%0"],
- define_disallowed: ["Cannot define property, object is not extensible: ", "%0"],
+ define_disallowed: ["Cannot define property:", "%0", ", object is not extensible."],
+ non_extensible_proto: ["%0", " is not extensible"],
+ handler_non_object: ["Proxy.", "%0", " called with non-object as handler"],
+ handler_trap_missing: ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
+ proxy_prop_not_configurable: ["Trap ", "%1", " of proxy handler ", "%0", " returned non-configurable descriptor for property ", "%2"],
+ proxy_non_object_prop_names: ["Trap ", "%1", " returned non-object ", "%0"],
+ proxy_repeated_prop_name: ["Trap ", "%1", " returned repeated property name ", "%2"],
// RangeError
invalid_array_length: ["Invalid array length"],
stack_overflow: ["Maximum call stack size exceeded"],
@@ -205,6 +213,7 @@ function FormatMessage(message) {
invalid_json: ["String '", "%0", "' is not valid JSON"],
circular_structure: ["Converting circular structure to JSON"],
obj_ctor_property_non_object: ["Object.", "%0", " called on non-object"],
+ called_on_null_or_undefined: ["%0", " called on null or undefined"],
array_indexof_not_defined: ["Array.getIndexOf: Argument undefined"],
object_not_extensible: ["Can't add property ", "%0", ", object is not extensible"],
illegal_access: ["Illegal access"],
@@ -212,7 +221,8 @@ function FormatMessage(message) {
strict_mode_with: ["Strict mode code may not include a with statement"],
strict_catch_variable: ["Catch variable may not be eval or arguments in strict mode"],
too_many_arguments: ["Too many arguments in function call (only 32766 allowed)"],
- too_many_parameters: ["Too many parameters in function definition"],
+ too_many_parameters: ["Too many parameters in function definition (only 32766 allowed)"],
+ too_many_variables: ["Too many variables declared (only 32767 allowed)"],
strict_param_name: ["Parameter name eval or arguments is not allowed in strict mode"],
strict_param_dupe: ["Strict mode function may not have duplicate parameter names"],
strict_var_name: ["Variable name may not be eval or arguments in strict mode"],
@@ -231,6 +241,8 @@ function FormatMessage(message) {
strict_function: ["In strict mode code, functions can only be declared at top level or immediately within another function." ],
strict_read_only_property: ["Cannot assign to read only property '", "%0", "' of ", "%1"],
strict_cannot_assign: ["Cannot assign to read only '", "%0", "' in strict mode"],
+ strict_poison_pill: ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
+ strict_caller: ["Illegal access to a strict mode caller function."],
};
}
var message_type = %MessageGetType(message);
@@ -492,10 +504,24 @@ Script.prototype.nameOrSourceURL = function() {
// because this file is being processed by js2c whose handling of spaces
// in regexps is broken. Also, ['"] are excluded from allowed URLs to
// avoid matches against sources that invoke evals with sourceURL.
- var sourceUrlPattern =
- /\/\/@[\040\t]sourceURL=[\040\t]*([^\s'"]*)[\040\t]*$/m;
- var match = sourceUrlPattern.exec(this.source);
- return match ? match[1] : this.name;
+ // A better solution would be to detect these special comments in
+ // the scanner/parser.
+ var source = ToString(this.source);
+ var sourceUrlPos = %StringIndexOf(source, "sourceURL=", 0);
+ if (sourceUrlPos > 4) {
+ var sourceUrlPattern =
+ /\/\/@[\040\t]sourceURL=[\040\t]*([^\s\'\"]*)[\040\t]*$/gm;
+ // Don't reuse lastMatchInfo here, so we create a new array with room
+ // for four captures (array with length one longer than the index
+ // of the fourth capture, where the numbering is zero-based).
+ var matchInfo = new InternalArray(CAPTURE(3) + 1);
+ var match =
+ %_RegExpExec(sourceUrlPattern, source, sourceUrlPos - 4, matchInfo);
+ if (match) {
+ return SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]);
+ }
+ }
+ return this.name;
}
@@ -659,18 +685,24 @@ function DefineOneShotAccessor(obj, name, fun) {
// can't rely on 'this' being the same as 'obj'.
var hasBeenSet = false;
var value;
- obj.__defineGetter__(name, function () {
+ function getter() {
if (hasBeenSet) {
return value;
}
hasBeenSet = true;
value = fun(obj);
return value;
- });
- obj.__defineSetter__(name, function (v) {
+ }
+ function setter(v) {
hasBeenSet = true;
value = v;
- });
+ }
+ var desc = { get: getter,
+ set: setter,
+ enumerable: false,
+ configurable: true };
+ desc = ToPropertyDescriptor(desc);
+ DefineOwnProperty(obj, name, desc, true);
}
function CallSite(receiver, fun, pos) {
@@ -974,15 +1006,15 @@ function DefineError(f) {
// overwriting allows leaks of error objects between script blocks
// in the same context in a browser setting. Therefore we fix the
// name.
- %SetProperty(f.prototype, "name", name, READ_ONLY | DONT_DELETE);
+ %SetProperty(f.prototype, "name", name, DONT_ENUM | DONT_DELETE | READ_ONLY);
%SetCode(f, function(m) {
if (%_IsConstructCall()) {
// Define all the expected properties directly on the error
// object. This avoids going through getters and setters defined
// on prototype objects.
- %IgnoreAttributesAndSetProperty(this, 'stack', void 0);
- %IgnoreAttributesAndSetProperty(this, 'arguments', void 0);
- %IgnoreAttributesAndSetProperty(this, 'type', void 0);
+ %IgnoreAttributesAndSetProperty(this, 'stack', void 0, DONT_ENUM);
+ %IgnoreAttributesAndSetProperty(this, 'arguments', void 0, DONT_ENUM);
+ %IgnoreAttributesAndSetProperty(this, 'type', void 0, DONT_ENUM);
if (m === kAddMessageAccessorsMarker) {
// DefineOneShotAccessor always inserts a message property and
// ignores setters.
@@ -990,7 +1022,10 @@ function DefineError(f) {
return FormatMessage(%NewMessageObject(obj.type, obj.arguments));
});
} else if (!IS_UNDEFINED(m)) {
- %IgnoreAttributesAndSetProperty(this, 'message', ToString(m));
+ %IgnoreAttributesAndSetProperty(this,
+ 'message',
+ ToString(m),
+ DONT_ENUM);
}
captureStackTrace(this, f);
} else {
@@ -1025,7 +1060,19 @@ DefineError(function URIError() { });
$Error.captureStackTrace = captureStackTrace;
// Setup extra properties of the Error.prototype object.
-$Error.prototype.message = '';
+function setErrorMessage() {
+ var desc = {value: '',
+ enumerable: false,
+ configurable: true,
+ writable: true };
+ DefineOwnProperty($Error.prototype,
+ 'message',
+ ToPropertyDescriptor(desc),
+ true);
+
+}
+
+setErrorMessage();
// Global list of error objects visited during errorToString. This is
// used to detect cycles in error toString formatting.
@@ -1050,6 +1097,10 @@ function errorToStringDetectCycle() {
}
function errorToString() {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Error.prototype.toString"]);
+ }
// This helper function is needed because access to properties on
// the builtins object do not work inside of a catch clause.
function isCyclicErrorMarker(o) { return o === cyclic_error_marker; }
@@ -1068,5 +1119,5 @@ function errorToString() {
InstallFunctions($Error.prototype, DONT_ENUM, ['toString', errorToString]);
// Boilerplate for exceptions for stack overflows. Used from
-// Top::StackOverflow().
+// Isolate::StackOverflow().
const kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 2e634617c..b5ffe7391 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
@@ -38,22 +38,14 @@
#include "mips/assembler-mips.h"
#include "cpu.h"
+#include "debug.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
-// Condition
-
-Condition NegateCondition(Condition cc) {
- ASSERT(cc != cc_always);
- return static_cast<Condition>(cc ^ 1);
-}
-
-
-// -----------------------------------------------------------------------------
-// Operand and MemOperand
+// Operand and MemOperand.
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
@@ -61,17 +53,13 @@ Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rmode_ = rmode;
}
+
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
-Operand::Operand(const char* s) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(s);
- rmode_ = RelocInfo::EMBEDDED_STRING;
-}
Operand::Operand(Smi* value) {
rm_ = no_reg;
@@ -79,10 +67,12 @@ Operand::Operand(Smi* value) {
rmode_ = RelocInfo::NONE;
}
+
Operand::Operand(Register rm) {
rm_ = rm;
}
+
bool Operand::is_reg() const {
return rm_.is_valid();
}
@@ -90,11 +80,15 @@ bool Operand::is_reg() const {
// -----------------------------------------------------------------------------
-// RelocInfo
+// RelocInfo.
void RelocInfo::apply(intptr_t delta) {
- // On MIPS we do not use pc relative addressing, so we don't need to patch the
- // code here.
+ if (IsInternalReference(rmode_)) {
+ // Absolute code pointer inside code object moves with the code object.
+ byte* p = reinterpret_cast<byte*>(pc_);
+ int count = Assembler::RelocateInternalReference(p, delta);
+ CPU::FlushICache(p, count * sizeof(uint32_t));
+ }
}
@@ -110,6 +104,11 @@ Address RelocInfo::target_address_address() {
}
+int RelocInfo::target_address_size() {
+ return Assembler::kExternalTargetSize;
+}
+
+
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
@@ -130,8 +129,12 @@ Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
Object** RelocInfo::target_object_address() {
+ // Provide a "natural pointer" to the embedded object,
+ // which can be de-referenced during heap iteration.
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(pc_);
+ reconstructed_obj_ptr_ =
+ reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ return &reconstructed_obj_ptr_;
}
@@ -143,23 +146,52 @@ void RelocInfo::set_target_object(Object* target) {
Address* RelocInfo::target_reference_address() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(pc_);
+ reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
+ return &reconstructed_adr_ptr_;
+}
+
+
+Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
+ ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+ Address address = Memory::Address_at(pc_);
+ return Handle<JSGlobalPropertyCell>(
+ reinterpret_cast<JSGlobalPropertyCell**>(address));
+}
+
+
+JSGlobalPropertyCell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+ Address address = Memory::Address_at(pc_);
+ Object* object = HeapObject::FromAddress(
+ address - JSGlobalPropertyCell::kValueOffset);
+ return reinterpret_cast<JSGlobalPropertyCell*>(object);
+}
+
+
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+ ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+ Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+ Memory::Address_at(pc_) = address;
}
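
The accessors above embed a pointer to the cell's value slot rather than to
the cell itself; the round trip is:

    // set_target_cell stores:  address = cell->address() + kValueOffset
    // target_cell recovers:    HeapObject::FromAddress(address - kValueOffset)
    // i.e. the instruction stream points directly at the value slot, and
    // the cell header is reconstructed by subtracting the same offset.
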
Address RelocInfo::call_address() {
- ASSERT(IsPatchedReturnSequence());
- // The 2 instructions offset assumes patched return sequence.
- ASSERT(IsJSReturn(rmode()));
- return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ // The pc_ offset of 0 assumes the MIPS patched return sequence per
+ // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or a
+ // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+ return Assembler::target_address_at(pc_);
}
void RelocInfo::set_call_address(Address target) {
- ASSERT(IsPatchedReturnSequence());
- // The 2 instructions offset assumes patched return sequence.
- ASSERT(IsJSReturn(rmode()));
- Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ // The pc_ offset of 0 assumes the MIPS patched return sequence per
+ // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or a
+ // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+ Assembler::set_target_address_at(pc_, target);
}
@@ -169,9 +201,8 @@ Object* RelocInfo::call_object() {
Object** RelocInfo::call_object_address() {
- ASSERT(IsPatchedReturnSequence());
- // The 2 instructions offset assumes patched return sequence.
- ASSERT(IsJSReturn(rmode()));
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}
@@ -182,18 +213,80 @@ void RelocInfo::set_call_object(Object* target) {
bool RelocInfo::IsPatchedReturnSequence() {
-#ifdef DEBUG
- PrintF("%s - %d - %s : Checking for jal(r)",
- __FILE__, __LINE__, __func__);
+ Instr instr0 = Assembler::instr_at(pc_);
+ Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
+ Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);
+ bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
+ (instr1 & kOpcodeMask) == ORI &&
+ (instr2 & kOpcodeMask) == SPECIAL &&
+ (instr2 & kFunctionFieldMask) == JALR);
+ return patched_return;
+}
+
+
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ Instr current_instr = Assembler::instr_at(pc_);
+ return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
+
+
+void RelocInfo::Visit(ObjectVisitor* visitor) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ Object** p = target_object_address();
+ Object* orig = *p;
+ visitor->VisitPointer(p);
+ if (*p != orig) {
+ set_target_object(*p);
+ }
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+ visitor->VisitGlobalPropertyCell(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ visitor->VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // TODO(isolates): Get a cached isolate below.
+ } else if (((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence())) &&
+ Isolate::Current()->debug()->has_break_points()) {
+ visitor->VisitDebugTarget(this);
#endif
- return ((Assembler::instr_at(pc_) & kOpcodeMask) == SPECIAL) &&
- (((Assembler::instr_at(pc_) & kFunctionFieldMask) == JAL) ||
- ((Assembler::instr_at(pc_) & kFunctionFieldMask) == JALR));
+ } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ visitor->VisitRuntimeEntry(this);
+ }
+}
+
+
+template<typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitPointer(heap, target_object_address());
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(heap, this);
+ } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+ StaticVisitor::VisitGlobalPropertyCell(heap, this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (heap->isolate()->debug()->has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(heap, this);
+#endif
+ } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
}
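
The two Visit flavors serve different callers: the template variant binds the
visitor statically, avoiding virtual dispatch on the hot marking path, while
the ObjectVisitor variant suits generic heap iteration. Plausible call sites
(an assumption about usage, not shown in this patch):

    // Static dispatch, e.g. from the GC's marking visitor:
    //   rinfo->Visit<StaticMarkingVisitor>(heap);
    // Dynamic dispatch, e.g. from a heap iterator:
    //   rinfo->Visit(&object_visitor);
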
// -----------------------------------------------------------------------------
-// Assembler
+// Assembler.
void Assembler::CheckBuffer() {
@@ -203,10 +296,20 @@ void Assembler::CheckBuffer() {
}
+void Assembler::CheckTrampolinePoolQuick() {
+ if (pc_offset() >= next_buffer_check_) {
+ CheckTrampolinePool();
+ }
+}
+
+
void Assembler::emit(Instr x) {
- CheckBuffer();
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
+ CheckTrampolinePoolQuick();
}
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index a3b316b14..4ca6a91aa 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -40,82 +40,42 @@
#include "mips/assembler-mips-inl.h"
#include "serialize.h"
-
namespace v8 {
namespace internal {
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+
+void CpuFeatures::Probe() {
+ ASSERT(!initialized_);
+#ifdef DEBUG
+ initialized_ = true;
+#endif
+ // If the compiler is allowed to use the FPU, then we can use the FPU
+ // too in our code generation.
+#if !defined(__mips__)
+ // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
+ if (FLAG_enable_fpu) {
+ supported_ |= 1u << FPU;
+ }
+#else
+ if (Serializer::enabled()) {
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+ return; // No features if we might serialize.
+ }
+ if (OS::MipsCpuHasFeature(FPU)) {
+ // This implementation also sets the FPU flags if
+ // runtime detection of FPU returns true.
+ supported_ |= 1u << FPU;
+ found_by_runtime_probing_ |= 1u << FPU;
+ }
+#endif
+}
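
Probe() fills supported_ once at startup; generated-code emitters then query
it. Following the convention of the other ports in this V8 version (so treat
the scope class as an assumption for MIPS), usage looks like:

    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope(FPU);  // Assert FPU use is allowed here.
      // ... emit FPU instructions such as add_d or mul_d ...
    }
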
-const Register no_reg = { -1 };
-
-const Register zero_reg = { 0 };
-const Register at = { 1 };
-const Register v0 = { 2 };
-const Register v1 = { 3 };
-const Register a0 = { 4 };
-const Register a1 = { 5 };
-const Register a2 = { 6 };
-const Register a3 = { 7 };
-const Register t0 = { 8 };
-const Register t1 = { 9 };
-const Register t2 = { 10 };
-const Register t3 = { 11 };
-const Register t4 = { 12 };
-const Register t5 = { 13 };
-const Register t6 = { 14 };
-const Register t7 = { 15 };
-const Register s0 = { 16 };
-const Register s1 = { 17 };
-const Register s2 = { 18 };
-const Register s3 = { 19 };
-const Register s4 = { 20 };
-const Register s5 = { 21 };
-const Register s6 = { 22 };
-const Register s7 = { 23 };
-const Register t8 = { 24 };
-const Register t9 = { 25 };
-const Register k0 = { 26 };
-const Register k1 = { 27 };
-const Register gp = { 28 };
-const Register sp = { 29 };
-const Register s8_fp = { 30 };
-const Register ra = { 31 };
-
-
-const FPURegister no_creg = { -1 };
-
-const FPURegister f0 = { 0 };
-const FPURegister f1 = { 1 };
-const FPURegister f2 = { 2 };
-const FPURegister f3 = { 3 };
-const FPURegister f4 = { 4 };
-const FPURegister f5 = { 5 };
-const FPURegister f6 = { 6 };
-const FPURegister f7 = { 7 };
-const FPURegister f8 = { 8 };
-const FPURegister f9 = { 9 };
-const FPURegister f10 = { 10 };
-const FPURegister f11 = { 11 };
-const FPURegister f12 = { 12 };
-const FPURegister f13 = { 13 };
-const FPURegister f14 = { 14 };
-const FPURegister f15 = { 15 };
-const FPURegister f16 = { 16 };
-const FPURegister f17 = { 17 };
-const FPURegister f18 = { 18 };
-const FPURegister f19 = { 19 };
-const FPURegister f20 = { 20 };
-const FPURegister f21 = { 21 };
-const FPURegister f22 = { 22 };
-const FPURegister f23 = { 23 };
-const FPURegister f24 = { 24 };
-const FPURegister f25 = { 25 };
-const FPURegister f26 = { 26 };
-const FPURegister f27 = { 27 };
-const FPURegister f28 = { 28 };
-const FPURegister f29 = { 29 };
-const FPURegister f30 = { 30 };
-const FPURegister f31 = { 31 };
int ToNumber(Register reg) {
ASSERT(reg.is_valid());
@@ -156,6 +116,7 @@ int ToNumber(Register reg) {
return kNumbers[reg.code()];
}
+
Register ToRegister(int num) {
ASSERT(num >= 0 && num < kNumRegisters);
const Register kRegisters[] = {
@@ -179,7 +140,16 @@ Register ToRegister(int num) {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo.
-const int RelocInfo::kApplyMask = 0;
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
+
+
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on MIPS means that it is a lui/ori instruction, and that is
+ // always the case inside code objects.
+ return true;
+}
+
// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
@@ -210,7 +180,7 @@ Operand::Operand(Handle<Object> handle) {
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!Heap::InNewSpace(obj));
+ ASSERT(!HEAP->InNewSpace(obj));
if (obj->IsHeapObject()) {
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
@@ -221,26 +191,64 @@ Operand::Operand(Handle<Object> handle) {
}
}
-MemOperand::MemOperand(Register rm, int16_t offset) : Operand(rm) {
+
+MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
offset_ = offset;
}
// -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-static const int kMinimalBufferSize = 4*KB;
-static byte* spare_buffer_ = NULL;
-
-Assembler::Assembler(void* buffer, int buffer_size) {
+// Specific instructions, constants, and masks.
+
+static const int kNegOffset = 0x00008000;
+// addiu(sp, sp, 4), i.e. the Pop() operation, or the part of a Pop(r)
+// operation that post-increments sp.
+const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift)
+ | (sp.code() << kRtShift) | (kPointerSize & kImm16Mask);
+// addiu(sp, sp, -4), the part of a Push(r) operation that pre-decrements sp.
+const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift)
+ | (sp.code() << kRtShift) | (-kPointerSize & kImm16Mask);
+// sw(r, MemOperand(sp, 0))
+const Instr kPushRegPattern = SW | (sp.code() << kRsShift)
+ | (0 & kImm16Mask);
+// lw(r, MemOperand(sp, 0))
+const Instr kPopRegPattern = LW | (sp.code() << kRsShift)
+ | (0 & kImm16Mask);
+
+const Instr kLwRegFpOffsetPattern = LW | (s8_fp.code() << kRsShift)
+ | (0 & kImm16Mask);
+
+const Instr kSwRegFpOffsetPattern = SW | (s8_fp.code() << kRsShift)
+ | (0 & kImm16Mask);
+
+const Instr kLwRegFpNegOffsetPattern = LW | (s8_fp.code() << kRsShift)
+ | (kNegOffset & kImm16Mask);
+
+const Instr kSwRegFpNegOffsetPattern = SW | (s8_fp.code() << kRsShift)
+ | (kNegOffset & kImm16Mask);
+// A mask for the Rt register for push, pop, lw, sw instructions.
+const Instr kRtMask = kRtFieldMask;
+const Instr kLwSwInstrTypeMask = 0xffe00000;
+const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
+const Instr kLwSwOffsetMask = kImm16Mask;
+
+
+// Spare buffer.
+static const int kMinimalBufferSize = 4 * KB;
+
+
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+ : AssemblerBase(arg_isolate),
+ positions_recorder_(this),
+ emit_debug_code_(FLAG_debug_code) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
- if (spare_buffer_ != NULL) {
- buffer = spare_buffer_;
- spare_buffer_ = NULL;
+ if (isolate()->assembler_spare_buffer() != NULL) {
+ buffer = isolate()->assembler_spare_buffer();
+ isolate()->set_assembler_spare_buffer(NULL);
}
}
if (buffer == NULL) {
@@ -263,17 +271,29 @@ Assembler::Assembler(void* buffer, int buffer_size) {
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
- current_statement_position_ = RelocInfo::kNoPosition;
- current_position_ = RelocInfo::kNoPosition;
- written_statement_position_ = current_statement_position_;
- written_position_ = current_position_;
+
+ last_trampoline_pool_end_ = 0;
+ no_trampoline_pool_before_ = 0;
+ trampoline_pool_blocked_nesting_ = 0;
+ // We leave space (16 * kTrampolineSlotsSize)
+ // for BlockTrampolinePoolScope buffer.
+ next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
+ internal_trampoline_exception_ = false;
+ last_bound_pos_ = 0;
+
+ trampoline_emitted_ = false;
+ unbound_labels_count_ = 0;
+ block_buffer_growth_ = false;
+
+ ast_id_for_reloc_info_ = kNoASTId;
}
Assembler::~Assembler() {
if (own_buffer_) {
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
+ if (isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
@@ -282,7 +302,7 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
- ASSERT(pc_ <= reloc_info_writer.pos()); // no overlap
+ ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
// Setup code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -291,6 +311,139 @@ void Assembler::GetCode(CodeDesc* desc) {
}
+void Assembler::Align(int m) {
+ ASSERT(m >= 4 && IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+void Assembler::CodeTargetAlign() {
+ // There is no advantage, as far as I am aware, to aligning branch/call
+ // targets to more than a single instruction.
+ Align(4);
+}
+
+
+Register Assembler::GetRtReg(Instr instr) {
+ Register rt;
+ rt.code_ = (instr & kRtFieldMask) >> kRtShift;
+ return rt;
+}
+
+
+Register Assembler::GetRsReg(Instr instr) {
+ Register rs;
+ rs.code_ = (instr & kRsFieldMask) >> kRsShift;
+ return rs;
+}
+
+
+Register Assembler::GetRdReg(Instr instr) {
+ Register rd;
+ rd.code_ = (instr & kRdFieldMask) >> kRdShift;
+ return rd;
+}
+
+
+uint32_t Assembler::GetRt(Instr instr) {
+ return (instr & kRtFieldMask) >> kRtShift;
+}
+
+
+uint32_t Assembler::GetRtField(Instr instr) {
+ return instr & kRtFieldMask;
+}
+
+
+uint32_t Assembler::GetRs(Instr instr) {
+ return (instr & kRsFieldMask) >> kRsShift;
+}
+
+
+uint32_t Assembler::GetRsField(Instr instr) {
+ return instr & kRsFieldMask;
+}
+
+
+uint32_t Assembler::GetRd(Instr instr) {
+ return (instr & kRdFieldMask) >> kRdShift;
+}
+
+
+uint32_t Assembler::GetRdField(Instr instr) {
+ return instr & kRdFieldMask;
+}
+
+
+uint32_t Assembler::GetSa(Instr instr) {
+ return (instr & kSaFieldMask) >> kSaShift;
+}
+
+
+uint32_t Assembler::GetSaField(Instr instr) {
+ return instr & kSaFieldMask;
+}
+
+
+uint32_t Assembler::GetOpcodeField(Instr instr) {
+ return instr & kOpcodeMask;
+}
+
+
+uint32_t Assembler::GetFunction(Instr instr) {
+ return (instr & kFunctionFieldMask) >> kFunctionShift;
+}
+
+
+uint32_t Assembler::GetFunctionField(Instr instr) {
+ return instr & kFunctionFieldMask;
+}
+
+
+uint32_t Assembler::GetImmediate16(Instr instr) {
+ return instr & kImm16Mask;
+}
+
+
+uint32_t Assembler::GetLabelConst(Instr instr) {
+ return instr & ~kImm16Mask;
+}
+
+
+bool Assembler::IsPop(Instr instr) {
+ return (instr & ~kRtMask) == kPopRegPattern;
+}
+
+
+bool Assembler::IsPush(Instr instr) {
+ return (instr & ~kRtMask) == kPushRegPattern;
+}
+
+
+bool Assembler::IsSwRegFpOffset(Instr instr) {
+ return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsLwRegFpOffset(Instr instr) {
+ return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsSwRegFpNegOffset(Instr instr) {
+ return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
+ kSwRegFpNegOffsetPattern);
+}
+
+
+bool Assembler::IsLwRegFpNegOffset(Instr instr) {
+ return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
+ kLwRegFpNegOffsetPattern);
+}
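
These predicates recognize fixed instruction shapes by masking off the
variable register field. As a worked check for a push of t0:

    // sw(t0, MemOperand(sp, 0)) encodes as
    //   instr = SW | (sp.code() << kRsShift) | (t0.code() << kRtShift) | 0;
    // masking out the rt field leaves exactly the pattern constant:
    //   (instr & ~kRtMask) == kPushRegPattern
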
+
+
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -301,14 +454,21 @@ void Assembler::GetCode(CodeDesc* desc) {
// to be generated; pos() is the position of the last
// instruction using the label.
-// The link chain is terminated by a negative code position (must be aligned).
+// The link chain is terminated by a value of -1 in the instruction, which
+// is an otherwise illegal value (a branch offset of -1 is an infinite loop).
+// The instruction's 16-bit offset field addresses 32-bit words, but in code
+// it is converted to an 18-bit value addressing bytes, hence the -4 value.
const int kEndOfChain = -4;
+// Determines the end of the Jump chain (a subset of the label link chain).
+const int kEndOfJumpChain = 0;
-bool Assembler::is_branch(Instr instr) {
- uint32_t opcode = ((instr & kOpcodeMask));
- uint32_t rt_field = ((instr & kRtFieldMask));
- uint32_t rs_field = ((instr & kRsFieldMask));
+
+bool Assembler::IsBranch(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rt_field = GetRtField(instr);
+ uint32_t rs_field = GetRsField(instr);
+ uint32_t label_constant = GetLabelConst(instr);
// Checks if the instruction is a branch.
return opcode == BEQ ||
opcode == BNE ||
@@ -317,10 +477,130 @@ bool Assembler::is_branch(Instr instr) {
opcode == BEQL ||
opcode == BNEL ||
opcode == BLEZL ||
- opcode == BGTZL||
+ opcode == BGTZL ||
(opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
rt_field == BLTZAL || rt_field == BGEZAL)) ||
- (opcode == COP1 && rs_field == BC1); // Coprocessor branch.
+ (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
+ label_constant == 0; // Emitted label const in reg-exp engine.
+}
+
+
+bool Assembler::IsBeq(Instr instr) {
+ return GetOpcodeField(instr) == BEQ;
+}
+
+
+bool Assembler::IsBne(Instr instr) {
+ return GetOpcodeField(instr) == BNE;
+}
+
+
+bool Assembler::IsJump(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rt_field = GetRtField(instr);
+ uint32_t rd_field = GetRdField(instr);
+ uint32_t function_field = GetFunctionField(instr);
+ // Checks if the instruction is a jump.
+ return opcode == J || opcode == JAL ||
+ (opcode == SPECIAL && rt_field == 0 &&
+ ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
+}
+
+
+bool Assembler::IsJ(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is a jump.
+ return opcode == J;
+}
+
+
+bool Assembler::IsLui(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is a load upper immediate.
+ return opcode == LUI;
+}
+
+
+bool Assembler::IsOri(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is an or-immediate (ori).
+ return opcode == ORI;
+}
+
+
+bool Assembler::IsNop(Instr instr, unsigned int type) {
+ // See Assembler::nop(type).
+ ASSERT(type < 32);
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rt = GetRt(instr);
+ uint32_t rs = GetRs(instr);
+ uint32_t sa = GetSa(instr);
+
+ // nop(type) == sll(zero_reg, zero_reg, type);
+ // Technically all these values will be 0 but
+ // this makes more sense to the reader.
+
+ bool ret = (opcode == SLL &&
+ rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+ rs == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+ sa == type);
+
+ return ret;
+}
+
+
+int32_t Assembler::GetBranchOffset(Instr instr) {
+ ASSERT(IsBranch(instr));
+ return ((int16_t)(instr & kImm16Mask)) << 2;
+}
+
+
+bool Assembler::IsLw(Instr instr) {
+ return ((instr & kOpcodeMask) == LW);
+}
+
+
+int16_t Assembler::GetLwOffset(Instr instr) {
+ ASSERT(IsLw(instr));
+ return ((instr & kImm16Mask));
+}
+
+
+Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
+ ASSERT(IsLw(instr));
+
+ // We actually create a new lw instruction based on the original one.
+ Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
+ | (offset & kImm16Mask);
+
+ return temp_instr;
+}
+
+
+bool Assembler::IsSw(Instr instr) {
+ return ((instr & kOpcodeMask) == SW);
+}
+
+
+Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
+ ASSERT(IsSw(instr));
+ return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
+}
+
+
+bool Assembler::IsAddImmediate(Instr instr) {
+ return ((instr & kOpcodeMask) == ADDIU);
+}
+
+
+Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
+ ASSERT(IsAddImmediate(instr));
+ return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
+}
+
+
+bool Assembler::IsAndImmediate(Instr instr) {
+ return GetOpcodeField(instr) == ANDI;
}
@@ -328,16 +608,55 @@ int Assembler::target_at(int32_t pos) {
Instr instr = instr_at(pos);
if ((instr & ~kImm16Mask) == 0) {
// Emitted label constant, not part of a branch.
- return instr - (Code::kHeaderSize - kHeapObjectTag);
+ if (instr == 0) {
+ return kEndOfChain;
+ } else {
+ int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+ return (imm18 + pos);
+ }
}
- // Check we have a branch instruction.
- ASSERT(is_branch(instr));
+ // Check we have a branch or jump instruction.
+ ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
// Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
// the compiler uses arithmetic shifts for signed integers.
- int32_t imm18 = ((instr &
- static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+ if (IsBranch(instr)) {
+ int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
- return pos + kBranchPCOffset + imm18;
+ if (imm18 == kEndOfChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ return pos + kBranchPCOffset + imm18;
+ }
+ } else if (IsLui(instr)) {
+ Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
+ ASSERT(IsOri(instr_ori));
+ int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+
+ if (imm == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
+ int32_t delta = instr_address - imm;
+ ASSERT(pos > delta);
+ return pos - delta;
+ }
+ } else {
+ int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+ if (imm28 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
+ instr_address &= kImm28Mask;
+ int32_t delta = instr_address - imm28;
+ ASSERT(pos > delta);
+ return pos - delta;
+ }
+ }
}
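
The shift pair (<< 16, then arithmetic >> 14) sign-extends the 16-bit word
offset and scales it to bytes in one step. A worked terminator case:

    // A link-chain terminator stores imm16 = -1 (0xFFFF):
    //   0xFFFF << 16          == 0xFFFF0000 (as int32: -65536)
    //   -65536 >> 14 (arith.) == -4 == kEndOfChain
    // matching the "-4" terminator described near kEndOfChain above.
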
@@ -351,15 +670,41 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
return;
}
- ASSERT(is_branch(instr));
- int32_t imm18 = target_pos - (pos + kBranchPCOffset);
- ASSERT((imm18 & 3) == 0);
+ ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
+ if (IsBranch(instr)) {
+ int32_t imm18 = target_pos - (pos + kBranchPCOffset);
+ ASSERT((imm18 & 3) == 0);
+
+ instr &= ~kImm16Mask;
+ int32_t imm16 = imm18 >> 2;
+ ASSERT(is_int16(imm16));
+
+ instr_at_put(pos, instr | (imm16 & kImm16Mask));
+ } else if (IsLui(instr)) {
+ Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
+ ASSERT(IsOri(instr_ori));
+ uint32_t imm = (uint32_t)buffer_ + target_pos;
+ ASSERT((imm & 3) == 0);
+
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
+
+ instr_at_put(pos + 0 * Assembler::kInstrSize,
+ instr_lui | ((imm & kHiMask) >> kLuiShift));
+ instr_at_put(pos + 1 * Assembler::kInstrSize,
+ instr_ori | (imm & kImm16Mask));
+ } else {
+ uint32_t imm28 = (uint32_t)buffer_ + target_pos;
+ imm28 &= kImm28Mask;
+ ASSERT((imm28 & 3) == 0);
- instr &= ~kImm16Mask;
- int32_t imm16 = imm18 >> 2;
- ASSERT(is_int16(imm16));
+ instr &= ~kImm26Mask;
+ uint32_t imm26 = imm28 >> 2;
+ ASSERT(is_uint26(imm26));
- instr_at_put(pos, instr | (imm16 & kImm16Mask));
+ instr_at_put(pos, instr | (imm26 & kImm26Mask));
+ }
}
@@ -388,11 +733,34 @@ void Assembler::print(Label* L) {
void Assembler::bind_to(Label* L, int pos) {
- ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
+ ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
+ int32_t trampoline_pos = kInvalidSlotPos;
+ if (L->is_linked() && !trampoline_emitted_) {
+ unbound_labels_count_--;
+ next_buffer_check_ += kTrampolineSlotsSize;
+ }
+
while (L->is_linked()) {
int32_t fixup_pos = L->pos();
- next(L); // call next before overwriting link with target at fixup_pos
- target_at_put(fixup_pos, pos);
+ int32_t dist = pos - fixup_pos;
+ next(L); // Call next before overwriting link with target at fixup_pos.
+ Instr instr = instr_at(fixup_pos);
+ if (IsBranch(instr)) {
+ if (dist > kMaxBranchOffset) {
+ if (trampoline_pos == kInvalidSlotPos) {
+ trampoline_pos = get_trampoline_entry(fixup_pos);
+ CHECK(trampoline_pos != kInvalidSlotPos);
+ }
+ ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
+ target_at_put(fixup_pos, trampoline_pos);
+ fixup_pos = trampoline_pos;
+ dist = pos - fixup_pos;
+ }
+ target_at_put(fixup_pos, pos);
+ } else {
+ ASSERT(IsJ(instr) || IsLui(instr));
+ target_at_put(fixup_pos, pos);
+ }
}
L->bind_to(pos);
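
When a 16-bit branch cannot span the distance to the bind position, bind_to
first retargets it to a nearby trampoline slot; the slot presumably holds an
unconditional jump with 26-bit reach (a sketch of the assumed layout):

    //     beq  rs, rt, trampoline_pos   // 16-bit offset, now in range
    //     ...
    // trampoline_pos:
    //     j    pos                      // 26-bit reach covers the target
    //     nop                           // branch delay slot
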
@@ -403,29 +771,8 @@ void Assembler::bind_to(Label* L, int pos) {
}
-void Assembler::link_to(Label* L, Label* appendix) {
- if (appendix->is_linked()) {
- if (L->is_linked()) {
- // Append appendix to L's list.
- int fixup_pos;
- int link = L->pos();
- do {
- fixup_pos = link;
- link = target_at(fixup_pos);
- } while (link > 0);
- ASSERT(link == kEndOfChain);
- target_at_put(fixup_pos, appendix->pos());
- } else {
- // L is empty, simply use appendix
- *L = *appendix;
- }
- }
- appendix->Unuse(); // appendix should not be used anymore
-}
-
-
void Assembler::bind(Label* L) {
- ASSERT(!L->is_bound()); // label can only be bound once
+ ASSERT(!L->is_bound()); // Label can only be bound once.
bind_to(L, pc_offset());
}
@@ -433,26 +780,27 @@ void Assembler::bind(Label* L) {
void Assembler::next(Label* L) {
ASSERT(L->is_linked());
int link = target_at(L->pos());
- if (link > 0) {
- L->link_to(link);
- } else {
- ASSERT(link == kEndOfChain);
+ ASSERT(link > 0 || link == kEndOfChain);
+ if (link == kEndOfChain) {
L->Unuse();
+ } else if (link > 0) {
+ L->link_to(link);
}
}
+bool Assembler::is_near(Label* L) {
+ if (L->is_bound()) {
+ return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
+ }
+ return false;
+}
// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
-bool Assembler::MustUseAt(RelocInfo::Mode rmode) {
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- return Serializer::enabled();
- } else if (rmode == RelocInfo::NONE) {
- return false;
- }
- return true;
+bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
+ return rmode != RelocInfo::NONE;
}
@@ -470,14 +818,28 @@ void Assembler::GenInstrRegister(Opcode opcode,
void Assembler::GenInstrRegister(Opcode opcode,
+ Register rs,
+ Register rt,
+ uint16_t msb,
+ uint16_t lsb,
+ SecondaryField func) {
+ ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+ | (msb << kRdShift) | (lsb << kSaShift) | func;
+ emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
SecondaryField fmt,
FPURegister ft,
FPURegister fs,
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
- Instr instr = opcode | fmt | (ft.code() << 16) | (fs.code() << kFsShift)
- | (fd.code() << 6) | func;
+ ASSERT(CpuFeatures::IsEnabled(FPU));
+ Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
+ | (fd.code() << kFdShift) | func;
emit(instr);
}
@@ -489,8 +851,22 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
+ ASSERT(CpuFeatures::IsEnabled(FPU));
Instr instr = opcode | fmt | (rt.code() << kRtShift)
- | (fs.code() << kFsShift) | (fd.code() << 6) | func;
+ | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
+ emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+ SecondaryField fmt,
+ Register rt,
+ FPUControlRegister fs,
+ SecondaryField func) {
+ ASSERT(fs.is_valid() && rt.is_valid());
+ ASSERT(CpuFeatures::IsEnabled(FPU));
+ Instr instr =
+ opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
emit(instr);
}
@@ -523,35 +899,85 @@ void Assembler::GenInstrImmediate(Opcode opcode,
FPURegister ft,
int32_t j) {
ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
+ ASSERT(CpuFeatures::IsEnabled(FPU));
Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
| (j & kImm16Mask);
emit(instr);
}
-// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrJump(Opcode opcode,
uint32_t address) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
ASSERT(is_uint26(address));
Instr instr = opcode | address;
emit(instr);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+}
+
+
+// Returns the next free trampoline entry.
+int32_t Assembler::get_trampoline_entry(int32_t pos) {
+ int32_t trampoline_entry = kInvalidSlotPos;
+
+ if (!internal_trampoline_exception_) {
+ if (trampoline_.start() > pos) {
+ trampoline_entry = trampoline_.take_slot();
+ }
+
+ if (kInvalidSlotPos == trampoline_entry) {
+ internal_trampoline_exception_ = true;
+ }
+ }
+ return trampoline_entry;
+}
+
+
+uint32_t Assembler::jump_address(Label* L) {
+ int32_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ return kEndOfJumpChain;
+ }
+ }
+
+ uint32_t imm = (uint32_t)buffer_ + target_pos;
+ ASSERT((imm & 3) == 0);
+
+ return imm;
}
int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
int32_t target_pos;
+
if (L->is_bound()) {
target_pos = L->pos();
} else {
if (L->is_linked()) {
- target_pos = L->pos(); // L's link
+ target_pos = L->pos();
+ L->link_to(pc_offset());
} else {
- target_pos = kEndOfChain;
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
}
- L->link_to(pc_offset());
}
int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+ ASSERT((offset & 3) == 0);
+ ASSERT(is_int16(offset >> 2));
+
return offset;
}
@@ -560,14 +986,24 @@ void Assembler::label_at_put(Label* L, int at_offset) {
int target_pos;
if (L->is_bound()) {
target_pos = L->pos();
+ instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
} else {
if (L->is_linked()) {
- target_pos = L->pos(); // L's link
+ target_pos = L->pos(); // L's link.
+ int32_t imm18 = target_pos - at_offset;
+ ASSERT((imm18 & 3) == 0);
+ int32_t imm16 = imm18 >> 2;
+ ASSERT(is_int16(imm16));
+ instr_at_put(at_offset, (imm16 & kImm16Mask));
} else {
target_pos = kEndOfChain;
+ instr_at_put(at_offset, 0);
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
}
L->link_to(at_offset);
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
}
}
@@ -580,47 +1016,66 @@ void Assembler::b(int16_t offset) {
void Assembler::bal(int16_t offset) {
+ positions_recorder()->WriteRecordedPositions();
bgezal(zero_reg, offset);
}
void Assembler::beq(Register rs, Register rt, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BEQ, rs, rt, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bgez(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BGEZ, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bgezal(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bgtz(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BGTZ, rs, zero_reg, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::blez(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BLEZ, rs, zero_reg, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bltz(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZ, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bltzal(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bne(Register rs, Register rt, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BNE, rs, rt, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -631,18 +1086,27 @@ void Assembler::j(int32_t target) {
void Assembler::jr(Register rs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (rs.is(ra)) {
+ positions_recorder()->WriteRecordedPositions();
+ }
GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::jal(int32_t target) {
+ positions_recorder()->WriteRecordedPositions();
ASSERT(is_uint28(target) && ((target & 3) == 0));
GenInstrJump(JAL, target >> 2);
}
void Assembler::jalr(Register rs, Register rd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -650,31 +1114,16 @@ void Assembler::jalr(Register rs, Register rd) {
// Arithmetic.
-void Assembler::add(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADD);
-}
-
-
void Assembler::addu(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}
-void Assembler::addi(Register rd, Register rs, int32_t j) {
- GenInstrImmediate(ADDI, rs, rd, j);
-}
-
-
void Assembler::addiu(Register rd, Register rs, int32_t j) {
GenInstrImmediate(ADDIU, rs, rd, j);
}
-void Assembler::sub(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUB);
-}
-
-
void Assembler::subu(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}
@@ -743,7 +1192,15 @@ void Assembler::nor(Register rd, Register rs, Register rt) {
// Shifts.
-void Assembler::sll(Register rd, Register rt, uint16_t sa) {
+void Assembler::sll(Register rd,
+ Register rt,
+ uint16_t sa,
+ bool coming_from_nop) {
+ // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
+ // generated using the sll instruction. They must be generated using
+ // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
+ // instructions.
+ ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}
@@ -773,30 +1230,134 @@ void Assembler::srav(Register rd, Register rt, Register rs) {
}
+void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
+ // Should be called via MacroAssembler::Ror.
+ ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
+ ASSERT(mips32r2);
+ Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
+ | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
+ emit(instr);
+}
+
+
+void Assembler::rotrv(Register rd, Register rt, Register rs) {
+ // Should be called via MacroAssembler::Ror.
+ ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid());
+ ASSERT(mips32r2);
+ Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+ | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
+ emit(instr);
+}
+
+
//------------Memory-instructions-------------
+// Helper for base-reg + offset, when the offset doesn't fit in int16.
+void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
+ ASSERT(!src.rm().is(at));
+ lui(at, src.offset_ >> kLuiShift);
+ ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
+ addu(at, at, src.rm()); // Add base register.
+}
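
LoadRegPlusOffsetToAt() materializes a 32-bit offset in at with a lui/ori pair before adding the base register; the split is plain masking. A sketch, assuming kLuiShift == 16 and kImm16Mask == 0xffff as elsewhere in this port:

#include <cstdint>

constexpr int kLuiShift = 16;
constexpr uint32_t kImm16Mask = 0xffff;

struct Halves { uint32_t hi; uint32_t lo; };

// lui writes the high half and zeroes the low half; ori fills the low half,
// so (hi << 16) | lo reconstructs the original offset exactly.
Halves SplitOffset(int32_t offset) {
  uint32_t u = static_cast<uint32_t>(offset);
  return { u >> kLuiShift, u & kImm16Mask };
}

int main() {
  Halves h = SplitOffset(0x12348000);  // Does not fit in a signed 16-bit field.
  return (h.hi == 0x1234 && h.lo == 0x8000) ? 0 : 1;
}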
+
+
void Assembler::lb(Register rd, const MemOperand& rs) {
- GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
+ }
}
void Assembler::lbu(Register rd, const MemOperand& rs) {
- GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::lh(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::lhu(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0));
+ }
}
void Assembler::lw(Register rd, const MemOperand& rs) {
- GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::lwl(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lwr(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
void Assembler::sb(Register rd, const MemOperand& rs) {
- GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::sh(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0));
+ }
}
void Assembler::sw(Register rd, const MemOperand& rs) {
- GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::swl(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::swr(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
@@ -808,13 +1369,37 @@ void Assembler::lui(Register rd, int32_t j) {
//-------------Misc-instructions--------------
// Break / Trap instructions.
-void Assembler::break_(uint32_t code) {
+void Assembler::break_(uint32_t code, bool break_as_stop) {
ASSERT((code & ~0xfffff) == 0);
+ // We need to invalidate breaks that could be stops as well because the
+ // simulator expects a char pointer after the stop instruction.
+ // See constants-mips.h for explanation.
+ ASSERT((break_as_stop &&
+ code <= kMaxStopCode &&
+ code > kMaxWatchpointCode) ||
+ (!break_as_stop &&
+ (code > kMaxStopCode ||
+ code <= kMaxWatchpointCode)));
Instr break_instr = SPECIAL | BREAK | (code << 6);
emit(break_instr);
}
+void Assembler::stop(const char* msg, uint32_t code) {
+ ASSERT(code > kMaxWatchpointCode);
+ ASSERT(code <= kMaxStopCode);
+#if defined(V8_HOST_ARCH_MIPS)
+ break_(0x54321);
+#else // V8_HOST_ARCH_MIPS
+ BlockTrampolinePoolFor(2);
+ // The Simulator will handle the stop instruction and get the message address.
+ // On MIPS stop() is just a special kind of break_().
+ break_(code, true);
+ emit(reinterpret_cast<Instr>(msg));
+#endif
+}
+
+
void Assembler::tge(Register rs, Register rt, uint16_t code) {
ASSERT(is_uint10(code));
Instr instr = SPECIAL | TGE | rs.code() << kRsShift
@@ -841,7 +1426,8 @@ void Assembler::tlt(Register rs, Register rt, uint16_t code) {
void Assembler::tltu(Register rs, Register rt, uint16_t code) {
ASSERT(is_uint10(code));
- Instr instr = SPECIAL | TLTU | rs.code() << kRsShift
+ Instr instr =
+ SPECIAL | TLTU | rs.code() << kRsShift
| rt.code() << kRtShift | code << 6;
emit(instr);
}
@@ -896,6 +1482,54 @@ void Assembler::sltiu(Register rt, Register rs, int32_t j) {
}
+// Conditional move.
+void Assembler::movz(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
+}
+
+
+void Assembler::movn(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
+}
+
+
+void Assembler::movt(Register rd, Register rs, uint16_t cc) {
+ Register rt;
+ rt.code_ = (cc & 0x0007) << 2 | 1;
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
+}
+
+
+void Assembler::movf(Register rd, Register rs, uint16_t cc) {
+ Register rt;
+ rt.code_ = (cc & 0x0007) << 2 | 0;
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
+}
+
+
+// Bit twiddling.
+void Assembler::clz(Register rd, Register rs) {
+ // Clz instr requires same GPR number in 'rd' and 'rt' fields.
+ GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+}
+
+
+void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Ins.
+ // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
+ ASSERT(mips32r2);
+ GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
+}
+
+
+void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Ext.
+ // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
+ ASSERT(mips32r2);
+ GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
+}
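
ins_ and ext_ encode the bit-field geometry as msb/lsb in the rd and sa slots (msb = pos + size - 1 for ins, size - 1 for ext). A software model of what the two instructions do to register values, as a hedged sketch (sizes kept below 32 so the shifts stay defined):

#include <cstdint>

// Software model of MIPS32r2 ext: rt = (rs >> pos) & ((1 << size) - 1).
uint32_t Ext(uint32_t rs, unsigned pos, unsigned size) {
  return (rs >> pos) & ((1u << size) - 1u);
}

// Software model of ins: replace bits [pos, pos + size - 1] of rt with
// the low 'size' bits of rs.
uint32_t Ins(uint32_t rt, uint32_t rs, unsigned pos, unsigned size) {
  uint32_t mask = ((1u << size) - 1u) << pos;
  return (rt & ~mask) | ((rs << pos) & mask);
}

int main() {
  uint32_t v = 0xABCD1234u;
  return (Ext(v, 8, 8) == 0x12u && Ins(0, 0xFFu, 4, 8) == 0xFF0u) ? 0 : 1;
}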
+
+
//--------Coprocessor-instructions----------------
// Load, store, move.
@@ -905,7 +1539,12 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
- GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
+ // Workaround for the non-8-byte alignment of HeapNumber: convert the
+ // 64-bit load into two 32-bit loads.
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
}
@@ -915,27 +1554,74 @@ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
- GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
+ // Workaround for the non-8-byte alignment of HeapNumber: convert the
+ // 64-bit store into two 32-bit stores.
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
}
-void Assembler::mtc1(FPURegister fs, Register rt) {
+void Assembler::mtc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MTC1, rt, fs, f0);
}
-void Assembler::mthc1(FPURegister fs, Register rt) {
- GenInstrRegister(COP1, MTHC1, rt, fs, f0);
+void Assembler::mfc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, MFC1, rt, fs, f0);
}
-void Assembler::mfc1(FPURegister fs, Register rt) {
- GenInstrRegister(COP1, MFC1, rt, fs, f0);
+void Assembler::ctc1(Register rt, FPUControlRegister fs) {
+ GenInstrRegister(COP1, CTC1, rt, fs);
+}
+
+
+void Assembler::cfc1(Register rt, FPUControlRegister fs) {
+ GenInstrRegister(COP1, CFC1, rt, fs);
+}
+
+
+// Arithmetic.
+
+void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
+}
+
+
+void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
+}
+
+
+void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
+}
+
+
+void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}
-void Assembler::mfhc1(FPURegister fs, Register rt) {
- GenInstrRegister(COP1, MFHC1, rt, fs, f0);
+void Assembler::abs_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
+}
+
+
+void Assembler::mov_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
+}
+
+
+void Assembler::neg_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
+}
+
+
+void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}
@@ -951,22 +1637,107 @@ void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
}
+void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
+}
+
+
+void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
+}
+
+
+void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
+}
+
+
+void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
+}
+
+
+void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
+}
+
+
+void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
+}
+
+
+void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
+}
+
+
+void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
+}
+
+
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
+void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
+ GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
+}
+
+
+void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
+ GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
+}
+
+
+void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
+}
+
+
+void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
+}
+
+
+void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
+}
+
+
+void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
+}
+
+
+void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
+}
+
+
+void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
+}
+
+
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
@@ -982,6 +1753,7 @@ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
@@ -993,7 +1765,8 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
// Conditions.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
- FPURegister ft, FPURegister fs, uint16_t cc) {
+ FPURegister fs, FPURegister ft, uint16_t cc) {
+ ASSERT(CpuFeatures::IsEnabled(FPU));
ASSERT(is_uint3(cc));
ASSERT((fmt & ~(31 << kRsShift)) == 0);
Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
@@ -1002,7 +1775,18 @@ void Assembler::c(FPUCondition cond, SecondaryField fmt,
}
+void Assembler::fcmp(FPURegister src1, const double src2,
+ FPUCondition cond) {
+ ASSERT(CpuFeatures::IsEnabled(FPU));
+ ASSERT(src2 == 0.0);
+ mtc1(zero_reg, f14);
+ cvt_d_w(f14, f14);
+ c(cond, D, src1, f14, 0);
+}
+
+
void Assembler::bc1f(int16_t offset, uint16_t cc) {
+ ASSERT(CpuFeatures::IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
emit(instr);
@@ -1010,6 +1794,7 @@ void Assembler::bc1f(int16_t offset, uint16_t cc) {
void Assembler::bc1t(int16_t offset, uint16_t cc) {
+ ASSERT(CpuFeatures::IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
emit(instr);
@@ -1018,58 +1803,66 @@ void Assembler::bc1t(int16_t offset, uint16_t cc) {
// Debugging.
void Assembler::RecordJSReturn() {
- WriteRecordedPositions();
+ positions_recorder()->WriteRecordedPositions();
CheckBuffer();
RecordRelocInfo(RelocInfo::JS_RETURN);
}
+void Assembler::RecordDebugBreakSlot() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
void Assembler::RecordComment(const char* msg) {
- if (FLAG_debug_code) {
+ if (FLAG_code_comments) {
CheckBuffer();
RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
}
}
-void Assembler::RecordPosition(int pos) {
- if (pos == RelocInfo::kNoPosition) return;
- ASSERT(pos >= 0);
- current_position_ = pos;
-}
-
-
-void Assembler::RecordStatementPosition(int pos) {
- if (pos == RelocInfo::kNoPosition) return;
- ASSERT(pos >= 0);
- current_statement_position_ = pos;
-}
+int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
+ Instr instr = instr_at(pc);
+ ASSERT(IsJ(instr) || IsLui(instr));
+ if (IsLui(instr)) {
+ Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
+ ASSERT(IsOri(instr_ori));
+ int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ if (imm == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+ imm += pc_delta;
+ ASSERT((imm & 3) == 0);
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
-bool Assembler::WriteRecordedPositions() {
- bool written = false;
+ instr_at_put(pc + 0 * Assembler::kInstrSize,
+ instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+ instr_at_put(pc + 1 * Assembler::kInstrSize,
+ instr_ori | (imm & kImm16Mask));
+ return 2; // Number of instructions patched.
+ } else {
+ uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+ if ((int32_t)imm28 == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+ imm28 += pc_delta;
+ imm28 &= kImm28Mask;
+ ASSERT((imm28 & 3) == 0);
- // Write the statement position if it is different from what was written last
- // time.
- if (current_statement_position_ != written_statement_position_) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
- written_statement_position_ = current_statement_position_;
- written = true;
- }
+ instr &= ~kImm26Mask;
+ uint32_t imm26 = imm28 >> 2;
+ ASSERT(is_uint26(imm26));
- // Write the position if it is different from what was written last time and
- // also different from the written statement position.
- if (current_position_ != written_position_ &&
- current_position_ != written_statement_position_) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::POSITION, current_position_);
- written_position_ = current_position_;
- written = true;
+ instr_at_put(pc, instr | (imm26 & kImm26Mask));
+ return 1; // Number of instructions patched.
}
-
- // Return whether something was written.
- return written;
}
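
RelocateInternalReference() re-extracts the 32-bit constant held by a lui/ori pair, adds the buffer's pc_delta, and writes the halves back. The immediate surgery on the raw instruction words looks like the following sketch (field constants assumed as above; the example encodings for lui at / ori at, at are the standard MIPS32 ones):

#include <cassert>
#include <cstdint>

constexpr uint32_t kImm16Mask = 0xffff;
constexpr int kLuiShift = 16;

// Patch the 32-bit constant split across a lui/ori pair by delta bytes,
// keeping everything but the two 16-bit immediates intact.
void PatchLuiOriPair(uint32_t* lui, uint32_t* ori, int32_t delta) {
  uint32_t imm = ((*lui & kImm16Mask) << kLuiShift) | (*ori & kImm16Mask);
  imm += static_cast<uint32_t>(delta);
  assert((imm & 3) == 0);  // Code addresses stay word aligned.
  *lui = (*lui & ~kImm16Mask) | ((imm >> kLuiShift) & kImm16Mask);
  *ori = (*ori & ~kImm16Mask) | (imm & kImm16Mask);
}

int main() {
  uint32_t lui = 0x3c010000u | 0x1234;  // lui at, 0x1234
  uint32_t ori = 0x34210000u | 0x5678;  // ori at, at, 0x5678
  PatchLuiOriPair(&lui, &ori, 0x10000);
  return ((lui & kImm16Mask) == 0x1235 && (ori & kImm16Mask) == 0x5678) ? 0 : 1;
}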
@@ -1077,7 +1870,7 @@ void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
// Compute new buffer size.
- CodeDesc desc; // the new buffer
+ CodeDesc desc; // The new buffer.
if (buffer_size_ < 4*KB) {
desc.buffer_size = 4*KB;
} else if (buffer_size_ < 1*MB) {
@@ -1085,7 +1878,7 @@ void Assembler::GrowBuffer() {
} else {
desc.buffer_size = buffer_size_ + 1*MB;
}
- CHECK_GT(desc.buffer_size, 0); // no overflow
+ CHECK_GT(desc.buffer_size, 0); // No overflow.
// Setup new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
@@ -1108,20 +1901,39 @@ void Assembler::GrowBuffer() {
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
-
- // On ia32 and ARM pc relative addressing is used, and we thus need to apply a
- // shift by pc_delta. But on MIPS the target address it directly loaded, so
- // we do not need to relocate here.
+ // Relocate runtime entries.
+ for (RelocIterator it(desc); !it.done(); it.next()) {
+ RelocInfo::Mode rmode = it.rinfo()->rmode();
+ if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
+ RelocateInternalReference(p, pc_delta);
+ }
+ }
ASSERT(!overflow());
}
+void Assembler::db(uint8_t data) {
+ CheckBuffer();
+ *reinterpret_cast<uint8_t*>(pc_) = data;
+ pc_ += sizeof(uint8_t);
+}
+
+
+void Assembler::dd(uint32_t data) {
+ CheckBuffer();
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
+}
+
+
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
- if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+ RelocInfo rinfo(pc_, rmode, data); // We do not try to reuse pool constants.
+ if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
// Adjust code for new modes.
- ASSERT(RelocInfo::IsJSReturn(rmode)
+ ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
+ || RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
|| RelocInfo::IsPosition(rmode));
// These modes do not need an entry in the constant pool.
@@ -1133,82 +1945,120 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
!FLAG_debug_code) {
return;
}
- ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- reloc_info_writer.Write(&rinfo);
+ ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+ ASSERT(ast_id_for_reloc_info_ != kNoASTId);
+ RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_);
+ ast_id_for_reloc_info_ = kNoASTId;
+ reloc_info_writer.Write(&reloc_info_with_ast_id);
+ } else {
+ reloc_info_writer.Write(&rinfo);
+ }
}
}
+void Assembler::BlockTrampolinePoolFor(int instructions) {
+ BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+
+void Assembler::CheckTrampolinePool() {
+ // Some small sequences of instructions must not be broken up by the
+ // insertion of a trampoline pool; such sequences are protected by setting
+ // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
+ // which are both checked here. Also, recursive calls to CheckTrampolinePool
+ // are blocked by trampoline_pool_blocked_nesting_.
+ if ((trampoline_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_trampoline_pool_before_)) {
+ // Emission is currently blocked; make sure we try again as soon as
+ // possible.
+ if (trampoline_pool_blocked_nesting_ > 0) {
+ next_buffer_check_ = pc_offset() + kInstrSize;
+ } else {
+ next_buffer_check_ = no_trampoline_pool_before_;
+ }
+ return;
+ }
+
+ ASSERT(!trampoline_emitted_);
+ ASSERT(unbound_labels_count_ >= 0);
+ if (unbound_labels_count_ > 0) {
+ // First we emit jump (2 instructions), then we emit trampoline pool.
+ { BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label after_pool;
+ b(&after_pool);
+ nop();
+
+ int pool_start = pc_offset();
+ for (int i = 0; i < unbound_labels_count_; i++) {
+ uint32_t imm32;
+ imm32 = jump_address(&after_pool);
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal
+ // references until associated instructions are emitted and available
+ // to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ jr(at);
+ nop();
+ }
+ bind(&after_pool);
+ trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+
+ trampoline_emitted_ = true;
+ // As we are only going to emit the trampoline once, we need to prevent
+ // any further emission.
+ next_buffer_check_ = kMaxInt;
+ }
+ } else {
+ // The number of branches to unbound labels at this point is zero, so we
+ // can move the next buffer check to the maximum.
+ next_buffer_check_ = pc_offset() +
+ kMaxBranchOffset - kTrampolineSlotsSize * 16;
+ }
+ return;
+}
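
CheckTrampolinePool() must emit the pool while every pending short branch can still reach it: each unbound label will occupy one slot (lui/ori/jr/nop here), and branch_offset() above pulls next_buffer_check_ forward by kTrampolineSlotsSize per unbound label. A sketch of that budget arithmetic, with assumed values kInstrSize == 4, four-instruction slots, and a (1 << 17) - 1 byte branch range:

#include <cstdint>

constexpr int kInstrSize = 4;
constexpr int kTrampolineSlotsSize = 4 * kInstrSize;  // lui/ori/jr/nop.
constexpr int kMaxBranchOffset = (1 << 17) - 1;       // 16 bits of words.

// Earliest pc_offset at which the pool must be (re)considered so that a
// branch emitted now can still reach a slot emitted then.
int NextBufferCheck(int pc_offset, int unbound_labels) {
  return pc_offset + kMaxBranchOffset
         - kTrampolineSlotsSize * 16              // Safety margin used above.
         - kTrampolineSlotsSize * unbound_labels;  // Room for the slots.
}

int main() {
  // With 3 unbound labels at pc 0x1000 the check lands well inside range.
  return NextBufferCheck(0x1000, 3) < 0x1000 + kMaxBranchOffset ? 0 : 1;
}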
+
+
Address Assembler::target_address_at(Address pc) {
Instr instr1 = instr_at(pc);
Instr instr2 = instr_at(pc + kInstrSize);
- // Check we have 2 instructions generated by li.
- ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
- ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDI ||
- (instr2 & kOpcodeMask) == ORI ||
- (instr2 & kOpcodeMask) == LUI)));
- // Interpret these 2 instructions.
- if (instr1 == nopInstr) {
- if ((instr2 & kOpcodeMask) == ADDI) {
- return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16);
- } else if ((instr2 & kOpcodeMask) == ORI) {
- return reinterpret_cast<Address>(instr2 & kImm16Mask);
- } else if ((instr2 & kOpcodeMask) == LUI) {
- return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
- }
- } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
- // 32 bits value.
+ // Interpret the two instructions generated by li: lui/ori.
+ if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
+ // Assemble the 32-bit value.
return reinterpret_cast<Address>(
- (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
+ (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
}
- // We should never get here.
+ // We should never get here; force a bad address if we do.
UNREACHABLE();
return (Address)0x0;
}
void Assembler::set_target_address_at(Address pc, Address target) {
- // On MIPS we need to patch the code to generate.
+ // On MIPS we patch the address into lui/ori instruction pair.
- // First check we have a li.
+ // First check we have an li (lui/ori pair).
Instr instr2 = instr_at(pc + kInstrSize);
#ifdef DEBUG
Instr instr1 = instr_at(pc);
- // Check we have indeed the result from a li with MustUseAt true.
- CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
- ((instr1 == 0) && ((instr2 & kOpcodeMask)== ADDIU ||
- (instr2 & kOpcodeMask)== ORI ||
- (instr2 & kOpcodeMask)== LUI)));
+ // Check we have indeed the result from a li with MustUseReg true.
+ CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
#endif
-
- uint32_t rt_code = (instr2 & kRtFieldMask);
+ uint32_t rt_code = GetRtField(instr2);
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
uint32_t itarget = reinterpret_cast<uint32_t>(target);
- if (is_int16(itarget)) {
- // nop
- // addiu rt zero_reg j
- *p = nopInstr;
- *(p+1) = ADDIU | rt_code | (itarget & LOMask);
- } else if (!(itarget & HIMask)) {
- // nop
- // ori rt zero_reg j
- *p = nopInstr;
- *(p+1) = ORI | rt_code | (itarget & LOMask);
- } else if (!(itarget & LOMask)) {
- // nop
- // lui rt (HIMask & itarget)>>16
- *p = nopInstr;
- *(p+1) = LUI | rt_code | ((itarget & HIMask)>>16);
- } else {
- // lui rt (HIMask & itarget)>>16
- // ori rt rt, (LOMask & itarget)
- *p = LUI | rt_code | ((itarget & HIMask)>>16);
- *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & LOMask);
- }
+ // lui rt, high-16.
+ // ori rt, rt, low-16.
+ *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
+ *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
CPU::FlushICache(pc, 2 * sizeof(int32_t));
}
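
target_address_at() and set_target_address_at() treat the lui/ori pair as one 32-bit cell: reading reassembles (hi16 << 16) | lo16, writing re-splits the target while preserving the opcode and register fields. A round-trip sketch on two raw words:

#include <cstdint>

constexpr uint32_t kImm16Mask = 0xffff;
constexpr int kLuiShift = 16;

uint32_t ReadTarget(uint32_t lui, uint32_t ori) {
  return ((lui & kImm16Mask) << kLuiShift) | (ori & kImm16Mask);
}

void WriteTarget(uint32_t* lui, uint32_t* ori, uint32_t target) {
  *lui = (*lui & ~kImm16Mask) | (target >> kLuiShift);
  *ori = (*ori & ~kImm16Mask) | (target & kImm16Mask);
}

int main() {
  uint32_t lui = 0x3c010000u, ori = 0x34210000u;  // lui at, 0; ori at, at, 0.
  WriteTarget(&lui, &ori, 0xDEADBEECu);  // Word-aligned example target.
  return ReadTarget(lui, ori) == 0xDEADBEECu ? 0 : 1;
}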
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index a687c2b8f..92c958b96 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
@@ -41,8 +41,6 @@
#include "constants-mips.h"
#include "serialize.h"
-using namespace assembler::mips;
-
namespace v8 {
namespace internal {
@@ -69,10 +67,49 @@ namespace internal {
// -----------------------------------------------------------------------------
-// Implementation of Register and FPURegister
+// Implementation of Register and FPURegister.
// Core register.
struct Register {
+ static const int kNumRegisters = v8::internal::kNumRegisters;
+ static const int kNumAllocatableRegisters = 14; // v0 through t7.
+ static const int kSizeInBytes = 4;
+
+ static int ToAllocationIndex(Register reg) {
+ return reg.code() - 2; // zero_reg and 'at' are skipped.
+ }
+
+ static Register FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ return from_code(index + 2); // zero_reg and 'at' are skipped.
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ const char* const names[] = {
+ "v0",
+ "v1",
+ "a0",
+ "a1",
+ "a2",
+ "a3",
+ "t0",
+ "t1",
+ "t2",
+ "t3",
+ "t4",
+ "t5",
+ "t6",
+ "t7",
+ };
+ return names[index];
+ }
+
+ static Register from_code(int code) {
+ Register r = { code };
+ return r;
+ }
+
bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
bool is(Register reg) const { return code_ == reg.code_; }
int code() const {
@@ -88,40 +125,41 @@ struct Register {
int code_;
};
-extern const Register no_reg;
-
-extern const Register zero_reg;
-extern const Register at;
-extern const Register v0;
-extern const Register v1;
-extern const Register a0;
-extern const Register a1;
-extern const Register a2;
-extern const Register a3;
-extern const Register t0;
-extern const Register t1;
-extern const Register t2;
-extern const Register t3;
-extern const Register t4;
-extern const Register t5;
-extern const Register t6;
-extern const Register t7;
-extern const Register s0;
-extern const Register s1;
-extern const Register s2;
-extern const Register s3;
-extern const Register s4;
-extern const Register s5;
-extern const Register s6;
-extern const Register s7;
-extern const Register t8;
-extern const Register t9;
-extern const Register k0;
-extern const Register k1;
-extern const Register gp;
-extern const Register sp;
-extern const Register s8_fp;
-extern const Register ra;
+const Register no_reg = { -1 };
+
+const Register zero_reg = { 0 };
+const Register at = { 1 };
+const Register v0 = { 2 };
+const Register v1 = { 3 };
+const Register a0 = { 4 };
+const Register a1 = { 5 };
+const Register a2 = { 6 };
+const Register a3 = { 7 };
+const Register t0 = { 8 };
+const Register t1 = { 9 };
+const Register t2 = { 10 };
+const Register t3 = { 11 };
+const Register t4 = { 12 };
+const Register t5 = { 13 };
+const Register t6 = { 14 };
+const Register t7 = { 15 };
+const Register s0 = { 16 };
+const Register s1 = { 17 };
+const Register s2 = { 18 };
+const Register s3 = { 19 };
+const Register s4 = { 20 };
+const Register s5 = { 21 };
+const Register s6 = { 22 };
+const Register s7 = { 23 };
+const Register t8 = { 24 };
+const Register t9 = { 25 };
+const Register k0 = { 26 };
+const Register k1 = { 27 };
+const Register gp = { 28 };
+const Register sp = { 29 };
+const Register s8_fp = { 30 };
+const Register ra = { 31 };
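
The allocation-index mapping above just skips zero_reg and at, so allocatable index i corresponds to hardware register i + 2 (v0 through t7). A tiny round-trip check of the two mappings:

#include <cassert>

constexpr int kNumAllocatableRegisters = 14;  // v0 through t7.

int ToAllocationIndex(int reg_code) { return reg_code - 2; }
int FromAllocationIndex(int index) {
  assert(index >= 0 && index < kNumAllocatableRegisters);
  return index + 2;
}

int main() {
  // v0 is hardware register 2 and allocation index 0; t7 is 15 and 13.
  for (int i = 0; i < kNumAllocatableRegisters; ++i) {
    assert(ToAllocationIndex(FromAllocationIndex(i)) == i);
  }
  return 0;
}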
+
int ToNumber(Register reg);
@@ -129,7 +167,50 @@ Register ToRegister(int num);
// Coprocessor register.
struct FPURegister {
- bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegister ; }
+ static const int kNumRegisters = v8::internal::kNumFPURegisters;
+ // f0 has been excluded from allocation. This follows ia32, where xmm0
+ // is excluded.
+ static const int kNumAllocatableRegisters = 15;
+
+ static int ToAllocationIndex(FPURegister reg) {
+ ASSERT(reg.code() != 0);
+ ASSERT(reg.code() % 2 == 0);
+ return (reg.code() / 2) - 1;
+ }
+
+ static FPURegister FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ return from_code((index + 1) * 2);
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ const char* const names[] = {
+ "f2",
+ "f4",
+ "f6",
+ "f8",
+ "f10",
+ "f12",
+ "f14",
+ "f16",
+ "f18",
+ "f20",
+ "f22",
+ "f24",
+ "f26",
+ "f28",
+ "f30"
+ };
+ return names[index];
+ }
+
+ static FPURegister from_code(int code) {
+ FPURegister r = { code };
+ return r;
+ }
+
+ bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters; }
bool is(FPURegister creg) const { return code_ == creg.code_; }
int code() const {
ASSERT(is_valid());
@@ -139,84 +220,74 @@ struct FPURegister {
ASSERT(is_valid());
return 1 << code_;
}
-
+ void setcode(int f) {
+ code_ = f;
+ ASSERT(is_valid());
+ }
// Unfortunately we can't make this private in a struct.
int code_;
};
-extern const FPURegister no_creg;
-
-extern const FPURegister f0;
-extern const FPURegister f1;
-extern const FPURegister f2;
-extern const FPURegister f3;
-extern const FPURegister f4;
-extern const FPURegister f5;
-extern const FPURegister f6;
-extern const FPURegister f7;
-extern const FPURegister f8;
-extern const FPURegister f9;
-extern const FPURegister f10;
-extern const FPURegister f11;
-extern const FPURegister f12; // arg
-extern const FPURegister f13;
-extern const FPURegister f14; // arg
-extern const FPURegister f15;
-extern const FPURegister f16;
-extern const FPURegister f17;
-extern const FPURegister f18;
-extern const FPURegister f19;
-extern const FPURegister f20;
-extern const FPURegister f21;
-extern const FPURegister f22;
-extern const FPURegister f23;
-extern const FPURegister f24;
-extern const FPURegister f25;
-extern const FPURegister f26;
-extern const FPURegister f27;
-extern const FPURegister f28;
-extern const FPURegister f29;
-extern const FPURegister f30;
-extern const FPURegister f31;
-
-
-// Returns the equivalent of !cc.
-// Negation of the default no_condition (-1) results in a non-default
-// no_condition value (-2). As long as tests for no_condition check
-// for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc);
-
-inline Condition ReverseCondition(Condition cc) {
- switch (cc) {
- case Uless:
- return Ugreater;
- case Ugreater:
- return Uless;
- case Ugreater_equal:
- return Uless_equal;
- case Uless_equal:
- return Ugreater_equal;
- case less:
- return greater;
- case greater:
- return less;
- case greater_equal:
- return less_equal;
- case less_equal:
- return greater_equal;
- default:
- return cc;
- };
-}
-
-
-enum Hint {
- no_hint = 0
+typedef FPURegister DoubleRegister;
+
+const FPURegister no_creg = { -1 };
+
+const FPURegister f0 = { 0 }; // Return value in hard float mode.
+const FPURegister f1 = { 1 };
+const FPURegister f2 = { 2 };
+const FPURegister f3 = { 3 };
+const FPURegister f4 = { 4 };
+const FPURegister f5 = { 5 };
+const FPURegister f6 = { 6 };
+const FPURegister f7 = { 7 };
+const FPURegister f8 = { 8 };
+const FPURegister f9 = { 9 };
+const FPURegister f10 = { 10 };
+const FPURegister f11 = { 11 };
+const FPURegister f12 = { 12 }; // Arg 0 in hard float mode.
+const FPURegister f13 = { 13 };
+const FPURegister f14 = { 14 }; // Arg 1 in hard float mode.
+const FPURegister f15 = { 15 };
+const FPURegister f16 = { 16 };
+const FPURegister f17 = { 17 };
+const FPURegister f18 = { 18 };
+const FPURegister f19 = { 19 };
+const FPURegister f20 = { 20 };
+const FPURegister f21 = { 21 };
+const FPURegister f22 = { 22 };
+const FPURegister f23 = { 23 };
+const FPURegister f24 = { 24 };
+const FPURegister f25 = { 25 };
+const FPURegister f26 = { 26 };
+const FPURegister f27 = { 27 };
+const FPURegister f28 = { 28 };
+const FPURegister f29 = { 29 };
+const FPURegister f30 = { 30 };
+const FPURegister f31 = { 31 };
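
FPU allocation uses only the even-numbered registers (double pairs) and additionally skips f0, so index i maps to register 2 * (i + 1), i.e. f2 through f30, and back via code / 2 - 1. A sketch:

#include <cassert>

constexpr int kNumAllocatableFPURegisters = 15;  // f2, f4, ..., f30.

int FpuToAllocationIndex(int code) {
  assert(code != 0 && code % 2 == 0);  // f0 excluded; doubles live in pairs.
  return code / 2 - 1;
}

int FpuFromAllocationIndex(int index) { return (index + 1) * 2; }

int main() {
  assert(FpuFromAllocationIndex(0) == 2);    // f2.
  assert(FpuFromAllocationIndex(14) == 30);  // f30.
  for (int i = 0; i < kNumAllocatableFPURegisters; ++i) {
    assert(FpuToAllocationIndex(FpuFromAllocationIndex(i)) == i);
  }
  return 0;
}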
+
+// FPU (coprocessor 1) control registers.
+// Currently only FCSR (#31) is implemented.
+struct FPUControlRegister {
+ bool is_valid() const { return code_ == kFCSRRegister; }
+ bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+ void setcode(int f) {
+ code_ = f;
+ ASSERT(is_valid());
+ }
+ // Unfortunately we can't make this private in a struct.
+ int code_;
};
-inline Hint NegateHint(Hint hint) {
- return no_hint;
-}
+const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
+const FPUControlRegister FCSR = { kFCSRRegister };
// -----------------------------------------------------------------------------
@@ -245,7 +316,7 @@ class Operand BASE_EMBEDDED {
private:
Register rm_;
- int32_t imm32_; // Valid if rm_ == no_reg
+ int32_t imm32_; // Valid if rm_ == no_reg.
RelocInfo::Mode rmode_;
friend class Assembler;
@@ -257,17 +328,119 @@ class Operand BASE_EMBEDDED {
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
public:
-
- explicit MemOperand(Register rn, int16_t offset = 0);
+ explicit MemOperand(Register rn, int32_t offset = 0);
+ int32_t offset() const { return offset_; }
private:
- int16_t offset_;
+ int32_t offset_;
friend class Assembler;
};
-class Assembler : public Malloced {
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+class CpuFeatures : public AllStatic {
+ public:
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
+
+ // Check whether a feature is supported by the target CPU.
+ static bool IsSupported(CpuFeature f) {
+ ASSERT(initialized_);
+ if (f == FPU && !FLAG_enable_fpu) return false;
+ return (supported_ & (1u << f)) != 0;
+ }
+
+
+#ifdef DEBUG
+ // Check whether a feature is currently enabled.
+ static bool IsEnabled(CpuFeature f) {
+ ASSERT(initialized_);
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL) {
+ // When no isolate is available, work as if we're running in
+ // release mode.
+ return IsSupported(f);
+ }
+ unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
+ return (enabled & (1u << f)) != 0;
+ }
+#endif
+
+ // Enable a specified feature within a scope.
+ class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+
+ public:
+ explicit Scope(CpuFeature f) {
+ unsigned mask = 1u << f;
+ ASSERT(CpuFeatures::IsSupported(f));
+ ASSERT(!Serializer::enabled() ||
+ (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+ isolate_ = Isolate::UncheckedCurrent();
+ old_enabled_ = 0;
+ if (isolate_ != NULL) {
+ old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
+ isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+ }
+ }
+ ~Scope() {
+ ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+ if (isolate_ != NULL) {
+ isolate_->set_enabled_cpu_features(old_enabled_);
+ }
+ }
+
+ private:
+ Isolate* isolate_;
+ unsigned old_enabled_;
+#else
+
+ public:
+ explicit Scope(CpuFeature f) {}
+#endif
+ };
+
+ class TryForceFeatureScope BASE_EMBEDDED {
+ public:
+ explicit TryForceFeatureScope(CpuFeature f)
+ : old_supported_(CpuFeatures::supported_) {
+ if (CanForce()) {
+ CpuFeatures::supported_ |= (1u << f);
+ }
+ }
+
+ ~TryForceFeatureScope() {
+ if (CanForce()) {
+ CpuFeatures::supported_ = old_supported_;
+ }
+ }
+
+ private:
+ static bool CanForce() {
+ // It's only safe to temporarily force support of CPU features
+ // when there's only a single isolate, which is guaranteed when
+ // the serializer is enabled.
+ return Serializer::enabled();
+ }
+
+ const unsigned old_supported_;
+ };
+
+ private:
+#ifdef DEBUG
+ static bool initialized_;
+#endif
+ static unsigned supported_;
+ static unsigned found_by_runtime_probing_;
+
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
+};
+
+
+class Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
@@ -282,9 +455,12 @@ class Assembler : public Malloced {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
- Assembler(void* buffer, int buffer_size);
+ Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
+ // Overrides the default provided by FLAG_debug_code.
+ void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -304,10 +480,13 @@ class Assembler : public Malloced {
//
// Note: The same Label can be used for forward and backward branches
// but it may be bound only once.
- void bind(Label* L); // binds an unbound label L to the current code position
+ void bind(Label* L); // Binds an unbound label L to current code position.
+ // Determines whether the label is bound and near enough that a branch
+ // instruction can be used to reach it instead of a jump instruction.
+ bool is_near(Label* L);
- // Returns the branch offset to the given label from the current code position
- // Links the label to the current position if it is still unbound
+ // Returns the branch offset to the given label from the current code
+ // position. Links the label to the current position if it is still unbound.
// Manages the jump elimination optimization if the second parameter is true.
int32_t branch_offset(Label* L, bool jump_elimination_allowed);
int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
@@ -315,17 +494,12 @@ class Assembler : public Malloced {
ASSERT((o & 3) == 0); // Assert the offset is aligned.
return o >> 2;
}
+ uint32_t jump_address(Label* L);
// Puts a label's target address at the given position.
// The high 8 bits are set to zero.
void label_at_put(Label* L, int at_offset);
- // Size of an instruction.
- static const int kInstrSize = sizeof(Instr);
-
- // Difference between address of current opcode and target address offset.
- static const int kBranchPCOffset = 4;
-
// Read/Modify the code target address in the branch/call instruction at pc.
static Address target_address_at(Address pc);
static void set_target_address_at(Address pc, Address target);
@@ -344,8 +518,25 @@ class Assembler : public Malloced {
set_target_address_at(instruction_payload, target);
}
- static const int kCallTargetSize = 3 * kPointerSize;
- static const int kExternalTargetSize = 3 * kPointerSize;
+ // Size of an instruction.
+ static const int kInstrSize = sizeof(Instr);
+
+ // Difference between address of current opcode and target address offset.
+ static const int kBranchPCOffset = 4;
+
+ // Here we patch the address in the LUI/ORI instruction pair.
+ // These values are used in the serialization process and must be zero for
+ // the MIPS platform, as Code, Embedded Object, and External-reference
+ // pointers are split across two consecutive instructions and don't exist
+ // separately in the code; the serializer should therefore not step forward
+ // in memory after a target is resolved and written.
+ static const int kCallTargetSize = 0 * kInstrSize;
+ static const int kExternalTargetSize = 0 * kInstrSize;
+
+ // Number of consecutive instructions used to store a 32-bit constant.
+ // Used by RelocInfo::target_address_address() to tell the serializer the
+ // address of the instruction that follows the LUI/ORI instruction pair.
+ static const int kInstructionsFor32BitConstant = 2;
// Distance between the instruction referring to the address of the call
// target and the return address.
@@ -353,19 +544,56 @@ class Assembler : public Malloced {
// Distance between start of patched return sequence and the emitted address
// to jump to.
- static const int kPatchReturnSequenceAddressOffset = kInstrSize;
+ static const int kPatchReturnSequenceAddressOffset = 0;
// Distance between start of patched debug break slot and the emitted address
// to jump to.
- static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
+ static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
+
+ // Difference between address of current opcode and value read from pc
+ // register.
+ static const int kPcLoadDelta = 4;
+
+ // Number of instructions used for the JS return sequence. The constant is
+ // used by the debugger to patch the JS return sequence.
+ static const int kJSReturnSequenceInstructions = 7;
+ static const int kDebugBreakSlotInstructions = 4;
+ static const int kDebugBreakSlotLength =
+ kDebugBreakSlotInstructions * kInstrSize;
+
// ---------------------------------------------------------------------------
// Code generation.
- void nop() { sll(zero_reg, zero_reg, 0); }
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+ // Aligns code to something optimal for a jump target on this platform.
+ void CodeTargetAlign();
+
+ // Different nop operations are used by the code generator to detect certain
+ // states of the generated code.
+ enum NopMarkerTypes {
+ NON_MARKING_NOP = 0,
+ DEBUG_BREAK_NOP,
+ // IC markers.
+ PROPERTY_ACCESS_INLINED,
+ PROPERTY_ACCESS_INLINED_CONTEXT,
+ PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
+ // Helper values.
+ LAST_CODE_MARKER,
+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
+ };
+
+ // Type == 0 is the default non-marking type.
+ void nop(unsigned int type = 0) {
+ ASSERT(type < 32);
+ sll(zero_reg, zero_reg, type, true);
+ }
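
A marking nop is sll zero_reg, zero_reg, type: architecturally a no-op, since the result is discarded into $zero, but the 5-bit sa field carries the marker for later inspection by helpers like IsNop(). A sketch of the encode/decode pair, assuming the standard SLL encoding (opcode 0, funct 0, sa at bit 6):

#include <cstdint>

constexpr uint32_t kSaShift = 6;
constexpr uint32_t kSaMask = 0x1fu << kSaShift;

// sll zero_reg, zero_reg, type: every field is 0 except sa.
uint32_t EncodeMarkingNop(uint32_t type) { return type << kSaShift; }

// IsNop-style check: a word is a marking nop of 'type' iff only sa is set.
bool IsMarkingNop(uint32_t instr, uint32_t type) {
  return (instr & ~kSaMask) == 0 && ((instr & kSaMask) >> kSaShift) == type;
}

int main() {
  uint32_t nop = EncodeMarkingNop(/*DEBUG_BREAK_NOP=*/1);
  return (IsMarkingNop(nop, 1) && !IsMarkingNop(nop, 2)) ? 0 : 1;
}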
- //------- Branch and jump instructions --------
+ // --------Branch-and-jump-instructions----------
// We don't use likely variant of instructions.
void b(int16_t offset);
void b(Label* L) { b(branch_offset(L, false)>>2); }
@@ -388,7 +616,7 @@ class Assembler : public Malloced {
}
// Never use the int16_t b(l)cond version with a branch offset
- // instead of using the Label* version. See Twiki for infos.
+ // instead of using the Label* version.
// Jump targets must be in the current 256 MB-aligned region, i.e., 28 bits.
void j(int32_t target);
@@ -400,9 +628,7 @@ class Assembler : public Malloced {
//-------Data-processing-instructions---------
// Arithmetic.
- void add(Register rd, Register rs, Register rt);
void addu(Register rd, Register rs, Register rt);
- void sub(Register rd, Register rs, Register rt);
void subu(Register rd, Register rs, Register rt);
void mult(Register rs, Register rt);
void multu(Register rs, Register rt);
@@ -410,7 +636,6 @@ class Assembler : public Malloced {
void divu(Register rs, Register rt);
void mul(Register rd, Register rs, Register rt);
- void addi(Register rd, Register rs, int32_t j);
void addiu(Register rd, Register rs, int32_t j);
// Logical.
@@ -425,27 +650,40 @@ class Assembler : public Malloced {
void lui(Register rd, int32_t j);
// Shifts.
- void sll(Register rd, Register rt, uint16_t sa);
+ // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
+ // and may cause problems in normal code. coming_from_nop makes sure this
+ // doesn't happen.
+ void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
void sllv(Register rd, Register rt, Register rs);
void srl(Register rd, Register rt, uint16_t sa);
void srlv(Register rd, Register rt, Register rs);
void sra(Register rt, Register rd, uint16_t sa);
void srav(Register rt, Register rd, Register rs);
+ void rotr(Register rd, Register rt, uint16_t sa);
+ void rotrv(Register rd, Register rt, Register rs);
//------------Memory-instructions-------------
void lb(Register rd, const MemOperand& rs);
void lbu(Register rd, const MemOperand& rs);
+ void lh(Register rd, const MemOperand& rs);
+ void lhu(Register rd, const MemOperand& rs);
void lw(Register rd, const MemOperand& rs);
+ void lwl(Register rd, const MemOperand& rs);
+ void lwr(Register rd, const MemOperand& rs);
void sb(Register rd, const MemOperand& rs);
+ void sh(Register rd, const MemOperand& rs);
void sw(Register rd, const MemOperand& rs);
+ void swl(Register rd, const MemOperand& rs);
+ void swr(Register rd, const MemOperand& rs);
//-------------Misc-instructions--------------
// Break / Trap instructions.
- void break_(uint32_t code);
+ void break_(uint32_t code, bool break_as_stop = false);
+ void stop(const char* msg, uint32_t code = kMaxStopCode);
void tge(Register rs, Register rt, uint16_t code);
void tgeu(Register rs, Register rt, uint16_t code);
void tlt(Register rs, Register rt, uint16_t code);
@@ -463,6 +701,16 @@ class Assembler : public Malloced {
void slti(Register rd, Register rs, int32_t j);
void sltiu(Register rd, Register rs, int32_t j);
+ // Conditional move.
+ void movz(Register rd, Register rs, Register rt);
+ void movn(Register rd, Register rs, Register rt);
+ void movt(Register rd, Register rs, uint16_t cc = 0);
+ void movf(Register rd, Register rs, uint16_t cc = 0);
+
+ // Bit twiddling.
+ void clz(Register rd, Register rs);
+ void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
//--------Coprocessor-instructions----------------
@@ -473,19 +721,44 @@ class Assembler : public Malloced {
void swc1(FPURegister fs, const MemOperand& dst);
void sdc1(FPURegister fs, const MemOperand& dst);
- // When paired with MTC1 to write a value to a 64-bit FPR, the MTC1 must be
- // executed first, followed by the MTHC1.
- void mtc1(FPURegister fs, Register rt);
- void mthc1(FPURegister fs, Register rt);
- void mfc1(FPURegister fs, Register rt);
- void mfhc1(FPURegister fs, Register rt);
+ void mtc1(Register rt, FPURegister fs);
+ void mfc1(Register rt, FPURegister fs);
+
+ void ctc1(Register rt, FPUControlRegister fs);
+ void cfc1(Register rt, FPUControlRegister fs);
+
+ // Arithmetic.
+ void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void abs_d(FPURegister fd, FPURegister fs);
+ void mov_d(FPURegister fd, FPURegister fs);
+ void neg_d(FPURegister fd, FPURegister fs);
+ void sqrt_d(FPURegister fd, FPURegister fs);
// Conversion.
void cvt_w_s(FPURegister fd, FPURegister fs);
void cvt_w_d(FPURegister fd, FPURegister fs);
+ void trunc_w_s(FPURegister fd, FPURegister fs);
+ void trunc_w_d(FPURegister fd, FPURegister fs);
+ void round_w_s(FPURegister fd, FPURegister fs);
+ void round_w_d(FPURegister fd, FPURegister fs);
+ void floor_w_s(FPURegister fd, FPURegister fs);
+ void floor_w_d(FPURegister fd, FPURegister fs);
+ void ceil_w_s(FPURegister fd, FPURegister fs);
+ void ceil_w_d(FPURegister fd, FPURegister fs);
void cvt_l_s(FPURegister fd, FPURegister fs);
void cvt_l_d(FPURegister fd, FPURegister fs);
+ void trunc_l_s(FPURegister fd, FPURegister fs);
+ void trunc_l_d(FPURegister fd, FPURegister fs);
+ void round_l_s(FPURegister fd, FPURegister fs);
+ void round_l_d(FPURegister fd, FPURegister fs);
+ void floor_l_s(FPURegister fd, FPURegister fs);
+ void floor_l_d(FPURegister fd, FPURegister fs);
+ void ceil_l_s(FPURegister fd, FPURegister fs);
+ void ceil_l_d(FPURegister fd, FPURegister fs);
void cvt_s_w(FPURegister fd, FPURegister fs);
void cvt_s_l(FPURegister fd, FPURegister fs);
@@ -503,31 +776,78 @@ class Assembler : public Malloced {
void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
void bc1t(int16_t offset, uint16_t cc = 0);
void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
-
+ void fcmp(FPURegister src1, const double src2, FPUCondition cond);
// Check the code size generated from label to here.
int InstructionsGeneratedSince(Label* l) {
return (pc_offset() - l->pos()) / kInstrSize;
}
+ // Class for scoping postponing the trampoline pool generation.
+ class BlockTrampolinePoolScope {
+ public:
+ explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockTrampolinePool();
+ }
+ ~BlockTrampolinePoolScope() {
+ assem_->EndBlockTrampolinePool();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
+ };
+
+ // Class for postponing the assembly buffer growth. Typically used for
+ // sequences of instructions that must be emitted as a unit, before
+ // buffer growth (and relocation) can occur.
+ // This blocking scope is not nestable.
+ class BlockGrowBufferScope {
+ public:
+ explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockGrowBuffer();
+ }
+ ~BlockGrowBufferScope() {
+ assem_->EndBlockGrowBuffer();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
+ };
+
// Debugging.
// Mark address of the ExitJSFrame code.
void RecordJSReturn();
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot();
+
+ // Record the AST id of the CallIC being compiled, so that it can be placed
+ // in the relocation information.
+ void RecordAstId(unsigned ast_id) { ast_id_for_reloc_info_ = ast_id; }
+
// Record a comment relocation entry that can be used by a disassembler.
- // Use --debug_code to enable.
+ // Use --code-comments to enable.
void RecordComment(const char* msg);
- void RecordPosition(int pos);
- void RecordStatementPosition(int pos);
- bool WriteRecordedPositions();
+ static int RelocateInternalReference(byte* pc, intptr_t pc_delta);
+
+ // Writes a single byte or word of data in the code stream. Used for
+ // inline tables, e.g., jump-tables.
+ void db(uint8_t data);
+ void dd(uint32_t data);
int32_t pc_offset() const { return pc_ - buffer_; }
- int32_t current_position() const { return current_position_; }
- int32_t current_statement_position() const {
- return current_statement_position_;
- }
+
+ PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+
+ // Postpone the generation of the trampoline pool for the specified number of
+ // instructions.
+ void BlockTrampolinePoolFor(int instructions);
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
@@ -537,12 +857,9 @@ class Assembler : public Malloced {
// Get the number of bytes available in the buffer.
inline int available_space() const { return reloc_info_writer.pos() - pc_; }
- protected:
- int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
-
// Read/patch instructions.
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
- void instr_at_put(byte* pc, Instr instr) {
+ static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
@@ -551,7 +868,64 @@ class Assembler : public Malloced {
}
// Check if an instruction is a branch of some kind.
- bool is_branch(Instr instr);
+ static bool IsBranch(Instr instr);
+ static bool IsBeq(Instr instr);
+ static bool IsBne(Instr instr);
+
+ static bool IsJump(Instr instr);
+ static bool IsJ(Instr instr);
+ static bool IsLui(Instr instr);
+ static bool IsOri(Instr instr);
+
+ static bool IsNop(Instr instr, unsigned int type);
+ static bool IsPop(Instr instr);
+ static bool IsPush(Instr instr);
+ static bool IsLwRegFpOffset(Instr instr);
+ static bool IsSwRegFpOffset(Instr instr);
+ static bool IsLwRegFpNegOffset(Instr instr);
+ static bool IsSwRegFpNegOffset(Instr instr);
+
+ static Register GetRtReg(Instr instr);
+ static Register GetRsReg(Instr instr);
+ static Register GetRdReg(Instr instr);
+
+ static uint32_t GetRt(Instr instr);
+ static uint32_t GetRtField(Instr instr);
+ static uint32_t GetRs(Instr instr);
+ static uint32_t GetRsField(Instr instr);
+ static uint32_t GetRd(Instr instr);
+ static uint32_t GetRdField(Instr instr);
+ static uint32_t GetSa(Instr instr);
+ static uint32_t GetSaField(Instr instr);
+ static uint32_t GetOpcodeField(Instr instr);
+ static uint32_t GetFunction(Instr instr);
+ static uint32_t GetFunctionField(Instr instr);
+ static uint32_t GetImmediate16(Instr instr);
+ static uint32_t GetLabelConst(Instr instr);
+
+ static int32_t GetBranchOffset(Instr instr);
+ static bool IsLw(Instr instr);
+ static int16_t GetLwOffset(Instr instr);
+ static Instr SetLwOffset(Instr instr, int16_t offset);
+
+ static bool IsSw(Instr instr);
+ static Instr SetSwOffset(Instr instr, int16_t offset);
+ static bool IsAddImmediate(Instr instr);
+ static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
+
+ static bool IsAndImmediate(Instr instr);
+
+ void CheckTrampolinePool();
+
+ protected:
+ // Relocation for a type-recording IC has the AST id added to it. This
+ // member variable is a way to pass the information from the call site to
+ // the relocation info.
+ unsigned ast_id_for_reloc_info_;
+
+ bool emit_debug_code() const { return emit_debug_code_; }
+
+ int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos.
int target_at(int32_t pos);
@@ -560,11 +934,52 @@ class Assembler : public Malloced {
void target_at_put(int32_t pos, int32_t target_pos);
// Say if we need to relocate with this mode.
- bool MustUseAt(RelocInfo::Mode rmode);
+ bool MustUseReg(RelocInfo::Mode rmode);
// Record reloc info for current pc_.
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ // Block the emission of the trampoline pool before pc_offset.
+ void BlockTrampolinePoolBefore(int pc_offset) {
+ if (no_trampoline_pool_before_ < pc_offset)
+ no_trampoline_pool_before_ = pc_offset;
+ }
+
+ void StartBlockTrampolinePool() {
+ trampoline_pool_blocked_nesting_++;
+ }
+
+ void EndBlockTrampolinePool() {
+ trampoline_pool_blocked_nesting_--;
+ }
+
+ bool is_trampoline_pool_blocked() const {
+ return trampoline_pool_blocked_nesting_ > 0;
+ }
+
+ bool has_exception() const {
+ return internal_trampoline_exception_;
+ }
+
+ bool is_trampoline_emitted() const {
+ return trampoline_emitted_;
+ }
+
+ // Temporarily block automatic assembly buffer growth.
+ void StartBlockGrowBuffer() {
+ ASSERT(!block_buffer_growth_);
+ block_buffer_growth_ = true;
+ }
+
+ void EndBlockGrowBuffer() {
+ ASSERT(block_buffer_growth_);
+ block_buffer_growth_ = false;
+ }
+
+ bool is_buffer_growth_blocked() const {
+ return block_buffer_growth_;
+ }
+
private:
// Code buffer:
// The buffer into which code and relocation info are generated.
@@ -585,6 +1000,25 @@ class Assembler : public Malloced {
static const int kGap = 32;
byte* pc_; // The program counter - moves forward.
+
+ // Repeated checking whether the trampoline pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated.
+ static const int kCheckConstIntervalInst = 32;
+ static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
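+
+  // For example (annotation, not part of the patch): with kInstrSize == 4 on
+  // MIPS, kCheckConstInterval == 32 * 4 == 128 bytes, so the quick check runs
+  // roughly once per 32 emitted instructions.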
+
+ int next_buffer_check_; // pc offset of next buffer check.
+
+ // Emission of the trampoline pool may be blocked in some code sequences.
+ int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
+ int no_trampoline_pool_before_; // Block emission before this pc offset.
+
+ // Keep track of the last emitted pool to guarantee a maximal distance.
+ int last_trampoline_pool_end_; // pc offset of the end of the last pool.
+
+ // Automatic growth of the assembly buffer may be blocked for some sequences.
+ bool block_buffer_growth_; // Block growth when true.
+
// Relocation information generation.
// Each relocation is encoded as a variable size value.
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
@@ -593,16 +1027,11 @@ class Assembler : public Malloced {
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
- // Source position information.
- int current_position_;
- int current_statement_position_;
- int written_position_;
- int written_statement_position_;
-
// Code emission.
inline void CheckBuffer();
void GrowBuffer();
inline void emit(Instr x);
+ inline void CheckTrampolinePoolQuick();
// Instruction generation.
// We have 3 different kind of encoding layout on MIPS.
@@ -620,6 +1049,13 @@ class Assembler : public Malloced {
SecondaryField func = NULLSF);
void GenInstrRegister(Opcode opcode,
+ Register rs,
+ Register rt,
+ uint16_t msb,
+ uint16_t lsb,
+ SecondaryField func);
+
+ void GenInstrRegister(Opcode opcode,
SecondaryField fmt,
FPURegister ft,
FPURegister fs,
@@ -633,6 +1069,12 @@ class Assembler : public Malloced {
FPURegister fd,
SecondaryField func = NULLSF);
+ void GenInstrRegister(Opcode opcode,
+ SecondaryField fmt,
+ Register rt,
+ FPUControlRegister fs,
+ SecondaryField func = NULLSF);
+
void GenInstrImmediate(Opcode opcode,
Register rs,
@@ -651,15 +1093,96 @@ class Assembler : public Malloced {
void GenInstrJump(Opcode opcode,
uint32_t address);
+ // Helpers.
+ void LoadRegPlusOffsetToAt(const MemOperand& src);
// Labels.
void print(Label* L);
void bind_to(Label* L, int pos);
- void link_to(Label* L, Label* appendix);
void next(Label* L);
+ // One trampoline consists of:
+ // - space for trampoline slots,
+ // - space for labels.
+ //
+  // Space for trampoline slots is equal to slot_count * kTrampolineSlotsSize.
+  // Space for trampoline slots precedes space for labels. Each label is one
+  // instruction long, so the total amount for labels is equal to
+  // label_count * kInstrSize.
+ class Trampoline {
+ public:
+ Trampoline() {
+ start_ = 0;
+ next_slot_ = 0;
+ free_slot_count_ = 0;
+ end_ = 0;
+ }
+ Trampoline(int start, int slot_count) {
+ start_ = start;
+ next_slot_ = start;
+ free_slot_count_ = slot_count;
+ end_ = start + slot_count * kTrampolineSlotsSize;
+ }
+ int start() {
+ return start_;
+ }
+ int end() {
+ return end_;
+ }
+ int take_slot() {
+ int trampoline_slot = kInvalidSlotPos;
+ if (free_slot_count_ <= 0) {
+ // We have run out of space on trampolines.
+ // Make sure we fail in debug mode, so we become aware of each case
+ // when this happens.
+ ASSERT(0);
+ // Internal exception will be caught.
+ } else {
+ trampoline_slot = next_slot_;
+ free_slot_count_--;
+ next_slot_ += kTrampolineSlotsSize;
+ }
+ return trampoline_slot;
+ }
+ private:
+ int start_;
+ int end_;
+ int next_slot_;
+ int free_slot_count_;
+ };
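+
+  // Worked example (annotation, not part of the patch): with
+  // kTrampolineSlotsSize == 4 * kInstrSize == 16 bytes, Trampoline(start, 8)
+  // spans end_ - start_ == 8 * 16 == 128 bytes, and take_slot() hands out
+  // start_, start_ + 16, ... until free_slot_count_ drops to zero, after
+  // which it returns kInvalidSlotPos.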
+
+ int32_t get_trampoline_entry(int32_t pos);
+ int unbound_labels_count_;
+  // If a trampoline is emitted, the generated code has become large. As this
+  // is already a slow case which can possibly break code generation for the
+  // extreme case, we use this information to switch to a different mode of
+  // branch instruction generation, where jump instructions are used rather
+  // than regular branch instructions.
+ bool trampoline_emitted_;
+ static const int kTrampolineSlotsSize = 4 * kInstrSize;
+ static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+ static const int kInvalidSlotPos = -1;
+
+ Trampoline trampoline_;
+ bool internal_trampoline_exception_;
+
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
+ friend class CodePatcher;
+ friend class BlockTrampolinePoolScope;
+
+ PositionsRecorder positions_recorder_;
+ bool emit_debug_code_;
+ friend class PositionsRecorder;
+ friend class EnsureSpace;
+};
+
+
+class EnsureSpace BASE_EMBEDDED {
+ public:
+ explicit EnsureSpace(Assembler* assembler) {
+ assembler->CheckBuffer();
+ }
};
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 95329389e..4bb1d8cba 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,8 +31,10 @@
#if defined(V8_TARGET_ARCH_MIPS)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
#include "runtime.h"
namespace v8 {
@@ -45,32 +47,971 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments excluding receiver
+ // -- a1 : called function (only guaranteed when
+ // -- extra_args requires it)
+ // -- cp : context
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[4 * (argc - 1)] : first argument
+  //  -- sp[4 * argc] : receiver
+ // -----------------------------------
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ __ push(a1);
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToExternalReference expects a0 to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ Addu(a0, a0, Operand(num_extra_args + 1));
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the global context.
+
+ __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(result,
+ FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+ // Load the Array function from the global context.
+ __ lw(result,
+ MemOperand(result,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// This constant has the same value as JSArray::kPreallocatedArrayElements; if
+// JSArray::kPreallocatedArrayElements is changed, the handling of the loop
+// unfolding below should be reconsidered.
+static const int kLoopUnfoldLimit = 4;
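+
+// For instance (annotation, not part of the patch): AllocateEmptyJSArray below
+// is called with initial_capacity == JSArray::kPreallocatedArrayElements == 4,
+// so its fill code is emitted as four straight-line sw/Addu pairs instead of a
+// branch-controlled loop.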
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. An elements backing store is allocated with size initial_capacity
+// and filled with the hole values.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+ Register array_function,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int initial_capacity,
+ Label* gc_required) {
+ ASSERT(initial_capacity > 0);
+ // Load the initial map from the array function.
+ __ lw(scratch1, FieldMemOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Allocate the JSArray object together with space for a fixed array with the
+ // requested elements.
+ int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+ __ AllocateInNewSpace(size,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // scratch1: initial map
+ // scratch2: start of next object
+ __ sw(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
+ // Field JSArray::kElementsOffset is initialized later.
+ __ mov(scratch3, zero_reg);
+ __ sw(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // scratch2: start of next object
+ __ Addu(scratch1, result, Operand(JSArray::kSize));
+ __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+
+ // Clear the heap tag on the elements array.
+ __ And(scratch1, scratch1, Operand(~kHeapObjectTagMask));
+
+ // Initialize the FixedArray and fill it with holes. FixedArray length is
+ // stored as a smi.
+ // result: JSObject
+ // scratch1: elements array (untagged)
+ // scratch2: start of next object
+ __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ __ sw(scratch3, MemOperand(scratch1));
+ __ Addu(scratch1, scratch1, kPointerSize);
+ __ li(scratch3, Operand(Smi::FromInt(initial_capacity)));
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ sw(scratch3, MemOperand(scratch1));
+ __ Addu(scratch1, scratch1, kPointerSize);
+
+ // Fill the FixedArray with the hole value.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ ASSERT(initial_capacity <= kLoopUnfoldLimit);
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ for (int i = 0; i < initial_capacity; i++) {
+ __ sw(scratch3, MemOperand(scratch1));
+ __ Addu(scratch1, scratch1, kPointerSize);
+ }
+}
+
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register and beginning and end of the FixedArray elements
+// storage is put into registers elements_array_storage and elements_array_end
+// (see below for when that is not the case). If the parameter fill_with_hole
+// is true, the allocated elements backing store is filled with the hole
+// values; otherwise it is left uninitialized. When the backing store is
+// filled, the register elements_array_storage is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+ Register array_function, // Array function.
+ Register array_size, // As a smi.
+ Register result,
+ Register elements_array_storage,
+ Register elements_array_end,
+ Register scratch1,
+ Register scratch2,
+ bool fill_with_hole,
+ Label* gc_required) {
+ Label not_empty, allocated;
+
+ // Load the initial map from the array function.
+ __ lw(elements_array_storage,
+ FieldMemOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check whether an empty sized array is requested.
+ __ Branch(&not_empty, ne, array_size, Operand(zero_reg));
+
+  // If an empty array is requested, allocate a small elements array anyway.
+  // This keeps the code below free of special casing for the empty array.
+ int size = JSArray::kSize +
+ FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size,
+ result,
+ elements_array_end,
+ scratch1,
+ gc_required,
+ TAG_OBJECT);
+ __ Branch(&allocated);
+
+ // Allocate the JSArray object together with space for a FixedArray with the
+ // requested number of elements.
+ __ bind(&not_empty);
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ li(elements_array_end,
+ (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
+ __ sra(scratch1, array_size, kSmiTagSize);
+ __ Addu(elements_array_end, elements_array_end, scratch1);
+ __ AllocateInNewSpace(
+ elements_array_end,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // elements_array_storage: initial map
+ // array_size: size of array (smi)
+ __ bind(&allocated);
+ __ sw(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(elements_array_storage,
+ FieldMemOperand(result, JSArray::kPropertiesOffset));
+ // Field JSArray::kElementsOffset is initialized later.
+ __ sw(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // array_size: size of array (smi)
+ __ Addu(elements_array_storage, result, Operand(JSArray::kSize));
+ __ sw(elements_array_storage,
+ FieldMemOperand(result, JSArray::kElementsOffset));
+
+ // Clear the heap tag on the elements array.
+ __ And(elements_array_storage,
+ elements_array_storage,
+ Operand(~kHeapObjectTagMask));
+ // Initialize the fixed array and fill it with holes. FixedArray length is
+ // stored as a smi.
+ // result: JSObject
+ // elements_array_storage: elements array (untagged)
+ // array_size: size of array (smi)
+ __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ __ sw(scratch1, MemOperand(elements_array_storage));
+ __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+  // The length of the FixedArray is the number of pre-allocated elements when
+  // the actual JSArray has length 0, and the size of the JSArray for
+  // non-empty JSArrays. The length of a FixedArray is stored as a smi.
+ ASSERT(kSmiTag == 0);
+ __ li(at, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+ __ movz(array_size, at, array_size);
+
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ sw(array_size, MemOperand(elements_array_storage));
+ __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+ // Calculate elements array and elements array end.
+ // result: JSObject
+ // elements_array_storage: elements array element storage
+ // array_size: smi-tagged size of elements array
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sll(elements_array_end, array_size, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(elements_array_end, elements_array_storage, elements_array_end);
+
+ // Fill the allocated FixedArray with the hole value if requested.
+ // result: JSObject
+ // elements_array_storage: elements array element storage
+ // elements_array_end: start of next object
+ if (fill_with_hole) {
+ Label loop, entry;
+ __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
+ __ Branch(&entry);
+ __ bind(&loop);
+ __ sw(scratch1, MemOperand(elements_array_storage));
+ __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+ __ bind(&entry);
+ __ Branch(&loop, lt, elements_array_storage, Operand(elements_array_end));
+ }
+}
+
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code, the runtime is called.
+// This function assumes the following state:
+// a0: argc
+// a1: constructor (built-in Array function)
+// ra: return address
+// sp[0]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in a1 needs to be preserved for
+// entering the generic code. In both cases argc in a0 needs to be preserved.
+// Both registers are preserved by this code so no need to differentiate between
+// construct call and normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+ Label* call_generic_code) {
+ Counters* counters = masm->isolate()->counters();
+ Label argc_one_or_more, argc_two_or_more;
+
+ // Check for array construction with zero arguments or one.
+ __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
+ // Handle construction of an empty array.
+ AllocateEmptyJSArray(masm,
+ a1,
+ a2,
+ a3,
+ t0,
+ t1,
+ JSArray::kPreallocatedArrayElements,
+ call_generic_code);
+ __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
+  // Set up the return value, remove the receiver from the stack, and return.
+ __ mov(v0, a2);
+ __ Addu(sp, sp, Operand(kPointerSize));
+ __ Ret();
+
+  // Check for one argument. Bail out if the argument is not a smi or if it
+  // is negative.
+ __ bind(&argc_one_or_more);
+ __ Branch(&argc_two_or_more, ne, a0, Operand(1));
+
+ ASSERT(kSmiTag == 0);
+ __ lw(a2, MemOperand(sp)); // Get the argument from the stack.
+ __ And(a3, a2, Operand(kIntptrSignBit | kSmiTagMask));
+ __ Branch(call_generic_code, eq, a3, Operand(zero_reg));
+
+ // Handle construction of an empty array of a certain size. Bail out if size
+ // is too large to actually allocate an elements array.
+ ASSERT(kSmiTag == 0);
+ __ Branch(call_generic_code, Ugreater_equal, a2,
+ Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
+
+ // a0: argc
+ // a1: constructor
+ // a2: array_size (smi)
+ // sp[0]: argument
+ AllocateJSArray(masm,
+ a1,
+ a2,
+ a3,
+ t0,
+ t1,
+ t2,
+ t3,
+ true,
+ call_generic_code);
+ __ IncrementCounter(counters->array_function_native(), 1, a2, t0);
+
+  // Set up the return value, remove the receiver and argument from the
+  // stack, and return.
+ __ mov(v0, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ // Handle construction of an array from a list of arguments.
+ __ bind(&argc_two_or_more);
+ __ sll(a2, a0, kSmiTagSize); // Convert argc to a smi.
+
+ // a0: argc
+ // a1: constructor
+ // a2: array_size (smi)
+ // sp[0]: last argument
+ AllocateJSArray(masm,
+ a1,
+ a2,
+ a3,
+ t0,
+ t1,
+ t2,
+ t3,
+ false,
+ call_generic_code);
+ __ IncrementCounter(counters->array_function_native(), 1, a2, t2);
+
+ // Fill arguments as array elements. Copy from the top of the stack (last
+ // element) to the array backing store filling it backwards. Note:
+ // elements_array_end points after the backing store.
+ // a0: argc
+ // a3: JSArray
+ // t0: elements_array storage start (untagged)
+ // t1: elements_array_end (untagged)
+ // sp[0]: last argument
+
+ Label loop, entry;
+ __ Branch(&entry);
+ __ bind(&loop);
+ __ pop(a2);
+ __ Addu(t1, t1, -kPointerSize);
+ __ sw(a2, MemOperand(t1));
+ __ bind(&entry);
+ __ Branch(&loop, lt, t0, Operand(t1));
+
+  // Remove caller arguments and the receiver from the stack, set up the
+  // return value, and return.
+ // a0: argc
+ // a3: JSArray
+ // sp[0]: receiver
+ __ Addu(sp, sp, Operand(kPointerSize));
+ __ mov(v0, a3);
+ __ Ret();
}
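+
+// JS-level mapping of the paths above (annotation, not part of the patch):
+//
+//   Array()         // argc == 0: empty array, preallocated backing store.
+//   Array(7)        // argc == 1, non-negative smi: hole-filled store, length 7.
+//   Array(1, 2, 3)  // argc >= 2: backing store filled from stack arguments.
+//
+// A non-smi, negative, or overly large single argument falls back to
+// call_generic_code.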
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_array_code;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, a1);
+
+ if (FLAG_debug_code) {
+    // Initial map for the builtin Array function should be a map.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a2, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for Array function (1)",
+ t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, t0);
+ __ Assert(eq, "Unexpected initial map for Array function (2)",
+ t0, Operand(MAP_TYPE));
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code if the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
+
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->ArrayCodeGeneric();
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_constructor;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the builtin and internal
+ // Array functions which always have a map.
+ // Initial map for the builtin Array function should be a map.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a2, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for Array function (3)",
+ t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, t0);
+ __ Assert(eq, "Unexpected initial map for Array function (4)",
+ t0, Operand(MAP_TYPE));
+ }
+
+ // Run the native code for the Array function called as a constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3);
+
+ Register function = a1;
+ if (FLAG_debug_code) {
+ __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2);
+ __ Assert(eq, "Unexpected String function", function, Operand(a2));
+ }
+
+  // Load the first argument into a0 and get rid of the rest.
+ Label no_arguments;
+ __ Branch(&no_arguments, eq, a0, Operand(zero_reg));
+  // First arg = sp[(argc - 1) * 4].
+ __ Subu(a0, a0, Operand(1));
+ __ sll(a0, a0, kPointerSizeLog2);
+ __ Addu(sp, a0, sp);
+ __ lw(a0, MemOperand(sp));
+  // sp now points to args[0]; drop args[0] and the receiver.
+ __ Drop(2);
+
+ Register argument = a2;
+ Label not_cached, argument_is_string;
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm,
+ a0, // Input.
+ argument, // Result.
+ a3, // Scratch.
+ t0, // Scratch.
+ t1, // Scratch.
+ false, // Is it a Smi?
+ &not_cached);
+ __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0);
+ __ bind(&argument_is_string);
+
+ // ----------- S t a t e -------------
+ // -- a2 : argument converted to string
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -----------------------------------
+
+ Label gc_required;
+ __ AllocateInNewSpace(JSValue::kSize,
+ v0, // Result.
+ a3, // Scratch.
+ t0, // Scratch.
+ &gc_required,
+ TAG_OBJECT);
+
+  // Initialize the String object.
+ Register map = a3;
+ __ LoadGlobalFunctionInitialMap(function, map, t0);
+ if (FLAG_debug_code) {
+ __ lbu(t0, FieldMemOperand(map, Map::kInstanceSizeOffset));
+ __ Assert(eq, "Unexpected string wrapper instance size",
+ t0, Operand(JSValue::kSize >> kPointerSizeLog2));
+ __ lbu(t0, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
+ __ Assert(eq, "Unexpected unused properties of string wrapper",
+ t0, Operand(zero_reg));
+ }
+ __ sw(map, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+ __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ __ sw(argument, FieldMemOperand(v0, JSValue::kValueOffset));
+
+ // Ensure the object is fully initialized.
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+
+ __ Ret();
+
+ // The argument was not found in the number to string cache. Check
+ // if it's a string already before calling the conversion builtin.
+ Label convert_argument;
+ __ bind(&not_cached);
+ __ JumpIfSmi(a0, &convert_argument);
+
+ // Is it a String?
+ __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ ASSERT(kNotStringTag != 0);
+ __ And(t0, a3, Operand(kIsNotStringMask));
+ __ Branch(&convert_argument, ne, t0, Operand(zero_reg));
+ __ mov(argument, a0);
+ __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
+ __ Branch(&argument_is_string);
+
+ // Invoke the conversion builtin and put the result into a2.
+ __ bind(&convert_argument);
+ __ push(function); // Preserve the function.
+ __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
+ __ EnterInternalFrame();
+ __ push(v0);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ pop(function);
+ __ mov(argument, v0);
+ __ Branch(&argument_is_string);
+
+ // Load the empty string into a2, remove the receiver from the
+ // stack, and jump back to the case where the argument is a string.
+ __ bind(&no_arguments);
+ __ LoadRoot(argument, Heap::kEmptyStringRootIndex);
+ __ Drop(1);
+ __ Branch(&argument_is_string);
+
+ // At this point the argument is already a string. Call runtime to
+ // create a string wrapper.
+ __ bind(&gc_required);
+ __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
+ __ EnterInternalFrame();
+ __ push(argument);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ __ LeaveInternalFrame();
+ __ Ret();
}
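+
+// JS-level sketch of the paths above (annotation, not part of the patch):
+//
+//   new String(42)     // Number: may hit the number-to-string cache.
+//   new String("abc")  // Already a string: wrapped directly.
+//   new String({})     // Other values: converted via Builtins::TO_STRING.
+//   new String()       // No arguments: wraps the empty string.
+//
+// All paths join at argument_is_string and allocate a JSValue of
+// JSValue::kSize whose kValueOffset field holds the string.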
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ Label non_function_call;
+ // Check that the function is not a smi.
+ __ And(t0, a1, Operand(kSmiTagMask));
+ __ Branch(&non_function_call, eq, t0, Operand(zero_reg));
+ // Check that the function is a JSFunction.
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ // Jump to the function-specific construct stub.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
+ __ Addu(t9, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(Operand(t9));
+
+ // a0: number of arguments
+ // a1: called object
+ __ bind(&non_function_call);
+ // CALL_NON_FUNCTION expects the non-function constructor as receiver
+ // (instead of the original receiver from the call site). The receiver is
+ // stack element argc.
+ // Set expected number of arguments to zero (not changing a0).
+ __ mov(a2, zero_reg);
+ __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ SetCallKind(t1, CALL_AS_METHOD);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool count_constructions) {
+ // Should never count constructions for api objects.
+ ASSERT(!is_api_function || !count_constructions);
+
+ Isolate* isolate = masm->isolate();
+
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ __ EnterConstructFrame();
+
+ // Preserve the two incoming parameters on the stack.
+ __ sll(a0, a0, kSmiTagSize); // Tag arguments count.
+ __ MultiPushReversed(a0.bit() | a1.bit());
+
+ // Use t7 to hold undefined, which is used in several places below.
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
+ Label rt_call, allocated;
+ // Try to allocate the object without transitioning into C code. If any of the
+ // preconditions is not met, the code bails out to the runtime call.
+ if (FLAG_inline_new) {
+ Label undo_allocation;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ li(a2, Operand(debug_step_in_fp));
+ __ lw(a2, MemOperand(a2));
+ __ Branch(&rt_call, ne, a2, Operand(zero_reg));
+#endif
+
+ // Load the initial map and verify that it is in fact a map.
+ // a1: constructor function
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a2, Operand(kSmiTagMask));
+ __ Branch(&rt_call, eq, t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, t4);
+ __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+
+    // Check that the constructor is not constructing a JSFunction (see
+    // comments in Runtime_NewObject in runtime.cc), in which case the initial
+    // map's instance type would be JS_FUNCTION_TYPE.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
+
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ MemOperand constructor_count =
+ FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
+ __ lbu(t0, constructor_count);
+ __ Subu(t0, t0, Operand(1));
+ __ sb(t0, constructor_count);
+ __ Branch(&allocate, ne, t0, Operand(zero_reg));
+
+ __ Push(a1, a2);
+
+ __ push(a1); // Constructor.
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+ __ pop(a2);
+ __ pop(a1);
+
+ __ bind(&allocate);
+ }
+
+ // Now allocate the JSObject on the heap.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to initial
+ // map and properties and elements are set to empty fixed array.
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size
+ // t4: JSObject (not tagged)
+ __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(t5, t4);
+ __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+ __ Addu(t5, t5, Operand(3*kPointerSize));
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+ ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+ // Fill all the in-object properties with appropriate filler.
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size (in words)
+ // t4: JSObject (not tagged)
+ // t5: First in-object property of JSObject (not tagged)
+ __ sll(t0, a3, kPointerSizeLog2);
+ __ addu(t6, t4, t0); // End of object.
+ ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+ { Label loop, entry;
+ if (count_constructions) {
+ // To allow for truncation.
+ __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+ } else {
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ }
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sw(t7, MemOperand(t5, 0));
+ __ addiu(t5, t5, kPointerSize);
+ __ bind(&entry);
+ __ Branch(&loop, Uless, t5, Operand(t6));
+ }
+
+ // Add the object tag to make the JSObject real, so that we can continue and
+ // jump into the continuation code at any time from now on. Any failures
+ // need to undo the allocation, so that the heap is in a consistent state
+ // and verifiable.
+ __ Addu(t4, t4, Operand(kHeapObjectTag));
+
+    // Check if a non-empty properties array is needed. Continue with the
+    // allocated object if not; fall through to the runtime call if it is.
+ // a1: constructor function
+ // t4: JSObject
+ // t5: start of next object (not tagged)
+ __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+    // The instance-sizes field contains both the pre-allocated property
+    // fields and the in-object properties.
+ __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+ __ And(t6,
+ a0,
+ Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
+ __ srl(t0, t6, Map::kPreAllocatedPropertyFieldsByte * 8);
+ __ Addu(a3, a3, Operand(t0));
+ __ And(t6, a0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
+ __ srl(t0, t6, Map::kInObjectPropertiesByte * 8);
+ __ subu(a3, a3, t0);
+
+ // Done if no extra properties are to be allocated.
+ __ Branch(&allocated, eq, a3, Operand(zero_reg));
+ __ Assert(greater_equal, "Property allocation count failed.",
+ a3, Operand(zero_reg));
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // a1: constructor
+ // a3: number of elements in properties array
+ // t4: JSObject
+ // t5: start of next object
+ __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ AllocateInNewSpace(
+ a0,
+ t5,
+ t6,
+ a2,
+ &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+ // Initialize the FixedArray.
+ // a1: constructor
+ // a3: number of elements in properties array (un-tagged)
+ // t4: JSObject
+ // t5: start of next object
+ __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
+ __ mov(a2, t5);
+ __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
+ __ sll(a0, a3, kSmiTagSize);
+ __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
+ __ Addu(a2, a2, Operand(2 * kPointerSize));
+
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+
+ // Initialize the fields to undefined.
+ // a1: constructor
+ // a2: First element of FixedArray (not tagged)
+ // a3: number of elements in properties array
+ // t4: JSObject
+ // t5: FixedArray (not tagged)
+ __ sll(t3, a3, kPointerSizeLog2);
+ __ addu(t6, a2, t3); // End of object.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ { Label loop, entry;
+ if (count_constructions) {
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ } else if (FLAG_debug_code) {
+ __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
+ __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+ }
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sw(t7, MemOperand(a2));
+ __ addiu(a2, a2, kPointerSize);
+ __ bind(&entry);
+ __ Branch(&loop, less, a2, Operand(t6));
+ }
+
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject.
+ // a1: constructor function
+ // t4: JSObject
+ // t5: FixedArray (not tagged)
+ __ Addu(t5, t5, Operand(kHeapObjectTag)); // Add the heap tag.
+ __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+
+ // Continue with JSObject being successfully allocated.
+ // a1: constructor function
+    // t4: JSObject
+ __ jmp(&allocated);
+
+    // Undo the setting of the new top so that the heap is verifiable. For
+    // example, the map's unused properties potentially do not match the
+    // allocated object's unused properties.
+ // t4: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(t4, t5);
+ }
+
+ __ bind(&rt_call);
+ // Allocate the new receiver object using the runtime call.
+ // a1: constructor function
+ __ push(a1); // Argument for Runtime_NewObject.
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ mov(t4, v0);
+
+ // Receiver for constructor call allocated.
+ // t4: JSObject
+ __ bind(&allocated);
+ __ push(t4);
+
+ // Push the function and the allocated receiver from the stack.
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ lw(a1, MemOperand(sp, kPointerSize));
+ __ MultiPushReversed(a1.bit() | t4.bit());
+
+ // Reload the number of arguments from the stack.
+ // a1: constructor function
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ lw(a3, MemOperand(sp, 4 * kPointerSize));
+
+  // Set up the pointer to the last argument.
+ __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // Set up the number of arguments for the function call below.
+ __ srl(a0, a3, kSmiTagSize);
+
+ // Copy arguments and receiver to the expression stack.
+ // a0: number of arguments
+ // a1: constructor function
+ // a2: address of last argument (caller sp)
+ // a3: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ Label loop, entry;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ lw(t1, MemOperand(t0));
+ __ push(t1);
+ __ bind(&entry);
+ __ Addu(a3, a3, Operand(-2));
+ __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+
+ // Call the function.
+ // a0: number of arguments
+ // a1: constructor function
+ if (is_api_function) {
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+ } else {
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
+
+ // Pop the function from the stack.
+ // v0: result
+ // sp[0]: constructor function
+  // sp[1]: receiver
+  // sp[2]: constructor function
+  // sp[3]: number of arguments (smi-tagged)
+ __ Pop();
+
+ // Restore context from the frame.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
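+  //
+  // JS-level example (annotation, not part of the patch):
+  //
+  //   function F() { return {a: 1}; }  // Object result: replaces receiver.
+  //   function G() { return 42; }      // Primitive result: discarded.
+  //   (new F()).a;  // 1 -- the returned object wins.
+  //   new G();      // Yields the freshly allocated receiver, not 42.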
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ And(t0, v0, Operand(kSmiTagMask));
+ __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(v0, a3, a3);
+ __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ lw(v0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+ __ LeaveConstructFrame();
+ __ sll(t0, a1, kPointerSizeLog2 - 1);
+ __ Addu(sp, sp, t0);
+ __ Addu(sp, sp, kPointerSize);
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+ __ Ret();
+}
+
+
+void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Generate_JSConstructStubHelper(masm, false, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Generate_JSConstructStubHelper(masm, true, false);
}
@@ -78,23 +1019,16 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from JSEntryStub::GenerateBody
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: reveiver_pointer
- // a3: argc
- // s0: argv
- //
- // Stack:
- // arguments slots
- // handler frame
- // entry frame
- // callee saved registers + ra
- // 4 args slots
- // args
+ // ----------- S t a t e -------------
+ // -- a0: code entry
+ // -- a1: function
+  //  -- a2: receiver_pointer
+ // -- a3: argc
+ // -- s0: argv
+ // -----------------------------------
// Clear the context before we push it when entering the JS frame.
- __ li(cp, Operand(0, RelocInfo::NONE));
+ __ mov(cp, zero_reg);
// Enter an internal frame.
__ EnterInternalFrame();
@@ -103,18 +1037,19 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// Set up the roots register.
- ExternalReference roots_address = ExternalReference::roots_address();
+ ExternalReference roots_address =
+ ExternalReference::roots_address(masm->isolate());
__ li(s6, Operand(roots_address));
// Push the function and the receiver onto the stack.
- __ MultiPushReversed(a1.bit() | a2.bit());
+ __ Push(a1, a2);
// Copy arguments to the stack in a loop.
// a3: argc
   // s0: argv, i.e. points to first arg
Label loop, entry;
__ sll(t0, a3, kPointerSizeLog2);
- __ add(t2, s0, t0);
+ __ addu(t2, s0, t0);
__ b(&entry);
__ nop(); // Branch delay slot nop.
// t2 points past last arg.
@@ -122,48 +1057,30 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ lw(t0, MemOperand(s0)); // Read next parameter.
__ addiu(s0, s0, kPointerSize);
__ lw(t0, MemOperand(t0)); // Dereference handle.
- __ Push(t0); // Push parameter.
+ __ push(t0); // Push parameter.
__ bind(&entry);
- __ Branch(ne, &loop, s0, Operand(t2));
-
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: reveiver_pointer
- // a3: argc
- // s0: argv
- // s6: roots_address
- //
- // Stack:
- // arguments
- // receiver
- // function
- // arguments slots
- // handler frame
- // entry frame
- // callee saved registers + ra
- // 4 args slots
- // args
+ __ Branch(&loop, ne, s0, Operand(t2));
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
- __ LoadRoot(t4, Heap::kUndefinedValueRootIndex);
- __ mov(s1, t4);
- __ mov(s2, t4);
- __ mov(s3, t4);
- __ mov(s4, s4);
- __ mov(s5, t4);
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ mov(s1, t0);
+ __ mov(s2, t0);
+ __ mov(s3, t0);
+ __ mov(s4, t0);
+ __ mov(s5, t0);
// s6 holds the root address. Do not clobber.
// s7 is cp. Do not init.
// Invoke the code and pass argc as a0.
__ mov(a0, a3);
if (is_construct) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x164);
+ __ Call(masm->isolate()->builtins()->JSConstructCall(),
+ RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
__ LeaveInternalFrame();
@@ -182,19 +1099,525 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
+void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Preserve the function.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
+
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(a1);
+ // Call the runtime function.
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ // Calculate the entry point.
+ __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore saved function.
+ __ pop(a1);
+
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Do a tail-call of the compiled function.
+ __ Jump(t9);
+}
+
+
+void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Preserve the function.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
+
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(a1);
+ __ CallRuntime(Runtime::kLazyRecompile, 1);
+ // Calculate the entry point.
+ __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore saved function.
+ __ pop(a1);
+
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Do a tail-call of the compiled function.
+ __ Jump(t9);
+}
+
+
+// These functions are called from C++ but cannot be used in live code.
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ __ Abort("Call to unimplemented function in builtins-mips.cc");
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+ __ Abort("Call to unimplemented function in builtins-mips.cc");
+}
+
+
+void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
+ __ Abort("Call to unimplemented function in builtins-mips.cc");
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ __ Abort("Call to unimplemented function in builtins-mips.cc");
+}
+
+
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // 1. Make sure we have at least one argument.
+ // a0: actual number of arguments
+ { Label done;
+ __ Branch(&done, ne, a0, Operand(zero_reg));
+ __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+ __ push(t2);
+ __ Addu(a0, a0, Operand(1));
+ __ bind(&done);
+ }
+
+ // 2. Get the function to call (passed as receiver) from the stack, check
+ // if it is a function.
+ // a0: actual number of arguments
+ Label non_function;
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(at, sp, at);
+ __ lw(a1, MemOperand(at));
+ __ And(at, a1, Operand(kSmiTagMask));
+ __ Branch(&non_function, eq, at, Operand(zero_reg));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ // 3a. Patch the first argument if necessary when calling a function.
+ // a0: actual number of arguments
+ // a1: function
+ Label shift_arguments;
+ { Label convert_to_object, use_global_receiver, patch_receiver;
+ // Change context eagerly in case we need the global receiver.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+
+    // Do not transform the receiver for native functions (compiler hints are
+    // already in a3).
+ __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+
+ // Compute the receiver in non-strict mode.
+ // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(a2, sp, at);
+ __ lw(a2, MemOperand(a2, -kPointerSize));
+ // a0: actual number of arguments
+ // a1: function
+ // a2: first argument
+ __ JumpIfSmi(a2, &convert_to_object, t2);
+
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ Branch(&use_global_receiver, eq, a2, Operand(a3));
+ __ LoadRoot(a3, Heap::kNullValueRootIndex);
+ __ Branch(&use_global_receiver, eq, a2, Operand(a3));
+
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ GetObjectType(a2, a3, a3);
+ __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ __ bind(&convert_to_object);
+ __ EnterInternalFrame(); // In order to preserve argument count.
+ __ sll(a0, a0, kSmiTagSize); // Smi tagged.
+ __ push(a0);
+
+ __ push(a2);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a2, v0);
+
+ __ pop(a0);
+ __ sra(a0, a0, kSmiTagSize); // Un-tag.
+ __ LeaveInternalFrame();
+ // Restore the function to a1.
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(at, sp, at);
+ __ lw(a1, MemOperand(at));
+ __ Branch(&patch_receiver);
+
+ // Use the global receiver object from the called function as the
+ // receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalIndex =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ lw(a2, FieldMemOperand(cp, kGlobalIndex));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+ __ lw(a2, FieldMemOperand(a2, kGlobalIndex));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+
+ __ bind(&patch_receiver);
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(a3, sp, at);
+ __ sw(a2, MemOperand(a3, -kPointerSize));
+
+ __ Branch(&shift_arguments);
+ }
+
+ // 3b. Patch the first argument when calling a non-function. The
+ // CALL_NON_FUNCTION builtin expects the non-function callee as
+ // receiver, so overwrite the first argument which will ultimately
+ // become the receiver.
+ // a0: actual number of arguments
+ // a1: function
+ __ bind(&non_function);
+ // Restore the function in case it has been modified.
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(a2, sp, at);
+ __ sw(a1, MemOperand(a2, -kPointerSize));
+ // Clear a1 to indicate a non-function being called.
+ __ mov(a1, zero_reg);
+
+ // 4. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ // a0: actual number of arguments
+ // a1: function
+ __ bind(&shift_arguments);
+ { Label loop;
+ // Calculate the copy start address (destination). Copy end address is sp.
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(a2, sp, at);
+
+ __ bind(&loop);
+ __ lw(at, MemOperand(a2, -kPointerSize));
+ __ sw(at, MemOperand(a2));
+ __ Subu(a2, a2, Operand(kPointerSize));
+ __ Branch(&loop, ne, a2, Operand(sp));
+ // Adjust the actual number of arguments and remove the top element
+ // (which is a copy of the last argument).
+ __ Subu(a0, a0, Operand(1));
+ __ Pop();
+ }
+
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+ // a0: actual number of arguments
+ // a1: function
+ { Label function;
+ __ Branch(&function, ne, a1, Operand(zero_reg));
+    // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+    __ mov(a2, zero_reg);
+ __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+ __ SetCallKind(t1, CALL_AS_METHOD);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ bind(&function);
+ }
+
+ // 5b. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing. If so, jump
+  // (tail-call) to the code in register a3 without checking arguments.
+ // a0: actual number of arguments
+ // a1: function
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2,
+ FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ sra(a2, a2, kSmiTagSize);
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ SetCallKind(t1, CALL_AS_METHOD);
+ // Check formal and actual parameter counts.
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET, ne, a2, Operand(a0));
+
+ ParameterCount expected(0);
+ __ InvokeCode(a3, expected, expected, JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
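+
+// Receiver coercion sketch (annotation, not part of the patch): for a
+// non-strict, non-native callee f, step 3a above implements:
+//
+//   f();           // undefined receiver -> global receiver object.
+//   f.call(null);  // null receiver      -> global receiver object.
+//   f.call(42);    // primitive receiver -> ToObject(42) wrapper.
+//
+// Strict-mode and native functions skip the transformation and see the
+// receiver value unchanged.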
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ const int kIndexOffset = -5 * kPointerSize;
+ const int kLimitOffset = -4 * kPointerSize;
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kRecvOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
+
+ __ EnterInternalFrame();
+
+ __ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
+ __ push(a0);
+ __ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array.
+ __ push(a0);
+ // Returns (in v0) number of arguments to copy to stack as Smi.
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+  // Check the stack for overflow. We are not trying to catch interruptions
+  // (e.g. debug break and preemption) here, so the "real stack limit" is
+  // checked.
+ Label okay;
+ __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ // Make a2 the space we have left. The stack might already be overflowed
+ // here which will cause a2 to become negative.
+ __ subu(a2, sp, a2);
+ // Check if the arguments will overflow the stack.
+ __ sll(t0, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ Branch(&okay, gt, a2, Operand(t0)); // Signed comparison.
+
+ // Out of stack space.
+ __ lw(a1, MemOperand(fp, kFunctionOffset));
+ __ push(a1);
+ __ push(v0);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ // End of stack check.
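+  //
+  // Worked example (annotation, not part of the patch): v0 holds the argument
+  // count as a smi, i.e. argc << kSmiTagSize. Shifting it left by
+  // kPointerSizeLog2 - kSmiTagSize == 1 yields argc * 4, the byte size of the
+  // arguments, which must be less than the remaining space sp - limit.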
+
+ // Push current limit and index.
+ __ bind(&okay);
+ __ push(v0); // Limit.
+ __ mov(a1, zero_reg); // Initial index.
+ __ push(a1);
+
+ // Change context eagerly to get the right global object if necessary.
+ __ lw(a0, MemOperand(fp, kFunctionOffset));
+ __ lw(cp, FieldMemOperand(a0, JSFunction::kContextOffset));
+ // Load the shared function info while the function is still in a0.
+ __ lw(a1, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+
+ // Compute the receiver.
+ Label call_to_object, use_global_receiver, push_receiver;
+ __ lw(a0, MemOperand(fp, kRecvOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ __ lw(a2, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+
+  // Do not transform the receiver for native functions (the compiler hints
+  // are already in a2).
+ __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+
+ // Compute the receiver in non-strict mode.
+ __ And(t0, a0, Operand(kSmiTagMask));
+ __ Branch(&call_to_object, eq, t0, Operand(zero_reg));
+ __ LoadRoot(a1, Heap::kNullValueRootIndex);
+ __ Branch(&use_global_receiver, eq, a0, Operand(a1));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&use_global_receiver, eq, a0, Operand(a2));
+
+ // Check if the receiver is already a JavaScript object.
+ // a0: receiver
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Convert the receiver to a regular object.
+ // a0: receiver
+ __ bind(&call_to_object);
+ __ push(a0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
+ __ Branch(&push_receiver);
+
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+ __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ // a0: receiver
+ __ bind(&push_receiver);
+ __ push(a0);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ lw(a0, MemOperand(fp, kIndexOffset));
+ __ Branch(&entry);
+
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // a0: current argument index
+ __ bind(&loop);
+ __ lw(a1, MemOperand(fp, kArgsOffset));
+ __ push(a1);
+ __ push(a0);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(v0);
+
+  // Advance the current argument index (kept as a Smi).
+ __ lw(a0, MemOperand(fp, kIndexOffset));
+ __ Addu(a0, a0, Operand(1 << kSmiTagSize));
+ __ sw(a0, MemOperand(fp, kIndexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ lw(a1, MemOperand(fp, kLimitOffset));
+ __ Branch(&loop, ne, a0, Operand(a1));
+ // Invoke the function.
+ ParameterCount actual(a0);
+ __ sra(a0, a0, kSmiTagSize);
+ __ lw(a1, MemOperand(fp, kFunctionOffset));
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+
+ // Tear down the internal frame and remove function, receiver and args.
+ __ LeaveInternalFrame();
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+}
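+
+
+// Editorial aside (illustrative pseudo-C++ only, not generated code or V8
+// API): the stub above implements Function.prototype.apply, roughly:
+//
+//   // result = func.apply(recv, args):
+//   //   n = APPLY_PREPARE(func, args);                 // checked args length
+//   //   if (!StackHasRoomFor(n)) APPLY_OVERFLOW(func, n);
+//   //   receiver = strict-mode or native function ? recv
+//   //            : recv is null/undefined ? global receiver : ToObject(recv);
+//   //   for (i = 0; i < n; i++) Push(GetProperty(args, i));
+//   //   return Invoke(func, receiver, pushed arguments);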
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+ __ sll(a0, a0, kSmiTagSize);
+ __ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
+ __ Addu(fp, sp, Operand(3 * kPointerSize));
+}
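+
+
+// Sketch of the resulting adaptor frame layout (inferred from the MultiPush
+// order above, which stores higher-numbered registers at higher addresses;
+// offsets are in words):
+//
+//   [fp + 1]  saved ra
+//   [fp + 0]  caller's fp
+//   [fp - 1]  StackFrame::ARGUMENTS_ADAPTOR marker (Smi)
+//   [fp - 2]  function (a1)
+//   [fp - 3]  actual argument count (Smi, from a0)
+//
+// LeaveArgumentsAdaptorFrame below reloads the count from fp - 3 words.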
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- v0 : result being passed through
+ // -----------------------------------
+ // Get the number of arguments passed (as a smi), tear down the frame and
+ // then tear down the parameters.
+ __ lw(a1, MemOperand(fp, -3 * kPointerSize));
+ __ mov(sp, fp);
+ __ MultiPop(fp.bit() | ra.bit());
+ __ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(sp, sp, t0);
+ // Adjust for the receiver.
+ __ Addu(sp, sp, Operand(kPointerSize));
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x201);
+ // State setup as expected by MacroAssembler::InvokePrologue.
+ // ----------- S t a t e -------------
+ // -- a0: actual arguments count
+ // -- a1: function (passed through to callee)
+ // -- a2: expected arguments count
+ // -- a3: callee code entry
+ // -- t1: call kind information
+ // -----------------------------------
+
+ Label invoke, dont_adapt_arguments;
+
+ Label enough, too_few;
+ __ Branch(&dont_adapt_arguments, eq,
+ a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  // We use Uless as the number of arguments should always be greater than 0.
+ __ Branch(&too_few, Uless, a0, Operand(a2));
+
+ { // Enough parameters: actual >= expected.
+ // a0: actual number of arguments as a smi
+ // a1: function
+ // a2: expected number of arguments
+ // a3: code entry to call
+ __ bind(&enough);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Calculate copy start address into a0 and copy end address into a2.
+ __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a0, fp, a0);
+ // Adjust for return address and receiver.
+ __ Addu(a0, a0, Operand(2 * kPointerSize));
+ // Compute copy end address.
+ __ sll(a2, a2, kPointerSizeLog2);
+ __ subu(a2, a0, a2);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // a0: copy start address
+ // a1: function
+ // a2: copy end address
+ // a3: code entry to call
+
+ Label copy;
+ __ bind(&copy);
+ __ lw(t0, MemOperand(a0));
+ __ push(t0);
+ __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a2));
+ __ addiu(a0, a0, -kPointerSize); // In delay slot.
+
+ __ jmp(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected.
+ __ bind(&too_few);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // TODO(MIPS): Optimize these loops.
+
+ // Calculate copy start address into a0 and copy end address is fp.
+ // a0: actual number of arguments as a smi
+ // a1: function
+ // a2: expected number of arguments
+ // a3: code entry to call
+ __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a0, fp, a0);
+ // Adjust for return address and receiver.
+ __ Addu(a0, a0, Operand(2 * kPointerSize));
+ // Compute copy end address. Also adjust for return address.
+ __ Addu(t3, fp, kPointerSize);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // a0: copy start address
+ // a1: function
+ // a2: expected number of arguments
+ // a3: code entry to call
+ // t3: copy end address
+ Label copy;
+ __ bind(&copy);
+ __ lw(t0, MemOperand(a0)); // Adjusted above for return addr and receiver.
+ __ push(t0);
+ __ Subu(a0, a0, kPointerSize);
+ __ Branch(&copy, ne, a0, Operand(t3));
+
+ // Fill the remaining expected arguments with undefined.
+ // a1: function
+ // a2: expected number of arguments
+ // a3: code entry to call
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ sll(t2, a2, kPointerSizeLog2);
+ __ Subu(a2, fp, Operand(t2));
+ __ Addu(a2, a2, Operand(-4 * kPointerSize)); // Adjust for frame.
+
+ Label fill;
+ __ bind(&fill);
+ __ push(t0);
+ __ Branch(&fill, ne, sp, Operand(a2));
+ }
+
+ // Call the entry point.
+ __ bind(&invoke);
+
+ __ Call(a3);
+
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ Ret();
+
+ // -------------------------------------------
+ // Don't adapt arguments.
+ // -------------------------------------------
+ __ bind(&dont_adapt_arguments);
+ __ Jump(a3);
}
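+
+
+// Editorial aside (illustrative pseudo-C++ only, not V8 API): the adaptation
+// above amounts to:
+//
+//   if (expected == kDontAdaptArgumentsSentinel) goto code;      // no frame
+//   EnterArgumentsAdaptorFrame();
+//   if (actual >= expected) copy the topmost `expected` args;    // drop extra
+//   else { copy all `actual` args; pad up to `expected` with undefined; }
+//   Call(code); LeaveArgumentsAdaptorFrame(); return;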
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
new file mode 100644
index 000000000..1aa1838be
--- /dev/null
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -0,0 +1,6896 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_MIPS)
+
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "codegen.h"
+#include "regexp-macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc,
+ bool never_nan_nan);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+                                    Label* both_loaded_as_doubles,
+ Label* slow,
+ bool strict);
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
+
+
+// Check if the operand is a heap number.
+static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
+ Register scratch1, Register scratch2,
+ Label* not_a_heap_number) {
+ __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
+ __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
+ __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
+}
+
+
+void ToNumberStub::Generate(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in a0.
+ Label check_heap_number, call_builtin;
+ __ JumpIfNotSmi(a0, &check_heap_number);
+ __ mov(v0, a0);
+ __ Ret();
+
+ __ bind(&check_heap_number);
+ EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
+ __ mov(v0, a0);
+ __ Ret();
+
+ __ bind(&call_builtin);
+ __ push(a0);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+}
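+
+
+// Editorial aside (hedged sketch with hypothetical helper names, not real V8
+// API): the fast path above is equivalent to:
+//
+//   Object* ToNumberFast(Object* x) {             // hypothetical helper
+//     if (IsSmi(x) || IsHeapNumber(x)) return x;  // already a number
+//     return InvokeBuiltin(TO_NUMBER, x);         // fall back to the builtin
+//   }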
+
+
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+ // Create a new closure from the given function info in new
+ // space. Set the context to the current context in cp.
+ Label gc;
+
+ // Pop the function info from the stack.
+ __ pop(a3);
+
+ // Attempt to allocate new JSFunction in new space.
+ __ AllocateInNewSpace(JSFunction::kSize,
+ v0,
+ a1,
+ a2,
+ &gc,
+ TAG_OBJECT);
+
+ int map_index = strict_mode_ == kStrictMode
+ ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
+ : Context::FUNCTION_MAP_INDEX;
+
+ // Compute the function map in the current global context and set that
+ // as the map of the allocated object.
+ __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+ __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
+ __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+ // Initialize the rest of the function. We don't have to update the
+ // write barrier because the allocated object is in new space.
+ __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
+ __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
+ __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+ __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
+ __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
+ __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
+
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
+ __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
+
+ // Return result. The argument function info has been popped already.
+ __ Ret();
+
+ // Create a new closure through the slower runtime call.
+ __ bind(&gc);
+ __ LoadRoot(t0, Heap::kFalseValueRootIndex);
+ __ Push(cp, a3, t0);
+ __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
+}
+
+
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+
+ // Attempt to allocate the context in new space.
+ __ AllocateInNewSpace(FixedArray::SizeFor(length),
+ v0,
+ a1,
+ a2,
+ &gc,
+ TAG_OBJECT);
+
+ // Load the function from the stack.
+ __ lw(a3, MemOperand(sp, 0));
+
+ // Setup the object header.
+ __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
+ __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ li(a2, Operand(Smi::FromInt(length)));
+ __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+
+ // Setup the fixed slots.
+ __ li(a1, Operand(Smi::FromInt(0)));
+ __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ sw(v0, MemOperand(v0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
+
+ // Copy the global object from the previous context.
+ __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+
+ // Initialize the rest of the slots to undefined.
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
+ }
+
+ // Remove the on-stack argument and return.
+ __ mov(cp, v0);
+ __ Pop();
+ __ Ret();
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
+}
+
+
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ // [sp]: constant elements.
+ // [sp + kPointerSize]: literal index.
+ // [sp + (2 * kPointerSize)]: literals array.
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int size = JSArray::kSize + elements_size;
+
+  // Load the boilerplate object into a3 and check if we need to create a
+  // boilerplate.
+ Label slow_case;
+ __ lw(a3, MemOperand(sp, 2 * kPointerSize));
+ __ lw(a0, MemOperand(sp, 1 * kPointerSize));
+ __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a3, t0);
+ __ lw(a3, MemOperand(t0));
+ __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+ __ Branch(&slow_case, eq, a3, Operand(t1));
+
+ if (FLAG_debug_code) {
+ const char* message;
+ Heap::RootListIndex expected_map_index;
+ if (mode_ == CLONE_ELEMENTS) {
+ message = "Expected (writable) fixed array";
+ expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else {
+ ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ message = "Expected copy-on-write fixed array";
+ expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
+ }
+ __ push(a3);
+ __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+ __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ LoadRoot(at, expected_map_index);
+ __ Assert(eq, message, a3, Operand(at));
+ __ pop(a3);
+ }
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ // Return new object in v0.
+ __ AllocateInNewSpace(size,
+ v0,
+ a1,
+ a2,
+ &slow_case,
+ TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+ __ lw(a1, FieldMemOperand(a3, i));
+ __ sw(a1, FieldMemOperand(v0, i));
+ }
+ }
+
+ if (length_ > 0) {
+ // Get hold of the elements array of the boilerplate and setup the
+ // elements pointer in the resulting object.
+ __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+ __ Addu(a2, v0, Operand(JSArray::kSize));
+ __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
+
+ // Copy the elements array.
+ __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
+ }
+
+ // Return and remove the on-stack parameters.
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
+
+
+// Takes a Smi and converts it to an IEEE 64-bit floating point value in two
+// registers. The format is 1 sign bit, 11 exponent bits (biased by 1023) and
+// 52 fraction bits (20 in the first word, 32 in the second). The zeros_
+// register is used as a scratch register. Destroys the source register. No
+// GC occurs during this stub, so you don't have to set up the frame.
+class ConvertToDoubleStub : public CodeStub {
+ public:
+ ConvertToDoubleStub(Register result_reg_1,
+ Register result_reg_2,
+ Register source_reg,
+ Register scratch_reg)
+ : result1_(result_reg_1),
+ result2_(result_reg_2),
+ source_(source_reg),
+ zeros_(scratch_reg) { }
+
+ private:
+ Register result1_;
+ Register result2_;
+ Register source_;
+ Register zeros_;
+
+ // Minor key encoding in 16 bits.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 14> {};
+
+ Major MajorKey() { return ConvertToDouble; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return result1_.code() +
+ (result2_.code() << 4) +
+ (source_.code() << 8) +
+ (zeros_.code() << 12);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "ConvertToDoubleStub"; }
+
+#ifdef DEBUG
+ void Print() { PrintF("ConvertToDoubleStub\n"); }
+#endif
+};
+
+
+void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
+#ifndef BIG_ENDIAN_FLOATING_POINT
+ Register exponent = result1_;
+ Register mantissa = result2_;
+#else
+ Register exponent = result2_;
+ Register mantissa = result1_;
+#endif
+ Label not_special;
+ // Convert from Smi to integer.
+ __ sra(source_, source_, kSmiTagSize);
+ // Move sign bit from source to destination. This works because the sign bit
+ // in the exponent word of the double has the same position and polarity as
+ // the 2's complement sign bit in a Smi.
+ STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ __ And(exponent, source_, Operand(HeapNumber::kSignMask));
+ // Subtract from 0 if source was negative.
+ __ subu(at, zero_reg, source_);
+ __ movn(source_, at, exponent);
+
+ // We have -1, 0 or 1, which we treat specially. Register source_ contains
+ // absolute value: it is either equal to 1 (special case of -1 and 1),
+ // greater than 1 (not a special case) or less than 1 (special case of 0).
+ __ Branch(&not_special, gt, source_, Operand(1));
+
+ // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
+ static const uint32_t exponent_word_for_1 =
+ HeapNumber::kExponentBias << HeapNumber::kExponentShift;
+ // Safe to use 'at' as dest reg here.
+ __ Or(at, exponent, Operand(exponent_word_for_1));
+ __ movn(exponent, at, source_); // Write exp when source not 0.
+ // 1, 0 and -1 all have 0 for the second word.
+ __ mov(mantissa, zero_reg);
+ __ Ret();
+
+ __ bind(&not_special);
+ // Count leading zeros.
+ // Gets the wrong answer for 0, but we already checked for that case above.
+ __ clz(zeros_, source_);
+ // Compute exponent and or it into the exponent register.
+ // We use mantissa as a scratch register here.
+ __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
+ __ subu(mantissa, mantissa, zeros_);
+ __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
+ __ Or(exponent, exponent, mantissa);
+
+ // Shift up the source chopping the top bit off.
+ __ Addu(zeros_, zeros_, Operand(1));
+ // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
+ __ sllv(source_, source_, zeros_);
+ // Compute lower part of fraction (last 12 bits).
+ __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
+ // And the top (top 20 bits).
+ __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
+ __ or_(exponent, exponent, source_);
+
+ __ Ret();
+}
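+
+
+// Editorial aside: a C-level sketch (illustrative only; shift semantics as on
+// MIPS) of the encoding above for a non-special value v, i.e. |v| > 1:
+//
+//   uint32_t sign = v & 0x80000000u;               // kSignMask
+//   uint32_t abs  = sign ? -v : v;
+//   int zeros     = CountLeadingZeros32(abs);      // clz
+//   uint32_t exp  = (31 - zeros) + kExponentBias;  // unbiased exp: 31 - zeros
+//   abs <<= zeros + 1;                             // drop the implicit 1 bit
+//   hi = sign | (exp << kExponentShift)
+//             | (abs >> (32 - kMantissaBitsInTopWord));
+//   lo = abs << kMantissaBitsInTopWord;            // low 12 bits of abs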
+
+
+void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register scratch1,
+ Register scratch2) {
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ sra(scratch1, a0, kSmiTagSize);
+ __ mtc1(scratch1, f14);
+ __ cvt_d_w(f14, f14);
+ __ sra(scratch1, a1, kSmiTagSize);
+ __ mtc1(scratch1, f12);
+ __ cvt_d_w(f12, f12);
+ if (destination == kCoreRegisters) {
+ __ Move(a2, a3, f14);
+ __ Move(a0, a1, f12);
+ }
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Write Smi from a0 to a3 and a2 in double format.
+ __ mov(scratch1, a0);
+ ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
+ __ push(ra);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Write Smi from a1 to a1 and a0 in double format.
+ __ mov(scratch1, a1);
+ ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(ra);
+ }
+}
+
+
+void FloatingPointHelper::LoadOperands(
+ MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* slow) {
+
+ // Load right operand (a0) to f12 or a2/a3.
+ LoadNumber(masm, destination,
+ a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
+
+ // Load left operand (a1) to f14 or a0/a1.
+ LoadNumber(masm, destination,
+ a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
+}
+
+
+void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
+ Destination destination,
+ Register object,
+ FPURegister dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+
+ Label is_smi, done;
+
+ __ JumpIfSmi(object, &is_smi);
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+
+ // Handle loading a double from a heap number.
+ if (CpuFeatures::IsSupported(FPU) &&
+ destination == kFPURegisters) {
+ CpuFeatures::Scope scope(FPU);
+ // Load the double from tagged HeapNumber to double register.
+
+ // ARM uses a workaround here because of the unaligned HeapNumber
+ // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
+ // point in generating even more instructions.
+ __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Load the double from heap number to dst1 and dst2 in double format.
+ __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ lw(dst2, FieldMemOperand(object,
+ HeapNumber::kValueOffset + kPointerSize));
+ }
+ __ Branch(&done);
+
+ // Handle loading a double from a smi.
+ __ bind(&is_smi);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Convert smi to double using FPU instructions.
+ __ SmiUntag(scratch1, object);
+ __ mtc1(scratch1, dst);
+ __ cvt_d_w(dst, dst);
+ if (destination == kCoreRegisters) {
+ // Load the converted smi to dst1 and dst2 in double format.
+ __ Move(dst1, dst2, dst);
+ }
+ } else {
+ ASSERT(destination == kCoreRegisters);
+    // Write the smi to dst1 and dst2 in double format.
+ __ mov(scratch1, object);
+ ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
+ __ push(ra);
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(ra);
+ }
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ FPURegister double_scratch,
+ Label* not_number) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ Label is_smi;
+ Label done;
+ Label not_in_int32_range;
+
+ __ JumpIfSmi(object, &is_smi);
+ __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
+ __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
+ __ ConvertToInt32(object,
+ dst,
+ scratch1,
+ scratch2,
+ double_scratch,
+ &not_in_int32_range);
+ __ jmp(&done);
+
+ __ bind(&not_in_int32_range);
+ __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+ __ EmitOutOfInt32RangeTruncate(dst,
+ scratch1,
+ scratch2,
+ scratch3);
+
+ __ jmp(&done);
+
+ __ bind(&is_smi);
+ __ SmiUntag(dst, object);
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
+ Register int_scratch,
+ Destination destination,
+ FPURegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register scratch2,
+ FPURegister single_scratch) {
+ ASSERT(!int_scratch.is(scratch2));
+ ASSERT(!int_scratch.is(dst1));
+ ASSERT(!int_scratch.is(dst2));
+
+ Label done;
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(int_scratch, single_scratch);
+ __ cvt_d_w(double_dst, single_scratch);
+ if (destination == kCoreRegisters) {
+ __ Move(dst1, dst2, double_dst);
+ }
+ } else {
+ Label fewer_than_20_useful_bits;
+ // Expected output:
+ // | dst2 | dst1 |
+ // | s | exp | mantissa |
+
+ // Check for zero.
+ __ mov(dst2, int_scratch);
+ __ mov(dst1, int_scratch);
+ __ Branch(&done, eq, int_scratch, Operand(zero_reg));
+
+ // Preload the sign of the value.
+ __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
+ // Get the absolute value of the object (as an unsigned integer).
+ Label skip_sub;
+ __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
+ __ Subu(int_scratch, zero_reg, int_scratch);
+ __ bind(&skip_sub);
+
+    // Get mantissa[51:20].
+
+ // Get the position of the first set bit.
+ __ clz(dst1, int_scratch);
+ __ li(scratch2, 31);
+ __ Subu(dst1, scratch2, dst1);
+
+ // Set the exponent.
+ __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
+ __ Ins(dst2, scratch2,
+ HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+
+    // Clear the first non-zero bit (it becomes the implicit leading 1).
+ __ li(scratch2, Operand(1));
+ __ sllv(scratch2, scratch2, dst1);
+ __ li(at, -1);
+ __ Xor(scratch2, scratch2, at);
+ __ And(int_scratch, int_scratch, scratch2);
+
+ // Get the number of bits to set in the lower part of the mantissa.
+ __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
+ // Set the higher 20 bits of the mantissa.
+ __ srlv(at, int_scratch, scratch2);
+ __ or_(dst2, dst2, at);
+ __ li(at, 32);
+ __ subu(scratch2, at, scratch2);
+ __ sllv(dst1, int_scratch, scratch2);
+ __ Branch(&done);
+
+ __ bind(&fewer_than_20_useful_bits);
+ __ li(at, HeapNumber::kMantissaBitsInTopWord);
+ __ subu(scratch2, at, dst1);
+ __ sllv(scratch2, int_scratch, scratch2);
+ __ Or(dst2, dst2, scratch2);
+ // Set dst1 to 0.
+ __ mov(dst1, zero_reg);
+ }
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
+ Register object,
+ Destination destination,
+ FPURegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister single_scratch,
+ Label* not_int32) {
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!heap_number_map.is(object) &&
+ !heap_number_map.is(scratch1) &&
+ !heap_number_map.is(scratch2));
+
+ Label done, obj_is_not_smi;
+
+ __ JumpIfNotSmi(object, &obj_is_not_smi);
+ __ SmiUntag(scratch1, object);
+ ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
+ scratch2, single_scratch);
+ __ Branch(&done);
+
+ __ bind(&obj_is_not_smi);
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ // Load the number.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Load the double value.
+ __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+ // On MIPS a lot of things cannot be implemented the same way so right
+ // now it makes a lot more sense to just do things manually.
+
+ // Save FCSR.
+ __ cfc1(scratch1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+ __ trunc_w_d(single_scratch, double_dst);
+ // Retrieve FCSR.
+ __ cfc1(scratch2, FCSR);
+ // Restore FCSR.
+ __ ctc1(scratch1, FCSR);
+
+ // Check for inexact conversion or exception.
+ __ And(scratch2, scratch2, kFCSRFlagMask);
+
+ // Jump to not_int32 if the operation did not succeed.
+ __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+
+ if (destination == kCoreRegisters) {
+ __ Move(dst1, dst2, double_dst);
+ }
+
+ } else {
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ // Load the double value in the destination registers.
+ __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+ // Check for 0 and -0.
+ __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
+ __ Or(scratch1, scratch1, Operand(dst2));
+ __ Branch(&done, eq, scratch1, Operand(zero_reg));
+
+ // Check that the value can be exactly represented by a 32-bit integer.
+ // Jump to not_int32 if that's not the case.
+ DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
+
+ // dst1 and dst2 were trashed. Reload the double value.
+ __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+ }
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ FPURegister double_scratch,
+ Label* not_int32) {
+ ASSERT(!dst.is(object));
+ ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
+ ASSERT(!scratch1.is(scratch2) &&
+ !scratch1.is(scratch3) &&
+ !scratch2.is(scratch3));
+
+ Label done;
+
+ // Untag the object into the destination register.
+ __ SmiUntag(dst, object);
+ // Just return if the object is a smi.
+ __ JumpIfSmi(object, &done);
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ // Object is a heap number.
+ // Convert the floating point value to a 32-bit integer.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Load the double value.
+ __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+ // On MIPS a lot of things cannot be implemented the same way so right
+ // now it makes a lot more sense to just do things manually.
+
+ // Save FCSR.
+ __ cfc1(scratch1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+ __ trunc_w_d(double_scratch, double_scratch);
+ // Retrieve FCSR.
+ __ cfc1(scratch2, FCSR);
+ // Restore FCSR.
+ __ ctc1(scratch1, FCSR);
+
+ // Check for inexact conversion or exception.
+ __ And(scratch2, scratch2, kFCSRFlagMask);
+
+ // Jump to not_int32 if the operation did not succeed.
+ __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+ // Get the result in the destination register.
+ __ mfc1(dst, double_scratch);
+
+ } else {
+ // Load the double value in the destination registers.
+ __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+ // Check for 0 and -0.
+ __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
+ __ Or(dst, scratch2, Operand(dst));
+ __ Branch(&done, eq, dst, Operand(zero_reg));
+
+ DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
+
+    // Register state after DoubleIs32BitInteger.
+ // dst: mantissa[51:20].
+ // scratch2: 1
+
+ // Shift back the higher bits of the mantissa.
+ __ srlv(dst, dst, scratch3);
+ // Set the implicit first bit.
+ __ li(at, 32);
+ __ subu(scratch3, at, scratch3);
+ __ sllv(scratch2, scratch2, scratch3);
+ __ Or(dst, dst, scratch2);
+ // Set the sign.
+ __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ Label skip_sub;
+ __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
+ __ Subu(dst, zero_reg, dst);
+ __ bind(&skip_sub);
+ }
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
+ Register src1,
+ Register src2,
+ Register dst,
+ Register scratch,
+ Label* not_int32) {
+ // Get exponent alone in scratch.
+ __ Ext(scratch,
+ src1,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+  // Subtract the bias from the exponent.
+ __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));
+
+ // src1: higher (exponent) part of the double value.
+ // src2: lower (mantissa) part of the double value.
+ // scratch: unbiased exponent.
+
+ // Fast cases. Check for obvious non 32-bit integer values.
+ // Negative exponent cannot yield 32-bit integers.
+ __ Branch(not_int32, lt, scratch, Operand(zero_reg));
+ // Exponent greater than 31 cannot yield 32-bit integers.
+ // Also, a positive value with an exponent equal to 31 is outside of the
+ // signed 32-bit integer range.
+ // Another way to put it is that if (exponent - signbit) > 30 then the
+ // number cannot be represented as an int32.
+ Register tmp = dst;
+ __ srl(at, src1, 31);
+ __ subu(tmp, scratch, at);
+ __ Branch(not_int32, gt, tmp, Operand(30));
+  // Jump to not_int32 if bits [21:0] of the mantissa are not null: for any
+  // 32-bit integer value (with exponent <= 30) they must be zero.
+ __ And(tmp, src2, 0x3fffff);
+ __ Branch(not_int32, ne, tmp, Operand(zero_reg));
+
+  // Otherwise the exponent needs to be big enough to shift left all the
+  // non-zero bits left. So we need the (30 - exponent) last bits of the
+  // 31 higher bits of the mantissa to be null.
+  // Because bits [21:0] are null, we can check instead that the
+  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
+
+ // Get the 32 higher bits of the mantissa in dst.
+ __ Ext(dst,
+ src2,
+ HeapNumber::kMantissaBitsInTopWord,
+ 32 - HeapNumber::kMantissaBitsInTopWord);
+ __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
+ __ or_(dst, dst, at);
+
+ // Create the mask and test the lower bits (of the higher bits).
+ __ li(at, 32);
+ __ subu(scratch, at, scratch);
+ __ li(src2, 1);
+ __ sllv(src1, src2, scratch);
+ __ Subu(src1, src1, Operand(1));
+ __ And(src1, dst, src1);
+ __ Branch(not_int32, ne, src1, Operand(zero_reg));
+}
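+
+
+// Editorial aside (illustrative C sketch; shift counts are taken mod 32 to
+// mirror what MIPS sllv does): the check above corresponds to
+//
+//   bool DoubleIs32BitInteger(uint32_t hi, uint32_t lo) {
+//     int exp  = ((hi >> kExponentShift) & 0x7ff) - kExponentBias;
+//     int sign = hi >> 31;
+//     if (exp < 0 || exp - sign > 30) return false;  // out of int32 range
+//     if (lo & 0x3fffff) return false;               // bits [21:0] must be 0
+//     uint32_t top = (lo >> kMantissaBitsInTopWord)  // high 32 mantissa bits
+//                  | (hi << kNonMantissaBitsInTopWord);
+//     return (top & ((1u << ((32 - exp) & 31)) - 1)) == 0;
+//   }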
+
+
+void FloatingPointHelper::CallCCodeForDoubleOperation(
+ MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch) {
+ // Using core registers:
+ // a0: Left value (least significant part of mantissa).
+ // a1: Left value (sign, exponent, top of mantissa).
+ // a2: Right value (least significant part of mantissa).
+ // a3: Right value (sign, exponent, top of mantissa).
+
+ // Assert that heap_number_result is saved.
+ // We currently always use s0 to pass it.
+ ASSERT(heap_number_result.is(s0));
+
+ // Push the current return address before the C call.
+ __ push(ra);
+ __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
+ if (!IsMipsSoftFloatABI) {
+ CpuFeatures::Scope scope(FPU);
+    // We are not using MIPS FPU instructions, and the parameters for the
+    // runtime function call are prepared in a0-a3 registers, but the
+    // function we are calling is compiled with the hard-float flag and
+    // expects the hard-float ABI (parameters in f12/f14 registers). We need
+    // to copy the parameters from the a0-a3 registers to the f12/f14
+    // register pairs.
+ __ Move(f12, a0, a1);
+ __ Move(f14, a2, a3);
+ }
+ // Call C routine that may not cause GC or other trouble.
+ __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
+ 4);
+ // Store answer in the overwritable heap number.
+ if (!IsMipsSoftFloatABI) {
+ CpuFeatures::Scope scope(FPU);
+ // Double returned in register f0.
+ __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+ } else {
+ // Double returned in registers v0 and v1.
+ __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
+ __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
+ }
+ // Place heap_number_result in v0 and return to the pushed return address.
+ __ mov(v0, heap_number_result);
+ __ pop(ra);
+ __ Ret();
+}
+
+
+// See the class comment: this does NOT work for int32s that are in Smi range.
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
+ Label max_negative_int;
+ // the_int_ has the answer which is a signed int32 but not a Smi.
+ // We test for the special value that has a different exponent.
+ STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ // Test sign, and save for later conditionals.
+ __ And(sign_, the_int_, Operand(0x80000000u));
+ __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
+
+ // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
+ // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
+ uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ li(scratch_, Operand(non_smi_exponent));
+ // Set the sign bit in scratch_ if the value was negative.
+ __ or_(scratch_, scratch_, sign_);
+ // Subtract from 0 if the value was negative.
+ __ subu(at, zero_reg, the_int_);
+ __ movn(the_int_, at, sign_);
+  // We should be masking the implicit first digit of the mantissa away here,
+  // but it just ends up combining harmlessly with the last digit of the
+  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
+  // the most significant 1 to hit the last bit of the 12-bit sign and
+  // exponent.
+ ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ srl(at, the_int_, shift_distance);
+ __ or_(scratch_, scratch_, at);
+ __ sw(scratch_, FieldMemOperand(the_heap_number_,
+ HeapNumber::kExponentOffset));
+ __ sll(scratch_, the_int_, 32 - shift_distance);
+ __ sw(scratch_, FieldMemOperand(the_heap_number_,
+ HeapNumber::kMantissaOffset));
+ __ Ret();
+
+ __ bind(&max_negative_int);
+ // The max negative int32 is stored as a positive number in the mantissa of
+ // a double because it uses a sign bit instead of using two's complement.
+ // The actual mantissa bits stored are all 0 because the implicit most
+ // significant 1 bit is not stored.
+ non_smi_exponent += 1 << HeapNumber::kExponentShift;
+ __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
+ __ sw(scratch_,
+ FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
+ __ mov(scratch_, zero_reg);
+ __ sw(scratch_,
+ FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+ __ Ret();
+}
+
+
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc,
+ bool never_nan_nan) {
+ Label not_identical;
+ Label heap_number, return_equal;
+ Register exp_mask_reg = t5;
+
+ __ Branch(&not_identical, ne, a0, Operand(a1));
+
+ // The two objects are identical. If we know that one of them isn't NaN then
+ // we now know they test equal.
+ if (cc != eq || !never_nan_nan) {
+ __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+ // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
+ // so we do the second best thing - test it ourselves.
+    // They are both equal and they are not both Smis so neither of them is
+    // a Smi. If it's not a heap number, then return equal.
+ if (cc == less || cc == greater) {
+ __ GetObjectType(a0, t4, t4);
+ __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ } else {
+ __ GetObjectType(a0, t4, t4);
+ __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
+ // Comparing JS objects with <=, >= is complicated.
+ if (cc != eq) {
+ __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cc == less_equal || cc == greater_equal) {
+ __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
+ __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&return_equal, ne, a0, Operand(t2));
+ if (cc == le) {
+ // undefined <= undefined should fail.
+ __ li(v0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ li(v0, Operand(LESS));
+ }
+ __ Ret();
+ }
+ }
+ }
+ }
+
+ __ bind(&return_equal);
+ if (cc == less) {
+ __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
+ } else if (cc == greater) {
+ __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
+ } else {
+ __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
+ }
+ __ Ret();
+
+ if (cc != eq || !never_nan_nan) {
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cc != lt && cc != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ And(t3, t2, Operand(exp_mask_reg));
+ // If all bits not set (ne cond), then not a NaN, objects are equal.
+ __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
+ // Or with all low-bits of mantissa.
+ __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+ __ Or(v0, t3, Operand(t2));
+ // For equal we already have the right value in v0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load v0 with the failing
+ // value if it's a NaN.
+ if (cc != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq, v0, Operand(zero_reg));
+ if (cc == le) {
+ __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
+ }
+ }
+ __ Ret();
+ }
+ // No fall through here.
+ }
+
+ __ bind(&not_identical);
+}
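+
+
+// Editorial aside (illustrative C sketch of the NaN test above): a double is
+// NaN iff all exponent bits are set and the mantissa is non-zero:
+//
+//   bool IsNaNBits(uint32_t hi, uint32_t lo) {
+//     if ((hi & kExponentMask) != kExponentMask) return false;  // finite
+//     return ((hi << kNonMantissaBitsInTopWord) | lo) != 0;     // 0 => Inf
+//   }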
+
+
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* both_loaded_as_doubles,
+ Label* slow,
+ bool strict) {
+ ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+ (lhs.is(a1) && rhs.is(a0)));
+
+ Label lhs_is_smi;
+ __ And(t0, lhs, Operand(kSmiTagMask));
+ __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg));
+ // Rhs is a Smi.
+ // Check whether the non-smi is a heap number.
+ __ GetObjectType(lhs, t4, t4);
+ if (strict) {
+ // If lhs was not a number and rhs was a Smi then strict equality cannot
+ // succeed. Return non-equal (lhs is already not zero).
+ __ mov(v0, lhs);
+ __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
+ } else {
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // the runtime.
+ __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
+ }
+
+ // Rhs is a smi, lhs is a number.
+ // Convert smi rhs to double.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ sra(at, rhs, kSmiTagSize);
+ __ mtc1(at, f14);
+ __ cvt_d_w(f14, f14);
+ __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ } else {
+ // Load lhs to a double in a2, a3.
+ __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
+ __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+ // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
+ __ mov(t6, rhs);
+ ConvertToDoubleStub stub1(a1, a0, t6, t5);
+ __ push(ra);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+
+ __ pop(ra);
+ }
+
+ // We now have both loaded as doubles.
+ __ jmp(both_loaded_as_doubles);
+
+ __ bind(&lhs_is_smi);
+ // Lhs is a Smi. Check whether the non-smi is a heap number.
+ __ GetObjectType(rhs, t4, t4);
+ if (strict) {
+    // If rhs was not a number and lhs was a Smi then strict equality cannot
+    // succeed. Return non-equal.
+ __ li(v0, Operand(1));
+ __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
+ } else {
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // the runtime.
+ __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
+ }
+
+ // Lhs is a smi, rhs is a number.
+ // Convert smi lhs to double.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ sra(at, lhs, kSmiTagSize);
+ __ mtc1(at, f12);
+ __ cvt_d_w(f12, f12);
+ __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ } else {
+ // Convert lhs to a double format. t5 is scratch.
+ __ mov(t6, lhs);
+ ConvertToDoubleStub stub2(a3, a2, t6, t5);
+ __ push(ra);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(ra);
+ // Load rhs to a double in a1, a0.
+ if (rhs.is(a0)) {
+ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ } else {
+ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+ }
+ }
+ // Fall through to both_loaded_as_doubles.
+}
+
+
+void EmitNanCheck(MacroAssembler* masm, Condition cc) {
+ bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Lhs and rhs are already loaded to f12 and f14 register pairs.
+ __ Move(t0, t1, f14);
+ __ Move(t2, t3, f12);
+ } else {
+ // Lhs and rhs are already loaded to GP registers.
+ __ mov(t0, a0); // a0 has LS 32 bits of rhs.
+ __ mov(t1, a1); // a1 has MS 32 bits of rhs.
+ __ mov(t2, a2); // a2 has LS 32 bits of lhs.
+ __ mov(t3, a3); // a3 has MS 32 bits of lhs.
+ }
+ Register rhs_exponent = exp_first ? t0 : t1;
+ Register lhs_exponent = exp_first ? t2 : t3;
+ Register rhs_mantissa = exp_first ? t1 : t0;
+ Register lhs_mantissa = exp_first ? t3 : t2;
+ Label one_is_nan, neither_is_nan;
+ Label lhs_not_nan_exp_mask_is_loaded;
+
+ Register exp_mask_reg = t4;
+ __ li(exp_mask_reg, HeapNumber::kExponentMask);
+ __ and_(t5, lhs_exponent, exp_mask_reg);
+ __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
+
+ __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
+ __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
+
+ __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
+
+ __ li(exp_mask_reg, HeapNumber::kExponentMask);
+ __ bind(&lhs_not_nan_exp_mask_is_loaded);
+ __ and_(t5, rhs_exponent, exp_mask_reg);
+
+ __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
+
+ __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
+ __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
+
+ __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
+
+ __ bind(&one_is_nan);
+ // NaN comparisons always fail.
+ // Load whatever we need in v0 to make the comparison fail.
+ if (cc == lt || cc == le) {
+ __ li(v0, Operand(GREATER));
+ } else {
+ __ li(v0, Operand(LESS));
+ }
+ __ Ret(); // Return.
+
+ __ bind(&neither_is_nan);
+}
+
+
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
+ // f12 and f14 have the two doubles. Neither is a NaN.
+  // Without FPU support, a C routine (one that may not cause GC or other
+  // trouble) is called to compare them; the call and return are written out
+  // manually because the argument slots need to be freed afterwards.
+
+ Label return_result_not_equal, return_result_equal;
+ if (cc == eq) {
+ // Doubles are not equal unless they have the same bit pattern.
+ // Exception: 0 and -0.
+ bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Lhs and rhs are already loaded to f12 and f14 register pairs.
+ __ Move(t0, t1, f14);
+ __ Move(t2, t3, f12);
+ } else {
+ // Lhs and rhs are already loaded to GP registers.
+ __ mov(t0, a0); // a0 has LS 32 bits of rhs.
+ __ mov(t1, a1); // a1 has MS 32 bits of rhs.
+ __ mov(t2, a2); // a2 has LS 32 bits of lhs.
+ __ mov(t3, a3); // a3 has MS 32 bits of lhs.
+ }
+ Register rhs_exponent = exp_first ? t0 : t1;
+ Register lhs_exponent = exp_first ? t2 : t3;
+ Register rhs_mantissa = exp_first ? t1 : t0;
+ Register lhs_mantissa = exp_first ? t3 : t2;
+
+ __ xor_(v0, rhs_mantissa, lhs_mantissa);
+ __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
+
+ __ subu(v0, rhs_exponent, lhs_exponent);
+ __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
+ // 0, -0 case.
+ __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
+ __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
+ __ or_(t4, rhs_exponent, lhs_exponent);
+ __ or_(t4, t4, rhs_mantissa);
+
+ __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
+
+ __ bind(&return_result_equal);
+ __ li(v0, Operand(EQUAL));
+ __ Ret();
+ }
+
+ __ bind(&return_result_not_equal);
+
+ if (!CpuFeatures::IsSupported(FPU)) {
+ __ push(ra);
+ __ PrepareCallCFunction(4, t4); // Two doubles count as 4 arguments.
+ if (!IsMipsSoftFloatABI) {
+      // We are not using MIPS FPU instructions, and the parameters for the
+      // runtime function call are prepared in a0-a3 registers, but the
+      // function we are calling is compiled with the hard-float flag and
+      // expects the hard-float ABI (parameters in f12/f14 registers). We
+      // need to copy the parameters from the a0-a3 registers to the f12/f14
+      // register pairs.
+ __ Move(f12, a0, a1);
+ __ Move(f14, a2, a3);
+ }
+ __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
+ __ pop(ra); // Because this function returns int, result is in v0.
+ __ Ret();
+ } else {
+ CpuFeatures::Scope scope(FPU);
+ Label equal, less_than;
+ __ c(EQ, D, f12, f14);
+ __ bc1t(&equal);
+ __ nop();
+
+ __ c(OLT, D, f12, f14);
+ __ bc1t(&less_than);
+ __ nop();
+
+ // Not equal, not less, not NaN, must be greater.
+ __ li(v0, Operand(GREATER));
+ __ Ret();
+
+ __ bind(&equal);
+ __ li(v0, Operand(EQUAL));
+ __ Ret();
+
+ __ bind(&less_than);
+ __ li(v0, Operand(LESS));
+ __ Ret();
+ }
+}
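+
+
+// Editorial aside (illustrative only): for cc == eq the core-register path
+// above implements bitwise double equality with the +0/-0 exception, roughly:
+//
+//   bool DoublesEqual(uint32_t lhi, uint32_t llo,
+//                     uint32_t rhi, uint32_t rlo) {
+//     if (llo != rlo) return false;  // mantissa low words differ
+//     if (lhi == rhi) return true;   // identical bit patterns
+//     // Only +0 vs -0 may still be equal: all bits but the signs are zero.
+//     return ((lhi << 1) | (rhi << 1) | llo) == 0;
+//   }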
+
+
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs) {
+ // If either operand is a JS object or an oddball value, then they are
+ // not equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ Label first_non_object;
+ // Get the type of the first operand into a2 and compare it with
+ // FIRST_SPEC_OBJECT_TYPE.
+ __ GetObjectType(lhs, a2, a2);
+ __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Return non-zero.
+ Label return_not_equal;
+ __ bind(&return_not_equal);
+ __ li(v0, Operand(1));
+ __ Ret();
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
+
+ __ GetObjectType(rhs, a3, a3);
+ __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Check for oddballs: true, false, null, undefined.
+ __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
+
+ // Now that we have the types we might as well check for symbol-symbol.
+ // Ensure that no non-strings have the symbol bit set.
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ And(t2, a2, Operand(a3));
+ __ And(t0, t2, Operand(kIsSymbolMask));
+ __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
+}
+
+
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* both_loaded_as_doubles,
+ Label* not_heap_numbers,
+ Label* slow) {
+ __ GetObjectType(lhs, a3, a2);
+ __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
+ __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ // If first was a heap number & second wasn't, go to slow case.
+ __ Branch(slow, ne, a3, Operand(a2));
+
+ // Both are heap numbers. Load them up then jump to the code we have
+ // for that.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ } else {
+ __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
+ if (rhs.is(a0)) {
+ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ } else {
+ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+ }
+ }
+ __ jmp(both_loaded_as_doubles);
+}
+
+
+// Fast negative check for symbol-to-symbol equality.
+static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* possible_strings,
+ Label* not_both_strings) {
+ ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+ (lhs.is(a1) && rhs.is(a0)));
+
+ // a2 is object type of lhs.
+ // Ensure that no non-strings have the symbol bit set.
+ Label object_test;
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ And(at, a2, Operand(kIsNotStringMask));
+ __ Branch(&object_test, ne, at, Operand(zero_reg));
+ __ And(at, a2, Operand(kIsSymbolMask));
+ __ Branch(possible_strings, eq, at, Operand(zero_reg));
+ __ GetObjectType(rhs, a3, a3);
+ __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
+ __ And(at, a3, Operand(kIsSymbolMask));
+ __ Branch(possible_strings, eq, at, Operand(zero_reg));
+
+ // Both are symbols. We already checked they weren't the same pointer
+ // so they are not equal.
+ __ li(v0, Operand(1)); // Non-zero indicates not equal.
+ __ Ret();
+
+ __ bind(&object_test);
+ __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ GetObjectType(rhs, a2, a3);
+ __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // If both objects are undetectable, they are equal. Otherwise, they
+ // are not equal, since they are different objects and an object is not
+ // equal to undefined.
+ __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
+ __ and_(a0, a2, a3);
+ __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
+ __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
+ __ Ret();
+}
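+
+
+// Editorial aside (illustrative): the last three instructions above compute
+// the result branchlessly; in C terms, with the bit fields of the two maps:
+//
+//   int both = lhs_bits & rhs_bits & (1 << Map::kIsUndetectable);
+//   return both ^ (1 << Map::kIsUndetectable);  // 0 (equal) iff both are set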
+
+
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ bool object_is_smi,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+ __ sra(mask, mask, kSmiTagSize + 1);
+ __ Addu(mask, mask, -1); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Isolate* isolate = masm->isolate();
+ Label is_smi;
+ Label load_result_from_cache;
+ if (!object_is_smi) {
+ __ JumpIfSmi(object, &is_smi);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ Addu(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ __ lw(scratch2, MemOperand(scratch1, kPointerSize));
+ __ lw(scratch1, MemOperand(scratch1, 0));
+ __ Xor(scratch1, scratch1, Operand(scratch2));
+ __ And(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
+ __ Addu(scratch1, number_string_cache, scratch1);
+
+ Register probe = mask;
+ __ lw(probe,
+ FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ JumpIfSmi(probe, not_found);
+ __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+ __ c(EQ, D, f12, f14);
+ __ bc1t(&load_result_from_cache);
+ __ nop(); // bc1t() requires explicit fill of branch delay slot.
+ __ Branch(not_found);
+ } else {
+      // Note that there is no cache check for the non-FPU case, even
+      // though it seems there could be one. It might be a tiny
+      // optimization for non-FPU cores.
+ __ Branch(not_found);
+ }
+ }
+
+ __ bind(&is_smi);
+ Register scratch = scratch1;
+ __ sra(scratch, object, 1); // Shift away the tag.
+ __ And(scratch, mask, Operand(scratch));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ __ sll(scratch, scratch, kPointerSizeLog2 + 1);
+ __ Addu(scratch, number_string_cache, scratch);
+
+ // Check if the entry is the smi we are looking for.
+ Register probe = mask;
+ __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ __ Branch(not_found, ne, object, Operand(probe));
+
+ // Get the result from the cache.
+ __ bind(&load_result_from_cache);
+ __ lw(result,
+ FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+
+ __ IncrementCounter(isolate->counters()->number_to_string_native(),
+ 1,
+ scratch1,
+ scratch2);
+}
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ __ lw(a1, MemOperand(sp, 0));
+
+ // Generate code to lookup number in the number string cache.
+ GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
+ __ Addu(sp, sp, Operand(1 * kPointerSize));
+ __ Ret();
+
+ __ bind(&runtime);
+ // Handle number to string in the runtime system if not found in the cache.
+ __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
+}
+
+
+// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
+// On exit, v0 is 0, positive, or negative (smi) to indicate the result
+// of the comparison.
+void CompareStub::Generate(MacroAssembler* masm) {
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles;
+
+ if (include_smi_compare_) {
+ Label not_two_smis, smi_done;
+ __ Or(a2, a1, a0);
+ __ JumpIfNotSmi(a2, &not_two_smis);
+ __ sra(a1, a1, 1);
+ __ sra(a0, a0, 1);
+ __ Subu(v0, a1, a0);
+ __ Ret();
+ __ bind(&not_two_smis);
+ } else if (FLAG_debug_code) {
+ __ Or(a2, a1, a0);
+ __ And(a2, a2, kSmiTagMask);
+ __ Assert(ne, "CompareStub: unexpected smi operands.",
+ a2, Operand(zero_reg));
+ }
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Handle the case where the objects are identical. Either returns the answer
+ // or goes to slow. Only falls through if the objects were not identical.
+ EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+
+ // If either is a Smi (we know that not both are), then they can only
+ // be strictly equal if the other is a HeapNumber.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ And(t2, lhs_, Operand(rhs_));
+ __ JumpIfNotSmi(t2, &not_smis, t0);
+ // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
+ // 1) Return the answer.
+ // 2) Go to slow.
+ // 3) Fall through to both_loaded_as_doubles.
+ // 4) Jump to rhs_not_nan.
+ // In cases 3 and 4 we have found out we were dealing with a number-number
+ // comparison and the numbers have been loaded into f12 and f14 as doubles,
+ // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
+ EmitSmiNonsmiComparison(masm, lhs_, rhs_,
+ &both_loaded_as_doubles, &slow, strict_);
+
+ __ bind(&both_loaded_as_doubles);
+ // f12, f14 are the double representations of the left hand side
+ // and the right hand side if we have FPU. Otherwise a2, a3 represent
+ // left hand side and a0, a1 represent right hand side.
+
+ Isolate* isolate = masm->isolate();
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ Label nan;
+ __ li(t0, Operand(LESS));
+ __ li(t1, Operand(GREATER));
+ __ li(t2, Operand(EQUAL));
+
+ // Check if either rhs or lhs is NaN.
+ __ c(UN, D, f12, f14);
+ __ bc1t(&nan);
+ __ nop();
+
+ // Check if LESS condition is satisfied. If true, move conditionally
+ // result to v0.
+ __ c(OLT, D, f12, f14);
+ __ movt(v0, t0);
+    // Use the previous check to conditionally store the opposite
+    // condition (GREATER) to v0. If rhs is equal to lhs, this will be
+    // corrected by the next check.
+ __ movf(v0, t1);
+ // Check if EQUAL condition is satisfied. If true, move conditionally
+ // result to v0.
+ __ c(EQ, D, f12, f14);
+ __ movt(v0, t2);
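+    // Exactly one of LESS, GREATER, or EQUAL ends up in v0: OLT selects
+    // LESS or GREATER via movt/movf, and the EQ check then overwrites
+    // GREATER with EQUAL when the operands are equal.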
+
+ __ Ret();
+
+ __ bind(&nan);
+ // NaN comparisons always fail.
+ // Load whatever we need in v0 to make the comparison fail.
+ if (cc_ == lt || cc_ == le) {
+ __ li(v0, Operand(GREATER));
+ } else {
+ __ li(v0, Operand(LESS));
+ }
+ __ Ret();
+ } else {
+ // Checks for NaN in the doubles we have loaded. Can return the answer or
+ // fall through if neither is a NaN. Also binds rhs_not_nan.
+ EmitNanCheck(masm, cc_);
+
+ // Compares two doubles that are not NaNs. Returns the answer.
+ // Never falls through.
+ EmitTwoNonNanDoubleComparison(masm, cc_);
+ }
+
+ __ bind(&not_smis);
+ // At this point we know we are dealing with two different objects,
+ // and neither of them is a Smi. The objects are in lhs_ and rhs_.
+ if (strict_) {
+ // This returns non-equal for some object types, or falls through if it
+ // was not lucky.
+ EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+ }
+
+ Label check_for_symbols;
+ Label flat_string_check;
+  // Check for heap-number-heap-number comparison. This can jump to the
+  // slow case, or load both doubles and jump to the code that handles that
+  // case. If the inputs are not both heap numbers, it jumps to
+  // check_for_symbols, in which case a2 will contain the type of lhs_.
+ EmitCheckForTwoHeapNumbers(masm,
+ lhs_,
+ rhs_,
+ &both_loaded_as_doubles,
+ &check_for_symbols,
+ &flat_string_check);
+
+ __ bind(&check_for_symbols);
+ if (cc_ == eq && !strict_) {
+ // Returns an answer for two symbols or two detectable objects.
+ // Otherwise jumps to string case or not both strings case.
+ // Assumes that a2 is the type of lhs_ on entry.
+ EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+ }
+
+ // Check for both being sequential ASCII strings, and inline if that is the
+ // case.
+ __ bind(&flat_string_check);
+
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
+
+ __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
+ if (cc_ == eq) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(masm,
+ lhs_,
+ rhs_,
+ a2,
+ a3,
+ t0);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ lhs_,
+ rhs_,
+ a2,
+ a3,
+ t0,
+ t1);
+ }
+ // Never falls through to here.
+
+ __ bind(&slow);
+ // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
+ // a1 (rhs) second.
+ __ Push(lhs_, rhs_);
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript native;
+ if (cc_ == eq) {
+ native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ native = Builtins::COMPARE;
+ int ncr; // NaN compare result.
+ if (cc_ == lt || cc_ == le) {
+ ncr = GREATER;
+ } else {
+ ASSERT(cc_ == gt || cc_ == ge); // Remaining cases.
+ ncr = LESS;
+ }
+ __ li(a0, Operand(Smi::FromInt(ncr)));
+ __ push(a0);
+ }
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
+}
+
+
+// The stub returns zero for false, and a non-zero value for true.
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ // This stub uses FPU instructions.
+ CpuFeatures::Scope scope(FPU);
+
+ Label false_result;
+ Label not_heap_number;
+ Register scratch0 = t5.is(tos_) ? t3 : t5;
+
+ // undefined -> false
+ __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&false_result, eq, tos_, Operand(scratch0));
+
+ // Boolean -> its value
+ __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
+ __ Branch(&false_result, eq, tos_, Operand(scratch0));
+ __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
+ // "tos_" is a register and contains a non-zero value. Hence we implicitly
+ // return true if the equal condition is satisfied.
+ __ Ret(eq, tos_, Operand(scratch0));
+
+ // Smis: 0 -> false, all other -> true
+ __ And(scratch0, tos_, tos_);
+ __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
+ __ And(scratch0, tos_, Operand(kSmiTagMask));
+ // "tos_" is a register and contains a non-zero value. Hence we implicitly
+ // return true if the not equal condition is satisfied.
+ __ Ret(eq, scratch0, Operand(zero_reg));
+
+ // 'null' -> false
+ __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
+ __ Branch(&false_result, eq, tos_, Operand(scratch0));
+
+ // HeapNumber => false if +0, -0, or NaN.
+ __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&not_heap_number, ne, scratch0, Operand(at));
+
+ __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+ __ fcmp(f12, 0.0, UEQ);
+
+ // "tos_" is a register, and contains a non zero value by default.
+ // Hence we only need to overwrite "tos_" with zero to return false for
+ // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+ __ movt(tos_, zero_reg);
+ __ Ret();
+
+ __ bind(&not_heap_number);
+
+ // It can be an undetectable object.
+ // Undetectable => false.
+ __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
+ __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
+ __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
+
+ // JavaScript object => true.
+ __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+
+ // "tos_" is a register and contains a non-zero value.
+ // Hence we implicitly return true if the greater than
+ // condition is satisfied.
+ __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Check for string.
+ __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+ // "tos_" is a register and contains a non-zero value.
+ // Hence we implicitly return true if the greater than
+ // condition is satisfied.
+ __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
+
+ // String value => false iff empty, i.e., length is zero.
+ __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+ // If length is zero, "tos_" contains zero ==> false.
+ // If length is not zero, "tos_" contains a non-zero value ==> true.
+ __ Ret();
+
+ // Return 0 in "tos_" for false.
+ __ bind(&false_result);
+ __ mov(tos_, zero_reg);
+ __ Ret();
+}
+
+
+const char* UnaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name = NULL; // Make g++ happy.
+ switch (mode_) {
+ case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "UnaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ UnaryOpIC::GetName(operand_type_));
+ return name_;
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::Generate(MacroAssembler* masm) {
+ switch (operand_type_) {
+ case UnaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case UnaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case UnaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case UnaryOpIC::GENERIC:
+ GenerateGenericStub(masm);
+ break;
+ }
+}
+
+
+void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ // Argument is in a0 and v0 at this point, so we can overwrite a0.
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
+ __ li(a2, Operand(Smi::FromInt(MinorKey())));
+ __ li(a1, Operand(Smi::FromInt(op_)));
+ __ li(a0, Operand(Smi::FromInt(operand_type_)));
+
+ __ Push(v0, a2, a1, a0);
+
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
+ masm->isolate()),
+ 4,
+ 1);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateSmiStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateSmiStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow);
+ __ bind(&non_smi);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+ Label non_smi;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+ Label* non_smi,
+ Label* slow) {
+ __ JumpIfNotSmi(a0, non_smi);
+
+ // The result of negating zero or the smallest negative smi is not a smi.
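+  // Masking off the sign bit leaves zero for exactly those two values
+  // (0 and 0x80000000), so a single test catches both.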
+ __ And(t0, a0, ~0x80000000);
+ __ Branch(slow, eq, t0, Operand(zero_reg));
+
+ // Return '0 - value'.
+ __ Subu(v0, zero_reg, a0);
+ __ Ret();
+}
+
+
+void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+ Label* non_smi) {
+ __ JumpIfNotSmi(a0, non_smi);
+
+ // Flip bits and revert inverted smi-tag.
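+  // For a tagged smi 2n, flipping all bits gives -2n-1; clearing the tag
+  // bit yields -2n-2 = 2 * (-n-1), which is the tagged value of ~n.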
+ __ Neg(v0, a0);
+ __ And(v0, v0, ~kSmiTagMask);
+ __ Ret();
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateHeapNumberStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateHeapNumberStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+ Label non_smi, slow, call_builtin;
+ GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+ __ bind(&call_builtin);
+ GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+ Label* slow) {
+ EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
+ // a0 is a heap number. Get a new heap number in a1.
+ if (mode_ == UNARY_OVERWRITE) {
+ __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ } else {
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ __ EnterInternalFrame();
+ __ push(a0);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(a1, v0);
+ __ pop(a0);
+ __ LeaveInternalFrame();
+
+ __ bind(&heapnumber_allocated);
+ __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+ __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
+ __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
+ __ mov(v0, a1);
+ }
+ __ Ret();
+}
+
+
+void UnaryOpStub::GenerateHeapNumberCodeBitNot(
+ MacroAssembler* masm,
+ Label* slow) {
+ Label impossible;
+
+ EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
+ // Convert the heap number in a0 to an untagged integer in a1.
+ __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
+
+ // Do the bitwise operation and check if the result fits in a smi.
+ Label try_float;
+ __ Neg(a1, a1);
+ __ Addu(a2, a1, Operand(0x40000000));
+ __ Branch(&try_float, lt, a2, Operand(zero_reg));
+
+ // Tag the result as a smi and we're done.
+ __ SmiTag(v0, a1);
+ __ Ret();
+
+ // Try to store the result in a heap number.
+ __ bind(&try_float);
+ if (mode_ == UNARY_NO_OVERWRITE) {
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ // Allocate a new heap number without zapping v0, which we need if it fails.
+ __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ __ EnterInternalFrame();
+ __ push(v0); // Push the heap number, not the untagged int32.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(a2, v0); // Move the new heap number into a2.
+ // Get the heap number into v0, now that the new heap number is in a2.
+ __ pop(v0);
+ __ LeaveInternalFrame();
+
+ // Convert the heap number in v0 to an untagged integer in a1.
+    // This can't go slow-case because it's the same number we already
+    // converted successfully once before.
+ __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
+    // Invert the bits of the result (bitwise NOT).
+ __ Xor(a1, a1, -1);
+
+ __ bind(&heapnumber_allocated);
+ __ mov(v0, a2); // Move newly allocated heap number to v0.
+ }
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(a1, f0);
+ __ cvt_d_w(f0, f0);
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+ } else {
+ // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+ // have to set up a frame.
+ WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&impossible);
+ if (FLAG_debug_code) {
+ __ stop("Incorrect assumption in bit-not stub");
+ }
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateGenericStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateGenericStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericCodeFallback(
+ MacroAssembler* masm) {
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ push(a0);
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ Label get_result;
+
+ __ Push(a1, a0);
+
+ __ li(a2, Operand(Smi::FromInt(MinorKey())));
+ __ li(a1, Operand(Smi::FromInt(op_)));
+ __ li(a0, Operand(Smi::FromInt(operands_type_)));
+ __ Push(a2, a1, a0);
+
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
+ masm->isolate()),
+ 5,
+ 1);
+}
+
+
+void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+ MacroAssembler* masm) {
+ UNIMPLEMENTED();
+}
+
+
+void BinaryOpStub::Generate(MacroAssembler* masm) {
+ switch (operands_type_) {
+ case BinaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case BinaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case BinaryOpIC::INT32:
+ GenerateInt32Stub(masm);
+ break;
+ case BinaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case BinaryOpIC::ODDBALL:
+ GenerateOddballStub(masm);
+ break;
+ case BinaryOpIC::BOTH_STRING:
+ GenerateBothStringStub(masm);
+ break;
+ case BinaryOpIC::STRING:
+ GenerateStringStub(masm);
+ break;
+ case BinaryOpIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+const char* BinaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "BinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(operands_type_));
+ return name_;
+}
+
+
+void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
+ Register left = a1;
+ Register right = a0;
+
+ Register scratch1 = t0;
+ Register scratch2 = t1;
+
+ ASSERT(right.is(a0));
+ STATIC_ASSERT(kSmiTag == 0);
+
+ Label not_smi_result;
+ switch (op_) {
+ case Token::ADD:
+ __ AdduAndCheckForOverflow(v0, left, right, scratch1);
+ __ RetOnNoOverflow(scratch1);
+ // No need to revert anything - right and left are intact.
+ break;
+ case Token::SUB:
+ __ SubuAndCheckForOverflow(v0, left, right, scratch1);
+ __ RetOnNoOverflow(scratch1);
+ // No need to revert anything - right and left are intact.
+ break;
+ case Token::MUL: {
+ // Remove tag from one of the operands. This way the multiplication result
+ // will be a smi if it fits the smi range.
+ __ SmiUntag(scratch1, right);
+ // Do multiplication.
+ // lo = lower 32 bits of scratch1 * left.
+ // hi = higher 32 bits of scratch1 * left.
+ __ Mult(left, scratch1);
+ // Check for overflowing the smi range - no overflow if higher 33 bits of
+ // the result are identical.
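+      // (If the 64-bit product fits in 32 bits, 'hi' equals the sign
+      // extension of 'lo', i.e. lo >> 31 arithmetically.)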
+ __ mflo(scratch1);
+ __ mfhi(scratch2);
+ __ sra(scratch1, scratch1, 31);
+ __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
+ // Go slow on zero result to handle -0.
+ __ mflo(v0);
+ __ Ret(ne, v0, Operand(zero_reg));
+ // We need -0 if we were multiplying a negative number with 0 to get 0.
+ // We know one of them was zero.
+ __ Addu(scratch2, right, left);
+ Label skip;
+ // ARM uses the 'pl' condition, which is 'ge'.
+ // Negating it results in 'lt'.
+ __ Branch(&skip, lt, scratch2, Operand(zero_reg));
+ ASSERT(Smi::FromInt(0) == 0);
+ __ mov(v0, zero_reg);
+ __ Ret(); // Return smi 0 if the non-zero one was positive.
+ __ bind(&skip);
+ // We fall through here if we multiplied a negative number with 0, because
+ // that would mean we should produce -0.
+ }
+ break;
+ case Token::DIV: {
+ Label done;
+ __ SmiUntag(scratch2, right);
+ __ SmiUntag(scratch1, left);
+ __ Div(scratch1, scratch2);
+ // A minor optimization: div may be calculated asynchronously, so we check
+ // for division by zero before getting the result.
+ __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
+      // If the result is 0, we need to make sure the divisor (right) is
+      // positive, otherwise it is a -0 case.
+ // Quotient is in 'lo', remainder is in 'hi'.
+ // Check for no remainder first.
+ __ mfhi(scratch1);
+ __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
+ __ mflo(scratch1);
+ __ Branch(&done, ne, scratch1, Operand(zero_reg));
+ __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+ __ bind(&done);
+ // Check that the signed result fits in a Smi.
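+      // Adding 0x40000000 maps the smi range [-2^30, 2^30 - 1] onto the
+      // non-negative int32 range; any value outside wraps to negative.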
+ __ Addu(scratch2, scratch1, Operand(0x40000000));
+ __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+ __ SmiTag(v0, scratch1);
+ __ Ret();
+ }
+ break;
+ case Token::MOD: {
+ Label done;
+ __ SmiUntag(scratch2, right);
+ __ SmiUntag(scratch1, left);
+ __ Div(scratch1, scratch2);
+ // A minor optimization: div may be calculated asynchronously, so we check
+ // for division by 0 before calling mfhi.
+ // Check for zero on the right hand side.
+ __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
+ // If the result is 0, we need to make sure the dividend (left) is
+ // positive (or 0), otherwise it is a -0 case.
+ // Remainder is in 'hi'.
+ __ mfhi(scratch2);
+ __ Branch(&done, ne, scratch2, Operand(zero_reg));
+ __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+ __ bind(&done);
+ // Check that the signed result fits in a Smi.
+ __ Addu(scratch1, scratch2, Operand(0x40000000));
+ __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+ __ SmiTag(v0, scratch2);
+ __ Ret();
+ }
+ break;
+ case Token::BIT_OR:
+ __ Or(v0, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::BIT_AND:
+ __ And(v0, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::BIT_XOR:
+ __ Xor(v0, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::SAR:
+ // Remove tags from right operand.
+ __ GetLeastBitsFromSmi(scratch1, right, 5);
+ __ srav(scratch1, left, scratch1);
+      // Clear the tag bit to make the shifted value a valid smi again.
+ __ And(v0, scratch1, Operand(~kSmiTagMask));
+ __ Ret();
+ break;
+ case Token::SHR:
+      // Remove tags from operands. We can't shift the tagged value
+      // directly, because the zeros would then be shifted into bit 30
+      // instead of bit 31.
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ srlv(v0, scratch1, scratch2);
+ // Unsigned shift is not allowed to produce a negative number, so
+ // check the sign bit and the sign bit after Smi tagging.
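+      // 0xc0000000 tests bit 31 (already negative) and bit 30 (which
+      // would become the sign bit after the smi-tagging shift).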
+ __ And(scratch1, v0, Operand(0xc0000000));
+ __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
+ // Smi tag result.
+ __ SmiTag(v0);
+ __ Ret();
+ break;
+ case Token::SHL:
+ // Remove tags from operands.
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ sllv(scratch1, scratch1, scratch2);
+ // Check that the signed result fits in a Smi.
+ __ Addu(scratch2, scratch1, Operand(0x40000000));
+ __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+ __ SmiTag(v0, scratch1);
+ __ Ret();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ bind(&not_smi_result);
+}
+
+
+void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required) {
+ Register left = a1;
+ Register right = a0;
+ Register scratch1 = t3;
+ Register scratch2 = t5;
+ Register scratch3 = t0;
+
+ ASSERT(smi_operands || (not_numbers != NULL));
+ if (smi_operands && FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+
+ Register heap_number_map = t2;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+ // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
+ // depending on whether FPU is available or not.
+ FloatingPointHelper::Destination destination =
+ CpuFeatures::IsSupported(FPU) &&
+ op_ != Token::MOD ?
+ FloatingPointHelper::kFPURegisters :
+ FloatingPointHelper::kCoreRegisters;
+
+ // Allocate new heap number for result.
+ Register result = s0;
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
+
+ // Load the operands.
+ if (smi_operands) {
+ FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+ } else {
+ FloatingPointHelper::LoadOperands(masm,
+ destination,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ not_numbers);
+ }
+
+ // Calculate the result.
+ if (destination == FloatingPointHelper::kFPURegisters) {
+ // Using FPU registers:
+ // f12: Left value.
+ // f14: Right value.
+ CpuFeatures::Scope scope(FPU);
+ switch (op_) {
+ case Token::ADD:
+ __ add_d(f10, f12, f14);
+ break;
+ case Token::SUB:
+ __ sub_d(f10, f12, f14);
+ break;
+ case Token::MUL:
+ __ mul_d(f10, f12, f14);
+ break;
+ case Token::DIV:
+ __ div_d(f10, f12, f14);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // ARM uses a workaround here because of the unaligned HeapNumber
+ // kValueOffset. On MIPS this workaround is built into sdc1 so
+ // there's no point in generating even more instructions.
+ __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
+ __ mov(v0, result);
+ __ Ret();
+ } else {
+ // Call the C function to handle the double operation.
+ FloatingPointHelper::CallCCodeForDoubleOperation(masm,
+ op_,
+ result,
+ scratch1);
+ if (FLAG_debug_code) {
+ __ stop("Unreachable code.");
+ }
+ }
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::SHL: {
+ if (smi_operands) {
+ __ SmiUntag(a3, left);
+ __ SmiUntag(a2, right);
+ } else {
+ // Convert operands to 32-bit integers. Right in a2 and left in a3.
+ FloatingPointHelper::ConvertNumberToInt32(masm,
+ left,
+ a3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ f0,
+ not_numbers);
+ FloatingPointHelper::ConvertNumberToInt32(masm,
+ right,
+ a2,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ f0,
+ not_numbers);
+ }
+ Label result_not_a_smi;
+ switch (op_) {
+ case Token::BIT_OR:
+ __ Or(a2, a3, Operand(a2));
+ break;
+ case Token::BIT_XOR:
+ __ Xor(a2, a3, Operand(a2));
+ break;
+ case Token::BIT_AND:
+ __ And(a2, a3, Operand(a2));
+ break;
+ case Token::SAR:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(a2, a2, 5);
+ __ srav(a2, a3, a2);
+ break;
+ case Token::SHR:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(a2, a2, 5);
+ __ srlv(a2, a3, a2);
+          // SHR is special because it is required to produce a positive
+          // answer. The code below for writing into heap numbers isn't
+          // capable of writing the register as an unsigned int, so we go
+          // to the slow case when that happens.
+ if (CpuFeatures::IsSupported(FPU)) {
+ __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
+ } else {
+ __ Branch(not_numbers, lt, a2, Operand(zero_reg));
+ }
+ break;
+ case Token::SHL:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(a2, a2, 5);
+ __ sllv(a2, a3, a2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ // Check that the *signed* result fits in a smi.
+ __ Addu(a3, a2, Operand(0x40000000));
+ __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
+ __ SmiTag(v0, a2);
+ __ Ret();
+
+ // Allocate new heap number for result.
+ __ bind(&result_not_a_smi);
+ Register result = t1;
+ if (smi_operands) {
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ } else {
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ }
+
+ // a2: Answer as signed int32.
+ // t1: Heap number to write answer into.
+
+ // Nothing can go wrong now, so move the heap number to v0, which is the
+ // result.
+ __ mov(v0, t1);
+
+ if (CpuFeatures::IsSupported(FPU)) {
+        // Convert the int32 in a2 to the heap number in v0. As
+        // mentioned above, SHR always needs to produce a positive result.
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(a2, f0);
+ if (op_ == Token::SHR) {
+ __ Cvt_d_uw(f0, f0);
+ } else {
+ __ cvt_d_w(f0, f0);
+ }
+ // ARM uses a workaround here because of the unaligned HeapNumber
+ // kValueOffset. On MIPS this workaround is built into sdc1 so
+ // there's no point in generating even more instructions.
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+ } else {
+ // Tail call that writes the int32 in a2 to the heap number in v0, using
+ // a3 and a0 as scratch. v0 is preserved and returned.
+ WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
+ __ TailCallStub(&stub);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+// Generate the smi code. If the operation on smis is successful, the return
+// is generated. If the result is not a smi and heap number allocation is not
+// requested, the code falls through. If number allocation is requested but a
+// heap number cannot be allocated, the code jumps to the label gc_required.
+void BinaryOpStub::GenerateSmiCode(
+ MacroAssembler* masm,
+ Label* use_runtime,
+ Label* gc_required,
+ SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ Label not_smis;
+
+ Register left = a1;
+ Register right = a0;
+ Register scratch1 = t3;
+ Register scratch2 = t5;
+
+ // Perform combined smi check on both operands.
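+  // The OR of two tagged values has its tag bit clear only if both
+  // operands are smis (kSmiTag is 0), so one test covers both operands.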
+ __ Or(scratch1, left, Operand(right));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(scratch1, &not_smis);
+
+  // If the smi-smi operation results in a smi, the return is generated.
+ GenerateSmiSmiOperation(masm);
+
+  // If heap number results are possible, generate the result in an
+  // allocated heap number.
+ if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
+ GenerateFPOperation(masm, true, use_runtime, gc_required);
+ }
+ __ bind(&not_smis);
+}
+
+
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ Label not_smis, call_runtime;
+
+ if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+ result_type_ == BinaryOpIC::SMI) {
+ // Only allow smi results.
+ GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
+ } else {
+ // Allow heap number result and don't make a transition if a heap number
+ // cannot be allocated.
+ GenerateSmiCode(masm,
+ &call_runtime,
+ &call_runtime,
+ ALLOW_HEAPNUMBER_RESULTS);
+ }
+
+ // Code falls through if the result is not returned as either a smi or heap
+ // number.
+ GenerateTypeTransition(masm);
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
+}
+
+
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == BinaryOpIC::STRING);
+  // Try to add the arguments as strings; otherwise, transition to the
+  // generic BinaryOpIC type.
+ GenerateAddStrings(masm);
+ GenerateTypeTransition(masm);
+}
+
+
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+ Label call_runtime;
+ ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(op_ == Token::ADD);
+ // If both arguments are strings, call the string add stub.
+ // Otherwise, do a transition.
+
+ // Registers containing left and right operands respectively.
+ Register left = a1;
+ Register right = a0;
+
+ // Test if left operand is a string.
+ __ JumpIfSmi(left, &call_runtime);
+ __ GetObjectType(left, a2, a2);
+ __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+ // Test if right operand is a string.
+ __ JumpIfSmi(right, &call_runtime);
+ __ GetObjectType(right, a2, a2);
+ __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&call_runtime);
+ GenerateTypeTransition(masm);
+}
+
+
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == BinaryOpIC::INT32);
+
+ Register left = a1;
+ Register right = a0;
+ Register scratch1 = t3;
+ Register scratch2 = t5;
+ FPURegister double_scratch = f0;
+ FPURegister single_scratch = f6;
+
+ Register heap_number_result = no_reg;
+ Register heap_number_map = t2;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ Label call_runtime;
+  // Label for type transition, used for wrong input or output types.
+  // Conceptually there are two distinct transition causes, but both would
+  // bind to the same position, so a single label is used.
+ Label transition;
+
+ // Smi-smi fast case.
+ Label skip;
+ __ Or(scratch1, left, right);
+ __ JumpIfNotSmi(scratch1, &skip);
+ GenerateSmiSmiOperation(masm);
+ // Fall through if the result is not a smi.
+ __ bind(&skip);
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+      // Load both operands and check that they are 32-bit integers.
+      // Jump to type transition if they are not. The registers a0 and a1
+      // (right and left) are preserved for the runtime call.
+ FloatingPointHelper::Destination destination =
+ (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
+ ? FloatingPointHelper::kFPURegisters
+ : FloatingPointHelper::kCoreRegisters;
+
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ right,
+ destination,
+ f14,
+ a2,
+ a3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ f2,
+ &transition);
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ left,
+ destination,
+ f12,
+ t0,
+ t1,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ f2,
+ &transition);
+
+ if (destination == FloatingPointHelper::kFPURegisters) {
+ CpuFeatures::Scope scope(FPU);
+ Label return_heap_number;
+ switch (op_) {
+ case Token::ADD:
+ __ add_d(f10, f12, f14);
+ break;
+ case Token::SUB:
+ __ sub_d(f10, f12, f14);
+ break;
+ case Token::MUL:
+ __ mul_d(f10, f12, f14);
+ break;
+ case Token::DIV:
+ __ div_d(f10, f12, f14);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (op_ != Token::DIV) {
+ // These operations produce an integer result.
+ // Try to return a smi if we can.
+ // Otherwise return a heap number if allowed, or jump to type
+ // transition.
+
+ // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+ // On MIPS a lot of things cannot be implemented the same way so right
+ // now it makes a lot more sense to just do things manually.
+
+ // Save FCSR.
+ __ cfc1(scratch1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+ __ trunc_w_d(single_scratch, f10);
+ // Retrieve FCSR.
+ __ cfc1(scratch2, FCSR);
+ // Restore FCSR.
+ __ ctc1(scratch1, FCSR);
+
+ // Check for inexact conversion or exception.
+ __ And(scratch2, scratch2, kFCSRFlagMask);
+
+ if (result_type_ <= BinaryOpIC::INT32) {
+ // If scratch2 != 0, result does not fit in a 32-bit integer.
+ __ Branch(&transition, ne, scratch2, Operand(zero_reg));
+ }
+
+ // Check if the result fits in a smi.
+ __ mfc1(scratch1, single_scratch);
+ __ Addu(scratch2, scratch1, Operand(0x40000000));
+          // If not, try to return a heap number.
+ __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
+ // Check for minus zero. Return heap number for minus zero.
+ Label not_zero;
+ __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
+ __ mfc1(scratch2, f11);
+ __ And(scratch2, scratch2, HeapNumber::kSignMask);
+ __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
+ __ bind(&not_zero);
+
+ // Tag the result and return.
+ __ SmiTag(v0, scratch1);
+ __ Ret();
+ } else {
+ // DIV just falls through to allocating a heap number.
+ }
+
+ __ bind(&return_heap_number);
+ // Return a heap number, or fall through to type transition or runtime
+ // call if we can't.
+ if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
+ : BinaryOpIC::INT32)) {
+ // We are using FPU registers so s0 is available.
+ heap_number_result = s0;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
+ __ mov(v0, heap_number_result);
+ __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+ }
+
+ // A DIV operation expecting an integer result falls through
+ // to type transition.
+
+ } else {
+ // We preserved a0 and a1 to be able to call runtime.
+ // Save the left value on the stack.
+ __ Push(t1, t0);
+
+ Label pop_and_call_runtime;
+
+ // Allocate a heap number to store the result.
+ heap_number_result = s0;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &pop_and_call_runtime);
+
+ // Load the left value from the value saved on the stack.
+ __ Pop(a1, a0);
+
+ // Call the C function to handle the double operation.
+ FloatingPointHelper::CallCCodeForDoubleOperation(
+ masm, op_, heap_number_result, scratch1);
+ if (FLAG_debug_code) {
+ __ stop("Unreachable code.");
+ }
+
+ __ bind(&pop_and_call_runtime);
+ __ Drop(2);
+ __ Branch(&call_runtime);
+ }
+
+ break;
+ }
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::SHL: {
+ Label return_heap_number;
+ Register scratch3 = t1;
+ // Convert operands to 32-bit integers. Right in a2 and left in a3. The
+ // registers a0 and a1 (right and left) are preserved for the runtime
+ // call.
+ FloatingPointHelper::LoadNumberAsInt32(masm,
+ left,
+ a3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ f0,
+ &transition);
+ FloatingPointHelper::LoadNumberAsInt32(masm,
+ right,
+ a2,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ f0,
+ &transition);
+
+ // The ECMA-262 standard specifies that, for shift operations, only the
+ // 5 least significant bits of the shift value should be used.
+ switch (op_) {
+ case Token::BIT_OR:
+ __ Or(a2, a3, Operand(a2));
+ break;
+ case Token::BIT_XOR:
+ __ Xor(a2, a3, Operand(a2));
+ break;
+ case Token::BIT_AND:
+ __ And(a2, a3, Operand(a2));
+ break;
+ case Token::SAR:
+ __ And(a2, a2, Operand(0x1f));
+ __ srav(a2, a3, a2);
+ break;
+ case Token::SHR:
+ __ And(a2, a2, Operand(0x1f));
+ __ srlv(a2, a3, a2);
+          // SHR is special because it is required to produce a positive
+          // answer. We only get a negative result if the shift value (a2)
+          // is 0. Such a result cannot be represented as a signed 32-bit
+          // integer, so try to return a heap number if we can.
+          // The non-FPU code does not support this special case, so jump
+          // to the runtime if FPU is not available.
+ if (CpuFeatures::IsSupported(FPU)) {
+ __ Branch((result_type_ <= BinaryOpIC::INT32)
+ ? &transition
+ : &return_heap_number,
+ lt,
+ a2,
+ Operand(zero_reg));
+ } else {
+ __ Branch((result_type_ <= BinaryOpIC::INT32)
+ ? &transition
+ : &call_runtime,
+ lt,
+ a2,
+ Operand(zero_reg));
+ }
+ break;
+ case Token::SHL:
+ __ And(a2, a2, Operand(0x1f));
+ __ sllv(a2, a3, a2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // Check if the result fits in a smi.
+ __ Addu(scratch1, a2, Operand(0x40000000));
+    // If not, try to return a heap number. (We know the result is an int32.)
+ __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
+ // Tag the result and return.
+ __ SmiTag(v0, a2);
+ __ Ret();
+
+ __ bind(&return_heap_number);
+ heap_number_result = t1;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+
+ if (op_ != Token::SHR) {
+ // Convert the result to a floating point value.
+ __ mtc1(a2, double_scratch);
+ __ cvt_d_w(double_scratch, double_scratch);
+ } else {
+ // The result must be interpreted as an unsigned 32-bit integer.
+ __ mtc1(a2, double_scratch);
+ __ Cvt_d_uw(double_scratch, double_scratch);
+ }
+
+ // Store the result.
+ __ mov(v0, heap_number_result);
+ __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+ } else {
+ // Tail call that writes the int32 in a2 to the heap number in v0, using
+ // a3 and a1 as scratch. v0 is preserved and returned.
+ __ mov(a0, t1);
+ WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1);
+ __ TailCallStub(&stub);
+ }
+
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+
+ // We never expect DIV to yield an integer result, so we always generate
+ // type transition code for DIV operations expecting an integer result: the
+ // code will fall through to this type transition.
+ if (transition.is_linked() ||
+ ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
+ }
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
+}
+
+
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (op_ == Token::ADD) {
+ // Handle string addition here, because it is the only operation
+ // that does not do a ToNumber conversion on the operands.
+ GenerateAddStrings(masm);
+ }
+
+ // Convert oddball arguments to numbers.
+ Label check, done;
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&check, ne, a1, Operand(t0));
+ if (Token::IsBitOp(op_)) {
+ __ li(a1, Operand(Smi::FromInt(0)));
+ } else {
+ __ LoadRoot(a1, Heap::kNanValueRootIndex);
+ }
+ __ jmp(&done);
+ __ bind(&check);
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&done, ne, a0, Operand(t0));
+ if (Token::IsBitOp(op_)) {
+ __ li(a0, Operand(Smi::FromInt(0)));
+ } else {
+ __ LoadRoot(a0, Heap::kNanValueRootIndex);
+ }
+ __ bind(&done);
+
+ GenerateHeapNumberStub(masm);
+}
+
+
+void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+ Label call_runtime;
+ GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
+}
+
+
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+ Label call_runtime, call_string_add_or_runtime;
+
+ GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+ GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
+
+ __ bind(&call_string_add_or_runtime);
+ if (op_ == Token::ADD) {
+ GenerateAddStrings(masm);
+ }
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
+}
+
+
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+ ASSERT(op_ == Token::ADD);
+ Label left_not_string, call_runtime;
+
+ Register left = a1;
+ Register right = a0;
+
+ // Check if left argument is a string.
+ __ JumpIfSmi(left, &left_not_string);
+ __ GetObjectType(left, a2, a2);
+ __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+ StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_left_stub);
+
+ // Left operand is not a string, test right.
+ __ bind(&left_not_string);
+ __ JumpIfSmi(right, &call_runtime);
+ __ GetObjectType(right, a2, a2);
+ __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+ StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_right_stub);
+
+ // At least one argument is not a string.
+ __ bind(&call_runtime);
+}
+
+
+void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+ GenerateRegisterArgsPush(masm);
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void BinaryOpStub::GenerateHeapResultAllocation(
+ MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+
+  // The code below will clobber result if allocation fails. To keep both
+  // arguments intact for the runtime call, result cannot be one of them.
+ ASSERT(!result.is(a0) && !result.is(a1));
+
+ if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
+ Label skip_allocation, allocated;
+ Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
+ // If the overwritable operand is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
+ // Allocate a heap number for the result.
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ __ Branch(&allocated);
+ __ bind(&skip_allocation);
+ // Use object holding the overwritable operand for result.
+ __ mov(result, overwritable_operand);
+ __ bind(&allocated);
+ } else {
+ ASSERT(mode_ == NO_OVERWRITE);
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ }
+}
+
+
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ Push(a1, a0);
+}
+
+
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // Untagged case: double input in f4, double result goes
+ // into f4.
+ // Tagged case: tagged input on top of stack and in a0,
+ // tagged result (heap number) goes into v0.
+
+ Label input_not_smi;
+ Label loaded;
+ Label calculate;
+ Label invalid_cache;
+ const Register scratch0 = t5;
+ const Register scratch1 = t3;
+ const Register cache_entry = a0;
+ const bool tagged = (argument_type_ == TAGGED);
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+
+ if (tagged) {
+      // The argument is a number; it is on the stack and in a0.
+ // Load argument and check if it is a smi.
+ __ JumpIfNotSmi(a0, &input_not_smi);
+
+ // Input is a smi. Convert to double and load the low and high words
+ // of the double into a2, a3.
+ __ sra(t0, a0, kSmiTagSize);
+ __ mtc1(t0, f4);
+ __ cvt_d_w(f4, f4);
+ __ Move(a2, a3, f4);
+ __ Branch(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ CheckMap(a0,
+ a1,
+ Heap::kHeapNumberMapRootIndex,
+ &calculate,
+ DONT_DO_SMI_CHECK);
+ // Input is a HeapNumber. Store the
+ // low and high words into a2, a3.
+ __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
+ __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
+ } else {
+ // Input is untagged double in f4. Output goes to f4.
+ __ Move(a2, a3, f4);
+ }
+ __ bind(&loaded);
+ // a2 = low 32 bits of double value.
+ // a3 = high 32 bits of double value.
+ // Compute hash (the shifts are arithmetic):
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ Xor(a1, a2, a3);
+ __ sra(t0, a1, 16);
+ __ Xor(a1, a1, t0);
+ __ sra(t0, a1, 8);
+ __ Xor(a1, a1, t0);
+ ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+ __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
+
+ // a2 = low 32 bits of double value.
+ // a3 = high 32 bits of double value.
+ // a1 = TranscendentalCache::hash(double value).
+ __ li(cache_entry, Operand(
+ ExternalReference::transcendental_cache_array_address(
+ masm->isolate())));
+ // a0 points to cache array.
+ __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
+ Isolate::Current()->transcendental_cache()->caches_[0])));
+ // a0 points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
+
+#ifdef DEBUG
+  // Check that the layout of cache elements matches expectations.
+ { TranscendentalCache::SubCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
+#endif
+
+    // Find the address of the a1'th entry in the cache, i.e., &a0[a1 * 12].
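+    // Each element is 12 bytes, so the byte offset is a1 * 12, computed
+    // as (a1 + a1 * 2) * 4 with a shift-add pair.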
+ __ sll(t0, a1, 1);
+ __ Addu(a1, a1, t0);
+ __ sll(t0, a1, 2);
+ __ Addu(cache_entry, cache_entry, t0);
+
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ __ lw(t0, MemOperand(cache_entry, 0));
+ __ lw(t1, MemOperand(cache_entry, 4));
+ __ lw(t2, MemOperand(cache_entry, 8));
+ __ Addu(cache_entry, cache_entry, 12);
+ __ Branch(&calculate, ne, a2, Operand(t0));
+ __ Branch(&calculate, ne, a3, Operand(t1));
+ // Cache hit. Load result, cleanup and return.
+ if (tagged) {
+ // Pop input value from stack and load result into v0.
+ __ Drop(1);
+ __ mov(v0, t2);
+ } else {
+ // Load result into f4.
+ __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
+ }
+ __ Ret();
+ } // if (CpuFeatures::IsSupported(FPU))
+
+ __ bind(&calculate);
+ if (tagged) {
+ __ bind(&invalid_cache);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
+ masm->isolate()),
+ 1,
+ 1);
+ } else {
+ if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
+ CpuFeatures::Scope scope(FPU);
+
+ Label no_update;
+ Label skip_cache;
+ const Register heap_number_map = t2;
+
+ // Call C function to calculate the result and update the cache.
+ // Register a0 holds precalculated cache entry address; preserve
+ // it on the stack and pop it into register cache_entry after the
+ // call.
+ __ push(cache_entry);
+ GenerateCallCFunction(masm, scratch0);
+ __ GetCFunctionDoubleResult(f4);
+
+ // Try to update the cache. If we cannot allocate a
+ // heap number, we return the result without updating.
+ __ pop(cache_entry);
+ __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
+ __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
+
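+    // A cache element is two uint32 input words followed by a pointer to
+    // the output heap number (see the DEBUG layout check above).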
+ __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
+ __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
+ __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
+
+ __ mov(v0, cache_entry);
+ __ Ret();
+
+ __ bind(&invalid_cache);
+ // The cache is invalid. Call runtime which will recreate the
+ // cache.
+ __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
+ __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
+ __ EnterInternalFrame();
+ __ push(a0);
+ __ CallRuntime(RuntimeFunction(), 1);
+ __ LeaveInternalFrame();
+ __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+
+ __ bind(&skip_cache);
+ // Call C function to calculate the result and answer directly
+ // without updating the cache.
+ GenerateCallCFunction(masm, scratch0);
+ __ GetCFunctionDoubleResult(f4);
+ __ bind(&no_update);
+
+ // We return the value in f4 without adding it to the cache, but
+ // we cause a scavenging GC so that future allocations will succeed.
+ __ EnterInternalFrame();
+
+ // Allocate an aligned object larger than a HeapNumber.
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+ __ li(scratch0, Operand(4 * kPointerSize));
+ __ push(scratch0);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ __ LeaveInternalFrame();
+ __ Ret();
+ }
+}
+
+
+void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
+ Register scratch) {
+ __ push(ra);
+ __ PrepareCallCFunction(2, scratch);
+ if (IsMipsSoftFloatABI) {
+ __ Move(v0, v1, f4);
+ } else {
+ __ mov_d(f12, f4);
+ }
+ switch (type_) {
+ case TranscendentalCache::SIN:
+ __ CallCFunction(
+ ExternalReference::math_sin_double_function(masm->isolate()), 2);
+ break;
+ case TranscendentalCache::COS:
+ __ CallCFunction(
+ ExternalReference::math_cos_double_function(masm->isolate()), 2);
+ break;
+ case TranscendentalCache::LOG:
+ __ CallCFunction(
+ ExternalReference::math_log_double_function(masm->isolate()), 2);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ __ pop(ra);
+}
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ case TranscendentalCache::LOG: return Runtime::kMath_log;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
+}
+
+
+void MathPowStub::Generate(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+
+ Label base_not_smi;
+ Label exponent_not_smi;
+ Label convert_exponent;
+
+ const Register base = a0;
+ const Register exponent = a2;
+ const Register heapnumbermap = t1;
+ const Register heapnumber = s0; // Callee-saved register.
+ const Register scratch = t2;
+ const Register scratch2 = t3;
+
+ // Allocate FP values in the ABI-parameter-passing regs.
+ const DoubleRegister double_base = f12;
+ const DoubleRegister double_exponent = f14;
+ const DoubleRegister double_result = f0;
+ const DoubleRegister double_scratch = f2;
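+ // Under the o32 hard-float ABI, f12 and f14 carry the first two double
+ // arguments and f0 carries a double result, so these register choices
+ // line up with the C helpers called below.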
+
+ __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+ __ lw(base, MemOperand(sp, 1 * kPointerSize));
+ __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
+
+ // Convert base to double value and store it in f0.
+ __ JumpIfNotSmi(base, &base_not_smi);
+ // Base is a Smi. Untag and convert it.
+ __ SmiUntag(base);
+ __ mtc1(base, double_scratch);
+ __ cvt_d_w(double_base, double_scratch);
+ __ Branch(&convert_exponent);
+
+ __ bind(&base_not_smi);
+ __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
+ __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+ // Base is a heapnumber. Load it into double register.
+ __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+
+ __ bind(&convert_exponent);
+ __ JumpIfNotSmi(exponent, &exponent_not_smi);
+ __ SmiUntag(exponent);
+
+ // The base is in a double register and the exponent is
+ // an untagged smi. Allocate a heap number and call a
+ // C function for integer exponents. The register containing
+ // the heap number is callee-saved.
+ __ AllocateHeapNumber(heapnumber,
+ scratch,
+ scratch2,
+ heapnumbermap,
+ &call_runtime);
+ __ push(ra);
+ __ PrepareCallCFunction(3, scratch);
+ __ SetCallCDoubleArguments(double_base, exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(masm->isolate()), 3);
+ __ pop(ra);
+ __ GetCFunctionDoubleResult(double_result);
+ __ sdc1(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ __ mov(v0, heapnumber);
+ __ DropAndRet(2 * kPointerSize);
+
+ __ bind(&exponent_not_smi);
+ __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+ __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+ // Exponent is a heapnumber. Load it into double register.
+ __ ldc1(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
+
+ // The base and the exponent are in double registers.
+ // Allocate a heap number and call a C function for
+ // double exponents. The register containing
+ // the heap number is callee-saved.
+ __ AllocateHeapNumber(heapnumber,
+ scratch,
+ scratch2,
+ heapnumbermap,
+ &call_runtime);
+ __ push(ra);
+ __ PrepareCallCFunction(4, scratch);
+ // ABI (o32) for func(double a, double b): a in f12, b in f14.
+ ASSERT(double_base.is(f12));
+ ASSERT(double_exponent.is(f14));
+ __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()), 4);
+ __ pop(ra);
+ __ GetCFunctionDoubleResult(double_result);
+ __ sdc1(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ __ mov(v0, heapnumber);
+ __ DropAndRet(2 * kPointerSize);
+ }
+
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+}
+
+
+bool CEntryStub::NeedsImmovableCode() {
+ return true;
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+ __ Throw(v0);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+ UncatchableExceptionType type) {
+ __ ThrowUncatchable(type, v0);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_termination_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool always_allocate) {
+ // v0: result parameter for PerformGC, if any
+ // s0: number of arguments including receiver (C callee-saved)
+ // s1: pointer to the first argument (C callee-saved)
+ // s2: pointer to builtin function (C callee-saved)
+
+ if (do_gc) {
+ // Move result passed in v0 into a0 to call PerformGC.
+ __ mov(a0, v0);
+ __ PrepareCallCFunction(1, a1);
+ __ CallCFunction(
+ ExternalReference::perform_gc_function(masm->isolate()), 1);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
+ if (always_allocate) {
+ __ li(a0, Operand(scope_depth));
+ __ lw(a1, MemOperand(a0));
+ __ Addu(a1, a1, Operand(1));
+ __ sw(a1, MemOperand(a0));
+ }
+
+ // Prepare arguments for C routine: a0 = argc, a1 = argv
+ __ mov(a0, s0);
+ __ mov(a1, s1);
+
+ // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
+ // also need to reserve the 4 argument slots on the stack.
+
+ __ AssertStackIsAligned();
+
+ __ li(a2, Operand(ExternalReference::isolate_address()));
+
+ // To let the GC traverse the return address of the exit frames, we need to
+ // know where the return address is. The CEntryStub is unmovable, so
+ // we can store the address on the stack to be able to find it again and
+ // we never have to restore it, because it will not change.
+ { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+ // This branch-and-link sequence is needed to find the current PC on mips,
+ // saved to the ra register.
+ // Use masm-> here instead of the double-underscore macro since extra
+ // coverage code can interfere with the proper calculation of ra.
+ Label find_ra;
+ masm->bal(&find_ra); // bal exposes branch delay slot.
+ masm->nop(); // Branch delay slot nop.
+ masm->bind(&find_ra);
+
+ // Adjust the value in ra to point to the correct return location, 2nd
+ // instruction past the real call into C code (the jalr(t9)), and push it.
+ // This is the return address of the exit frame.
+ const int kNumInstructionsToJump = 6;
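+ // The bal above left ra pointing at find_ra, so adding six instruction
+ // slots (Addu, sw, Subu, mov, jalr and the delay-slot nop) makes ra the
+ // address just past the call. Using kPointerSize as the instruction size
+ // works because MIPS instructions are 4 bytes wide, like MIPS32 pointers.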
+ masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
+ masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
+ masm->Subu(sp, sp, StandardFrameConstants::kCArgsSlotsSize);
+ // Stack is still aligned.
+
+ // Call the C routine.
+ masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
+ masm->jalr(t9);
+ masm->nop(); // Branch delay slot nop.
+ // Make sure the stored 'ra' points to this position.
+ ASSERT_EQ(kNumInstructionsToJump,
+ masm->InstructionsGeneratedSince(&find_ra));
+ }
+
+ // Restore stack (remove arg slots).
+ __ Addu(sp, sp, StandardFrameConstants::kCArgsSlotsSize);
+
+ if (always_allocate) {
+ // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
+ __ li(a2, Operand(scope_depth));
+ __ lw(a3, MemOperand(a2));
+ __ Subu(a3, a3, Operand(1));
+ __ sw(a3, MemOperand(a2));
+ }
+
+ // Check for failure result.
+ Label failure_returned;
+ STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
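+ // Failure objects carry kFailureTag in their low bits; the STATIC_ASSERT
+ // above guarantees that adding 1 clears exactly those bits, so a zero
+ // result from the mask below identifies a failure in v0.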
+ __ addiu(a2, v0, 1);
+ __ andi(t0, a2, kFailureTagMask);
+ __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
+
+ // Exit C frame and return.
+ // v0:v1: result
+ // sp: stack pointer
+ // fp: frame pointer
+ __ LeaveExitFrame(save_doubles_, s0);
+ __ Ret();
+
+ // Check if we should retry or throw exception.
+ Label retry;
+ __ bind(&failure_returned);
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
+ __ Branch(&retry, eq, t0, Operand(zero_reg));
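+ // A zero failure type field means RETRY_AFTER_GC: the allocation that
+ // failed can simply be retried on the next GenerateCore pass.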
+
+ // Special handling of out of memory exceptions.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ Branch(throw_out_of_memory_exception, eq,
+ v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+
+ // Retrieve the pending exception and clear the variable.
+ __ li(t0,
+ Operand(ExternalReference::the_hole_value_location(masm->isolate())));
+ __ lw(a3, MemOperand(t0));
+ __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ masm->isolate())));
+ __ lw(v0, MemOperand(t0));
+ __ sw(a3, MemOperand(t0));
+
+ // Special handling of termination exceptions which are uncatchable
+ // by javascript code.
+ __ Branch(throw_termination_exception, eq,
+ v0, Operand(masm->isolate()->factory()->termination_exception()));
+
+ // Handle normal exception.
+ __ jmp(throw_normal_exception);
+
+ __ bind(&retry);
+ // The last failure (in v0) is moved to a0 as the parameter when retrying.
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // a0: number of arguments including receiver
+ // a1: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+
+ // NOTE: Invocations of builtins may return failure objects
+ // instead of a proper result. The builtin entry handles
+ // this by performing a garbage collection and retrying the
+ // builtin once.
+
+ // Compute the argv pointer in a callee-saved register.
+ __ sll(s1, a0, kPointerSizeLog2);
+ __ Addu(s1, sp, s1);
+ __ Subu(s1, s1, Operand(kPointerSize));
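+ // s1 = sp + argc * kPointerSize - kPointerSize, the address of the first
+ // argument. It is computed now, in a callee-saved register, because
+ // EnterExitFrame below is about to move sp.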
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame(save_doubles_);
+
+ // Setup argc and the builtin function in callee-saved registers.
+ __ mov(s0, a0);
+ __ mov(s2, a1);
+
+ // s0: number of arguments (C callee-saved)
+ // s1: pointer to first argument (C callee-saved)
+ // s2: pointer to builtin function (C callee-saved)
+
+ Label throw_normal_exception;
+ Label throw_termination_exception;
+ Label throw_out_of_memory_exception;
+
+ // Call into the runtime system.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ false,
+ false);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ true);
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+ __ bind(&throw_termination_exception);
+ GenerateThrowUncatchable(masm, TERMINATION);
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ Label invoke, exit;
+
+ // Registers:
+ // a0: entry address
+ // a1: function
+ // a2: receiver
+ // a3: argc
+ //
+ // Stack:
+ // 4 args slots
+ // args
+
+ // Save callee saved registers on the stack.
+ __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit());
+
+ // Load argv in s0 register.
+ __ lw(s0, MemOperand(sp, kNumCalleeSaved * kPointerSize +
+ StandardFrameConstants::kCArgsSlotsSize));
+
+ // We build an EntryFrame.
+ __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ __ li(t2, Operand(Smi::FromInt(marker)));
+ __ li(t1, Operand(Smi::FromInt(marker)));
+ __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address,
+ masm->isolate())));
+ __ lw(t0, MemOperand(t0));
+ __ Push(t3, t2, t1, t0);
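+ // Note that the marker smi is pushed twice (t2 and t1): it fills both the
+ // context and the function slots of the entry frame sketched below.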
+ // Setup frame pointer for the frame to be pushed.
+ __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Registers:
+ // a0: entry_address
+ // a1: function
+ // a2: receiver_pointer
+ // a3: argc
+ // s0: argv
+ //
+ // Stack:
+ // caller fp |
+ // function slot | entry frame
+ // context slot |
+ // bad fp (0xff...f) |
+ // callee saved registers + ra
+ // 4 args slots
+ // args
+
+ #ifdef ENABLE_LOGGING_AND_PROFILING
+ // If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
+ ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
+ masm->isolate());
+ __ li(t1, Operand(ExternalReference(js_entry_sp)));
+ __ lw(t2, MemOperand(t1));
+ __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
+ __ sw(fp, MemOperand(t1));
+ __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ Label cont;
+ __ b(&cont);
+ __ nop(); // Branch delay slot nop.
+ __ bind(&non_outermost_js);
+ __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+ __ bind(&cont);
+ __ push(t0);
+ #endif
+
+ // Call a faked try-block that does the invoke.
+ __ bal(&invoke); // bal exposes branch delay slot.
+ __ nop(); // Branch delay slot nop.
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ // Coming in here the fp will be invalid because the PushTryHandler below
+ // sets it to 0 to signal the existence of the JSEntry frame.
+ __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ masm->isolate())));
+ __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
+ __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+ __ b(&exit); // b exposes branch delay slot.
+ __ nop(); // Branch delay slot nop.
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the bal(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+
+ // Clear any pending exceptions.
+ __ li(t0,
+ Operand(ExternalReference::the_hole_value_location(masm->isolate())));
+ __ lw(t1, MemOperand(t0));
+ __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ masm->isolate())));
+ __ sw(t1, MemOperand(t0));
+
+ // Invoke the function by calling through JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Registers:
+ // a0: entry_address
+ // a1: function
+ // a2: receiver_pointer
+ // a3: argc
+ // s0: argv
+ //
+ // Stack:
+ // handler frame
+ // entry frame
+ // callee saved registers + ra
+ // 4 args slots
+ // args
+
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+ masm->isolate());
+ __ li(t0, Operand(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
+ __ li(t0, Operand(entry));
+ }
+ __ lw(t9, MemOperand(t0)); // Deref address.
+
+ // Call JSEntryTrampoline.
+ __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(t9);
+
+ // Unlink this frame from the handler chain.
+ __ PopTryHandler();
+
+ __ bind(&exit); // v0 holds result
+ #ifdef ENABLE_LOGGING_AND_PROFILING
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ pop(t1);
+ __ Branch(&non_outermost_js_2, ne, t1,
+ Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ li(t1, Operand(ExternalReference(js_entry_sp)));
+ __ sw(zero_reg, MemOperand(t1));
+ __ bind(&non_outermost_js_2);
+ #endif
+
+ // Restore the top frame descriptors from the stack.
+ __ pop(t1);
+ __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address,
+ masm->isolate())));
+ __ sw(t1, MemOperand(t0));
+
+ // Reset the stack to the callee saved registers.
+ __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Restore callee saved registers from the stack.
+ __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit());
+ // Return.
+ __ Jump(ra);
+}
+
+
+// Uses registers a0 to t0.
+// Expected input (depending on whether args are in registers or on the stack):
+// * object: a0 or at sp + 1 * kPointerSize.
+// * function: a1 or at sp.
+//
+// Inlined call site patching is a crankshaft-specific feature that is not
+// implemented on MIPS.
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // This is a crankshaft-specific feature that has not been implemented yet.
+ ASSERT(!HasCallSiteInlineCheck());
+ // Call site inlining and patching implies arguments in registers.
+ ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+ // ReturnTrueFalse is only implemented for inlined call sites.
+ ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
+
+ // Fixed register usage throughout the stub:
+ const Register object = a0; // Object (lhs).
+ Register map = a3; // Map of the object.
+ const Register function = a1; // Function (rhs).
+ const Register prototype = t0; // Prototype of the function.
+ const Register inline_site = t5;
+ const Register scratch = a2;
+
+ Label slow, loop, is_instance, is_not_instance, not_js_object;
+
+ if (!HasArgsInRegisters()) {
+ __ lw(object, MemOperand(sp, 1 * kPointerSize));
+ __ lw(function, MemOperand(sp, 0));
+ }
+
+ // Check that the left-hand side is a JS object and load its map.
+ __ JumpIfSmi(object, &not_js_object);
+ __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
+
+ // If there is a call site cache don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck()) {
+ Label miss;
+ __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
+ __ Branch(&miss, ne, function, Operand(t1));
+ __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
+ __ Branch(&miss, ne, map, Operand(t1));
+ __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&miss);
+ }
+
+ // Get the prototype of the function.
+ __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(prototype, &slow);
+ __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+
+ // Update the global instanceof or call site inlined cache with the current
+ // map and function. The cached answer will be set when it is known below.
+ if (!HasCallSiteInlineCheck()) {
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ } else {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ // Register mapping: a3 is object map and t0 is function prototype.
+ // Get prototype of object into a2.
+ __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+
+ // We don't need map any more. Use it as a scratch register.
+ Register scratch2 = map;
+ map = no_reg;
+
+ // Loop through the prototype chain looking for the function prototype.
+ __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
+ __ bind(&loop);
+ __ Branch(&is_instance, eq, scratch, Operand(prototype));
+ __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
+ __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
+ __ Branch(&loop);
+
+ __ bind(&is_instance);
+ ASSERT(Smi::FromInt(0) == 0);
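+ // Smi::FromInt(0) is all zero bits, so zero_reg doubles as the smi result;
+ // this stub returns smi 0 for "is an instance" and smi 1 otherwise.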
+ if (!HasCallSiteInlineCheck()) {
+ __ mov(v0, zero_reg);
+ __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ UNIMPLEMENTED_MIPS();
+ }
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&is_not_instance);
+ if (!HasCallSiteInlineCheck()) {
+ __ li(v0, Operand(Smi::FromInt(1)));
+ __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ UNIMPLEMENTED_MIPS();
+ }
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ Label object_not_null, object_not_null_or_smi;
+ __ bind(&not_js_object);
+ // Before null, smi and string value checks, check that the rhs is a function
+ // as for a non-function rhs an exception needs to be thrown.
+ __ JumpIfSmi(function, &slow);
+ __ GetObjectType(function, scratch2, scratch);
+ __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+ // Null is not instance of anything.
+ __ Branch(&object_not_null, ne, scratch,
+ Operand(masm->isolate()->factory()->null_value()));
+ __ li(v0, Operand(Smi::FromInt(1)));
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&object_not_null);
+ // Smi values are not instances of anything.
+ __ JumpIfNotSmi(object, &object_not_null_or_smi);
+ __ li(v0, Operand(Smi::FromInt(1)));
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&object_not_null_or_smi);
+ // String values are not instances of anything.
+ __ IsObjectJSStringType(object, scratch, &slow);
+ __ li(v0, Operand(Smi::FromInt(1)));
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ // Slow-case. Tail call builtin.
+ __ bind(&slow);
+ if (!ReturnTrueFalseObject()) {
+ if (HasArgsInRegisters()) {
+ __ Push(a0, a1);
+ }
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+ } else {
+ __ EnterInternalFrame();
+ __ Push(a0, a1);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ mov(a0, v0);
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+ }
+}
+
+
+Register InstanceofStub::left() { return a0; }
+
+
+Register InstanceofStub::right() { return a1; }
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The displacement is the offset of the last parameter (if any)
+ // relative to the frame pointer.
+ static const int kDisplacement =
+ StandardFrameConstants::kCallerSPOffset - kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ JumpIfNotSmi(a1, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor,
+ eq,
+ a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Check index (a1) against formal parameters count limit passed in
+ // through register a0. Use unsigned comparison to get negative
+ // check for free.
+ __ Branch(&slow, hs, a1, Operand(a0));
+
+ // Read the argument from the stack and return it.
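+ // a0 and a1 are smis, so a0 - a1 is already tagged; shifting left by
+ // kPointerSizeLog2 - kSmiTagSize (one extra bit) scales the smi index
+ // straight into a byte offset.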
+ __ subu(a3, a0, a1);
+ __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a3, fp, Operand(t3));
+ __ lw(v0, MemOperand(a3, kDisplacement));
+ __ Ret();
+
+ // Arguments adaptor case: Check index (a1) against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
+
+ // Read the argument from the adaptor frame and return it.
+ __ subu(a3, a0, a1);
+ __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a3, a2, Operand(t3));
+ __ lw(v0, MemOperand(a3, kDisplacement));
+ __ Ret();
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ push(a1);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+ // sp[0] : number of parameters
+ // sp[4] : receiver displacement
+ // sp[8] : function
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
+ __ Branch(&runtime, ne,
+ a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Patch the arguments.length and the parameters pointer in the current frame.
+ __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sw(a2, MemOperand(sp, 0 * kPointerSize));
+ __ sll(t3, a2, 1);
+ __ Addu(a3, a3, Operand(t3));
+ __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
+ __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ // Stack layout:
+ // sp[0] : number of parameters (tagged)
+ // sp[4] : address of receiver argument
+ // sp[8] : function
+ // Registers used over whole function:
+ // t2 : allocated object (tagged)
+ // t5 : mapped parameter count (tagged)
+
+ __ lw(a1, MemOperand(sp, 0 * kPointerSize));
+ // a1 = parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame, eq, a2,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // No adaptor, parameter count = argument count.
+ __ mov(a2, a1);
+ __ b(&try_allocate);
+ __ nop(); // Branch delay slot nop.
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sll(t6, a2, 1);
+ __ Addu(a3, a3, Operand(t6));
+ __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+
+ // a1 = parameter count (tagged)
+ // a2 = argument count (tagged)
+ // Compute the mapped parameter count = min(a1, a2) in a1.
+ Label skip_min;
+ __ Branch(&skip_min, lt, a1, Operand(a2));
+ __ mov(a1, a2);
+ __ bind(&skip_min);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
+ Label param_map_size;
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
+ __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
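+ // The mov above sits in the branch delay slot and executes on both paths;
+ // the fall-through path immediately overwrites t5 with the real size.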
+ __ sll(t5, a1, 1);
+ __ addiu(t5, t5, kParameterMapHeaderSize);
+ __ bind(&param_map_size);
+
+ // 2. Backing store.
+ __ sll(t6, a2, 1);
+ __ Addu(t5, t5, Operand(t6));
+ __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of all three objects in one go.
+ __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
+
+ // v0 = address of new object(s) (tagged)
+ // a2 = argument count (tagged)
+ // Get the arguments boilerplate from the current (global) context into t0.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
+
+ __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
+ Label skip2_ne, skip2_eq;
+ __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
+ __ lw(t0, MemOperand(t0, kNormalOffset));
+ __ bind(&skip2_ne);
+
+ __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
+ __ lw(t0, MemOperand(t0, kAliasedOffset));
+ __ bind(&skip2_eq);
+
+ // v0 = address of new object (tagged)
+ // a1 = mapped parameter count (tagged)
+ // a2 = argument count (tagged)
+ // t0 = address of boilerplate object (tagged)
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ lw(a3, FieldMemOperand(t0, i));
+ __ sw(a3, FieldMemOperand(v0, i));
+ }
+
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ lw(a3, MemOperand(sp, 2 * kPointerSize));
+ const int kCalleeOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ sw(a2, FieldMemOperand(v0, kLengthOffset));
+
+ // Setup the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, t0 will point there, otherwise
+ // it will point to the backing store.
+ __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
+ __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ // v0 = address of new object (tagged)
+ // a1 = mapped parameter count (tagged)
+ // a2 = argument count (tagged)
+ // t0 = address of parameter map or backing store (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ Label skip3;
+ __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
+ // Move backing store address to a3, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mov(a3, t0);
+ __ bind(&skip3);
+
+ __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
+
+ __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
+ __ Addu(t2, a1, Operand(Smi::FromInt(2)));
+ __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ sll(t6, a1, 1);
+ __ Addu(t2, t0, Operand(t6));
+ __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
+ __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ mov(t2, a1);
+ __ lw(t5, MemOperand(sp, 0 * kPointerSize));
+ __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ Subu(t5, t5, Operand(a1));
+ __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
+ __ sll(t6, t2, 1);
+ __ Addu(a3, t0, Operand(t6));
+ __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
+
+ // t2 = loop variable (tagged)
+ // a1 = mapping index (tagged)
+ // a3 = address of backing store (tagged)
+ // t0 = address of parameter map (tagged)
+ // t1 = temporary scratch (among others, for address calculation)
+ // t3 = the hole value
+ __ jmp(&parameters_test);
+
+ __ bind(&parameters_loop);
+ __ Subu(t2, t2, Operand(Smi::FromInt(1)));
+ __ sll(t1, t2, 1);
+ __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ Addu(t6, t0, t1);
+ __ sw(t5, MemOperand(t6));
+ __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ Addu(t6, a3, t1);
+ __ sw(t3, MemOperand(t6));
+ __ Addu(t5, t5, Operand(Smi::FromInt(1)));
+ __ bind(&parameters_test);
+ __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
+
+ __ bind(&skip_parameter_map);
+ // a2 = argument count (tagged)
+ // a3 = address of backing store (tagged)
+ // t1 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+ __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
+ __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
+
+ Label arguments_loop, arguments_test;
+ __ mov(t5, a1);
+ __ lw(t0, MemOperand(sp, 1 * kPointerSize));
+ __ sll(t6, t5, 1);
+ __ Subu(t0, t0, Operand(t6));
+ __ jmp(&arguments_test);
+
+ __ bind(&arguments_loop);
+ __ Subu(t0, t0, Operand(kPointerSize));
+ __ lw(t2, MemOperand(t0, 0));
+ __ sll(t6, t5, 1);
+ __ Addu(t1, a3, Operand(t6));
+ __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ Addu(t5, t5, Operand(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ Branch(&arguments_loop, lt, t5, Operand(a2));
+
+ // Return and remove the on-stack parameters.
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ // a2 = argument count (tagged)
+ __ bind(&runtime);
+ __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // sp[0] : number of parameters
+ // sp[4] : receiver displacement
+ // sp[8] : function
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame,
+ eq,
+ a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Get the length from the frame.
+ __ lw(a1, MemOperand(sp, 0));
+ __ Branch(&try_allocate);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sw(a1, MemOperand(sp, 0));
+ __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a3, a2, Operand(at));
+
+ __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+
+ // Try the new space allocation. Start out with computing the size
+ // of the arguments object and the elements array in words.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
+ __ srl(a1, a1, kSmiTagSize);
+
+ __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ bind(&add_arguments_object);
+ __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
+
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(a1,
+ v0,
+ a2,
+ a3,
+ &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT |
+ SIZE_IN_WORDS));
+
+ // Get the arguments boilerplate from the current (global) context.
+ __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
+ __ lw(t0, MemOperand(t0, Context::SlotOffset(
+ Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
+
+ // Copy the JS object part.
+ __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ __ lw(a1, MemOperand(sp, 0 * kPointerSize));
+ __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+
+ Label done;
+ __ Branch(&done, eq, a1, Operand(zero_reg));
+
+ // Get the parameters pointer from the stack.
+ __ lw(a2, MemOperand(sp, 1 * kPointerSize));
+
+ // Setup the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
+ __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
+ __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
+ __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ // Untag the length for the loop.
+ __ srl(a1, a1, kSmiTagSize);
+
+ // Copy the fixed array slots.
+ Label loop;
+ // Setup t0 to point to the first array slot.
+ __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ // Pre-decrement a2 with kPointerSize on each iteration.
+ // Pre-decrement in order to skip receiver.
+ __ Addu(a2, a2, Operand(-kPointerSize));
+ __ lw(a3, MemOperand(a2));
+ // Post-increment t0 with kPointerSize on each iteration.
+ __ sw(a3, MemOperand(t0));
+ __ Addu(t0, t0, Operand(kPointerSize));
+ __ Subu(a1, a1, Operand(1));
+ __ Branch(&loop, ne, a1, Operand(zero_reg));
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ // Jump straight to the runtime if native RegExp support was not selected
+ // at compile time, or if regexp entry in generated code has been turned
+ // off by the runtime switch.
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
+ }
+
+ // Stack frame on entry.
+ // sp[0]: last_match_info (expected JSArray)
+ // sp[4]: previous index
+ // sp[8]: subject string
+ // sp[12]: JSRegExp object
+
+ static const int kLastMatchInfoOffset = 0 * kPointerSize;
+ static const int kPreviousIndexOffset = 1 * kPointerSize;
+ static const int kSubjectOffset = 2 * kPointerSize;
+ static const int kJSRegExpOffset = 3 * kPointerSize;
+
+ Label runtime, invoke_regexp;
+
+ // Allocation of registers for this function. These are in callee-saved
+ // registers and will be preserved by the call to the native RegExp code, as
+ // this code is called using the normal C calling convention. When calling
+ // directly from generated code the native RegExp code will not do a GC and
+ // therefore the content of these registers are safe to use after the call.
+ // MIPS - using s0..s2, since we are not using CEntry Stub.
+ Register subject = s0;
+ Register regexp_data = s1;
+ Register last_match_info_elements = s2;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address(
+ masm->isolate());
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
+ __ li(a0, Operand(address_of_regexp_stack_memory_size));
+ __ lw(a0, MemOperand(a0, 0));
+ __ Branch(&runtime, eq, a0, Operand(zero_reg));
+
+ // Check that the first argument is a JSRegExp object.
+ __ lw(a0, MemOperand(sp, kJSRegExpOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(a0, &runtime);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
+
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ __ And(t0, regexp_data, Operand(kSmiTagMask));
+ __ Check(nz,
+ "Unexpected type for RegExp data, FixedArray expected",
+ t0,
+ Operand(zero_reg));
+ __ GetObjectType(regexp_data, a0, a0);
+ __ Check(eq,
+ "Unexpected type for RegExp data, FixedArray expected",
+ a0,
+ Operand(FIXED_ARRAY_TYPE));
+ }
+
+ // regexp_data: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+ __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
+
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the number of captures fit in the static offsets vector buffer.
+ __ lw(a2,
+ FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2. This
+ // uses the assumption that smis are 2 * their untagged value.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ Addu(a2, a2, Operand(2)); // a2 was a smi.
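+ // A smi stores 2 * n in the word, so adding 2 to the tagged value yields
+ // 2 * (n + 1), the capture register count, with no untagging needed.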
+ // Check that the static offsets vector buffer is large enough.
+ __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
+
+ // a2: Number of capture registers
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the second argument is a string.
+ __ lw(subject, MemOperand(sp, kSubjectOffset));
+ __ JumpIfSmi(subject, &runtime);
+ __ GetObjectType(subject, a0, a0);
+ __ And(a0, a0, Operand(kIsNotStringMask));
+ STATIC_ASSERT(kStringTag == 0);
+ __ Branch(&runtime, ne, a0, Operand(zero_reg));
+
+ // Get the length of the string to r3.
+ __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
+
+ // a2: Number of capture registers
+ // a3: Length of subject string as a smi
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the third argument is a positive smi less than the subject
+ // string length. A negative value will be greater (unsigned comparison).
+ __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
+ __ And(at, a0, Operand(kSmiTagMask));
+ __ Branch(&runtime, ne, at, Operand(zero_reg));
+ __ Branch(&runtime, ls, a3, Operand(a0));
+
+ // a2: Number of capture registers
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the fourth object is a JSArray object.
+ __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(a0, &runtime);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
+ // Check that the JSArray is in fast case.
+ __ lw(last_match_info_elements,
+ FieldMemOperand(a0, JSArray::kElementsOffset));
+ __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ Branch(&runtime, ne, a0, Operand(
+ masm->isolate()->factory()->fixed_array_map()));
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ lw(a0,
+ FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+ __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
+ __ sra(at, a0, kSmiTagSize); // Untag length for comparison.
+ __ Branch(&runtime, gt, a2, Operand(at));
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check the representation and encoding of the subject string.
+ Label seq_string;
+ __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+ // First check for flat string.
+ __ And(at, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
+ STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+ __ Branch(&seq_string, eq, at, Operand(zero_reg));
+
+ // subject: Subject string
+ // a0: Instance type of subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string or an external string.
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
+ __ And(at, a0, Operand(kIsNotStringMask | kExternalStringTag));
+ __ Branch(&runtime, ne, at, Operand(zero_reg));
+ __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
+ __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
+ __ Branch(&runtime, ne, a0, Operand(a1));
+ __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+ __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+ // Is first part a flat string?
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ And(at, a0, Operand(kStringRepresentationMask));
+ __ Branch(&runtime, ne, at, Operand(zero_reg));
+
+ __ bind(&seq_string);
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // a0: Instance type of subject string
+ STATIC_ASSERT(kStringEncodingMask == 4);
+ STATIC_ASSERT(kAsciiStringTag == 4);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ // Find the code object based on the assumptions above.
+ __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ascii.
+ __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
+ __ sra(a3, a0, 2);  // a3 is 1 for ascii, 0 for UC16 (used below).
+ __ lw(t0, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
+ __ movz(t9, t0, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
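+ // movz is a conditional move: t9 is replaced with t0 only when a0 is
+ // zero, selecting the two-byte code object without a branch.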
+
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object otherwise it
+ // contains the hole.
+ __ GetObjectType(t9, a0, a0);
+ __ Branch(&runtime, ne, a0, Operand(CODE_TYPE));
+
+ // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // t9: code
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Load used arguments before starting to push arguments for call to native
+ // RegExp code to avoid handling changing stack height.
+ __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
+ __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
+
+ // a1: previous index
+ // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // t9: code
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(),
+ 1, a0, a2);
+
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ static const int kRegExpExecuteArguments = 8;
+ static const int kParameterRegisters = 4;
+ __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
+
+ // Stack pointer now points to cell where return address is to be written.
+ // Arguments are before that on the stack or in registers, meaning we
+ // treat the return address as argument 5. Thus every argument after that
+ // needs to be shifted back by 1. Since DirectCEntryStub will handle
+ // allocating space for the c argument slots, we don't need to calculate
+ // that into the argument positions on the stack. This is how the stack will
+ // look (sp meaning the value of sp at this moment):
+ // [sp + 4] - Argument 8
+ // [sp + 3] - Argument 7
+ // [sp + 2] - Argument 6
+ // [sp + 1] - Argument 5
+ // [sp + 0] - saved ra
+
+ // Argument 8: Pass current isolate address.
+ // CFunctionArgumentOperand handles MIPS stack argument slots.
+ __ li(a0, Operand(ExternalReference::isolate_address()));
+ __ sw(a0, MemOperand(sp, 4 * kPointerSize));
+
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ __ li(a0, Operand(1));
+ __ sw(a0, MemOperand(sp, 3 * kPointerSize));
+
+ // Argument 6: Start (high end) of backtracking stack memory area.
+ __ li(a0, Operand(address_of_regexp_stack_memory_address));
+ __ lw(a0, MemOperand(a0, 0));
+ __ li(a2, Operand(address_of_regexp_stack_memory_size));
+ __ lw(a2, MemOperand(a2, 0));
+ __ addu(a0, a0, a2);
+ __ sw(a0, MemOperand(sp, 2 * kPointerSize));
+
+ // Argument 5: static offsets vector buffer.
+ __ li(a0, Operand(
+ ExternalReference::address_of_static_offsets_vector(masm->isolate())));
+ __ sw(a0, MemOperand(sp, 1 * kPointerSize));
+
+ // For arguments 4 and 3 get string length, calculate start of string data
+ // and calculate the shift of the index (0 for ASCII and 1 for two byte).
+ __ lw(a0, FieldMemOperand(subject, String::kLengthOffset));
+ __ sra(a0, a0, kSmiTagSize);
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ Addu(t0, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
+ // Argument 4 (a3): End of string data
+ // Argument 3 (a2): Start of string data
+ __ sllv(t1, a1, a3);
+ __ addu(a2, t0, t1);
+ __ sllv(t1, a0, a3);
+ __ addu(a3, t0, t1);
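+ // sllv shifts by the amount held in a3 (0 for ASCII, 1 for two-byte), so
+ // character indices are scaled into byte offsets before the adds above.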
+
+ // Argument 2 (a1): Previous index.
+ // Already there
+
+ // Argument 1 (a0): Subject string.
+ __ mov(a0, subject);
+
+ // Locate the code entry and call it.
+ __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm, t9);
+
+ __ LeaveExitFrame(false, no_reg);
+
+ // v0: result
+ // subject: subject string (callee saved)
+ // regexp_data: RegExp data (callee saved)
+ // last_match_info_elements: Last match info elements (callee saved)
+
+ // Check the result.
+
+ Label success;
+ __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+ Label failure;
+ __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
+ // If not exception, it can only be retry. Handle that in the runtime system.
+ __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+ // The result must now be exception. If there is no pending exception
+ // already, a stack overflow (on the backtrack stack) was detected in the
+ // RegExp code but the exception has not been created yet. Handle that in
+ // the runtime system.
+ // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+ __ li(a1, Operand(
+ ExternalReference::the_hole_value_location(masm->isolate())));
+ __ lw(a1, MemOperand(a1, 0));
+ __ li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ masm->isolate())));
+ __ lw(v0, MemOperand(a2, 0));
+ __ Branch(&runtime, eq, v0, Operand(a1));
+
+ __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
+
+ // Check if the exception is a termination. If so, throw as uncatchable.
+ __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
+ Label termination_exception;
+ __ Branch(&termination_exception, eq, v0, Operand(a0));
+
+ __ Throw(a0); // Expects thrown value in v0.
+
+ __ bind(&termination_exception);
+ __ ThrowUncatchable(TERMINATION, v0); // Expects thrown value in v0.
+
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ li(v0, Operand(masm->isolate()->factory()->null_value()));
+ __ Addu(sp, sp, Operand(4 * kPointerSize));
+ __ Ret();
+
+ // Process the result from the native regexp code.
+ __ bind(&success);
+ __ lw(a1,
+ FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ Addu(a1, a1, Operand(2)); // a1 was a smi.
+
+ // a1: number of capture registers
+ // subject: subject string
+ // Store the capture count.
+ __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
+ __ sw(a2, FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastCaptureCountOffset));
+ // Store last subject and last input.
+ __ mov(a3, last_match_info_elements); // Moved up to reduce latency.
+ __ sw(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset));
+ __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
+ __ sw(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastInputOffset));
+ __ mov(a3, last_match_info_elements);
+ __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
+
+ // Get the static offsets vector filled by the native regexp code.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector(masm->isolate());
+ __ li(a2, Operand(address_of_static_offsets_vector));
+
+ // a1: number of capture registers
+ // a2: offsets vector
+ Label next_capture, done;
+ // Capture register counter starts from number of capture registers and
+ // counts down until wrapping after zero.
+ __ Addu(a0,
+ last_match_info_elements,
+ Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+ __ bind(&next_capture);
+ __ Subu(a1, a1, Operand(1));
+ __ Branch(&done, lt, a1, Operand(zero_reg));
+ // Read the value from the static offsets vector buffer.
+ __ lw(a3, MemOperand(a2, 0));
+ __ addiu(a2, a2, kPointerSize);
+ // Store the smi value in the last match info.
+ __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
+ __ sw(a3, MemOperand(a0, 0));
+ __ Branch(&next_capture, USE_DELAY_SLOT);
+ __ addiu(a0, a0, kPointerSize); // In branch delay slot.
+
+ __ bind(&done);
+
+ // Return last match info.
+ __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
+ __ Addu(sp, sp, Operand(4 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif // V8_INTERPRETED_REGEXP
+}
+
+
+void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
+ const int kMaxInlineLength = 100;
+ Label slowcase;
+ Label done;
+ __ lw(a1, MemOperand(sp, kPointerSize * 2));
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ JumpIfNotSmi(a1, &slowcase);
+ __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
+ // Smi-tagging is equivalent to multiplying by 2.
+ // Allocate RegExpResult followed by FixedArray with size in a2.
+ // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
+ // Elements: [Map][Length][..elements..]
+ // Size of JSArray with two in-object properties and the header of a
+ // FixedArray.
+ int objects_size =
+ (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
+ __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
+ __ Addu(a2, t1, Operand(objects_size));
+ __ AllocateInNewSpace(
+ a2, // In: Size, in words.
+ v0, // Out: Start of allocation (tagged).
+ a3, // Scratch register.
+ t0, // Scratch register.
+ &slowcase,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+ // v0: Start of allocated area, object-tagged.
+ // a1: Number of elements in array, as smi.
+ // t1: Number of elements, untagged.
+
+ // Set JSArray map to global.regexp_result_map().
+ // Set empty properties FixedArray.
+ // Set elements to point to FixedArray allocated right after the JSArray.
+ // Interleave operations for better latency.
+ __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
+ __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+ __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
+ __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+ // Set input, index and length fields from arguments.
+ __ lw(a1, MemOperand(sp, kPointerSize * 0));
+ __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
+ __ lw(a1, MemOperand(sp, kPointerSize * 1));
+ __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
+ __ lw(a1, MemOperand(sp, kPointerSize * 2));
+ __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
+
+ // Fill out the elements FixedArray.
+ // v0: JSArray, tagged.
+ // a3: FixedArray, tagged.
+ // t1: Number of elements in array, untagged.
+
+ // Set map.
+ __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
+ // Set FixedArray length.
+ __ sll(t2, t1, kSmiTagSize);
+ __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
+ // Fill contents of fixed-array with the-hole.
+ __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
+ __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // Fill fixed array elements with hole.
+ // v0: JSArray, tagged.
+ // a2: the hole.
+ // a3: Start of elements in FixedArray.
+ // t1: Number of elements to fill.
+ Label loop;
+ __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
+ __ addu(t1, t1, a3); // Point past last element to store.
+ __ bind(&loop);
+ __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem.
+ __ sw(a2, MemOperand(a3));
+ __ Branch(&loop, USE_DELAY_SLOT);
+ __ addiu(a3, a3, kPointerSize); // In branch delay slot.
+
+ __ bind(&done);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&slowcase);
+ __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
+
+ // The receiver might implicitly be the global object. This is
+ // indicated by passing the hole as the receiver to the call
+ // function stub.
+ if (ReceiverMightBeImplicit()) {
+ Label call;
+ // Get the receiver from the stack.
+ // function, receiver [, arguments]
+ __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
+ // Call as function is indicated with the hole.
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&call, ne, t0, Operand(at));
+ // Patch the receiver on the stack with the global receiver object.
+ __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
+ __ bind(&call);
+ }
+
+ // Get the function to call from the stack.
+ // function, receiver [, arguments]
+ __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
+
+ // Check that the function is really a JavaScript function.
+ // a1: pushed function (to be verified)
+ __ JumpIfSmi(a1, &slow);
+ // Get the map of the function object.
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ // Fast-case: Invoke the function now.
+ // a1: pushed function
+ ParameterCount actual(argc_);
+
+ if (ReceiverMightBeImplicit()) {
+ Label call_as_function;
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&call_as_function, eq, t0, Operand(at));
+ __ InvokeFunction(a1,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ CALL_AS_METHOD);
+ __ bind(&call_as_function);
+ }
+ __ InvokeFunction(a1,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ CALL_AS_FUNCTION);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
+ __ li(a0, Operand(argc_)); // Set up the number of arguments.
+ __ mov(a2, zero_reg);
+ __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+ ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+ (lhs_.is(a1) && rhs_.is(a0)));
+
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+
+ const char* cc_name;
+ switch (cc_) {
+ case lt: cc_name = "LT"; break;
+ case gt: cc_name = "GT"; break;
+ case le: cc_name = "LE"; break;
+ case ge: cc_name = "GE"; break;
+ case eq: cc_name = "EQ"; break;
+ case ne: cc_name = "NE"; break;
+ default: cc_name = "UnknownCondition"; break;
+ }
+
+ const char* lhs_name = lhs_.is(a0) ? "_a0" : "_a1";
+ const char* rhs_name = rhs_.is(a0) ? "_a0" : "_a1";
+
+ const char* strict_name = "";
+ if (strict_ && (cc_ == eq || cc_ == ne)) {
+ strict_name = "_STRICT";
+ }
+
+ const char* never_nan_nan_name = "";
+ if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
+ never_nan_nan_name = "_NO_NAN";
+ }
+
+ const char* include_number_compare_name = "";
+ if (!include_number_compare_) {
+ include_number_compare_name = "_NO_NUMBER";
+ }
+
+ const char* include_smi_compare_name = "";
+ if (!include_smi_compare_) {
+ include_smi_compare_name = "_NO_SMI";
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "CompareStub_%s%s%s%s%s%s",
+ cc_name,
+ lhs_name,
+ rhs_name,
+ strict_name,
+ never_nan_nan_name,
+ include_number_compare_name,
+ include_smi_compare_name);
+ return name_;
+}
+
+
+int CompareStub::MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
+ ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+ (lhs_.is(a1) && rhs_.is(a0)));
+ return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(lhs_.is(a0))
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
+ | IncludeNumberCompareField::encode(include_number_compare_)
+ | IncludeSmiCompareField::encode(include_smi_compare_);
+}
+
+
+// StringCharCodeAtGenerator.
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
+ Label ascii_string;
+ Label got_char_code;
+
+ ASSERT(!t0.is(scratch_));
+ ASSERT(!t0.is(index_));
+ ASSERT(!t0.is(result_));
+ ASSERT(!t0.is(object_));
+
+ // If the receiver is a smi trigger the non-string case.
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ And(t0, result_, Operand(kIsNotStringMask));
+ __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
+
+ // If the index is non-smi trigger the non-smi case.
+ __ JumpIfNotSmi(index_, &index_not_smi_);
+
+ // Put smi-tagged index into scratch register.
+ __ mov(scratch_, index_);
+ __ bind(&got_smi_index_);
+
+ // Check for index out of range.
+ __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
+ __ Branch(index_out_of_range_, ls, t0, Operand(scratch_));
+
+ // We need special handling for non-flat strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ And(t0, result_, Operand(kStringRepresentationMask));
+ __ Branch(&flat_string, eq, t0, Operand(zero_reg));
+
+ // Handle non-flat strings.
+ __ And(t0, result_, Operand(kIsConsStringMask));
+ __ Branch(&call_runtime_, eq, t0, Operand(zero_reg));
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
+ __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
+ __ Branch(&call_runtime_, ne, result_, Operand(t0));
+
+ // Get the first of the two strings and load its instance type.
+ __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+ __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+
+ __ And(t0, result_, Operand(kStringRepresentationMask));
+ __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ And(t0, result_, Operand(kStringEncodingMask));
+ __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register. We can
+ // add without shifting since the smi tag size is the log2 of the
+ // number of bytes in a two-byte character.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
+ __ Addu(scratch_, object_, Operand(scratch_));
+ __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
+ __ Branch(&got_char_code);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+
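+ // Untag the smi index; each ASCII character occupies a single byte.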
+ __ srl(t0, scratch_, kSmiTagSize);
+ __ Addu(scratch_, object_, t0);
+
+ __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
+
+ __ bind(&got_char_code);
+ __ sll(result_, result_, kSmiTagSize);
+ __ bind(&exit_);
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_,
+ scratch_,
+ Heap::kHeapNumberMapRootIndex,
+ index_not_number_,
+ DONT_DO_SMI_CHECK);
+ call_helper.BeforeCall(masm);
+ // Consumed by runtime conversion function:
+ __ Push(object_, index_, index_);
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+
+ __ Move(scratch_, v0);
+
+ __ pop(index_);
+ __ pop(object_);
+ // Reload the instance type.
+ __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ Branch(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code for getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ Push(object_, index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+
+ __ Move(result_, v0);
+
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+
+ ASSERT(!t0.is(result_));
+ ASSERT(!t0.is(code_));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
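+ // A single mask test rejects both non-smis and smi char codes above
+ // String::kMaxAsciiCharCode.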
+ __ And(t0,
+ code_,
+ Operand(kSmiTagMask |
+ ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
+
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ // At this point code register contains smi tagged ASCII char code.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(result_, result_, t0);
+ __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&slow_case_, eq, result_, Operand(t0));
+ __ bind(&exit_);
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ Move(result_, v0);
+
+ call_helper.AfterCall(masm);
+ __ Branch(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
+}
+
+
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersLong adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
+
+ // Generate code for copying a large number of characters. This function
+ // is allowed to spend extra time setting up conditions to make copying
+ // faster. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags);
+
+
+ // Probe the symbol table for a two character string. If the string is
+ // not found by probing, a jump to the label not_found is performed. This
+ // jump does not guarantee that the string is not in the symbol table. If
+ // the string is found, the code falls through with the string in
+ // register v0. The contents of both the c1 and c2 registers are
+ // modified. On exit c1 is guaranteed to contain a halfword with its low
+ // and high bytes equal to the initial contents of c1 and c2 respectively.
+ static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found);
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ Label loop;
+ Label done;
+ // This loop just copies one character at a time, as it is only used for
+ // very short strings.
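+ // Two-byte strings use two bytes per character, so double the count.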
+ if (!ascii) {
+ __ addu(count, count, count);
+ }
+ __ Branch(&done, eq, count, Operand(zero_reg));
+ __ addu(count, dest, count); // Count now points one past the last byte.
+
+ __ bind(&loop);
+ __ lbu(scratch, MemOperand(src));
+ __ addiu(src, src, 1);
+ __ sb(scratch, MemOperand(dest));
+ __ addiu(dest, dest, 1);
+ __ Branch(&loop, lt, dest, Operand(count));
+
+ __ bind(&done);
+}
+
+
+enum CopyCharactersFlags {
+ COPY_ASCII = 1,
+ DEST_ALWAYS_ALIGNED = 2
+};
+
+
+void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags) {
+ bool ascii = (flags & COPY_ASCII) != 0;
+ bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
+
+ if (dest_always_aligned && FLAG_debug_code) {
+ // Check that destination is actually word aligned if the flag says
+ // that it is.
+ __ And(scratch4, dest, Operand(kPointerAlignmentMask));
+ __ Check(eq,
+ "Destination of copy not aligned.",
+ scratch4,
+ Operand(zero_reg));
+ }
+
+ const int kReadAlignment = 4;
+ const int kReadAlignmentMask = kReadAlignment - 1;
+ // Ensure that reading an entire aligned word containing the last character
+ // of a string will not read outside the allocated area (because we pad up
+ // to kObjectAlignment).
+ STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
+ // Assumes word reads and writes are little endian.
+ // Nothing to do for zero characters.
+ Label done;
+
+ if (!ascii) {
+ __ addu(count, count, count);
+ }
+ __ Branch(&done, eq, count, Operand(zero_reg));
+
+ Label byte_loop;
+ // Must copy at least eight bytes, otherwise just do it one byte at a time.
+ __ Subu(scratch1, count, Operand(8));
+ __ Addu(count, dest, Operand(count));
+ Register limit = count; // Read until src equals this.
+ __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
+
+ if (!dest_always_aligned) {
+ // Align dest by byte copying. Copies between zero and three bytes.
+ __ And(scratch4, dest, Operand(kReadAlignmentMask));
+ Label dest_aligned;
+ __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
+ Label aligned_loop;
+ __ bind(&aligned_loop);
+ __ lbu(scratch1, MemOperand(src));
+ __ addiu(src, src, 1);
+ __ sb(scratch1, MemOperand(dest));
+ __ addiu(dest, dest, 1);
+ __ addiu(scratch4, scratch4, 1);
+ __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
+ __ bind(&dest_aligned);
+ }
+
+ Label simple_loop;
+
+ __ And(scratch4, src, Operand(kReadAlignmentMask));
+ __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
+
+ // Loop for src/dst that are not aligned the same way.
+ // This loop uses lwl and lwr instructions. These instructions
+ // depend on the endianness, and the implementation assumes little-endian.
+ {
+ Label loop;
+ __ bind(&loop);
+ __ lwr(scratch1, MemOperand(src));
+ __ Addu(src, src, Operand(kReadAlignment));
+ __ lwl(scratch1, MemOperand(src, -1));
+ __ sw(scratch1, MemOperand(dest));
+ __ Addu(dest, dest, Operand(kReadAlignment));
+ __ Subu(scratch2, limit, dest);
+ __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
+ }
+
+ __ Branch(&byte_loop);
+
+ // Simple loop.
+ // Copy words from src to dest, until less than four bytes left.
+ // Both src and dest are word aligned.
+ __ bind(&simple_loop);
+ {
+ Label loop;
+ __ bind(&loop);
+ __ lw(scratch1, MemOperand(src));
+ __ Addu(src, src, Operand(kReadAlignment));
+ __ sw(scratch1, MemOperand(dest));
+ __ Addu(dest, dest, Operand(kReadAlignment));
+ __ Subu(scratch2, limit, dest);
+ __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
+ }
+
+ // Copy bytes from src to dest until dest hits limit.
+ __ bind(&byte_loop);
+ // Test if dest has already reached the limit.
+ __ Branch(&done, ge, dest, Operand(limit));
+ __ lbu(scratch1, MemOperand(src));
+ __ addiu(src, src, 1);
+ __ sb(scratch1, MemOperand(dest));
+ __ addiu(dest, dest, 1);
+ __ Branch(&byte_loop);
+
+ __ bind(&done);
+}
+
+
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found) {
+ // Register scratch3 is the general scratch register in this function.
+ Register scratch = scratch3;
+
+ // Make sure that both characters are not digits, as such strings have a
+ // different hash algorithm. Don't try to look for these in the symbol table.
+ Label not_array_index;
+ __ Subu(scratch, c1, Operand(static_cast<int>('0')));
+ __ Branch(&not_array_index,
+ Ugreater,
+ scratch,
+ Operand(static_cast<int>('9' - '0')));
+ __ Subu(scratch, c2, Operand(static_cast<int>('0')));
+
+ // If the check failed, combine both characters into a single halfword.
+ // This is required by the contract of the method: code at the
+ // not_found branch expects this combination in the c1 register.
+ Label tmp;
+ __ sll(scratch1, c2, kBitsPerByte);
+ __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
+ __ Or(c1, c1, scratch1);
+ __ bind(&tmp);
+ __ Branch(not_found,
+ Uless_equal,
+ scratch,
+ Operand(static_cast<int>('9' - '0')));
+
+ __ bind(&not_array_index);
+ // Calculate the two character string hash.
+ Register hash = scratch1;
+ StringHelper::GenerateHashInit(masm, hash, c1);
+ StringHelper::GenerateHashAddCharacter(masm, hash, c2);
+ StringHelper::GenerateHashGetHash(masm, hash);
+
+ // Collect the two characters in a register.
+ Register chars = c1;
+ __ sll(scratch, c2, kBitsPerByte);
+ __ Or(chars, chars, scratch);
+
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string.
+
+ // Load symbol table.
+ // Load address of first element of the symbol table.
+ Register symbol_table = c2;
+ __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+ Register undefined = scratch4;
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ // Calculate capacity mask from the symbol table capacity.
+ Register mask = scratch2;
+ __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ sra(mask, mask, 1);
+ __ Addu(mask, mask, -1);
+
+ // Calculate untagged address of the first element of the symbol table.
+ Register first_symbol_table_element = symbol_table;
+ __ Addu(first_symbol_table_element, symbol_table,
+ Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+
+ // Registers.
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string
+ // mask: capacity mask
+ // first_symbol_table_element: address of the first element of
+ // the symbol table
+ // undefined: the undefined object
+ // scratch: -
+
+ // Perform a number of probes in the symbol table.
+ static const int kProbes = 4;
+ Label found_in_symbol_table;
+ Label next_probe[kProbes];
+ Register candidate = scratch5; // Scratch register contains candidate.
+ for (int i = 0; i < kProbes; i++) {
+ // Calculate entry in symbol table.
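+ // The probe sequence is quadratic: entry(i) = (hash + GetProbeOffset(i))
+ // & mask, matching the probing used by the runtime symbol table lookup.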
+ if (i > 0) {
+ __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+ } else {
+ __ mov(candidate, hash);
+ }
+
+ __ And(candidate, candidate, Operand(mask));
+
+ // Load the entry from the symbol table.
+ STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ __ sll(scratch, candidate, kPointerSizeLog2);
+ __ Addu(scratch, scratch, first_symbol_table_element);
+ __ lw(candidate, MemOperand(scratch));
+
+ // If entry is undefined no string with this hash can be found.
+ Label is_string;
+ __ GetObjectType(candidate, scratch, scratch);
+ __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
+
+ __ Branch(not_found, eq, undefined, Operand(candidate));
+ // Must be null (deleted entry).
+ if (FLAG_debug_code) {
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ Assert(eq, "oddball in symbol table is not undefined or null",
+ scratch, Operand(candidate));
+ }
+ __ jmp(&next_probe[i]);
+
+ __ bind(&is_string);
+
+ // Check that the candidate is a non-external ASCII string. The instance
+ // type is still in the scratch register from the GetObjectType
+ // operation above.
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
+
+ // If length is not 2 the string is not a candidate.
+ __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
+ __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
+
+ // Check if the two characters match.
+ // Assumes that word load is little endian.
+ __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
+ __ bind(&next_probe[i]);
+ }
+
+ // No matching 2 character string found by probing.
+ __ jmp(not_found);
+
+ // Scratch register contains result when we fall through to here.
+ Register result = candidate;
+ __ bind(&found_in_symbol_table);
+ __ mov(v0, result);
+}
+
+
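+ // The three helpers below compute the running string hash (a
+ // Jenkins-style one-at-a-time hash) in three phases: seed it with the
+ // first character, fold in each subsequent character, then finalize.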
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ // hash = character + (character << 10);
+ __ sll(hash, character, 10);
+ __ addu(hash, hash, character);
+ // hash ^= hash >> 6;
+ __ sra(at, hash, 6);
+ __ xor_(hash, hash, at);
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ // hash += character;
+ __ addu(hash, hash, character);
+ // hash += hash << 10;
+ __ sll(at, hash, 10);
+ __ addu(hash, hash, at);
+ // hash ^= hash >> 6;
+ __ sra(at, hash, 6);
+ __ xor_(hash, hash, at);
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash) {
+ // hash += hash << 3;
+ __ sll(at, hash, 3);
+ __ addu(hash, hash, at);
+ // hash ^= hash >> 11;
+ __ sra(at, hash, 11);
+ __ xor_(hash, hash, at);
+ // hash += hash << 15;
+ __ sll(at, hash, 15);
+ __ addu(hash, hash, at);
+
+ // if (hash == 0) hash = 27;
+ __ ori(at, zero_reg, 27);
+ __ movz(hash, at, hash);
+}
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label sub_string_runtime;
+ // Stack frame on entry.
+ // ra: return address
+ // sp[0]: to
+ // sp[4]: from
+ // sp[8]: string
+
+ // This stub is called from the native-call %_SubString(...), so
+ // nothing can be assumed about the arguments. It is tested that:
+ // "string" is a sequential string,
+ // both "from" and "to" are smis, and
+ // 0 <= from <= to <= string.length.
+ // If any of these assumptions fail, we call the runtime system.
+
+ static const int kToOffset = 0 * kPointerSize;
+ static const int kFromOffset = 1 * kPointerSize;
+ static const int kStringOffset = 2 * kPointerSize;
+
+ Register to = t2;
+ Register from = t3;
+
+ // Check bounds and smi-ness.
+ __ lw(to, MemOperand(sp, kToOffset));
+ __ lw(from, MemOperand(sp, kFromOffset));
+ STATIC_ASSERT(kFromOffset == kToOffset + 4);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+
+ __ JumpIfNotSmi(from, &sub_string_runtime);
+ __ JumpIfNotSmi(to, &sub_string_runtime);
+
+ __ sra(a3, from, kSmiTagSize); // Remove smi tag.
+ __ sra(t5, to, kSmiTagSize); // Remove smi tag.
+
+ // a3: from index (untagged smi)
+ // t5: to index (untagged smi)
+
+ __ Branch(&sub_string_runtime, lt, a3, Operand(zero_reg)); // From < 0.
+
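+ // Compute the result length: a2 = to - from (both untagged).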
+ __ subu(a2, t5, a3);
+ __ Branch(&sub_string_runtime, gt, a3, Operand(t5)); // Fail if from > to.
+
+ // Special handling of sub-strings of length 1 and 2. One character strings
+ // are handled in the runtime system (looked up in the single character
+ // cache). Two character strings are looked for in the symbol cache.
+ __ Branch(&sub_string_runtime, lt, a2, Operand(2));
+
+ // Both to and from are smis.
+
+ // a2: result string length
+ // a3: from index (untagged smi)
+ // t2: (a.k.a. to): to (smi)
+ // t3: (a.k.a. from): from offset (smi)
+ // t5: to index (untagged smi)
+
+ // Make sure first argument is a sequential (or flat) string.
+ __ lw(t1, MemOperand(sp, kStringOffset));
+ __ JumpIfSmi(t1, &sub_string_runtime);
+
+ __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+ __ And(t4, a1, Operand(kIsNotStringMask));
+
+ __ Branch(&sub_string_runtime, ne, t4, Operand(zero_reg));
+
+ // a1: instance type
+ // a2: result string length
+ // a3: from index (untagged smi)
+ // t1: string
+ // t2: (a.k.a. to): to (smi)
+ // t3: (a.k.a. from): from offset (smi)
+ // t5: to index (untagged smi)
+
+ Label seq_string;
+ __ And(t0, a1, Operand(kStringRepresentationMask));
+ STATIC_ASSERT(kSeqStringTag < kConsStringTag);
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+
+ // External strings go to runtime.
+ __ Branch(&sub_string_runtime, gt, t0, Operand(kConsStringTag));
+
+ // Sequential strings are handled directly.
+ __ Branch(&seq_string, lt, t0, Operand(kConsStringTag));
+
+ // Cons string. Try to recurse (once) on the first substring.
+ // (This adds a little more generality than necessary to handle flattened
+ // cons strings, but not much).
+ __ lw(t1, FieldMemOperand(t1, ConsString::kFirstOffset));
+ __ lw(t0, FieldMemOperand(t1, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSeqStringTag == 0);
+ // Cons and External strings go to runtime.
+ __ Branch(&sub_string_runtime, ne, a1, Operand(kStringRepresentationMask));
+
+ // Definitely a sequential string.
+ __ bind(&seq_string);
+
+ // a1: instance type
+ // a2: result string length
+ // a3: from index (untagged smi)
+ // t1: string
+ // t2: (a.k.a. to): to (smi)
+ // t3: (a.k.a. from): from offset (smi)
+ // t5: to index (untagged smi)
+
+ __ lw(t0, FieldMemOperand(t1, String::kLengthOffset));
+ __ Branch(&sub_string_runtime, lt, t0, Operand(to)); // Fail if to > length.
+ to = no_reg;
+
+ // a1: instance type
+ // a2: result string length
+ // a3: from index (untagged smi)
+ // t1: string
+ // t3: (a.k.a. from): from offset (smi)
+ // t5: to index (untagged smi)
+
+ // Check for flat ASCII string.
+ Label non_ascii_flat;
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+
+ __ And(t4, a1, Operand(kStringEncodingMask));
+ __ Branch(&non_ascii_flat, eq, t4, Operand(zero_reg));
+
+ Label result_longer_than_two;
+ __ Branch(&result_longer_than_two, gt, a2, Operand(2));
+
+ // Sub string of length 2 requested.
+ // Get the two characters forming the sub string.
+ __ Addu(t1, t1, Operand(a3));
+ __ lbu(a3, FieldMemOperand(t1, SeqAsciiString::kHeaderSize));
+ __ lbu(t0, FieldMemOperand(t1, SeqAsciiString::kHeaderSize + 1));
+
+ // Try to lookup two character string in symbol table.
+ Label make_two_character_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // a2: result string length.
+ // a3: two characters combined into halfword in little endian byte order.
+ __ bind(&make_two_character_string);
+ __ AllocateAsciiString(v0, a2, t0, t1, t4, &sub_string_runtime);
+ __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+ __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&result_longer_than_two);
+
+ // Allocate the result.
+ __ AllocateAsciiString(v0, a2, t4, t0, a1, &sub_string_runtime);
+
+ // v0: result string.
+ // a2: result string length.
+ // a3: from index (untagged smi)
+ // t1: string.
+ // t3: (a.k.a. from): from offset (smi)
+ // Locate first character of result.
+ __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Locate 'from' character of string.
+ __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(t1, t1, Operand(a3));
+
+ // v0: result string.
+ // a1: first character of result string.
+ // a2: result string length.
+ // t1: first character of sub string to copy.
+ STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharactersLong(
+ masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
+ __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii_flat);
+ // a2: result string length.
+ // t1: string.
+ // t3: (a.k.a. from): from offset (smi)
+ // Handle the flat two byte string case.
+
+ // Allocate the result.
+ __ AllocateTwoByteString(v0, a2, a1, a3, t0, &sub_string_runtime);
+
+ // v0: result string.
+ // a2: result string length.
+ // t1: string.
+ // Locate first character of result.
+ __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Locate 'from' character of string.
+ __ Addu(t1, t1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // As "from" is a smi it is 2 times the value which matches the size of a two
+ // byte character.
+ __ Addu(t1, t1, Operand(from));
+ from = no_reg;
+
+ // v0: result string.
+ // a1: first character of result.
+ // a2: result length.
+ // t1: first character of string to copy.
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharactersLong(
+ masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
+ __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&sub_string_runtime);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
+}
+
+
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ Register length = scratch1;
+
+ // Compare lengths.
+ Label strings_not_equal, check_zero_length;
+ __ lw(length, FieldMemOperand(left, String::kLengthOffset));
+ __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Branch(&check_zero_length, eq, length, Operand(scratch2));
+ __ bind(&strings_not_equal);
+ __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
+ __ Ret();
+
+ // Check if the length is zero.
+ Label compare_chars;
+ __ bind(&check_zero_length);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Branch(&compare_chars, ne, length, Operand(zero_reg));
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ Ret();
+
+ // Compare characters.
+ __ bind(&compare_chars);
+
+ GenerateAsciiCharsCompareLoop(masm,
+ left, right, length, scratch2, scratch3, v0,
+ &strings_not_equal);
+
+ // Characters are equal.
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ Ret();
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ Label result_not_equal, compare_lengths;
+ // Find minimum length and length difference.
+ __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Subu(scratch3, scratch1, Operand(scratch2));
+ Register length_delta = scratch3;
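+ // Compute min_length: if the right string is shorter, slt sets scratch4
+ // and movn replaces scratch1 with scratch2.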
+ __ slt(scratch4, scratch2, scratch1);
+ __ movn(scratch1, scratch2, scratch4);
+ Register min_length = scratch1;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
+
+ // Compare loop.
+ GenerateAsciiCharsCompareLoop(masm,
+ left, right, min_length, scratch2, scratch4, v0,
+ &result_not_equal);
+
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+ // Set up the comparison below to test length_delta against zero, and
+ // preset v0 to EQUAL (0) in case the lengths match.
+ __ mov(scratch2, length_delta);
+ __ mov(scratch4, zero_reg);
+ __ mov(v0, zero_reg);
+
+ __ bind(&result_not_equal);
+ // Conditionally update the result based either on length_delta or
+ // the last comparison performed in the loop above.
+ Label ret;
+ __ Branch(&ret, eq, scratch2, Operand(scratch4));
+ __ li(v0, Operand(Smi::FromInt(GREATER)));
+ __ Branch(&ret, gt, scratch2, Operand(scratch4));
+ __ li(v0, Operand(Smi::FromInt(LESS)));
+ __ bind(&ret);
+ __ Ret();
+}
+
+
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* chars_not_equal) {
+ // Change index to run from -length to -1 by adding length to string
+ // start. This means that loop ends when index reaches zero, which
+ // doesn't need an additional compare.
+ __ SmiUntag(length);
+ __ Addu(scratch1, length,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(left, left, Operand(scratch1));
+ __ Addu(right, right, Operand(scratch1));
+ __ Subu(length, zero_reg, length);
+ Register index = length; // index = -length;
+
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ __ Addu(scratch3, left, index);
+ __ lbu(scratch1, MemOperand(scratch3));
+ __ Addu(scratch3, right, index);
+ __ lbu(scratch2, MemOperand(scratch3));
+ __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
+ __ Addu(index, index, 1);
+ __ Branch(&loop, ne, index, Operand(zero_reg));
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ Counters* counters = masm->isolate()->counters();
+
+ // Stack frame on entry.
+ // sp[0]: right string
+ // sp[4]: left string
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
+ __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
+
+ Label not_same;
+ __ Branch(&not_same, ne, a0, Operand(a1));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&not_same);
+
+ // Check that both objects are sequential ASCII strings.
+ __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
+
+ // Compare flat ASCII strings natively. Remove arguments from stack first.
+ __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+ Label string_add_runtime, call_builtin;
+ Builtins::JavaScript builtin_id = Builtins::ADD;
+
+ Counters* counters = masm->isolate()->counters();
+
+ // Stack on entry:
+ // sp[0]: second argument (right).
+ // sp[4]: first argument (left).
+
+ // Load the two arguments.
+ __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
+ __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
+
+ // Make sure that both arguments are strings if not known in advance.
+ if (flags_ == NO_STRING_ADD_FLAGS) {
+ __ JumpIfEitherSmi(a0, a1, &string_add_runtime);
+ // Load instance types.
+ __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kStringTag == 0);
+ // If either is not a string, go to runtime.
+ __ Or(t4, t0, Operand(t1));
+ __ And(t4, t4, Operand(kIsNotStringMask));
+ __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
+ } else {
+ // Here at least one of the arguments is definitely a string.
+ // We convert the one that is not known to be a string.
+ if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
+ GenerateConvertArgument(
+ masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
+ GenerateConvertArgument(
+ masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
+ }
+ }
+
+ // Both arguments are strings.
+ // a0: first string
+ // a1: second string
+ // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ {
+ Label strings_not_empty;
+ // Check if either of the strings are empty. In that case return the other.
+ // These tests use a zero-length check on the string length, which is
+ // a Smi.
+ // Assert that Smi::FromInt(0) is really 0.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT(Smi::FromInt(0) == 0);
+ __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
+ __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
+ __ mov(v0, a0); // Assume we'll return first string (from a0).
+ __ movz(v0, a1, a2); // If first is empty, return second (from a1).
+ __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
+ __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
+ __ and_(t4, t4, t5); // Branch if both strings were non-empty.
+ __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
+
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&strings_not_empty);
+ }
+
+ // Untag both string-lengths.
+ __ sra(a2, a2, kSmiTagSize);
+ __ sra(a3, a3, kSmiTagSize);
+
+ // Both strings are non-empty.
+ // a0: first string
+ // a1: second string
+ // a2: length of first string
+ // a3: length of second string
+ // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // Look at the length of the result of adding the two strings.
+ Label string_add_flat_result, longer_than_two;
+ // Adding two lengths can't overflow.
+ STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
+ __ Addu(t2, a2, Operand(a3));
+ // Use the symbol table when adding two one character strings, as it
+ // helps later optimizations to return a symbol here.
+ __ Branch(&longer_than_two, ne, t2, Operand(2));
+
+ // Check that both strings are non-external ASCII strings.
+ if (flags_ != NO_STRING_ADD_FLAGS) {
+ __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ }
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
+ &string_add_runtime);
+
+ // Get the two characters forming the sub string.
+ __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
+ __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
+
+ // Try to lookup two character string in symbol table. If it is not found
+ // just allocate a new one.
+ Label make_two_character_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, a2, a3, t2, t3, t0, t1, t4, &make_two_character_string);
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&make_two_character_string);
+ // Resulting string has length 2 and first chars of two strings
+ // are combined into single halfword in a2 register.
+ // So we can fill resulting string without two loops by a single
+ // halfword store instruction (which assumes that processor is
+ // in a little endian mode).
+ __ li(t2, Operand(2));
+ __ AllocateAsciiString(v0, t2, t0, t1, t4, &string_add_runtime);
+ __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&longer_than_two);
+ // Check if resulting string will be flat.
+ __ Branch(&string_add_flat_result, lt, t2,
+ Operand(String::kMinNonFlatLength));
+ // Handle exceptionally long strings in the runtime system.
+ STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
+ ASSERT(IsPowerOf2(String::kMaxLength + 1));
+ // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
+ __ Branch(&string_add_runtime, hs, t2, Operand(String::kMaxLength + 1));
+
+ // If result is not supposed to be flat, allocate a cons string object.
+ // If both strings are ASCII the result is an ASCII cons string.
+ if (flags_ != NO_STRING_ADD_FLAGS) {
+ __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ }
+ Label non_ascii, allocated, ascii_data;
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ // Branch to non_ascii if either string-encoding field is zero (non-ascii).
+ __ And(t4, t0, Operand(t1));
+ __ And(t4, t4, Operand(kStringEncodingMask));
+ __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
+
+ // Allocate an ASCII cons string.
+ __ bind(&ascii_data);
+ __ AllocateAsciiConsString(t3, t2, t0, t1, &string_add_runtime);
+ __ bind(&allocated);
+ // Fill the fields of the cons string.
+ __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
+ __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
+ __ mov(v0, t3);
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ASCII characters.
+ // t0: first instance type.
+ // t1: second instance type.
+ // Branch to ascii_data if _both_ instances have kAsciiDataHintMask set.
+ __ And(at, t0, Operand(kAsciiDataHintMask));
+ __ and_(at, at, t1);
+ __ Branch(&ascii_data, ne, at, Operand(zero_reg));
+
+ __ xor_(t0, t0, t1);
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+
+ // Allocate a two byte cons string.
+ __ AllocateTwoByteConsString(t3, t2, t0, t1, &string_add_runtime);
+ __ Branch(&allocated);
+
+ // Handle creating a flat result. First check that both strings are
+ // sequential and that they have the same encoding.
+ // a0: first string
+ // a1: second string
+ // a2: length of first string
+ // a3: length of second string
+ // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // t2: sum of lengths.
+ __ bind(&string_add_flat_result);
+ if (flags_ != NO_STRING_ADD_FLAGS) {
+ __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ }
+ // Check that both strings are sequential, meaning that we
+ // branch to runtime if either string tag is non-zero.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ Or(t4, t0, Operand(t1));
+ __ And(t4, t4, Operand(kStringRepresentationMask));
+ __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
+
+ // Now check if both strings have the same encoding (ASCII/Two-byte).
+ // a0: first string
+ // a1: second string
+ // a2: length of first string
+ // a3: length of second string
+ // t0: first string instance type
+ // t1: second string instance type
+ // t2: sum of lengths.
+ Label non_ascii_string_add_flat_result;
+ ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
+ __ xor_(t3, t1, t0);
+ __ And(t3, t3, Operand(kStringEncodingMask));
+ __ Branch(&string_add_runtime, ne, t3, Operand(zero_reg));
+ // And see if it's ASCII (0) or two-byte (1).
+ __ And(t3, t0, Operand(kStringEncodingMask));
+ __ Branch(&non_ascii_string_add_flat_result, eq, t3, Operand(zero_reg));
+
+ // Both strings are sequential ASCII strings. We also know that they are
+ // short (since the sum of the lengths is less than kMinNonFlatLength).
+ // t2: length of resulting flat string
+ __ AllocateAsciiString(t3, t2, t0, t1, t4, &string_add_runtime);
+ // Locate first character of result.
+ __ Addu(t2, t3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ Addu(a0, a0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // a0: first character of first string.
+ // a1: second string.
+ // a2: length of first string.
+ // a3: length of second string.
+ // t2: first character of result.
+ // t3: result string.
+ StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, true);
+
+ // Load second argument and locate first character.
+ __ Addu(a1, a1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // a1: first character of second string.
+ // a3: length of second string.
+ // t2: next character of result.
+ // t3: result string.
+ StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
+ __ mov(v0, t3);
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii_string_add_flat_result);
+ // Both strings are sequential two byte strings.
+ // a0: first string.
+ // a1: second string.
+ // a2: length of first string.
+ // a3: length of second string.
+ // t2: sum of length of strings.
+ __ AllocateTwoByteString(t3, t2, t0, t1, t4, &string_add_runtime);
+ // a0: first string.
+ // a1: second string.
+ // a2: length of first string.
+ // a3: length of second string.
+ // t3: result string.
+
+ // Locate first character of result.
+ __ Addu(t2, t3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ Addu(a0, a0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // a0: first character of first string.
+ // a1: second string.
+ // a2: length of first string.
+ // a3: length of second string.
+ // t2: first character of result.
+ // t3: result string.
+ StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, false);
+
+ // Locate first character of second argument.
+ __ Addu(a1, a1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // a1: first character of second string.
+ // a3: length of second string.
+ // t2: next character of result (after copy of first string).
+ // t3: result string.
+ StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
+
+ __ mov(v0, t3);
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ // Just jump to runtime to add the two strings.
+ __ bind(&string_add_runtime);
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+
+ if (call_builtin.is_linked()) {
+ __ bind(&call_builtin);
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ }
+}
+
+
+void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* slow) {
+ // First check if the argument is already a string.
+ Label not_string, done;
+ __ JumpIfSmi(arg, &not_string);
+ __ GetObjectType(arg, scratch1, scratch1);
+ __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
+
+ // Check the number to string cache.
+ Label not_cached;
+ __ bind(&not_string);
+ // Puts the cached result into scratch1.
+ NumberToStringStub::GenerateLookupNumberStringCache(masm,
+ arg,
+ scratch1,
+ scratch2,
+ scratch3,
+ scratch4,
+ false,
+ &not_cached);
+ __ mov(arg, scratch1);
+ __ sw(arg, MemOperand(sp, stack_offset));
+ __ jmp(&done);
+
+ // Check if the argument is a safe string wrapper.
+ __ bind(&not_cached);
+ __ JumpIfSmi(arg, slow);
+ __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1.
+ __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
+ __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
+ __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ __ And(scratch2, scratch2, scratch4);
+ __ Branch(slow, ne, scratch2, Operand(scratch4));
+ __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
+ __ sw(arg, MemOperand(sp, stack_offset));
+
+ __ bind(&done);
+}
+
+
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::SMIS);
+ Label miss;
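+ // The OR of two smis is a smi; a set low (tag) bit means at least one
+ // operand is a heap object.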
+ __ Or(a2, a1, a0);
+ __ JumpIfNotSmi(a2, &miss);
+
+ if (GetCondition() == eq) {
+ // For equality we do not care about the sign of the result.
+ __ Subu(v0, a0, a1);
+ } else {
+ // Untag before subtracting to avoid handling overflow.
+ __ SmiUntag(a1);
+ __ SmiUntag(a0);
+ __ Subu(v0, a1, a0);
+ }
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+
+ Label generic_stub;
+ Label unordered;
+ Label miss;
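+ // The AND of the two tagged values is a smi only if at least one operand
+ // is a smi; that case is left to the generic stub.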
+ __ And(a2, a1, Operand(a0));
+ __ JumpIfSmi(a2, &generic_stub);
+
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
+
+ // Inline the double comparison and fall back to the general compare
+ // stub if NaN is involved or FPU is unsupported.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+
+ // Load left and right operand.
+ __ Subu(a2, a1, Operand(kHeapObjectTag));
+ __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Subu(a2, a0, Operand(kHeapObjectTag));
+ __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+
+ Label fpu_eq, fpu_lt, fpu_gt;
+ // Compare operands (test if unordered).
+ __ c(UN, D, f0, f2);
+ // Don't base result on status bits when a NaN is involved.
+ __ bc1t(&unordered);
+ __ nop();
+
+ // Test if equal.
+ __ c(EQ, D, f0, f2);
+ __ bc1t(&fpu_eq);
+ __ nop();
+
+ // Test if unordered or less (unordered case is already handled).
+ __ c(ULT, D, f0, f2);
+ __ bc1t(&fpu_lt);
+ __ nop();
+
+ // Otherwise it's greater.
+ __ bc1f(&fpu_gt);
+ __ nop();
+
+ // Return a result of -1, 0, or 1.
+ __ bind(&fpu_eq);
+ __ li(v0, Operand(EQUAL));
+ __ Ret();
+
+ __ bind(&fpu_lt);
+ __ li(v0, Operand(LESS));
+ __ Ret();
+
+ __ bind(&fpu_gt);
+ __ li(v0, Operand(GREATER));
+ __ Ret();
+
+ __ bind(&unordered);
+ }
+
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
+ __ bind(&generic_stub);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::SYMBOLS);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = a1;
+ Register right = a0;
+ Register tmp1 = a2;
+ Register tmp2 = a3;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are symbols.
+ __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ And(tmp1, tmp1, Operand(tmp2));
+ __ And(tmp1, tmp1, kIsSymbolMask);
+ __ Branch(&miss, eq, tmp1, Operand(zero_reg));
+ // Make sure a0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(a0));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(v0, right);
+ // Symbols are compared by identity.
+ __ Ret(ne, left, Operand(right));
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::STRINGS);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = a1;
+ Register right = a0;
+ Register tmp1 = a2;
+ Register tmp2 = a3;
+ Register tmp3 = t0;
+ Register tmp4 = t1;
+ Register tmp5 = t2;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are strings. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ Or(tmp3, tmp1, tmp2);
+ __ And(tmp5, tmp3, Operand(kIsNotStringMask));
+ __ Branch(&miss, ne, tmp5, Operand(zero_reg));
+
+ // Fast check for identical strings.
+ Label left_ne_right;
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
+ __ mov(v0, zero_reg); // In the delay slot.
+ __ Ret();
+ __ bind(&left_ne_right);
+
+ // Handle not identical strings.
+
+ // Check that both strings are symbols. If they are, we're done
+ // because we already know they are not identical.
+ ASSERT(GetCondition() == eq);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ And(tmp3, tmp1, Operand(tmp2));
+ __ And(tmp5, tmp3, Operand(kIsSymbolMask));
+ Label is_symbol;
+ __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
+ __ mov(v0, a0); // In the delay slot.
+ // Make sure a0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(a0));
+ __ Ret();
+ __ bind(&is_symbol);
+
+ // Check that both strings are sequential ASCII.
+ Label runtime;
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
+ &runtime);
+
+ // Compare flat ASCII strings. Returns when done.
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, left, right, tmp1, tmp2, tmp3);
+
+ // Handle more complex cases in runtime.
+ __ bind(&runtime);
+ __ Push(left, right);
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::OBJECTS);
+ Label miss;
+ __ And(a2, a1, Operand(a0));
+ __ JumpIfSmi(a2, &miss);
+
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+
+ ASSERT(GetCondition() == eq);
+ __ Subu(v0, a0, Operand(a1));
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+ __ Push(a1, a0);
+ __ push(ra);
+
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
+ masm->isolate());
+ __ EnterInternalFrame();
+ __ Push(a1, a0);
+ __ li(t0, Operand(Smi::FromInt(op_)));
+ __ push(t0);
+ __ CallExternalReference(miss, 3);
+ __ LeaveInternalFrame();
+ // Compute the entry point of the rewritten stub.
+ __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Restore registers.
+ __ pop(ra);
+ __ pop(a0);
+ __ pop(a1);
+ __ Jump(a2);
+}
+
+
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ // No need to pop or drop anything; LeaveExitFrame will restore the old
+ // stack, thus dropping the allocated space for the return value.
+ // The saved ra is after the reserved stack space for the 4 args.
+ __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
+
+ if (FLAG_debug_code && EnableSlowAsserts()) {
+ // In case of an error the return address may point to a memory area
+ // filled with kZapValue by the GC.
+ // Dereference the address and check for this.
+ __ lw(t0, MemOperand(t9));
+ __ Assert(ne, "Received invalid return address.", t0,
+ Operand(reinterpret_cast<uint32_t>(kZapValue)));
+ }
+ __ Jump(t9);
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ ExternalReference function) {
+ __ li(t9, Operand(function));
+ this->GenerateCall(masm, t9);
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ Register target) {
+ __ Move(t9, target);
+ __ AssertStackIsAligned();
+ // Allocate space for arg slots.
+ __ Subu(sp, sp, kCArgsSlotsSize);
+
+ // Block the trampoline pool through the whole function to make sure the
+ // number of generated instructions is constant.
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+
+ // We need to get the current 'pc' value, which is not available on MIPS.
+ Label find_ra;
+ masm->bal(&find_ra); // ra = pc + 8.
+ masm->nop(); // Branch delay slot nop.
+ masm->bind(&find_ra);
+
+ const int kNumInstructionsToJump = 6;
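+ // Advance ra past the next kNumInstructionsToJump instructions so the
+ // saved return address points just after the Jump below; the ASSERT_EQ
+ // at the end of this function verifies the count.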
+ masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
+ // Push return address (accessible to GC through exit frame pc).
+ // This spot for ra was reserved in EnterExitFrame.
+ masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
+ masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
+ RelocInfo::CODE_TARGET), true);
+ // Call the function.
+ masm->Jump(t9);
+ // Make sure the stored 'ra' points to this position.
+ ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
+}
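+// A note on the return-address arithmetic above: bal sets ra = pc + 8,
+// which is exactly the address of find_ra, so after
+//   addiu ra, ra, kNumInstructionsToJump * kPointerSize
+// ra points at the instruction following masm->Jump(t9) (6 instructions:
+// addiu, sw, the two-instruction li, jr, and its delay-slot nop). That
+// value is stored in the exit frame so the GC sees a valid return pc,
+// while the ra register itself is pointed at this stub's own code so the
+// C callee returns through DirectCEntryStub::Generate above.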
+
+
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ String* name,
+ Register scratch0) {
+  // If names of slots in the range from 1 to kProbes - 1 for the hash value
+  // are not equal to the name, and the kProbes-th slot is not used (its name
+  // is the undefined value), then the hash table is guaranteed not to
+  // contain the property. This holds even if some slots represent deleted
+  // properties (their names are the null value).
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // scratch0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = scratch0;
+ // Capacity is smi 2^n.
+ __ lw(index, FieldMemOperand(properties, kCapacityOffset));
+ __ Subu(index, index, Operand(1));
+ __ And(index, index, Operand(
+ Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ // index *= 3.
+ __ mov(at, index);
+ __ sll(index, index, 1);
+ __ Addu(index, index, at);
+
+ Register entity_name = scratch0;
+ // Having undefined at this place means the name is not contained.
+ ASSERT_EQ(kSmiTagSize, 1);
+ Register tmp = properties;
+
+ __ sll(scratch0, index, 1);
+ __ Addu(tmp, properties, scratch0);
+ __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+ ASSERT(!tmp.is(entity_name));
+ __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
+ __ Branch(done, eq, entity_name, Operand(tmp));
+
+ if (i != kInlinedProbes - 1) {
+      // Stop if the property is found.
+ __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
+
+ // Check if the entry name is not a symbol.
+ __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ lbu(entity_name,
+ FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ And(scratch0, entity_name, Operand(kIsSymbolMask));
+ __ Branch(miss, eq, scratch0, Operand(zero_reg));
+
+ // Restore the properties.
+ __ lw(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ }
+ }
+
+ const int spill_mask =
+ (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
+ a2.bit() | a1.bit() | a0.bit());
+
+ __ MultiPush(spill_mask);
+ __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ li(a1, Operand(Handle<String>(name)));
+ StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ MaybeObject* result = masm->TryCallStub(&stub);
+ if (result->IsFailure()) return result;
+ __ MultiPop(spill_mask);
+
+ __ Branch(done, eq, v0, Operand(zero_reg));
+ __ Branch(miss, ne, v0, Operand(zero_reg));
+ return result;
+}
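+// The probes above follow the dictionary's quadratic probing scheme:
+// probe i inspects slot (hash + i + i * i) & mask, i.e. offsets
+// 0, 2, 6, 12, ... from the home bucket, so the kInlinedProbes unrolled
+// checks cover the first four slots before falling back to the stub call.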
+
+
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+// If lookup was successful |scratch2| will be equal to elements + 4 * index.
+void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2) {
+ // Assert that name contains a string.
+ if (FLAG_debug_code) __ AbortIfNotString(name);
+
+ // Compute the capacity mask.
+ __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
+ __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
+ __ Subu(scratch1, scratch1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up. Measurements done on Gmail indicate that 2 probes
+ // cover ~93% of loads from dictionaries.
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
+ if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right
+      // shifting the hash in a separate instruction. The value hash +
+      // i + i * i is right shifted by the following And instruction.
+ ASSERT(StringDictionary::GetProbeOffset(i) <
+ 1 << (32 - String::kHashFieldOffset));
+ __ Addu(scratch2, scratch2, Operand(
+ StringDictionary::GetProbeOffset(i) << String::kHashShift));
+ }
+ __ srl(scratch2, scratch2, String::kHashShift);
+ __ And(scratch2, scratch1, scratch2);
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ // scratch2 = scratch2 * 3.
+
+ __ mov(at, scratch2);
+ __ sll(scratch2, scratch2, 1);
+ __ Addu(scratch2, scratch2, at);
+
+ // Check if the key is identical to the name.
+ __ sll(at, scratch2, 2);
+ __ Addu(scratch2, elements, at);
+ __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
+ __ Branch(done, eq, name, Operand(at));
+ }
+
+ const int spill_mask =
+ (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
+ a3.bit() | a2.bit() | a1.bit() | a0.bit()) &
+ ~(scratch1.bit() | scratch2.bit());
+
+ __ MultiPush(spill_mask);
+ __ Move(a0, elements);
+ __ Move(a1, name);
+ StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ __ CallStub(&stub);
+ __ mov(scratch2, a2);
+ __ MultiPop(spill_mask);
+
+ __ Branch(done, ne, v0, Operand(zero_reg));
+ __ Branch(miss, eq, v0, Operand(zero_reg));
+}
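+// The shift-and-add sequence above is strength reduction for the entry
+// size scaling: with kEntrySize == 3, scratch2 * 3 == (scratch2 << 1) +
+// scratch2, e.g. an index of 5 becomes (5 << 1) + 5 = 15 words; the
+// subsequent left shift by 2 turns that word index into a byte offset
+// from elements.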
+
+
+void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // Registers (assigned below):
+  //  dictionary (a0): StringDictionary to probe.
+  //  key (a1): the key to look up.
+  //  index (a2): will hold the index of the entry if the lookup succeeds.
+  // Returns:
+  //  result (v0): zero if the lookup failed, non-zero otherwise.
+
+ Register result = v0;
+ Register dictionary = a0;
+ Register key = a1;
+ Register index = a2;
+ Register mask = a3;
+ Register hash = t0;
+ Register undefined = t1;
+ Register entry_key = t2;
+
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+ __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
+ __ sra(mask, mask, kSmiTagSize);
+ __ Subu(mask, mask, Operand(1));
+
+ __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
+
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ // Capacity is smi 2^n.
+ if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right
+      // shifting the hash in a separate instruction. The value hash +
+      // i + i * i is right shifted by the following And instruction.
+ ASSERT(StringDictionary::GetProbeOffset(i) <
+ 1 << (32 - String::kHashFieldOffset));
+ __ Addu(index, hash, Operand(
+ StringDictionary::GetProbeOffset(i) << String::kHashShift));
+ } else {
+ __ mov(index, hash);
+ }
+ __ srl(index, index, String::kHashShift);
+ __ And(index, mask, index);
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ // index *= 3.
+ __ mov(at, index);
+ __ sll(index, index, 1);
+ __ Addu(index, index, at);
+
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ sll(index, index, 2);
+ __ Addu(index, index, dictionary);
+ __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+ // Having undefined at this place means the name is not contained.
+ __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
+
+    // Stop if the property is found.
+ __ Branch(&in_dictionary, eq, entry_key, Operand(key));
+
+ if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ // Check if the entry name is not a symbol.
+ __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+ __ lbu(entry_key,
+ FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+ __ And(result, entry_key, Operand(kIsSymbolMask));
+ __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
+ }
+ }
+
+ __ bind(&maybe_in_dictionary);
+ // If we are doing negative lookup then probing failure should be
+ // treated as a lookup success. For positive lookup probing failure
+ // should be treated as lookup failure.
+ if (mode_ == POSITIVE_LOOKUP) {
+ __ mov(result, zero_reg);
+ __ Ret();
+ }
+
+ __ bind(&in_dictionary);
+ __ li(result, 1);
+ __ Ret();
+
+ __ bind(&not_in_dictionary);
+ __ mov(result, zero_reg);
+ __ Ret();
+}
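+// Note the fall-through above: in NEGATIVE_LOOKUP mode maybe_in_dictionary
+// continues into in_dictionary and returns 1, so an inconclusive probe is
+// conservatively reported as "possibly present" and the caller takes its
+// miss path (compare the v0 checks at the end of GenerateNegativeLookup).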
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
new file mode 100644
index 000000000..e2323c174
--- /dev/null
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -0,0 +1,668 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_CODE_STUBS_MIPS_H_
+#define V8_MIPS_CODE_STUBS_MIPS_H_
+
+#include "ic-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+ enum ArgumentType {
+ TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
+ UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
+ };
+
+ TranscendentalCacheStub(TranscendentalCache::Type type,
+ ArgumentType argument_type)
+ : type_(type), argument_type_(argument_type) { }
+ void Generate(MacroAssembler* masm);
+ private:
+ TranscendentalCache::Type type_;
+ ArgumentType argument_type_;
+ void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
+
+ Major MajorKey() { return TranscendentalCache; }
+ int MinorKey() { return type_ | argument_type_; }
+ Runtime::FunctionId RuntimeFunction();
+};
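+// The minor key above packs the cache type into the low
+// TranscendentalCache::kTranscendentalTypeBits bits and the TAGGED/UNTAGGED
+// flag one bit higher, so the same transcendental function with tagged and
+// untagged arguments compiles to two distinct stubs.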
+
+
+class UnaryOpStub: public CodeStub {
+ public:
+ UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ operand_type_(UnaryOpIC::UNINITIALIZED),
+ name_(NULL) {
+ }
+
+ UnaryOpStub(
+ int key,
+ UnaryOpIC::TypeInfo operand_type)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ operand_type_(operand_type),
+ name_(NULL) {
+ }
+
+ private:
+ Token::Value op_;
+ UnaryOverwriteMode mode_;
+
+ // Operand type information determined at runtime.
+ UnaryOpIC::TypeInfo operand_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("UnaryOpStub %d (op %s), "
+ "(mode %d, runtime_type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ UnaryOpIC::GetName(operand_type_));
+ }
+#endif
+
+ class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+ class OpBits: public BitField<Token::Value, 1, 7> {};
+ class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
+
+ Major MajorKey() { return UnaryOp; }
+ int MinorKey() {
+ return ModeBits::encode(mode_)
+ | OpBits::encode(op_)
+ | OperandTypeInfoBits::encode(operand_type_);
+ }
+
+  // Note: A lot of the helper functions below will vanish when we use
+  // virtual functions instead of switches more often.
+ void Generate(MacroAssembler* masm);
+
+ void GenerateTypeTransition(MacroAssembler* masm);
+
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateSmiStubSub(MacroAssembler* masm);
+ void GenerateSmiStubBitNot(MacroAssembler* masm);
+ void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
+ void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
+
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateHeapNumberStubSub(MacroAssembler* masm);
+ void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+ void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+ void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
+
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateGenericStubSub(MacroAssembler* masm);
+ void GenerateGenericStubBitNot(MacroAssembler* masm);
+ void GenerateGenericCodeFallback(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return UnaryOpIC::ToState(operand_type_);
+ }
+
+ virtual void FinishCode(Code* code) {
+ code->set_unary_op_type(operand_type_);
+ }
+};
+
+
+class BinaryOpStub: public CodeStub {
+ public:
+ BinaryOpStub(Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ operands_type_(BinaryOpIC::UNINITIALIZED),
+ result_type_(BinaryOpIC::UNINITIALIZED),
+ name_(NULL) {
+ use_fpu_ = CpuFeatures::IsSupported(FPU);
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ BinaryOpStub(
+ int key,
+ BinaryOpIC::TypeInfo operands_type,
+ BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ use_fpu_(FPUBits::decode(key)),
+ operands_type_(operands_type),
+ result_type_(result_type),
+ name_(NULL) { }
+
+ private:
+ enum SmiCodeGenerateHeapNumberResults {
+ ALLOW_HEAPNUMBER_RESULTS,
+ NO_HEAPNUMBER_RESULTS
+ };
+
+ Token::Value op_;
+ OverwriteMode mode_;
+ bool use_fpu_;
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo operands_type_;
+ BinaryOpIC::TypeInfo result_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("BinaryOpStub %d (op %s), "
+ "(mode %d, runtime_type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ BinaryOpIC::GetName(operands_type_));
+ }
+#endif
+
+ // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class FPUBits: public BitField<bool, 9, 1> {};
+ class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+ class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
+
+ Major MajorKey() { return BinaryOp; }
+ int MinorKey() {
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | FPUBits::encode(use_fpu_)
+ | OperandTypeInfoBits::encode(operands_type_)
+ | ResultTypeInfoBits::encode(result_type_);
+ }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+ void GenerateSmiSmiOperation(MacroAssembler* masm);
+ void GenerateFPOperation(MacroAssembler* masm,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required);
+ void GenerateSmiCode(MacroAssembler* masm,
+ Label* use_runtime,
+ Label* gc_required,
+ SmiCodeGenerateHeapNumberResults heapnumber_results);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateUninitializedStub(MacroAssembler* masm);
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateInt32Stub(MacroAssembler* masm);
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateOddballStub(MacroAssembler* masm);
+ void GenerateStringStub(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateAddStrings(MacroAssembler* masm);
+ void GenerateCallRuntime(MacroAssembler* masm);
+
+ void GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+ void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(operands_type_);
+ }
+
+ virtual void FinishCode(Code* code) {
+ code->set_binary_op_type(operands_type_);
+ code->set_binary_op_result_type(result_type_);
+ }
+
+ friend class CodeGenerator;
+};
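+// Reading the RRRTTTVOOOOOOOMM layout above from the low bits up:
+// MM = ModeBits (bits 0-1), OOOOOOO = OpBits (bits 2-8), V = FPUBits
+// (bit 9), TTT = OperandTypeInfoBits (bits 10-12), RRR = ResultTypeInfoBits
+// (bits 13-15). MinorKey() ORs the encoded fields together and the
+// key-taking constructor recovers op_, mode_ and use_fpu_ with the matching
+// BitField::decode calls.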
+
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+ NO_STRING_ADD_FLAGS = 0,
+ // Omit left string check in stub (left is definitely a string).
+ NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+ // Omit right string check in stub (right is definitely a string).
+ NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+ // Omit both string checks in stub.
+ NO_STRING_CHECK_IN_STUB =
+ NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
+};
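+// The flags combine as a bitmask: NO_STRING_CHECK_IN_STUB is
+// (1 << 0) | (1 << 1) == 3, i.e. both operand checks omitted.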
+
+
+class StringAddStub: public CodeStub {
+ public:
+ explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
+
+ private:
+ Major MajorKey() { return StringAdd; }
+ int MinorKey() { return flags_; }
+
+ void Generate(MacroAssembler* masm);
+
+ void GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* slow);
+
+ const StringAddFlags flags_;
+};
+
+
+class SubStringStub: public CodeStub {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public CodeStub {
+ public:
+ StringCompareStub() { }
+
+  // Compares two flat ASCII strings and returns the result in v0.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+  // Compares two flat ASCII strings for equality and returns the result
+  // in v0.
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ private:
+ virtual Major MajorKey() { return StringCompare; }
+ virtual int MinorKey() { return 0; }
+ virtual void Generate(MacroAssembler* masm);
+
+ static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* chars_not_equal);
+};
+
+
+// This stub can convert a signed int32 to a heap number (double). It does
+// not work for int32s that are in Smi range! No GC occurs during this stub
+// so you don't have to set up the frame.
+class WriteInt32ToHeapNumberStub : public CodeStub {
+ public:
+ WriteInt32ToHeapNumberStub(Register the_int,
+ Register the_heap_number,
+ Register scratch,
+ Register scratch2)
+ : the_int_(the_int),
+ the_heap_number_(the_heap_number),
+ scratch_(scratch),
+ sign_(scratch2) { }
+
+ private:
+ Register the_int_;
+ Register the_heap_number_;
+ Register scratch_;
+ Register sign_;
+
+ // Minor key encoding in 16 bits.
+ class IntRegisterBits: public BitField<int, 0, 4> {};
+ class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
+ class ScratchRegisterBits: public BitField<int, 8, 4> {};
+
+ Major MajorKey() { return WriteInt32ToHeapNumber; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return IntRegisterBits::encode(the_int_.code())
+ | HeapNumberRegisterBits::encode(the_heap_number_.code())
+ | ScratchRegisterBits::encode(scratch_.code());
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
+
+#ifdef DEBUG
+ void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
+#endif
+};
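+// Note that the minor key above encodes only three of the four registers;
+// sign_ (the scratch2 constructor argument) does not participate, so two
+// instantiations differing only in that register would map to the same key.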
+
+
+class NumberToStringStub: public CodeStub {
+ public:
+ NumberToStringStub() { }
+
+  // Generates code to do a lookup in the number string cache. If the number
+  // in the object register is found in the cache, the generated code falls
+  // through with the result in the result register. The object and result
+  // registers can be the same. If the number is not found in the cache, the
+  // code jumps to the label not_found, leaving the content of the object
+  // register unchanged.
+ static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ bool object_is_smi,
+ Label* not_found);
+
+ private:
+ Major MajorKey() { return NumberToString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "NumberToStringStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("NumberToStringStub\n");
+ }
+#endif
+};
+
+
+// Enter C code from generated RegExp code in a way that allows
+// the C code to fix the return address in case of a GC.
+// Currently only needed on ARM and MIPS.
+class RegExpCEntryStub: public CodeStub {
+ public:
+ RegExpCEntryStub() {}
+ virtual ~RegExpCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return RegExpCEntry; }
+ int MinorKey() { return 0; }
+
+ bool NeedsImmovableCode() { return true; }
+
+ const char* GetName() { return "RegExpCEntryStub"; }
+};
+
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of a compacting GC (which can move code objects) we need
+// to keep the code which called into native code pinned in memory. Currently
+// the simplest approach is to generate such a stub early enough that it can
+// never be moved by the GC.
+class DirectCEntryStub: public CodeStub {
+ public:
+ DirectCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+ void GenerateCall(MacroAssembler* masm,
+ ExternalReference function);
+ void GenerateCall(MacroAssembler* masm, Register target);
+
+ private:
+ Major MajorKey() { return DirectCEntry; }
+ int MinorKey() { return 0; }
+
+ bool NeedsImmovableCode() { return true; }
+
+ const char* GetName() { return "DirectCEntryStub"; }
+};
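+// Typical flow (sketch): GenerateCall saves a GC-visible return address in
+// the exit frame and points ra at this stub, so the C function returns into
+// Generate(), which reloads the (possibly relocated) return address from the
+// frame and jumps back to the generated code.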
+
+class FloatingPointHelper : public AllStatic {
+ public:
+ enum Destination {
+ kFPURegisters,
+ kCoreRegisters
+ };
+
+
+  // Loads smis from a0 and a1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in f14 and f12 or in a2/a3 and a0/a1, respectively. If the
+  // destination is the floating point registers, FPU must be supported. If
+  // core registers are requested when FPU is supported, f12 and f14 will be
+  // scratched.
+ static void LoadSmis(MacroAssembler* masm,
+ Destination destination,
+ Register scratch1,
+ Register scratch2);
+
+  // Loads objects from a0 and a1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in f14 and f12 or in a2/a3 and a0/a1, respectively. If the
+  // destination is the floating point registers, FPU must be supported. If
+  // core registers are requested when FPU is supported, f12 and f14 will
+  // still be scratched. If either a0 or a1 is not a number (neither a smi
+  // nor a heap number object) control jumps to the not_number label with a0
+  // and a1 intact.
+ static void LoadOperands(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+
+  // Converts the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMA-262 section 9.5: the value is truncated
+  // and brought into the range -2^31 .. +2^31 - 1.
+ static void ConvertNumberToInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ FPURegister double_scratch,
+ Label* not_int32);
+
+ // Converts the integer (untagged smi) in |int_scratch| to a double, storing
+ // the result either in |double_dst| or |dst2:dst1|, depending on
+ // |destination|.
+ // Warning: The value in |int_scratch| will be changed in the process!
+ static void ConvertIntToDouble(MacroAssembler* masm,
+ Register int_scratch,
+ Destination destination,
+ FPURegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register scratch2,
+ FPURegister single_scratch);
+
+  // Loads the number from object into double_dst in the double format.
+  // Control will jump to not_int32 if the value cannot be exactly
+  // represented by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be loaded.
+ static void LoadNumberAsInt32Double(MacroAssembler* masm,
+ Register object,
+ Destination destination,
+ FPURegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister single_scratch,
+ Label* not_int32);
+
+  // Loads the number from object into dst as a 32-bit integer.
+  // Control will jump to not_int32 if the object cannot be exactly
+  // represented by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be converted.
+  // scratch3 is not used when FPU is supported.
+ static void LoadNumberAsInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ FPURegister double_scratch,
+ Label* not_int32);
+
+  // Generates non-FPU code to check if a double can be exactly represented
+  // by a 32-bit integer. This does not check for 0 or -0, which need
+  // to be checked for separately.
+  // Control jumps to not_int32 if the value is not a 32-bit integer, and
+  // falls through otherwise.
+  // src1 and src2 will be clobbered.
+  //
+  // Expected input:
+  // - src1: higher (exponent) part of the double value.
+  // - src2: lower (mantissa) part of the double value.
+  // Output status:
+  // - dst: the 32 higher bits of the mantissa (mantissa[51:20]).
+  // - src2: contains 1.
+  // - other registers are clobbered.
+ static void DoubleIs32BitInteger(MacroAssembler* masm,
+ Register src1,
+ Register src2,
+ Register dst,
+ Register scratch,
+ Label* not_int32);
+
+ // Generates code to call a C function to do a double operation using core
+ // registers. (Used when FPU is not supported.)
+ // This code never falls through, but returns with a heap number containing
+ // the result in v0.
+  // Register heap_number_result must be a heap number in which the
+  // result of the operation will be stored.
+ // Requires the following layout on entry:
+ // a0: Left value (least significant part of mantissa).
+ // a1: Left value (sign, exponent, top of mantissa).
+ // a2: Right value (least significant part of mantissa).
+ // a3: Right value (sign, exponent, top of mantissa).
+ static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch);
+
+ private:
+ static void LoadNumber(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register object,
+ FPURegister dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+};
+
+
+class StringDictionaryLookupStub: public CodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+
+ void Generate(MacroAssembler* masm);
+
+ MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ String* name,
+ Register scratch0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1);
+
+ private:
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ static const int kCapacityOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("StringDictionaryLookupStub\n");
+ }
+#endif
+
+ Major MajorKey() { return StringDictionaryNegativeLookup; }
+
+ int MinorKey() {
+ return LookupModeBits::encode(mode_);
+ }
+
+ class LookupModeBits: public BitField<LookupMode, 0, 1> {};
+
+ LookupMode mode_;
+};
+
+
+} } // namespace v8::internal
+
+#endif  // V8_MIPS_CODE_STUBS_MIPS_H_
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 79801f07b..4400b643a 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,1413 +25,28 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
#include "v8.h"
#if defined(V8_TARGET_ARCH_MIPS)
-#include "bootstrapper.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "parser.h"
-#include "register-allocator-inl.h"
-#include "runtime.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-
-
+#include "codegen.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
-
-
-
-// -----------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-
-void DeferredCode::SaveRegisters() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void DeferredCode::RestoreRegisters() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// -----------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
- : owner_(owner),
- true_target_(NULL),
- false_target_(NULL),
- previous_(NULL) {
- owner_->set_state(this);
-}
-
-
-CodeGenState::CodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target)
- : owner_(owner),
- true_target_(true_target),
- false_target_(false_target),
- previous_(owner->state()) {
- owner_->set_state(this);
-}
-
-
-CodeGenState::~CodeGenState() {
- ASSERT(owner_->state() == this);
- owner_->set_state(previous_);
-}
-
-
-// -----------------------------------------------------------------------------
-// CodeGenerator implementation
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
- : deferred_(8),
- masm_(masm),
- frame_(NULL),
- allocator_(NULL),
- cc_reg_(cc_always),
- state_(NULL),
- function_return_is_shadowed_(false) {
-}
-
-
-// Calling conventions:
-// fp: caller's frame pointer
-// sp: stack pointer
-// a1: called JS function
-// cp: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
- // Record the position for debugging purposes.
- CodeForFunctionPosition(info->function());
-
- // Initialize state.
- info_ = info;
- ASSERT(allocator_ == NULL);
- RegisterAllocator register_allocator(this);
- allocator_ = &register_allocator;
- ASSERT(frame_ == NULL);
- frame_ = new VirtualFrame();
- cc_reg_ = cc_always;
-
- {
- CodeGenState state(this);
-
- // Registers:
- // a1: called JS function
- // ra: return address
- // fp: caller's frame pointer
- // sp: stack pointer
- // cp: callee's context
- //
- // Stack:
- // arguments
- // receiver
-
- frame_->Enter();
-
- // Allocate space for locals and initialize them.
- frame_->AllocateStackSlots();
-
- // Initialize the function return target.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
- function_return_is_shadowed_ = false;
-
- VirtualFrame::SpilledScope spilled_scope;
- if (scope()->num_heap_slots() > 0) {
- UNIMPLEMENTED_MIPS();
- }
-
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
-
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope()->num_parameters(); i++) {
- UNIMPLEMENTED_MIPS();
- }
- }
-
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in the
- // context.
- if (scope()->arguments() != NULL) {
- UNIMPLEMENTED_MIPS();
- }
-
- // Generate code to 'execute' declarations and initialize functions
- // (source elements). In case of an illegal redeclaration we need to
- // handle that instead of processing the declarations.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ illegal redeclarations");
- scope()->VisitIllegalRedeclaration(this);
- } else {
- Comment cmnt(masm_, "[ declarations");
- ProcessDeclarations(scope()->declarations());
- // Bail out if a stack-overflow exception occurred when processing
- // declarations.
- if (HasStackOverflow()) return;
- }
-
- if (FLAG_trace) {
- UNIMPLEMENTED_MIPS();
- }
-
- // Compile the body of the function in a vanilla state. Don't
- // bother compiling all the code if the scope has an illegal
- // redeclaration.
- if (!scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
- bool is_builtin = Bootstrapper::IsActive();
- bool should_trace =
- is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
- if (should_trace) {
- UNIMPLEMENTED_MIPS();
- }
-#endif
- VisitStatementsAndSpill(info->function()->body());
- }
- }
-
- if (has_valid_frame() || function_return_.is_linked()) {
- if (!function_return_.is_linked()) {
- CodeForReturnPosition(info->function());
- }
- // Registers:
- // v0: result
- // sp: stack pointer
- // fp: frame pointer
- // cp: callee's context
-
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-
- function_return_.Bind();
- if (FLAG_trace) {
- UNIMPLEMENTED_MIPS();
- }
-
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-
- masm_->mov(sp, fp);
- masm_->lw(fp, MemOperand(sp, 0));
- masm_->lw(ra, MemOperand(sp, 4));
- masm_->addiu(sp, sp, 8);
-
- // Here we use masm_-> instead of the __ macro to avoid the code coverage
- // tool from instrumenting as we rely on the code size here.
- // TODO(MIPS): Should we be able to use more than 0x1ffe parameters?
- masm_->addiu(sp, sp, (scope()->num_parameters() + 1) * kPointerSize);
- masm_->Jump(ra);
- // The Jump automatically generates a nop in the branch delay slot.
-
- // Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(kJSReturnSequenceLength,
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
- }
-
- // Code generation state must be reset.
- ASSERT(!has_cc());
- ASSERT(state_ == NULL);
- ASSERT(!function_return_is_shadowed_);
- function_return_.Unuse();
- DeleteFrame();
-
- // Process any deferred code using the register allocator.
- if (!HasStackOverflow()) {
- ProcessDeferred();
- }
-
- allocator_ = NULL;
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ LoadReference");
- Expression* e = ref->expression();
- Property* property = e->AsProperty();
- Variable* var = e->AsVariableProxy()->AsVariable();
-
- if (property != NULL) {
- UNIMPLEMENTED_MIPS();
- } else if (var != NULL) {
- // The expression is a variable proxy that does not rewrite to a
- // property. Global variables are treated as named property references.
- if (var->is_global()) {
- LoadGlobal();
- ref->set_type(Reference::NAMED);
- } else {
- ASSERT(var->slot() != NULL);
- ref->set_type(Reference::SLOT);
- }
- } else {
- UNIMPLEMENTED_MIPS();
- }
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
- VirtualFrame::SpilledScope spilled_scope;
- // Pop a reference from the stack while preserving TOS.
- Comment cmnt(masm_, "[ UnloadReference");
- int size = ref->size();
- if (size > 0) {
- frame_->EmitPop(a0);
- frame_->Drop(size);
- frame_->EmitPush(a0);
- }
- ref->set_unloaded();
-}
-
-
-MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER:
- UNIMPLEMENTED_MIPS();
- return MemOperand(no_reg, 0);
-
- case Slot::LOCAL:
- return frame_->LocalAt(index);
-
- case Slot::CONTEXT: {
- UNIMPLEMENTED_MIPS();
- return MemOperand(no_reg, 0);
- }
-
- default:
- UNREACHABLE();
- return MemOperand(no_reg, 0);
- }
-}
-
-
-// Loads a value on TOS. If it is a boolean value, the result may have been
-// (partially) translated into branches, or it may have set the condition
-// code register. If force_cc is set, the value is forced to set the
-// condition code register and no value is pushed. If the condition code
-// register was set, has_cc() is true and cc_reg_ contains the condition to
-// test for 'true'.
-void CodeGenerator::LoadCondition(Expression* x,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_cc) {
- ASSERT(!has_cc());
- int original_height = frame_->height();
-
- { CodeGenState new_state(this, true_target, false_target);
- Visit(x);
-
- // If we hit a stack overflow, we may not have actually visited
- // the expression. In that case, we ensure that we have a
- // valid-looking frame state because we will continue to generate
- // code as we unwind the C++ stack.
- //
- // It's possible to have both a stack overflow and a valid frame
- // state (eg, a subexpression overflowed, visiting it returned
- // with a dummied frame state, and visiting this expression
- // returned with a normal-looking state).
- if (HasStackOverflow() &&
- has_valid_frame() &&
- !has_cc() &&
- frame_->height() == original_height) {
- true_target->Jump();
- }
- }
- if (force_cc && frame_ != NULL && !has_cc()) {
- // Convert the TOS value to a boolean in the condition code register.
- UNIMPLEMENTED_MIPS();
- }
- ASSERT(!force_cc || !has_valid_frame() || has_cc());
- ASSERT(!has_valid_frame() ||
- (has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-void CodeGenerator::Load(Expression* x) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- JumpTarget true_target;
- JumpTarget false_target;
- LoadCondition(x, &true_target, &false_target, false);
-
- if (has_cc()) {
- UNIMPLEMENTED_MIPS();
- }
-
- if (true_target.is_linked() || false_target.is_linked()) {
- UNIMPLEMENTED_MIPS();
- }
- ASSERT(has_valid_frame());
- ASSERT(!has_cc());
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadGlobal() {
- VirtualFrame::SpilledScope spilled_scope;
- __ lw(a0, GlobalObject());
- frame_->EmitPush(a0);
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- VirtualFrame::SpilledScope spilled_scope;
- if (slot->type() == Slot::LOOKUP) {
- UNIMPLEMENTED_MIPS();
- } else {
- __ lw(a0, SlotOperand(slot, a2));
- frame_->EmitPush(a0);
- if (slot->var()->mode() == Variable::CONST) {
- UNIMPLEMENTED_MIPS();
- }
- }
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- ASSERT(slot != NULL);
- if (slot->type() == Slot::LOOKUP) {
- UNIMPLEMENTED_MIPS();
- } else {
- ASSERT(!slot->var()->is_dynamic());
-
- JumpTarget exit;
- if (init_state == CONST_INIT) {
- UNIMPLEMENTED_MIPS();
- }
-
- // We must execute the store. Storing a variable must keep the
- // (new) value on the stack. This is necessary for compiling
- // assignment expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST because of const declarations which will
- // initialize consts to 'the hole' value and by doing so, end up
- // calling this code. a2 may be loaded with context; used below in
- // RecordWrite.
- frame_->EmitPop(a0);
- __ sw(a0, SlotOperand(slot, a2));
- frame_->EmitPush(a0);
- if (slot->type() == Slot::CONTEXT) {
- UNIMPLEMENTED_MIPS();
- }
- // If we definitely did not jump over the assignment, we do not need
- // to bind the exit label. Doing so can defeat peephole
- // optimization.
- if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
- exit.Bind();
- }
- }
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
- VirtualFrame::SpilledScope spilled_scope;
- for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
- VisitAndSpill(statements->at(i));
- }
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- VirtualFrame::SpilledScope spilled_scope;
- frame_->EmitPush(cp);
- __ li(t0, Operand(pairs));
- frame_->EmitPush(t0);
- __ li(t0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
- frame_->EmitPush(t0);
- frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
- // The result is discarded.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ ExpressionStatement");
- CodeForStatementPosition(node);
- Expression* expression = node->expression();
- expression->MarkAsStatement();
- LoadAndSpill(expression);
- frame_->Drop();
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ ReturnStatement");
-
- CodeForStatementPosition(node);
- LoadAndSpill(node->expression());
- if (function_return_is_shadowed_) {
- frame_->EmitPop(v0);
- function_return_.Jump();
- } else {
- // Pop the result from the frame and prepare the frame for
- // returning thus making it easier to merge.
- frame_->EmitPop(v0);
- frame_->PrepareForReturn();
-
- function_return_.Jump();
- }
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
-void CodeGenerator::VisitConditional(Conditional* node) {
- UNIMPLEMENTED_MIPS();
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterInternalFrame();
}
-void CodeGenerator::VisitSlot(Slot* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Slot");
- LoadFromSlot(node, typeof_state());
- ASSERT(frame_->height() == original_height + 1);
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveInternalFrame();
}
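+
+// BeforeCall/AfterCall bracket a runtime call with an internal frame so the
+// stack stays walkable for the GC. A minimal (hypothetical) use from a stub:
+//
+//   StubRuntimeCallHelper helper;
+//   helper.BeforeCall(masm);   // EnterInternalFrame.
+//   masm->CallCFunction(...);  // The actual call.
+//   helper.AfterCall(masm);    // LeaveInternalFrame.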
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ VariableProxy");
-
- Variable* var = node->var();
- Expression* expr = var->rewrite();
- if (expr != NULL) {
- Visit(expr);
- } else {
- ASSERT(var->is_global());
- Reference ref(this, node);
- ref.GetValueAndSpill();
- }
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Literal");
- __ li(t0, Operand(node->handle()));
- frame_->EmitPush(t0);
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Assignment");
-
- { Reference target(this, node->target());
- if (target.is_illegal()) {
- // Fool the virtual frame into thinking that we left the assignment's
- // value on the frame.
- frame_->EmitPush(zero_reg);
- ASSERT(frame_->height() == original_height + 1);
- return;
- }
-
- if (node->op() == Token::ASSIGN ||
- node->op() == Token::INIT_VAR ||
- node->op() == Token::INIT_CONST) {
- LoadAndSpill(node->value());
- } else {
- UNIMPLEMENTED_MIPS();
- }
-
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- if (var != NULL &&
- (var->mode() == Variable::CONST) &&
- node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
- // Assignment ignored - leave the value on the stack.
- } else {
- CodeForSourcePosition(node->position());
- if (node->op() == Token::INIT_CONST) {
- // Dynamic constant initializations must use the function context
- // and initialize the actual constant declared. Dynamic variable
- // initializations are simply assignments and use SetValue.
- target.SetValue(CONST_INIT);
- } else {
- target.SetValue(NOT_CONST_INIT);
- }
- }
- }
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Call");
-
- Expression* function = node->expression();
- ZoneList<Expression*>* args = node->arguments();
-
- // Standard function call.
- // Check if the function is a variable or a property.
- Variable* var = function->AsVariableProxy()->AsVariable();
- Property* property = function->AsProperty();
-
- // ------------------------------------------------------------------------
- // Fast-case: Use inline caching.
- // ---
- // According to ECMA-262, section 11.2.3, page 44, the function to call
- // must be resolved after the arguments have been evaluated. The IC code
- // automatically handles this by loading the arguments before the function
- // is resolved in cache misses (this also holds for megamorphic calls).
- // ------------------------------------------------------------------------
-
- if (var != NULL && var->is_possibly_eval()) {
- UNIMPLEMENTED_MIPS();
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is global
- // ----------------------------------
-
- int arg_count = args->length();
-
- // We need sp to be 8 bytes aligned when calling the stub.
- __ SetupAlignedCall(t0, arg_count);
-
- // Pass the global object as the receiver and let the IC stub
- // patch the stack to use the global proxy as 'this' in the
- // invoked function.
- LoadGlobal();
-
- // Load the arguments.
- for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
- }
-
- // Setup the receiver register and call the IC initialization code.
- __ li(a2, Operand(var->name()));
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
- CodeForSourcePosition(node->position());
- frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
- arg_count + 1);
- __ ReturnFromAlignedCall();
- __ lw(cp, frame_->Context());
- // Remove the function from the stack.
- frame_->EmitPush(v0);
-
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
- UNIMPLEMENTED_MIPS();
- } else if (property != NULL) {
- UNIMPLEMENTED_MIPS();
- } else {
- UNIMPLEMENTED_MIPS();
- }
-
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// This should generate code that performs a charCodeAt() call or returns
-// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
-// It is not yet implemented on ARM, so it always goes to the slow case.
-void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() { return true; }
-#endif
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-// -----------------------------------------------------------------------------
-// Reference support
-
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
-}
-
-
-Handle<String> Reference::GetName() {
- ASSERT(type_ == NAMED);
- Property* property = expression_->AsProperty();
- if (property == NULL) {
- // Global variable reference treated as a named property reference.
- VariableProxy* proxy = expression_->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- return proxy->name();
- } else {
- Literal* raw_name = property->key()->AsLiteral();
- ASSERT(raw_name != NULL);
- return Handle<String>(String::cast(*raw_name->handle()));
- }
-}
-
-
-void Reference::GetValue() {
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- ASSERT(!cgen_->has_cc());
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- UNIMPLEMENTED_MIPS();
- break;
- }
-
- case NAMED: {
- UNIMPLEMENTED_MIPS();
- break;
- }
-
- case KEYED: {
- UNIMPLEMENTED_MIPS();
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void Reference::SetValue(InitState init_state) {
- ASSERT(!is_illegal());
- ASSERT(!cgen_->has_cc());
- MacroAssembler* masm = cgen_->masm();
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Store to Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
- cgen_->StoreToSlot(slot, init_state);
- cgen_->UnloadReference(this);
- break;
- }
-
- case NAMED: {
- UNIMPLEMENTED_MIPS();
- break;
- }
-
- case KEYED: {
- UNIMPLEMENTED_MIPS();
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-// On entry a0 and a1 are the things to be compared. On exit v0 is 0,
-// positive or negative to indicate the result of the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x765);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- UNIMPLEMENTED_MIPS();
- return Handle<Code>::null();
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x790);
-}
-
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x808);
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x815);
-}
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate) {
- // s0: number of arguments including receiver (C callee-saved)
- // s1: pointer to the first argument (C callee-saved)
- // s2: pointer to builtin function (C callee-saved)
-
- if (do_gc) {
- UNIMPLEMENTED_MIPS();
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth();
- if (always_allocate) {
- UNIMPLEMENTED_MIPS();
- }
-
- // Call C built-in.
- // a0 = argc, a1 = argv
- __ mov(a0, s0);
- __ mov(a1, s1);
-
- __ CallBuiltin(s2);
-
- if (always_allocate) {
- UNIMPLEMENTED_MIPS();
- }
-
- // Check for failure result.
- Label failure_returned;
- ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- __ addiu(a2, v0, 1);
- __ andi(t0, a2, kFailureTagMask);
- __ Branch(eq, &failure_returned, t0, Operand(zero_reg));
-
- // Exit C frame and return.
- // v0:v1: result
- // sp: stack pointer
- // fp: frame pointer
- __ LeaveExitFrame(mode_);
-
- // Check if we should retry or throw exception.
- Label retry;
- __ bind(&failure_returned);
- ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
- __ Branch(eq, &retry, t0, Operand(zero_reg));
-
- // Special handling of out of memory exceptions.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ Branch(eq, throw_out_of_memory_exception,
- v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
-
- // Retrieve the pending exception and clear the variable.
- __ LoadExternalReference(t0, ExternalReference::the_hole_value_location());
- __ lw(a3, MemOperand(t0));
- __ LoadExternalReference(t0,
- ExternalReference(Top::k_pending_exception_address));
- __ lw(v0, MemOperand(t0));
- __ sw(a3, MemOperand(t0));
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ Branch(eq, throw_termination_exception,
- v0, Operand(Factory::termination_exception()));
-
- // Handle normal exception.
- __ b(throw_normal_exception);
- __ nop(); // Branch delay slot nop.
-
- __ bind(&retry); // Pass last failure (v0) as parameter (v0) when retrying.
-}
-
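Aside: the failure check in GenerateCore above works because of the pointer tag layout — a Failure pointer carries all of its low tag bits set, so adding one clears exactly those bits. A minimal standalone sketch of the same arithmetic the addiu/andi pair performs on v0 (tag constants assumed to match v8globals.h):

#include <cassert>
#include <cstdint>

// Assumed V8 tag constants: a failure pointer has 0b11 in its low bits.
const intptr_t kFailureTagSize = 2;
const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;  // 0b11.
const intptr_t kFailureTag = 3;

bool LooksLikeFailure(intptr_t value) {
  // Adding 1 carries out of the low bits exactly when they are all set.
  return ((value + 1) & kFailureTagMask) == 0;
}

int main() {
  assert(((kFailureTag + 1) & kFailureTagMask) == 0);  // The ASSERT above.
  assert(LooksLikeFailure(0x1234 | kFailureTag));      // Failure pointer.
  assert(!LooksLikeFailure(0x1234));                   // Smi/other tag: no.
}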
-void CEntryStub::Generate(MacroAssembler* masm) {
- // Called from JavaScript; parameters are on stack as if calling JS function
- // a0: number of arguments including receiver
- // a1: pointer to builtin function
- // fp: frame pointer (restored after C call)
- // sp: stack pointer (restored as callee's sp after C call)
- // cp: current context (C callee-saved)
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode_, s0, s1, s2);
-
- // s0: number of arguments (C callee-saved)
- // s1: pointer to first argument (C callee-saved)
- // s2: pointer to builtin function (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
-
- __ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
-
- __ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
-}
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
-
- // Registers:
- // a0: entry address
- // a1: function
- // a2: receiver
- // a3: argc
- //
- // Stack:
- // 4 args slots
- // args
-
- // Save callee saved registers on the stack.
- __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit());
-
- // We build an EntryFrame.
- __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ li(t2, Operand(Smi::FromInt(marker)));
- __ li(t1, Operand(Smi::FromInt(marker)));
- __ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
- __ lw(t0, MemOperand(t0));
- __ MultiPush(t0.bit() | t1.bit() | t2.bit() | t3.bit());
-
- // Setup frame pointer for the frame to be pushed.
- __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
-
- // Load argv in s0 register.
- __ lw(s0, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize +
- StandardFrameConstants::kCArgsSlotsSize));
-
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: receiver_pointer
- // a3: argc
- // s0: argv
- //
- // Stack:
- // caller fp |
- // function slot | entry frame
- // context slot |
- // bad fp (0xff...f) |
- // callee saved registers + ra
- // 4 args slots
- // args
-
- // Call a faked try-block that does the invoke.
- __ bal(&invoke);
- __ nop(); // Branch delay slot nop.
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- // Coming in here the fp will be invalid because the PushTryHandler below
- // sets it to 0 to signal the existence of the JSEntry frame.
- __ LoadExternalReference(t0,
- ExternalReference(Top::k_pending_exception_address));
- __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
- __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
- __ b(&exit);
- __ nop(); // Branch delay slot nop.
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the bal(&invoke) above, which
- // restores all kCalleeSaved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Clear any pending exceptions.
- __ LoadExternalReference(t0, ExternalReference::the_hole_value_location());
- __ lw(t1, MemOperand(t0));
- __ LoadExternalReference(t0,
- ExternalReference(Top::k_pending_exception_address));
- __ sw(t1, MemOperand(t0));
-
- // Invoke the function by calling through JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: receiver_pointer
- // a3: argc
- // s0: argv
- //
- // Stack:
- // handler frame
- // entry frame
- // callee saved registers + ra
- // 4 args slots
- // args
-
- if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
- __ LoadExternalReference(t0, construct_entry);
- } else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
- __ LoadExternalReference(t0, entry);
- }
- __ lw(t9, MemOperand(t0)); // deref address
-
- // Call JSEntryTrampoline.
- __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
- __ CallBuiltin(t9);
-
- // Unlink this frame from the handler chain. When reading the
- // address of the next handler, there is no need to use the address
- // displacement since the current stack pointer (sp) points directly
- // to the stack handler.
- __ lw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
- __ LoadExternalReference(t0, ExternalReference(Top::k_handler_address));
- __ sw(t1, MemOperand(t0));
-
- // This restores sp to its position before PushTryHandler.
- __ addiu(sp, sp, StackHandlerConstants::kSize);
-
- __ bind(&exit); // v0 holds result
- // Restore the top frame descriptors from the stack.
- __ Pop(t1);
- __ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
- __ sw(t1, MemOperand(t0));
-
- // Reset the stack to the callee saved registers.
- __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
-
- // Restore callee saved registers from the stack.
- __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit());
- // Return.
- __ Jump(ra);
-}
-
-
-// This stub performs an instanceof, calling the builtin function if
-// necessary. Uses a1 for the object, a0 for the function that it may
-// be an instance of (these are fetched from the stack).
-void InstanceofStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x845);
-}
-
-
-void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x851);
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x857);
-}
-
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x863);
-}
-
-
-const char* CompareStub::GetName() {
- UNIMPLEMENTED_MIPS();
- return NULL; // UNIMPLEMENTED RETURN
-}
-
-
-int CompareStub::MinorKey() {
- // Encode the two parameters in a unique 16 bit value.
- ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
- return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
-}
-
-
-#undef __
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index 66f891bd7..fecd321fa 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,153 +29,25 @@
#ifndef V8_MIPS_CODEGEN_MIPS_H_
#define V8_MIPS_CODEGEN_MIPS_H_
+
+#include "ast.h"
+#include "code-stubs-mips.h"
+#include "ic-inl.h"
+
namespace v8 {
namespace internal {
// Forward declarations
class CompilationInfo;
-class DeferredCode;
-class RegisterAllocator;
-class RegisterFile;
-enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-// -----------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-class Reference BASE_EMBEDDED {
- public:
- // The values of the types is important, see size().
- enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get = false);
- ~Reference();
-
- Expression* expression() const { return expression_; }
- Type type() const { return type_; }
- void set_type(Type value) {
- ASSERT_EQ(ILLEGAL, type_);
- type_ = value;
- }
-
- void set_unloaded() {
- ASSERT_NE(ILLEGAL, type_);
- ASSERT_NE(UNLOADED, type_);
- type_ = UNLOADED;
- }
- // The size the reference takes up on the stack.
- int size() const {
- return (type_ < SLOT) ? 0 : type_;
- }
-
- bool is_illegal() const { return type_ == ILLEGAL; }
- bool is_slot() const { return type_ == SLOT; }
- bool is_property() const { return type_ == NAMED || type_ == KEYED; }
- bool is_unloaded() const { return type_ == UNLOADED; }
-
- // Return the name. Only valid for named property references.
- Handle<String> GetName();
-
- // Generate code to push the value of the reference on top of the
- // expression stack. The reference is expected to be already on top of
- // the expression stack, and it is consumed by the call unless the
- // reference is for a compound assignment.
- // If the reference is not consumed, it is left in place under its value.
- void GetValue();
-
- // Generate code to pop a reference, push the value of the reference,
- // and then spill the stack frame.
- inline void GetValueAndSpill();
-
- // Generate code to store the value on top of the expression stack in the
- // reference. The reference is expected to be immediately below the value
- // on the expression stack. The value is stored in the location specified
- // by the reference, and is left on top of the stack, after the reference
- // is popped from beneath it (unloaded).
- void SetValue(InitState init_state);
-
- private:
- CodeGenerator* cgen_;
- Expression* expression_;
- Type type_;
- // Keep the reference on the stack after get, so it can be used by set later.
- bool persist_after_get_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the label pair). It is threaded through the
-// call stack. Constructing a state implicitly pushes it on the owning code
-// generator's stack of states, and destroying one implicitly pops it.
-
-class CodeGenState BASE_EMBEDDED {
- public:
- // Create an initial code generator state. Destroying the initial state
- // leaves the code generator with a NULL state.
- explicit CodeGenState(CodeGenerator* owner);
-
- // Create a code generator state based on a code generator's current
- // state. The new state has its own typeof state and pair of branch
- // labels.
- CodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target);
-
- // Destroy a code generator state and restore the owning code generator's
- // previous state.
- ~CodeGenState();
-
- TypeofState typeof_state() const { return typeof_state_; }
- JumpTarget* true_target() const { return true_target_; }
- JumpTarget* false_target() const { return false_target_; }
-
- private:
- // The owning code generator.
- CodeGenerator* owner_;
-
- // A flag indicating whether we are compiling the immediate subexpression
- // of a typeof expression.
- TypeofState typeof_state_;
-
- JumpTarget* true_target_;
- JumpTarget* false_target_;
-
- // The previous state of the owning code generator, restored when
- // this state is destroyed.
- CodeGenState* previous_;
-};
-
-
-
-// -----------------------------------------------------------------------------
+// -------------------------------------------------------------------------
// CodeGenerator
class CodeGenerator: public AstVisitor {
public:
- // Compilation mode. Either the compiler is used as the primary
- // compiler and needs to setup everything or the compiler is used as
- // the secondary compiler for split compilation and has to handle
- // bailouts.
- enum Mode {
- PRIMARY,
- SECONDARY
- };
-
- // Takes a function literal, generates code for it. This function should only
- // be called by compiler.cc.
- static Handle<Code> MakeCode(CompilationInfo* info);
+ static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(CompilationInfo* info);
@@ -185,6 +57,9 @@ class CodeGenerator: public AstVisitor {
Code::Flags flags,
CompilationInfo* info);
+ // Print the code after compiling it.
+ static void PrintCode(Handle<Code> code, CompilationInfo* info);
+
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
@@ -194,234 +69,26 @@ class CodeGenerator: public AstVisitor {
bool is_toplevel,
Handle<Script> script);
- static void RecordPositions(MacroAssembler* masm, int pos);
-
- // Accessors
- MacroAssembler* masm() { return masm_; }
- VirtualFrame* frame() const { return frame_; }
- inline Handle<Script> script();
-
- bool has_valid_frame() const { return frame_ != NULL; }
-
- // Set the virtual frame to be new_frame, with non-frame register
- // reference counts given by non_frame_registers. The non-frame
- // register reference counts of the old frame are returned in
- // non_frame_registers.
- void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
- void DeleteFrame();
-
- RegisterAllocator* allocator() const { return allocator_; }
-
- CodeGenState* state() { return state_; }
- void set_state(CodeGenState* state) { state_ = state; }
-
- void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
- static const int kUnknownIntValue = -1;
-
- // Number of instructions used for the JS return sequence. The constant is
- // used by the debugger to patch the JS return sequence.
- static const int kJSReturnSequenceLength = 7;
+ static bool RecordPositions(MacroAssembler* masm,
+ int pos,
+ bool right_here = false);
- // If the name is an inline runtime function call return the number of
- // expected arguments. Otherwise return -1.
- static int InlineRuntimeCallArgumentsCount(Handle<String> name);
-
- private:
- // Construction/Destruction.
- explicit CodeGenerator(MacroAssembler* masm);
-
- // Accessors.
- inline bool is_eval();
- inline Scope* scope();
-
- // Generating deferred code.
- void ProcessDeferred();
-
- // State
- bool has_cc() const { return cc_reg_ != cc_always; }
- TypeofState typeof_state() const { return state_->typeof_state(); }
- JumpTarget* true_target() const { return state_->true_target(); }
- JumpTarget* false_target() const { return state_->false_target(); }
-
- // We don't track loop nesting level on mips yet.
- int loop_nesting() const { return 0; }
-
- // Node visitors.
- void VisitStatements(ZoneList<Statement*>* statements);
-
-#define DEF_VISIT(type) \
- void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- // Visit a statement and then spill the virtual frame if control flow can
- // reach the end of the statement (ie, it does not exit via break,
- // continue, return, or throw). This function is used temporarily while
- // the code generator is being transformed.
- inline void VisitAndSpill(Statement* statement);
-
- // Visit a list of statements and then spill the virtual frame if control
- // flow can reach the end of the list.
- inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
- // Main code generation function
- void Generate(CompilationInfo* info);
-
- // The following are used by class Reference.
- void LoadReference(Reference* ref);
- void UnloadReference(Reference* ref);
-
- MemOperand ContextOperand(Register context, int index) const {
- return MemOperand(context, Context::SlotOffset(index));
+ // Constants related to patching of inlined load/store.
+ static int GetInlinedKeyedLoadInstructionsAfterPatch() {
+ // This is in correlation with the padding in MacroAssembler::Abort.
+ return FLAG_debug_code ? 45 : 20;
}
- MemOperand SlotOperand(Slot* slot, Register tmp);
+ static const int kInlinedKeyedStoreInstructionsAfterPatch = 13;
- // Expressions
- MemOperand GlobalObject() const {
- return ContextOperand(cp, Context::GLOBAL_INDEX);
+ static int GetInlinedNamedStoreInstructionsAfterPatch() {
+ ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
+ // Magic number 5: instruction count after patched map load:
+ // li: 2 (lui & ori), Branch: 2 (bne & nop), sw: 1
+ return Isolate::Current()->inlined_write_barrier_size() + 5;
}
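Aside: the magic number above works because li with a full 32-bit immediate always expands to a fixed two-instruction lui/ori pair. A sketch of that split (standard MIPS encoding, not code from this patch):

#include <cstdint>
#include <cstdio>

// li rd, imm32 assembles as: lui rd, hi16 ; ori rd, rd, lo16.
int main() {
  uint32_t imm = 0x12345678;
  uint32_t hi16 = imm >> 16;     // lui writes this into the upper half.
  uint32_t lo16 = imm & 0xffff;  // ori fills in the lower half.
  printf("lui: 0x%04x, ori: 0x%04x\n", hi16, lo16);
  return ((hi16 << 16) | lo16) == imm ? 0 : 1;
}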
- void LoadCondition(Expression* x,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_cc);
- void Load(Expression* x);
- void LoadGlobal();
-
- // Generate code to push the value of an expression on top of the frame
- // and then spill the frame fully to memory. This function is used
- // temporarily while the code generator is being transformed.
- inline void LoadAndSpill(Expression* expression);
-
- // Read a value from a slot and leave it on top of the expression stack.
- void LoadFromSlot(Slot* slot, TypeofState typeof_state);
- // Store the value on top of the stack to a slot.
- void StoreToSlot(Slot* slot, InitState init_state);
-
- struct InlineRuntimeLUT {
- void (CodeGenerator::*method)(ZoneList<Expression*>*);
- const char* name;
- int nargs;
- };
-
- static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
- bool CheckForInlineRuntimeCall(CallRuntime* node);
-
- static Handle<Code> ComputeLazyCompile(int argc);
- void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
- Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
-
- // Declare global variables and functions in the given array of
- // name/value pairs.
- void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Support for type checks.
- void GenerateIsSmi(ZoneList<Expression*>* args);
- void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
- void GenerateIsArray(ZoneList<Expression*>* args);
- void GenerateIsRegExp(ZoneList<Expression*>* args);
-
- // Support for construct call checks.
- void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
- // Support for arguments.length and arguments[?].
- void GenerateArgumentsLength(ZoneList<Expression*>* args);
- void GenerateArguments(ZoneList<Expression*>* args);
-
- // Support for accessing the class and value fields of an object.
- void GenerateClassOf(ZoneList<Expression*>* args);
- void GenerateValueOf(ZoneList<Expression*>* args);
- void GenerateSetValueOf(ZoneList<Expression*>* args);
-
- // Fast support for charCodeAt(n).
- void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateCharFromCode(ZoneList<Expression*>* args);
-
- // Fast support for object equality testing.
- void GenerateObjectEquals(ZoneList<Expression*>* args);
-
- void GenerateLog(ZoneList<Expression*>* args);
-
- // Fast support for Math.random().
- void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
- void GenerateIsObject(ZoneList<Expression*>* args);
- void GenerateIsSpecObject(ZoneList<Expression*>* args);
- void GenerateIsFunction(ZoneList<Expression*>* args);
- void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
- void GenerateStringAdd(ZoneList<Expression*>* args);
- void GenerateSubString(ZoneList<Expression*>* args);
- void GenerateStringCompare(ZoneList<Expression*>* args);
- void GenerateRegExpExec(ZoneList<Expression*>* args);
- void GenerateNumberToString(ZoneList<Expression*>* args);
-
- // Fast call to math functions.
- void GenerateMathPow(ZoneList<Expression*>* args);
- void GenerateMathSin(ZoneList<Expression*>* args);
- void GenerateMathCos(ZoneList<Expression*>* args);
- void GenerateMathSqrt(ZoneList<Expression*>* args);
-
- // Simple condition analysis.
- enum ConditionAnalysis {
- ALWAYS_TRUE,
- ALWAYS_FALSE,
- DONT_KNOW
- };
- ConditionAnalysis AnalyzeCondition(Expression* cond);
-
- // Methods used to indicate which source code is generated for. Source
- // positions are collected by the assembler and emitted with the relocation
- // information.
- void CodeForFunctionPosition(FunctionLiteral* fun);
- void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Statement* node);
- void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
- void CodeForSourcePosition(int pos);
-
-#ifdef DEBUG
- // True if the registers are valid for entry to a block.
- bool HasValidEntryRegisters();
-#endif
-
- bool is_eval_; // Tells whether code is generated for eval.
-
- Handle<Script> script_;
- List<DeferredCode*> deferred_;
-
- // Assembler
- MacroAssembler* masm_; // to generate code
-
- CompilationInfo* info_;
-
- // Code generation state
- VirtualFrame* frame_;
- RegisterAllocator* allocator_;
- Condition cc_reg_;
- CodeGenState* state_;
-
- // Jump targets
- BreakTarget function_return_;
-
- // True if the function return is shadowed (ie, jumping to the target
- // function_return_ does not jump to the true function return, but rather
- // to some unlinking code).
- bool function_return_is_shadowed_;
-
- static InlineRuntimeLUT kInlineRuntimeLUT[];
-
- friend class VirtualFrame;
- friend class JumpTarget;
- friend class Reference;
- friend class FastCodeGenerator;
- friend class FullCodeGenerator;
- friend class FullCodeGenSyntaxChecker;
-
+ private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
index 49502bdec..96a23338d 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,14 +31,12 @@
#include "constants-mips.h"
-namespace assembler {
-namespace mips {
-
-namespace v8i = v8::internal;
+namespace v8 {
+namespace internal {
// -----------------------------------------------------------------------------
-// Registers
+// Registers.
// These register names are defined in a way to match the native disassembler
@@ -102,20 +100,20 @@ int Registers::Number(const char* name) {
}
-const char* FPURegister::names_[kNumFPURegister] = {
+const char* FPURegisters::names_[kNumFPURegisters] = {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
"f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
};
// List of alias names which can be used when referring to MIPS registers.
-const FPURegister::RegisterAlias FPURegister::aliases_[] = {
+const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
{kInvalidRegister, NULL}
};
-const char* FPURegister::Name(int creg) {
+const char* FPURegisters::Name(int creg) {
const char* result;
- if ((0 <= creg) && (creg < kNumFPURegister)) {
+ if ((0 <= creg) && (creg < kNumFPURegisters)) {
result = names_[creg];
} else {
result = "nocreg";
@@ -124,9 +122,9 @@ const char* FPURegister::Name(int creg) {
}
-int FPURegister::Number(const char* name) {
+int FPURegisters::Number(const char* name) {
// Look through the canonical names.
- for (int i = 0; i < kNumSimuRegisters; i++) {
+ for (int i = 0; i < kNumFPURegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
@@ -147,10 +145,10 @@ int FPURegister::Number(const char* name) {
// -----------------------------------------------------------------------------
-// Instruction
+// Instructions.
-bool Instruction::IsForbiddenInBranchDelay() {
- int op = OpcodeFieldRaw();
+bool Instruction::IsForbiddenInBranchDelay() const {
+ const int op = OpcodeFieldRaw();
switch (op) {
case J:
case JAL:
@@ -189,13 +187,18 @@ bool Instruction::IsForbiddenInBranchDelay() {
}
-bool Instruction::IsLinkingInstruction() {
- int op = OpcodeFieldRaw();
+bool Instruction::IsLinkingInstruction() const {
+ const int op = OpcodeFieldRaw();
switch (op) {
case JAL:
- case BGEZAL:
- case BLTZAL:
- return true;
+ case REGIMM:
+ switch (RtFieldRaw()) {
+ case BGEZAL:
+ case BLTZAL:
+ return true;
+ default:
+ return false;
+ };
case SPECIAL:
switch (FunctionFieldRaw()) {
case JALR:
@@ -209,7 +212,7 @@ bool Instruction::IsLinkingInstruction() {
}
-bool Instruction::IsTrap() {
+bool Instruction::IsTrap() const {
if (OpcodeFieldRaw() != SPECIAL) {
return false;
} else {
@@ -264,6 +267,9 @@ Instruction::Type Instruction::InstructionType() const {
case TLTU:
case TEQ:
case TNE:
+ case MOVZ:
+ case MOVN:
+ case MOVCI:
return kRegisterType;
default:
UNREACHABLE();
@@ -272,20 +278,30 @@ Instruction::Type Instruction::InstructionType() const {
case SPECIAL2:
switch (FunctionFieldRaw()) {
case MUL:
+ case CLZ:
return kRegisterType;
default:
UNREACHABLE();
};
break;
- case COP1: // Coprocessor instructions
+ case SPECIAL3:
switch (FunctionFieldRaw()) {
- case BC1: // branch on coprocessor condition
+ case INS:
+ case EXT:
+ return kRegisterType;
+ default:
+ UNREACHABLE();
+ };
+ break;
+ case COP1: // Coprocessor instructions.
+ switch (RsFieldRawNoAssert()) {
+ case BC1: // Branch on coprocessor condition.
return kImmediateType;
default:
return kRegisterType;
};
break;
- // 16 bits Immediate type instructions. eg: addi dest, src, imm16
+ // 16-bit immediate type instructions, e.g.: addi dest, src, imm16.
case REGIMM:
case BEQ:
case BNE:
@@ -304,16 +320,23 @@ Instruction::Type Instruction::InstructionType() const {
case BLEZL:
case BGTZL:
case LB:
+ case LH:
+ case LWL:
case LW:
case LBU:
+ case LHU:
+ case LWR:
case SB:
+ case SH:
+ case SWL:
case SW:
+ case SWR:
case LWC1:
case LDC1:
case SWC1:
case SDC1:
return kImmediateType;
- // 26 bits immediate type instructions. eg: j imm26
+ // 26-bit immediate type instructions, e.g.: j imm26.
case J:
case JAL:
return kJumpType;
@@ -323,6 +346,7 @@ Instruction::Type Instruction::InstructionType() const {
return kUnsupported;
}
-} } // namespace assembler::mips
+
+} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
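Aside: the IsLinkingInstruction change above is a correctness fix, not a cleanup — BGEZAL and BLTZAL are not top-level opcodes but rt-field encodings under the REGIMM major opcode, so they must be matched via RtFieldRaw() inside the REGIMM case. A standalone sketch of the dispatch (field values per the MIPS32 manual):

#include <cassert>
#include <cstdint>

const int kOpcodeShift = 26;
const int kRtShift = 16;
const uint32_t kRegImmOpcode = 1;  // REGIMM major opcode, 0b000001.
const uint32_t kBltzalRt = 0x10;   // rt selects the minor operation.
const uint32_t kBgezalRt = 0x11;

bool IsRegImmLinking(uint32_t instr) {
  if ((instr >> kOpcodeShift) != kRegImmOpcode) return false;
  uint32_t rt = (instr >> kRtShift) & 0x1f;
  return rt == kBltzalRt || rt == kBgezalRt;
}

int main() {
  uint32_t bgezal = (kRegImmOpcode << kOpcodeShift) | (kBgezalRt << kRtShift);
  uint32_t bltz = kRegImmOpcode << kOpcodeShift;  // rt == 0: does not link.
  assert(IsRegImmLinking(bgezal));
  assert(!IsRegImmLinking(bltz));
}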
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index d0fdf88db..6bf2570eb 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,15 +28,38 @@
#ifndef V8_MIPS_CONSTANTS_H_
#define V8_MIPS_CONSTANTS_H_
-#include "checks.h"
-
// UNIMPLEMENTED_ macro for MIPS.
+#ifdef DEBUG
#define UNIMPLEMENTED_MIPS() \
v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
__FILE__, __LINE__, __func__)
+#else
+#define UNIMPLEMENTED_MIPS()
+#endif
+
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
+#ifdef _MIPS_ARCH_MIPS32R2
+ #define mips32r2 1
+#else
+ #define mips32r2 0
+#endif
+
+
+#if(defined(__mips_hard_float) && __mips_hard_float != 0)
+// Use floating-point coprocessor instructions. This flag is raised when
+// -mhard-float is passed to the compiler.
+static const bool IsMipsSoftFloatABI = false;
+#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
+// Not using floating-point coprocessor instructions. This flag is raised when
+// -msoft-float is passed to the compiler.
+static const bool IsMipsSoftFloatABI = true;
+#else
+static const bool IsMipsSoftFloatABI = true;
+#endif
+
+
// Defines constants and accessor classes to assemble, disassemble and
// simulate MIPS32 instructions.
//
@@ -44,11 +67,11 @@
// Volume II: The MIPS32 Instruction Set
// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf.
-namespace assembler {
-namespace mips {
+namespace v8 {
+namespace internal {
// -----------------------------------------------------------------------------
-// Registers and FPURegister.
+// Registers and FPURegisters.
// Number of general purpose registers.
static const int kNumRegisters = 32;
@@ -61,9 +84,37 @@ static const int kNumSimuRegisters = 35;
static const int kPCRegister = 34;
// Number coprocessor registers.
-static const int kNumFPURegister = 32;
+static const int kNumFPURegisters = 32;
static const int kInvalidFPURegister = -1;
+// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
+static const int kFCSRRegister = 31;
+static const int kInvalidFPUControlRegister = -1;
+static const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
+
+// FCSR constants.
+static const uint32_t kFCSRInexactFlagBit = 2;
+static const uint32_t kFCSRUnderflowFlagBit = 3;
+static const uint32_t kFCSROverflowFlagBit = 4;
+static const uint32_t kFCSRDivideByZeroFlagBit = 5;
+static const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+static const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+static const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+static const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+static const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+static const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+static const uint32_t kFCSRFlagMask =
+ kFCSRInexactFlagMask |
+ kFCSRUnderflowFlagMask |
+ kFCSROverflowFlagMask |
+ kFCSRDivideByZeroFlagMask |
+ kFCSRInvalidOpFlagMask;
+
+static const uint32_t kFCSRExceptionFlagMask =
+ kFCSRFlagMask ^ kFCSRInexactFlagMask;
+
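Aside: the XOR at the end of the new FCSR constants is the interesting bit — inexact is raised by ordinary rounding, so it is stripped from the set of flags treated as exceptional. Working out the values:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kInexact   = 1u << 2;
  const uint32_t kUnderflow = 1u << 3;
  const uint32_t kOverflow  = 1u << 4;
  const uint32_t kDivZero   = 1u << 5;
  const uint32_t kInvalidOp = 1u << 6;
  const uint32_t kFlagMask =
      kInexact | kUnderflow | kOverflow | kDivZero | kInvalidOp;  // 0x7c.
  // XOR strips inexact, which is not an exceptional outcome.
  const uint32_t kExceptionMask = kFlagMask ^ kInexact;           // 0x78.
  assert(kFlagMask == 0x7c && kExceptionMask == 0x78);
}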
// Helper functions for converting between register numbers and names.
class Registers {
public:
@@ -82,13 +133,12 @@ class Registers {
static const int32_t kMinValue = 0x80000000;
private:
-
static const char* names_[kNumSimuRegisters];
static const RegisterAlias aliases_[];
};
// Helper functions for converting between register numbers and names.
-class FPURegister {
+class FPURegisters {
public:
// Return the name of the register.
static const char* Name(int reg);
@@ -102,8 +152,7 @@ class FPURegister {
};
private:
-
- static const char* names_[kNumFPURegister];
+ static const char* names_[kNumFPURegisters];
static const RegisterAlias aliases_[];
};
@@ -114,8 +163,6 @@ class FPURegister {
// On MIPS all instructions are 32 bits.
typedef int32_t Instr;
-typedef unsigned char byte_;
-
// Special Software Interrupt codes when used in the presence of the MIPS
// simulator.
enum SoftwareInterruptCodes {
@@ -123,6 +170,18 @@ enum SoftwareInterruptCodes {
call_rt_redirected = 0xfffff
};
+// On MIPS Simulator breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+static const uint32_t kMaxWatchpointCode = 31;
+static const uint32_t kMaxStopCode = 127;
+STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
+
+
// ----- Fields offset and length.
static const int kOpcodeShift = 26;
static const int kOpcodeBits = 6;
@@ -136,22 +195,34 @@ static const int kSaShift = 6;
static const int kSaBits = 5;
static const int kFunctionShift = 0;
static const int kFunctionBits = 6;
+static const int kLuiShift = 16;
static const int kImm16Shift = 0;
static const int kImm16Bits = 16;
static const int kImm26Shift = 0;
static const int kImm26Bits = 26;
+static const int kImm28Shift = 0;
+static const int kImm28Bits = 28;
static const int kFsShift = 11;
static const int kFsBits = 5;
static const int kFtShift = 16;
static const int kFtBits = 5;
-
-// ----- Miscellianous useful masks.
+static const int kFdShift = 6;
+static const int kFdBits = 5;
+static const int kFCccShift = 8;
+static const int kFCccBits = 3;
+static const int kFBccShift = 18;
+static const int kFBccBits = 3;
+static const int kFBtrueShift = 16;
+static const int kFBtrueBits = 1;
+
+// ----- Miscellaneous useful masks.
// Instruction bit masks.
static const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
static const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
static const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
+static const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
static const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
static const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
static const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
@@ -159,9 +230,9 @@ static const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
static const int kFunctionFieldMask =
((1 << kFunctionBits) - 1) << kFunctionShift;
// Misc masks.
-static const int HIMask = 0xffff << 16;
-static const int LOMask = 0xffff;
-static const int signMask = 0x80000000;
+static const int kHiMask = 0xffff << 16;
+static const int kLoMask = 0xffff;
+static const int kSignMask = 0x80000000;
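Aside: these masks are plain shift-and-mask field extraction; kHiMask/kLoMask split a word the same way lui/ori assemble one. A sketch decoding the push instruction this header mentions later (addiu sp, sp, -4; the ADDIU opcode value is standard MIPS32 and is not shown in this diff):

#include <cassert>
#include <cstdint>

int main() {
  const int kOpcodeShift = 26;
  const int kLoMask = 0xffff;
  const uint32_t ADDIU = 9;  // 0b001001 per the MIPS32 manual.
  const uint32_t sp = 29;
  // addiu sp, sp, -4 -- the shape of kPushInstruction.
  uint32_t instr = (ADDIU << kOpcodeShift) | (sp << 21) | (sp << 16) | 0xfffc;
  assert((instr >> kOpcodeShift) == ADDIU);
  int32_t imm = static_cast<int16_t>(instr & kLoMask);  // Sign-extend imm16.
  assert(imm == -4);
}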
// ----- MIPS Opcodes and Function Fields.
@@ -187,19 +258,27 @@ enum Opcode {
XORI = ((1 << 3) + 6) << kOpcodeShift,
LUI = ((1 << 3) + 7) << kOpcodeShift,
- COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class
+ COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
BEQL = ((2 << 3) + 4) << kOpcodeShift,
BNEL = ((2 << 3) + 5) << kOpcodeShift,
BLEZL = ((2 << 3) + 6) << kOpcodeShift,
BGTZL = ((2 << 3) + 7) << kOpcodeShift,
SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
+ SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
LB = ((4 << 3) + 0) << kOpcodeShift,
+ LH = ((4 << 3) + 1) << kOpcodeShift,
+ LWL = ((4 << 3) + 2) << kOpcodeShift,
LW = ((4 << 3) + 3) << kOpcodeShift,
LBU = ((4 << 3) + 4) << kOpcodeShift,
+ LHU = ((4 << 3) + 5) << kOpcodeShift,
+ LWR = ((4 << 3) + 6) << kOpcodeShift,
SB = ((5 << 3) + 0) << kOpcodeShift,
+ SH = ((5 << 3) + 1) << kOpcodeShift,
+ SWL = ((5 << 3) + 2) << kOpcodeShift,
SW = ((5 << 3) + 3) << kOpcodeShift,
+ SWR = ((5 << 3) + 6) << kOpcodeShift,
LWC1 = ((6 << 3) + 1) << kOpcodeShift,
LDC1 = ((6 << 3) + 5) << kOpcodeShift,
@@ -216,9 +295,12 @@ enum SecondaryField {
SLLV = ((0 << 3) + 4),
SRLV = ((0 << 3) + 6),
SRAV = ((0 << 3) + 7),
+ MOVCI = ((0 << 3) + 1),
JR = ((1 << 3) + 0),
JALR = ((1 << 3) + 1),
+ MOVZ = ((1 << 3) + 2),
+ MOVN = ((1 << 3) + 3),
BREAK = ((1 << 3) + 5),
MFHI = ((2 << 3) + 0),
@@ -250,6 +332,12 @@ enum SecondaryField {
// SPECIAL2 Encoding of Function Field.
MUL = ((0 << 3) + 2),
+ CLZ = ((4 << 3) + 0),
+ CLO = ((4 << 3) + 1),
+
+ // SPECIAL3 Encoding of Function Field.
+ EXT = ((0 << 3) + 0),
+ INS = ((0 << 3) + 4),
// REGIMM encoding of rt Field.
BLTZ = ((0 << 3) + 0) << 16,
@@ -259,8 +347,10 @@ enum SecondaryField {
// COP1 Encoding of rs Field.
MFC1 = ((0 << 3) + 0) << 21,
+ CFC1 = ((0 << 3) + 2) << 21,
MFHC1 = ((0 << 3) + 3) << 21,
MTC1 = ((0 << 3) + 4) << 21,
+ CTC1 = ((0 << 3) + 6) << 21,
MTHC1 = ((0 << 3) + 7) << 21,
BC1 = ((1 << 3) + 0) << 21,
S = ((2 << 3) + 0) << 21,
@@ -269,14 +359,46 @@ enum SecondaryField {
L = ((2 << 3) + 5) << 21,
PS = ((2 << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
+ ROUND_L_S = ((1 << 3) + 0),
+ TRUNC_L_S = ((1 << 3) + 1),
+ CEIL_L_S = ((1 << 3) + 2),
+ FLOOR_L_S = ((1 << 3) + 3),
+ ROUND_W_S = ((1 << 3) + 4),
+ TRUNC_W_S = ((1 << 3) + 5),
+ CEIL_W_S = ((1 << 3) + 6),
+ FLOOR_W_S = ((1 << 3) + 7),
CVT_D_S = ((4 << 3) + 1),
CVT_W_S = ((4 << 3) + 4),
CVT_L_S = ((4 << 3) + 5),
CVT_PS_S = ((4 << 3) + 6),
// COP1 Encoding of Function Field When rs=D.
+ ADD_D = ((0 << 3) + 0),
+ SUB_D = ((0 << 3) + 1),
+ MUL_D = ((0 << 3) + 2),
+ DIV_D = ((0 << 3) + 3),
+ SQRT_D = ((0 << 3) + 4),
+ ABS_D = ((0 << 3) + 5),
+ MOV_D = ((0 << 3) + 6),
+ NEG_D = ((0 << 3) + 7),
+ ROUND_L_D = ((1 << 3) + 0),
+ TRUNC_L_D = ((1 << 3) + 1),
+ CEIL_L_D = ((1 << 3) + 2),
+ FLOOR_L_D = ((1 << 3) + 3),
+ ROUND_W_D = ((1 << 3) + 4),
+ TRUNC_W_D = ((1 << 3) + 5),
+ CEIL_W_D = ((1 << 3) + 6),
+ FLOOR_W_D = ((1 << 3) + 7),
CVT_S_D = ((4 << 3) + 0),
CVT_W_D = ((4 << 3) + 4),
CVT_L_D = ((4 << 3) + 5),
+ C_F_D = ((6 << 3) + 0),
+ C_UN_D = ((6 << 3) + 1),
+ C_EQ_D = ((6 << 3) + 2),
+ C_UEQ_D = ((6 << 3) + 3),
+ C_OLT_D = ((6 << 3) + 4),
+ C_ULT_D = ((6 << 3) + 5),
+ C_OLE_D = ((6 << 3) + 6),
+ C_ULE_D = ((6 << 3) + 7),
// COP1 Encoding of Function Field When rs=W or L.
CVT_S_W = ((4 << 3) + 0),
CVT_D_W = ((4 << 3) + 1),
@@ -293,7 +415,7 @@ enum SecondaryField {
// the 'U' prefix is used to specify unsigned comparisons.
enum Condition {
// Any value < 0 is considered no_condition.
- no_condition = -1,
+ kNoCondition = -1,
overflow = 0,
no_overflow = 1,
@@ -314,32 +436,119 @@ enum Condition {
cc_always = 16,
- // aliases
+ // Aliases.
carry = Uless,
not_carry = Ugreater_equal,
zero = equal,
eq = equal,
not_zero = not_equal,
ne = not_equal,
+ nz = not_equal,
sign = negative,
not_sign = positive,
-
- cc_default = no_condition
+ mi = negative,
+ pl = positive,
+ hi = Ugreater,
+ ls = Uless_equal,
+ ge = greater_equal,
+ lt = less,
+ gt = greater,
+ le = less_equal,
+ hs = Ugreater_equal,
+ lo = Uless,
+ al = cc_always,
+
+ cc_default = kNoCondition
};
+
+// Returns the equivalent of !cc.
+// Negation of the default kNoCondition (-1) results in a non-default
+// no-condition value (-2). As long as tests for no-condition check
+// for condition < 0, this will work as expected.
+inline Condition NegateCondition(Condition cc) {
+ ASSERT(cc != cc_always);
+ return static_cast<Condition>(cc ^ 1);
+}
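Aside: the XOR works because conditions are declared in complementary even/odd pairs, so flipping bit 0 swaps a condition with its negation — and it maps kNoCondition (-1) to -2, as the comment notes. A toy version (enum values illustrative except overflow/no_overflow; the pairing invariant is the real one):

#include <cassert>

enum Condition { kNoCondition = -1, overflow = 0, no_overflow = 1,
                 equal = 2, not_equal = 3 };

inline Condition Negate(Condition cc) {
  return static_cast<Condition>(cc ^ 1);
}

int main() {
  assert(Negate(equal) == not_equal);
  assert(Negate(no_overflow) == overflow);
  assert(static_cast<int>(Negate(kNoCondition)) == -2);  // Still < 0.
}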
+
+
+inline Condition ReverseCondition(Condition cc) {
+ switch (cc) {
+ case Uless:
+ return Ugreater;
+ case Ugreater:
+ return Uless;
+ case Ugreater_equal:
+ return Uless_equal;
+ case Uless_equal:
+ return Ugreater_equal;
+ case less:
+ return greater;
+ case greater:
+ return less;
+ case greater_equal:
+ return less_equal;
+ case less_equal:
+ return greater_equal;
+ default:
+ return cc;
+ };
+}
+
+
// ----- Coprocessor conditions.
enum FPUCondition {
- F, // False
- UN, // Unordered
- EQ, // Equal
- UEQ, // Unordered or Equal
- OLT, // Ordered or Less Than
- ULT, // Unordered or Less Than
- OLE, // Ordered or Less Than or Equal
- ULE // Unordered or Less Than or Equal
+ F, // False.
+ UN, // Unordered.
+ EQ, // Equal.
+ UEQ, // Unordered or Equal.
+ OLT, // Ordered or Less Than.
+ ULT, // Unordered or Less Than.
+ OLE, // Ordered or Less Than or Equal.
+ ULE // Unordered or Less Than or Equal.
};
+// -----------------------------------------------------------------------------
+// Hints.
+
+// Branch hints are not used on the MIPS. They are defined so that they can
+// appear in shared function signatures, but will be ignored in MIPS
+// implementations.
+enum Hint {
+ no_hint = 0
+};
+
+
+inline Hint NegateHint(Hint hint) {
+ return no_hint;
+}
+
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-mips.cc, as they use named
+// registers and other constants.
+
+// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
+// operations as post-increment of sp.
+extern const Instr kPopInstruction;
+// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
+extern const Instr kPushInstruction;
+// sw(r, MemOperand(sp, 0))
+extern const Instr kPushRegPattern;
+// lw(r, MemOperand(sp, 0))
+extern const Instr kPopRegPattern;
+extern const Instr kLwRegFpOffsetPattern;
+extern const Instr kSwRegFpOffsetPattern;
+extern const Instr kLwRegFpNegOffsetPattern;
+extern const Instr kSwRegFpNegOffsetPattern;
+// A mask for the Rt register for push, pop, lw, sw instructions.
+extern const Instr kRtMask;
+extern const Instr kLwSwInstrTypeMask;
+extern const Instr kLwSwInstrArgumentMask;
+extern const Instr kLwSwOffsetMask;
+
// Break 0xfffff, reserved for redirected real time call.
const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
// A nop instruction. (Encoding of sll 0 0 0).
@@ -348,10 +557,10 @@ const Instr nopInstr = 0;
class Instruction {
public:
enum {
- kInstructionSize = 4,
- kInstructionSizeLog2 = 2,
+ kInstrSize = 4,
+ kInstrSizeLog2 = 2,
// On MIPS PC cannot actually be directly accessed. We behave as if PC was
- // always the value of the current instruction being exectued.
+ // always the value of the current instruction being executed.
kPCReadOffset = 0
};
@@ -388,45 +597,64 @@ class Instruction {
// Accessors for the different named fields used in the MIPS encoding.
- inline Opcode OpcodeField() const {
+ inline Opcode OpcodeValue() const {
return static_cast<Opcode>(
Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
}
- inline int RsField() const {
+ inline int RsValue() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kRsShift + kRsBits - 1, kRsShift);
}
- inline int RtField() const {
+ inline int RtValue() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kRtShift + kRtBits - 1, kRtShift);
}
- inline int RdField() const {
+ inline int RdValue() const {
ASSERT(InstructionType() == kRegisterType);
return Bits(kRdShift + kRdBits - 1, kRdShift);
}
- inline int SaField() const {
+ inline int SaValue() const {
ASSERT(InstructionType() == kRegisterType);
return Bits(kSaShift + kSaBits - 1, kSaShift);
}
- inline int FunctionField() const {
+ inline int FunctionValue() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
}
- inline int FsField() const {
- return Bits(kFsShift + kRsBits - 1, kFsShift);
+ inline int FdValue() const {
+ return Bits(kFdShift + kFdBits - 1, kFdShift);
+ }
+
+ inline int FsValue() const {
+ return Bits(kFsShift + kFsBits - 1, kFsShift);
+ }
+
+ inline int FtValue() const {
+ return Bits(kFtShift + kFtBits - 1, kFtShift);
+ }
+
+ // Float Compare condition code instruction bits.
+ inline int FCccValue() const {
+ return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
+ }
+
+ // Float Branch condition code instruction bits.
+ inline int FBccValue() const {
+ return Bits(kFBccShift + kFBccBits - 1, kFBccShift);
}
- inline int FtField() const {
- return Bits(kFtShift + kRsBits - 1, kFtShift);
+ // Float Branch true/false instruction bit.
+ inline int FBtrueValue() const {
+ return Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
}
// Return the fields at their original place in the instruction encoding.
@@ -440,6 +668,11 @@ class Instruction {
return InstructionBits() & kRsFieldMask;
}
+ // Same as above function, but safe to call within InstructionType().
+ inline int RsFieldRawNoAssert() const {
+ return InstructionBits() & kRsFieldMask;
+ }
+
inline int RtFieldRaw() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
@@ -461,43 +694,43 @@ class Instruction {
}
// Get the secondary field according to the opcode.
- inline int SecondaryField() const {
+ inline int SecondaryValue() const {
Opcode op = OpcodeFieldRaw();
switch (op) {
case SPECIAL:
case SPECIAL2:
- return FunctionField();
+ return FunctionValue();
case COP1:
- return RsField();
+ return RsValue();
case REGIMM:
- return RtField();
+ return RtValue();
default:
return NULLSF;
}
}
- inline int32_t Imm16Field() const {
+ inline int32_t Imm16Value() const {
ASSERT(InstructionType() == kImmediateType);
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
- inline int32_t Imm26Field() const {
+ inline int32_t Imm26Value() const {
ASSERT(InstructionType() == kJumpType);
return Bits(kImm16Shift + kImm26Bits - 1, kImm26Shift);
}
// Say if the instruction should not be used in a branch delay slot.
- bool IsForbiddenInBranchDelay();
+ bool IsForbiddenInBranchDelay() const;
// Say if the instruction 'links'. eg: jal, bal.
- bool IsLinkingInstruction();
+ bool IsLinkingInstruction() const;
// Say if the instruction is a break or a trap.
- bool IsTrap();
+ bool IsTrap() const;
 // Instructions are read out of a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
// to allocate or create instances of class Instruction.
// Use the At(pc) function to create references to Instruction.
- static Instruction* At(byte_* pc) {
+ static Instruction* At(byte* pc) {
return reinterpret_cast<Instruction*>(pc);
}
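Aside: At() is the only way to obtain an Instruction* — the class is a zero-size overlay reinterpreted over the code stream and is never allocated. A minimal sketch of the same pattern:

#include <cassert>
#include <cstdint>

// Typed, read-only view over raw code bytes, mirroring Instruction::At().
class Instr32 {
 public:
  uint32_t InstructionBits() const {
    return *reinterpret_cast<const uint32_t*>(this);
  }
  // Inclusive bit range, matching V8's Bits(hi, lo) convention.
  uint32_t Bits(int hi, int lo) const {
    return (InstructionBits() >> lo) & ((1u << (hi - lo + 1)) - 1);
  }
  static const Instr32* At(const uint8_t* pc) {
    return reinterpret_cast<const Instr32*>(pc);
  }
 private:
  Instr32();  // Never constructed; instances only exist via At().
};

int main() {
  uint32_t code = 0x24000000;  // An instruction word sitting in memory.
  const Instr32* instr = Instr32::At(reinterpret_cast<uint8_t*>(&code));
  assert(instr->Bits(31, 26) == 0x09);  // Opcode field: addiu.
}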
@@ -510,16 +743,23 @@ class Instruction {
// -----------------------------------------------------------------------------
// MIPS assembly various constants.
-static const int kArgsSlotsSize = 4 * Instruction::kInstructionSize;
+
+static const int kArgsSlotsSize = 4 * Instruction::kInstrSize;
static const int kArgsSlotsNum = 4;
+// C/C++ argument slots size.
+static const int kCArgsSlotsSize = 4 * Instruction::kInstrSize;
+// JS argument slots size.
+static const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
+// Assembly builtins argument slots size.
+static const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
-static const int kBranchReturnOffset = 2 * Instruction::kInstructionSize;
+static const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
-static const int kDoubleAlignment = 2 * 8;
-static const int kDoubleAlignmentMask = kDoubleAlignmentMask - 1;
+static const int kDoubleAlignmentBits = 3;
+static const int kDoubleAlignment = (1 << kDoubleAlignmentBits);
+static const int kDoubleAlignmentMask = kDoubleAlignment - 1;
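Aside: the old kDoubleAlignmentMask line defined the mask in terms of itself; the rewrite derives 0x7 from kDoubleAlignment properly. The usual alignment idiom these constants feed:

#include <cassert>
#include <cstdint>

const int kDoubleAlignmentBits = 3;
const int kDoubleAlignment = 1 << kDoubleAlignmentBits;  // 8 bytes.
const int kDoubleAlignmentMask = kDoubleAlignment - 1;   // 0x7.

bool IsDoubleAligned(uintptr_t address) {
  return (address & kDoubleAlignmentMask) == 0;
}

int main() {
  assert(IsDoubleAligned(0x1000));
  assert(!IsDoubleAligned(0x1004));
}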
-} } // namespace assembler::mips
+} } // namespace v8::internal
#endif // #ifndef V8_MIPS_CONSTANTS_H_
-
diff --git a/deps/v8/src/mips/cpu-mips.cc b/deps/v8/src/mips/cpu-mips.cc
index 659fc01ce..26e95fb24 100644
--- a/deps/v8/src/mips/cpu-mips.cc
+++ b/deps/v8/src/mips/cpu-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -39,26 +39,48 @@
#if defined(V8_TARGET_ARCH_MIPS)
#include "cpu.h"
+#include "macro-assembler.h"
+
+#include "simulator.h" // For cache flushing.
namespace v8 {
namespace internal {
+
void CPU::Setup() {
- // Nothing to do.
+ CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+ return CpuFeatures::IsSupported(FPU);
}
+
void CPU::FlushICache(void* start, size_t size) {
-#ifdef __mips
+ // Nothing to do, flushing no instructions.
+ if (size == 0) {
+ return;
+ }
+
+#if !defined (USE_SIMULATOR)
int res;
- // See http://www.linux-mips.org/wiki/Cacheflush_Syscall
+ // See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);
if (res) {
V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
}
-#endif // #ifdef __mips
+#else // USE_SIMULATOR.
+ // Not generating MIPS instructions for C code. This means that we are
+ // building a MIPS-emulator-based target. We should notify the simulator
+ // that the Icache was flushed.
+ // None of this code ends up in the snapshot so there are no issues
+ // around whether or not to generate the code when building snapshots.
+ Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
+#endif // USE_SIMULATOR.
}
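Aside: patched code (such as the debug-break sequences later in this diff) is only safe to run once the instruction cache stops serving stale bytes; on MIPS Linux that is the cacheflush syscall used above. A MIPS-Linux-only sketch of the call (builds only where __NR_cacheflush and ICACHE exist, per <asm/cachectl.h>):

#include <asm/cachectl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <cstddef>
#include <cstdlib>

void FlushInstructionCache(void* start, size_t size) {
  if (size == 0) return;  // Mirrors the early-out added above.
  if (syscall(__NR_cacheflush, start, size, ICACHE) != 0) {
    abort();  // Executing patched code through a stale i-cache is fatal.
  }
}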
@@ -68,6 +90,7 @@ void CPU::DebugBreak() {
#endif // #ifdef __mips
}
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index b8ae68e39..e323c505e 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,78 +31,258 @@
#if defined(V8_TARGET_ARCH_MIPS)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
namespace v8 {
namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
+
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
void BreakLocationIterator::SetDebugBreakAtReturn() {
- UNIMPLEMENTED_MIPS();
+ // Mips return sequence:
+ // mov sp, fp
+ // lw fp, sp(0)
+ // lw ra, sp(4)
+ // addiu sp, sp, 8
+ // addiu sp, sp, N
+ // jr ra
+ // nop (in branch delay slot)
+
+ // Make sure this constant matches the number of instructions we emit.
+ ASSERT(Assembler::kJSReturnSequenceInstructions == 7);
+ CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
+ // li and Call pseudo-instructions emit two instructions each.
+ patcher.masm()->li(v8::internal::t9,
+ Operand(reinterpret_cast<int32_t>(
+ Isolate::Current()->debug()->debug_break_return()->entry())));
+ patcher.masm()->Call(v8::internal::t9);
+ patcher.masm()->nop();
+ patcher.masm()->nop();
+ patcher.masm()->nop();
+
+ // TODO(mips): Open issue about using breakpoint instruction instead of nops.
+ // patcher.masm()->bkpt(0);
}
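Aside: the patch must fill the return sequence exactly, which is where the assert on kJSReturnSequenceInstructions == 7 comes from — li expands to two instructions, Call to two (jalr plus its delay-slot nop), and three explicit nops pad the rest. A compile-time check of that budget:

// Instruction budget for the patch, per the sequence emitted above.
const int kLi = 2;       // li t9, addr  -> lui + ori.
const int kCall = 2;     // Call t9      -> jalr + delay-slot nop.
const int kPadding = 3;  // Three explicit nops.
static_assert(kLi + kCall + kPadding == 7,
              "patch fills the 7-instruction JS return sequence");
int main() { return 0; }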
// Restore the JS frame exit code.
void BreakLocationIterator::ClearDebugBreakAtReturn() {
- UNIMPLEMENTED_MIPS();
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kJSReturnSequenceInstructions);
}
-// A debug break in the exit code is identified by a call.
+// A debug break in the exit code is identified by the JS frame exit code
+// having been patched with li/call pseudo-instructions (lui/ori/jalr).
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
}
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Check whether the debug break slot instructions have been patched.
+ return rinfo()->IsPatchedDebugBreakSlotSequence();
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Patch the code changing the debug break slot code from:
+ // nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
+ // nop(DEBUG_BREAK_NOP)
+ // nop(DEBUG_BREAK_NOP)
+ // nop(DEBUG_BREAK_NOP)
+ // to a call to the debug break slot code.
+ // li t9, address (lui t9 / ori t9 instruction pair)
+ // call t9 (jalr t9 / nop instruction pair)
+ CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
+ patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
+ Isolate::Current()->debug()->debug_break_slot()->entry())));
+ patcher.masm()->Call(v8::internal::t9);
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kDebugBreakSlotInstructions);
+}
+
+
#define __ ACCESS_MASM(masm)
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+ RegList object_regs,
+ RegList non_object_regs) {
+ __ EnterInternalFrame();
+
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non object values
+ // are stored as a smi causing it to be untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ if ((object_regs | non_object_regs) != 0) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ And(at, reg, 0xc0000000);
+ __ Assert(eq, "Unable to encode value as smi", at, Operand(zero_reg));
+ }
+ __ sll(reg, reg, kSmiTagSize);
+ }
+ }
+ __ MultiPush(object_regs | non_object_regs);
+ }
+
+#ifdef DEBUG
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+ __ mov(a0, zero_reg); // No arguments.
+ __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub ceb(1);
+ __ CallStub(&ceb);
+
+ // Restore the register values from the expression stack.
+ if ((object_regs | non_object_regs) != 0) {
+ __ MultiPop(object_regs | non_object_regs);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ srl(reg, reg, kSmiTagSize);
+ }
+ if (FLAG_debug_code &&
+ (((object_regs |non_object_regs) & (1 << r)) == 0)) {
+ __ li(reg, kDebugZapValue);
+ }
+ }
+ }
+
+ __ LeaveInternalFrame();
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ __ li(t9, Operand(
+ ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate())));
+ __ lw(t9, MemOperand(t9));
+ __ Jump(t9);
+}
+
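Aside on the smi trick in the helper above: with a zero smi tag, any word whose low bit is clear looks like a small integer to the GC, so raw register values are shifted left before being pushed and shifted back after — the 0xc0000000 assert guarantees no information is lost. A sketch (kSmiTagSize == 1 on 32-bit V8):

#include <cassert>
#include <cstdint>

int main() {
  const int kSmiTagSize = 1;  // 32-bit V8: the smi tag is one zero bit.
  uint32_t raw = 0x12345;     // Top two bits must be clear to encode.
  assert((raw & 0xc0000000u) == 0);      // The FLAG_debug_code check.
  uint32_t smi = raw << kSmiTagSize;     // Encoded before MultiPush.
  assert((smi & 1u) == 0);               // GC sees a smi, not a pointer.
  assert((smi >> kSmiTagSize) == raw);   // Decoded after MultiPop.
}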
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Calling convention for IC load (from ic-mips.cc).
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- [sp] : receiver
+ // -----------------------------------
+ // Registers a0 and a2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, a0.bit() | a2.bit(), 0);
}
void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Calling convention for IC store (from ic-mips.cc).
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ // Registers a0, a1, and a2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
}
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit(), 0);
}
void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Calling convention for IC call (from ic-mips.cc).
+ // ----------- S t a t e -------------
+ // -- a2: name
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, a2.bit(), 0);
}
void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Calling convention for construct call (from builtins-mips.cc).
+ // -- a0 : number of arguments (not smi)
+ // -- a1 : constructor function
+ Generate_DebugBreakCallHelper(masm, a1.bit(), a0.bit());
}
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+  // In places other than IC call sites it is expected that v0 holds the TOS
+  // value, which is an object. This is not generally the case, so this
+  // should be used with care.
+ Generate_DebugBreakCallHelper(masm, v0.bit(), 0);
}
void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // No registers used on entry.
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, 0, 0);
+}
+
+
+void Debug::GenerateSlot(MacroAssembler* masm) {
+  // Generate enough nops to make space for a call instruction. Avoid emitting
+ // the trampoline pool in the debug break slot code.
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+ Label check_codesize;
+ __ bind(&check_codesize);
+ __ RecordDebugBreakSlot();
+ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+ __ nop(MacroAssembler::DEBUG_BREAK_NOP);
+ }
+ ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
+ masm->InstructionsGeneratedSince(&check_codesize));
+}
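
A side note on the DEBUG_BREAK_NOP marker used above: sll zero_reg, zero_reg, sa is architecturally a no-op for any shift amount, so the sa field is free to carry a marker. A sketch of the encoding (derived from the standard MIPS R-type instruction layout, not from this patch):

    #include <cstdint>
    #include <cstdio>

    // sll rd, rt, sa encodes as opcode=0, rs=0, rt, rd, sa, function=0.
    uint32_t EncodeSll(int rt, int rd, int sa) {
      return static_cast<uint32_t>((rt << 16) | (rd << 11) | (sa << 6));
    }

    int main() {
      // A marker nop targets zero_reg (register 0), so it has no effect.
      printf("nop(1) encodes to 0x%08x\n", EncodeSll(0, 0, 1));  // 0x00000040
      return 0;
    }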
+
+
+void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+  // In the places where a debug break slot is inserted, no registers can
+  // contain object pointers.
+ Generate_DebugBreakCallHelper(masm, 0, 0);
}
@@ -110,6 +290,7 @@ void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on mips");
}
+
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on mips");
}
diff --git a/deps/v8/src/arm/register-allocator-arm.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 3b35574da..4b69859a4 100644
--- a/deps/v8/src/arm/register-allocator-arm.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -27,37 +27,65 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_ARM)
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "safepoint-table.h"
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
+// Note: this file was taken from the X64 version. ARM has a partially working
+// Lithium implementation, but for now it is not ported to MIPS.
namespace v8 {
namespace internal {
-// -------------------------------------------------------------------------
-// Result implementation.
-void Result::ToRegister() {
+int Deoptimizer::table_entry_size_ = 10;
+
+
+int Deoptimizer::patch_size() {
+ const int kCallInstructionSizeInWords = 3;
+ return kCallInstructionSizeInWords * Assembler::kInstrSize;
+}
+
+
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
UNIMPLEMENTED();
}
-void Result::ToRegister(Register target) {
+void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+ Code* check_code,
+ Code* replacement_code) {
UNIMPLEMENTED();
}
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
+void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+ Code* check_code,
+ Code* replacement_code) {
+ UNIMPLEMENTED();
+}
+
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
- // No byte registers on ARM.
- UNREACHABLE();
- return Result();
+void Deoptimizer::DoComputeOsrOutputFrame() {
+ UNIMPLEMENTED();
}
-} } // namespace v8::internal
+void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
+ int frame_index) {
+ UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::EntryGenerator::Generate() {
+ UNIMPLEMENTED();
+}
+
-#endif // V8_TARGET_ARCH_ARM
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+ UNIMPLEMENTED();
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 959a4a220..7df5c4175 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -33,11 +33,10 @@
//
// NameConverter converter;
// Disassembler d(converter);
-// for (byte_* pc = begin; pc < end;) {
-// char buffer[128];
-// buffer[0] = '\0';
-// byte_* prev_pc = pc;
-// pc += d.InstructionDecode(buffer, sizeof buffer, pc);
+// for (byte* pc = begin; pc < end;) {
+// v8::internal::EmbeddedVector<char, 256> buffer;
+// byte* prev_pc = pc;
+// pc += d.InstructionDecode(buffer, pc);
// printf("%p %08x %s\n",
// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
// }
@@ -59,17 +58,13 @@
#if defined(V8_TARGET_ARCH_MIPS)
-#include "constants-mips.h"
+#include "mips/constants-mips.h"
#include "disasm.h"
#include "macro-assembler.h"
#include "platform.h"
-namespace assembler {
-namespace mips {
-
-
-namespace v8i = v8::internal;
-
+namespace v8 {
+namespace internal {
//------------------------------------------------------------------------------
@@ -90,7 +85,7 @@ class Decoder {
// Writes one disassembled instruction into 'buffer' (0-terminated).
// Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(byte_* instruction);
+ int InstructionDecode(byte* instruction);
private:
// Bottleneck functions to print into the out_buffer.
@@ -99,7 +94,7 @@ class Decoder {
// Printing of common values.
void PrintRegister(int reg);
- void PrintCRegister(int creg);
+ void PrintFPURegister(int freg);
void PrintRs(Instruction* instr);
void PrintRt(Instruction* instr);
void PrintRd(Instruction* instr);
@@ -107,6 +102,11 @@ class Decoder {
void PrintFt(Instruction* instr);
void PrintFd(Instruction* instr);
void PrintSa(Instruction* instr);
+ void PrintSd(Instruction* instr);
+ void PrintSs1(Instruction* instr);
+ void PrintSs2(Instruction* instr);
+ void PrintBc(Instruction* instr);
+ void PrintCc(Instruction* instr);
void PrintFunction(Instruction* instr);
void PrintSecondaryField(Instruction* instr);
void PrintUImm16(Instruction* instr);
@@ -119,7 +119,7 @@ class Decoder {
// Handle formatting of instructions and their options.
int FormatRegister(Instruction* instr, const char* option);
- int FormatCRegister(Instruction* instr, const char* option);
+ int FormatFPURegister(Instruction* instr, const char* option);
int FormatOption(Instruction* instr, const char* option);
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
@@ -166,84 +166,116 @@ void Decoder::PrintRegister(int reg) {
void Decoder::PrintRs(Instruction* instr) {
- int reg = instr->RsField();
+ int reg = instr->RsValue();
PrintRegister(reg);
}
void Decoder::PrintRt(Instruction* instr) {
- int reg = instr->RtField();
+ int reg = instr->RtValue();
PrintRegister(reg);
}
void Decoder::PrintRd(Instruction* instr) {
- int reg = instr->RdField();
+ int reg = instr->RdValue();
PrintRegister(reg);
}
-// Print the Cregister name according to the active name converter.
-void Decoder::PrintCRegister(int creg) {
- Print(converter_.NameOfXMMRegister(creg));
+// Print the FPUregister name according to the active name converter.
+void Decoder::PrintFPURegister(int freg) {
+ Print(converter_.NameOfXMMRegister(freg));
}
void Decoder::PrintFs(Instruction* instr) {
- int creg = instr->RsField();
- PrintCRegister(creg);
+ int freg = instr->RsValue();
+ PrintFPURegister(freg);
}
void Decoder::PrintFt(Instruction* instr) {
- int creg = instr->RtField();
- PrintCRegister(creg);
+ int freg = instr->RtValue();
+ PrintFPURegister(freg);
}
void Decoder::PrintFd(Instruction* instr) {
- int creg = instr->RdField();
- PrintCRegister(creg);
+ int freg = instr->RdValue();
+ PrintFPURegister(freg);
}
// Print the integer value of the sa field.
void Decoder::PrintSa(Instruction* instr) {
- int sa = instr->SaField();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", sa);
+ int sa = instr->SaValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+}
+
+
+// Print the integer value of the rd field, when it is not used as a register.
+void Decoder::PrintSd(Instruction* instr) {
+ int sd = instr->RdValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
+}
+
+
+// Print the integer value of the rd field, when used as 'ext' size.
+void Decoder::PrintSs1(Instruction* instr) {
+ int ss = instr->RdValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1);
+}
+
+
+// Print the integer value of the rd field, when used as 'ins' size.
+void Decoder::PrintSs2(Instruction* instr) {
+ int ss = instr->RdValue();
+ int pos = instr->SaValue();
+ out_buffer_pos_ +=
+ OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1);
+}
+
+
+// Print the integer value of the cc field for the bc1t/f instructions.
+void Decoder::PrintBc(Instruction* instr) {
+ int cc = instr->FBccValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc);
+}
+
+
+// Print the integer value of the cc field for the FP compare instructions.
+void Decoder::PrintCc(Instruction* instr) {
+ int cc = instr->FCccValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc);
}
// Print 16-bit unsigned immediate value.
void Decoder::PrintUImm16(Instruction* instr) {
- int32_t imm = instr->Imm16Field();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%u", imm);
+ int32_t imm = instr->Imm16Value();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm);
}
// Print 16-bit signed immediate value.
void Decoder::PrintSImm16(Instruction* instr) {
- int32_t imm = ((instr->Imm16Field())<<16)>>16;
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", imm);
+ int32_t imm = ((instr->Imm16Value()) << 16) >> 16;
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
// Print 16-bit hexa immediate value.
void Decoder::PrintXImm16(Instruction* instr) {
- int32_t imm = instr->Imm16Field();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "0x%x", imm);
+ int32_t imm = instr->Imm16Value();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
// Print 26-bit immediate value.
void Decoder::PrintImm26(Instruction* instr) {
- int32_t imm = instr->Imm26Field();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", imm);
+ int32_t imm = instr->Imm26Value();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
@@ -254,8 +286,8 @@ void Decoder::PrintCode(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case BREAK: {
int32_t code = instr->Bits(25, 6);
- out_buffer_pos_ +=
- v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%05x", code);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "0x%05x (%d)", code, code);
break;
}
case TGE:
@@ -266,7 +298,7 @@ void Decoder::PrintCode(Instruction* instr) {
case TNE: {
int32_t code = instr->Bits(15, 6);
out_buffer_pos_ +=
- v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
+ OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
break;
}
default: // Not a break or trap instruction.
@@ -284,16 +316,16 @@ void Decoder::PrintInstructionName(Instruction* instr) {
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'r');
- if (format[1] == 's') { // 'rs: Rs register
- int reg = instr->RsField();
+ if (format[1] == 's') { // 'rs: Rs register.
+ int reg = instr->RsValue();
PrintRegister(reg);
return 2;
- } else if (format[1] == 't') { // 'rt: rt register
- int reg = instr->RtField();
+ } else if (format[1] == 't') { // 'rt: rt register.
+ int reg = instr->RtValue();
PrintRegister(reg);
return 2;
- } else if (format[1] == 'd') { // 'rd: rd register
- int reg = instr->RdField();
+ } else if (format[1] == 'd') { // 'rd: rd register.
+ int reg = instr->RdValue();
PrintRegister(reg);
return 2;
}
@@ -302,21 +334,21 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
}
-// Handle all Cregister based formatting in this function to reduce the
+// Handle all FPUregister based formatting in this function to reduce the
// complexity of FormatOption.
-int Decoder::FormatCRegister(Instruction* instr, const char* format) {
+int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'f');
- if (format[1] == 's') { // 'fs: fs register
- int reg = instr->RsField();
- PrintCRegister(reg);
+ if (format[1] == 's') { // 'fs: fs register.
+ int reg = instr->FsValue();
+ PrintFPURegister(reg);
return 2;
- } else if (format[1] == 't') { // 'ft: ft register
- int reg = instr->RtField();
- PrintCRegister(reg);
+ } else if (format[1] == 't') { // 'ft: ft register.
+ int reg = instr->FtValue();
+ PrintFPURegister(reg);
return 2;
- } else if (format[1] == 'd') { // 'fd: fd register
- int reg = instr->RdField();
- PrintCRegister(reg);
+ } else if (format[1] == 'd') { // 'fd: fd register.
+ int reg = instr->FdValue();
+ PrintFPURegister(reg);
return 2;
}
UNREACHABLE();
@@ -331,12 +363,12 @@ int Decoder::FormatCRegister(Instruction* instr, const char* format) {
// characters that were consumed from the formatting string.
int Decoder::FormatOption(Instruction* instr, const char* format) {
switch (format[0]) {
- case 'c': { // 'code for break or trap instructions
+ case 'c': { // 'code for break or trap instructions.
ASSERT(STRING_STARTS_WITH(format, "code"));
PrintCode(instr);
return 4;
}
- case 'i': { // 'imm16u or 'imm26
+ case 'i': { // 'imm16u or 'imm26.
if (format[3] == '1') {
ASSERT(STRING_STARTS_WITH(format, "imm16"));
if (format[5] == 's') {
@@ -356,15 +388,45 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 5;
}
}
- case 'r': { // 'r: registers
+ case 'r': { // 'r: registers.
return FormatRegister(instr, format);
}
- case 'f': { // 'f: Cregisters
- return FormatCRegister(instr, format);
+ case 'f': { // 'f: FPUregisters.
+ return FormatFPURegister(instr, format);
}
- case 's': { // 'sa
- ASSERT(STRING_STARTS_WITH(format, "sa"));
- PrintSa(instr);
+ case 's': { // 'sa.
+ switch (format[1]) {
+ case 'a': {
+ ASSERT(STRING_STARTS_WITH(format, "sa"));
+ PrintSa(instr);
+ return 2;
+ }
+ case 'd': {
+ ASSERT(STRING_STARTS_WITH(format, "sd"));
+ PrintSd(instr);
+ return 2;
+ }
+ case 's': {
+ if (format[2] == '1') {
+ ASSERT(STRING_STARTS_WITH(format, "ss1")); /* ext size */
+ PrintSs1(instr);
+ return 3;
+ } else {
+ ASSERT(STRING_STARTS_WITH(format, "ss2")); /* ins size */
+ PrintSs2(instr);
+ return 3;
+ }
+ }
+ }
+ }
+ case 'b': { // 'bc - Special for bc1 cc field.
+ ASSERT(STRING_STARTS_WITH(format, "bc"));
+ PrintBc(instr);
+ return 2;
+ }
+ case 'C': { // 'Cc - Special for c.xx.d cc field.
+ ASSERT(STRING_STARTS_WITH(format, "Cc"));
+ PrintCc(instr);
return 2;
}
};
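
For readers new to this format mini-language: each '-prefixed directive in a string like "lw 'rt, 'imm16s('rs)" is replaced with the corresponding instruction field, and everything else is copied verbatim. A toy re-implementation of the dispatch (illustrative only; it prints raw register numbers, while the real decoder prints ABI names):

    #include <cstdio>
    #include <cstring>

    // Toy model of Decoder::Format; handles just three directives.
    void ToyFormat(const char* fmt, int rt, int rs, int imm) {
      for (const char* p = fmt; *p != '\0'; ++p) {
        if (*p != '\'') { putchar(*p); continue; }
        if (strncmp(p + 1, "rt", 2) == 0)          { printf("r%d", rt); p += 2; }
        else if (strncmp(p + 1, "rs", 2) == 0)     { printf("r%d", rs); p += 2; }
        else if (strncmp(p + 1, "imm16s", 6) == 0) { printf("%d", imm); p += 6; }
      }
      putchar('\n');
    }

    int main() {
      ToyFormat("lw      'rt, 'imm16s('rs)", 8, 29, -4);  // lw      r8, -4(r29)
      return 0;
    }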
@@ -399,256 +461,460 @@ void Decoder::Unknown(Instruction* instr) {
void Decoder::DecodeTypeRegister(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
- case COP1: // Coprocessor instructions
+ case COP1: // Coprocessor instructions.
switch (instr->RsFieldRaw()) {
- case BC1: // branch on coprocessor condition
+ case BC1: // bc1 handled in DecodeTypeImmediate.
UNREACHABLE();
break;
case MFC1:
- Format(instr, "mfc1 'rt, 'fs");
+ Format(instr, "mfc1 'rt, 'fs");
break;
case MFHC1:
- Format(instr, "mfhc1 rt, 'fs");
+ Format(instr, "mfhc1 'rt, 'fs");
break;
case MTC1:
- Format(instr, "mtc1 'rt, 'fs");
+ Format(instr, "mtc1 'rt, 'fs");
+ break;
+ // These are called "fs" too, although they are not FPU registers.
+ case CTC1:
+ Format(instr, "ctc1 'rt, 'fs");
+ break;
+ case CFC1:
+ Format(instr, "cfc1 'rt, 'fs");
break;
case MTHC1:
- Format(instr, "mthc1 rt, 'fs");
+ Format(instr, "mthc1 'rt, 'fs");
break;
- case S:
case D:
+ switch (instr->FunctionFieldRaw()) {
+ case ADD_D:
+ Format(instr, "add.d 'fd, 'fs, 'ft");
+ break;
+ case SUB_D:
+ Format(instr, "sub.d 'fd, 'fs, 'ft");
+ break;
+ case MUL_D:
+ Format(instr, "mul.d 'fd, 'fs, 'ft");
+ break;
+ case DIV_D:
+ Format(instr, "div.d 'fd, 'fs, 'ft");
+ break;
+ case ABS_D:
+ Format(instr, "abs.d 'fd, 'fs");
+ break;
+ case MOV_D:
+ Format(instr, "mov.d 'fd, 'fs");
+ break;
+ case NEG_D:
+ Format(instr, "neg.d 'fd, 'fs");
+ break;
+ case SQRT_D:
+ Format(instr, "sqrt.d 'fd, 'fs");
+ break;
+ case CVT_W_D:
+ Format(instr, "cvt.w.d 'fd, 'fs");
+ break;
+ case CVT_L_D: {
+ if (mips32r2) {
+ Format(instr, "cvt.l.d 'fd, 'fs");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case TRUNC_W_D:
+ Format(instr, "trunc.w.d 'fd, 'fs");
+ break;
+ case TRUNC_L_D: {
+ if (mips32r2) {
+ Format(instr, "trunc.l.d 'fd, 'fs");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case ROUND_W_D:
+ Format(instr, "round.w.d 'fd, 'fs");
+ break;
+ case FLOOR_W_D:
+ Format(instr, "floor.w.d 'fd, 'fs");
+ break;
+ case CEIL_W_D:
+ Format(instr, "ceil.w.d 'fd, 'fs");
+ break;
+ case CVT_S_D:
+ Format(instr, "cvt.s.d 'fd, 'fs");
+ break;
+ case C_F_D:
+ Format(instr, "c.f.d 'fs, 'ft, 'Cc");
+ break;
+ case C_UN_D:
+ Format(instr, "c.un.d 'fs, 'ft, 'Cc");
+ break;
+ case C_EQ_D:
+ Format(instr, "c.eq.d 'fs, 'ft, 'Cc");
+ break;
+ case C_UEQ_D:
+ Format(instr, "c.ueq.d 'fs, 'ft, 'Cc");
+ break;
+ case C_OLT_D:
+ Format(instr, "c.olt.d 'fs, 'ft, 'Cc");
+ break;
+ case C_ULT_D:
+ Format(instr, "c.ult.d 'fs, 'ft, 'Cc");
+ break;
+ case C_OLE_D:
+ Format(instr, "c.ole.d 'fs, 'ft, 'Cc");
+ break;
+ case C_ULE_D:
+ Format(instr, "c.ule.d 'fs, 'ft, 'Cc");
+ break;
+ default:
+ Format(instr, "unknown.cop1.d");
+ break;
+ }
+ break;
+ case S:
UNIMPLEMENTED_MIPS();
break;
case W:
switch (instr->FunctionFieldRaw()) {
- case CVT_S_W:
- UNIMPLEMENTED_MIPS();
+ case CVT_S_W: // Convert word to float (single).
+ Format(instr, "cvt.s.w 'fd, 'fs");
break;
case CVT_D_W: // Convert word to double.
- Format(instr, "cvt.d.w 'fd, 'fs");
+ Format(instr, "cvt.d.w 'fd, 'fs");
break;
default:
UNREACHABLE();
- };
+ }
break;
case L:
+ switch (instr->FunctionFieldRaw()) {
+ case CVT_D_L: {
+ if (mips32r2) {
+ Format(instr, "cvt.d.l 'fd, 'fs");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case CVT_S_L: {
+ if (mips32r2) {
+ Format(instr, "cvt.s.l 'fd, 'fs");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
case PS:
UNIMPLEMENTED_MIPS();
break;
- break;
default:
UNREACHABLE();
- };
+ }
break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
case JR:
- Format(instr, "jr 'rs");
+ Format(instr, "jr 'rs");
break;
case JALR:
- Format(instr, "jalr 'rs");
+ Format(instr, "jalr 'rs");
break;
case SLL:
if ( 0x0 == static_cast<int>(instr->InstructionBits()))
Format(instr, "nop");
else
- Format(instr, "sll 'rd, 'rt, 'sa");
+ Format(instr, "sll 'rd, 'rt, 'sa");
break;
case SRL:
- Format(instr, "srl 'rd, 'rt, 'sa");
+ if (instr->RsValue() == 0) {
+ Format(instr, "srl 'rd, 'rt, 'sa");
+ } else {
+ if (mips32r2) {
+ Format(instr, "rotr 'rd, 'rt, 'sa");
+ } else {
+ Unknown(instr);
+ }
+ }
break;
case SRA:
- Format(instr, "sra 'rd, 'rt, 'sa");
+ Format(instr, "sra 'rd, 'rt, 'sa");
break;
case SLLV:
- Format(instr, "sllv 'rd, 'rt, 'rs");
+ Format(instr, "sllv 'rd, 'rt, 'rs");
break;
case SRLV:
- Format(instr, "srlv 'rd, 'rt, 'rs");
+ if (instr->SaValue() == 0) {
+ Format(instr, "srlv 'rd, 'rt, 'rs");
+ } else {
+ if (mips32r2) {
+ Format(instr, "rotrv 'rd, 'rt, 'rs");
+ } else {
+ Unknown(instr);
+ }
+ }
break;
case SRAV:
- Format(instr, "srav 'rd, 'rt, 'rs");
+ Format(instr, "srav 'rd, 'rt, 'rs");
break;
case MFHI:
- Format(instr, "mfhi 'rd");
+ Format(instr, "mfhi 'rd");
break;
case MFLO:
- Format(instr, "mflo 'rd");
+ Format(instr, "mflo 'rd");
break;
case MULT:
- Format(instr, "mult 'rs, 'rt");
+ Format(instr, "mult 'rs, 'rt");
break;
case MULTU:
- Format(instr, "multu 'rs, 'rt");
+ Format(instr, "multu 'rs, 'rt");
break;
case DIV:
- Format(instr, "div 'rs, 'rt");
+ Format(instr, "div 'rs, 'rt");
break;
case DIVU:
- Format(instr, "divu 'rs, 'rt");
+ Format(instr, "divu 'rs, 'rt");
break;
case ADD:
- Format(instr, "add 'rd, 'rs, 'rt");
+ Format(instr, "add 'rd, 'rs, 'rt");
break;
case ADDU:
- Format(instr, "addu 'rd, 'rs, 'rt");
+ Format(instr, "addu 'rd, 'rs, 'rt");
break;
case SUB:
- Format(instr, "sub 'rd, 'rs, 'rt");
+ Format(instr, "sub 'rd, 'rs, 'rt");
break;
case SUBU:
- Format(instr, "sub 'rd, 'rs, 'rt");
+ Format(instr, "subu 'rd, 'rs, 'rt");
break;
case AND:
- Format(instr, "and 'rd, 'rs, 'rt");
+ Format(instr, "and 'rd, 'rs, 'rt");
break;
case OR:
- if (0 == instr->RsField()) {
- Format(instr, "mov 'rd, 'rt");
- } else if (0 == instr->RtField()) {
- Format(instr, "mov 'rd, 'rs");
+ if (0 == instr->RsValue()) {
+ Format(instr, "mov 'rd, 'rt");
+ } else if (0 == instr->RtValue()) {
+ Format(instr, "mov 'rd, 'rs");
} else {
- Format(instr, "or 'rd, 'rs, 'rt");
+ Format(instr, "or 'rd, 'rs, 'rt");
}
break;
case XOR:
- Format(instr, "xor 'rd, 'rs, 'rt");
+ Format(instr, "xor 'rd, 'rs, 'rt");
break;
case NOR:
- Format(instr, "nor 'rd, 'rs, 'rt");
+ Format(instr, "nor 'rd, 'rs, 'rt");
break;
case SLT:
- Format(instr, "slt 'rd, 'rs, 'rt");
+ Format(instr, "slt 'rd, 'rs, 'rt");
break;
case SLTU:
- Format(instr, "sltu 'rd, 'rs, 'rt");
+ Format(instr, "sltu 'rd, 'rs, 'rt");
break;
case BREAK:
Format(instr, "break, code: 'code");
break;
case TGE:
- Format(instr, "tge 'rs, 'rt, code: 'code");
+ Format(instr, "tge 'rs, 'rt, code: 'code");
break;
case TGEU:
- Format(instr, "tgeu 'rs, 'rt, code: 'code");
+ Format(instr, "tgeu 'rs, 'rt, code: 'code");
break;
case TLT:
- Format(instr, "tlt 'rs, 'rt, code: 'code");
+ Format(instr, "tlt 'rs, 'rt, code: 'code");
break;
case TLTU:
- Format(instr, "tltu 'rs, 'rt, code: 'code");
+ Format(instr, "tltu 'rs, 'rt, code: 'code");
break;
case TEQ:
- Format(instr, "teq 'rs, 'rt, code: 'code");
+ Format(instr, "teq 'rs, 'rt, code: 'code");
break;
case TNE:
- Format(instr, "tne 'rs, 'rt, code: 'code");
+ Format(instr, "tne 'rs, 'rt, code: 'code");
+ break;
+ case MOVZ:
+ Format(instr, "movz 'rd, 'rs, 'rt");
+ break;
+ case MOVN:
+ Format(instr, "movn 'rd, 'rs, 'rt");
+ break;
+ case MOVCI:
+ if (instr->Bit(16)) {
+ Format(instr, "movt 'rd, 'rs, 'bc");
+ } else {
+ Format(instr, "movf 'rd, 'rs, 'bc");
+ }
break;
default:
UNREACHABLE();
- };
+ }
break;
case SPECIAL2:
switch (instr->FunctionFieldRaw()) {
case MUL:
+ Format(instr, "mul 'rd, 'rs, 'rt");
+ break;
+ case CLZ:
+ Format(instr, "clz 'rd, 'rs");
break;
default:
UNREACHABLE();
- };
+ }
+ break;
+ case SPECIAL3:
+ switch (instr->FunctionFieldRaw()) {
+ case INS: {
+ if (mips32r2) {
+ Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case EXT: {
+ if (mips32r2) {
+ Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
break;
default:
UNREACHABLE();
- };
+ }
}
void Decoder::DecodeTypeImmediate(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
// ------------- REGIMM class.
+ case COP1:
+ switch (instr->RsFieldRaw()) {
+ case BC1:
+ if (instr->FBtrueValue()) {
+ Format(instr, "bc1t 'bc, 'imm16u");
+ } else {
+ Format(instr, "bc1f 'bc, 'imm16u");
+ }
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break; // Case COP1.
case REGIMM:
switch (instr->RtFieldRaw()) {
case BLTZ:
- Format(instr, "bltz 'rs, 'imm16u");
+ Format(instr, "bltz 'rs, 'imm16u");
break;
case BLTZAL:
- Format(instr, "bltzal 'rs, 'imm16u");
+ Format(instr, "bltzal 'rs, 'imm16u");
break;
case BGEZ:
- Format(instr, "bgez 'rs, 'imm16u");
+ Format(instr, "bgez 'rs, 'imm16u");
break;
case BGEZAL:
- Format(instr, "bgezal 'rs, 'imm16u");
+ Format(instr, "bgezal 'rs, 'imm16u");
break;
default:
UNREACHABLE();
- };
- break; // case REGIMM
+ }
+ break; // Case REGIMM.
// ------------- Branch instructions.
case BEQ:
- Format(instr, "beq 'rs, 'rt, 'imm16u");
+ Format(instr, "beq 'rs, 'rt, 'imm16u");
break;
case BNE:
- Format(instr, "bne 'rs, 'rt, 'imm16u");
+ Format(instr, "bne 'rs, 'rt, 'imm16u");
break;
case BLEZ:
- Format(instr, "blez 'rs, 'imm16u");
+ Format(instr, "blez 'rs, 'imm16u");
break;
case BGTZ:
- Format(instr, "bgtz 'rs, 'imm16u");
+ Format(instr, "bgtz 'rs, 'imm16u");
break;
// ------------- Arithmetic instructions.
case ADDI:
- Format(instr, "addi 'rt, 'rs, 'imm16s");
+ Format(instr, "addi 'rt, 'rs, 'imm16s");
break;
case ADDIU:
- Format(instr, "addiu 'rt, 'rs, 'imm16s");
+ Format(instr, "addiu 'rt, 'rs, 'imm16s");
break;
case SLTI:
- Format(instr, "slti 'rt, 'rs, 'imm16s");
+ Format(instr, "slti 'rt, 'rs, 'imm16s");
break;
case SLTIU:
- Format(instr, "sltiu 'rt, 'rs, 'imm16u");
+ Format(instr, "sltiu 'rt, 'rs, 'imm16u");
break;
case ANDI:
- Format(instr, "andi 'rt, 'rs, 'imm16x");
+ Format(instr, "andi 'rt, 'rs, 'imm16x");
break;
case ORI:
- Format(instr, "ori 'rt, 'rs, 'imm16x");
+ Format(instr, "ori 'rt, 'rs, 'imm16x");
break;
case XORI:
- Format(instr, "xori 'rt, 'rs, 'imm16x");
+ Format(instr, "xori 'rt, 'rs, 'imm16x");
break;
case LUI:
- Format(instr, "lui 'rt, 'imm16x");
+ Format(instr, "lui 'rt, 'imm16x");
break;
// ------------- Memory instructions.
case LB:
- Format(instr, "lb 'rt, 'imm16s('rs)");
+ Format(instr, "lb 'rt, 'imm16s('rs)");
+ break;
+ case LH:
+ Format(instr, "lh 'rt, 'imm16s('rs)");
+ break;
+ case LWL:
+ Format(instr, "lwl 'rt, 'imm16s('rs)");
break;
case LW:
- Format(instr, "lw 'rt, 'imm16s('rs)");
+ Format(instr, "lw 'rt, 'imm16s('rs)");
break;
case LBU:
- Format(instr, "lbu 'rt, 'imm16s('rs)");
+ Format(instr, "lbu 'rt, 'imm16s('rs)");
+ break;
+ case LHU:
+ Format(instr, "lhu 'rt, 'imm16s('rs)");
+ break;
+ case LWR:
+ Format(instr, "lwr 'rt, 'imm16s('rs)");
break;
case SB:
- Format(instr, "sb 'rt, 'imm16s('rs)");
+ Format(instr, "sb 'rt, 'imm16s('rs)");
+ break;
+ case SH:
+ Format(instr, "sh 'rt, 'imm16s('rs)");
+ break;
+ case SWL:
+ Format(instr, "swl 'rt, 'imm16s('rs)");
break;
case SW:
- Format(instr, "sw 'rt, 'imm16s('rs)");
+ Format(instr, "sw 'rt, 'imm16s('rs)");
+ break;
+ case SWR:
+ Format(instr, "swr 'rt, 'imm16s('rs)");
break;
case LWC1:
- Format(instr, "lwc1 'ft, 'imm16s('rs)");
+ Format(instr, "lwc1 'ft, 'imm16s('rs)");
break;
case LDC1:
- Format(instr, "ldc1 'ft, 'imm16s('rs)");
+ Format(instr, "ldc1 'ft, 'imm16s('rs)");
break;
case SWC1:
- Format(instr, "swc1 'rt, 'imm16s('fs)");
+ Format(instr, "swc1 'ft, 'imm16s('rs)");
break;
case SDC1:
- Format(instr, "sdc1 'rt, 'imm16s('fs)");
+ Format(instr, "sdc1 'ft, 'imm16s('rs)");
break;
default:
UNREACHABLE();
@@ -660,10 +926,10 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
void Decoder::DecodeTypeJump(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
case J:
- Format(instr, "j 'imm26");
+ Format(instr, "j 'imm26");
break;
case JAL:
- Format(instr, "jal 'imm26");
+ Format(instr, "jal 'imm26");
break;
default:
UNREACHABLE();
@@ -672,10 +938,10 @@ void Decoder::DecodeTypeJump(Instruction* instr) {
// Disassemble the instruction at *instr_ptr into the output buffer.
-int Decoder::InstructionDecode(byte_* instr_ptr) {
+int Decoder::InstructionDecode(byte* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);
// Print raw instruction bytes.
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%08x ",
instr->InstructionBits());
switch (instr->InstructionType()) {
@@ -695,11 +961,11 @@ int Decoder::InstructionDecode(byte_* instr_ptr) {
UNSUPPORTED_MIPS();
}
}
- return Instruction::kInstructionSize;
+ return Instruction::kInstrSize;
}
-} } // namespace assembler::mips
+} } // namespace v8::internal
@@ -707,38 +973,34 @@ int Decoder::InstructionDecode(byte_* instr_ptr) {
namespace disasm {
-namespace v8i = v8::internal;
-
-
-const char* NameConverter::NameOfAddress(byte_* addr) const {
- static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
- v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
- return tmp_buffer.start();
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
}
-const char* NameConverter::NameOfConstant(byte_* addr) const {
+const char* NameConverter::NameOfConstant(byte* addr) const {
return NameOfAddress(addr);
}
const char* NameConverter::NameOfCPURegister(int reg) const {
- return assembler::mips::Registers::Name(reg);
+ return v8::internal::Registers::Name(reg);
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
- return assembler::mips::FPURegister::Name(reg);
+ return v8::internal::FPURegisters::Name(reg);
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
- UNREACHABLE(); // MIPS does not have the concept of a byte register
+ UNREACHABLE(); // MIPS does not have the concept of a byte register.
return "nobytereg";
}
-const char* NameConverter::NameInCode(byte_* addr) const {
+const char* NameConverter::NameInCode(byte* addr) const {
// The default name converter is called for unknown code. So we will not try
// to access any memory.
return "";
@@ -755,31 +1017,32 @@ Disassembler::~Disassembler() {}
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- byte_* instruction) {
- assembler::mips::Decoder d(converter_, buffer);
+ byte* instruction) {
+ v8::internal::Decoder d(converter_, buffer);
return d.InstructionDecode(instruction);
}
-int Disassembler::ConstantPoolSizeAt(byte_* instruction) {
- UNIMPLEMENTED_MIPS();
+// The MIPS assembler does not currently use constant pools.
+int Disassembler::ConstantPoolSizeAt(byte* instruction) {
return -1;
}
-void Disassembler::Disassemble(FILE* f, byte_* begin, byte_* end) {
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
NameConverter converter;
Disassembler d(converter);
- for (byte_* pc = begin; pc < end;) {
+ for (byte* pc = begin; pc < end;) {
v8::internal::EmbeddedVector<char, 128> buffer;
buffer[0] = '\0';
- byte_* prev_pc = pc;
+ byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
fprintf(f, "%p %08x %s\n",
prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
}
}
+
#undef UNSUPPORTED
} // namespace disasm
diff --git a/deps/v8/src/mips/frames-mips.cc b/deps/v8/src/mips/frames-mips.cc
index d63056299..faaa0e0f4 100644
--- a/deps/v8/src/mips/frames-mips.cc
+++ b/deps/v8/src/mips/frames-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -37,57 +37,8 @@ namespace v8 {
namespace internal {
-StackFrame::Type StackFrame::ComputeType(State* state) {
- ASSERT(state->fp != NULL);
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- return ARGUMENTS_ADAPTOR;
- }
- // The marker and function offsets overlap. If the marker isn't a
- // smi then the frame is a JavaScript frame -- and the marker is
- // really the function.
- const int offset = StandardFrameConstants::kMarkerOffset;
- Object* marker = Memory::Object_at(state->fp + offset);
- if (!marker->IsSmi()) return JAVA_SCRIPT;
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
-
-
Address ExitFrame::ComputeStackPointer(Address fp) {
- Address sp = fp + ExitFrameConstants::kSPDisplacement;
- const int offset = ExitFrameConstants::kCodeOffset;
- Object* code = Memory::Object_at(fp + offset);
- bool is_debug_exit = code->IsSmi();
- if (is_debug_exit) {
- sp -= kNumJSCallerSaved * kPointerSize;
- }
- return sp;
-}
-
-
-void ExitFrame::Iterate(ObjectVisitor* v) const {
- // Do nothing
-}
-
-
-int JavaScriptFrame::GetProvidedParametersCount() const {
- return ComputeParametersCount();
-}
-
-
-Address JavaScriptFrame::GetCallerStackPointer() const {
- UNIMPLEMENTED_MIPS();
- return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN
-}
-
-
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- UNIMPLEMENTED_MIPS();
- return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN
-}
-
-
-Address InternalFrame::GetCallerStackPointer() const {
- return fp() + StandardFrameConstants::kCallerSPOffset;
+ return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
}
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index 06e9979c2..2e720fb17 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -40,16 +40,17 @@ namespace internal {
static const int kNumRegs = 32;
static const RegList kJSCallerSaved =
+ 1 << 2 | // v0
1 << 4 | // a0
1 << 5 | // a1
1 << 6 | // a2
1 << 7; // a3
-static const int kNumJSCallerSaved = 4;
+static const int kNumJSCallerSaved = 5;
// Return the code of the n-th caller-saved register available to JavaScript
-// e.g. JSCallerSavedReg(0) returns r0.code() == 0.
+// e.g. JSCallerSavedReg(0) returns a0.code() == 4.
int JSCallerSavedCode(int n);
@@ -58,14 +59,63 @@ static const RegList kCalleeSaved =
// Saved temporaries.
1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 |
1 << 20 | 1 << 21 | 1 << 22 | 1 << 23 |
- // gp, sp, fp
+ // gp, sp, fp.
1 << 28 | 1 << 29 | 1 << 30;
static const int kNumCalleeSaved = 11;
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+// TODO(mips): Only 8 registers may actually be sufficient. Revisit.
+static const int kNumSafepointRegisters = 16;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+static const int kNumSafepointSavedRegisters =
+ kNumJSCallerSaved + kNumCalleeSaved;
+
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
+static const int kUndefIndex = -1;
+// Map of stack indexes that correspond to the codes of saved registers.
+static const int kSafepointRegisterStackIndexMap[kNumRegs] = {
+ kUndefIndex,
+ kUndefIndex,
+ 0, // v0
+ kUndefIndex,
+ 1, // a0
+ 2, // a1
+ 3, // a2
+ 4, // a3
+ kUndefIndex,
+ kUndefIndex,
+ kUndefIndex,
+ kUndefIndex,
+ kUndefIndex,
+ kUndefIndex,
+ kUndefIndex,
+ kUndefIndex,
+ 5, // Saved temporaries.
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ kUndefIndex,
+ kUndefIndex,
+ kUndefIndex,
+ kUndefIndex,
+ 13, // gp
+ 14, // sp
+ 15, // fp
+ kUndefIndex
+};
+
// ----------------------------------------------------
@@ -88,23 +138,24 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
- // Exit frames have a debug marker on the stack.
- static const int kSPDisplacement = -1 * kPointerSize;
+ // See some explanation in MacroAssembler::EnterExitFrame.
+ // This marks the top of the extra allocated stack space.
+ static const int kStackSpaceOffset = -3 * kPointerSize;
- // The debug marker is just above the frame pointer.
- static const int kDebugMarkOffset = -1 * kPointerSize;
- // Must be the same as kDebugMarkOffset. Alias introduced when upgrading.
- static const int kCodeOffset = -1 * kPointerSize;
+ static const int kCodeOffset = -2 * kPointerSize;
- static const int kSavedRegistersOffset = 0 * kPointerSize;
+ static const int kSPOffset = -1 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = +0 * kPointerSize;
// The calling JS function is between FP and PC.
static const int kCallerPCOffset = +1 * kPointerSize;
+ // MIPS-specific: a pointer to the old sp to avoid unnecessary calculations.
+ static const int kCallerSPOffset = +2 * kPointerSize;
+
// FP-relative displacement of the caller's SP.
- static const int kCallerSPDisplacement = +3 * kPointerSize;
+ static const int kCallerSPDisplacement = +2 * kPointerSize;
};
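
Put together, the constants above imply the following fp-relative exit-frame slots (a sketch assuming 4-byte pointers; the offsets are taken straight from this hunk):

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;  // 32-bit MIPS.
      printf("fp %+3d : caller sp\n",          +2 * kPointerSize);
      printf("fp %+3d : caller pc (ra)\n",     +1 * kPointerSize);
      printf("fp %+3d : caller fp\n",           0 * kPointerSize);
      printf("fp %+3d : saved sp\n",           -1 * kPointerSize);
      printf("fp %+3d : code object\n",        -2 * kPointerSize);
      printf("fp %+3d : top of stack space\n", -3 * kPointerSize);
      return 0;
    }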
@@ -123,9 +174,12 @@ class StandardFrameConstants : public AllStatic {
static const int kRegularArgsSlotsSize = kRArgsSlotsSize;
// C/C++ argument slots size.
- static const int kCArgsSlotsSize = 4 * kPointerSize;
+ static const int kCArgSlotCount = 4;
+ static const int kCArgsSlotsSize = kCArgSlotCount * kPointerSize;
// JS argument slots size.
static const int kJSArgsSlotsSize = 0 * kPointerSize;
+ // Assembly builtins argument slots size.
+ static const int kBArgsSlotsSize = 0 * kPointerSize;
};
@@ -133,7 +187,7 @@ class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kSavedRegistersOffset = +2 * kPointerSize;
+ static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// Caller SP-relative.
@@ -159,6 +213,7 @@ inline Object* JavaScriptFrame::function_slot_object() const {
return Memory::Object_at(fp() + offset);
}
+
} } // namespace v8::internal
#endif
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 17ee531a3..ea0b09225 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,73 +29,643 @@
#if defined(V8_TARGET_ARCH_MIPS)
-#include "codegen-inl.h"
+// Note on MIPS implementation:
+//
+// The result_register() for MIPS is the 'v0' register, which is defined
+// by the ABI to contain function return values. However, the first
+// parameter to a function is defined to be 'a0'. So there are many
+// places where we have to move a previous result in v0 to a0 for the
+// next call: mov(a0, v0). This is not needed on the other architectures.
+
+#include "code-stubs.h"
+#include "codegen.h"
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
#include "parser.h"
+#include "scopes.h"
+#include "stub-cache.h"
+
+#include "mips/code-stubs-mips.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
-void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
- UNIMPLEMENTED_MIPS();
+
+static unsigned GetPropertyId(Property* property) {
+ if (property->is_synthetic()) return AstNode::kNoNumber;
+ return property->id();
+}
+
+
+// A patch site is a location in the code that can be patched. This class has
+// a number of methods to emit the patchable code and the method EmitPatchInfo
+// to record a marker back to the patchable code. The marker is an
+// andi at, rx, #yyy instruction, where x * 0x0000ffff + yyy (the raw 16-bit
+// immediate value) is the delta from the pc to the first instruction of the
+// patchable code.
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() {
+ ASSERT(patch_site_.is_bound() == info_emitted_);
+ }
+
+  // When initially emitting this, ensure that a jump is always generated
+  // to skip the inlined smi code.
+ void EmitJumpIfNotSmi(Register reg, Label* target) {
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ __ bind(&patch_site_);
+ __ andi(at, reg, 0);
+ // Always taken before patched.
+ __ Branch(target, eq, at, Operand(zero_reg));
+ }
+
+  // When initially emitting this, ensure that a jump is never generated
+  // to skip the inlined smi code.
+ void EmitJumpIfSmi(Register reg, Label* target) {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ __ bind(&patch_site_);
+ __ andi(at, reg, 0);
+ // Never taken before patched.
+ __ Branch(target, ne, at, Operand(zero_reg));
+ }
+
+ void EmitPatchInfo() {
+ int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+ Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
+ __ andi(at, reg, delta_to_patch_site % kImm16Mask);
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ }
+
+ bool is_bound() const { return patch_site_.is_bound(); }
+
+ private:
+ MacroAssembler* masm_;
+ Label patch_site_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
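
To see how EmitPatchInfo packs the pc delta into the marker instruction (assuming kImm16Mask == 0xffff, per the MIPS constants): the rx register code carries the high part and the andi immediate carries the low part, so the debugger can reconstruct the delta:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kImm16Mask = 0xffff;
      uint32_t delta = 0x1234;                 // Hypothetical pc delta.
      uint32_t reg_code = delta / kImm16Mask;  // Ends up in the rx field.
      uint32_t imm = delta % kImm16Mask;       // Ends up as the andi immediate.
      // Reversing the encoding recovers the delta to the patchable code.
      assert(reg_code * kImm16Mask + imm == delta);
      return 0;
    }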
+
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right. The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+// o a1: the JS function object being called (ie, ourselves)
+// o cp: our context
+// o fp: our caller's frame pointer
+// o sp: stack pointer
+// o ra: return address
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-mips.h for its layout.
+void FullCodeGenerator::Generate(CompilationInfo* info) {
+ ASSERT(info_ == NULL);
+ info_ = info;
+ SetFunctionPosition(function());
+ Comment cmnt(masm_, "[ function compiled by full code generator");
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ __ stop("stop-at");
+ }
+#endif
+
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). t1 is zero for method calls and non-zero for
+ // function calls.
+ if (info->is_strict_mode() || info->is_native()) {
+ Label ok;
+ __ Branch(&ok, eq, t1, Operand(zero_reg));
+ int receiver_offset = scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ sw(a2, MemOperand(sp, receiver_offset));
+ __ bind(&ok);
+ }
+
+ int locals_count = scope()->num_stack_slots();
+
+ __ Push(ra, fp, cp, a1);
+ if (locals_count > 0) {
+ // Load undefined value here, so the value is ready for the loop
+ // below.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ }
+ // Adjust fp to point to caller's fp.
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
+
+ { Comment cmnt(masm_, "[ Allocate locals");
+ for (int i = 0; i < locals_count; i++) {
+ __ push(at);
+ }
+ }
+
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is in a1.
+ __ push(a1);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ }
+ function_in_register = false;
+ // Context is returned in both v0 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = scope()->parameter(i)->AsSlot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ lw(a0, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ __ li(a1, Operand(Context::SlotOffset(slot->index())));
+ __ addu(a2, cp, a1);
+ __ sw(a0, MemOperand(a2, 0));
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use two more registers to avoid
+ // clobbering cp.
+ __ mov(a2, cp);
+ __ RecordWrite(a2, a1, a3);
+ }
+ }
+ }
+
+ Variable* arguments = scope()->arguments();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register) {
+ // Load this again, if it's used by the local context below.
+ __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ mov(a3, a1);
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ int offset = scope()->num_parameters() * kPointerSize;
+ __ Addu(a2, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ li(a1, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ Push(a3, a2, a1);
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+    // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub::Type type;
+ if (is_strict_mode()) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ }
+ ArgumentsAccessStub stub(type);
+ __ CallStub(&stub);
+
+ Move(arguments->AsSlot(), v0, a1, a2);
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
+
+ } else {
+ { Comment cmnt(masm_, "[ Declarations");
+ // For named function expressions, declare the function name as a
+ // constant.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+ Label ok;
+ __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(t0));
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ bind(&ok);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(function()->body());
+ ASSERT(loop_depth() == 0);
+ }
+ }
+
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
+ { Comment cmnt(masm_, "[ return <undefined>;");
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ }
+ EmitReturnSequence();
+}
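
As a cross-check of the prologue above (a sketch; it assumes Push stores its first operand at the highest address, which is what the standard frame layout requires): after Push(ra, fp, cp, a1) and the Addu, fp points at the saved caller fp, with ra one word above and cp and the function object below:

    #include <cassert>

    int main() {
      const int kPointerSize = 4;
      int sp = 100 * kPointerSize;        // Hypothetical sp before the pushes.
      int new_sp = sp - 4 * kPointerSize; // Four words pushed.
      int a1_slot = new_sp + 0 * kPointerSize;  // The JS function object.
      int cp_slot = new_sp + 1 * kPointerSize;  // The context.
      int fp_slot = new_sp + 2 * kPointerSize;  // Saved caller fp.
      int ra_slot = new_sp + 3 * kPointerSize;  // Saved return address.
      int fp = new_sp + 2 * kPointerSize;  // __ Addu(fp, sp, 2 * kPointerSize).
      assert(fp == fp_slot);               // fp points at the saved caller fp.
      assert(ra_slot == fp + kPointerSize);
      assert(cp_slot == fp - kPointerSize);
      assert(a1_slot == fp - 2 * kPointerSize);
      return 0;
    }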
+
+
+void FullCodeGenerator::ClearAccumulator() {
+ ASSERT(Smi::FromInt(0) == 0);
+ __ mov(v0, zero_reg);
+}
+
+
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+ Comment cmnt(masm_, "[ Stack check");
+ Label ok;
+ __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(t0));
+ StackCheckStub stub;
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordStackCheck(stmt->OsrEntryId());
+
+ __ CallStub(&stub);
+ __ bind(&ok);
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ // Record a mapping of the OSR id to this PC. This is used if the OSR
+ // entry becomes the target of a bailout. We don't expect it to be, but
+ // we want it to work if it is.
+ PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
}
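
The check itself reduces to one unsigned comparison; a minimal model (names are illustrative, not V8 API):

    #include <cassert>
    #include <cstdint>

    // __ Branch(&ok, hs, sp, Operand(t0)) skips the stub when sp >= limit
    // ('hs' is unsigned higher-or-same), so the stub runs only when sp has
    // dropped below the limit.
    bool NeedsStackCheck(uintptr_t sp, uintptr_t stack_limit) {
      return sp < stack_limit;
    }

    int main() {
      assert(!NeedsStackCheck(0x8000, 0x4000));  // Plenty of stack left.
      assert(NeedsStackCheck(0x2000, 0x4000));   // Below the limit: call stub.
      return 0;
    }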
void FullCodeGenerator::EmitReturnSequence() {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ Branch(&return_label_);
+ } else {
+ __ bind(&return_label_);
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in v0.
+ __ push(v0);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+
+#ifdef DEBUG
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+#endif
+    // Make sure that the trampoline pool is not emitted inside of the
+    // return sequence.
+ { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+      // Here we use masm_-> instead of the __ macro to prevent the code
+      // coverage tool from instrumenting it, as we rely on the code size here.
+ int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
+ CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ __ RecordJSReturn();
+ masm_->mov(sp, fp);
+ masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
+ masm_->Addu(sp, sp, Operand(sp_delta));
+ masm_->Jump(ra);
+ }
+
+#ifdef DEBUG
+ // Check that the size of the code used for returning is large enough
+ // for the debugger's requirements.
+ ASSERT(Assembler::kJSReturnSequenceInstructions <=
+ masm_->InstructionsGeneratedSince(&check_exit_codesize));
+#endif
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
+ codegen()->Move(result_register(), slot);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
+ codegen()->Move(result_register(), slot);
+ __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
+ // For simplicity we always test the accumulator register.
+ codegen()->Move(result_register(), slot);
+ codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->DoTest(this);
}
-void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
}
-void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
}
-void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
- UNIMPLEMENTED_MIPS();
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+ __ push(result_register());
}
-void FullCodeGenerator::ApplyTOS(Expression::Context context) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+ codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ true,
+ true_label_,
+ false_label_);
+ if (index == Heap::kUndefinedValueRootIndex ||
+ index == Heap::kNullValueRootIndex ||
+ index == Heap::kFalseValueRootIndex) {
+ if (false_label_ != fall_through_) __ Branch(false_label_);
+ } else if (index == Heap::kTrueValueRootIndex) {
+ if (true_label_ != fall_through_) __ Branch(true_label_);
+ } else {
+ __ LoadRoot(result_register(), index);
+ codegen()->DoTest(this);
+ }
}
-void FullCodeGenerator::DropAndApply(int count,
- Expression::Context context,
- Register reg) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
}
-void FullCodeGenerator::Apply(Expression::Context context,
- Label* materialize_true,
- Label* materialize_false) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Handle<Object> lit) const {
+ __ li(result_register(), Operand(lit));
}
-void FullCodeGenerator::DoTest(Expression::Context context) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ // Immediates cannot be pushed directly.
+ __ li(result_register(), Operand(lit));
+ __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+ codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ true,
+ true_label_,
+ false_label_);
+ ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ if (false_label_ != fall_through_) __ Branch(false_label_);
+ } else if (lit->IsTrue() || lit->IsJSObject()) {
+ if (true_label_ != fall_through_) __ Branch(true_label_);
+ } else if (lit->IsString()) {
+ if (String::cast(*lit)->length() == 0) {
+ if (false_label_ != fall_through_) __ Branch(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ Branch(true_label_);
+ }
+ } else if (lit->IsSmi()) {
+ if (Smi::cast(*lit)->value() == 0) {
+ if (false_label_ != fall_through_) __ Branch(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ Branch(true_label_);
+ }
+ } else {
+ // For simplicity we always test the accumulator register.
+ __ li(result_register(), Operand(lit));
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+ int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+ __ Move(result_register(), reg);
+}
+
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ if (count > 1) __ Drop(count - 1);
+ __ sw(reg, MemOperand(sp, 0));
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ // For simplicity we always test the accumulator register.
+ __ Drop(count);
+ __ Move(result_register(), reg);
+ codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == materialize_false);
+ __ bind(materialize_true);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ Branch(&done);
+ __ bind(materialize_false);
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ push(at);
+ __ Branch(&done);
+ __ bind(materialize_false);
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ __ push(at);
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == true_label_);
+ ASSERT(materialize_false == false_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(result_register(), value_root_index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(at, value_root_index);
+ __ push(at);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+ codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ true,
+ true_label_,
+ false_label_);
+ if (flag) {
+ if (true_label_ != fall_through_) __ Branch(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ Branch(false_label_);
+ }
+}
+
+
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (CpuFeatures::IsSupported(FPU)) {
+ ToBooleanStub stub(result_register());
+ __ CallStub(&stub);
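+    // The stub returns zero in v0 for false and a non-zero value for
+    // true, so the split below compares against zero.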
+ __ mov(at, zero_reg);
+ } else {
+ // Call the runtime to find the boolean value of the source and then
+ // translate it into control flow to the pair of labels.
+ __ push(result_register());
+ __ CallRuntime(Runtime::kToBool, 1);
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ }
+ Split(ne, v0, Operand(at), if_true, if_false, fall_through);
+}
+
+
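+// Emit a branch on condition cc to if_true and a branch to if_false,
+// omitting whichever branch (if any) targets the fall-through label.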
+void FullCodeGenerator::Split(Condition cc,
+ Register lhs,
+ const Operand& rhs,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ Branch(if_true, cc, lhs, rhs);
+ } else if (if_true == fall_through) {
+ __ Branch(if_false, NegateCondition(cc), lhs, rhs);
+ } else {
+ __ Branch(if_true, cc, lhs, rhs);
+ __ Branch(if_false);
+ }
}
MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
- UNIMPLEMENTED_MIPS();
- return MemOperand(zero_reg, 0); // UNIMPLEMENTED RETURN
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return MemOperand(fp, SlotOffset(slot));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ scope()->ContextChainLength(slot->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return ContextOperand(scratch, slot->index());
+ }
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
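+  // Unreachable; the return below only silences the compiler.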
+ return MemOperand(v0, 0);
}
void FullCodeGenerator::Move(Register destination, Slot* source) {
- UNIMPLEMENTED_MIPS();
+ // Use destination as scratch.
+ MemOperand slot_operand = EmitSlotSearch(source, destination);
+ __ lw(destination, slot_operand);
+}
+
+
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+ bool should_normalize,
+ Label* if_true,
+ Label* if_false) {
+ // Only prepare for bailouts before splits if we're in a test
+ // context. Otherwise, we let the Visit function deal with the
+ // preparation to avoid preparing with the same AST id twice.
+ if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+ Label skip;
+ if (should_normalize) __ Branch(&skip);
+
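+  // Record the bailout state for every expression pending on the
+  // forward-bailout stack.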
+ ForwardBailoutStack* current = forward_bailout_stack_;
+ while (current != NULL) {
+ PrepareForBailout(current->expr(), state);
+ current = current->parent();
+ }
+
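+  // When normalizing, the value produced by a stub call need not be a
+  // canonical boolean; compare it against true to redo the split.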
+ if (should_normalize) {
+ __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+ Split(eq, a0, Operand(t0), if_true, if_false, NULL);
+ __ bind(&skip);
+ }
}
@@ -103,157 +673,3617 @@ void FullCodeGenerator::Move(Slot* dst,
Register src,
Register scratch1,
Register scratch2) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
+ ASSERT(!scratch1.is(src) && !scratch2.is(src));
+ MemOperand location = EmitSlotSearch(dst, scratch1);
+ __ sw(src, location);
+ // Emit the write barrier code if the location is in the heap.
+ if (dst->type() == Slot::CONTEXT) {
+ __ RecordWrite(scratch1,
+ Operand(Context::SlotOffset(dst->index())),
+ scratch2,
+ src);
+ }
+}
+
+
+void FullCodeGenerator::EmitDeclaration(Variable* variable,
+ Variable::Mode mode,
+ FunctionLiteral* function) {
+ Comment cmnt(masm_, "[ Declaration");
+ ASSERT(variable != NULL); // Must have been resolved.
+ Slot* slot = variable->AsSlot();
+ Property* prop = variable->AsProperty();
+
+ if (slot != NULL) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ if (mode == Variable::CONST) {
+ __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ __ sw(t0, MemOperand(fp, SlotOffset(slot)));
+ } else if (function != NULL) {
+ VisitForAccumulatorValue(function);
+ __ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
+ }
+ break;
+
+ case Slot::CONTEXT:
+ // We bypass the general EmitSlotSearch because we know more about
+ // this specific context.
+
+ // The variable in the decl always resides in the current function
+ // context.
+ ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+ if (FLAG_debug_code) {
+ // Check that we're not inside a 'with'.
+ __ lw(a1, ContextOperand(cp, Context::FCONTEXT_INDEX));
+ __ Check(eq, "Unexpected declaration in current context.",
+ a1, Operand(cp));
+ }
+ if (mode == Variable::CONST) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ sw(at, ContextOperand(cp, slot->index()));
+ // No write barrier since the_hole_value is in old space.
+ } else if (function != NULL) {
+ VisitForAccumulatorValue(function);
+ __ sw(result_register(), ContextOperand(cp, slot->index()));
+ int offset = Context::SlotOffset(slot->index());
+ // We know that we have written a function, which is not a smi.
+ __ mov(a1, cp);
+ __ RecordWrite(a1, Operand(offset), a2, result_register());
+ }
+ break;
+
+ case Slot::LOOKUP: {
+ __ li(a2, Operand(variable->name()));
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(mode == Variable::VAR ||
+ mode == Variable::CONST);
+ PropertyAttributes attr =
+ (mode == Variable::VAR) ? NONE : READ_ONLY;
+ __ li(a1, Operand(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (mode == Variable::CONST) {
+ __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
+ __ Push(cp, a2, a1, a0);
+ } else if (function != NULL) {
+ __ Push(cp, a2, a1);
+ // Push initial value for function declaration.
+ VisitForStackValue(function);
+ } else {
+ ASSERT(Smi::FromInt(0) == 0);
+ // No initial value!
+          __ mov(a0, zero_reg);  // Smi::FromInt(0) is zero.
+ __ Push(cp, a2, a1, a0);
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+
+ } else if (prop != NULL) {
+ // A const declaration aliasing a parameter is an illegal redeclaration.
+ ASSERT(mode != Variable::CONST);
+ if (function != NULL) {
+ // We are declaring a function that rewrites to a property.
+ // Use (keyed) IC to set the initial value. We cannot visit the
+ // rewrite because it's shared and we risk recording duplicate AST
+ // IDs for bailouts from optimized code.
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ { AccumulatorValueContext for_object(this);
+ EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ }
+
+ __ push(result_register());
+ VisitForAccumulatorValue(function);
+ __ mov(a0, result_register());
+ __ pop(a2);
+
+ ASSERT(prop->key()->AsLiteral() != NULL &&
+ prop->key()->AsLiteral()->handle()->IsSmi());
+ __ li(a1, Operand(prop->key()->AsLiteral()->handle()));
+
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ // Value in v0 is ignored (declarations are statements).
+ }
+ }
}
void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
- UNIMPLEMENTED_MIPS();
+ EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
}
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- UNIMPLEMENTED_MIPS();
+ // Call the runtime to declare the globals.
+ // The context is the first argument.
+ __ li(a2, Operand(pairs));
+ __ li(a1, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+ __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
+ __ Push(cp, a2, a1, a0);
+ __ CallRuntime(Runtime::kDeclareGlobals, 4);
+ // Return value is ignored.
}
-void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ Comment cmnt(masm_, "[ SwitchStatement");
+ Breakable nested_statement(this, stmt);
+ SetStatementPosition(stmt);
+
+ // Keep the switch value on the stack until a case matches.
+ VisitForStackValue(stmt->tag());
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ CaseClause* default_clause = NULL; // Can occur anywhere in the list.
+
+ Label next_test; // Recycled for each test.
+ // Compile all the tests with branches to their bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ clause->body_target()->Unuse();
+
+ // The default is not a test, but remember it as final fall through.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ __ bind(&next_test);
+ next_test.Unuse();
+
+ // Compile the label expression.
+ VisitForAccumulatorValue(clause->label());
+ __ mov(a0, result_register()); // CompareStub requires args in a0, a1.
+
+ // Perform the comparison as if via '==='.
+ __ lw(a1, MemOperand(sp, 0)); // Switch value.
+ bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ JumpPatchSite patch_site(masm_);
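+    // The patch site records the inlined smi check so that the compare
+    // IC can patch it later.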
+ if (inline_smi_code) {
+ Label slow_case;
+ __ or_(a2, a1, a0);
+ patch_site.EmitJumpIfNotSmi(a2, &slow_case);
+
+ __ Branch(&next_test, ne, a1, Operand(a0));
+ __ Drop(1); // Switch value is no longer needed.
+ __ Branch(clause->body_target());
+
+ __ bind(&slow_case);
+ }
+
+ // Record position before stub call for type feedback.
+ SetSourcePosition(clause->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ EmitCallIC(ic, &patch_site, clause->CompareId());
+
+ __ Branch(&next_test, ne, v0, Operand(zero_reg));
+ __ Drop(1); // Switch value is no longer needed.
+ __ Branch(clause->body_target());
+ }
+
+ // Discard the test value and jump to the default if present, otherwise to
+ // the end of the statement.
+ __ bind(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ if (default_clause == NULL) {
+ __ Branch(nested_statement.break_target());
+ } else {
+ __ Branch(default_clause->body_target());
+ }
+
+ // Compile all the case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ Comment cmnt(masm_, "[ Case body");
+ CaseClause* clause = clauses->at(i);
+ __ bind(clause->body_target());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ VisitStatements(clause->statements());
+ }
+
+ __ bind(nested_statement.break_target());
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ Comment cmnt(masm_, "[ ForInStatement");
+ SetStatementPosition(stmt);
+
+ Label loop, exit;
+ ForIn loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // Get the object to enumerate over. Both SpiderMonkey and JSC
+ // ignore null and undefined in contrast to the specification; see
+ // ECMA-262 section 12.6.4.
+ VisitForAccumulatorValue(stmt->enumerable());
+ __ mov(a0, result_register()); // Result as param to InvokeBuiltin below.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&exit, eq, a0, Operand(at));
+ Register null_value = t1;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ Branch(&exit, eq, a0, Operand(null_value));
+
+ // Convert the object to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(a0, &convert);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ bind(&convert);
+ __ push(a0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a0, v0);
+ __ bind(&done_convert);
+ __ push(a0);
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ Label next, call_runtime;
+ // Preload a couple of values used in the loop.
+ Register empty_fixed_array_value = t2;
+ __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Register empty_descriptor_array_value = t3;
+ __ LoadRoot(empty_descriptor_array_value,
+ Heap::kEmptyDescriptorArrayRootIndex);
+ __ mov(a1, a0);
+ __ bind(&next);
+
+ // Check that there are no elements. Register a1 contains the
+ // current JS object we've reached through the prototype chain.
+ __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
+ __ Branch(&call_runtime, ne, a2, Operand(empty_fixed_array_value));
+
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in a2 for the subsequent
+ // prototype load.
+ __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
+ __ JumpIfSmi(a3, &call_runtime);
+
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors (a3). This is the case if the next enumeration
+ // index field does not contain a smi.
+ __ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
+ __ JumpIfSmi(a3, &call_runtime);
+
+ // For all objects but the receiver, check that the cache is empty.
+ Label check_prototype;
+ __ Branch(&check_prototype, eq, a1, Operand(a0));
+ __ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ Branch(&call_runtime, ne, a3, Operand(empty_fixed_array_value));
+
+ // Load the prototype from the map and loop if non-null.
+ __ bind(&check_prototype);
+ __ lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
+ __ Branch(&next, ne, a1, Operand(null_value));
+
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ Label use_cache;
+ __ lw(v0, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ Branch(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(a0); // Duplicate the enumerable object on the stack.
+ __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
+ Label fixed_array;
+ __ mov(a2, v0);
+ __ lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kMetaMapRootIndex);
+ __ Branch(&fixed_array, ne, a1, Operand(at));
+
+ // We got a map in register v0. Get the enumeration cache from it.
+ __ bind(&use_cache);
+ __ LoadInstanceDescriptors(v0, a1);
+ __ lw(a1, FieldMemOperand(a1, DescriptorArray::kEnumerationIndexOffset));
+ __ lw(a2, FieldMemOperand(a1, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  // Set up the four remaining stack slots.
+ __ push(v0); // Map.
+ __ lw(a1, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ li(a0, Operand(Smi::FromInt(0)));
+ // Push enumeration cache, enumeration cache length (as smi) and zero.
+ __ Push(a2, a1, a0);
+ __ jmp(&loop);
+
+ // We got a fixed array in register v0. Iterate through that.
+ __ bind(&fixed_array);
+ __ li(a1, Operand(Smi::FromInt(0))); // Map (0) - force slow check.
+ __ Push(a1, v0);
+ __ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ li(a0, Operand(Smi::FromInt(0)));
+ __ Push(a1, a0); // Fixed array length (as smi) and initial index.
+
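+  // The loop state occupies five stack slots (from the top): the current
+  // index (smi), the length (smi), the enum cache or fixed array, the
+  // expected map (or smi 0 for the slow path), and the enumerable object.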
+ // Generate code for doing the condition check.
+ __ bind(&loop);
+ // Load the current count to a0, load the length to a1.
+ __ lw(a0, MemOperand(sp, 0 * kPointerSize));
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Branch(loop_statement.break_target(), hs, a0, Operand(a1));
+
+ // Get the current entry of the array into register a3.
+ __ lw(a2, MemOperand(sp, 2 * kPointerSize));
+ __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(t0, a2, t0); // Array base + scaled (smi) index.
+ __ lw(a3, MemOperand(t0)); // Current entry.
+
+ // Get the expected map from the stack or a zero map in the
+ // permanent slow case into register a2.
+ __ lw(a2, MemOperand(sp, 3 * kPointerSize));
+
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we have to filter the key.
+ Label update_each;
+ __ lw(a1, MemOperand(sp, 4 * kPointerSize));
+ __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Branch(&update_each, eq, t0, Operand(a2));
+
+ // Convert the entry to a string or (smi) 0 if it isn't a property
+ // any more. If the property has been removed while iterating, we
+ // just skip it.
+ __ push(a1); // Enumerable.
+ __ push(a3); // Current entry.
+ __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ mov(a3, result_register());
+ __ Branch(loop_statement.continue_target(), eq, a3, Operand(zero_reg));
+
+ // Update the 'each' property or variable from the possibly filtered
+ // entry in register a3.
+ __ bind(&update_each);
+ __ mov(result_register(), a3);
+ // Perform the assignment as if via '='.
+ { EffectContext context(this);
+ EmitAssignment(stmt->each(), stmt->AssignmentId());
+ }
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+  // Generate code for going to the next element by incrementing the
+  // index (smi) stored on top of the stack.
+ __ bind(loop_statement.continue_target());
+ __ pop(a0);
+ __ Addu(a0, a0, Operand(Smi::FromInt(1)));
+ __ push(a0);
+
+ EmitStackCheck(stmt);
+ __ Branch(&loop);
+
+ // Remove the pointers stored on the stack.
+ __ bind(loop_statement.break_target());
+ __ Drop(5);
+
+ // Exit and decrement the loop depth.
+ __ bind(&exit);
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+ bool pretenure) {
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning. If
+ // we're running with the --always-opt or the --prepare-always-opt
+ // flag, we need to use the runtime function so that the new function
+ // we are creating here gets a chance to have its code optimized and
+ // doesn't just get a copy of the existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ !pretenure &&
+ scope()->is_function_scope() &&
+ info->num_literals() == 0) {
+ FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+ __ li(a0, Operand(info));
+ __ push(a0);
+ __ CallStub(&stub);
+ } else {
+ __ li(a0, Operand(info));
+ __ LoadRoot(a1, pretenure ? Heap::kTrueValueRootIndex
+ : Heap::kFalseValueRootIndex);
+ __ Push(cp, a0, a1);
+ __ CallRuntime(Runtime::kNewClosure, 3);
+ }
+ context()->Plug(v0);
}
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ VariableProxy");
+ EmitVariableLoad(expr->var());
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
+ Slot* slot,
+ TypeofState typeof_state,
+ Label* slow) {
+ Register current = cp;
+ Register next = a1;
+ Register temp = a2;
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_eval()) {
+ // Check that extension is NULL.
+ __ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+ __ Branch(slow, ne, temp, Operand(zero_reg));
+ }
+ // Load next context in chain.
+ __ lw(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions.
+ if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s->is_eval_scope()) {
+ Label loop, fast;
+ if (!current.is(next)) {
+ __ Move(next, current);
+ }
+ __ bind(&loop);
+ // Terminate at global context.
+ __ lw(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+ __ LoadRoot(t0, Heap::kGlobalContextMapRootIndex);
+ __ Branch(&fast, eq, temp, Operand(t0));
+ // Check that extension is NULL.
+ __ lw(temp, ContextOperand(next, Context::EXTENSION_INDEX));
+ __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Load next context in chain.
+ __ lw(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+ __ Branch(&loop);
+ __ bind(&fast);
+ }
+
+ __ lw(a0, GlobalObjectOperand());
+ __ li(a2, Operand(slot->var()->name()));
+ RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
+ ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
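+  // Inside typeof the load must not throw for an undeclared global, so a
+  // non-contextual load IC is used there.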
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ EmitCallIC(ic, mode, AstNode::kNoNumber);
+}
+
+
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
+ Slot* slot,
+ Label* slow) {
+ ASSERT(slot->type() == Slot::CONTEXT);
+ Register context = cp;
+ Register next = a3;
+ Register temp = t0;
+
+ for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_eval()) {
+ // Check that extension is NULL.
+ __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+ __ Branch(slow, ne, temp, Operand(zero_reg));
+ }
+ __ lw(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ context = next;
+ }
+ }
+ // Check that last extension is NULL.
+ __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+ __ Branch(slow, ne, temp, Operand(zero_reg));
+
+ // This function is used only for loads, not stores, so it's safe to
+  // return a cp-based operand (the write barrier cannot be allowed to
+ // destroy the cp register).
+ return ContextOperand(context, slot->index());
}
-void FullCodeGenerator::EmitVariableLoad(Variable* var,
- Expression::Context context) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
+ Slot* slot,
+ TypeofState typeof_state,
+ Label* slow,
+ Label* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+ EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
+ __ Branch(done);
+ } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
+ Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
+ if (potential_slot != NULL) {
+ // Generate fast case for locals that rewrite to slots.
+ __ lw(v0, ContextSlotOperandCheckExtensions(potential_slot, slow));
+ if (potential_slot->var()->mode() == Variable::CONST) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ subu(at, v0, at); // Sub as compare: at == 0 on eq.
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ __ movz(v0, a0, at); // Conditional move.
+ }
+ __ Branch(done);
+ } else if (rewrite != NULL) {
+ // Generate fast case for calls of an argument function.
+ Property* property = rewrite->AsProperty();
+ if (property != NULL) {
+ VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+ Literal* key_literal = property->key()->AsLiteral();
+ if (obj_proxy != NULL &&
+ key_literal != NULL &&
+ obj_proxy->IsArguments() &&
+ key_literal->handle()->IsSmi()) {
+ // Load arguments object if there are no eval-introduced
+ // variables. Then load the argument from the arguments
+ // object using keyed load.
+ __ lw(a1,
+ ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
+ slow));
+ __ li(a0, Operand(key_literal->handle()));
+ Handle<Code> ic =
+ isolate()->builtins()->KeyedLoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+ __ Branch(done);
+ }
+ }
+ }
+ }
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(Variable* var) {
+ // Three cases: non-this global variables, lookup slots, and all other
+ // types of slots.
+ Slot* slot = var->AsSlot();
+ ASSERT((var->is_global() && !var->is_this()) == (slot == NULL));
+
+ if (slot == NULL) {
+ Comment cmnt(masm_, "Global variable");
+ // Use inline caching. Variable name is passed in a2 and the global
+ // object (receiver) in a0.
+ __ lw(a0, GlobalObjectOperand());
+ __ li(a2, Operand(var->name()));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ context()->Plug(v0);
+
+ } else if (slot->type() == Slot::LOOKUP) {
+ Label done, slow;
+
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLoadFromSlotFastCase(slot, NOT_INSIDE_TYPEOF, &slow, &done);
+
+ __ bind(&slow);
+ Comment cmnt(masm_, "Lookup slot");
+ __ li(a1, Operand(var->name()));
+ __ Push(cp, a1); // Context and name.
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ bind(&done);
+
+ context()->Plug(v0);
+
+ } else {
+ Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+ ? "Context slot"
+ : "Stack slot");
+ if (var->mode() == Variable::CONST) {
+ // Constants may be the hole value if they have not been initialized.
+ // Unhole them.
+ MemOperand slot_operand = EmitSlotSearch(slot, a0);
+ __ lw(v0, slot_operand);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ subu(at, v0, at); // Sub as compare: at == 0 on eq.
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ __ movz(v0, a0, at); // Conditional move.
+ context()->Plug(v0);
+ } else {
+ context()->Plug(slot);
+ }
+ }
}
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ Label materialized;
+ // Registers will be used as follows:
+ // t1 = materialized value (RegExp literal)
+ // t0 = JS function, literals array
+ // a3 = literal index
+ // a2 = RegExp pattern
+ // a1 = RegExp flags
+ // a0 = RegExp literal clone
+ __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(t0, FieldMemOperand(a0, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ lw(t1, FieldMemOperand(t0, literal_offset));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&materialized, ne, t1, Operand(at));
+
+ // Create regexp literal using runtime function.
+ // Result will be in v0.
+ __ li(a3, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a2, Operand(expr->pattern()));
+ __ li(a1, Operand(expr->flags()));
+ __ Push(t0, a3, a2, a1);
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ mov(t1, v0);
+
+ __ bind(&materialized);
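+  // Clone the materialized regexp so that each evaluation of the literal
+  // yields a distinct JSRegExp object.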
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+ __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ push(t1);
+ __ li(a0, Operand(Smi::FromInt(size)));
+ __ push(a0);
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ pop(t1);
+
+ __ bind(&allocated);
+
+ // After this, registers are used as follows:
+ // v0: Newly allocated regexp.
+ // t1: Materialized regexp.
+ // a2: temp.
+ __ CopyFields(v0, t1, a2.bit(), size / kPointerSize);
+ context()->Plug(v0);
}
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ ObjectLiteral");
+ __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+ __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a1, Operand(expr->constant_properties()));
+ int flags = expr->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ li(a0, Operand(Smi::FromInt(flags)));
+ __ Push(a3, a2, a1, a0);
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ } else {
+ __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ }
+
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in v0.
+ bool result_saved = false;
+
+ // Mark all computed expressions that are bound to a key that
+ // is shadowed by a later occurrence of the same key. For the
+ // marked expressions, no store code is emitted.
+ expr->CalculateEmitStore();
+
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(v0); // Save result on stack.
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->handle()->IsSymbol()) {
+ if (property->emit_store()) {
+ VisitForAccumulatorValue(value);
+ __ mov(a0, result_register());
+ __ li(a2, Operand(key->handle()));
+ __ lw(a1, MemOperand(sp));
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ } else {
+ VisitForEffect(value);
+ }
+ break;
+ }
+ // Fall through.
+ case ObjectLiteral::Property::PROTOTYPE:
+ // Duplicate receiver on stack.
+ __ lw(a0, MemOperand(sp));
+ __ push(a0);
+ VisitForStackValue(key);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ li(a0, Operand(Smi::FromInt(NONE))); // PropertyAttributes.
+ __ push(a0);
+ __ CallRuntime(Runtime::kSetProperty, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+ case ObjectLiteral::Property::GETTER:
+ case ObjectLiteral::Property::SETTER:
+ // Duplicate receiver on stack.
+ __ lw(a0, MemOperand(sp));
+ __ push(a0);
+ VisitForStackValue(key);
+ __ li(a1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
+ Smi::FromInt(1) :
+ Smi::FromInt(0)));
+ __ push(a1);
+ VisitForStackValue(value);
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ break;
+ }
+ }
+
+ if (expr->has_function()) {
+ ASSERT(result_saved);
+ __ lw(a0, MemOperand(sp));
+ __ push(a0);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(v0);
+ }
}
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ ArrayLiteral");
+
+ ZoneList<Expression*>* subexprs = expr->values();
+ int length = subexprs->length();
+ __ mov(a0, result_register());
+ __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+ __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a1, Operand(expr->constant_elements()));
+ __ Push(a3, a2, a1);
+ if (expr->constant_elements()->map() ==
+ isolate()->heap()->fixed_cow_array_map()) {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ __ CallStub(&stub);
+ __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
+ 1, a1, a2);
+ } else if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ } else {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+ __ CallStub(&stub);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ for (int i = 0; i < length; i++) {
+ Expression* subexpr = subexprs->at(i);
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (subexpr->AsLiteral() != NULL ||
+ CompileTimeValue::IsCompileTimeValue(subexpr)) {
+ continue;
+ }
+
+ if (!result_saved) {
+ __ push(v0);
+ result_saved = true;
+ }
+ VisitForAccumulatorValue(subexpr);
+
+ // Store the subexpression value in the array's elements.
+ __ lw(a1, MemOperand(sp)); // Copy of array literal.
+ __ lw(a1, FieldMemOperand(a1, JSObject::kElementsOffset));
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ sw(result_register(), FieldMemOperand(a1, offset));
+
+ // Update the write barrier for the array store with v0 as the scratch
+ // register.
+ __ li(a2, Operand(offset));
+ // TODO(PJ): double check this RecordWrite call.
+ __ RecordWrite(a1, a2, result_register());
+
+ PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(v0);
+ }
}
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ Assignment");
+ // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+ // on the left-hand side.
+ if (!expr->target()->IsValidLeftHandSide()) {
+ VisitForEffect(expr->target());
+ return;
+ }
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ if (expr->is_compound()) {
+ // We need the receiver both on the stack and in the accumulator.
+ VisitForAccumulatorValue(property->obj());
+ __ push(result_register());
+ } else {
+ VisitForStackValue(property->obj());
+ }
+ break;
+ case KEYED_PROPERTY:
+ // We need the key and receiver on both the stack and in v0 and a1.
+ if (expr->is_compound()) {
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
+ __ lw(a1, MemOperand(sp, 0));
+ __ push(v0);
+ } else {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ }
+ break;
+ }
+
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
+ if (expr->is_compound()) {
+ { AccumulatorValueContext context(this);
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ PrepareForBailout(expr->target(), TOS_REG);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ break;
+ }
+ }
+
+ Token::Value op = expr->binary_op();
+ __ push(v0); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
+
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ AccumulatorValueContext context(this);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr->binary_operation(),
+ op,
+ mode,
+ expr->target(),
+ expr->value());
+ } else {
+ EmitBinaryOp(expr->binary_operation(), op, mode);
+ }
+
+ // Deoptimization point in case the binary operation may have side effects.
+ PrepareForBailout(expr->binary_operation(), TOS_REG);
+ } else {
+ VisitForAccumulatorValue(expr->value());
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->op());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(v0);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
}
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- UNIMPLEMENTED_MIPS();
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ __ mov(a0, result_register());
+ __ li(a2, Operand(key->handle()));
+  // Call the load IC. It takes the receiver in a0 and the property name
+  // in a2.
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- UNIMPLEMENTED_MIPS();
+ SetSourcePosition(prop->position());
+ __ mov(a0, result_register());
+  // Call the keyed load IC. It takes the key in a0 and the receiver in a1.
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+}
+
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode,
+ Expression* left_expr,
+ Expression* right_expr) {
+ Label done, smi_case, stub_call;
+
+ Register scratch1 = a2;
+ Register scratch2 = a3;
+
+ // Get the arguments.
+ Register left = a1;
+ Register right = a0;
+ __ pop(left);
+ __ mov(a0, result_register());
+
+ // Perform combined smi check on both operands.
+ __ Or(scratch1, left, Operand(right));
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(scratch1, &smi_case);
+
+ __ bind(&stub_call);
+ BinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+ __ jmp(&done);
+
+ __ bind(&smi_case);
+  // Smi case. This code works the same way as the smi-smi case in the
+  // type-recording binary operation stub; see
+  // BinaryOpStub::GenerateSmiSmiOperation for comments.
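+  // Note that the shift cases below start with an unconditional branch to
+  // the stub, so their inlined sequences are currently unreachable.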
+ switch (op) {
+ case Token::SAR:
+ __ Branch(&stub_call);
+ __ GetLeastBitsFromSmi(scratch1, right, 5);
+ __ srav(right, left, scratch1);
+ __ And(v0, right, Operand(~kSmiTagMask));
+ break;
+ case Token::SHL: {
+ __ Branch(&stub_call);
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ sllv(scratch1, scratch1, scratch2);
+ __ Addu(scratch2, scratch1, Operand(0x40000000));
+ __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
+ __ SmiTag(v0, scratch1);
+ break;
+ }
+ case Token::SHR: {
+ __ Branch(&stub_call);
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ srlv(scratch1, scratch1, scratch2);
+ __ And(scratch2, scratch1, 0xc0000000);
+ __ Branch(&stub_call, ne, scratch2, Operand(zero_reg));
+ __ SmiTag(v0, scratch1);
+ break;
+ }
+ case Token::ADD:
+ __ AdduAndCheckForOverflow(v0, left, right, scratch1);
+ __ BranchOnOverflow(&stub_call, scratch1);
+ break;
+ case Token::SUB:
+ __ SubuAndCheckForOverflow(v0, left, right, scratch1);
+ __ BranchOnOverflow(&stub_call, scratch1);
+ break;
+ case Token::MUL: {
+ __ SmiUntag(scratch1, right);
+ __ Mult(left, scratch1);
+ __ mflo(scratch1);
+ __ mfhi(scratch2);
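+      // The product fits in a smi only if the upper word equals the sign
+      // extension of the lower word.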
+ __ sra(scratch1, scratch1, 31);
+ __ Branch(&stub_call, ne, scratch1, Operand(scratch2));
+ __ mflo(v0);
+ __ Branch(&done, ne, v0, Operand(zero_reg));
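+      // The product is zero; fall back to the stub if the result should
+      // be -0, i.e. if the operands have opposite signs.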
+ __ Addu(scratch2, right, left);
+ __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
+ ASSERT(Smi::FromInt(0) == 0);
+ __ mov(v0, zero_reg);
+ break;
+ }
+ case Token::BIT_OR:
+ __ Or(v0, left, Operand(right));
+ break;
+ case Token::BIT_AND:
+ __ And(v0, left, Operand(right));
+ break;
+ case Token::BIT_XOR:
+ __ Xor(v0, left, Operand(right));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ context()->Plug(v0);
}
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- Expression::Context context) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode) {
+ __ mov(a0, result_register());
+ __ pop(a1);
+ BinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), NULL, expr->id());
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
+ // Invalid left-hand sides are rewritten to have a 'throw
+ // ReferenceError' on the left-hand side.
+ if (!expr->IsValidLeftHandSide()) {
+ VisitForEffect(expr);
+ return;
+ }
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->AsProperty();
+ if (prop != NULL) {
+ assign_type = (prop->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* var = expr->AsVariableProxy()->var();
+ EffectContext context(this);
+ EmitVariableAssignment(var, Token::ASSIGN);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ __ push(result_register()); // Preserve value.
+ VisitForAccumulatorValue(prop->obj());
+ __ mov(a1, result_register());
+ __ pop(a0); // Restore value.
+ __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ push(result_register()); // Preserve value.
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ mov(a1, result_register());
+ __ pop(a2);
+ __ pop(a0); // Restore value.
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ break;
+ }
+ }
+ PrepareForBailoutForId(bailout_ast_id, TOS_REG);
+ context()->Plug(v0);
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Expression::Context context) {
- UNIMPLEMENTED_MIPS();
+ Token::Value op) {
+ ASSERT(var != NULL);
+ ASSERT(var->is_global() || var->AsSlot() != NULL);
+
+ if (var->is_global()) {
+ ASSERT(!var->is_this());
+ // Assignment to a global variable. Use inline caching for the
+ // assignment. Right-hand-side value is passed in a0, variable name in
+ // a2, and the global object in a1.
+ __ mov(a0, result_register());
+ __ li(a2, Operand(var->name()));
+ __ lw(a1, GlobalObjectOperand());
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+
+ } else if (op == Token::INIT_CONST) {
+ // Like var declarations, const declarations are hoisted to function
+ // scope. However, unlike var initializers, const initializers are able
+ // to drill a hole to that function context, even from inside a 'with'
+ // context. We thus bypass the normal static scope lookup.
+ Slot* slot = var->AsSlot();
+ Label skip;
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ // No const parameters.
+ UNREACHABLE();
+ break;
+ case Slot::LOCAL:
+ // Detect const reinitialization by checking for the hole value.
+ __ lw(a1, MemOperand(fp, SlotOffset(slot)));
+ __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ __ Branch(&skip, ne, a1, Operand(t0));
+ __ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
+ break;
+ case Slot::CONTEXT: {
+ __ lw(a1, ContextOperand(cp, Context::FCONTEXT_INDEX));
+ __ lw(a2, ContextOperand(a1, slot->index()));
+ __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ __ Branch(&skip, ne, a2, Operand(t0));
+ __ sw(result_register(), ContextOperand(a1, slot->index()));
+ int offset = Context::SlotOffset(slot->index());
+ __ mov(a3, result_register()); // Preserve the stored value in v0.
+ __ RecordWrite(a1, Operand(offset), a3, a2);
+ break;
+ }
+ case Slot::LOOKUP:
+ __ push(result_register());
+ __ li(a0, Operand(slot->var()->name()));
+ __ Push(cp, a0); // Context and name.
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ break;
+ }
+ __ bind(&skip);
+
+ } else if (var->mode() != Variable::CONST) {
+ // Perform the assignment for non-const variables. Const assignments
+ // are simply skipped.
+ Slot* slot = var->AsSlot();
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ // Perform the assignment.
+ __ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
+ break;
+
+ case Slot::CONTEXT: {
+ MemOperand target = EmitSlotSearch(slot, a1);
+ // Perform the assignment and issue the write barrier.
+ __ sw(result_register(), target);
+ // RecordWrite may destroy all its register arguments.
+ __ mov(a3, result_register());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ RecordWrite(a1, Operand(offset), a2, a3);
+ break;
+ }
+
+ case Slot::LOOKUP:
+ // Call the runtime for the assignment.
+ __ push(v0); // Value.
+ __ li(a1, Operand(slot->var()->name()));
+ __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
+ __ Push(cp, a1, a0); // Context, name, strict mode.
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ break;
+ }
+ }
}
void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- UNIMPLEMENTED_MIPS();
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ push(result_register());
+ __ lw(t0, MemOperand(sp, kPointerSize)); // Receiver is now under value.
+ __ push(t0);
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
+ }
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ __ mov(a0, result_register()); // Load the value.
+ __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
+  // Load the receiver into a1. Leave a copy on the stack if needed for
+  // turning the receiver into the fast case.
+ if (expr->ends_initialization_block()) {
+ __ lw(a1, MemOperand(sp));
+ } else {
+ __ pop(a1);
+ }
+
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(v0); // Result of assignment, saved even if not needed.
+ // Receiver is under the result value.
+ __ lw(t0, MemOperand(sp, kPointerSize));
+ __ push(t0);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(v0);
+ __ Drop(1);
+ }
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(v0);
}
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- UNIMPLEMENTED_MIPS();
+ // Assignment to a property, using a keyed store IC.
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ push(result_register());
+ // Receiver is now under the key and value.
+ __ lw(t0, MemOperand(sp, 2 * kPointerSize));
+ __ push(t0);
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
+ }
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ // Call keyed store IC.
+ // The arguments are:
+ // - a0 is the value,
+ // - a1 is the key,
+ // - a2 is the receiver.
+ __ mov(a0, result_register());
+ __ pop(a1); // Key.
+  // Load the receiver into a2. Leave a copy on the stack if needed for
+  // turning the receiver into the fast case.
+ if (expr->ends_initialization_block()) {
+ __ lw(a2, MemOperand(sp));
+ } else {
+ __ pop(a2);
+ }
+
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(v0); // Result of assignment, saved even if not needed.
+ // Receiver is under the result value.
+ __ lw(t0, MemOperand(sp, kPointerSize));
+ __ push(t0);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(v0);
+ __ Drop(1);
+ }
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(v0);
}
void FullCodeGenerator::VisitProperty(Property* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+
+ if (key->IsPropertyName()) {
+ VisitForAccumulatorValue(expr->obj());
+ EmitNamedPropertyLoad(expr);
+ context()->Plug(v0);
+ } else {
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ pop(a1);
+ EmitKeyedPropertyLoad(expr);
+ context()->Plug(v0);
+ }
}
+
void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> ignored,
+ Handle<Object> name,
RelocInfo::Mode mode) {
- UNIMPLEMENTED_MIPS();
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ __ li(a2, Operand(name));
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ // Call the IC initialization code.
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
+ EmitCallIC(ic, mode, expr->id());
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
+ Expression* key) {
+ // Load the key.
+ VisitForAccumulatorValue(key);
+
+ // Swap the name of the function and the receiver on the stack to follow
+ // the calling convention for call ICs.
+ __ pop(a1);
+ __ push(v0);
+ __ push(a1);
+
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ // Call the IC initialization code.
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
+ __ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, v0); // Drop the key still on the stack.
+}
+
+
+void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
+ // Code common for calls using the call stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub stub(arg_count, in_loop, flags);
+ __ CallStub(&stub);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, v0);
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
+ int arg_count) {
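+  // The caller has already pushed a copy of the function to resolve;
+  // together with the three values pushed below this forms the four
+  // arguments of the runtime call.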
+ // Push copy of the first argument or undefined if it doesn't exist.
+ if (arg_count > 0) {
+ __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
+ } else {
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ }
+ __ push(a1);
+
+ // Push the receiver of the enclosing function and do runtime call.
+ __ lw(a1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
+ __ push(a1);
+ // Push the strict mode flag.
+ __ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
+ __ push(a1);
+
+ __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
+ ? Runtime::kResolvePossiblyDirectEvalNoLookup
+ : Runtime::kResolvePossiblyDirectEval, 4);
}
void FullCodeGenerator::VisitCall(Call* expr) {
- UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+ // We want to verify that RecordJSReturnSite gets called on all paths
+ // through this function. Avoid early returns.
+ expr->return_is_recorded_ = false;
+#endif
+
+ Comment cmnt(masm_, "[ Call");
+ Expression* fun = expr->expression();
+ Variable* var = fun->AsVariableProxy()->AsVariable();
+
+ if (var != NULL && var->is_possibly_eval()) {
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to
+ // resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ { PreservePositionScope pos_scope(masm()->positions_recorder());
+ VisitForStackValue(fun);
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ push(a2); // Reserved receiver slot.
+
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ // If we know that eval can only be shadowed by eval-introduced
+ // variables we attempt to load the global eval function directly
+ // in generated code. If we succeed, there is no need to perform a
+ // context lookup in the runtime system.
+ Label done;
+ if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+ Label slow;
+ EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
+ NOT_INSIDE_TYPEOF,
+ &slow);
+ // Push the function and resolve eval.
+ __ push(v0);
+ EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
+ __ jmp(&done);
+ __ bind(&slow);
+ }
+
+ // Push copy of the function (found below the arguments) and
+ // resolve eval.
+ __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ push(a1);
+ EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
+ if (done.is_linked()) {
+ __ bind(&done);
+ }
+
+ // The runtime call returns a pair of values in v0 (function) and
+ // v1 (receiver). Touch up the stack with the right values.
+ __ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ sw(v1, MemOperand(sp, arg_count * kPointerSize));
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_IMPLICIT);
+ __ CallStub(&stub);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, v0);
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // Push global object as receiver for the call IC.
+ __ lw(a0, GlobalObjectOperand());
+ __ push(a0);
+ EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
+ } else if (var != NULL && var->AsSlot() != NULL &&
+ var->AsSlot()->type() == Slot::LOOKUP) {
+ // Call to a lookup slot (dynamically introduced variable).
+ Label slow, done;
+
+ { PreservePositionScope scope(masm()->positions_recorder());
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
+ NOT_INSIDE_TYPEOF,
+ &slow,
+ &done);
+ }
+
+ __ bind(&slow);
+ // Call the runtime to find the function to call (returned in v0)
+ // and the object holding it (returned in v1).
+ __ push(context_register());
+ __ li(a2, Operand(var->name()));
+ __ push(a2);
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ Push(v0, v1); // Function, receiver.
+
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ Label call;
+ __ Branch(&call);
+ __ bind(&done);
+ // Push function.
+ __ push(v0);
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing the hole to the call function stub.
+ __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
+ __ push(a1);
+ __ bind(&call);
+ }
+
+ // The receiver is either the global receiver or an object found
+ // by LoadContextSlot. That object could be the hole if the
+ // receiver is implicitly the global object.
+ EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
+ } else if (fun->AsProperty() != NULL) {
+ // Call to an object property.
+ Property* prop = fun->AsProperty();
+ Literal* key = prop->key()->AsLiteral();
+ if (key != NULL && key->handle()->IsSymbol()) {
+ // Call to a named property, use call IC.
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(prop->obj());
+ }
+ EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
+ } else {
+ // Call to a keyed property.
+ // For a synthetic property use a keyed load IC followed by a function
+ // call; for a regular property use EmitKeyedCallWithIC.
+ if (prop->is_synthetic()) {
+ // Do not visit the object and key subexpressions (they are shared
+ // by all occurrences of the same rewritten parameter).
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
+ Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
+ MemOperand operand = EmitSlotSearch(slot, a1);
+ __ lw(a1, operand);
+
+ ASSERT(prop->key()->AsLiteral() != NULL);
+ ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
+ __ li(a0, Operand(prop->key()->AsLiteral()->handle()));
+
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ lw(a1, GlobalObjectOperand());
+ __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+ __ Push(v0, a1); // Function, receiver.
+ EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+ } else {
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(prop->obj());
+ }
+ EmitKeyedCallWithIC(expr, prop->key());
+ }
+ }
+ } else {
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(fun);
+ }
+ // Load global receiver object.
+ __ lw(a1, GlobalObjectOperand());
+ __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+ __ push(a1);
+ // Emit function call.
+ EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+ }
+
+#ifdef DEBUG
+ // RecordJSReturnSite should have been called.
+ ASSERT(expr->return_is_recorded_);
+#endif
}


void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
+ VisitForStackValue(expr->expression());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into a1 and a0.
+ __ li(a0, Operand(arg_count));
+ __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
+
+ Handle<Code> construct_builtin =
+ isolate()->builtins()->JSConstructCall();
+ __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
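+ // Smis are tagged with kSmiTag (0) in the low bit, so masking with
+ // kSmiTagMask yields zero exactly for smis.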
+ __ And(t0, v0, Operand(kSmiTagMask));
+ Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
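+ // A non-negative smi must have both the tag bit and the sign bit
+ // (0x80000000) clear, hence the combined mask.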
+ __ And(at, v0, Operand(kSmiTagMask | 0x80000000));
+ Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(if_true, eq, v0, Operand(at));
+ __ lw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ lbu(a1, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ And(at, a1, Operand(1 << Map::kIsUndetectable));
+ __ Branch(if_false, ne, at, Operand(zero_reg));
+ __ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(le, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE),
+ if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a1);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
+ if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
+ __ And(at, a1, Operand(1 << Map::kIsUndetectable));
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args) {
+
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ if (FLAG_debug_code) __ AbortIfSmi(v0);
+
+ __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(a1, Map::kBitField2Offset));
+ __ And(t0, t0, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ __ Branch(if_true, ne, t0, Operand(zero_reg));
+
+ // Check for fast case object. Generate false result for slow case object.
+ __ lw(a2, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ LoadRoot(t0, Heap::kHashTableMapRootIndex);
+ __ Branch(if_false, eq, a2, Operand(t0));
+
+ // Look for valueOf symbol in the descriptor array, and indicate false if
+ // found. The type is not checked, so if it is a transition it is a false
+ // negative.
+ __ LoadInstanceDescriptors(a1, t0);
+ __ lw(a3, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ // t0: descriptor array
+ // a3: length of descriptor array
+ // Calculate the end of the descriptor array.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kPointerSize == 4);
+ __ Addu(a2, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
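+ // The smi length in a3 is value << kSmiTagSize, so shifting by
+ // kPointerSizeLog2 - kSmiTagSize scales it straight to a byte offset.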
+ __ sll(t1, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a2, a2, t1);
+
+ // Calculate location of the first key name.
+ __ Addu(t0,
+ t0,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag +
+ DescriptorArray::kFirstIndex * kPointerSize));
+ // Loop through all the keys in the descriptor array. If one of these is the
+ // symbol valueOf the result is false.
+ Label entry, loop;
+ // The use of t2 to store the valueOf symbol assumes that it is not
+ // otherwise used in the loop below.
+ __ li(t2, Operand(FACTORY->value_of_symbol()));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ lw(a3, MemOperand(t0, 0));
+ __ Branch(if_false, eq, a3, Operand(t2));
+ __ Addu(t0, t0, Operand(kPointerSize));
+ __ bind(&entry);
+ __ Branch(&loop, ne, t0, Operand(a2));
+
+ // If a valueOf property is not found on the object, check that its
+ // prototype is the unmodified String prototype. If not, the result is false.
+ __ lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
+ __ JumpIfSmi(a2, if_false);
+ __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ lw(a3, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
+ __ lw(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ Branch(if_false, ne, a2, Operand(a3));
+
+ // Set the bit in the map to indicate that it has been checked safe for
+ // default valueOf, and set the result to true.
+ __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+ __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+ __ jmp(if_true);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a2);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(if_false);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a1);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(eq, a1, Operand(JS_ARRAY_TYPE),
+ if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a1);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Get the frame pointer for the calling frame.
+ __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
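+ // An adaptor frame is inserted when the actual argument count differs
+ // from the formal parameter count; the construct marker lives in the
+ // real caller frame beneath it.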
+ Label check_frame_marker;
+ __ lw(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&check_frame_marker, ne,
+ a1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
+ if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ pop(a1);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // ArgumentsAccessStub expects the key in a1 and the formal
+ // parameter count in a0.
+ VisitForAccumulatorValue(args->at(0));
+ __ mov(a1, v0);
+ __ li(a0, Operand(Smi::FromInt(scope()->num_parameters())));
+ ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+
+ Label exit;
+ // Get the number of formal parameters.
+ __ li(v0, Operand(Smi::FromInt(scope()->num_parameters())));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&exit, ne, a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ lw(v0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ bind(&exit);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Label done, null, function, non_function_constructor;
+
+ VisitForAccumulatorValue(args->at(0));
+
+ // If the object is a smi, we return null.
+ __ JumpIfSmi(v0, &null);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+ __ GetObjectType(v0, v0, a1); // Map is now in v0.
+ __ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+ __ Branch(&function, ge, a1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
+
+ // Check if the constructor in the map is a function.
+ __ lw(v0, FieldMemOperand(v0, Map::kConstructorOffset));
+ __ GetObjectType(v0, a1, a1);
+ __ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE));
+
+ // v0 now contains the constructor function. Grab the
+ // instance class name from there.
+ __ lw(v0, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(v0, FieldMemOperand(v0, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ Branch(&done);
+
+ // Functions have class 'Function'.
+ __ bind(&function);
+ __ LoadRoot(v0, Heap::kfunction_class_symbolRootIndex);
+ __ jmp(&done);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ bind(&non_function_constructor);
+ __ LoadRoot(v0, Heap::kObject_symbolRootIndex);
+ __ jmp(&done);
+
+ // Non-JS objects have class null.
+ __ bind(&null);
+ __ LoadRoot(v0, Heap::kNullValueRootIndex);
+
+ // All done.
+ __ bind(&done);
+
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+ // Conditionally generate a log call.
+ // Args:
+ // 0 (literal string): The type of logging (corresponds to the flags).
+ // This is used to determine whether or not to generate the log call.
+ // 1 (string): Format string. Access the string at argument index 2
+ // with '%2s' (see Logger::LogRuntime for all the formats).
+ // 2 (array): Arguments to the format string.
+ ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallRuntime(Runtime::kLog, 2);
+ }
+#endif
+ // Finally, we're expected to leave a value on the top of the stack.
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+
+ Label slow_allocate_heapnumber;
+ Label heapnumber_allocated;
+
+ // Save the new heap number in callee-saved register s0, since
+ // we call out to external C code below.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(s0, a1, a2, t6, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+
+ // Allocate a heap number.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(s0, v0); // Save result in s0 so it survives the C function call.
+
+ __ bind(&heapnumber_allocated);
+
+ // Convert 32 random bits in v0 to 0.(32 random bits) in a double
+ // by computing:
+ // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
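+ // With exponent bits 0x413 (unbiased exponent 20) and the 32 random
+ // bits r in the low mantissa word, the first double equals
+ // 2^20 + r * 2^-32, so the subtraction leaves r * 2^-32 in [0, 1).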
+ if (CpuFeatures::IsSupported(FPU)) {
+ __ PrepareCallCFunction(1, a0);
+ __ li(a0, Operand(ExternalReference::isolate_address()));
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+
+ CpuFeatures::Scope scope(FPU);
+ // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+ __ li(a1, Operand(0x41300000));
+ // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
+ __ Move(f12, v0, a1);
+ // Move 0x4130000000000000 to FPU.
+ __ Move(f14, zero_reg, a1);
+ // Subtract and store the result in the heap number.
+ __ sub_d(f0, f12, f14);
+ __ sdc1(f0, MemOperand(s0, HeapNumber::kValueOffset - kHeapObjectTag));
+ __ mov(v0, s0);
+ } else {
+ __ PrepareCallCFunction(2, a0);
+ __ mov(a0, s0);
+ __ li(a1, Operand(ExternalReference::isolate_address()));
+ __ CallCFunction(
+ ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
+ }
+
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+ // Load the arguments on the stack and call the stub.
+ SubStringStub stub;
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+ // Load the arguments on the stack and call the stub.
+ RegExpExecStub stub;
+ ASSERT(args->length() == 4);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ VisitForStackValue(args->at(3));
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label done;
+ // If the object is a smi return the object.
+ __ JumpIfSmi(v0, &done);
+ // If the object is not a value type, return the object.
+ __ GetObjectType(v0, a1, a1);
+ __ Branch(&done, ne, a1, Operand(JS_VALUE_TYPE));
+
+ __ lw(v0, FieldMemOperand(v0, JSValue::kValueOffset));
+
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+ // Load the arguments on the stack and call the runtime function.
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ MathPowStub stub;
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0)); // Load the object.
+ VisitForAccumulatorValue(args->at(1)); // Load the value.
+ __ pop(a1); // v0 = value. a1 = object.
+
+ Label done;
+ // If the object is a smi, return the value.
+ __ JumpIfSmi(a1, &done);
+
+ // If the object is not a value type, return the value.
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&done, ne, a2, Operand(JS_VALUE_TYPE));
+
+ // Store the value.
+ __ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset));
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ __ RecordWrite(a1, Operand(JSValue::kValueOffset - kHeapObjectTag), a2, a3);
+
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+
+ // Load the argument on the stack and call the stub.
+ VisitForStackValue(args->at(0));
+
+ NumberToStringStub stub;
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label done;
+ StringCharFromCodeGenerator generator(v0, a1);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(a1);
+}
+
+
+void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+ __ mov(a0, result_register());
+
+ Register object = a1;
+ Register index = a0;
+ Register scratch = a2;
+ Register result = v0;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object,
+ index,
+ scratch,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ LoadRoot(result, Heap::kNanValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Load the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+ __ mov(a0, result_register());
+
+ Register object = a1;
+ Register index = a0;
+ Register scratch1 = a2;
+ Register scratch2 = a3;
+ Register result = v0;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ li(result, Operand(Smi::FromInt(0)));
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringCompareStub stub;
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::SIN,
+ TranscendentalCacheStub::TAGGED);
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::COS,
+ TranscendentalCacheStub::TAGGED);
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub::TAGGED);
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the runtime function.
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ CallRuntime(Runtime::kMath_sqrt, 1);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+ ASSERT(args->length() >= 2);
+
+ int arg_count = args->length() - 2; // 2 ~ receiver and function.
+ for (int i = 0; i < arg_count + 1; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ VisitForAccumulatorValue(args->last()); // Function.
+
+ // InvokeFunction requires the function in a1. Move it in there.
+ __ mov(a1, result_register());
+ ParameterCount count(arg_count);
+ __ InvokeFunction(a1, count, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+ RegExpConstructResultStub stub;
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ Label done;
+ Label slow_case;
+ Register object = a0;
+ Register index1 = a1;
+ Register index2 = a2;
+ Register elements = a3;
+ Register scratch1 = t0;
+ Register scratch2 = t1;
+
+ __ lw(object, MemOperand(sp, 2 * kPointerSize));
+ // Fetch the map and check if array is in fast case.
+ // Check that object doesn't require security checks and
+ // has no indexed interceptor.
+ __ GetObjectType(object, scratch1, scratch2);
+ __ Branch(&slow_case, ne, scratch2, Operand(JS_ARRAY_TYPE));
+ // Map is now in scratch1.
+
+ __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
+ __ And(scratch2, scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+ __ Branch(&slow_case, ne, scratch2, Operand(zero_reg));
+
+ // Check the object's elements are in fast case and writable.
+ __ lw(elements, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(scratch2, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&slow_case, ne, scratch1, Operand(scratch2));
+
+ // Check that both indices are smis.
+ __ lw(index1, MemOperand(sp, 1 * kPointerSize));
+ __ lw(index2, MemOperand(sp, 0));
+ __ JumpIfNotBothSmi(index1, index2, &slow_case);
+
+ // Check that both indices are valid.
+ Label not_hi;
+ __ lw(scratch1, FieldMemOperand(object, JSArray::kLengthOffset));
+ __ Branch(&slow_case, ls, scratch1, Operand(index1));
+ __ Branch(&not_hi, NegateCondition(hi), scratch1, Operand(index1));
+ __ Branch(&slow_case, ls, scratch1, Operand(index2));
+ __ bind(&not_hi);
+
+ // Bring the address of the elements into index1 and index2.
+ __ Addu(scratch1, elements,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(index1, index1, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(index1, scratch1, index1);
+ __ sll(index2, index2, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(index2, scratch1, index2);
+
+ // Swap elements.
+ __ lw(scratch1, MemOperand(index1, 0));
+ __ lw(scratch2, MemOperand(index2, 0));
+ __ sw(scratch1, MemOperand(index2, 0));
+ __ sw(scratch2, MemOperand(index1, 0));
+
+ Label new_space;
+ __ InNewSpace(elements, scratch1, eq, &new_space);
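+ // If the elements array is in new space, the stores above cannot
+ // create an old-to-new pointer, so both write barriers below are
+ // skipped.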
+ // Possible optimization: do a check that both values are smis
+ // (OR them together and test the result against the smi mask).
+
+ __ mov(scratch1, elements);
+ __ RecordWriteHelper(elements, index1, scratch2);
+ __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements.
+
+ __ bind(&new_space);
+ // We are done. Drop elements from the stack, and return undefined.
+ __ Drop(3);
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&slow_case);
+ __ CallRuntime(Runtime::kSwapElements, 3);
+
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ ASSERT_NE(NULL, args->at(0)->AsLiteral());
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+ Handle<FixedArray> jsfunction_result_caches(
+ isolate()->global_context()->jsfunction_result_caches());
+ if (jsfunction_result_caches->length() <= cache_id) {
+ __ Abort("Attempt to use undefined cache.");
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ context()->Plug(v0);
+ return;
+ }
+
+ VisitForAccumulatorValue(args->at(1));
+
+ Register key = v0;
+ Register cache = a1;
+ __ lw(cache, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ lw(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset));
+ __ lw(cache,
+ ContextOperand(
+ cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ lw(cache,
+ FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+ Label done, not_found;
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ lw(a2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
+ // a2 now holds finger offset as a smi.
+ __ Addu(a3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // a3 now points to the start of fixed array elements.
+ __ sll(at, a2, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(a3, a3, at);
+ // a3 now points to key of indexed element of cache.
+ __ lw(a2, MemOperand(a3));
+ __ Branch(&not_found, ne, key, Operand(a2));
+
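+ // Cache hit: entries are (key, value) pairs, so the value lives one
+ // word past the matched key.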
+ __ lw(v0, MemOperand(a3, kPointerSize));
+ __ Branch(&done);
+
+ __ bind(&not_found);
+ // Call runtime to perform the lookup.
+ __ Push(cache, key);
+ __ CallRuntime(Runtime::kGetFromCache, 2);
+
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Register right = v0;
+ Register left = a1;
+ Register tmp = a2;
+ Register tmp2 = a3;
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1)); // Result (right) in v0.
+ __ pop(left);
+
+ Label done, fail, ok;
+ __ Branch(&ok, eq, left, Operand(right));
+ // Fail if either is a non-HeapObject.
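+ // Heap objects have the low tag bit set, so (left & right) keeps the
+ // bit only if both operands are heap objects; a clear bit means at
+ // least one smi.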
+ __ And(tmp, left, Operand(right));
+ __ And(at, tmp, Operand(kSmiTagMask));
+ __ Branch(&fail, eq, at, Operand(zero_reg));
+ __ lw(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ lbu(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+ __ Branch(&fail, ne, tmp2, Operand(JS_REGEXP_TYPE));
+ __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ Branch(&fail, ne, tmp, Operand(tmp2));
+ __ lw(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
+ __ lw(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
+ __ Branch(&ok, eq, tmp, Operand(tmp2));
+ __ bind(&fail);
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+ __ bind(&ok);
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ __ bind(&done);
+
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ lw(a0, FieldMemOperand(v0, String::kHashFieldOffset));
+ __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
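+ // The hash field holds a cached array index exactly when the bits
+ // covered by kContainsCachedArrayIndexMask are all zero, as tested here.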
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(v0);
+ }
+
+ __ lw(v0, FieldMemOperand(v0, String::kHashFieldOffset));
+ __ IndexFromHash(v0, v0);
+
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+ Label bailout, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop,
+ empty_separator_loop, one_char_separator_loop,
+ one_char_separator_loop_entry, long_separator_loop;
+
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(0));
+
+ // All aliases of the same register have disjoint lifetimes.
+ Register array = v0;
+ Register elements = no_reg; // Will be v0.
+ Register result = no_reg; // Will be v0.
+ Register separator = a1;
+ Register array_length = a2;
+ Register result_pos = no_reg; // Will be a2.
+ Register string_length = a3;
+ Register string = t0;
+ Register element = t1;
+ Register elements_end = t2;
+ Register scratch1 = t3;
+ Register scratch2 = t5;
+ Register scratch3 = t4;
+ Register scratch4 = v1;
+
+ // Separator operand is on the stack.
+ __ pop(separator);
+
+ // Check that the array is a JSArray.
+ __ JumpIfSmi(array, &bailout);
+ __ GetObjectType(array, scratch1, scratch2);
+ __ Branch(&bailout, ne, scratch2, Operand(JS_ARRAY_TYPE));
+
+ // Check that the array has fast elements.
+ __ CheckFastElements(scratch1, scratch2, &bailout);
+
+ // If the array has length zero, return the empty string.
+ __ lw(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
+ __ SmiUntag(array_length);
+ __ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg));
+ __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
+ __ Branch(&done);
+
+ __ bind(&non_trivial_array);
+
+ // Get the FixedArray containing array's elements.
+ elements = array;
+ __ lw(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+ array = no_reg; // End of array's live range.
+
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths, as a smi-encoded value.
+ __ mov(string_length, zero_reg);
+ __ Addu(element,
+ elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(elements_end, array_length, kPointerSizeLog2);
+ __ Addu(elements_end, element, elements_end);
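+ // array_length was untagged above, so shifting by kPointerSizeLog2
+ // gives the size of the elements in bytes.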
+ // Loop condition: while (element < elements_end).
+ // Live values in registers:
+ // elements: Fixed array of strings.
+ // array_length: Length of the fixed array of strings (not smi)
+ // separator: Separator string
+ // string_length: Accumulated sum of string lengths (smi).
+ // element: Current array element.
+ // elements_end: Array end.
+ if (FLAG_debug_code) {
+ __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin",
+ array_length, Operand(zero_reg));
+ }
+ __ bind(&loop);
+ __ lw(string, MemOperand(element));
+ __ Addu(element, element, kPointerSize);
+ __ JumpIfSmi(string, &bailout);
+ __ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ lw(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
+ __ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
+ __ BranchOnOverflow(&bailout, scratch3);
+ __ Branch(&loop, lt, element, Operand(elements_end));
+
+ // If array_length is 1, return elements[0], a string.
+ __ Branch(&not_size_one_array, ne, array_length, Operand(1));
+ __ lw(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ __ Branch(&done);
+
+ __ bind(&not_size_one_array);
+
+ // Live values in registers:
+ // separator: Separator string
+ // array_length: Length of the array.
+ // string_length: Sum of string lengths (smi).
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ JumpIfSmi(separator, &bailout);
+ __ lw(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+
+ // Add (separator length times array_length) - separator length to the
+ // string_length to get the length of the result string. array_length is not
+ // smi but the other values are, so the result is a smi.
+ __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ Subu(string_length, string_length, Operand(scratch1));
+ __ Mult(array_length, scratch1);
+ // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
+ // zero.
+ __ mfhi(scratch2);
+ __ Branch(&bailout, ne, scratch2, Operand(zero_reg));
+ __ mflo(scratch2);
+ __ And(scratch3, scratch2, Operand(0x80000000));
+ __ Branch(&bailout, ne, scratch3, Operand(zero_reg));
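+ // The low word is the smi-tagged product of separator length and
+ // array_length; bit 31 must be clear for it to remain a valid
+ // non-negative smi.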
+ __ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
+ __ BranchOnOverflow(&bailout, scratch3);
+ __ SmiUntag(string_length);
+
+ // Get first element in the array to free up the elements register to be used
+ // for the result.
+ __ Addu(element,
+ elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ result = elements; // End of live range for elements.
+ elements = no_reg;
+ // Live values in registers:
+ // element: First array element
+ // separator: Separator string
+ // string_length: Length of result string (not smi)
+ // array_length: Length of the array.
+ __ AllocateAsciiString(result,
+ string_length,
+ scratch1,
+ scratch2,
+ elements_end,
+ &bailout);
+ // Prepare for looping. Set up elements_end to point to the end of the
+ // array, and result_pos to the position in the result where the first
+ // character will be written.
+ __ sll(elements_end, array_length, kPointerSizeLog2);
+ __ Addu(elements_end, element, elements_end);
+ result_pos = array_length; // End of live range for array_length.
+ array_length = no_reg;
+ __ Addu(result_pos,
+ result,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+
+ // Check the length of the separator.
+ __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ li(at, Operand(Smi::FromInt(1)));
+ __ Branch(&one_char_separator, eq, scratch1, Operand(at));
+ __ Branch(&long_separator, gt, scratch1, Operand(at));
+
+ // Empty separator case.
+ __ bind(&empty_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+
+ // Copy next array element to the result.
+ __ lw(string, MemOperand(element));
+ __ Addu(element, element, kPointerSize);
+ __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ // End while (element < elements_end).
+ __ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
+ ASSERT(result.is(v0));
+ __ Branch(&done);
+
+ // One-character separator case.
+ __ bind(&one_char_separator);
+ // Replace separator with its ascii character value.
+ __ lbu(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator.
+ __ jmp(&one_char_separator_loop_entry);
+
+ __ bind(&one_char_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Single separator ascii char (in lower byte).
+
+ // Copy the separator character to the result.
+ __ sb(separator, MemOperand(result_pos));
+ __ Addu(result_pos, result_pos, 1);
+
+ // Copy next array element to the result.
+ __ bind(&one_char_separator_loop_entry);
+ __ lw(string, MemOperand(element));
+ __ Addu(element, element, kPointerSize);
+ __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ // End while (element < elements_end).
+ __ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
+ ASSERT(result.is(v0));
+ __ Branch(&done);
+
+ // Long separator case (separator is more than one character). Entry is at the
+ // label long_separator below.
+ __ bind(&long_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Separator string.
+
+ // Copy the separator to the result.
+ __ lw(string_length, FieldMemOperand(separator, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ Addu(string,
+ separator,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+
+ __ bind(&long_separator);
+ __ lw(string, MemOperand(element));
+ __ Addu(element, element, kPointerSize);
+ __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ // End while (element < elements_end).
+ __ Branch(&long_separator_loop, lt, element, Operand(elements_end));
+ ASSERT(result.is(v0));
+ __ Branch(&done);
+
+ __ bind(&bailout);
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ bind(&done);
+ context()->Plug(v0);
}


void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- UNIMPLEMENTED_MIPS();
+ Handle<String> name = expr->name();
+ if (name->length() > 0 && name->Get(0) == '_') {
+ Comment cmnt(masm_, "[ InlineRuntimeCall");
+ EmitInlineRuntimeCall(expr);
+ return;
+ }
+
+ Comment cmnt(masm_, "[ CallRuntime");
+ ZoneList<Expression*>* args = expr->arguments();
+
+ if (expr->is_jsruntime()) {
+ // Prepare for calling JS runtime function.
+ __ lw(a0, GlobalObjectOperand());
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kBuiltinsOffset));
+ __ push(a0);
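+ // JS runtime functions are looked up as properties of the builtins
+ // object, which therefore also serves as the receiver for the call IC
+ // below.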
+ }
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ if (expr->is_jsruntime()) {
+ // Call the JS runtime function.
+ __ li(a2, Operand(expr->name()));
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP,
+ mode);
+ EmitCallIC(ic, mode, expr->id());
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ }
+ context()->Plug(v0);
}


void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- UNIMPLEMENTED_MIPS();
+ switch (expr->op()) {
+ case Token::DELETE: {
+ Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+ Property* prop = expr->expression()->AsProperty();
+ Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+
+ if (prop != NULL) {
+ if (prop->is_synthetic()) {
+ // Result of deleting parameters is false, even when they rewrite
+ // to accesses on the arguments object.
+ context()->Plug(false);
+ } else {
+ VisitForStackValue(prop->obj());
+ VisitForStackValue(prop->key());
+ __ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
+ __ push(a1);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(v0);
+ }
+ } else if (var != NULL) {
+ // Delete of an unqualified identifier is disallowed in strict mode,
+ // but "delete this" is allowed.
+ ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+ if (var->is_global()) {
+ __ lw(a2, GlobalObjectOperand());
+ __ li(a1, Operand(var->name()));
+ __ li(a0, Operand(Smi::FromInt(kNonStrictMode)));
+ __ Push(a2, a1, a0);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(v0);
+ } else if (var->AsSlot() != NULL &&
+ var->AsSlot()->type() != Slot::LOOKUP) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ context()->Plug(false);
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ push(context_register());
+ __ li(a2, Operand(var->name()));
+ __ push(a2);
+ __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ context()->Plug(v0);
+ }
+ } else {
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
+ }
+ break;
+ }
+
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ VisitForEffect(expr->expression());
+ context()->Plug(Heap::kUndefinedValueRootIndex);
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+
+ // Notice that the labels are swapped.
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
+ if (context()->IsTest()) ForwardBailoutToChild(expr);
+ VisitForControl(expr->expression(), if_true, if_false, fall_through);
+ context()->Plug(if_false, if_true); // Labels swapped.
+ }
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ { StackValueContext context(this);
+ VisitForTypeofValue(expr->expression());
+ }
+ __ CallRuntime(Runtime::kTypeof, 1);
+ context()->Plug(v0);
+ break;
+ }
+
+ case Token::ADD: {
+ Comment cmt(masm_, "[ UnaryOperation (ADD)");
+ VisitForAccumulatorValue(expr->expression());
+ Label no_conversion;
+ __ JumpIfSmi(result_register(), &no_conversion);
+ __ mov(a0, result_register());
+ ToNumberStub convert_stub;
+ __ CallStub(&convert_stub);
+ __ bind(&no_conversion);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Token::SUB:
+ EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
+ break;
+
+ case Token::BIT_NOT:
+ EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+ const char* comment) {
+ // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+ Comment cmt(masm_, comment);
+ bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOverwriteMode overwrite =
+ can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ UnaryOpStub stub(expr->op(), overwrite);
+ // UnaryOpStub expects the argument to be in a0.
+ VisitForAccumulatorValue(expr->expression());
+ SetSourcePosition(expr->position());
+ __ mov(a0, result_register());
+ EmitCallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ context()->Plug(v0);
}


void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
+
+ // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+ // as the left-hand side.
+ if (!expr->expression()->IsValidLeftHandSide()) {
+ VisitForEffect(expr->expression());
+ return;
+ }
+
+ // Expression can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // In case of a property we use the uninitialized expression context
+ // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ AccumulatorValueContext context(this);
+ EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && !context()->IsEffect()) {
+ __ li(at, Operand(Smi::FromInt(0)));
+ __ push(at);
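+ // The smi zero is only a placeholder; the old value is stored into
+ // this slot (beneath the receiver and key) once it has been converted
+ // to a number below.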
+ }
+ if (assign_type == NAMED_PROPERTY) {
+ // Put the object both on the stack and in the accumulator.
+ VisitForAccumulatorValue(prop->obj());
+ __ push(v0);
+ EmitNamedPropertyLoad(prop);
+ } else {
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ lw(a1, MemOperand(sp, 0));
+ __ push(v0);
+ EmitKeyedPropertyLoad(prop);
+ }
+ }
+
+ // We need a second deoptimization point after loading the value
+ // in case evaluating the property load has a side effect.
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ }
+
+ // Call ToNumber only if operand is not a smi.
+ Label no_conversion;
+ __ JumpIfSmi(v0, &no_conversion);
+ __ mov(a0, v0);
+ ToNumberStub convert_stub;
+ __ CallStub(&convert_stub);
+ __ bind(&no_conversion);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(v0);
+ break;
+ case NAMED_PROPERTY:
+ __ sw(v0, MemOperand(sp, kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ sw(v0, MemOperand(sp, 2 * kPointerSize));
+ break;
+ }
+ }
+ }
+ __ mov(a0, result_register());
+
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
+ int count_value = expr->op() == Token::INC ? 1 : -1;
+ __ li(a1, Operand(Smi::FromInt(count_value)));
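+ // Both INC and DEC go through the ADD stub below; a1 holds Smi(1) or
+ // Smi(-1) accordingly.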
+
+ if (ShouldInlineSmiCase(expr->op())) {
+ __ AdduAndCheckForOverflow(v0, a0, a1, t0);
+ __ BranchOnOverflow(&stub_call, t0); // Do stub on overflow.
+
+ // We could eliminate this smi check if we split the code at
+ // the first smi check before calling ToNumber.
+ patch_site.EmitJumpIfSmi(v0, &done);
+ __ bind(&stub_call);
+ }
+
+ // Record position before stub call.
+ SetSourcePosition(expr->position());
+
+ BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+ EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
+ __ bind(&done);
+
+ // Store the value returned in v0.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ { EffectContext context(this);
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context.Plug(v0);
+ }
+ // For all contexts except EffectContext we have the result on
+ // top of the stack.
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(v0);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ mov(a0, result_register()); // Value.
+ __ li(a2, Operand(prop->key()->AsLiteral()->handle())); // Name.
+ __ pop(a1); // Receiver.
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(v0);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ mov(a0, result_register()); // Value.
+ __ pop(a1); // Key.
+ __ pop(a2); // Receiver.
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(v0);
+ }
+ break;
+ }
+ }
}


-void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ lw(a0, GlobalObjectOperand());
+ __ li(a2, Operand(proxy->name()));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
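+ // For example, typeof applied to an undeclared global must evaluate
+ // to 'undefined' rather than throwing a ReferenceError.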
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ PrepareForBailout(expr, TOS_REG);
+ context()->Plug(v0);
+ } else if (proxy != NULL &&
+ proxy->var()->AsSlot() != NULL &&
+ proxy->var()->AsSlot()->type() == Slot::LOOKUP) {
+ Label done, slow;
+
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ Slot* slot = proxy->var()->AsSlot();
+ EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);
+
+ __ bind(&slow);
+ __ li(a0, Operand(proxy->name()));
+ __ Push(cp, a0);
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ PrepareForBailout(expr, TOS_REG);
+ __ bind(&done);
+
+ context()->Plug(v0);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitInCurrentContext(expr);
+ }
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ { AccumulatorValueContext context(this);
+ VisitForTypeofValue(expr);
+ }
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
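+ // Each branch below turns one typeof x == 'literal' comparison into
+ // pure control flow; unrecognized literals simply jump to if_false.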
+ if (check->Equals(isolate()->heap()->number_symbol())) {
+ __ JumpIfSmi(v0, if_true);
+ __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->string_symbol())) {
+ __ JumpIfSmi(v0, if_false);
+ // Check for undetectable objects => false.
+ __ GetObjectType(v0, v0, a1);
+ __ Branch(if_false, ge, a1, Operand(FIRST_NONSTRING_TYPE));
+ __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+ Split(eq, a1, Operand(zero_reg),
+ if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(if_true, eq, v0, Operand(at));
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(if_true, eq, v0, Operand(at));
+ __ JumpIfSmi(v0, if_false);
+ // Check for undetectable objects => true.
+ __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+ Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->function_symbol())) {
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, v0); // Leave map in a1.
+ Split(ge, v0, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE),
+ if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->object_symbol())) {
+ __ JumpIfSmi(v0, if_false);
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(if_true, eq, v0, Operand(at));
+ // Check for JS objects => true.
+ __ GetObjectType(v0, v0, a1);
+ __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ lbu(a1, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+ __ Branch(if_false, gt, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ // Check for undetectable objects => false.
+ __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+ Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
+ } else {
+ if (if_false != fall_through) __ jmp(if_false);
+ }
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ VisitForAccumulatorValue(expr);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ Split(eq, v0, Operand(at), if_true, if_false, fall_through);
}
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
+
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
+ context()->Plug(if_true, if_false);
+ return;
+ }
+
+ Token::Value op = expr->op();
+ VisitForStackValue(expr->left());
+ switch (op) {
+ case Token::IN:
+ VisitForStackValue(expr->right());
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+ Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
+ break;
+
+ case Token::INSTANCEOF: {
+ VisitForStackValue(expr->right());
+ InstanceofStub stub(InstanceofStub::kNoFlags);
+ __ CallStub(&stub);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ // The stub returns 0 for true.
+ Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through);
+ break;
+ }
+
+ default: {
+ VisitForAccumulatorValue(expr->right());
+ Condition cc = eq;
+ bool strict = false;
+ switch (op) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through.
+ case Token::EQ:
+ cc = eq;
+ __ mov(a0, result_register());
+ __ pop(a1);
+ break;
+ case Token::LT:
+ cc = lt;
+ __ mov(a0, result_register());
+ __ pop(a1);
+ break;
+ case Token::GT:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
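+        // (e.g. "a > b" is compiled as "b < a" with the operand
+        // registers swapped.)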
+ cc = lt;
+ __ mov(a1, result_register());
+ __ pop(a0);
+ break;
+ case Token::LTE:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = ge;
+ __ mov(a1, result_register());
+ __ pop(a0);
+ break;
+ case Token::GTE:
+ cc = ge;
+ __ mov(a0, result_register());
+ __ pop(a1);
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+
+ bool inline_smi_code = ShouldInlineSmiCase(op);
+ JumpPatchSite patch_site(masm_);
+ if (inline_smi_code) {
+ Label slow_case;
+ __ Or(a2, a0, Operand(a1));
+ patch_site.EmitJumpIfNotSmi(a2, &slow_case);
+ Split(cc, a1, Operand(a0), if_true, if_false, NULL);
+ __ bind(&slow_case);
+ }
+ // Record position and call the compare IC.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ EmitCallIC(ic, &patch_site, expr->id());
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
+ }
+ }
+
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+ Comment cmnt(masm_, "[ CompareToNull");
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ VisitForAccumulatorValue(expr->expression());
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ __ mov(a0, result_register());
+ __ LoadRoot(a1, Heap::kNullValueRootIndex);
+ if (expr->is_strict()) {
+ Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
+ } else {
+ __ Branch(if_true, eq, a0, Operand(a1));
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ Branch(if_true, eq, a0, Operand(a1));
+ __ And(at, a0, Operand(kSmiTagMask));
+ __ Branch(if_false, eq, at, Operand(zero_reg));
+ // It can be an undetectable object.
+ __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
+ __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+ Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
+ }
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- UNIMPLEMENTED_MIPS();
+ __ lw(v0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ context()->Plug(v0);
}
-Register FullCodeGenerator::result_register() { return v0; }
+Register FullCodeGenerator::result_register() {
+ return v0;
+}
+
+Register FullCodeGenerator::context_register() {
+ return cp;
+}
-Register FullCodeGenerator::context_register() { return cp; }
+
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+ RelocInfo::Mode mode,
+ unsigned ast_id) {
+ ASSERT(mode == RelocInfo::CODE_TARGET ||
+ mode == RelocInfo::CODE_TARGET_CONTEXT);
+ Counters* counters = isolate()->counters();
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ __ IncrementCounter(counters->named_load_full(), 1, a1, a2);
+ break;
+ case Code::KEYED_LOAD_IC:
+ __ IncrementCounter(counters->keyed_load_full(), 1, a1, a2);
+ break;
+ case Code::STORE_IC:
+ __ IncrementCounter(counters->named_store_full(), 1, a1, a2);
+ break;
+    case Code::KEYED_STORE_IC:
+      __ IncrementCounter(counters->keyed_store_full(), 1, a1, a2);
+      break;
+    default:
+      break;
+ }
+ if (ast_id == kNoASTId || mode == RelocInfo::CODE_TARGET_CONTEXT) {
+ __ Call(ic, mode);
+ } else {
+ ASSERT(mode == RelocInfo::CODE_TARGET);
+ mode = RelocInfo::CODE_TARGET_WITH_ID;
+ __ CallWithAstId(ic, mode, ast_id);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+ JumpPatchSite* patch_site,
+ unsigned ast_id) {
+ Counters* counters = isolate()->counters();
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ __ IncrementCounter(counters->named_load_full(), 1, a1, a2);
+ break;
+ case Code::KEYED_LOAD_IC:
+ __ IncrementCounter(counters->keyed_load_full(), 1, a1, a2);
+ break;
+ case Code::STORE_IC:
+ __ IncrementCounter(counters->named_store_full(), 1, a1, a2);
+ break;
+    case Code::KEYED_STORE_IC:
+      __ IncrementCounter(counters->keyed_store_full(), 1, a1, a2);
+      break;
+    default:
+      break;
+ }
+
+ if (ast_id == kNoASTId) {
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ } else {
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET_WITH_ID, ast_id);
+ }
+ if (patch_site != NULL && patch_site->is_bound()) {
+ patch_site->EmitPatchInfo();
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
+}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- UNIMPLEMENTED_MIPS();
+ ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+ __ sw(value, MemOperand(fp, frame_offset));
}
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- UNIMPLEMENTED_MIPS();
+ __ lw(dst, ContextOperand(cp, context_index));
}
@@ -261,12 +4291,28 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
// Non-local control flow support.
void FullCodeGenerator::EnterFinallyBlock() {
- UNIMPLEMENTED_MIPS();
+ ASSERT(!result_register().is(a1));
+ // Store result register while executing finally block.
+ __ push(result_register());
+  // Cook the return address in ra onto the stack (smi-encoded Code* delta).
+ __ Subu(a1, ra, Operand(masm_->CodeObject()));
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ ASSERT_EQ(0, kSmiTag);
+ __ Addu(a1, a1, Operand(a1)); // Convert to smi.
+ __ push(a1);
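+  // In effect (sketch): pushed == (ra - CodeObject()) << 1, a valid smi
+  // (kSmiTag == 0, kSmiTagSize == 1), so the GC never mistakes the
+  // cooked return address for a heap pointer.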
}
void FullCodeGenerator::ExitFinallyBlock() {
- UNIMPLEMENTED_MIPS();
+ ASSERT(!result_register().is(a1));
+ // Restore result register from stack.
+ __ pop(a1);
+ // Uncook return address and return.
+ __ pop(result_register());
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ __ sra(a1, a1, 1); // Un-smi-tag value.
+ __ Addu(at, a1, Operand(masm_->CodeObject()));
+ __ Jump(at);
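+  // In effect (sketch): target == (cooked >> 1) + CodeObject(), undoing
+  // the cooking done in EnterFinallyBlock.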
}
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index e5c2ad80c..cbae8e46e 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,7 +31,8 @@
#if defined(V8_TARGET_ARCH_MIPS)
-#include "codegen-inl.h"
+#include "codegen.h"
+#include "code-stubs.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
@@ -47,38 +48,568 @@ namespace internal {
#define __ ACCESS_MASM(masm)
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+ Register type,
+ Label* global_object) {
+ // Register usage:
+ // type: holds the receiver instance type on entry.
+ __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
+ __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register elements,
+ Register scratch0,
+ Register scratch1,
+ Label* miss) {
+ // Register usage:
+ // receiver: holds the receiver on entry and is unchanged.
+ // elements: holds the property dictionary on fall through.
+ // Scratch registers:
+  //   scratch0: used to hold the receiver map.
+  //   scratch1: used to hold the receiver instance type, receiver bit mask
+  //     and elements map.
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ // Check that the receiver is a valid JS object.
+ __ GetObjectType(receiver, scratch0, scratch1);
+ __ Branch(miss, lt, scratch1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // If this assert fails, we have to check upper bound too.
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+
+ GenerateGlobalInstanceTypeCheck(masm, scratch1, miss);
+
+  // Check that the receiver does not require access checks and does not
+  // have a named interceptor.
+ __ lbu(scratch1, FieldMemOperand(scratch0, Map::kBitFieldOffset));
+ __ And(scratch1, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasNamedInterceptor)));
+ __ Branch(miss, ne, scratch1, Operand(zero_reg));
+
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(scratch0, Heap::kHashTableMapRootIndex);
+ __ Branch(miss, ne, scratch1, Operand(scratch0));
+}
+
+
+// Helper function used from LoadIC/CallIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+//       done.
+// result: Register for the result. It is only updated if a jump to the miss
+//         label is not done. It may be the same register as elements or
+//         name, in which case that register is clobbered on success.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The address returned from GenerateStringDictionaryProbes() in scratch2
+// is used.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register result,
+ Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry check that the value is a normal
+ // property.
+ __ bind(&done); // scratch2 == elements + 4 * index.
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ And(at,
+ scratch1,
+ Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
+ __ Branch(miss, ne, at, Operand(zero_reg));
+
+ // Get the value at the masked, scaled index and return.
+ __ lw(result,
+ FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+//       done.
+// value: The value to store.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The address returned from GenerateStringDictionaryProbes() in scratch2
+// is used.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register value,
+ Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry in the dictionary check that the value
+ // is a normal property that is not read only.
+ __ bind(&done); // scratch2 == elements + 4 * index.
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kTypeAndReadOnlyMask
+ = (PropertyDetails::TypeField::mask() |
+ PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
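+  // Details words are smis, hence the extra << kSmiTagSize on the mask.
+  // The test below jumps to the miss label unless the entry is a NORMAL
+  // property that is not READ_ONLY (sketch of the intent, assuming
+  // NORMAL encodes as type 0 as in property.h).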
+ __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
+ __ Branch(miss, ne, at, Operand(zero_reg));
+
+ // Store the value at the masked, scaled index and return.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+ __ sw(value, MemOperand(scratch2));
+
+ // Update the write barrier. Make sure not to clobber the value.
+ __ mov(scratch1, value);
+ __ RecordWrite(elements, scratch2, scratch1);
+}
+
+
+static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register reg0,
+ Register reg1,
+ Register reg2) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'elements' or 'key'.
+  //            Unchanged on bailout so 'elements' and 'key' can be used
+  //            in further computation.
+ //
+ // Scratch registers:
+ //
+ // reg0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // reg1 - Used to hold the capacity mask of the dictionary.
+ //
+ // reg2 - Used for the index into the dictionary.
+ // at - Temporary (avoid MacroAssembler instructions also using 'at').
+ Label done;
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ __ nor(reg1, reg0, zero_reg);
+ __ sll(at, reg0, 15);
+ __ addu(reg0, reg1, at);
+
+ // hash = hash ^ (hash >> 12);
+ __ srl(at, reg0, 12);
+ __ xor_(reg0, reg0, at);
+
+ // hash = hash + (hash << 2);
+ __ sll(at, reg0, 2);
+ __ addu(reg0, reg0, at);
+
+ // hash = hash ^ (hash >> 4);
+ __ srl(at, reg0, 4);
+ __ xor_(reg0, reg0, at);
+
+ // hash = hash * 2057;
+ __ li(reg1, Operand(2057));
+ __ mul(reg0, reg0, reg1);
+
+ // hash = hash ^ (hash >> 16);
+ __ srl(at, reg0, 16);
+ __ xor_(reg0, reg0, at);
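+
+  // Taken together, the steps above match this C sketch of
+  // ComputeIntegerHash (assuming 32-bit unsigned arithmetic):
+  //   uint32_t hash = key;
+  //   hash = ~hash + (hash << 15);
+  //   hash ^= hash >> 12;
+  //   hash += hash << 2;
+  //   hash ^= hash >> 4;
+  //   hash *= 2057;
+  //   hash ^= hash >> 16;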
+
+ // Compute the capacity mask.
+ __ lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
+ __ sra(reg1, reg1, kSmiTagSize);
+ __ Subu(reg1, reg1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ static const int kProbes = 4;
+ for (int i = 0; i < kProbes; i++) {
+ // Use reg2 for index calculations and keep the hash intact in reg0.
+ __ mov(reg2, reg0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ __ Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
+ }
+ __ and_(reg2, reg2, reg1);
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(NumberDictionary::kEntrySize == 3);
+ __ sll(at, reg2, 1); // 2x.
+ __ addu(reg2, reg2, at); // reg2 = reg2 * 3.
+
+ // Check if the key is identical to the name.
+ __ sll(at, reg2, kPointerSizeLog2);
+ __ addu(reg2, elements, at);
+
+ __ lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
+ if (i != kProbes - 1) {
+ __ Branch(&done, eq, key, Operand(at));
+ } else {
+ __ Branch(miss, ne, key, Operand(at));
+ }
+ }
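+
+  // The unrolled probes follow this sketch (mask == capacity - 1, and
+  // each entry occupies kEntrySize == 3 pointers):
+  //   for (int i = 0; i < kProbes; i++) {
+  //     index = (hash + GetProbeOffset(i)) & mask;
+  //     if (entry_key(index) == key) goto done;  // entry_key is illustrative.
+  //   }
+  //   goto miss;  // All probes failed.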
+
+ __ bind(&done);
+ // Check that the value is a normal property.
+ // reg2: elements + (index * kPointerSize).
+ const int kDetailsOffset =
+ NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ __ lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
+ __ And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
+ __ Branch(miss, ne, at, Operand(zero_reg));
+
+ // Get the value at the masked, scaled index and return.
+ const int kValueOffset =
+ NumberDictionary::kElementsStartOffset + kPointerSize;
+ __ lw(result, FieldMemOperand(reg2, kValueOffset));
+}
+
+
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ Label miss;
+
+ StubCompiler::GenerateLoadArrayLength(masm, a0, a3, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
-void LoadIC::GenerateStringLength(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+  // -- ra    : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ Label miss;
+
+ StubCompiler::GenerateLoadStringLength(masm, a0, a1, a3, &miss,
+ support_wrappers);
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+  // -- ra    : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ Label miss;
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, a0, a1, a3, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register map,
+ Register scratch,
+ int interceptor_bit,
+ Label* slow) {
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, slow);
+ // Get the map of the receiver.
+ __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check bit field.
+ __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ And(at, scratch, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+ __ Branch(slow, ne, at, Operand(zero_reg));
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object,
+ // we enter the runtime system to make sure that indexing into string
+ // objects work as intended.
+ ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* not_fast_array,
+ Label* out_of_range) {
+ // Register use:
+ //
+ // receiver - holds the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // elements - holds the elements of the receiver on exit.
+ //
+ // result - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'receiver' or 'key'.
+ // Unchanged on bailout so 'receiver' and 'key' can be safely
+ // used by further computation.
+ //
+ // Scratch registers:
+ //
+ // scratch1 - used to hold elements map and elements length.
+ // Holds the elements map if not_fast_array branch is taken.
+ //
+ // scratch2 - used to hold the loaded value.
+
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode (not dictionary).
+ __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ Branch(not_fast_array, ne, scratch1, Operand(at));
+ } else {
+ __ AssertFastElements(elements);
+ }
+
+ // Check that the key (index) is within bounds.
+ __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Branch(out_of_range, hs, key, Operand(scratch1));
+
+ // Fast case: Do the load.
+ __ Addu(scratch1, elements,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // The key is a smi.
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(at, at, scratch1);
+ __ lw(scratch2, MemOperand(at));
+
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ Branch(out_of_range, eq, scratch2, Operand(at));
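+  // (For example, for "var a = [0, , 2]" a load of a[1] hits the hole
+  // and must still search the prototype chain.)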
+ __ mov(result, scratch2);
+}
+
+
+// Checks whether a key is an array index string or a symbol string.
+// Falls through if a key is a symbol.
+static void GenerateKeyStringCheck(MacroAssembler* masm,
+ Register key,
+ Register map,
+ Register hash,
+ Label* index_string,
+ Label* not_symbol) {
+ // The key is not a smi.
+ // Is it a string?
+ __ GetObjectType(key, map, hash);
+ __ Branch(not_symbol, ge, hash, Operand(FIRST_NONSTRING_TYPE));
+
+ // Is the string an array index, with cached numeric value?
+ __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
+ __ And(at, hash, Operand(String::kContainsCachedArrayIndexMask));
+ __ Branch(index_string, eq, at, Operand(zero_reg));
+
+ // Is the string a symbol?
+ // map: key map
+ __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ ASSERT(kSymbolTag != 0);
+ __ And(at, hash, Operand(kIsSymbolMask));
+ __ Branch(not_symbol, eq, at, Operand(zero_reg));
}
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
+// The generated code does not accept smi keys.
+// The generated code falls through if both probes miss.
+static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+ int argc,
+ Code::Kind kind,
+ Code::ExtraICState extra_ic_state) {
+ // ----------- S t a t e -------------
+ // -- a1 : receiver
+ // -- a2 : name
+ // -----------------------------------
+ Label number, non_number, non_string, boolean, probe, miss;
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeFlags(kind,
+ NOT_IN_LOOP,
+ MONOMORPHIC,
+ extra_ic_state,
+ NORMAL,
+ argc);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, a1, a2, a3, t0, t1);
+
+ // If the stub cache probing failed, the receiver might be a value.
+  // For value objects we probe the cache again with the map of the
+  // prototype object of the corresponding JSValue wrapper, so load
+  // that prototype first.
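+  // For example, for a call like (0).toString() the second probe uses
+  // the map of Number.prototype (sketch of the idea).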
+ //
+ // Check for number.
+ __ JumpIfSmi(a1, &number, t1);
+ __ GetObjectType(a1, a3, a3);
+ __ Branch(&non_number, ne, a3, Operand(HEAP_NUMBER_TYPE));
+ __ bind(&number);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::NUMBER_FUNCTION_INDEX, a1);
+ __ Branch(&probe);
+
+ // Check for string.
+ __ bind(&non_number);
+ __ Branch(&non_string, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::STRING_FUNCTION_INDEX, a1);
+ __ Branch(&probe);
+
+ // Check for boolean.
+ __ bind(&non_string);
+ __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+ __ Branch(&boolean, eq, a1, Operand(t0));
+ __ LoadRoot(t1, Heap::kFalseValueRootIndex);
+ __ Branch(&miss, ne, a1, Operand(t1));
+ __ bind(&boolean);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::BOOLEAN_FUNCTION_INDEX, a1);
+
+ // Probe the stub cache for the value object.
+ __ bind(&probe);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, a1, a2, a3, t0, t1);
+
+ __ bind(&miss);
}
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
+static void GenerateFunctionTailCall(MacroAssembler* masm,
+ int argc,
+ Label* miss,
+ Register scratch) {
+ // a1: function
+
+ // Check that the value isn't a smi.
+ __ JumpIfSmi(a1, miss);
+
+ // Check that the value is a JSFunction.
+ __ GetObjectType(a1, scratch, scratch);
+ __ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
- // Registers:
- // a2: name
- // ra: return address
+
+static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Get the receiver of the function from the stack into a1.
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+ GenerateStringDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);
+
+ // a0: elements
+ // Search the dictionary - put result in register a1.
+ GenerateDictionaryLoad(masm, &miss, a0, a2, a1, a3, t0);
+
+ GenerateFunctionTailCall(masm, argc, &miss, t0);
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+}
+
+
+static void GenerateCallMiss(MacroAssembler* masm,
+ int argc,
+ IC::UtilityId id,
+ Code::ExtraICState extra_ic_state) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ if (id == IC::kCallIC_Miss) {
+ __ IncrementCounter(isolate->counters()->call_miss(), 1, a3, t0);
+ } else {
+ __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, a3, t0);
+ }
// Get the receiver of the function from the stack.
__ lw(a3, MemOperand(sp, argc*kPointerSize));
@@ -86,123 +617,1130 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
__ EnterInternalFrame();
// Push the receiver and the name of the function.
- __ MultiPush(a2.bit() | a3.bit());
+ __ Push(a3, a2);
// Call the entry.
__ li(a0, Operand(2));
- __ li(a1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
+ __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
CEntryStub stub(1);
__ CallStub(&stub);
- // Move result to r1 and leave the internal frame.
+ // Move result to a1 and leave the internal frame.
__ mov(a1, v0);
__ LeaveInternalFrame();
// Check if the receiver is a global object of some sort.
- Label invoke, global;
- __ lw(a2, MemOperand(sp, argc * kPointerSize));
- __ andi(t0, a2, kSmiTagMask);
- __ Branch(eq, &invoke, t0, Operand(zero_reg));
- __ GetObjectType(a2, a3, a3);
- __ Branch(eq, &global, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ Branch(ne, &invoke, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
- __ sw(a2, MemOperand(sp, argc * kPointerSize));
+ // This can happen only for regular CallIC but not KeyedCallIC.
+ if (id == IC::kCallIC_Miss) {
+ Label invoke, global;
+ __ lw(a2, MemOperand(sp, argc * kPointerSize));
+ __ andi(t0, a2, kSmiTagMask);
+ __ Branch(&invoke, eq, t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, a3);
+ __ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
+ // Patch the receiver on the stack.
+ __ bind(&global);
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a2, MemOperand(sp, argc * kPointerSize));
+ __ bind(&invoke);
+ }
// Invoke the function.
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
ParameterCount actual(argc);
- __ bind(&invoke);
- __ InvokeFunction(a1, actual, JUMP_FUNCTION);
+ __ InvokeFunction(a1,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ call_kind);
+}
+
+
+void CallIC::GenerateMiss(MacroAssembler* masm,
+ int argc,
+ Code::ExtraICState extra_ic_state) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
}
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm,
+ int argc,
+ Code::ExtraICState extra_ic_state) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack into a1.
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+ GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+ GenerateMiss(masm, argc, extra_ic_state);
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ GenerateCallNormal(masm, argc);
+ GenerateMiss(masm, argc, Code::kNoExtraICState);
+}
+
+
+void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
+}
+
+
+void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack into a1.
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+ Label do_call, slow_call, slow_load, slow_reload_receiver;
+ Label check_number_dictionary, check_string, lookup_monomorphic_cache;
+ Label index_smi, index_string;
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(a2, &check_string);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, a1, a0, a3, Map::kHasIndexedInterceptor, &slow_call);
+
+ GenerateFastArrayLoad(
+ masm, a1, a2, t0, a3, a0, a1, &check_number_dictionary, &slow_load);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, a0, a3);
+
+ __ bind(&do_call);
+  // The receiver in a1 is not used after this point.
+ // a2: key
+ // a1: function
+
+ GenerateFunctionTailCall(masm, argc, &slow_call, a0);
+
+ __ bind(&check_number_dictionary);
+ // a2: key
+ // a3: elements map
+ // t0: elements pointer
+ // Check whether the elements is a number dictionary.
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&slow_load, ne, a3, Operand(at));
+ __ sra(a0, a2, kSmiTagSize);
+ // a0: untagged index
+ GenerateNumberDictionaryLoad(masm, &slow_load, t0, a2, a1, a0, a3, t1);
+ __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3);
+ __ jmp(&do_call);
+
+ __ bind(&slow_load);
+ // This branch is taken when calling KeyedCallIC_Miss is neither required
+ // nor beneficial.
+ __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
+ __ EnterInternalFrame();
+ __ push(a2); // Save the key.
+ __ Push(a1, a2); // Pass the receiver and the key.
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(a2); // Restore the key.
+ __ LeaveInternalFrame();
+ __ mov(a1, v0);
+ __ jmp(&do_call);
+
+ __ bind(&check_string);
+ GenerateKeyStringCheck(masm, a2, a0, a3, &index_string, &slow_call);
+
+ // The key is known to be a symbol.
+ // If the receiver is a regular JS object with slow properties then do
+ // a quick inline probe of the receiver's dictionary.
+ // Otherwise do the monomorphic cache probe.
+ GenerateKeyedLoadReceiverCheck(
+ masm, a1, a0, a3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
+
+ __ lw(a0, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+ __ lw(a3, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&lookup_monomorphic_cache, ne, a3, Operand(at));
+
+ GenerateDictionaryLoad(masm, &slow_load, a0, a2, a1, a3, t0);
+ __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, a0, a3);
+ __ jmp(&do_call);
+
+ __ bind(&lookup_monomorphic_cache);
+ __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, a0, a3);
+ GenerateMonomorphicCacheProbe(masm,
+ argc,
+ Code::KEYED_CALL_IC,
+ Code::kNoExtraICState);
+ // Fall through on miss.
+
+ __ bind(&slow_call);
+ // This branch is taken if:
+ // - the receiver requires boxing or access check,
+ // - the key is neither smi nor symbol,
+ // - the value loaded is not a function,
+ // - there is hope that the runtime will create a monomorphic call stub,
+ // that will get fetched next time.
+ __ IncrementCounter(counters->keyed_call_generic_slow(), 1, a0, a3);
+ GenerateMiss(masm, argc);
+
+ __ bind(&index_string);
+ __ IndexFromHash(a3, a2);
+ // Now jump to the place where smi keys are handled.
+ __ jmp(&index_smi);
+}
+
+
+void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ // Check if the name is a string.
+ Label miss;
+ __ JumpIfSmi(a2, &miss);
+ __ IsObjectJSStringType(a2, a0, &miss);
+
+ GenerateCallNormal(masm, argc);
+ __ bind(&miss);
+ GenerateMiss(masm, argc);
+}
+
+
// Defined in ic.cc.
Object* LoadIC_Miss(Arguments args);
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, a0, a2, a3, t0, t1);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
}
void LoadIC::GenerateNormal(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+  // -- ra    : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ Label miss;
+
+ GenerateStringDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);
+
+ // a1: elements
+ GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0);
+ __ Ret();
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ GenerateMiss(masm);
}
void LoadIC::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->load_miss(), 1, a3, t0);
+
+ __ mov(a3, a0);
+ __ Push(a3, a2);
+
+ // Perform tail call to the entry.
+ ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+ __ TailCallExternalReference(ref, 2, 1);
}
-void LoadIC::ClearInlinedVersion(Address address) {}
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
- return false;
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the map check
+ // later, we do not need to check for interceptors or whether it
+ // requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ GetObjectType(object, scratch1, scratch2);
+ __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
+
+ // Check that the key is a positive smi.
+  __ And(scratch1, key, Operand(0x80000001));
+ __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
+
+ // Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+ __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
+ __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
+
+ // Load element index and check whether it is the hole.
+ const int kOffset =
+ FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+ __ li(scratch3, Operand(kPointerSize >> 1));
+ __ mul(scratch3, key, scratch3);
+ __ Addu(scratch3, scratch3, Operand(kOffset));
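+  // Note: key is a smi (value << 1), so key * (kPointerSize >> 1)
+  // equals untagged_index * kPointerSize.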
+
+ __ Addu(scratch2, scratch1, scratch3);
+ __ lw(scratch2, MemOperand(scratch2));
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ li(scratch3, Operand(kPointerSize >> 1));
+ __ mul(scratch3, scratch2, scratch3);
+ __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+ __ Addu(scratch2, scratch1, scratch3);
+ return MemOperand(scratch2);
+}
+
+
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map. The parameter_map register
+ // must be loaded with the parameter map of the arguments object and is
+ // overwritten.
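+  // Parameter map layout, as implied by the offsets used here and in
+  // GenerateMappedArgumentsLookup (sketch):
+  //   elements[0]     : context
+  //   elements[1]     : arguments backing store (FixedArray)
+  //   elements[2 + i] : context slot index for parameter i, or the hole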
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+ __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
+ __ li(scratch, Operand(kPointerSize >> 1));
+ __ mul(scratch, key, scratch);
+ __ Addu(scratch,
+ scratch,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Addu(scratch, backing_store, scratch);
+ return MemOperand(scratch);
}
-void KeyedLoadIC::ClearInlinedVersion(Address address) {}
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
- return false;
+
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+  // -- ra     : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label slow, notin;
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, &notin, &slow);
+ __ lw(v0, mapped_location);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in a2.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, a0, a2, a3, &slow);
+  __ lw(a2, unmapped_location);
+  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+  __ Branch(&slow, eq, a2, Operand(a3));  // The hole means no element.
+  __ mov(v0, a2);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+  // -- ra     : return address
+ // -----------------------------------
+ Label slow, notin;
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
+ __ sw(a0, mapped_location);
+  // Verify that mapped_location is a register MemOperand with no offset.
+ ASSERT_EQ(mapped_location.offset(), 0);
+ __ RecordWrite(a3, mapped_location.rm(), t5);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // (In delay slot) return the value stored in v0.
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in a3.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
+ __ sw(a0, unmapped_location);
+ ASSERT_EQ(unmapped_location.offset(), 0);
+ __ RecordWrite(a3, unmapped_location.rm(), t5);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // (In delay slot) return the value stored in v0.
+ __ bind(&slow);
+ GenerateMiss(masm, false);
}
-void KeyedStoreIC::ClearInlinedVersion(Address address) {}
-void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
- return false;
+
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+ int argc) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+  // -- ra     : return address
+ // -----------------------------------
+ Label slow, notin;
+ // Load receiver.
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, a1, a2, a3, t0, t1, &notin, &slow);
+ __ lw(a1, mapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow, a3);
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in a3.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, a2, a3, t0, &slow);
+ __ lw(a1, unmapped_location);
+ __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+ __ Branch(&slow, eq, a1, Operand(a3));
+ GenerateFunctionTailCall(masm, argc, &slow, a3);
+ __ bind(&slow);
+ GenerateMiss(masm, argc);
}
Object* KeyedLoadIC_Miss(Arguments args);
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
+
+ __ Push(a1, a0);
+
+ // Perform tail call to the entry.
+ ExternalReference ref = force_generic
+ ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
+ : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+
+ __ Push(a1, a0);
+
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label slow, check_string, index_smi, index_string, property_array_property;
+ Label probe_dictionary, check_number_dictionary;
+
+ Register key = a0;
+ Register receiver = a1;
+
+ Isolate* isolate = masm->isolate();
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &check_string);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
+
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(a2, a3, &check_number_dictionary);
+
+ GenerateFastArrayLoad(
+ masm, receiver, key, t0, a3, a2, v0, NULL, &slow);
+
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a2, a3);
+ __ Ret();
+
+ __ bind(&check_number_dictionary);
+ __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));
+
+ // Check whether the elements is a number dictionary.
+ // a0: key
+ // a3: elements map
+ // t0: elements
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&slow, ne, a3, Operand(at));
+ __ sra(a2, a0, kSmiTagSize);
+ GenerateNumberDictionaryLoad(masm, &slow, t0, a0, v0, a2, a3, t1);
+ __ Ret();
+
+ // Slow case, key and receiver still in a0 and a1.
+ __ bind(&slow);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
+ 1,
+ a2,
+ a3);
+ GenerateRuntimeGetProperty(masm);
+
+ __ bind(&check_string);
+ GenerateKeyStringCheck(masm, key, a2, a3, &index_string, &slow);
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
+
+ // If the receiver is a fast-case object, check the keyed lookup
+ // cache. Otherwise probe the dictionary.
+ __ lw(a3, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+ __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&probe_dictionary, eq, t0, Operand(at));
+
+ // Load the map of the receiver, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the string hash.
+ __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ sra(a3, a2, KeyedLookupCache::kMapHashShift);
+ __ lw(t0, FieldMemOperand(a0, String::kHashFieldOffset));
+ __ sra(at, t0, String::kHashShift);
+ __ xor_(a3, a3, at);
+ __ And(a3, a3, Operand(KeyedLookupCache::kCapacityMask));
+
+ // Load the key (consisting of map and symbol) from the cache and
+ // check for match.
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(isolate);
+ __ li(t0, Operand(cache_keys));
+ __ sll(at, a3, kPointerSizeLog2 + 1);
+ __ addu(t0, t0, at);
+  __ lw(t1, MemOperand(t0));  // Load the cached map.
+  __ Addu(t0, t0, Operand(kPointerSize));
+  __ Branch(&slow, ne, a2, Operand(t1));
+  __ lw(t1, MemOperand(t0));  // Load the cached symbol.
+ __ Branch(&slow, ne, a0, Operand(t1));
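+
+  // The two compares above implement this hit test (sketch; each cache
+  // entry is a (map, symbol) pair of pointers):
+  //   index = ((map >> kMapHashShift) ^ (hash >> kHashShift))
+  //           & kCapacityMask;
+  //   hit = keys[2 * index] == map && keys[2 * index + 1] == symbol;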
+
+ // Get field offset.
+ // a0 : key
+ // a1 : receiver
+ // a2 : receiver's map
+ // a3 : lookup cache index
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+ __ li(t0, Operand(cache_field_offsets));
+ __ sll(at, a3, kPointerSizeLog2);
+ __ addu(at, t0, at);
+ __ lw(t1, MemOperand(at));
+ __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
+ __ Subu(t1, t1, t2);
+ __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
+
+ // Load in-object property.
+ __ lbu(t2, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ addu(t2, t2, t1); // Index from start of object.
+ __ Subu(a1, a1, Operand(kHeapObjectTag)); // Remove the heap tag.
+ __ sll(at, t2, kPointerSizeLog2);
+ __ addu(at, a1, at);
+ __ lw(v0, MemOperand(at));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1,
+ a2,
+ a3);
+ __ Ret();
+
+ // Load property array property.
+ __ bind(&property_array_property);
+ __ lw(a1, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+ __ Addu(a1, a1, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ sll(t0, t1, kPointerSizeLog2);
+ __ Addu(t0, t0, a1);
+ __ lw(v0, MemOperand(t0));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1,
+ a2,
+ a3);
+ __ Ret();
+
+ // Do a quick inline probe of the receiver's dictionary, if it
+ // exists.
+ __ bind(&probe_dictionary);
+ // a1: receiver
+ // a0: key
+ // a3: elements
+ __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(a2, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, a2, &slow);
+ // Load the property to v0.
+ GenerateDictionaryLoad(masm, &slow, a3, a0, v0, a2, t0);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
+ 1,
+ a2,
+ a3);
+ __ Ret();
+
+ __ bind(&index_string);
+ __ IndexFromHash(a3, key);
+ // Now jump to the place where smi keys are handled.
+ __ Branch(&index_smi);
}
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key (index)
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ Register receiver = a1;
+ Register index = a0;
+ Register scratch1 = a2;
+ Register scratch2 = a3;
+ Register result = v0;
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ bind(&miss);
+ GenerateMiss(masm, false);
}
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(a2, a1, a0);
+ __ li(a1, Operand(Smi::FromInt(NONE))); // PropertyAttributes.
+ __ li(a0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
+ __ Push(a1, a0);
+
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ Label slow, fast, array, extra, exit;
+
+ // Register usage.
+ Register value = a0;
+ Register key = a1;
+ Register receiver = a2;
+ Register elements = a3; // Elements array of the receiver.
+  // t0 is used as ip in the ARM version.
+ // t3-t4 are used as temporaries.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &slow);
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+
+ // Get the map of the object.
+ __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to do this because this generic stub does not perform map checks.
+ __ lbu(t0, FieldMemOperand(t3, Map::kBitFieldOffset));
+ __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ Branch(&slow, ne, t0, Operand(zero_reg));
+ // Check if the object is a JS array or not.
+ __ lbu(t3, FieldMemOperand(t3, Map::kInstanceTypeOffset));
+
+ __ Branch(&array, eq, t3, Operand(JS_ARRAY_TYPE));
+ // Check that the object is some kind of JSObject.
+ __ Branch(&slow, lt, t3, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ Branch(&slow, eq, t3, Operand(JS_PROXY_TYPE));
+ __ Branch(&slow, eq, t3, Operand(JS_FUNCTION_PROXY_TYPE));
+
+ // Object case: Check key against length in the elements array.
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // Check that the object is in fast mode and writable.
+ __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&slow, ne, t3, Operand(t0));
+ // Check array bounds. Both the key and the length of FixedArray are smis.
+ __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Branch(&fast, lo, key, Operand(t0));
+  // Fall through to slow if the index is >= length.
+
+ // Slow case, handle jump to runtime.
+ __ bind(&slow);
+
+ // Entry registers are intact.
+ // a0: value.
+ // a1: key.
+ // a2: receiver.
+
+ GenerateRuntimeSetProperty(masm, strict_mode);
+
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+
+ __ bind(&extra);
+ // Only support writing to array[array.length].
+ __ Branch(&slow, ne, key, Operand(t0));
+ // Check for room in the elements backing store.
+ // Both the key and the length of FixedArray are smis.
+ __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Branch(&slow, hs, key, Operand(t0));
+ // Calculate key + 1 as smi.
+ ASSERT_EQ(0, kSmiTag);
+ __ Addu(t3, key, Operand(Smi::FromInt(1)));
+ __ sw(t3, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Branch(&fast);
+
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode (and writable); if it
+ // is the length is always a smi.
+
+ __ bind(&array);
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&slow, ne, t3, Operand(t0));
+
+ // Check the key against the length in the array.
+ __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Branch(&extra, hs, key, Operand(t0));
+ // Fall through to fast case.
+
+ __ bind(&fast);
+ // Fast case, store the value to the elements backing store.
+ __ Addu(t4, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(t1, key, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t4, t4, Operand(t1));
+ __ sw(value, MemOperand(t4));
+ // Skip write barrier if the written value is a smi.
+ __ JumpIfSmi(value, &exit);
+
+ // Update write barrier for the elements array address.
+ __ Subu(t3, t4, Operand(elements));
+
+ __ RecordWrite(elements, Operand(t3), t4, t5);
+ __ bind(&exit);
+
+ __ mov(v0, a0); // Return the value written.
+ __ Ret();
}
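
A minimal C++ sketch of the control flow implemented above; the flattened
bool parameters and names are illustrative only, not V8 API:

#include <stdint.h>

enum StoreOutcome { kFastStore, kGrowByOne, kRuntime };

// Mirrors the slow/fast/extra/array labels: only smi keys on
// access-check-free receivers with fast FixedArray elements are handled
// inline, and a JSArray may grow by exactly one element when storing to
// array[array.length].
StoreOutcome GenericKeyedStore(bool key_is_smi, bool receiver_is_smi,
                               bool needs_access_check, bool is_js_array,
                               bool elements_are_fast, uint32_t key,
                               uint32_t array_length, uint32_t capacity) {
  if (!key_is_smi || receiver_is_smi || needs_access_check ||
      !elements_are_fast) {
    return kRuntime;                          // The "slow" label.
  }
  if (!is_js_array) {
    // Object case: bounds-check against the elements store length only.
    return key < capacity ? kFastStore : kRuntime;
  }
  if (key < array_length) return kFastStore;  // The "fast" label.
  if (key == array_length && key < capacity) {
    return kGrowByOne;                        // The "extra" label.
  }
  return kRuntime;
}
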
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label slow;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(a1, &slow);
+
+  // Check that the key is an array index, that is, a Uint32.
+ __ And(t0, a0, Operand(kSmiTagMask | kSmiSignMask));
+ __ Branch(&slow, ne, t0, Operand(zero_reg));
+
+ // Get the map of the receiver.
+ __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+
+  // Check that it has an indexed interceptor and that access checks
+ // are not enabled for this object.
+ __ lbu(a3, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ And(a3, a3, Operand(kSlowCaseBitFieldMask));
+ __ Branch(&slow, ne, a3, Operand(1 << Map::kHasIndexedInterceptor));
+ // Everything is fine, call runtime.
+ __ Push(a1, a0); // Receiver, key.
+
+ // Perform tail call to the entry.
+ __ TailCallExternalReference(ExternalReference(
+ IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
+
+ __ bind(&slow);
+ GenerateMiss(masm, false);
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(a2, a1, a0);
+
+ ExternalReference ref = force_generic
+ ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
+ masm->isolate())
+ : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ // We can't use MultiPush as the order of the registers is important.
+ __ Push(a2, a1, a0);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+
+ __ TailCallExternalReference(ref, 3, 1);
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ // Get the receiver from the stack and probe the stub cache.
+ Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC,
+ strict_mode);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, a1, a2, a3, t0, t1);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
}
void StoreIC::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ __ Push(a1, a2, a0);
+ // Perform tail call to the entry.
+ ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
+ masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
}
void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ //
+ // This accepts as a receiver anything JSObject::SetElementsLength accepts
+  // (currently anything except external and pixel arrays, which means
+  // anything with elements of FixedArray type), but is currently restricted
+  // to JSArray.
+  // Value must be a number; only smis are accepted, as the most common case.
+
+ Label miss;
+
+ Register receiver = a1;
+ Register value = a0;
+ Register scratch = a3;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ GetObjectType(receiver, scratch, scratch);
+ __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+ __ GetObjectType(scratch, scratch, scratch);
+ __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver, value);
+
+ ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength),
+ masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&miss);
+
+ GenerateMiss(masm);
}
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ GenerateStringDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);
+
+ GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
+ __ Ret();
+
+ __ bind(&miss);
+ __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ __ Push(a1, a2, a0);
+
+ __ li(a1, Operand(Smi::FromInt(NONE))); // PropertyAttributes.
+ __ li(a0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(a1, a0);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
#undef __
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ // Reverse left and right operands to obtain ECMA-262 conversion order.
+ return lt;
+ case Token::LTE:
+ // Reverse left and right operands to obtain ECMA-262 conversion order.
+ return ge;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return kNoCondition;
+ }
+}
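
As a sanity check on the reversed mappings (GT maps to lt, LTE to ge), a
small self-contained C++ property test; purely illustrative:

#include <assert.h>

int main() {
  // With left and right swapped at the call site, x > y must be decided
  // as y < x, and x <= y as y >= x.
  for (int x = -2; x <= 2; ++x) {
    for (int y = -2; y <= 2; ++y) {
      assert((x > y) == (y < x));
      assert((x <= y) == (y >= x));
    }
  }
  return 0;
}
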
+
+
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+ HandleScope scope;
+ Handle<Code> rewritten;
+ State previous_state = GetState();
+ State state = TargetState(previous_state, false, x, y);
+ if (state == GENERIC) {
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
+ rewritten = stub.GetCode();
+ } else {
+ ICCompareStub stub(op_, state);
+ rewritten = stub.GetCode();
+ }
+ set_target(*rewritten);
+
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[CompareIC (%s->%s)#%s]\n",
+ GetStateName(previous_state),
+ GetStateName(state),
+ Token::Name(op_));
+ }
+#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address());
+ }
+}
+
+
+void PatchInlinedSmiCode(Address address) {
+ Address andi_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not an andi at, rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(andi_instruction_address);
+ if (!Assembler::IsAndImmediate(instr)) {
+ return;
+ }
+
+  // The delta to the start of the map check instruction, and the
+  // branch condition to use at the patched jump, are decoded below.
+ int delta = Assembler::GetImmediate16(instr);
+ delta += Assembler::GetRs(instr) * kImm16Mask;
+ // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
+ // signals that nothing was inlined.
+ if (delta == 0) {
+ return;
+ }
+
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
+ address, andi_instruction_address, delta);
+ }
+#endif
+
+ Address patch_address =
+ andi_instruction_address - delta * Instruction::kInstrSize;
+ Instr instr_at_patch = Assembler::instr_at(patch_address);
+ Instr branch_instr =
+ Assembler::instr_at(patch_address + Instruction::kInstrSize);
+ ASSERT(Assembler::IsAndImmediate(instr_at_patch));
+ ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+ ASSERT(Assembler::IsBranch(branch_instr));
+ if (Assembler::IsBeq(branch_instr)) {
+ // This is patching a "jump if not smi" site to be active.
+ // Changing:
+ // andi at, rx, 0
+ // Branch <target>, eq, at, Operand(zero_reg)
+ // to:
+ // andi at, rx, #kSmiTagMask
+ // Branch <target>, ne, at, Operand(zero_reg)
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+ patcher.masm()->andi(at, reg, kSmiTagMask);
+ patcher.ChangeBranchCondition(ne);
+ } else {
+ ASSERT(Assembler::IsBne(branch_instr));
+ // This is patching a "jump if smi" site to be active.
+ // Changing:
+ // andi at, rx, 0
+ // Branch <target>, ne, at, Operand(zero_reg)
+ // to:
+ // andi at, rx, #kSmiTagMask
+ // Branch <target>, eq, at, Operand(zero_reg)
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+ patcher.masm()->andi(at, reg, kSmiTagMask);
+ patcher.ChangeBranchCondition(eq);
+ }
+}
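
A sketch of the delta encoding decoded above, assuming the emitting side
stores delta / kImm16Mask in the rs field and delta % kImm16Mask in the
16-bit immediate (the kImm16Mask value here is an assumption for
illustration):

#include <assert.h>

const int kImm16Mask = 0xFFFF;  // Assumed value.

int EncodeRs(int delta) { return delta / kImm16Mask; }
int EncodeImm16(int delta) { return delta % kImm16Mask; }

// Matches "delta = imm16 + rs * kImm16Mask" in PatchInlinedSmiCode.
int DecodeDelta(int rs, int imm16) { return imm16 + rs * kImm16Mask; }

int main() {
  for (int delta = 1; delta < 200000; delta += 7) {
    assert(DecodeDelta(EncodeRs(delta), EncodeImm16(delta)) == delta);
  }
  return 0;
}
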
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/jump-target-mips.cc b/deps/v8/src/mips/jump-target-mips.cc
deleted file mode 100644
index 408f75e79..000000000
--- a/deps/v8/src/mips/jump-target-mips.cc
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
- ASSERT(cgen()->has_valid_frame());
- // Live non-frame registers are not allowed at unconditional jumps
- // because we have no way of invalidating the corresponding results
- // which are still live in the C++ code.
- ASSERT(cgen()->HasValidEntryRegisters());
-
- if (is_bound()) {
-    // Backward jump. There is already a frame expectation at the target.
- ASSERT(direction_ == BIDIRECTIONAL);
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- } else {
- // Use the current frame as the expected one at the target if necessary.
- if (entry_frame_ == NULL) {
- entry_frame_ = cgen()->frame();
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- } else {
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- }
-
- // The predicate is_linked() should be made true. Its implementation
- // detects the presence of a frame pointer in the reaching_frames_ list.
- if (!is_linked()) {
- reaching_frames_.Add(NULL);
- ASSERT(is_linked());
- }
- }
- __ b(&entry_label_);
- __ nop(); // Branch delay slot nop.
-}
-
-
-void JumpTarget::DoBranch(Condition cc, Hint ignored) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void JumpTarget::Call() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void JumpTarget::DoBind() {
- ASSERT(!is_bound());
-
- // Live non-frame registers are not allowed at the start of a basic
- // block.
- ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
- if (cgen()->has_valid_frame()) {
- // If there is a current frame we can use it on the fall through.
- if (entry_frame_ == NULL) {
- entry_frame_ = new VirtualFrame(cgen()->frame());
- } else {
- ASSERT(cgen()->frame()->Equals(entry_frame_));
- }
- } else {
- // If there is no current frame we must have an entry frame which we can
- // copy.
- ASSERT(entry_frame_ != NULL);
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- }
-
- // The predicate is_linked() should be made false. Its implementation
- // detects the presence (or absence) of frame pointers in the
- // reaching_frames_ list. If we inserted a bogus frame to make
- // is_linked() true, remove it now.
- if (is_linked()) {
- reaching_frames_.Clear();
- }
-
- __ bind(&entry_label_);
-}
-
-
-void BreakTarget::Jump() {
- // On ARM we do not currently emit merge code for jumps, so we need to do
- // it explicitly here. The only merging necessary is to drop extra
- // statement state from the stack.
- ASSERT(cgen()->has_valid_frame());
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->Drop(count);
- DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even
- // on the fall through. This is so we can bind the return target
- // with state on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- // On ARM we do not currently emit merge code at binding sites, so we need
- // to do it explicitly here. The only merging necessary is to drop extra
- // statement state from the stack.
- cgen()->frame()->Drop(count);
- }
-
- DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/arm/register-allocator-arm.h b/deps/v8/src/mips/lithium-codegen-mips.h
index fdbc88f5d..2aec68456 100644
--- a/deps/v8/src/arm/register-allocator-arm.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,20 +25,41 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_H_
-#define V8_ARM_REGISTER_ALLOCATOR_ARM_H_
+#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
+
+#include "mips/lithium-mips.h"
+
+#include "deoptimizer.h"
+#include "safepoint-table.h"
+#include "scopes.h"
+
+// Note: this file was taken from the X64 version. ARM has a partially working
+// lithium implementation, but for now it is not ported to MIPS.
namespace v8 {
namespace internal {
-class RegisterAllocatorConstants : public AllStatic {
+// Forward declarations.
+class LDeferredCode;
+
+class LCodeGen BASE_EMBEDDED {
public:
- // No registers are currently managed by the register allocator on ARM.
- static const int kNumRegisters = 0;
- static const int kInvalidRegister = -1;
-};
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
+
+ // Try to generate code for the entire chunk, but it may fail if the
+ // chunk contains constructs we cannot handle. Returns true if the
+ // code generation attempt succeeded.
+ bool GenerateCode() {
+ UNIMPLEMENTED();
+ return false;
+ }
+ // Finish the code by setting stack height, safepoint, and bailout
+ // information on it.
+ void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
+};
} } // namespace v8::internal
-#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_H_
+#endif // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
new file mode 100644
index 000000000..ebc1e43bf
--- /dev/null
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -0,0 +1,307 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_LITHIUM_MIPS_H_
+#define V8_MIPS_LITHIUM_MIPS_H_
+
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "lithium.h"
+#include "safepoint-table.h"
+
+// Note: this file was taken from the X64 version. ARM has a partially working
+// lithium implementation, but for now it is not ported to MIPS.
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+class LEnvironment;
+class Translation;
+
+class LInstruction: public ZoneObject {
+ public:
+ LInstruction() { }
+ virtual ~LInstruction() { }
+
+ // Predicates should be generated by macro as in lithium-ia32.h.
+ virtual bool IsLabel() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+ virtual bool IsOsrEntry() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ LPointerMap* pointer_map() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ bool HasPointerMap() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ void set_environment(LEnvironment* env) { UNIMPLEMENTED(); }
+
+ LEnvironment* environment() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ bool HasEnvironment() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
+
+ virtual bool IsControl() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ void MarkAsCall() { UNIMPLEMENTED(); }
+ void MarkAsSaveDoubles() { UNIMPLEMENTED(); }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ bool IsMarkedAsSaveDoubles() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ virtual bool HasResult() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ virtual LOperand* result() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ virtual int InputCount() {
+ UNIMPLEMENTED();
+ return 0;
+ }
+
+ virtual LOperand* InputAt(int i) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ virtual int TempCount() {
+ UNIMPLEMENTED();
+ return 0;
+ }
+
+ virtual LOperand* TempAt(int i) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LOperand* FirstInput() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LOperand* Output() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+#ifdef DEBUG
+ void VerifyCall() { UNIMPLEMENTED(); }
+#endif
+};
+
+
+class LGap: public LInstruction {
+ public:
+ explicit LGap(HBasicBlock* block) { }
+
+ HBasicBlock* block() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
+
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LParallelMove* GetParallelMove(InnerPosition pos) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+};
+
+
+class LLabel: public LGap {
+ public:
+ explicit LLabel(HBasicBlock* block) : LGap(block) { }
+};
+
+
+class LOsrEntry: public LInstruction {
+ public:
+ // Function could be generated by a macro as in lithium-ia32.h.
+ static LOsrEntry* cast(LInstruction* instr) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LOperand** SpilledRegisterArray() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+ LOperand** SpilledDoubleRegisterArray() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
+ UNIMPLEMENTED();
+ }
+ void MarkSpilledDoubleRegister(int allocation_index,
+ LOperand* spill_operand) {
+ UNIMPLEMENTED();
+ }
+};
+
+
+class LChunk: public ZoneObject {
+ public:
+ explicit LChunk(HGraph* graph) { }
+
+ HGraph* graph() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ const ZoneList<LPointerMap*>* pointer_maps() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LOperand* GetNextSpillSlot(bool double_slot) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LConstantOperand* DefineConstantOperand(HConstant* constant) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LLabel* GetLabel(int block_id) const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ const ZoneList<LInstruction*>* instructions() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ int GetParameterStackSlot(int index) const {
+ UNIMPLEMENTED();
+ return 0;
+ }
+
+ void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
+
+ LGap* GetGapAt(int index) const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ bool IsGapAt(int index) const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ int NearestGapPos(int index) const {
+ UNIMPLEMENTED();
+ return 0;
+ }
+
+ void MarkEmptyBlocks() { UNIMPLEMENTED(); }
+
+ CompilationInfo* info() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+#ifdef DEBUG
+ void Verify() { UNIMPLEMENTED(); }
+#endif
+};
+
+
+class LChunkBuilder BASE_EMBEDDED {
+ public:
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) { }
+
+ // Build the sequence for the graph.
+ LChunk* Build() {
+ UNIMPLEMENTED();
+ return NULL;
+  }
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
+ UNIMPLEMENTED(); \
+ return NULL; \
+ }
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_LITHIUM_MIPS_H_
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index e096028e3..990b4995f 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,84 +25,109 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
+#include <limits.h> // For LONG_MIN, LONG_MAX.
#include "v8.h"
#if defined(V8_TARGET_ARCH_MIPS)
#include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
#include "runtime.h"
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(void* buffer, int size)
- : Assembler(buffer, size),
- unresolved_(0),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+ : Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
- code_object_(Heap::undefined_value()) {
+ allow_stub_calls_(true) {
+ if (isolate() != NULL) {
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
}
+// Arguments macros.
+#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
+#define COND_ARGS cond, r1, r2
-void MacroAssembler::Jump(Register target, Condition cond,
- Register r1, const Operand& r2) {
- Jump(Operand(target), cond, r1, r2);
+#define REGISTER_TARGET_BODY(Name) \
+void MacroAssembler::Name(Register target, \
+ BranchDelaySlot bd) { \
+ Name(Operand(target), bd); \
+} \
+void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
+ BranchDelaySlot bd) { \
+ Name(Operand(target), COND_ARGS, bd); \
}
-void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- Jump(Operand(target, rmode), cond, r1, r2);
+#define INT_PTR_TARGET_BODY(Name) \
+void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \
+ BranchDelaySlot bd) { \
+ Name(Operand(target, rmode), bd); \
+} \
+void MacroAssembler::Name(intptr_t target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS, \
+ BranchDelaySlot bd) { \
+ Name(Operand(target, rmode), COND_ARGS, bd); \
}
-void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
+#define BYTE_PTR_TARGET_BODY(Name) \
+void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \
+ BranchDelaySlot bd) { \
+ Name(reinterpret_cast<intptr_t>(target), rmode, bd); \
+} \
+void MacroAssembler::Name(byte* target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS, \
+ BranchDelaySlot bd) { \
+ Name(reinterpret_cast<intptr_t>(target), rmode, COND_ARGS, bd); \
}
-void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+#define CODE_TARGET_BODY(Name) \
+void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \
+ BranchDelaySlot bd) { \
+ Name(reinterpret_cast<intptr_t>(target.location()), rmode, bd); \
+} \
+void MacroAssembler::Name(Handle<Code> target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS, \
+ BranchDelaySlot bd) { \
+ Name(reinterpret_cast<intptr_t>(target.location()), rmode, COND_ARGS, bd); \
}
-void MacroAssembler::Call(Register target,
- Condition cond, Register r1, const Operand& r2) {
- Call(Operand(target), cond, r1, r2);
-}
+REGISTER_TARGET_BODY(Jump)
+REGISTER_TARGET_BODY(Call)
+INT_PTR_TARGET_BODY(Jump)
+INT_PTR_TARGET_BODY(Call)
+BYTE_PTR_TARGET_BODY(Jump)
+BYTE_PTR_TARGET_BODY(Call)
+CODE_TARGET_BODY(Jump)
+CODE_TARGET_BODY(Call)
+#undef COND_TYPED_ARGS
+#undef COND_ARGS
+#undef REGISTER_TARGET_BODY
+#undef INT_PTR_TARGET_BODY
+#undef BYTE_PTR_TARGET_BODY
+#undef CODE_TARGET_BODY
-void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- Call(Operand(target, rmode), cond, r1, r2);
-}
-
-void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
+void MacroAssembler::Ret(BranchDelaySlot bd) {
+ Jump(Operand(ra), bd);
}
-void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
-}
-
-
-void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) {
- Jump(Operand(ra), cond, r1, r2);
+void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2,
+ BranchDelaySlot bd) {
+ Jump(Operand(ra), cond, r1, r2, bd);
}
@@ -111,51 +136,324 @@ void MacroAssembler::LoadRoot(Register destination,
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
+
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond,
Register src1, const Operand& src2) {
- Branch(NegateCondition(cond), 2, src1, src2);
+ Branch(2, NegateCondition(cond), src1, src2);
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
-void MacroAssembler::RecordWrite(Register object, Register offset,
+void MacroAssembler::StoreRoot(Register source,
+ Heap::RootListIndex index) {
+ sw(source, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::StoreRoot(Register source,
+ Heap::RootListIndex index,
+ Condition cond,
+ Register src1, const Operand& src2) {
+ Branch(2, NegateCondition(cond), src1, src2);
+ sw(source, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::RecordWriteHelper(Register object,
+ Register address,
+ Register scratch) {
+ if (emit_debug_code()) {
+ // Check that the object is not in new space.
+ Label not_in_new_space;
+ InNewSpace(object, scratch, ne, &not_in_new_space);
+ Abort("new-space object passed to RecordWriteHelper");
+ bind(&not_in_new_space);
+ }
+
+ // Calculate page address: Clear bits from 0 to kPageSizeBits.
+ if (mips32r2) {
+ Ins(object, zero_reg, 0, kPageSizeBits);
+ } else {
+    // The Ins macro is slow on mips32r1, so use shifts instead.
+ srl(object, object, kPageSizeBits);
+ sll(object, object, kPageSizeBits);
+ }
+
+ // Calculate region number.
+ Ext(address, address, Page::kRegionSizeLog2,
+ kPageSizeBits - Page::kRegionSizeLog2);
+
+ // Mark region dirty.
+ lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+ li(at, Operand(1));
+ sllv(at, at, address);
+ or_(scratch, scratch, at);
+ sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+}
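
The page and region arithmetic above, restated as a hedged C++ sketch;
the constant values are assumptions for illustration, only the shape of
the computation is taken from the code:

#include <stdint.h>

enum { kPageSizeBits = 13, kRegionSizeLog2 = 5 };  // Assumed values.

// Clear bits 0..kPageSizeBits-1 to get the page start address.
uint32_t PageAddress(uint32_t object) {
  return object & ~((1u << kPageSizeBits) - 1);
}

// Region number: bits kRegionSizeLog2..kPageSizeBits-1 of the address.
uint32_t RegionNumber(uint32_t address) {
  return (address >> kRegionSizeLog2) &
         ((1u << (kPageSizeBits - kRegionSizeLog2)) - 1);
}

// The word at Page::kDirtyFlagOffset is then OR-ed with
// (1u << RegionNumber(address)), which is what the sllv/or_/sw above do.
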
+
+
+// Push and pop all registers that can hold pointers.
+void MacroAssembler::PushSafepointRegisters() {
+ // Safepoints expect a block of kNumSafepointRegisters values on the
+ // stack, so adjust the stack for unsaved registers.
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ ASSERT(num_unsaved >= 0);
+ Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+ MultiPush(kSafepointSavedRegisters);
+}
+
+
+void MacroAssembler::PopSafepointRegisters() {
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ MultiPop(kSafepointSavedRegisters);
+ Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+}
+
+
+void MacroAssembler::PushSafepointRegistersAndDoubles() {
+ PushSafepointRegisters();
+ Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
+ for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
+ FPURegister reg = FPURegister::FromAllocationIndex(i);
+ sdc1(reg, MemOperand(sp, i * kDoubleSize));
+ }
+}
+
+
+void MacroAssembler::PopSafepointRegistersAndDoubles() {
+ for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
+ FPURegister reg = FPURegister::FromAllocationIndex(i);
+ ldc1(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
+ PopSafepointRegisters();
+}
+
+
+void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
+ Register dst) {
+ sw(src, SafepointRegistersAndDoublesSlot(dst));
+}
+
+
+void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
+ sw(src, SafepointRegisterSlot(dst));
+}
+
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ lw(dst, SafepointRegisterSlot(src));
+}
+
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+ // The registers are pushed starting with the highest encoding,
+ // which means that lowest encodings are closest to the stack pointer.
+ return kSafepointRegisterStackIndexMap[reg_code];
+}
+
+
+MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
+ return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
+MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+ // General purpose registers are pushed last on the stack.
+ int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
+ int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
+ return MemOperand(sp, doubles_size + register_offset);
+}
+
+
+void MacroAssembler::InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* branch) {
+ ASSERT(cc == eq || cc == ne);
+ And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
+ Branch(branch, cc, scratch,
+ Operand(ExternalReference::new_space_start(isolate())));
+}
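
The membership test compiles down to a single AND plus compare; a
one-function sketch (mask and start stand in for whatever the
ExternalReferences supply at runtime):

#include <stdint.h>

// An object is in new space iff its address, masked down to the
// space-sized alignment, equals the space's start address.
bool AddressInNewSpace(uintptr_t addr, uintptr_t mask, uintptr_t start) {
  return (addr & mask) == start;
}
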
+
+
+// Will clobber 4 registers: object, scratch0, scratch1, at. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are cp.
+ ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
+
+ Label done;
+
+ // First, test that the object is not in the new space. We cannot set
+ // region marks for new space pages.
+ InNewSpace(object, scratch0, eq, &done);
+
+ // Add offset into the object.
+ Addu(scratch0, object, offset);
+
+ // Record the actual write.
+ RecordWriteHelper(object, scratch0, scratch1);
+
+ bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ li(object, Operand(BitCast<int32_t>(kZapValue)));
+ li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+ li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
+// Will clobber 4 registers: object, address, scratch, ip. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+ Register address,
Register scratch) {
- UNIMPLEMENTED_MIPS();
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are cp.
+ ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+
+ Label done;
+
+ // First, test that the object is not in the new space. We cannot set
+ // region marks for new space pages.
+ InNewSpace(object, scratch, eq, &done);
+
+ // Record the actual write.
+ RecordWriteHelper(object, address, scratch);
+
+ bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ li(object, Operand(BitCast<int32_t>(kZapValue)));
+ li(address, Operand(BitCast<int32_t>(kZapValue)));
+ li(scratch, Operand(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Allocation support.
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch,
+ Label* miss) {
+ Label same_contexts;
+
+ ASSERT(!holder_reg.is(scratch));
+ ASSERT(!holder_reg.is(at));
+ ASSERT(!scratch.is(at));
+
+ // Load current lexical context from the stack frame.
+ lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+ Check(ne, "we should not have an empty lexical context",
+ scratch, Operand(zero_reg));
+#endif
+
+ // Load the global context of the current context.
+ int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ lw(scratch, FieldMemOperand(scratch, offset));
+ lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+ // Check the context is a global context.
+ if (emit_debug_code()) {
+ // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
+ push(holder_reg); // Temporarily save holder on the stack.
+ // Read the first word and compare to the global_context_map.
+ lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ LoadRoot(at, Heap::kGlobalContextMapRootIndex);
+ Check(eq, "JSGlobalObject::global_context should be a global context.",
+ holder_reg, Operand(at));
+ pop(holder_reg); // Restore holder.
+ }
+
+ // Check if both contexts are the same.
+ lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ Branch(&same_contexts, eq, scratch, Operand(at));
+
+ // Check the context is a global context.
+ if (emit_debug_code()) {
+ // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
+ push(holder_reg); // Temporarily save holder on the stack.
+ mov(holder_reg, at); // Move at to its holding place.
+ LoadRoot(at, Heap::kNullValueRootIndex);
+ Check(ne, "JSGlobalProxy::context() should not be null.",
+ holder_reg, Operand(at));
+
+ lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
+ LoadRoot(at, Heap::kGlobalContextMapRootIndex);
+ Check(eq, "JSGlobalObject::global_context should be a global context.",
+ holder_reg, Operand(at));
+    // Restoring at is not needed; at is reloaded below.
+ pop(holder_reg); // Restore holder.
+ // Restore at to holder's context.
+ lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ }
+
+ // Check that the security token in the calling global object is
+ // compatible with the security token in the receiving global
+ // object.
+ int token_offset = Context::kHeaderSize +
+ Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+ lw(scratch, FieldMemOperand(scratch, token_offset));
+ lw(at, FieldMemOperand(at, token_offset));
+ Branch(miss, ne, scratch, Operand(at));
+
+ bind(&same_contexts);
}
// ---------------------------------------------------------------------------
-// Instruction macros
+// Instruction macros.
-void MacroAssembler::Add(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- add(rd, rs, rt.rm());
+ addu(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
- addi(rd, rs, rt.imm32_);
+ if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
+ addiu(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
- add(rd, rs, at);
+ addu(rd, rs, at);
}
}
}
-void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- addu(rd, rs, rt.rm());
+ subu(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
- addiu(rd, rs, rt.imm32_);
+ if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
+ addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
- addu(rd, rs, at);
+ subu(rd, rs, at);
}
}
}
@@ -225,7 +523,7 @@ void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
andi(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -241,7 +539,7 @@ void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
or_(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
ori(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -257,7 +555,7 @@ void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
xor_(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
xori(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -281,11 +579,20 @@ void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
}
+void MacroAssembler::Neg(Register rs, const Operand& rt) {
+ ASSERT(rt.is_reg());
+ ASSERT(!at.is(rs));
+ ASSERT(!at.is(rt.rm()));
+ li(at, -1);
+ xor_(rs, rt.rm(), at);
+}
+
+
void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
slti(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -301,7 +608,7 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
sltiu(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -313,60 +620,61 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
}
-//------------Pseudo-instructions-------------
-
-void MacroAssembler::movn(Register rd, Register rt) {
- addiu(at, zero_reg, -1); // Fill at with ones.
- xor_(rd, rt, at);
+void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
+ if (mips32r2) {
+ if (rt.is_reg()) {
+ rotrv(rd, rs, rt.rm());
+ } else {
+ rotr(rd, rs, rt.imm32_);
+ }
+ } else {
+ if (rt.is_reg()) {
+ subu(at, zero_reg, rt.rm());
+ sllv(at, rs, at);
+ srlv(rd, rs, rt.rm());
+ or_(rd, rd, at);
+ } else {
+ if (rt.imm32_ == 0) {
+ srl(rd, rs, 0);
+ } else {
+ srl(at, rs, rt.imm32_);
+ sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
+ or_(rd, rd, at);
+ }
+ }
+ }
}
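
A C++ equivalent of the non-R2 fallback above, two shifts and an OR; the
"& 31" mirrors the hardware's use of only the low five bits of a shift
amount:

#include <stdint.h>

uint32_t RotateRight(uint32_t x, uint32_t n) {
  // For n == 0 the left-shift amount masks to 0 and the result is x,
  // matching the srl(rd, rs, 0) special case in the macro.
  return (x >> (n & 31u)) | (x << ((32u - n) & 31u));
}
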
+//------------Pseudo-instructions-------------
+
void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
ASSERT(!j.is_reg());
-
- if (!MustUseAt(j.rmode_) && !gen2instr) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (!MustUseReg(j.rmode_) && !gen2instr) {
// Normal load of an immediate value which does not need Relocation Info.
if (is_int16(j.imm32_)) {
addiu(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & HIMask)) {
+ } else if (!(j.imm32_ & kHiMask)) {
ori(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & LOMask)) {
- lui(rd, (HIMask & j.imm32_) >> 16);
+ } else if (!(j.imm32_ & kImm16Mask)) {
+ lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
} else {
- lui(rd, (HIMask & j.imm32_) >> 16);
- ori(rd, rd, (LOMask & j.imm32_));
+ lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+ ori(rd, rd, (j.imm32_ & kImm16Mask));
}
- } else if (MustUseAt(j.rmode_) || gen2instr) {
- if (MustUseAt(j.rmode_)) {
+ } else if (MustUseReg(j.rmode_) || gen2instr) {
+ if (MustUseReg(j.rmode_)) {
RecordRelocInfo(j.rmode_, j.imm32_);
}
     // We always need the same number of instructions as we may need to patch
// this code to load another value which may need 2 instructions to load.
- if (is_int16(j.imm32_)) {
- nop();
- addiu(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & HIMask)) {
- nop();
- ori(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & LOMask)) {
- nop();
- lui(rd, (HIMask & j.imm32_) >> 16);
- } else {
- lui(rd, (HIMask & j.imm32_) >> 16);
- ori(rd, rd, (LOMask & j.imm32_));
- }
+ lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+ ori(rd, rd, (j.imm32_ & kImm16Mask));
}
}
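
The two-instruction load is a plain high/low split of the immediate; a
sketch of the arithmetic with no V8 types:

#include <stdint.h>

// lui rd, hi   places hi in the upper halfword (rd = hi << 16);
// ori rd, rd, lo   then merges in the lower halfword.
void SplitImmediate(uint32_t imm, uint16_t* hi, uint16_t* lo) {
  *hi = (uint16_t)(imm >> 16);      // Operand for lui.
  *lo = (uint16_t)(imm & 0xFFFFu);  // Operand for ori.
}

uint32_t BuildImmediate(uint16_t hi, uint16_t lo) {
  return ((uint32_t)hi << 16) | lo;  // What the register ends up holding.
}
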
-// Exception-generating instructions and debugging support
-void MacroAssembler::stop(const char* msg) {
- // TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
- // We use the 0x54321 value to be able to find it easily when reading memory.
- break_(0x54321);
-}
-
-
void MacroAssembler::MultiPush(RegList regs) {
int16_t NumSaved = 0;
int16_t NumToPush = NumberOfBitsSet(regs);
@@ -417,153 +725,1000 @@ void MacroAssembler::MultiPopReversed(RegList regs) {
}
+void MacroAssembler::Ext(Register rt,
+ Register rs,
+ uint16_t pos,
+ uint16_t size) {
+ ASSERT(pos < 32);
+ ASSERT(pos + size < 32);
+
+ if (mips32r2) {
+ ext_(rt, rs, pos, size);
+ } else {
+ // Move rs to rt and shift it left then right to get the
+ // desired bitfield on the right side and zeroes on the left.
+ sll(rt, rs, 32 - (pos + size));
+ srl(rt, rt, 32 - size);
+ }
+}
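
The shift pair above is the classic bitfield extract; in C++ (valid for
pos + size < 32, matching the asserts):

#include <stdint.h>

uint32_t ExtractBits(uint32_t rs, unsigned pos, unsigned size) {
  uint32_t t = rs << (32 - (pos + size));  // Discard bits above the field.
  return t >> (32 - size);                 // Right-align and zero-extend.
}
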
+
+
+void MacroAssembler::Ins(Register rt,
+ Register rs,
+ uint16_t pos,
+ uint16_t size) {
+ ASSERT(pos < 32);
+ ASSERT(pos + size < 32);
+
+ if (mips32r2) {
+ ins_(rt, rs, pos, size);
+ } else {
+ ASSERT(!rt.is(t8) && !rs.is(t8));
+
+ srl(t8, rt, pos + size);
+ // The left chunk from rt that needs to
+ // be saved is on the right side of t8.
+ sll(at, t8, pos + size);
+ // The 'at' register now contains the left chunk on
+ // the left (proper position) and zeroes.
+ sll(t8, rt, 32 - pos);
+ // t8 now contains the right chunk on the left and zeroes.
+ srl(t8, t8, 32 - pos);
+ // t8 now contains the right chunk on
+ // the right (proper position) and zeroes.
+ or_(rt, at, t8);
+ // rt now contains the left and right chunks from the original rt
+ // in their proper position and zeroes in the middle.
+ sll(t8, rs, 32 - size);
+ // t8 now contains the chunk from rs on the left and zeroes.
+ srl(t8, t8, 32 - size - pos);
+ // t8 now contains the original chunk from rs in
+ // the middle (proper position).
+ or_(rt, rt, t8);
+ // rt now contains the result of the ins instruction in R2 mode.
+ }
+}
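
The same operation as a mask-and-merge sketch; the macro's shift
sequence computes exactly this, just without materializing the mask:

#include <stdint.h>

uint32_t InsertBits(uint32_t rt, uint32_t rs, unsigned pos, unsigned size) {
  uint32_t field_mask = ((1u << size) - 1u) << pos;
  return (rt & ~field_mask) | ((rs << pos) & field_mask);
}
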
+
+
+void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
+ // Move the data from fs to t4.
+ mfc1(t4, fs);
+  Cvt_d_uw(fd, t4);
+}
+
+
+void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd (and fd + 1).
+ // We do this by converting rs minus the MSB to avoid sign conversion,
+ // then adding 2^31-1 and 1 to the result.
+
+ ASSERT(!fd.is(f20));
+ ASSERT(!rs.is(t9));
+ ASSERT(!rs.is(t8));
+
+ // Save rs's MSB to t8.
+ And(t8, rs, 0x80000000);
+ // Remove rs's MSB.
+ And(t9, rs, 0x7FFFFFFF);
+ // Move t9 to fd.
+ mtc1(t9, fd);
+
+ // Convert fd to a real FP value.
+ cvt_d_w(fd, fd);
+
+ Label conversion_done;
+
+ // If rs's MSB was 0, it's done.
+ // Otherwise we need to add that to the FP register.
+ Branch(&conversion_done, eq, t8, Operand(zero_reg));
+
+ // First load 2^31 - 1 into f20.
+ Or(t9, zero_reg, 0x7FFFFFFF);
+ mtc1(t9, f20);
+
+ // Convert it to FP and add it to fd.
+ cvt_d_w(f20, f20);
+ add_d(fd, fd, f20);
+ // Now add 1.
+ Or(t9, zero_reg, 1);
+ mtc1(t9, f20);
+
+ cvt_d_w(f20, f20);
+ add_d(fd, fd, f20);
+ bind(&conversion_done);
+}
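
The conversion strategy in scalar form: convert the value with its MSB
cleared through the signed path, then add 2^31 back as (2^31 - 1) + 1 so
each addend itself fits the signed route. A hedged C++ sketch:

#include <stdint.h>

double Uint32ToDouble(uint32_t rs) {
  // Convert the low 31 bits via the signed conversion.
  double fd = (double)(int32_t)(rs & 0x7FFFFFFFu);
  if (rs & 0x80000000u) {
    fd += 2147483647.0;  // 2^31 - 1 ...
    fd += 1.0;           // ... plus 1 restores the cleared MSB (2^31).
  }
  return fd;
}
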
+
+
+void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs) {
+ Trunc_uw_d(fs, t4);
+ mtc1(t4, fd);
+}
+
+
+void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) {
+ ASSERT(!fd.is(f22));
+ ASSERT(!rs.is(t8));
+
+ // Load 2^31 into f22.
+ Or(t8, zero_reg, 0x80000000);
+ Cvt_d_uw(f22, t8);
+
+ // Test if f22 > fd.
+ c(OLT, D, fd, f22);
+
+ Label simple_convert;
+ // If fd < 2^31 we can convert it normally.
+ bc1t(&simple_convert);
+
+ // First we subtract 2^31 from fd, then trunc it to rs
+ // and add 2^31 to rs.
+
+ sub_d(f22, fd, f22);
+ trunc_w_d(f22, f22);
+ mfc1(rs, f22);
+ or_(rs, rs, t8);
+
+ Label done;
+ Branch(&done);
+ // Simple conversion.
+ bind(&simple_convert);
+ trunc_w_d(f22, fd);
+ mfc1(rs, f22);
+
+ bind(&done);
+}
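
And the inverse, mirroring the simple_convert branch above; assumes a
non-negative input below 2^32, as the callers guarantee:

#include <stdint.h>

uint32_t DoubleToUint32(double fd) {
  const double k2Pow31 = 2147483648.0;
  if (fd < k2Pow31) {
    return (uint32_t)(int32_t)fd;  // Simple signed truncation.
  }
  // Shift into signed range, truncate, then OR the top bit back in.
  return (uint32_t)(int32_t)(fd - k2Pow31) | 0x80000000u;
}
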
+
+
+// Tries to get a signed int32 out of a double precision floating point heap
+// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
+// 32-bit signed integer range.
+// This method implementation differs from the ARM version for performance
+// reasons.
+void MacroAssembler::ConvertToInt32(Register source,
+ Register dest,
+ Register scratch,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label *not_int32) {
+ Label right_exponent, done;
+ // Get exponent word (ENDIAN issues).
+ lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
+ // Load dest with zero. We use this either for the final shift or
+ // for the answer.
+ mov(dest, zero_reg);
+ // Check whether the exponent matches a 32 bit signed int that is not a Smi.
+ // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
+ // the exponent that we are fastest at and also the highest exponent we can
+ // handle here.
+ const uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ // If we have a match of the int32-but-not-Smi exponent then skip some logic.
+ Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
+ // If the exponent is higher than that then go to not_int32 case. This
+ // catches numbers that don't fit in a signed int32, infinities and NaNs.
+ Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
+
+ // We know the exponent is smaller than 30 (biased). If it is less than
+  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
+ // it rounds to zero.
+ const uint32_t zero_exponent =
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+ Subu(scratch2, scratch2, Operand(zero_exponent));
+ // Dest already has a Smi zero.
+ Branch(&done, lt, scratch2, Operand(zero_reg));
+ if (!CpuFeatures::IsSupported(FPU)) {
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ srl(dest, scratch2, HeapNumber::kExponentShift);
+ // We now have the exponent in dest. Subtract from 30 to get
+ // how much to shift down.
+ li(at, Operand(30));
+ subu(dest, at, dest);
+ }
+ bind(&right_exponent);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // MIPS FPU instructions implementing double precision to integer
+ // conversion using round to zero. Since the FP value was qualified
+ // above, the resulting integer should be a legal int32.
+ // The original 'Exponent' word is still in scratch.
+ lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
+ trunc_w_d(double_scratch, double_scratch);
+ mfc1(dest, double_scratch);
+ } else {
+ // On entry, dest has final downshift, scratch has original sign/exp/mant.
+ // Save sign bit in top bit of dest.
+ And(scratch2, scratch, Operand(0x80000000));
+ Or(dest, dest, Operand(scratch2));
+ // Put back the implicit 1, just above mantissa field.
+ Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
+
+ // Shift up the mantissa bits to take up the space the exponent used to
+  // take. We just ORed in the implicit bit, so that took care of one, and
+ // we want to leave the sign bit 0 so we subtract 2 bits from the shift
+ // distance. But we want to clear the sign-bit so shift one more bit
+ // left, then shift right one bit.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ sll(scratch, scratch, shift_distance + 1);
+ srl(scratch, scratch, 1);
+
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+  // Extract the top 10 bits of the low word, and insert them as the
+  // bottom 10 bits of scratch.
+ // The width of the field here is the same as the shift amount above.
+ const int field_width = shift_distance;
+ Ext(scratch2, scratch2, 32-shift_distance, field_width);
+ Ins(scratch, scratch2, 0, field_width);
+ // Move down according to the exponent.
+ srlv(scratch, scratch, dest);
+ // Prepare the negative version of our integer.
+ subu(scratch2, zero_reg, scratch);
+ // Trick to check sign bit (msb) held in dest, count leading zero.
+ // 0 indicates negative, save negative version with conditional move.
+ clz(dest, dest);
+ movz(scratch, scratch2, dest);
+ mov(dest, scratch);
+ }
+ bind(&done);
+}
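
The exponent gate at the top of this function, restated over raw
IEEE-754 bits; a hedged sketch (the assembly reads the heap number's
high word, but the arithmetic is the same):

#include <stdint.h>

// A biased exponent of bias + 30 is the largest this fast path accepts;
// anything above is out of int32 range, an Infinity, or a NaN, and
// anything below the bias truncates toward zero to 0.
bool ExponentFitsFastPath(uint64_t double_bits) {
  const int kExponentBias = 1023;
  int biased = (int)((double_bits >> 52) & 0x7FF);
  return biased <= kExponentBias + 30;
}
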
+
+
+void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
+ Register input_high,
+ Register input_low,
+ Register scratch) {
+ Label done, normal_exponent, restore_sign;
+ // Extract the biased exponent in result.
+ Ext(result,
+ input_high,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // Check for Infinity and NaNs, which should return 0.
+ Subu(scratch, result, HeapNumber::kExponentMask);
+ movz(result, zero_reg, scratch);
+ Branch(&done, eq, scratch, Operand(zero_reg));
+
+ // Express exponent as delta to (number of mantissa bits + 31).
+ Subu(result,
+ result,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
+
+ // If the delta is strictly positive, all bits would be shifted away,
+ // which means that we can return 0.
+ Branch(&normal_exponent, le, result, Operand(zero_reg));
+ mov(result, zero_reg);
+ Branch(&done);
+
+ bind(&normal_exponent);
+ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ // Calculate shift.
+ Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
+
+ // Save the sign.
+ Register sign = result;
+ result = no_reg;
+ And(sign, input_high, Operand(HeapNumber::kSignMask));
+
+ // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
+ // to check for this specific case.
+ Label high_shift_needed, high_shift_done;
+ Branch(&high_shift_needed, lt, scratch, Operand(32));
+ mov(input_high, zero_reg);
+ Branch(&high_shift_done);
+ bind(&high_shift_needed);
+
+ // Set the implicit 1 before the mantissa part in input_high.
+ Or(input_high,
+ input_high,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ // Shift the mantissa bits to the correct position.
+ // We don't need to clear non-mantissa bits as they will be shifted away.
+ // If they weren't, it would mean that the answer is in the 32bit range.
+ sllv(input_high, input_high, scratch);
+
+ bind(&high_shift_done);
+
+ // Replace the shifted bits with bits from the lower mantissa word.
+ Label pos_shift, shift_done;
+ li(at, 32);
+ subu(scratch, at, scratch);
+ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
+
+ // Negate scratch.
+ Subu(scratch, zero_reg, scratch);
+ sllv(input_low, input_low, scratch);
+ Branch(&shift_done);
+
+ bind(&pos_shift);
+ srlv(input_low, input_low, scratch);
+
+ bind(&shift_done);
+ Or(input_high, input_high, Operand(input_low));
+ // Restore sign if necessary.
+ mov(scratch, sign);
+ result = sign;
+ sign = no_reg;
+ Subu(result, zero_reg, input_high);
+ movz(result, input_high, scratch);
+ bind(&done);
+}
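
What the out-of-range path computes is ECMA-262 ToInt32: truncate toward
zero, wrap modulo 2^32, and reinterpret in the signed range. A portable
C++ reference sketch (relying on the usual two's-complement cast):

#include <cmath>
#include <stdint.h>

int32_t EcmaToInt32(double x) {
  if (std::isnan(x) || std::isinf(x)) return 0;
  double t = std::trunc(x);               // Round toward zero.
  double m = std::fmod(t, 4294967296.0);  // Wrap modulo 2^32 ...
  if (m < 0) m += 4294967296.0;           // ... into [0, 2^32).
  return (int32_t)(uint32_t)m;            // Reinterpret as signed.
}
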
+
+
+void MacroAssembler::EmitECMATruncate(Register result,
+ FPURegister double_input,
+ FPURegister single_scratch,
+ Register scratch,
+ Register input_high,
+ Register input_low) {
+ CpuFeatures::Scope scope(FPU);
+ ASSERT(!input_high.is(result));
+ ASSERT(!input_low.is(result));
+ ASSERT(!input_low.is(input_high));
+ ASSERT(!scratch.is(result) &&
+ !scratch.is(input_high) &&
+ !scratch.is(input_low));
+ ASSERT(!single_scratch.is(double_input));
+
+ Label done;
+ Label manual;
+
+ // Clear cumulative exception flags and save the FCSR.
+ Register scratch2 = input_high;
+ cfc1(scratch2, FCSR);
+ ctc1(zero_reg, FCSR);
+ // Try a conversion to a signed integer.
+ trunc_w_d(single_scratch, double_input);
+ mfc1(result, single_scratch);
+ // Retrieve and restore the FCSR.
+ cfc1(scratch, FCSR);
+ ctc1(scratch2, FCSR);
+ // Check for overflow and NaNs.
+ And(scratch,
+ scratch,
+ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
+ // If we had no exceptions we are done.
+ Branch(&done, eq, scratch, Operand(zero_reg));
+
+ // Load the double value and perform a manual truncation.
+ Move(input_low, input_high, double_input);
+ EmitOutOfInt32RangeTruncate(result,
+ input_high,
+ input_low,
+ scratch);
+ bind(&done);
+}
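+
+// Usage sketch (editor's illustration; the register choices here are
+// hypothetical):
+//   EmitECMATruncate(v0, f12, f10, t0, t1, t2);
+// tries a hardware trunc.w.d first and falls back to the manual
+// EmitOutOfInt32RangeTruncate path only when FCSR reports overflow,
+// underflow or an invalid operation (e.g. NaN or |x| >= 2^31).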
+
+
+void MacroAssembler::GetLeastBitsFromSmi(Register dst,
+ Register src,
+ int num_least_bits) {
+ Ext(dst, src, kSmiTagSize, num_least_bits);
+}
+
+
+void MacroAssembler::GetLeastBitsFromInt32(Register dst,
+ Register src,
+ int num_least_bits) {
+ And(dst, src, Operand((1 << num_least_bits) - 1));
+}
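+
+// For example (editor's note): with num_least_bits == 2 the mask is
+// (1 << 2) - 1 == 0x3, so GetLeastBitsFromInt32(dst, src, 2) keeps the two
+// least significant bits of src, while the Smi variant above skips the tag
+// bit by extracting starting at kSmiTagSize.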
+
+
// Emulated conditional branches do not emit a nop in the branch delay slot.
+//
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
+#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
+ (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
+ (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
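+
+// Editor's gloss: an unconditional branch must be written with cc_always
+// and zero_reg on both sides (the plain Branch(offset) overloads do this),
+// while a conditional branch must compare at least one non-zero register.
+// A call such as Branch(&l, eq, zero_reg, Operand(zero_reg)) would trip
+// this ASSERT; use Branch(&l) instead.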
-// Trashes the at register if no scratch register is provided.
-void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
- const Operand& rt, Register scratch) {
+
+bool MacroAssembler::UseAbsoluteCodePointers() {
+ return is_trampoline_emitted();
+}
+
+
+void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
+ BranchShort(offset, bdslot);
+}
+
+
+void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BranchShort(offset, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Jr(L, bdslot);
+ } else {
+ BranchShort(L, bdslot);
+ }
+}
+
+
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ } else {
+ BranchShort(L, cond, rs, rt, bdslot);
+ }
+}
+
+
+void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
+ b(offset);
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ ASSERT(!rs.is(zero_reg));
Register r2 = no_reg;
+ Register scratch = at;
+
if (rt.is_reg()) {
// We don't want any other register but scratch clobbered.
ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
r2 = rt.rm_;
- } else if (cond != cc_always) {
- // We don't want any other register but scratch clobbered.
- ASSERT(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
+ switch (cond) {
+ case cc_always:
+ b(offset);
+ break;
+ case eq:
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ bne(rs, r2, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ if (r2.is(zero_reg)) {
+ bgtz(rs, offset);
+ } else {
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case greater_equal:
+ if (r2.is(zero_reg)) {
+ bgez(rs, offset);
+ } else {
+ slt(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case less:
+ if (r2.is(zero_reg)) {
+ bltz(rs, offset);
+ } else {
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case less_equal:
+ if (r2.is(zero_reg)) {
+ blez(rs, offset);
+ } else {
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ // Unsigned comparison.
+ case Ugreater:
+ if (r2.is(zero_reg)) {
+ bgtz(rs, offset);
+ } else {
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Ugreater_equal:
+ if (r2.is(zero_reg)) {
+ bgez(rs, offset);
+ } else {
+ sltu(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless:
+ if (r2.is(zero_reg)) {
+ // No code needs to be emitted.
+ return;
+ } else {
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless_equal:
+ if (r2.is(zero_reg)) {
+ b(offset);
+ } else {
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // The immediate operand is materialized in the scratch register only for
+ // the comparisons that actually need it.
+ switch (cond) {
+ case cc_always:
+ b(offset);
+ break;
+ case eq:
+ // We don't want any other register but scratch clobbered.
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ // We don't want any other register but scratch clobbered.
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ bne(rs, r2, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ if (rt.imm32_ == 0) {
+ bgtz(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case greater_equal:
+ if (rt.imm32_ == 0) {
+ bgez(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ slti(scratch, rs, rt.imm32_);
+ beq(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case less:
+ if (rt.imm32_ == 0) {
+ bltz(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ slti(scratch, rs, rt.imm32_);
+ bne(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case less_equal:
+ if (rt.imm32_ == 0) {
+ blez(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ // Unsigned comparison.
+ case Ugreater:
+ if (rt.imm32_ == 0) {
+ bgtz(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Ugreater_equal:
+ if (rt.imm32_ == 0) {
+ bgez(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ sltiu(scratch, rs, rt.imm32_);
+ beq(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless:
+ if (rt.imm32_ == 0) {
+ // No code needs to be emitted.
+ return;
+ } else if (is_int16(rt.imm32_)) {
+ sltiu(scratch, rs, rt.imm32_);
+ bne(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless_equal:
+ if (rt.imm32_ == 0) {
+ b(offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
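+
+// Expansion example (editor's sketch): BranchShort(offset, greater, a0,
+// Operand(a1)) has no single-instruction MIPS form, so it is emitted as
+//   slt at, a1, a0            // at = (a0 > a1) ? 1 : 0
+//   bne at, zero_reg, offset
+// followed by a delay-slot nop when bdslot == PROTECT.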
- switch (cond) {
- case cc_always:
- b(offset);
- break;
- case eq:
- beq(rs, r2, offset);
- break;
- case ne:
- bne(rs, r2, offset);
- break;
- // Signed comparison
- case greater:
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- break;
- case less:
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- break;
- case less_equal:
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- break;
+void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
+ // We use branch_offset as an argument for the branch instructions to be sure
+ // it is called just before generating the branch instruction, as needed.
- // Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- break;
- case Uless:
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- break;
+ b(shifted_branch_offset(L, false));
- default:
- UNREACHABLE();
- }
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
-void MacroAssembler::Branch(Condition cond, Label* L, Register rs,
- const Operand& rt, Register scratch) {
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ int32_t offset;
Register r2 = no_reg;
+ Register scratch = at;
if (rt.is_reg()) {
r2 = rt.rm_;
- } else if (cond != cc_always) {
- r2 = scratch;
- li(r2, rt);
+ // Be careful to always use shifted_branch_offset only just before the
+ // branch instruction, as the location will be remembered for patching the
+ // target.
+ switch (cond) {
+ case cc_always:
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ break;
+ case eq:
+ offset = shifted_branch_offset(L, false);
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ offset = shifted_branch_offset(L, false);
+ bne(rs, r2, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bgtz(rs, offset);
+ } else {
+ slt(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case greater_equal:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bgez(rs, offset);
+ } else {
+ slt(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case less:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bltz(rs, offset);
+ } else {
+ slt(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case less_equal:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ blez(rs, offset);
+ } else {
+ slt(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ // Unsigned comparison.
+ case Ugreater:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bgtz(rs, offset);
+ } else {
+ sltu(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Ugreater_equal:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bgez(rs, offset);
+ } else {
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless:
+ if (r2.is(zero_reg)) {
+ // No code needs to be emitted.
+ return;
+ } else {
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless_equal:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ } else {
+ sltu(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Be careful to always use shifted_branch_offset only just before the
+ // branch instruction, as the location will be remembered for patching the
+ // target.
+ switch (cond) {
+ case cc_always:
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ break;
+ case eq:
+ r2 = scratch;
+ li(r2, rt);
+ offset = shifted_branch_offset(L, false);
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ r2 = scratch;
+ li(r2, rt);
+ offset = shifted_branch_offset(L, false);
+ bne(rs, r2, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bgtz(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case greater_equal:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bgez(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ slti(scratch, rs, rt.imm32_);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case less:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bltz(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ slti(scratch, rs, rt.imm32_);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case less_equal:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ blez(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ // Unsigned comparison.
+ case Ugreater:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bgtz(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Ugreater_equal:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bgez(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ sltiu(scratch, rs, rt.imm32_);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless:
+ if (rt.imm32_ == 0) {
+ // No code needs to be emitted.
+ return;
+ } else if (is_int16(rt.imm32_)) {
+ sltiu(scratch, rs, rt.imm32_);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless_equal:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+ // Check that the offset actually fits in an int16_t.
+ ASSERT(is_int16(offset));
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
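+
+// Editor's note on the is_int16 fast path above: for an immediate that fits
+// in 16 bits, Branch(&l, less, a0, Operand(42)) needs only
+//   slti at, a0, 42
+//   bne  at, zero_reg, <offset to l>
+// whereas a wider immediate pays an extra lui/ori pair inside li() to
+// materialize the constant before the slt.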
- // We use branch_offset as an argument for the branch instructions to be sure
- // it is called just before generating the branch instruction, as needed.
- switch (cond) {
- case cc_always:
- b(shifted_branch_offset(L, false));
- break;
- case eq:
- beq(rs, r2, shifted_branch_offset(L, false));
- break;
- case ne:
- bne(rs, r2, shifted_branch_offset(L, false));
- break;
+void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
+ BranchAndLinkShort(offset, bdslot);
+}
- // Signed comparison
- case greater:
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case less:
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case less_equal:
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- // Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case Uless:
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
+void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BranchAndLinkShort(offset, cond, rs, rt, bdslot);
+}
- default:
- UNREACHABLE();
+
+void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Jalr(L, bdslot);
+ } else {
+ BranchAndLinkShort(L, bdslot);
+ }
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jalr(L, bdslot);
+ bind(&skip);
+ } else {
+ BranchAndLinkShort(L, cond, rs, rt, bdslot);
}
- // Emit a nop in the branch delay slot.
- nop();
}
-// Trashes the at register if no scratch register is provided.
// We need to use a bgezal or bltzal, but they can't be used directly with the
// slt instructions. We could use sub or add instead but we would miss overflow
// cases, so we keep slt and add an intermediate third instruction.
-void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
- const Operand& rt, Register scratch) {
+void MacroAssembler::BranchAndLinkShort(int16_t offset,
+ BranchDelaySlot bdslot) {
+ bal(offset);
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
Register r2 = no_reg;
+ Register scratch = at;
+
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@@ -586,7 +1741,7 @@ void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
bal(offset);
break;
- // Signed comparison
+ // Signed comparison.
case greater:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
@@ -633,14 +1788,29 @@ void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
default:
UNREACHABLE();
}
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
-void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
- const Operand& rt, Register scratch) {
+void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
+ bal(shifted_branch_offset(L, false));
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ int32_t offset;
Register r2 = no_reg;
+ Register scratch = at;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@@ -650,161 +1820,368 @@ void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
switch (cond) {
case cc_always:
- bal(shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
break;
case eq:
bne(rs, r2, 2);
nop();
- bal(shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
break;
case ne:
beq(rs, r2, 2);
nop();
- bal(shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
break;
- // Signed comparison
+ // Signed comparison.
case greater:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
- bgezal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
break;
case greater_equal:
slt(scratch, rs, r2);
addiu(scratch, scratch, -1);
- bltzal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
break;
case less:
slt(scratch, rs, r2);
addiu(scratch, scratch, -1);
- bgezal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
break;
case less_equal:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
- bltzal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
break;
// Unsigned comparison.
case Ugreater:
sltu(scratch, r2, rs);
addiu(scratch, scratch, -1);
- bgezal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
break;
case Ugreater_equal:
sltu(scratch, rs, r2);
addiu(scratch, scratch, -1);
- bltzal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
break;
case Uless:
sltu(scratch, rs, r2);
addiu(scratch, scratch, -1);
- bgezal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
break;
case Uless_equal:
sltu(scratch, r2, rs);
addiu(scratch, scratch, -1);
- bltzal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
break;
default:
UNREACHABLE();
}
- // Emit a nop in the branch delay slot.
- nop();
+
+ // Check that the offset actually fits in an int16_t.
+ ASSERT(is_int16(offset));
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint32_t imm28;
+ imm28 = jump_address(L);
+ imm28 &= kImm28Mask;
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ j(imm28);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint32_t imm32;
+ imm32 = jump_address(L);
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ jr(at);
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
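+
+// Editor's note: the lui/ori pair above materializes the full 32-bit target
+// in at; for imm32 == 0x12345678 it emits
+//   lui at, 0x1234
+//   ori at, at, 0x5678
+// Keeping both instructions inside BlockGrowBufferScope guarantees the pair
+// stays contiguous so the INTERNAL_REFERENCE entry can patch it later.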
+
+
+void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint32_t imm32;
+ imm32 = jump_address(L);
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ jalr(at);
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (target.is_reg()) {
+ jr(target.rm());
+ } else {
+ if (!MustUseReg(target.rmode_)) {
+ j(target.imm32_);
+ } else {
+ li(t9, target);
+ jr(t9);
+ }
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
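+
+// Editor's note: register-indirect jumps to external targets go through t9
+// (not at) because the MIPS o32 PIC convention expects the callee's address
+// in t9 -- gcc-compiled callees compute their gp from it, e.g. via a
+// .cpload t9 prologue. The same convention motivates the Call() note below.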
void MacroAssembler::Jump(const Operand& target,
- Condition cond, Register rs, const Operand& rt) {
+ Condition cond, Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ BRANCH_ARGS_CHECK(cond, rs, rt);
if (target.is_reg()) {
if (cond == cc_always) {
jr(target.rm());
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
jr(target.rm());
}
- } else { // !target.is_reg()
- if (!MustUseAt(target.rmode_)) {
+ } else { // Not register target.
+ if (!MustUseReg(target.rmode_)) {
if (cond == cc_always) {
j(target.imm32_);
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
j(target.imm32_); // Will generate only one instruction.
}
- } else { // MustUseAt(target)
- li(at, target);
+ } else { // MustUseReg(target).
+ li(t9, target);
if (cond == cc_always) {
- jr(at);
+ jr(t9);
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
- jr(at); // Will generate only one instruction.
+ Branch(2, NegateCondition(cond), rs, rt);
+ jr(t9); // Will generate only one instruction.
}
}
}
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
+ return 4 * kInstrSize;
+}
+
+
+int MacroAssembler::CallSize(Register reg) {
+ return 2 * kInstrSize;
+}
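+
+// Editor's gloss: these sizes match the sequences emitted below -- a code
+// target call is li (lui + ori, materializing the address in t9) + jalr +
+// delay-slot nop, i.e. 4 * kInstrSize, while a register call is just
+// jalr + nop, i.e. 2 * kInstrSize.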
+
+
+// Note: To call gcc-compiled C code on mips, you must call through t9.
+void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (target.is_reg()) {
+ jalr(target.rm());
+ } else { // !target.is_reg().
+ if (!MustUseReg(target.rmode_)) {
+ jal(target.imm32_);
+ } else { // MustUseReg(target).
+ // Must record previous source positions before the
+ // li() generates a new code target.
+ positions_recorder()->WriteRecordedPositions();
+ li(t9, target);
+ jalr(t9);
+ }
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
+// Note: To call gcc-compiled C code on mips, you must call through t9.
void MacroAssembler::Call(const Operand& target,
- Condition cond, Register rs, const Operand& rt) {
+ Condition cond, Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ BRANCH_ARGS_CHECK(cond, rs, rt);
if (target.is_reg()) {
if (cond == cc_always) {
jalr(target.rm());
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
jalr(target.rm());
}
- } else { // !target.is_reg()
- if (!MustUseAt(target.rmode_)) {
+ } else { // !target.is_reg().
+ if (!MustUseReg(target.rmode_)) {
if (cond == cc_always) {
jal(target.imm32_);
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
jal(target.imm32_); // Will generate only one instruction.
}
- } else { // MustUseAt(target)
- li(at, target);
+ } else { // MustUseReg(target)
+ li(t9, target);
if (cond == cc_always) {
- jalr(at);
+ jalr(t9);
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
- jalr(at); // Will generate only one instruction.
+ Branch(2, NegateCondition(cond), rs, rt);
+ jalr(t9); // Will generate only one instruction.
}
}
}
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
-void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
- UNIMPLEMENTED_MIPS();
+
+void MacroAssembler::CallWithAstId(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id,
+ Condition cond,
+ Register r1,
+ const Operand& r2) {
+ ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
+ ASSERT(ast_id != kNoASTId);
+ ASSERT(ast_id_for_reloc_info_ == kNoASTId);
+ ast_id_for_reloc_info_ = ast_id;
+ Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
}
-void MacroAssembler::Drop(int count, Condition cond) {
- UNIMPLEMENTED_MIPS();
+void MacroAssembler::Drop(int count,
+ Condition cond,
+ Register reg,
+ const Operand& op) {
+ if (count <= 0) {
+ return;
+ }
+
+ Label skip;
+
+ if (cond != al) {
+ Branch(&skip, NegateCondition(cond), reg, op);
+ }
+
+ addiu(sp, sp, count * kPointerSize);
+
+ if (cond != al) {
+ bind(&skip);
+ }
+}
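+
+// Usage sketch (editor's illustration): Drop(3) emits a single
+//   addiu sp, sp, 12   // 3 * kPointerSize on mips32
+// while Drop(3, eq, a0, Operand(zero_reg)) branches over that addiu with
+// the negated condition, so the three slots are popped only when a0 == 0.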
+
+
+void MacroAssembler::DropAndRet(int drop,
+ Condition cond,
+ Register r1,
+ const Operand& r2) {
+ // This is a workaround to make sure only one branch instruction is
+ // generated. It relies on Drop and Ret not creating branches if
+ // cond == cc_always.
+ Label skip;
+ if (cond != cc_always) {
+ Branch(&skip, NegateCondition(cond), r1, r2);
+ }
+
+ Drop(drop);
+ Ret();
+
+ if (cond != cc_always) {
+ bind(&skip);
+ }
+}
+
+
+void MacroAssembler::Swap(Register reg1,
+ Register reg2,
+ Register scratch) {
+ if (scratch.is(no_reg)) {
+ Xor(reg1, reg1, Operand(reg2));
+ Xor(reg2, reg2, Operand(reg1));
+ Xor(reg1, reg1, Operand(reg2));
+ } else {
+ mov(scratch, reg1);
+ mov(reg1, reg2);
+ mov(reg2, scratch);
+ }
}
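+
+// Editor's note: the scratch-free path above is the classic XOR swap,
+//   reg1 ^= reg2; reg2 ^= reg1; reg1 ^= reg2;
+// It avoids a third register, but like any XOR swap it would zero the
+// register if reg1 and reg2 were the same register, so callers must pass
+// distinct registers when no scratch is supplied.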
void MacroAssembler::Call(Label* target) {
- UNIMPLEMENTED_MIPS();
+ BranchAndLink(target);
+}
+
+
+void MacroAssembler::Push(Handle<Object> handle) {
+ li(at, Operand(handle));
+ push(at);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
- // ---------------------------------------------------------------------------
- // Debugger Support
- void MacroAssembler::DebugBreak() {
- UNIMPLEMENTED_MIPS();
- }
-#endif
+void MacroAssembler::DebugBreak() {
+ ASSERT(allow_stub_calls());
+ mov(a0, zero_reg);
+ li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
+ CEntryStub ces(1);
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
+
+#endif // ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
-// Exception handling
+// Exception handling.
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
@@ -822,7 +2199,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize
&& StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// Save the current handler as the next handler.
- LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
+ li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
lw(t1, MemOperand(t2));
addiu(sp, sp, -StackHandlerConstants::kSize);
@@ -848,7 +2225,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
li(t0, Operand(StackHandler::ENTRY));
// Save the current handler as the next handler.
- LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
+ li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
lw(t1, MemOperand(t2));
addiu(sp, sp, -StackHandlerConstants::kSize);
@@ -864,57 +2241,692 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
void MacroAssembler::PopTryHandler() {
- UNIMPLEMENTED_MIPS();
+ ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
+ pop(a1);
+ Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+ li(at, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+ sw(a1, MemOperand(at));
}
+void MacroAssembler::Throw(Register value) {
+ // v0 is expected to hold the exception.
+ Move(v0, value);
-// -----------------------------------------------------------------------------
-// Activation frames
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop the sp to the top of the handler.
+ li(a3, Operand(ExternalReference(Isolate::k_handler_address,
+ isolate())));
+ lw(sp, MemOperand(a3));
+
+ // Restore the next handler and frame pointer, discard handler state.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ pop(a2);
+ sw(a2, MemOperand(a3));
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+ MultiPop(a3.bit() | fp.bit());
+
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of a
+ // JS entry frame.
+ // Set cp to NULL if fp is NULL.
+ Label done;
+ Branch(USE_DELAY_SLOT, &done, eq, fp, Operand(zero_reg));
+ mov(cp, zero_reg); // In branch delay slot.
+ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ bind(&done);
+
+#ifdef DEBUG
+ // When emitting debug_code, set ra as return address for the jump.
+ // 5 instructions: add: 1, pop: 2, jump: 2.
+ const int kOffsetRaInstructions = 5;
+ Label find_ra;
+
+ if (emit_debug_code()) {
+ // Compute ra for the Jump(t9).
+ const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
+
+ // This branch-and-link sequence is needed to get the current PC on mips,
+ // saved to the ra register. Then adjusted for instruction count.
+ bal(&find_ra); // bal exposes branch-delay slot.
+ nop(); // Branch delay slot nop.
+ bind(&find_ra);
+ addiu(ra, ra, kOffsetRaBytes);
+ }
+#endif
+
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ pop(t9); // 2 instructions: lw, add sp.
+ Jump(t9); // 2 instructions: jr, nop (in delay slot).
+
+ if (emit_debug_code()) {
+ // Make sure that the expected number of instructions were generated.
+ ASSERT_EQ(kOffsetRaInstructions,
+ InstructionsGeneratedSince(&find_ra));
+ }
+}
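+
+// Editor's note: MIPS has no direct "read pc" instruction, so the
+// bal(&find_ra) / bind(&find_ra) pair above is the standard idiom for
+// loading the current pc into ra; the addiu then advances ra past the five
+// instructions between find_ra and the end of Jump(t9) so the ASSERT can
+// verify the emitted instruction count.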
+
+
+void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
+ Register value) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // v0 is expected to hold the exception.
+ Move(v0, value);
+
+ // Drop sp to the top stack handler.
+ li(a3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+ lw(sp, MemOperand(a3));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label loop, done;
+ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ lw(a2, MemOperand(sp, kStateOffset));
+ Branch(&done, eq, a2, Operand(StackHandler::ENTRY));
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ lw(sp, MemOperand(sp, kNextOffset));
+ jmp(&loop);
+ bind(&done);
+
+ // Set the top handler address to the next handler past the current ENTRY handler.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ pop(a2);
+ sw(a2, MemOperand(a3));
+
+ if (type == OUT_OF_MEMORY) {
+ // Set external caught exception to false.
+ ExternalReference external_caught(
+ Isolate::k_external_caught_exception_address, isolate());
+ li(a0, Operand(false, RelocInfo::NONE));
+ li(a2, Operand(external_caught));
+ sw(a0, MemOperand(a2));
+
+ // Set pending exception and v0 to out of memory exception.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ isolate())));
+ sw(v0, MemOperand(a2));
+ }
+
+ // Stack layout at this point. See also StackHandlerConstants.
+ // sp -> state (ENTRY)
+ // fp
+ // ra
+
+ // Discard handler state (a2 is not used) and restore frame pointer.
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+ MultiPop(a2.bit() | fp.bit()); // a2: discarded state.
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of a
+ // JS entry frame.
+ Label cp_null;
+ Branch(USE_DELAY_SLOT, &cp_null, eq, fp, Operand(zero_reg));
+ mov(cp, zero_reg); // In the branch delay slot.
+ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ bind(&cp_null);
+
+#ifdef DEBUG
+ // When emitting debug_code, set ra as return address for the jump.
+ // 5 instructions: add: 1, pop: 2, jump: 2.
+ const int kOffsetRaInstructions = 5;
+ Label find_ra;
+
+ if (emit_debug_code()) {
+ // Compute ra for the Jump(t9).
+ const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
+
+ // This branch-and-link sequence is needed to get the current PC on mips,
+ // saved to the ra register. Then adjusted for instruction count.
+ bal(&find_ra); // bal exposes branch-delay slot.
+ nop(); // Branch delay slot nop.
+ bind(&find_ra);
+ addiu(ra, ra, kOffsetRaBytes);
+ }
+#endif
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ pop(t9); // 2 instructions: lw, add sp.
+ Jump(t9); // 2 instructions: jr, nop (in delay slot).
+
+ if (emit_debug_code()) {
+ // Make sure that the expected number of instructions were generated.
+ ASSERT_EQ(kOffsetRaInstructions,
+ InstructionsGeneratedSince(&find_ra));
+ }
+}
+
+
+void MacroAssembler::AllocateInNewSpace(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ li(result, 0x7091);
+ li(scratch1, 0x7191);
+ li(scratch2, 0x7291);
+ }
+ jmp(gc_required);
+ return;
+ }
+
+ ASSERT(!result.is(scratch1));
+ ASSERT(!result.is(scratch2));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!scratch1.is(t9));
+ ASSERT(!scratch2.is(t9));
+ ASSERT(!result.is(t9));
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ ASSERT_EQ(0, object_size & kObjectAlignmentMask);
+
+ // Check relative positions of allocation top and limit addresses.
+ // ARM adds additional checks to make sure the ldm instruction can be
+ // used. On MIPS we don't have ldm so we don't need additional checks either.
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address(isolate());
+ intptr_t top =
+ reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+ intptr_t limit =
+ reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+ ASSERT((limit - top) == kPointerSize);
+
+ // Set up allocation top address and object size registers.
+ Register topaddr = scratch1;
+ Register obj_size_reg = scratch2;
+ li(topaddr, Operand(new_space_allocation_top));
+ li(obj_size_reg, Operand(object_size));
+
+ // This code stores a temporary value in t9.
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and allocation limit into t9.
+ lw(result, MemOperand(topaddr));
+ lw(t9, MemOperand(topaddr, kPointerSize));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry. t9 is used
+ // immediately below, so this use of t9 does not cause a difference in
+ // register content between debug and release mode.
+ lw(t9, MemOperand(topaddr));
+ Check(eq, "Unexpected allocation top", result, Operand(t9));
+ }
+ // Load allocation limit into t9. Result already contains allocation top.
+ lw(t9, MemOperand(topaddr, limit - top));
+ }
+
+ // Calculate new top and bail out if new space is exhausted. Use result
+ // to calculate the new top.
+ Addu(scratch2, result, Operand(obj_size_reg));
+ Branch(gc_required, Ugreater, scratch2, Operand(t9));
+ sw(scratch2, MemOperand(topaddr));
+
+ // Tag object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ Addu(result, result, Operand(kHeapObjectTag));
+ }
+}
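+
+// Usage sketch (editor's illustration; the label and registers are
+// hypothetical):
+//   Label gc;
+//   AllocateInNewSpace(HeapNumber::kSize, v0, t0, t1, &gc, TAG_OBJECT);
+// bumps the new-space top by kSize bytes, leaves the tagged pointer in v0,
+// and jumps to gc when the bump would cross the allocation limit in t9.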
+
+
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ li(result, 0x7091);
+ li(scratch1, 0x7191);
+ li(scratch2, 0x7291);
+ }
+ jmp(gc_required);
+ return;
+ }
+
+ ASSERT(!result.is(scratch1));
+ ASSERT(!result.is(scratch2));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
+
+ // Check relative positions of allocation top and limit addresses.
+ // ARM adds additional checks to make sure the ldm instruction can be
+ // used. On MIPS we don't have ldm so we don't need additional checks either.
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address(isolate());
+ intptr_t top =
+ reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+ intptr_t limit =
+ reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+ ASSERT((limit - top) == kPointerSize);
+
+ // Set up allocation top address and object size registers.
+ Register topaddr = scratch1;
+ li(topaddr, Operand(new_space_allocation_top));
+
+ // This code stores a temporary value in t9.
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and allocation limit into t9.
+ lw(result, MemOperand(topaddr));
+ lw(t9, MemOperand(topaddr, kPointerSize));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry. t9 is used
+ // immediately below, so this use of t9 does not cause a difference in
+ // register content between debug and release mode.
+ lw(t9, MemOperand(topaddr));
+ Check(eq, "Unexpected allocation top", result, Operand(t9));
+ }
+ // Load allocation limit into t9. Result already contains allocation top.
+ lw(t9, MemOperand(topaddr, limit - top));
+ }
+
+ // Calculate new top and bail out if new space is exhausted. Use result
+ // to calculate the new top. Object size may be in words so a shift is
+ // required to get the number of bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ sll(scratch2, object_size, kPointerSizeLog2);
+ Addu(scratch2, result, scratch2);
+ } else {
+ Addu(scratch2, result, Operand(object_size));
+ }
+ Branch(gc_required, Ugreater, scratch2, Operand(t9));
+
+ // Update allocation top. result temporarily holds the new top.
+ if (emit_debug_code()) {
+ And(t9, scratch2, Operand(kObjectAlignmentMask));
+ Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
+ }
+ sw(scratch2, MemOperand(topaddr));
+
+ // Tag object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ Addu(result, result, Operand(kHeapObjectTag));
+ }
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object,
+ Register scratch) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ // Make sure the object has no tag before resetting top.
+ And(object, object, Operand(~kHeapObjectTagMask));
+#ifdef DEBUG
+ // Check that the object un-allocated is below the current top.
+ li(scratch, Operand(new_space_allocation_top));
+ lw(scratch, MemOperand(scratch));
+ Check(less, "Undo allocation of non allocated memory",
+ object, Operand(scratch));
+#endif
+ // Write the address of the object to un-allocate as the current top.
+ li(scratch, Operand(new_space_allocation_top));
+ sw(object, MemOperand(scratch));
+}
+
+
+void MacroAssembler::AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ sll(scratch1, length, 1); // Length in bytes, not chars.
+ addiu(scratch1, scratch1,
+ kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
+ And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
+
+ // Allocate two-byte string in new space.
+ AllocateInNewSpace(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result,
+ length,
+ Heap::kStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string
+ // while observing object alignment.
+ ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT(kCharSize == 1);
+ addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
+ And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
+
+ // Allocate ASCII string in new space.
+ AllocateInNewSpace(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result,
+ length,
+ Heap::kAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ AllocateInNewSpace(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+ InitializeNewString(result,
+ length,
+ Heap::kConsStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ AllocateInNewSpace(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+ InitializeNewString(result,
+ length,
+ Heap::kConsAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+// Allocates a heap number or jumps to the label if the young space is full and
+// a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* need_gc) {
+ // Allocate an object in the heap for the heap number and tag it as a heap
+ // object.
+ AllocateInNewSpace(HeapNumber::kSize,
+ result,
+ scratch1,
+ scratch2,
+ need_gc,
+ TAG_OBJECT);
+
+ // Store heap number map in the allocated object.
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+}
+
+
+void MacroAssembler::AllocateHeapNumberWithValue(Register result,
+ FPURegister value,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+ AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
+ sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+// Copies a fixed number of fields of heap objects from src to dst.
+void MacroAssembler::CopyFields(Register dst,
+ Register src,
+ RegList temps,
+ int field_count) {
+ ASSERT((temps & dst.bit()) == 0);
+ ASSERT((temps & src.bit()) == 0);
+ // Primitive implementation using only one temporary register.
+
+ Register tmp = no_reg;
+ // Find a temp register in temps list.
+ for (int i = 0; i < kNumRegisters; i++) {
+ if ((temps & (1 << i)) != 0) {
+ tmp.code_ = i;
+ break;
+ }
+ }
+ ASSERT(!tmp.is(no_reg));
+
+ for (int i = 0; i < field_count; i++) {
+ lw(tmp, FieldMemOperand(src, i * kPointerSize));
+ sw(tmp, FieldMemOperand(dst, i * kPointerSize));
+ }
+}
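+
+// Example (editor's sketch): CopyFields(a0, a1, t0.bit() | t1.bit(), 3)
+// copies three pointer-size fields from the object in a1 to the object in
+// a0; only the lowest-numbered register in the temps list (t0 here) is
+// actually used by this primitive one-temp implementation.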
+
+
+void MacroAssembler::CopyBytes(Register src,
+ Register dst,
+ Register length,
+ Register scratch) {
+ Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+
+ // Align src before copying in word size chunks.
+ bind(&align_loop);
+ Branch(&done, eq, length, Operand(zero_reg));
+ bind(&align_loop_1);
+ And(scratch, src, kPointerSize - 1);
+ Branch(&word_loop, eq, scratch, Operand(zero_reg));
+ lbu(scratch, MemOperand(src));
+ Addu(src, src, 1);
+ sb(scratch, MemOperand(dst));
+ Addu(dst, dst, 1);
+ Subu(length, length, Operand(1));
+ Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+
+ // Copy bytes in word size chunks.
+ bind(&word_loop);
+ if (emit_debug_code()) {
+ And(scratch, src, kPointerSize - 1);
+ Assert(eq, "Expecting alignment for CopyBytes",
+ scratch, Operand(zero_reg));
+ }
+ Branch(&byte_loop, lt, length, Operand(kPointerSize));
+ lw(scratch, MemOperand(src));
+ Addu(src, src, kPointerSize);
+
+ // TODO(kalmard) check if this can be optimized to use sw in most cases.
+ // Can't use unaligned access - copy byte by byte.
+ sb(scratch, MemOperand(dst, 0));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 1));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 2));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 3));
+ Addu(dst, dst, 4);
+
+ Subu(length, length, Operand(kPointerSize));
+ Branch(&word_loop);
+
+ // Copy the last bytes if any left.
+ bind(&byte_loop);
+ Branch(&done, eq, length, Operand(zero_reg));
+ bind(&byte_loop_1);
+ lbu(scratch, MemOperand(src));
+ Addu(src, src, 1);
+ sb(scratch, MemOperand(dst));
+ Addu(dst, dst, 1);
+ Subu(length, length, Operand(1));
+ Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+ bind(&done);
+}
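+
+// Editor's note: on little-endian MIPS (the configuration V8 targets) the
+// word loop above loads four bytes at once but stores them back with four
+// sb/srl pairs; for the word 0x44332211 it stores 0x11, 0x22, 0x33, 0x44 to
+// dst+0..dst+3, preserving byte order while tolerating an unaligned dst.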
+
+
+void MacroAssembler::CheckFastElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
+ lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+ lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ li(at, Operand(map));
+ Branch(fail, ne, scratch, Operand(at));
+}
-void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) {
- Label extra_push, end;
- andi(scratch, sp, 7);
+void MacroAssembler::DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
+ Label fail;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, &fail);
+ }
+ lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
+ bind(&fail);
+}
- // We check for args and receiver size on the stack, all of them word sized.
- // We add one for sp, that we also want to store on the stack.
- if (((arg_count + 1) % kPointerSizeLog2) == 0) {
- Branch(ne, &extra_push, at, Operand(zero_reg));
- } else { // ((arg_count + 1) % 2) == 1
- Branch(eq, &extra_push, at, Operand(zero_reg));
+
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
}
+ lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ LoadRoot(at, index);
+ Branch(fail, ne, scratch, Operand(at));
+}
- // Save sp on the stack.
- mov(scratch, sp);
- Push(scratch);
- b(&end);
- // Align before saving sp on the stack.
- bind(&extra_push);
- mov(scratch, sp);
- addiu(sp, sp, -8);
- sw(scratch, MemOperand(sp));
+void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+ CpuFeatures::Scope scope(FPU);
+ if (IsMipsSoftFloatABI) {
+ Move(dst, v0, v1);
+ } else {
+ Move(dst, f0); // Reg f0 is o32 ABI FP return value.
+ }
+}
- // The stack is aligned and sp is stored on the top.
- bind(&end);
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
+ CpuFeatures::Scope scope(FPU);
+ if (!IsMipsSoftFloatABI) {
+ Move(f12, dreg);
+ } else {
+ Move(a0, a1, dreg);
+ }
}
-void MacroAssembler::ReturnFromAlignedCall() {
- lw(sp, MemOperand(sp));
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
+ DoubleRegister dreg2) {
+ CpuFeatures::Scope scope(FPU);
+ if (!IsMipsSoftFloatABI) {
+ if (dreg2.is(f12)) {
+ ASSERT(!dreg1.is(f14));
+ Move(f14, dreg2);
+ Move(f12, dreg1);
+ } else {
+ Move(f12, dreg1);
+ Move(f14, dreg2);
+ }
+ } else {
+ Move(a0, a1, dreg1);
+ Move(a2, a3, dreg2);
+ }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
+ Register reg) {
+ CpuFeatures::Scope scope(FPU);
+ if (!IsMipsSoftFloatABI) {
+ Move(f12, dreg);
+ Move(a2, reg);
+ } else {
+ Move(a2, reg);
+ Move(a0, a1, dreg);
+ }
+}
+
+
+void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
+ // This macro takes the dst register to make the code more readable
+ // at the call sites. However, the dst register has to be t1 to
+ // follow the calling convention which requires the call type to be
+ // in t1.
+ ASSERT(dst.is(t1));
+ if (call_kind == CALL_AS_FUNCTION) {
+ li(dst, Operand(Smi::FromInt(1)));
+ } else {
+ li(dst, Operand(Smi::FromInt(0)));
+ }
}
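+
+// Usage sketch (editor's illustration): every invoke path below emits e.g.
+//   SetCallKind(t1, call_kind);
+//   Call(adaptor, RelocInfo::CODE_TARGET);
+// so the callee can distinguish CALL_AS_FUNCTION (Smi 1 in t1) from a
+// method call (Smi 0) when it inspects its call kind.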
// -----------------------------------------------------------------------------
-// JavaScript invokes
+// JavaScript invokes.
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
Register code_reg,
Label* done,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
bool definitely_matches = false;
Label regular_invoke;
@@ -950,10 +2962,10 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
} else if (actual.is_immediate()) {
- Branch(eq, &regular_invoke, expected.reg(), Operand(actual.immediate()));
+ Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
li(a0, Operand(actual.immediate()));
} else {
- Branch(eq, &regular_invoke, expected.reg(), Operand(actual.reg()));
+ Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
}
if (!definitely_matches) {
@@ -962,29 +2974,39 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
}
- ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline);
+ Handle<Code> adaptor =
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
- CallBuiltin(adaptor);
- b(done);
- nop();
+ call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+ SetCallKind(t1, call_kind);
+ Call(adaptor, RelocInfo::CODE_TARGET);
+ call_wrapper.AfterCall();
+ jmp(done);
} else {
- JumpToBuiltin(adaptor);
+ SetCallKind(t1, call_kind);
+ Jump(adaptor, RelocInfo::CODE_TARGET);
}
bind(&regular_invoke);
}
}
+
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
Label done;
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+ InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+ call_wrapper, call_kind);
if (flag == CALL_FUNCTION) {
+ SetCallKind(t1, call_kind);
Call(code);
} else {
ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(t1, call_kind);
Jump(code);
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -997,13 +3019,17 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ CallKind call_kind) {
Label done;
- InvokePrologue(expected, actual, code, no_reg, &done, flag);
+ InvokePrologue(expected, actual, code, no_reg, &done, flag,
+ NullCallWrapper(), call_kind);
if (flag == CALL_FUNCTION) {
+ SetCallKind(t1, call_kind);
Call(code, rmode);
} else {
+ SetCallKind(t1, call_kind);
Jump(code, rmode);
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -1014,7 +3040,9 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
// Contract with called JS functions requires that function is passed in a1.
ASSERT(function.is(a1));
Register expected_reg = a2;
@@ -1025,72 +3053,125 @@ void MacroAssembler::InvokeFunction(Register function,
lw(expected_reg,
FieldMemOperand(code_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
- lw(code_reg,
- MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
- addiu(code_reg, code_reg, Code::kHeaderSize - kHeapObjectTag);
+ sra(expected_reg, expected_reg, kSmiTagSize);
+ lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
+}
+
+
+void MacroAssembler::InvokeFunction(JSFunction* function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ CallKind call_kind) {
+ ASSERT(function->is_compiled());
+
+ // Get the function and setup the context.
+ li(a1, Operand(Handle<JSFunction>(function)));
+ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Invoke the cached code.
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ if (V8::UseCrankshaft()) {
+ UNIMPLEMENTED_MIPS();
+ } else {
+ InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
+ }
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail) {
+ lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
+ IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail) {
+ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+}
+
+
+void MacroAssembler::IsObjectJSStringType(Register object,
+ Register scratch,
+ Label* fail) {
+ ASSERT(kNotStringTag != 0);
+
+ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ And(scratch, scratch, Operand(kIsNotStringMask));
+ Branch(fail, ne, scratch, Operand(zero_reg));
}
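+// Note on the check above: string instance types all have the
+// kIsNotStringMask bit clear (kNotStringTag is asserted non-zero), so a
+// single AND against the mask distinguishes strings from non-strings
+// without comparing against a range of instance types.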
// ---------------------------------------------------------------------------
// Support functions.
- void MacroAssembler::GetObjectType(Register function,
- Register map,
- Register type_reg) {
- lw(map, FieldMemOperand(function, HeapObject::kMapOffset));
- lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- }
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss) {
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
- void MacroAssembler::CallBuiltin(ExternalReference builtin_entry) {
- // Load builtin address.
- LoadExternalReference(t9, builtin_entry);
- lw(t9, MemOperand(t9)); // Deref address.
- addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
-  // Call and allocate argument slots.
-  jalr(t9);
-  // Use the branch delay slot to allocate argument slots.
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
- addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
- }
+ // Check that the function really is a function. Load map into result reg.
+ GetObjectType(function, result, scratch);
+ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+ Branch(&non_instance, ne, scratch, Operand(zero_reg));
- void MacroAssembler::CallBuiltin(Register target) {
- // Target already holds target address.
-  // Call and allocate argument slots.
-  jalr(target);
-  // Use the branch delay slot to allocate argument slots.
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
- addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
- }
+ // Get the prototype or initial map from the function.
+ lw(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ // If the prototype or initial map is the hole, don't return it and
+ // simply miss the cache instead. This will allow us to allocate a
+ // prototype object on-demand in the runtime system.
+ LoadRoot(t8, Heap::kTheHoleValueRootIndex);
+ Branch(miss, eq, result, Operand(t8));
- void MacroAssembler::JumpToBuiltin(ExternalReference builtin_entry) {
- // Load builtin address.
- LoadExternalReference(t9, builtin_entry);
- lw(t9, MemOperand(t9)); // Deref address.
- addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
-  // Jump and allocate argument slots.
-  jr(t9);
-  // Use the branch delay slot to allocate argument slots.
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
- }
+ // If the function does not have an initial map, we're done.
+ Label done;
+ GetObjectType(result, scratch, scratch);
+ Branch(&done, ne, scratch, Operand(MAP_TYPE));
+ // Get the prototype from the initial map.
+ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ jmp(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ bind(&done);
+}
- void MacroAssembler::JumpToBuiltin(Register target) {
- // t9 already holds target address.
-  // Jump and allocate argument slots.
-  jr(t9);
-  // Use the branch delay slot to allocate argument slots.
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
- }
+
+void MacroAssembler::GetObjectType(Register object,
+ Register map,
+ Register type_reg) {
+ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+}
// -----------------------------------------------------------------------------
-// Runtime calls
+// Runtime calls.
void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
Register r1, const Operand& r2) {
@@ -1099,8 +3180,134 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
}
-void MacroAssembler::StubReturn(int argc) {
- UNIMPLEMENTED_MIPS();
+MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
+ Register r1, const Operand& r2) {
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ Object* result;
+ { MaybeObject* maybe_result = stub->TryGetCode();
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
+ return result;
+}
+
+
+void MacroAssembler::TailCallStub(CodeStub* stub) {
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
+ Condition cond,
+ Register r1,
+ const Operand& r2) {
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ Object* result;
+ { MaybeObject* maybe_result = stub->TryGetCode();
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
+ return result;
+}
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+
+MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
+ ExternalReference function, int stack_space) {
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address();
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(),
+ next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(),
+ next_address);
+
+ // Allocate HandleScope in callee-save registers.
+ li(s3, Operand(next_address));
+ lw(s0, MemOperand(s3, kNextOffset));
+ lw(s1, MemOperand(s3, kLimitOffset));
+ lw(s2, MemOperand(s3, kLevelOffset));
+ Addu(s2, s2, Operand(1));
+ sw(s2, MemOperand(s3, kLevelOffset));
+
+ // The O32 ABI requires us to pass a pointer in a0 where the returned struct
+ // (4 bytes) will be placed. This is also built into the Simulator.
+ // Set up the pointer to the returned value (a0). It was allocated in
+ // EnterExitFrame.
+ addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
+
+ // Native call returns to the DirectCEntry stub which redirects to the
+ // return address pushed on stack (could have moved after GC).
+ // DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub;
+ stub.GenerateCall(this, function);
+
+ // As mentioned above, on MIPS a pointer is returned - we need to dereference
+ // it to get the actual return value (which is also a pointer).
+ lw(v0, MemOperand(v0));
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+
+  // If result is non-zero, dereference it to get the result value;
+  // otherwise set it to undefined.
+ Label skip;
+ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ Branch(&skip, eq, v0, Operand(zero_reg));
+ lw(a0, MemOperand(v0));
+ bind(&skip);
+ mov(v0, a0);
+
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ sw(s0, MemOperand(s3, kNextOffset));
+ if (emit_debug_code()) {
+ lw(a1, MemOperand(s3, kLevelOffset));
+ Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
+ }
+ Subu(s2, s2, Operand(1));
+ sw(s2, MemOperand(s3, kLevelOffset));
+ lw(at, MemOperand(s3, kLimitOffset));
+ Branch(&delete_allocated_handles, ne, s1, Operand(at));
+
+ // Check if the function scheduled an exception.
+ bind(&leave_exit_frame);
+ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
+ lw(t1, MemOperand(at));
+ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
+ li(s0, Operand(stack_space));
+ LeaveExitFrame(false, s0);
+ Ret();
+
+ bind(&promote_scheduled_exception);
+ MaybeObject* result = TryTailCallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1);
+ if (result->IsFailure()) {
+ return result;
+ }
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ bind(&delete_allocated_handles);
+ sw(s1, MemOperand(s3, kLimitOffset));
+ mov(s0, v0);
+ mov(a0, v0);
+ PrepareCallCFunction(1, s1);
+ li(a0, Operand(ExternalReference::isolate_address()));
+ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
+ 1);
+ mov(v0, s0);
+ jmp(&leave_exit_frame);
+
+ return result;
}
@@ -1112,7 +3319,138 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
}
-void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+void MacroAssembler::IndexFromHash(Register hash,
+ Register index) {
+ // If the hash field contains an array index pick it out. The assert checks
+ // that the constants for the maximum number of digits for an array index
+ // cached in the hash field and the number of bits reserved for it does not
+ // conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+  // We want the smi-tagged index in the index register. The array index
+  // value occupies kArrayIndexValueBits of the hash, starting at kHashShift.
+ STATIC_ASSERT(kSmiTag == 0);
+ Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
+ sll(index, hash, kSmiTagSize);
+}
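+// The Ext above pulls kArrayIndexValueBits out of the hash field starting
+// at bit kHashShift (the cached array index), and the sll re-tags the
+// value as a smi; with kSmiTag == 0 a left shift by kSmiTagSize is all
+// that tagging requires.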
+
+
+void MacroAssembler::ObjectToDoubleFPURegister(Register object,
+ FPURegister result,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* not_number,
+ ObjectToDoubleFlags flags) {
+ Label done;
+ if ((flags & OBJECT_NOT_SMI) == 0) {
+ Label not_smi;
+ JumpIfNotSmi(object, &not_smi);
+ // Remove smi tag and convert to double.
+ sra(scratch1, object, kSmiTagSize);
+ mtc1(scratch1, result);
+ cvt_d_w(result, result);
+ Branch(&done);
+ bind(&not_smi);
+ }
+ // Check for heap number and load double value from it.
+ lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
+ Branch(not_number, ne, scratch1, Operand(heap_number_map));
+
+ if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
+ // If exponent is all ones the number is either a NaN or +/-Infinity.
+ Register exponent = scratch1;
+ Register mask_reg = scratch2;
+ lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ li(mask_reg, HeapNumber::kExponentMask);
+
+ And(exponent, exponent, mask_reg);
+ Branch(not_number, eq, exponent, Operand(mask_reg));
+ }
+ ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
+ bind(&done);
+}
+
+
+void MacroAssembler::SmiToDoubleFPURegister(Register smi,
+ FPURegister value,
+ Register scratch1) {
+ sra(scratch1, smi, kSmiTagSize);
+ mtc1(scratch1, value);
+ cvt_d_w(value, value);
+}
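+// A smi stores its payload in the upper 31 bits, so untagging is a single
+// arithmetic shift right by kSmiTagSize; mtc1 then moves the untagged
+// integer into the FPU and cvt_d_w converts it to a double. An
+// illustrative call (the register choice is the caller's):
+//   SmiToDoubleFPURegister(a0, f12, t0);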
+
+
+void MacroAssembler::AdduAndCheckForOverflow(Register dst,
+ Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch) {
+ ASSERT(!dst.is(overflow_dst));
+ ASSERT(!dst.is(scratch));
+ ASSERT(!overflow_dst.is(scratch));
+ ASSERT(!overflow_dst.is(left));
+ ASSERT(!overflow_dst.is(right));
+ ASSERT(!left.is(right));
+
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ addu(dst, left, right); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ xor_(overflow_dst, dst, right);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ addu(dst, left, right); // Right is overwritten.
+ xor_(scratch, dst, scratch); // Original right.
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ addu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, dst, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+}
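+// The xor/and sequences above implement the standard two's complement
+// overflow test: addition overflows exactly when both operands have the
+// same sign but the result's sign differs, which leaves the sign bit of
+// overflow_dst set. An illustrative caller (registers are arbitrary here)
+// branches on that sign bit:
+//   AdduAndCheckForOverflow(v0, a0, a1, t3, t5);
+//   Branch(&overflow, lt, t3, Operand(zero_reg));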
+
+
+void MacroAssembler::SubuAndCheckForOverflow(Register dst,
+ Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch) {
+ ASSERT(!dst.is(overflow_dst));
+ ASSERT(!dst.is(scratch));
+ ASSERT(!overflow_dst.is(scratch));
+ ASSERT(!overflow_dst.is(left));
+ ASSERT(!overflow_dst.is(right));
+ ASSERT(!left.is(right));
+ ASSERT(!scratch.is(left));
+ ASSERT(!scratch.is(right));
+
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ subu(dst, left, right); // Left is overwritten.
+ xor_(overflow_dst, dst, scratch); // scratch is original left.
+    xor_(scratch, scratch, right);  // Original left xor right.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ subu(dst, left, right); // Right is overwritten.
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, scratch); // Original right.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else {
+ subu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+}
+
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
// All parameters are on the stack. v0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -1128,101 +3466,305 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// should remove this need and make the runtime routine entry code
// smarter.
li(a0, num_arguments);
- LoadExternalReference(a1, ExternalReference(f));
+ li(a1, Operand(ExternalReference(f, isolate())));
CEntryStub stub(1);
CallStub(&stub);
}
+void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ li(a0, Operand(function->nargs));
+ li(a1, Operand(ExternalReference(function, isolate())));
+ CEntryStub stub(1);
+ stub.SaveDoubles();
+ CallStub(&stub);
+}
+
+
void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+ int num_arguments) {
+ li(a0, Operand(num_arguments));
+ li(a1, Operand(ext));
+
+ CEntryStub stub(1);
+ CallStub(&stub);
+}
+
+
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size) {
- UNIMPLEMENTED_MIPS();
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ li(a0, Operand(num_arguments));
+ JumpToExternalReference(ext);
+}
+
+
+MaybeObject* MacroAssembler::TryTailCallExternalReference(
+ const ExternalReference& ext, int num_arguments, int result_size) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ li(a0, num_arguments);
+ return TryJumpToExternalReference(ext);
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
- TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
+ TailCallExternalReference(ExternalReference(fid, isolate()),
+ num_arguments,
+ result_size);
}
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
- UNIMPLEMENTED_MIPS();
+ li(a1, Operand(builtin));
+ CEntryStub stub(1);
+ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
- bool* resolved) {
- UNIMPLEMENTED_MIPS();
- return Handle<Code>(reinterpret_cast<Code*>(NULL)); // UNIMPLEMENTED RETURN
+MaybeObject* MacroAssembler::TryJumpToExternalReference(
+ const ExternalReference& builtin) {
+ li(a1, Operand(builtin));
+ CEntryStub stub(1);
+ return TryTailCallStub(&stub);
}
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags) {
- UNIMPLEMENTED_MIPS();
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ GetBuiltinEntry(t9, id);
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(t9));
+ SetCallKind(t1, CALL_AS_METHOD);
+ Call(t9);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(t1, CALL_AS_METHOD);
+ Jump(t9);
+ }
+}
+
+
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
+ // Load the builtins object into target register.
+ lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+ // Load the JavaScript builtin function from the builtins object.
+ lw(target, FieldMemOperand(target,
+ JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(!target.is(a1));
+ GetBuiltinFunction(a1, id);
+ // Load the code entry point from the builtins object.
+ lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
}
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- UNIMPLEMENTED_MIPS();
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ li(scratch1, Operand(value));
+ li(scratch2, Operand(ExternalReference(counter)));
+ sw(scratch1, MemOperand(scratch2));
+ }
}
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ li(scratch2, Operand(ExternalReference(counter)));
+ lw(scratch1, MemOperand(scratch2));
+ Addu(scratch1, scratch1, Operand(value));
+ sw(scratch1, MemOperand(scratch2));
+ }
}
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ li(scratch2, Operand(ExternalReference(counter)));
+ lw(scratch1, MemOperand(scratch2));
+ Subu(scratch1, scratch1, Operand(value));
+ sw(scratch1, MemOperand(scratch2));
+ }
}
// -----------------------------------------------------------------------------
-// Debugging
+// Debugging.
void MacroAssembler::Assert(Condition cc, const char* msg,
Register rs, Operand rt) {
- UNIMPLEMENTED_MIPS();
+ if (emit_debug_code())
+ Check(cc, msg, rs, rt);
+}
+
+
+void MacroAssembler::AssertRegisterIsRoot(Register reg,
+ Heap::RootListIndex index) {
+ if (emit_debug_code()) {
+ LoadRoot(at, index);
+ Check(eq, "Register did not match expected root", reg, Operand(at));
+ }
+}
+
+
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (emit_debug_code()) {
+ ASSERT(!elements.is(at));
+ Label ok;
+ push(elements);
+ lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
+ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ Branch(&ok, eq, elements, Operand(at));
+ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
+ Branch(&ok, eq, elements, Operand(at));
+ Abort("JSObject with fast elements map has slow elements");
+ bind(&ok);
+ pop(elements);
+ }
}
void MacroAssembler::Check(Condition cc, const char* msg,
Register rs, Operand rt) {
- UNIMPLEMENTED_MIPS();
+ Label L;
+ Branch(&L, cc, rs, rt);
+ Abort(msg);
+ // Will not return here.
+ bind(&L);
}
void MacroAssembler::Abort(const char* msg) {
- UNIMPLEMENTED_MIPS();
+ Label abort_start;
+ bind(&abort_start);
+ // We want to pass the msg string like a smi to avoid GC
+  // problems; however, msg is not guaranteed to be aligned
+ // properly. Instead, we pass an aligned pointer that is
+ // a proper v8 smi, but also pass the alignment difference
+ // from the real pointer as a smi.
+ intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+ intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+ ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
+#ifdef DEBUG
+ if (msg != NULL) {
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
+#endif
+ // Disable stub call restrictions to always allow calls to abort.
+ AllowStubCallsScope allow_scope(this, true);
+
+ li(a0, Operand(p0));
+ push(a0);
+ li(a0, Operand(Smi::FromInt(p1 - p0)));
+ push(a0);
+ CallRuntime(Runtime::kAbort, 2);
+ // Will not return here.
+ if (is_trampoline_pool_blocked()) {
+ // If the calling code cares about the exact number of
+ // instructions generated, we insert padding here to keep the size
+ // of the Abort macro constant.
+ // Currently in debug mode with debug_code enabled the number of
+ // generated instructions is 14, so we use this as a maximum value.
+ static const int kExpectedAbortInstructions = 14;
+ int abort_instructions = InstructionsGeneratedSince(&abort_start);
+ ASSERT(abort_instructions <= kExpectedAbortInstructions);
+ while (abort_instructions++ < kExpectedAbortInstructions) {
+ nop();
+ }
+ }
+}
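+// The pointer-splitting trick above passes the message as two smis: p0 is
+// the pointer rounded down to a smi-aligned value and p1 - p0 is the
+// small remainder, so the GC never sees an untagged raw pointer on the
+// stack while Runtime::kAbort can still reconstruct msg as p0 + offset.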
+
+
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ for (int i = 1; i < context_chain_length; i++) {
+ lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ }
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+    // cannot be allowed to destroy the context in cp).
+ Move(dst, cp);
+ }
+
+ // We should not have found a 'with' context by walking the context chain
+ // (i.e., the static scope chain and runtime context chain do not agree).
+ // A variable occurring in such a scope should have slot type LOOKUP and
+ // not CONTEXT.
+ if (emit_debug_code()) {
+ lw(t9, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ Check(eq, "Yo dawg, I heard you liked function contexts "
+ "so I put function contexts in all your contexts",
+ dst, Operand(t9));
+ }
+}
+
+
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+ // Load the global or builtins object from the current context.
+ lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Load the global context from the global or builtins object.
+ lw(function, FieldMemOperand(function,
+ GlobalObject::kGlobalContextOffset));
+ // Load the function from the global context.
+ lw(function, MemOperand(function, Context::SlotOffset(index)));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch) {
+ // Load the initial map. The global functions all have initial maps.
+ lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ if (emit_debug_code()) {
+ Label ok, fail;
+ CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
+ Branch(&ok);
+ bind(&fail);
+ Abort("Global functions must have initial map");
+ bind(&ok);
+ }
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
addiu(sp, sp, -5 * kPointerSize);
- li(t0, Operand(Smi::FromInt(type)));
- li(t1, Operand(CodeObject()));
+ li(t8, Operand(Smi::FromInt(type)));
+ li(t9, Operand(CodeObject()));
sw(ra, MemOperand(sp, 4 * kPointerSize));
sw(fp, MemOperand(sp, 3 * kPointerSize));
sw(cp, MemOperand(sp, 2 * kPointerSize));
- sw(t0, MemOperand(sp, 1 * kPointerSize));
- sw(t1, MemOperand(sp, 0 * kPointerSize));
+ sw(t8, MemOperand(sp, 1 * kPointerSize));
+ sw(t9, MemOperand(sp, 0 * kPointerSize));
addiu(fp, sp, 3 * kPointerSize);
}
@@ -1235,92 +3777,474 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
}
-void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode,
- Register hold_argc,
- Register hold_argv,
- Register hold_function) {
- // Compute the argv pointer and keep it in a callee-saved register.
- // a0 is argc.
- sll(t0, a0, kPointerSizeLog2);
- add(hold_argv, sp, t0);
- addi(hold_argv, hold_argv, -kPointerSize);
-
- // Compute callee's stack pointer before making changes and save it as
- // t1 register so that it is restored as sp register on exit, thereby
- // popping the args.
- // t1 = sp + kPointerSize * #args
- add(t1, sp, t0);
+void MacroAssembler::EnterExitFrame(bool save_doubles,
+ int stack_space) {
+  // Set up the frame structure on the stack.
+ STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
+ STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
+ STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
- // Align the stack at this point.
- AlignStack(0);
+ // This is how the stack will look:
+ // fp + 2 (==kCallerSPDisplacement) - old stack's end
+ // [fp + 1 (==kCallerPCOffset)] - saved old ra
+ // [fp + 0 (==kCallerFPOffset)] - saved old fp
+ // [fp - 1 (==kSPOffset)] - sp of the called function
+ // [fp - 2 (==kCodeOffset)] - CodeObject
+ // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
+ // new stack (will contain saved ra)
// Save registers.
- addiu(sp, sp, -12);
- sw(t1, MemOperand(sp, 8));
- sw(ra, MemOperand(sp, 4));
- sw(fp, MemOperand(sp, 0));
- mov(fp, sp); // Setup new frame pointer.
-
- // Push debug marker.
- if (mode == ExitFrame::MODE_DEBUG) {
- Push(zero_reg);
- } else {
- li(t0, Operand(CodeObject()));
- Push(t0);
+ addiu(sp, sp, -4 * kPointerSize);
+ sw(ra, MemOperand(sp, 3 * kPointerSize));
+ sw(fp, MemOperand(sp, 2 * kPointerSize));
+  addiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.
+
+ if (emit_debug_code()) {
+ sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
+ li(t8, Operand(CodeObject())); // Accessed from ExitFrame::code_slot.
+ sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
+
// Save the frame pointer and the context in top.
- LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
- sw(fp, MemOperand(t0));
- LoadExternalReference(t0, ExternalReference(Top::k_context_address));
- sw(cp, MemOperand(t0));
+ li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
+ sw(fp, MemOperand(t8));
+ li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
+ sw(cp, MemOperand(t8));
+
+ const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ if (save_doubles) {
+    // The stack must be aligned to 0 modulo 8 for stores with sdc1.
+ ASSERT(kDoubleSize == frame_alignment);
+ if (frame_alignment > 0) {
+ ASSERT(IsPowerOf2(frame_alignment));
+ And(sp, sp, Operand(-frame_alignment)); // Align stack.
+ }
+ int space = FPURegister::kNumRegisters * kDoubleSize;
+ Subu(sp, sp, Operand(space));
+ // Remember: we only need to save every 2nd double FPU value.
+    for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
+ FPURegister reg = FPURegister::from_code(i);
+ sdc1(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ }
+
+ // Reserve place for the return address, stack space and an optional slot
+ // (used by the DirectCEntryStub to hold the return value if a struct is
+ // returned) and align the frame preparing for calling the runtime function.
+ ASSERT(stack_space >= 0);
+ Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
+ if (frame_alignment > 0) {
+ ASSERT(IsPowerOf2(frame_alignment));
+ And(sp, sp, Operand(-frame_alignment)); // Align stack.
+ }
- // Setup argc and the builtin function in callee-saved registers.
- mov(hold_argc, a0);
- mov(hold_function, a1);
+ // Set the exit frame sp value to point just before the return address
+ // location.
+ addiu(at, sp, kPointerSize);
+ sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles,
+ Register argument_count) {
+ // Optionally restore all double registers.
+ if (save_doubles) {
+ // Remember: we only need to restore every 2nd double FPU value.
+ lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
+    for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
+ FPURegister reg = FPURegister::from_code(i);
+ ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
+ }
+ }
+
// Clear top frame.
- LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
- sw(zero_reg, MemOperand(t0));
+ li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
+ sw(zero_reg, MemOperand(t8));
// Restore current context from top and clear it in debug mode.
- LoadExternalReference(t0, ExternalReference(Top::k_context_address));
- lw(cp, MemOperand(t0));
+ li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
+ lw(cp, MemOperand(t8));
#ifdef DEBUG
- sw(a3, MemOperand(t0));
+ sw(a3, MemOperand(t8));
#endif
// Pop the arguments, restore registers, and return.
mov(sp, fp); // Respect ABI stack constraint.
- lw(fp, MemOperand(sp, 0));
- lw(ra, MemOperand(sp, 4));
- lw(sp, MemOperand(sp, 8));
- jr(ra);
- nop(); // Branch delay slot nop.
-}
-
-
-void MacroAssembler::AlignStack(int offset) {
- // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
- // and an offset of 1 aligns to 4 modulo 8 bytes.
- int activation_frame_alignment = OS::ActivationFrameAlignment();
- if (activation_frame_alignment != kPointerSize) {
- // This code needs to be made more general if this assert doesn't hold.
- ASSERT(activation_frame_alignment == 2 * kPointerSize);
- if (offset == 0) {
- andi(t0, sp, activation_frame_alignment - 1);
- Push(zero_reg, eq, t0, zero_reg);
- } else {
- andi(t0, sp, activation_frame_alignment - 1);
- addiu(t0, t0, -4);
- Push(zero_reg, eq, t0, zero_reg);
+ lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
+ lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
+ addiu(sp, sp, 8);
+ if (argument_count.is_valid()) {
+ sll(t8, argument_count, kPointerSizeLog2);
+ addu(sp, sp, t8);
+ }
+}
+
+
+void MacroAssembler::InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2) {
+ sll(scratch1, length, kSmiTagSize);
+ LoadRoot(scratch2, map_index);
+ sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
+ li(scratch1, Operand(String::kEmptyHashField));
+ sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
+ sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
+}
+
+
+int MacroAssembler::ActivationFrameAlignment() {
+#if defined(V8_HOST_ARCH_MIPS)
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one Mips
+ // platform for another Mips platform with a different alignment.
+ return OS::ActivationFrameAlignment();
+#else // defined(V8_HOST_ARCH_MIPS)
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so this is controlled from a
+ // flag.
+ return FLAG_sim_stack_alignment;
+#endif // defined(V8_HOST_ARCH_MIPS)
+}
+
+
+void MacroAssembler::AssertStackIsAligned() {
+ if (emit_debug_code()) {
+ const int frame_alignment = ActivationFrameAlignment();
+ const int frame_alignment_mask = frame_alignment - 1;
+
+ if (frame_alignment > kPointerSize) {
+ Label alignment_as_expected;
+ ASSERT(IsPowerOf2(frame_alignment));
+ andi(at, sp, frame_alignment_mask);
+ Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
+ // Don't use Check here, as it will call Runtime_Abort re-entering here.
+ stop("Unexpected stack alignment");
+ bind(&alignment_as_expected);
+ }
}
+}
+
+
+void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
+ Register reg,
+ Register scratch,
+ Label* not_power_of_two_or_zero) {
+ Subu(scratch, reg, Operand(1));
+ Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
+ scratch, Operand(zero_reg));
+ and_(at, scratch, reg); // In the delay slot.
+ Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
+}
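+// This is the classic x & (x - 1) test: subtracting one clears the lowest
+// set bit, so the AND is zero only for powers of two. The first branch
+// also catches reg == 0, since 0 - 1 wraps to -1 and compares less than
+// zero; the and_ sits in its delay slot (USE_DELAY_SLOT) and executes
+// either way.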
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register reg1,
+ Register reg2,
+ Label* on_not_both_smi) {
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(1, kSmiTagMask);
+ or_(at, reg1, reg2);
+ andi(at, at, kSmiTagMask);
+ Branch(on_not_both_smi, ne, at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register reg1,
+ Register reg2,
+ Label* on_either_smi) {
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(1, kSmiTagMask);
+  // The tag bit of the AND is clear if either input is a smi.
+ and_(at, reg1, reg2);
+ andi(at, at, kSmiTagMask);
+ Branch(on_either_smi, eq, at, Operand(zero_reg));
+}
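+// The two smi tests above exploit kSmiTag == 0 with a one-bit mask:
+// OR-ing the values leaves the tag bit clear only when both are smis
+// (JumpIfNotBothSmi branches when it is set), while AND-ing leaves it
+// clear when at least one is a smi (JumpIfEitherSmi branches when it is
+// clear).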
+
+
+void MacroAssembler::AbortIfSmi(Register object) {
+ STATIC_ASSERT(kSmiTag == 0);
+ andi(at, object, kSmiTagMask);
+ Assert(ne, "Operand is a smi", at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::AbortIfNotSmi(Register object) {
+ STATIC_ASSERT(kSmiTag == 0);
+ andi(at, object, kSmiTagMask);
+  Assert(eq, "Operand is not a smi", at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::AbortIfNotString(Register object) {
+ STATIC_ASSERT(kSmiTag == 0);
+ And(t0, object, Operand(kSmiTagMask));
+ Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
+ push(object);
+ lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
+ Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
+ pop(object);
+}
+
+
+void MacroAssembler::AbortIfNotRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message) {
+ ASSERT(!src.is(at));
+ LoadRoot(at, root_value_index);
+ Assert(eq, message, src, Operand(at));
+}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+ Register heap_number_map,
+ Register scratch,
+ Label* on_not_heap_number) {
+ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
+}
+
+
+void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Test that both first and second are sequential ASCII strings.
+ // Assume that they are non-smis.
+ lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+ lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+
+ JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
+ scratch2,
+ scratch1,
+ scratch2,
+ failure);
+}
+
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Check that neither is a smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ And(scratch1, first, Operand(second));
+ And(scratch1, scratch1, Operand(kSmiTagMask));
+ Branch(failure, eq, scratch1, Operand(zero_reg));
+ JumpIfNonSmisNotBothSequentialAsciiStrings(first,
+ second,
+ scratch1,
+ scratch2,
+ failure);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
+ andi(scratch1, first, kFlatAsciiStringMask);
+ Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
+ andi(scratch2, second, kFlatAsciiStringMask);
+ Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure) {
+ int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ And(scratch, type, Operand(kFlatAsciiStringMask));
+ Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
+}
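+// In both helpers above, kFlatAsciiStringMask keeps only the string-ness,
+// encoding and representation bits of the instance type, so a single
+// masked comparison against ASCII_STRING_TYPE accepts sequential ASCII
+// strings and rejects non-strings, two-byte strings and non-sequential
+// (e.g. cons or external) representations alike.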
+
+
+static const int kRegisterPassedArguments = 4;
+
+void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+ int frame_alignment = ActivationFrameAlignment();
+
+ // Up to four simple arguments are passed in registers a0..a3.
+ // Those four arguments must have reserved argument slots on the stack for
+ // mips, even though those argument slots are not normally used.
+ // Remaining arguments are pushed on the stack, above (higher address than)
+ // the argument slots.
+ ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
+ int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
+ 0 : num_arguments - kRegisterPassedArguments) +
+ (StandardFrameConstants::kCArgsSlotsSize /
+ kPointerSize);
+ if (frame_alignment > kPointerSize) {
+ // Make stack end at alignment and make room for num_arguments - 4 words
+ // and the original value of sp.
+ mov(scratch, sp);
+ Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+ ASSERT(IsPowerOf2(frame_alignment));
+ And(sp, sp, Operand(-frame_alignment));
+ sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
}
}
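+// Usage mirrors the call site in TryCallApiFunctionAndReturn above:
+//   PrepareCallCFunction(1, s1);
+//   li(a0, Operand(ExternalReference::isolate_address()));
+//   CallCFunction(
+//       ExternalReference::delete_handle_scope_extensions(isolate()), 1);
+// CallCFunction restores sp from the saved slot when the frame was
+// aligned, so no matching teardown call is needed.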
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ CallCFunctionHelper(no_reg, function, t8, num_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+ Register scratch,
+ int num_arguments) {
+ CallCFunctionHelper(function,
+ ExternalReference::the_hole_value_location(isolate()),
+ scratch,
+ num_arguments);
+}
+
+
+void MacroAssembler::CallCFunctionHelper(Register function,
+ ExternalReference function_reference,
+ Register scratch,
+ int num_arguments) {
+ // Make sure that the stack is aligned before calling a C function unless
+ // running in the simulator. The simulator has its own alignment check which
+ // provides more information.
+  // The argument slots are presumed to have been set up by
+  // PrepareCallCFunction. The C function must be called via t9, per the MIPS
+  // ABI.
+
+#if defined(V8_HOST_ARCH_MIPS)
+ if (emit_debug_code()) {
+ int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment_mask = frame_alignment - 1;
+ if (frame_alignment > kPointerSize) {
+ ASSERT(IsPowerOf2(frame_alignment));
+ Label alignment_as_expected;
+ And(at, sp, Operand(frame_alignment_mask));
+ Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
+ // Don't use Check here, as it will call Runtime_Abort possibly
+ // re-entering here.
+ stop("Unexpected alignment in CallCFunction");
+ bind(&alignment_as_expected);
+ }
+ }
+#endif // V8_HOST_ARCH_MIPS
+
+ // Just call directly. The function called cannot cause a GC, or
+ // allow preemption, so the return address in the link register
+ // stays correct.
+
+ if (function.is(no_reg)) {
+ function = t9;
+ li(function, Operand(function_reference));
+ } else if (!function.is(t9)) {
+ mov(t9, function);
+ function = t9;
+ }
+
+ Call(function);
+
+ ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
+ int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
+ 0 : num_arguments - kRegisterPassedArguments) +
+ (StandardFrameConstants::kCArgsSlotsSize /
+ kPointerSize);
+
+ if (OS::ActivationFrameAlignment() > kPointerSize) {
+ lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+    Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ }
+}
+
+
+#undef BRANCH_ARGS_CHECK
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+ Register descriptors) {
+ lw(descriptors,
+ FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
+ Label not_smi;
+ JumpIfNotSmi(descriptors, &not_smi);
+ li(descriptors, Operand(FACTORY->empty_descriptor_array()));
+ bind(&not_smi);
+}
+
+
+CodePatcher::CodePatcher(byte* address, int instructions)
+ : address_(address),
+ instructions_(instructions),
+ size_(instructions * Assembler::kInstrSize),
+ masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
+ // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap in order for the assembler to generate
+  // size bytes of instructions without failing with buffer size constraints.
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+ // Indicate that code has changed.
+ CPU::FlushICache(address_, size_);
+
+ // Check that the code was patched as expected.
+ ASSERT(masm_.pc_ == address_ + size_);
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+void CodePatcher::Emit(Instr instr) {
+ masm()->emit(instr);
+}
+
+
+void CodePatcher::Emit(Address addr) {
+ masm()->emit(reinterpret_cast<Instr>(addr));
+}
+
+
+void CodePatcher::ChangeBranchCondition(Condition cond) {
+ Instr instr = Assembler::instr_at(masm_.pc_);
+ ASSERT(Assembler::IsBranch(instr));
+ uint32_t opcode = Assembler::GetOpcodeField(instr);
+ // Currently only the 'eq' and 'ne' cond values are supported and the simple
+ // branch instructions (with opcode being the branch type).
+ // There are some special cases (see Assembler::IsBranch()) so extending this
+ // would be tricky.
+ ASSERT(opcode == BEQ ||
+ opcode == BNE ||
+ opcode == BLEZ ||
+ opcode == BGTZ ||
+ opcode == BEQL ||
+ opcode == BNEL ||
+ opcode == BLEZL ||
+ opcode == BGTZL);
+ opcode = (cond == eq) ? BEQ : BNE;
+ instr = (instr & ~kOpcodeMask) | opcode;
+ masm_.emit(instr);
+}
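+// A CodePatcher is intended to be used in a tight scope: construct it
+// over the instructions to rewrite, emit the replacements, and let the
+// destructor flush the instruction cache. A minimal sketch (address and
+// count are illustrative):
+//   CodePatcher patcher(branch_address, 1);
+//   patcher.ChangeBranchCondition(ne);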
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 0f0365b7c..83bd73e09 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,6 +30,7 @@
#include "assembler.h"
#include "mips/assembler-mips.h"
+#include "v8globals.h"
namespace v8 {
namespace internal {
@@ -37,76 +38,205 @@ namespace internal {
// Forward declaration.
class JumpTarget;
-// Register at is used for instruction generation. So it is not safe to use it
-// unless we know exactly what we do.
+// Reserved Register Usage Summary.
+//
+// Registers t8, t9, and at are reserved for use by the MacroAssembler.
+//
+// The programmer should know that the MacroAssembler may clobber these three,
+// but won't touch other registers except in special cases.
+//
+// Per the MIPS ABI, register t9 must be used for indirect function call
+// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
+// trying to update gp register for position-independent-code. Whenever
+// MIPS generated code calls C code, it must be via t9 register.
// Registers aliases
// cp is assumed to be a callee saved register.
-const Register cp = s7; // JavaScript context pointer
-const Register fp = s8_fp; // Alias fp
+const Register roots = s6; // Roots array pointer.
+const Register cp = s7; // JavaScript context pointer.
+const Register fp = s8_fp; // Alias for fp.
+// Registers used for condition evaluation.
+const Register condReg1 = s4;
+const Register condReg2 = s5;
+
+
+// Flags used for the AllocateInNewSpace functions.
+enum AllocationFlags {
+ // No special flags.
+ NO_ALLOCATION_FLAGS = 0,
+  // Return the pointer to the allocated memory already tagged as a heap
+  // object.
+ TAG_OBJECT = 1 << 0,
+ // The content of the result register already contains the allocation top in
+ // new space.
+ RESULT_CONTAINS_TOP = 1 << 1,
+ // Specify that the requested size of the space to allocate is specified in
+ // words instead of bytes.
+ SIZE_IN_WORDS = 1 << 2
+};
+
+// Flags used for the ObjectToDoubleFPURegister function.
+enum ObjectToDoubleFlags {
+ // No special flags.
+ NO_OBJECT_TO_DOUBLE_FLAGS = 0,
+ // Object is known to be a non smi.
+ OBJECT_NOT_SMI = 1 << 0,
+ // Don't load NaNs or infinities, branch to the non number case instead.
+ AVOID_NANS_AND_INFINITIES = 1 << 1
+};
-enum InvokeJSFlags {
- CALL_JS,
- JUMP_JS
+// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
+enum BranchDelaySlot {
+ USE_DELAY_SLOT,
+ PROTECT
};
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- MacroAssembler(void* buffer, int size);
+ // The isolate parameter can be NULL if the macro assembler should
+ // not use isolate-dependent functionality. In this case, it's the
+  // responsibility of the caller to never invoke such functions on the
+ // macro assembler.
+ MacroAssembler(Isolate* isolate, void* buffer, int size);
+
+// Arguments macros.
+#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
+#define COND_ARGS cond, r1, r2
+
+// Prototypes.
+
+// Prototypes for functions with no target (e.g. Ret()).
+#define DECLARE_NOTARGET_PROTOTYPE(Name) \
+ void Name(BranchDelaySlot bd = PROTECT); \
+ void Name(COND_TYPED_ARGS, BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, COND_TYPED_ARGS) { \
+ Name(COND_ARGS, bd); \
+ }
- // Jump, Call, and Ret pseudo instructions implementing inter-working.
- void Jump(const Operand& target,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Call(const Operand& target,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Jump(Register target,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Jump(byte* target, RelocInfo::Mode rmode,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Jump(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Call(Register target,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Call(byte* target, RelocInfo::Mode rmode,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Call(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Ret(Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Branch(Condition cond, int16_t offset, Register rs = zero_reg,
- const Operand& rt = Operand(zero_reg), Register scratch = at);
- void Branch(Condition cond, Label* L, Register rs = zero_reg,
- const Operand& rt = Operand(zero_reg), Register scratch = at);
-  // Conditional branch and link.
- void BranchAndLink(Condition cond, int16_t offset, Register rs = zero_reg,
- const Operand& rt = Operand(zero_reg),
- Register scratch = at);
- void BranchAndLink(Condition cond, Label* L, Register rs = zero_reg,
- const Operand& rt = Operand(zero_reg),
- Register scratch = at);
+// Prototypes for functions with a target.
+
+// Cases when relocation may be needed.
+#define DECLARE_RELOC_PROTOTYPE(Name, target_type) \
+ void Name(target_type target, \
+ RelocInfo::Mode rmode, \
+ BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, \
+ target_type target, \
+ RelocInfo::Mode rmode) { \
+ Name(target, rmode, bd); \
+ } \
+ void Name(target_type target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS, \
+ BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, \
+ target_type target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS) { \
+ Name(target, rmode, COND_ARGS, bd); \
+ }
+
+// Cases when relocation is not needed.
+#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
+ void Name(target_type target, BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, target_type target) { \
+ Name(target, bd); \
+ } \
+ void Name(target_type target, \
+ COND_TYPED_ARGS, \
+ BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, \
+ target_type target, \
+ COND_TYPED_ARGS) { \
+ Name(target, COND_ARGS, bd); \
+ }
+
+// Target prototypes.
+
+#define DECLARE_JUMP_CALL_PROTOTYPES(Name) \
+ DECLARE_NORELOC_PROTOTYPE(Name, Register) \
+ DECLARE_NORELOC_PROTOTYPE(Name, const Operand&) \
+ DECLARE_RELOC_PROTOTYPE(Name, byte*) \
+ DECLARE_RELOC_PROTOTYPE(Name, Handle<Code>)
+
+#define DECLARE_BRANCH_PROTOTYPES(Name) \
+ DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
+ DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
+
+
+DECLARE_JUMP_CALL_PROTOTYPES(Jump)
+DECLARE_JUMP_CALL_PROTOTYPES(Call)
+
+DECLARE_BRANCH_PROTOTYPES(Branch)
+DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
+
+DECLARE_NOTARGET_PROTOTYPE(Ret)
+
+#undef COND_TYPED_ARGS
+#undef COND_ARGS
+#undef DECLARE_NOTARGET_PROTOTYPE
+#undef DECLARE_NORELOC_PROTOTYPE
+#undef DECLARE_RELOC_PROTOTYPE
+#undef DECLARE_JUMP_CALL_PROTOTYPES
+#undef DECLARE_BRANCH_PROTOTYPES
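+ // As an example of what the macros above declare,
+ // DECLARE_JUMP_CALL_PROTOTYPES(Jump) produces, among other overloads:
+ //   void Jump(Register target, BranchDelaySlot bd = PROTECT);
+ //   void Jump(Handle<Code> target, RelocInfo::Mode rmode,
+ //             Condition cond, Register r1, const Operand& r2,
+ //             BranchDelaySlot bd = PROTECT);
+ // so every jump, call and branch flavor can name its delay slot policy.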
+
+ void CallWithAstId(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id,
+ Condition cond = al,
+ Register r1 = zero_reg,
+ const Operand& r2 = Operand(zero_reg));
+
+ int CallSize(Register reg);
+ int CallSize(Handle<Code> code, RelocInfo::Mode rmode);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
- void Drop(int count, Condition cond = cc_always);
+ void Drop(int count,
+ Condition cond = cc_always,
+ Register reg = no_reg,
+ const Operand& op = Operand(no_reg));
+
+ void DropAndRet(int drop = 0,
+ Condition cond = cc_always,
+ Register reg = no_reg,
+ const Operand& op = Operand(no_reg));
+
+ // Swap two registers. If the scratch register is omitted then a slightly
+ // less efficient form using xor instead of mov is emitted.
+ void Swap(Register reg1, Register reg2, Register scratch = no_reg);
void Call(Label* target);
+ inline void Move(Register dst, Register src) {
+ if (!dst.is(src)) {
+ mov(dst, src);
+ }
+ }
+
+ inline void Move(FPURegister dst, FPURegister src) {
+ if (!dst.is(src)) {
+ mov_d(dst, src);
+ }
+ }
+
+ inline void Move(Register dst_low, Register dst_high, FPURegister src) {
+ mfc1(dst_low, src);
+ mfc1(dst_high, FPURegister::from_code(src.code() + 1));
+ }
+
+ inline void Move(FPURegister dst, Register src_low, Register src_high) {
+ mtc1(src_low, dst);
+ mtc1(src_high, FPURegister::from_code(dst.code() + 1));
+ }
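+  // The two-register Move overloads view a double as an even/odd FPU
+  // register pair: the named FPURegister carries the low word and
+  // FPURegister::from_code(code() + 1) the high word, which matches the
+  // "save every 2nd FPU register" convention used in EnterExitFrame.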
+
// Jump unconditionally to given label.
// We NEED a nop in the branch delay slot, as it is used by v8, for example in
// CodeGenerator::ProcessDeferred().
// Currently the branch delay slot is filled by the MacroAssembler.
// Use rather b(Label) for code generation.
void jmp(Label* L) {
- Branch(cc_always, L);
+ Branch(L);
}
// Load an object from the root table.
@@ -116,19 +246,164 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond, Register src1, const Operand& src2);
- // Load an external reference.
- void LoadExternalReference(Register reg, ExternalReference ext) {
- li(reg, Operand(ext));
+ // Store an object to the root table.
+ void StoreRoot(Register source,
+ Heap::RootListIndex index);
+ void StoreRoot(Register source,
+ Heap::RootListIndex index,
+ Condition cond, Register src1, const Operand& src2);
+
+
+ // Check if object is in new space.
+ // scratch can be object itself, but it will be clobbered.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cc, // eq for new space, ne otherwise.
+ Label* branch);
+
+
+ // For the page containing |object| mark the region covering [address]
+ // dirty. The object address must be in the first 8K of an allocated page.
+ void RecordWriteHelper(Register object,
+ Register address,
+ Register scratch);
+
+ // For the page containing |object| mark the region covering
+ // [object+offset] dirty. The object address must be in the first 8K
+ // of an allocated page. The 'scratch' registers are used in the
+ // implementation and all 3 registers are clobbered by the
+ // operation, as well as the 'at' register. RecordWrite updates the
+ // write barrier even when storing smis.
+ void RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1);
+
+ // For the page containing |object| mark the region covering
+ // [address] dirty. The object address must be in the first 8K of an
+ // allocated page. All 3 registers are clobbered by the operation,
+  // as well as the 'at' register. RecordWrite updates the write barrier
+ // even when storing smis.
+ void RecordWrite(Register object,
+ Register address,
+ Register scratch);
+
+
+ // ---------------------------------------------------------------------------
+ // Inline caching support.
+
+ // Generate code for checking access rights - used for security checks
+ // on access to global objects across environments. The holder register
+ // is left untouched, whereas both scratch registers are clobbered.
+ void CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch,
+ Label* miss);
+
+ inline void MarkCode(NopMarkerTypes type) {
+ nop(type);
}
- // Sets the remembered set bit for [address+offset].
- void RecordWrite(Register object, Register offset, Register scratch);
+ // Check if the given instruction is a 'type' marker.
+  // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
+  // nop(type)). These instructions are generated to mark special locations in
+ // the code, like some special IC code.
+ static inline bool IsMarkedCode(Instr instr, int type) {
+ ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
+ return IsNop(instr, type);
+ }
+ static inline int GetCodeMarker(Instr instr) {
+ uint32_t opcode = ((instr & kOpcodeMask));
+ uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
+ uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
+ uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
+
+ // Return <n> if we have a sll zero_reg, zero_reg, n
+ // else return -1.
+ bool sllzz = (opcode == SLL &&
+ rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+ rs == static_cast<uint32_t>(ToNumber(zero_reg)));
+ int type =
+ (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
+ ASSERT((type == -1) ||
+ ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
+ return type;
+ }
+
+
+
+ // ---------------------------------------------------------------------------
+ // Allocation support.
+
+ // Allocate an object in new space. The object_size is specified
+ // either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. If the new space is exhausted control continues at the
+ // gc_required label. The allocated object is returned in result. If
+  // the TAG_OBJECT flag is passed the result is tagged as a heap
+  // object. All registers are clobbered also when control
+ // continues at the gc_required label.
+ void AllocateInNewSpace(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+ void AllocateInNewSpace(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ // Undo allocation in new space. The object passed and objects allocated after
+ // it will no longer be allocated. The caller must make sure that no pointers
+ // are left to the object(s) no longer allocated as they would be invalid when
+ // allocation is undone.
+ void UndoAllocationInNewSpace(Register object, Register scratch);
+
+
+ void AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Allocates a heap number or jumps to the gc_required label if the young
+ // space is full and a scavenge is needed. All registers are clobbered also
+ // when control continues at the gc_required label.
+ void AllocateHeapNumber(Register result,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* gc_required);
+ void AllocateHeapNumberWithValue(Register result,
+ FPURegister value,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
// ---------------------------------------------------------------------------
- // Instruction macros
+ // Instruction macros.
-#define DEFINE_INSTRUCTION(instr) \
+#define DEFINE_INSTRUCTION(instr) \
void instr(Register rd, Register rs, const Operand& rt); \
void instr(Register rd, Register rs, Register rt) { \
instr(rd, rs, Operand(rt)); \
@@ -137,7 +412,7 @@ class MacroAssembler: public Assembler {
instr(rs, rt, Operand(j)); \
}
-#define DEFINE_INSTRUCTION2(instr) \
+#define DEFINE_INSTRUCTION2(instr) \
void instr(Register rs, const Operand& rt); \
void instr(Register rs, Register rt) { \
instr(rs, Operand(rt)); \
@@ -146,8 +421,8 @@ class MacroAssembler: public Assembler {
instr(rs, Operand(j)); \
}
- DEFINE_INSTRUCTION(Add);
DEFINE_INSTRUCTION(Addu);
+ DEFINE_INSTRUCTION(Subu);
DEFINE_INSTRUCTION(Mul);
DEFINE_INSTRUCTION2(Mult);
DEFINE_INSTRUCTION2(Multu);
@@ -158,46 +433,75 @@ class MacroAssembler: public Assembler {
DEFINE_INSTRUCTION(Or);
DEFINE_INSTRUCTION(Xor);
DEFINE_INSTRUCTION(Nor);
+ DEFINE_INSTRUCTION2(Neg);
DEFINE_INSTRUCTION(Slt);
DEFINE_INSTRUCTION(Sltu);
+ // MIPS32 R2 instruction macro.
+ DEFINE_INSTRUCTION(Ror);
+
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
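+
+ // Expansion sketch, for reference: DEFINE_INSTRUCTION(Addu) above declares
+ // the three overloads
+ //
+ //   void Addu(Register rd, Register rs, const Operand& rt);
+ //   void Addu(Register rd, Register rs, Register rt);  // wraps rt
+ //   void Addu(Register rs, Register rt, int32_t j);    // wraps Operand(j)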
- //------------Pseudo-instructions-------------
+ // ---------------------------------------------------------------------------
+ // Pseudo-instructions.
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
- // Move the logical ones complement of source to dest.
- void movn(Register rd, Register rt);
-
- // load int32 in the rd register
+ // Load int32 in the rd register.
void li(Register rd, Operand j, bool gen2instr = false);
inline void li(Register rd, int32_t j, bool gen2instr = false) {
li(rd, Operand(j), gen2instr);
}
-
- // Exception-generating instructions and debugging support
- void stop(const char* msg);
-
+ inline void li(Register dst, Handle<Object> value, bool gen2instr = false) {
+ li(dst, Operand(value), gen2instr);
+ }
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
- // saved in higher memory addresses
+ // saved in higher memory addresses.
void MultiPush(RegList regs);
void MultiPushReversed(RegList regs);
- void Push(Register src) {
+
+ // Lower case push() for compatibility with arch-independent code.
+ void push(Register src) {
Addu(sp, sp, Operand(-kPointerSize));
sw(src, MemOperand(sp, 0));
}
- inline void push(Register src) { Push(src); }
+
+ // Push a handle.
+ void Push(Handle<Object> handle);
+
+ // Push two registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2) {
+ Subu(sp, sp, Operand(2 * kPointerSize));
+ sw(src1, MemOperand(sp, 1 * kPointerSize));
+ sw(src2, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push three registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3) {
+ Subu(sp, sp, Operand(3 * kPointerSize));
+ sw(src1, MemOperand(sp, 2 * kPointerSize));
+ sw(src2, MemOperand(sp, 1 * kPointerSize));
+ sw(src3, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push four registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4) {
+ Subu(sp, sp, Operand(4 * kPointerSize));
+ sw(src1, MemOperand(sp, 3 * kPointerSize));
+ sw(src2, MemOperand(sp, 2 * kPointerSize));
+ sw(src3, MemOperand(sp, 1 * kPointerSize));
+ sw(src4, MemOperand(sp, 0 * kPointerSize));
+ }
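+
+ // For example, Push(a0, a1) leaves the stack as push(a0) followed by
+ // push(a1) would: a0 at the higher address and a1 at the new sp.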
void Push(Register src, Condition cond, Register tst1, Register tst2) {
- // Since we don't have conditionnal execution we use a Branch.
- Branch(cond, 3, tst1, Operand(tst2));
- Addu(sp, sp, Operand(-kPointerSize));
+ // Since we don't have conditional execution we use a Branch.
+ Branch(3, cond, tst1, Operand(tst2));
+ Subu(sp, sp, Operand(kPointerSize));
sw(src, MemOperand(sp, 0));
}
@@ -205,137 +509,365 @@ class MacroAssembler: public Assembler {
// registers specified in regs. Pop order is the opposite as in MultiPush.
void MultiPop(RegList regs);
void MultiPopReversed(RegList regs);
- void Pop(Register dst) {
+
+ // Lower case pop() for compatibility with arch-independent code.
+ void pop(Register dst) {
lw(dst, MemOperand(sp, 0));
Addu(sp, sp, Operand(kPointerSize));
}
- void Pop() {
- Add(sp, sp, Operand(kPointerSize));
+
+ // Pop two registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2) {
+ ASSERT(!src1.is(src2));
+ lw(src2, MemOperand(sp, 0 * kPointerSize));
+ lw(src1, MemOperand(sp, 1 * kPointerSize));
+ Addu(sp, sp, 2 * kPointerSize);
}
+ void Pop(uint32_t count = 1) {
+ Addu(sp, sp, Operand(count * kPointerSize));
+ }
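+
+ // Illustrative pairing (not part of this patch): Push(a1, a2) followed
+ // later by Pop(a1, a2) restores both registers, since Pop loads in the
+ // reverse of the push order.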
- // ---------------------------------------------------------------------------
- // Activation frames
+ // Push and pop the registers that can hold pointers, as defined by the
+ // RegList constant kSafepointSavedRegisters.
+ void PushSafepointRegisters();
+ void PopSafepointRegisters();
+ void PushSafepointRegistersAndDoubles();
+ void PopSafepointRegistersAndDoubles();
+ // Store value in register src in the safepoint stack slot for
+ // register dst.
+ void StoreToSafepointRegisterSlot(Register src, Register dst);
+ void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
+ // Load the value of the src register from its safepoint stack slot
+ // into register dst.
+ void LoadFromSafepointRegisterSlot(Register dst, Register src);
+
+ // MIPS32 R2 instruction macro.
+ void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+
+ // Convert unsigned word to double.
+ void Cvt_d_uw(FPURegister fd, FPURegister fs);
+ void Cvt_d_uw(FPURegister fd, Register rs);
+
+ // Convert double to unsigned word.
+ void Trunc_uw_d(FPURegister fd, FPURegister fs);
+ void Trunc_uw_d(FPURegister fd, Register rs);
+
+ // Convert the HeapNumber pointed to by source to a 32-bit signed integer
+ // in dest. If the HeapNumber does not fit into a 32-bit signed integer,
+ // branch to the not_int32 label. If FPU is available, double_scratch is
+ // used but not scratch2.
+ void ConvertToInt32(Register source,
+ Register dest,
+ Register scratch,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label* not_int32);
+
+ // Helper for EmitECMATruncate.
+ // This will truncate a floating-point value outside of the signed 32-bit
+ // integer range to a 32-bit signed integer.
+ // Expects the double value loaded in input_high and input_low.
+ // Exits with the answer in 'result'.
+ // Note that this code does not work for values in the 32-bit range!
+ void EmitOutOfInt32RangeTruncate(Register result,
+ Register input_high,
+ Register input_low,
+ Register scratch);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer and all other registers clobbered.
+ void EmitECMATruncate(Register result,
+ FPURegister double_input,
+ FPURegister single_scratch,
+ Register scratch,
+ Register scratch2,
+ Register scratch3);
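+
+ // For example, under ECMA-262 9.5 the double 4294967298.0 (2^32 + 2)
+ // truncates to the int32 value 2, while NaN and the infinities map to 0.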
+
+ // -------------------------------------------------------------------------
+ // Activation frames.
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
- // Enter specific kind of exit frame; either EXIT or
- // EXIT_DEBUG. Expects the number of arguments in register a0 and
- // the builtin function to call in register a1.
- // On output hold_argc, hold_function, and hold_argv are setup.
- void EnterExitFrame(ExitFrame::Mode mode,
- Register hold_argc,
- Register hold_argv,
- Register hold_function);
+ void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+ void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
- // Leave the current exit frame. Expects the return value in v0.
- void LeaveExitFrame(ExitFrame::Mode mode);
+ // Enter exit frame.
+ // argc - argument count to be dropped by LeaveExitFrame.
+ // save_doubles - saves FPU registers on stack, currently disabled.
+ // stack_space - extra stack space.
+ void EnterExitFrame(bool save_doubles,
+ int stack_space = 0);
- // Align the stack by optionally pushing a Smi zero.
- void AlignStack(int offset);
+ // Leave the current exit frame.
+ void LeaveExitFrame(bool save_doubles, Register arg_count);
- void SetupAlignedCall(Register scratch, int arg_count = 0);
- void ReturnFromAlignedCall();
+ // Get the actual activation frame alignment for target environment.
+ static int ActivationFrameAlignment();
+ // Make sure the stack is aligned. Only emits code in debug mode.
+ void AssertStackIsAligned();
- // ---------------------------------------------------------------------------
- // JavaScript invokes
+ void LoadContext(Register dst, int context_chain_length);
+
+ void LoadGlobalFunction(int index, Register function);
+
+ // Load the initial map from the global function. The registers
+ // function and map can be the same, function is then overwritten.
+ void LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch);
+
+ // -------------------------------------------------------------------------
+ // JavaScript invokes.
+
+ // Set up call kind marking in t1. The method takes t1 as an
+ // explicit first parameter to make the code more readable at the
+ // call sites.
+ void SetCallKind(Register dst, CallKind kind);
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
- InvokeFlag flag);
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
- InvokeFlag flag);
+ InvokeFlag flag,
+ CallKind call_kind);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
- InvokeFlag flag);
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
+
+ void InvokeFunction(JSFunction* function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ CallKind call_kind);
+
+ void IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail);
+
+ void IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail);
+
+ void IsObjectJSStringType(Register object,
+ Register scratch,
+ Label* fail);
#ifdef ENABLE_DEBUGGER_SUPPORT
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void SaveRegistersToMemory(RegList regs);
- void RestoreRegistersFromMemory(RegList regs);
- void CopyRegistersFromMemoryToStack(Register base, RegList regs);
- void CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs);
+ // -------------------------------------------------------------------------
+ // Debugger Support.
+
void DebugBreak();
#endif
- // ---------------------------------------------------------------------------
- // Exception handling
+ // -------------------------------------------------------------------------
+ // Exception handling.
// Push a new try handler and link into try handler chain.
// The return address must be passed in register ra.
+ // Clobber t0, t1, t2.
void PushTryHandler(CodeLocation try_location, HandlerType type);
// Unlink the stack handler on top of the stack from the try handler chain.
// Must preserve the result register.
void PopTryHandler();
+ // Passes thrown value (in v0) to the handler on top of the try handler chain.
+ void Throw(Register value);
- // ---------------------------------------------------------------------------
+ // Propagates an uncatchable exception to the top of the current JS stack's
+ // handler chain.
+ void ThrowUncatchable(UncatchableExceptionType type, Register value);
+
+ // Copies a fixed number of fields of heap objects from src to dst.
+ void CopyFields(Register dst, Register src, RegList temps, int field_count);
+
+ // Copies a number of bytes from src to dst. All registers are clobbered. On
+ // exit src and dst will point to the place just after where the last byte was
+ // read or written and length will be zero.
+ void CopyBytes(Register src,
+ Register dst,
+ Register length,
+ Register scratch);
+
+ // -------------------------------------------------------------------------
// Support functions.
+ // Try to get the function prototype of a function and put the value in
+ // the result register. Check that the function really is a
+ // function and jump to the miss label if the fast checks fail. The
+ // function register will be untouched; the other registers may be
+ // clobbered.
+ void TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss);
+
void GetObjectType(Register function,
Register map,
Register type_reg);
- inline void BranchOnSmi(Register value, Label* smi_label,
- Register scratch = at) {
- ASSERT_EQ(0, kSmiTag);
- andi(scratch, value, kSmiTagMask);
- Branch(eq, smi_label, scratch, Operand(zero_reg));
- }
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check if the map of an object is equal to a specified map (either
+ // given directly or as an index into the root list) and branch to
+ // label if not. Skip the smi check if not required (object is known
+ // to be a heap object).
+ void CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ void CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ // Check if the map of an object is equal to a specified map and branch to a
+ // specified target if equal. Skip the smi check if not required (object is
+ // known to be a heap object).
+ void DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type);
+ // Generates code for reporting that an illegal operation has
+ // occurred.
+ void IllegalOperation(int num_arguments);
- inline void BranchOnNotSmi(Register value, Label* not_smi_label,
- Register scratch = at) {
- ASSERT_EQ(0, kSmiTag);
- andi(scratch, value, kSmiTagMask);
- Branch(ne, not_smi_label, scratch, Operand(zero_reg));
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
+
+ // Get the number of least significant bits from a register.
+ void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+ void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
+
+ // Load the value of a number object into an FPU double register. If the
+ // object is not a number, a jump to the label not_number is performed
+ // and the FPU double register is unchanged.
+ void ObjectToDoubleFPURegister(
+ Register object,
+ FPURegister value,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* not_number,
+ ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
+
+ // Load the value of a smi object into a FPU double register. The register
+ // scratch1 can be the same register as smi in which case smi will hold the
+ // untagged value afterwards.
+ void SmiToDoubleFPURegister(Register smi,
+ FPURegister value,
+ Register scratch1);
+
+ // -------------------------------------------------------------------------
+ // Overflow handling functions.
+ // Usage: first call the appropriate arithmetic function, then call one of the
+ // jump functions with the overflow_dst register as the second parameter.
+
+ void AdduAndCheckForOverflow(Register dst,
+ Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch = at);
+
+ void SubuAndCheckForOverflow(Register dst,
+ Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch = at);
+
+ void BranchOnOverflow(Label* label,
+ Register overflow_check,
+ BranchDelaySlot bd = PROTECT) {
+ Branch(label, lt, overflow_check, Operand(zero_reg), bd);
}
- void CallBuiltin(ExternalReference builtin_entry);
- void CallBuiltin(Register target);
- void JumpToBuiltin(ExternalReference builtin_entry);
- void JumpToBuiltin(Register target);
+ void BranchOnNoOverflow(Label* label,
+ Register overflow_check,
+ BranchDelaySlot bd = PROTECT) {
+ Branch(label, ge, overflow_check, Operand(zero_reg), bd);
+ }
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
+ void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
+ Ret(lt, overflow_check, Operand(zero_reg), bd);
+ }
+ void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
+ Ret(ge, overflow_check, Operand(zero_reg), bd);
+ }
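+
+ // A minimal usage sketch (illustrative; the register choice is an
+ // assumption, not part of this patch):
+ //
+ //   Label overflow;
+ //   __ AdduAndCheckForOverflow(v0, a0, a1, t3);  // t3 < 0 iff overflow.
+ //   __ BranchOnOverflow(&overflow, t3);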
- // ---------------------------------------------------------------------------
- // Runtime calls
+ // -------------------------------------------------------------------------
+ // Runtime calls.
// Call a code stub.
void CallStub(CodeStub* stub, Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void CallJSExitStub(CodeStub* stub);
- // Return from a code stub after popping its arguments.
- void StubReturn(int argc);
+ // Call a code stub and return the code object called. Try to generate
+ // the code if necessary. Do not perform a GC but instead return a retry
+ // after GC failure.
+ MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub,
+ Condition cond = cc_always,
+ Register r1 = zero_reg,
+ const Operand& r2 =
+ Operand(zero_reg));
+
+ // Tail call a code stub (jump).
+ void TailCallStub(CodeStub* stub);
+
+ // Tail call a code stub (jump) and return the code object called. Try to
+ // generate the code if necessary. Do not perform a GC but instead return
+ // a retry after GC failure.
+ MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
+ Condition cond = cc_always,
+ Register r1 = zero_reg,
+ const Operand& r2 =
+ Operand(zero_reg));
+
+ void CallJSExitStub(CodeStub* stub);
// Call a runtime routine.
- void CallRuntime(Runtime::Function* f, int num_arguments);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
+ // Convenience function: call an external reference.
+ void CallExternalReference(const ExternalReference& ext,
+ int num_arguments);
+
// Tail call of a runtime routine (jump).
// Like JumpToExternalReference, but also takes care of passing the number
// of parameters.
@@ -343,40 +875,85 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
+ // Tail call of a runtime routine (jump). Try to generate the code if
+ // necessary. Do not perform a GC but instead return a retry after GC
+ // failure.
+ MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
+ const ExternalReference& ext, int num_arguments, int result_size);
+
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
+ // Before calling a C-function from generated code, align arguments on stack
+ // and add space for the four MIPS argument slots.
+ // After aligning the frame, non-register arguments must be stored on the
+ // stack, after the argument-slots using helper: CFunctionArgumentOperand().
+ // The argument count assumes all arguments are word sized.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_arguments, Register scratch);
+
+ // Arguments 1-4 are placed in registers a0 through a3 respectively.
+ // Arguments 5..n are stored on the stack using the following:
+ // sw(t0, CFunctionArgumentOperand(5));
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, Register scratch, int num_arguments);
+ void GetCFunctionDoubleResult(const DoubleRegister dst);
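+
+ // A minimal call sketch (illustrative; 'function' is some ExternalReference
+ // and the register choices are assumptions, not part of this patch):
+ //
+ //   __ PrepareCallCFunction(5, t0);          // Five word-sized arguments.
+ //   __ sw(t1, CFunctionArgumentOperand(5));  // Argument 5 goes on the stack.
+ //   // Arguments 1-4 are placed in a0..a3 before the call.
+ //   __ CallCFunction(function, 5);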
+
+ // There are two ways of passing double arguments on MIPS, depending on
+ // whether soft or hard floating point ABI is used. These functions
+ // abstract parameter passing for the three different ways we call
+ // C functions from generated code.
+ void SetCallCDoubleArguments(DoubleRegister dreg);
+ void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
+ void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
+
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions. Restores context.
+ MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
+ int stack_space);
+
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin);
+ MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
+
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags);
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper = NullCallWrapper());
// Store the code object for the given builtin in the target register and
- // setup the function in r1.
+ // set up the function in a1.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
struct Unresolved {
int pc;
- uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.
+ uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
const char* name;
};
- List<Unresolved>* unresolved() { return &unresolved_; }
- Handle<Object> CodeObject() { return code_object_; }
-
-
- // ---------------------------------------------------------------------------
- // Stack limit support
-
- void StackLimitCheck(Label* on_stack_limit_hit);
+ Handle<Object> CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+ }
-
- // ---------------------------------------------------------------------------
- // StatsCounter support
+ // -------------------------------------------------------------------------
+ // StatsCounter support.
void SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
@@ -386,12 +963,14 @@ class MacroAssembler: public Assembler {
Register scratch1, Register scratch2);
- // ---------------------------------------------------------------------------
- // Debugging
+ // -------------------------------------------------------------------------
+ // Debugging.
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg, Register rs, Operand rt);
+ void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
+ void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg, Register rs, Operand rt);
@@ -405,17 +984,157 @@ class MacroAssembler: public Assembler {
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
- private:
- List<Unresolved> unresolved_;
- bool generating_stub_;
- bool allow_stub_calls_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
+ // ---------------------------------------------------------------------------
+ // Number utilities.
+
+ // Check whether the value of reg is a power of two and not zero. If not
+ // control continues at the label not_power_of_two. If reg is a power of two
+ // the register scratch contains the value of (reg - 1) when control falls
+ // through.
+ void JumpIfNotPowerOfTwoOrZero(Register reg,
+ Register scratch,
+ Label* not_power_of_two_or_zero);
+
+ // -------------------------------------------------------------------------
+ // Smi utilities.
+
+ // Try to convert int32 to smi. If the value is too large, preserve
+ // the original value and jump to not_a_smi. Destroys scratch and
+ // sets flags.
+ // This is only used by Crankshaft at the moment, so it is unimplemented on
+ // MIPS.
+ void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ void SmiTag(Register reg) {
+ Addu(reg, reg, reg);
+ }
+ void SmiTag(Register dst, Register src) {
+ Addu(dst, src, src);
+ }
+
+ void SmiUntag(Register reg) {
+ sra(reg, reg, kSmiTagSize);
+ }
+
+ void SmiUntag(Register dst, Register src) {
+ sra(dst, src, kSmiTagSize);
+ }
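+
+ // Sketch of the encoding assumed here: with kSmiTag == 0 and
+ // kSmiTagSize == 1, tagging doubles the value (5 tags to 10) and
+ // untagging shifts it back (10 untags to 5).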
+
+ // Jump if the register contains a smi.
+ inline void JumpIfSmi(Register value, Label* smi_label,
+ Register scratch = at) {
+ ASSERT_EQ(0, kSmiTag);
+ andi(scratch, value, kSmiTagMask);
+ Branch(smi_label, eq, scratch, Operand(zero_reg));
+ }
+
+ // Jump if the register contains a non-smi.
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label,
+ Register scratch = at) {
+ ASSERT_EQ(0, kSmiTag);
+ andi(scratch, value, kSmiTagMask);
+ Branch(not_smi_label, ne, scratch, Operand(zero_reg));
+ }
+
+ // Jump if either of the registers contains a non-smi.
+ void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
+ // Jump if either of the registers contains a smi.
+ void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+
+ // Abort execution if argument is a smi (AbortIfSmi) or, respectively, is
+ // not a smi (AbortIfNotSmi). Used in debug code.
+ void AbortIfSmi(Register object);
+ void AbortIfNotSmi(Register object);
+
+ // Abort execution if argument is not a string. Used in debug code.
+ void AbortIfNotString(Register object);
+
+ // Abort execution if argument is not the root value with the given index.
+ void AbortIfNotRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message);
+
+ // ---------------------------------------------------------------------------
+ // HeapNumber utilities.
+
+ void JumpIfNotHeapNumber(Register object,
+ Register heap_number_map,
+ Register scratch,
+ Label* on_not_heap_number);
+
+ // -------------------------------------------------------------------------
+ // String utilities.
+
+ // Checks if both instance types are sequential ASCII strings and jumps to
+ // label if either is not.
+ void JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Check if instance type is sequential ASCII string and jump to label if
+ // it is not.
+ void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure);
+
+ // Test that both first and second are sequential ASCII strings.
+ // Assume that they are non-smis.
+ void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Test that both first and second are sequential ASCII strings.
+ // Check that they are non-smis.
+ void JumpIfNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ void LoadInstanceDescriptors(Register map, Register descriptors);
+
+ private:
+ void CallCFunctionHelper(Register function,
+ ExternalReference function_reference,
+ Register scratch,
+ int num_arguments);
+
+ void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
+ void BranchShort(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+ void BranchShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void J(Label* L, BranchDelaySlot bdslot);
+ void Jr(Label* L, BranchDelaySlot bdslot);
+ void Jalr(Label* L, BranchDelaySlot bdslot);
+
+ void Jump(intptr_t target, RelocInfo::Mode rmode,
+ BranchDelaySlot bd = PROTECT);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
+ BranchDelaySlot bd = PROTECT);
+ void Call(intptr_t target, RelocInfo::Mode rmode,
+ BranchDelaySlot bd = PROTECT);
void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
+ BranchDelaySlot bd = PROTECT);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
@@ -423,28 +1142,102 @@ class MacroAssembler: public Assembler {
Handle<Code> code_constant,
Register code_reg,
Label* done,
- InvokeFlag flag);
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
// Get the code for the given builtin. Returns if able to resolve
// the function in the 'resolved' flag.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
// Activation support.
- // EnterFrame clobbers t0 and t1.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
+
+ void InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2);
+
+ // Compute memory operands for safepoint stack slots.
+ static int SafepointRegisterStackIndex(int reg_code);
+ MemOperand SafepointRegisterSlot(Register reg);
+ MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+
+ bool UseAbsoluteCodePointers();
+
+ bool generating_stub_;
+ bool allow_stub_calls_;
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
+
+ // Needs access to SafepointRegisterStackIndex for optimized frame
+ // traversal.
+ friend class OptimizedFrame;
+};
+
+
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to emit
+// relocation information. If any of these constraints are violated it causes
+// an assertion to fail.
+class CodePatcher {
+ public:
+ CodePatcher(byte* address, int instructions);
+ virtual ~CodePatcher();
+
+ // Macro assembler to emit code.
+ MacroAssembler* masm() { return &masm_; }
+
+ // Emit an instruction directly.
+ void Emit(Instr instr);
+
+ // Emit an address directly.
+ void Emit(Address addr);
+
+ // Change the condition part of an instruction leaving the rest of the current
+ // instruction unchanged.
+ void ChangeBranchCondition(Condition cond);
+
+ private:
+ byte* address_; // The address of the code being patched.
+ int instructions_; // Number of instructions of the expected patch size.
+ int size_; // Number of bytes of the expected patch size.
+ MacroAssembler masm_; // Macro assembler used to generate the code.
};
// -----------------------------------------------------------------------------
// Static helper functions.
+static MemOperand ContextOperand(Register context, int index) {
+ return MemOperand(context, Context::SlotOffset(index));
+}
+
+
+static inline MemOperand GlobalObjectOperand() {
+ return ContextOperand(cp, Context::GLOBAL_INDEX);
+}
+
+
// Generate a MemOperand for loading a field from an object.
static inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
+// Generate a MemOperand for storing arguments 5..N on the stack
+// when calling CallCFunction().
+static inline MemOperand CFunctionArgumentOperand(int index) {
+ ASSERT(index > StandardFrameConstants::kCArgSlotCount);
+ // Argument 5 takes the slot just past the four Arg-slots.
+ int offset =
+ (index - 5) * kPointerSize + StandardFrameConstants::kCArgsSlotsSize;
+ return MemOperand(sp, offset);
+}
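+
+// For example, with 4-byte pointers and four argument slots (16 bytes),
+// CFunctionArgumentOperand(5) yields MemOperand(sp, 16): (5 - 5) * 4 + 16.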
+
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
@@ -458,4 +1251,3 @@ static inline MemOperand FieldMemOperand(Register object, int offset) {
} } // namespace v8::internal
#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
-
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
new file mode 100644
index 000000000..cfc8f651c
--- /dev/null
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -0,0 +1,1251 @@
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_MIPS)
+
+#include "unicode.h"
+#include "log.h"
+#include "code-stubs.h"
+#include "regexp-stack.h"
+#include "macro-assembler.h"
+#include "regexp-macro-assembler.h"
+#include "mips/regexp-macro-assembler-mips.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention
+ * - t1 : Pointer to current code object (Code*) including heap object tag.
+ * - t2 : Current position in input, as negative offset from end of string.
+ * Please notice that this is the byte offset, not the character offset!
+ * - t3 : Currently loaded character. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - t4 : Points to tip of backtrack stack.
+ * - t5 : Unused.
+ * - t6 : End of input (points to byte after last character in input).
+ * - fp : Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - sp : Points to tip of C stack.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * The stack will have the following structure:
+ *
+ * - fp[56] direct_call (if 1, direct call from JavaScript code,
+ * if 0, call through the runtime system).
+ * - fp[52] stack_area_base (High end of the memory area to use as
+ * backtracking stack).
+ * - fp[48] int* capture_array (int[num_saved_registers_], for output).
+ * - fp[44] secondary link/return address used by native call.
+ * --- sp when called ---
+ * - fp[40] return address (ra).
+ * - fp[36] old frame pointer (fp).
+ * - fp[0..32] backup of registers s0..s7.
+ * --- frame pointer ----
+ * - fp[-4] end of input (Address of end of string).
+ * - fp[-8] start of input (Address of first character in string).
+ * - fp[-12] start index (character index of start).
+ * - fp[-16] void* input_string (location of a handle containing the string).
+ * - fp[-20] Offset of location before start of input (effectively character
+ * position -1). Used to initialize capture registers to a
+ * non-position.
+ * - fp[-24] At start (if 1, we are starting at the start of the
+ * string, otherwise 0)
+ * - fp[-28] register 0 (Only positions must be stored in the first
+ * - register 1 num_saved_registers_ registers)
+ * - ...
+ * - register num_registers-1
+ * --- sp ---
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String* input_string,
+ * int start_index,
+ * Address start,
+ * Address end,
+ * Address secondary_return_address, // Only used by native call.
+ * int* capture_output_array,
+ * byte* stack_area_base,
+ * bool direct_call = false)
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in mips/simulator-mips.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the ra register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
+ */
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
+ Mode mode,
+ int registers_to_save)
+ : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_(),
+ internal_failure_label_() {
+ ASSERT_EQ(0, registers_to_save % 2);
+ __ jmp(&entry_label_); // We'll write the entry code later.
+ // If the code gets too big or corrupted, an internal exception will be
+ // raised, and we will exit right away.
+ __ bind(&internal_failure_label_);
+ __ li(v0, Operand(FAILURE));
+ __ Ret();
+ __ bind(&start_label_); // And then continue from here.
+}
+
+
+RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+ internal_failure_label_.Unuse();
+}
+
+
+int RegExpMacroAssemblerMIPS::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+ __ Addu(current_input_offset(),
+ current_input_offset(), Operand(by * char_size()));
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
+ ASSERT(reg >= 0);
+ ASSERT(reg < num_registers_);
+ if (by != 0) {
+ __ lw(a0, register_location(reg));
+ __ Addu(a0, a0, Operand(by));
+ __ sw(a0, register_location(reg));
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::Backtrack() {
+ CheckPreemption();
+ // Pop Code* offset from backtrack stack, add Code* and jump to location.
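+ // (Offsets relative to the Code object, rather than absolute addresses,
+ // are pushed so that the entries stay valid if the GC moves the code.)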
+ Pop(a0);
+ __ Addu(a0, a0, code_pointer());
+ __ Jump(Operand(a0));
+}
+
+
+void RegExpMacroAssemblerMIPS::Bind(Label* label) {
+ __ bind(label);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacter(uint32_t c, Label* on_equal) {
+ BranchOrBacktrack(on_equal, eq, current_character(), Operand(c));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
+ BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
+ Label not_at_start;
+ // Did we start the match at the start of the string at all?
+ __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+ BranchOrBacktrack(&not_at_start, eq, a0, Operand(zero_reg));
+
+ // If we did, are we still at the start of the input?
+ __ lw(a1, MemOperand(frame_pointer(), kInputStart));
+ __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+ BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
+ __ bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
+ // Did we start the match at the start of the string at all?
+ __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+ BranchOrBacktrack(on_not_at_start, eq, a0, Operand(zero_reg));
+ // If we did, are we still at the start of the input?
+ __ lw(a1, MemOperand(frame_pointer(), kInputStart));
+ __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+ BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) {
+ BranchOrBacktrack(on_less, lt, current_character(), Operand(limit));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string) {
+ if (on_failure == NULL) {
+ // Instead of inlining a backtrack for each test, (re)use the global
+ // backtrack target.
+ on_failure = &backtrack_label_;
+ }
+
+ if (check_end_of_string) {
+ // Check that the last character of the required match is inside the string.
+ CheckPosition(cp_offset + str.length() - 1, on_failure);
+ }
+
+ __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+ if (cp_offset != 0) {
+ int byte_offset = cp_offset * char_size();
+ __ Addu(a0, a0, Operand(byte_offset));
+ }
+
+ // a0 : Address of characters to match against str.
+ int stored_high_byte = 0;
+ for (int i = 0; i < str.length(); i++) {
+ if (mode_ == ASCII) {
+ __ lbu(a1, MemOperand(a0, 0));
+ __ addiu(a0, a0, char_size());
+ ASSERT(str[i] <= String::kMaxAsciiCharCode);
+ BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
+ } else {
+ __ lhu(a1, MemOperand(a0, 0));
+ __ addiu(a0, a0, char_size());
+ uc16 match_char = str[i];
+ int match_high_byte = (match_char >> 8);
+ if (match_high_byte == 0) {
+ BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
+ } else {
+ if (match_high_byte != stored_high_byte) {
+ __ li(a2, Operand(match_high_byte));
+ stored_high_byte = match_high_byte;
+ }
+ __ Addu(a3, a2, Operand(match_char & 0xff));
+ BranchOrBacktrack(on_failure, ne, a1, Operand(a3));
+ }
+ }
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
+ Label backtrack_non_equal;
+ __ lw(a0, MemOperand(backtrack_stackpointer(), 0));
+ __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0));
+ __ Addu(backtrack_stackpointer(),
+ backtrack_stackpointer(),
+ Operand(kPointerSize));
+ __ bind(&backtrack_non_equal);
+ BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+ __ lw(a0, register_location(start_reg)); // Index of start of capture.
+ __ lw(a1, register_location(start_reg + 1)); // Index of end of capture.
+ __ Subu(a1, a1, a0); // Length of capture.
+
+ // If length is zero, either the capture is empty or it is not participating.
+ // In either case succeed immediately.
+ __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+ __ Addu(t5, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+
+ if (mode_ == ASCII) {
+ Label success;
+ Label fail;
+ Label loop_check;
+
+ // a0 - offset of start of capture.
+ // a1 - length of capture.
+ __ Addu(a0, a0, Operand(end_of_input_address()));
+ __ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
+ __ Addu(a1, a0, Operand(a1));
+
+ // a0 - Address of start of capture.
+ // a1 - Address of end of capture.
+ // a2 - Address of current input position.
+
+ Label loop;
+ __ bind(&loop);
+ __ lbu(a3, MemOperand(a0, 0));
+ __ addiu(a0, a0, char_size());
+ __ lbu(t0, MemOperand(a2, 0));
+ __ addiu(a2, a2, char_size());
+
+ __ Branch(&loop_check, eq, t0, Operand(a3));
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
+ __ Or(a3, a3, Operand(0x20)); // Convert capture character to lower-case.
+ __ Or(t0, t0, Operand(0x20)); // Also convert input character.
+ __ Branch(&fail, ne, t0, Operand(a3));
+ __ Subu(a3, a3, Operand('a'));
+ __ Branch(&fail, hi, a3, Operand('z' - 'a')); // Is a3 a lowercase letter?
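+ // (ORing with 0x20 folds ASCII case: 'A' | 0x20 == 'a'. The range check
+ // above then verifies that the folded character really is a letter.)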
+
+ __ bind(&loop_check);
+ __ Branch(&loop, lt, a0, Operand(a1));
+ __ jmp(&success);
+
+ __ bind(&fail);
+ GoTo(on_no_match);
+
+ __ bind(&success);
+ // Compute new value of character position after the matched part.
+ __ Subu(current_input_offset(), a2, end_of_input_address());
+ } else {
+ ASSERT(mode_ == UC16);
+ // Put regexp engine registers on stack.
+ RegList regexp_registers_to_retain = current_input_offset().bit() |
+ current_character().bit() | backtrack_stackpointer().bit();
+ __ MultiPush(regexp_registers_to_retain);
+
+ int argument_count = 4;
+ __ PrepareCallCFunction(argument_count, a2);
+
+ // a0 - offset of start of capture.
+ // a1 - length of capture.
+
+ // Put arguments into arguments registers.
+ // Parameters are
+ // a0: Address byte_offset1 - Address captured substring's start.
+ // a1: Address byte_offset2 - Address of current character position.
+ // a2: size_t byte_length - length of capture in bytes(!).
+ // a3: Isolate* isolate.
+
+ // Address of start of capture.
+ __ Addu(a0, a0, Operand(end_of_input_address()));
+ // Length of capture.
+ __ mov(a2, a1);
+ // Save length in callee-save register for use on return.
+ __ mov(s3, a1);
+ // Address of current input position.
+ __ Addu(a1, current_input_offset(), Operand(end_of_input_address()));
+ // Isolate.
+ __ li(a3, Operand(ExternalReference::isolate_address()));
+
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ __ CallCFunction(function, argument_count);
+
+ // Restore regexp engine registers.
+ __ MultiPop(regexp_registers_to_retain);
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
+ __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+
+ // Check if function returned non-zero for success or zero for failure.
+ BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg));
+ // On success, increment position by length of capture.
+ __ Addu(current_input_offset(), current_input_offset(), Operand(s3));
+ }
+
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotBackReference(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+ Label success;
+
+ // Find length of back-referenced capture.
+ __ lw(a0, register_location(start_reg));
+ __ lw(a1, register_location(start_reg + 1));
+ __ Subu(a1, a1, a0); // Length to check.
+ // Succeed on empty capture (including no capture).
+ __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+ __ Addu(t5, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+
+ // Compute pointers to match string and capture string.
+ __ Addu(a0, a0, Operand(end_of_input_address()));
+ __ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
+ __ Addu(a1, a1, Operand(a0));
+
+ Label loop;
+ __ bind(&loop);
+ if (mode_ == ASCII) {
+ __ lbu(a3, MemOperand(a0, 0));
+ __ addiu(a0, a0, char_size());
+ __ lbu(t0, MemOperand(a2, 0));
+ __ addiu(a2, a2, char_size());
+ } else {
+ ASSERT(mode_ == UC16);
+ __ lhu(a3, MemOperand(a0, 0));
+ __ addiu(a0, a0, char_size());
+ __ lhu(t0, MemOperand(a2, 0));
+ __ addiu(a2, a2, char_size());
+ }
+ BranchOrBacktrack(on_no_match, ne, a3, Operand(t0));
+ __ Branch(&loop, lt, a0, Operand(a1));
+
+ // Move current character position to position after match.
+ __ Subu(current_input_offset(), a2, end_of_input_address());
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotRegistersEqual(int reg1,
+ int reg2,
+ Label* on_not_equal) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c,
+ Label* on_not_equal) {
+ BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
+ __ And(a0, current_character(), Operand(mask));
+ BranchOrBacktrack(on_equal, eq, a0, Operand(c));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal) {
+ __ And(a0, current_character(), Operand(mask));
+ BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterMinusAnd(
+ uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+ // (c - min) <= (max - min) check.
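+ // For example, c in '0'..'9' becomes the single unsigned comparison
+ // (c - '0') <= ('9' - '0'); values below '0' wrap around and also fail.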
+ switch (type) {
+ case 's':
+ // Match space-characters.
+ if (mode_ == ASCII) {
+ // ASCII space characters are '\t'..'\r' and ' '.
+ Label success;
+ __ Branch(&success, eq, current_character(), Operand(' '));
+ // Check range 0x09..0x0d.
+ __ Subu(a0, current_character(), Operand('\t'));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand('\r' - '\t'));
+ __ bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // Match non-space characters.
+ if (mode_ == ASCII) {
+ // ASCII space characters are '\t'..'\r' and ' '.
+ BranchOrBacktrack(on_no_match, eq, current_character(), Operand(' '));
+ __ Subu(a0, current_character(), Operand('\t'));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand('\r' - '\t'));
+ return true;
+ }
+ return false;
+ case 'd':
+ // Match ASCII digits ('0'..'9').
+ __ Subu(a0, current_character(), Operand('0'));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0'));
+ return true;
+ case 'D':
+ // Match non ASCII-digits.
+ __ Subu(a0, current_character(), Operand('0'));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+ __ Xor(a0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
+ __ Subu(a0, a0, Operand(0x0b));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0c - 0x0b));
+ if (mode_ == UC16) {
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ Subu(a0, a0, Operand(0x2028 - 0x0b));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand(1));
+ }
+ return true;
+ }
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+ __ Xor(a0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
+ __ Subu(a0, a0, Operand(0x0b));
+ if (mode_ == ASCII) {
+ BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0c - 0x0b));
+ } else {
+ Label done;
+ BranchOrBacktrack(&done, ls, a0, Operand(0x0c - 0x0b));
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ Subu(a0, a0, Operand(0x2028 - 0x0b));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand(1));
+ __ bind(&done);
+ }
+ return true;
+ }
+ case 'w': {
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ li(a0, Operand(map));
+ __ Addu(a0, a0, current_character());
+ __ lbu(a0, MemOperand(a0, 0));
+ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ Branch(&done, hi, current_character(), Operand('z'));
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ li(a0, Operand(map));
+ __ Addu(a0, a0, current_character());
+ __ lbu(a0, MemOperand(a0, 0));
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
+ if (mode_ != ASCII) {
+ __ bind(&done);
+ }
+ return true;
+ }
+ case '*':
+ // Match any character.
+ return true;
+ // No custom implementation (yet): s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::Fail() {
+ __ li(v0, Operand(FAILURE));
+ __ jmp(&exit_label_);
+}
+
+
+Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
+ if (masm_->has_exception()) {
+ // If the code gets corrupted due to long regular expressions and lack of
+ // space on trampolines, an internal exception flag is set. If this case
+ // is detected, we will jump into the exit sequence right away.
+ __ bind_to(&entry_label_, internal_failure_label_.pos());
+ } else {
+ // Finalize code - write the entry point code now that we know how many
+ // registers we need.
+
+ // Entry code:
+ __ bind(&entry_label_);
+ // Push arguments.
+ // Save callee-save registers.
+ // Start new stack frame.
+ // Store link register in existing stack-cell.
+ // Order here should correspond to order of offset constants in header file.
+ RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() |
+ s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | fp.bit();
+ RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+ __ MultiPush(argument_registers | registers_to_retain | ra.bit());
+ // Set frame pointer in space for it if this is not a direct call
+ // from generated code.
+ __ Addu(frame_pointer(), sp, Operand(4 * kPointerSize));
+ __ push(a0); // Make room for "position - 1" constant (value irrelevant).
+ __ push(a0); // Make room for "at start" constant (value irrelevant).
+
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ lw(a0, MemOperand(a0));
+ __ Subu(a0, sp, a0);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ li(v0, Operand(EXCEPTION));
+ __ jmp(&exit_label_);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(a0);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ Branch(&exit_label_, ne, v0, Operand(zero_reg));
+
+ __ bind(&stack_ok);
+ // Allocate space on stack for registers.
+ __ Subu(sp, sp, Operand(num_registers_ * kPointerSize));
+ // Load string end.
+ __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ // Load input start.
+ __ lw(a0, MemOperand(frame_pointer(), kInputStart));
+ // Find negative length (offset of start relative to end).
+ __ Subu(current_input_offset(), a0, end_of_input_address());
+ // Set a0 to address of char before start of the input string
+ // (effectively string position -1).
+ __ lw(a1, MemOperand(frame_pointer(), kStartIndex));
+ __ Subu(a0, current_input_offset(), Operand(char_size()));
+ __ sll(t5, a1, (mode_ == UC16) ? 1 : 0);
+ __ Subu(a0, a0, t5);
+ // Store this value in a local variable, for use when clearing
+ // position registers.
+ __ sw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+ // Determine whether the start index is zero (that is, whether we are at
+ // the start of the string) and store that value in a local variable.
+ __ mov(t5, a1);
+ __ li(a1, Operand(1));
+ __ movn(a1, zero_reg, t5);
+ __ sw(a1, MemOperand(frame_pointer(), kAtStart));
+
+ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
+ // Fill saved registers with initial value = start offset - 1.
+
+ // Address of register 0.
+ __ Addu(a1, frame_pointer(), Operand(kRegisterZero));
+ __ li(a2, Operand(num_saved_registers_));
+ Label init_loop;
+ __ bind(&init_loop);
+ __ sw(a0, MemOperand(a1));
+ __ Addu(a1, a1, Operand(-kPointerSize));
+ __ Subu(a2, a2, Operand(1));
+ __ Branch(&init_loop, ne, a2, Operand(zero_reg));
+ }
+
+ // Initialize backtrack stack pointer.
+ __ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+ // Initialize the code pointer register.
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
+ // Load previous char as initial value of current character register.
+ Label at_start;
+ __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+ __ Branch(&at_start, ne, a0, Operand(zero_reg));
+ LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
+ __ jmp(&start_label_);
+ __ bind(&at_start);
+ __ li(current_character(), Operand('\n'));
+ __ jmp(&start_label_);
+
+
+ // Exit code:
+ if (success_label_.is_linked()) {
+ // Save captures when successful.
+ __ bind(&success_label_);
+ if (num_saved_registers_ > 0) {
+ // Copy captures to output.
+ __ lw(a1, MemOperand(frame_pointer(), kInputStart));
+ __ lw(a0, MemOperand(frame_pointer(), kRegisterOutput));
+ __ lw(a2, MemOperand(frame_pointer(), kStartIndex));
+ __ Subu(a1, end_of_input_address(), a1);
+ // a1 is length of input in bytes.
+ if (mode_ == UC16) {
+ __ srl(a1, a1, 1);
+ }
+ // a1 is length of input in characters.
+ __ Addu(a1, a1, Operand(a2));
+ // a1 is length of string in characters.
+
+ ASSERT_EQ(0, num_saved_registers_ % 2);
+ // Always an even number of capture registers. This allows us to
+ // unroll the loop once to add an operation between a load of a register
+ // and the following use of that register.
+ for (int i = 0; i < num_saved_registers_; i += 2) {
+ __ lw(a2, register_location(i));
+ __ lw(a3, register_location(i + 1));
+ if (mode_ == UC16) {
+ __ sra(a2, a2, 1);
+ __ Addu(a2, a2, a1);
+ __ sra(a3, a3, 1);
+ __ Addu(a3, a3, a1);
+ } else {
+ __ Addu(a2, a1, Operand(a2));
+ __ Addu(a3, a1, Operand(a3));
+ }
+ __ sw(a2, MemOperand(a0));
+ __ Addu(a0, a0, kPointerSize);
+ __ sw(a3, MemOperand(a0));
+ __ Addu(a0, a0, kPointerSize);
+ }
+ }
+ __ li(v0, Operand(SUCCESS));
+ }
+ // Exit and return v0.
+ __ bind(&exit_label_);
+ // Skip sp past the regexp registers and local variables.
+ __ mov(sp, frame_pointer());
+ // Restore the saved registers s0..s7 and fp, then return through ra.
+ __ MultiPop(registers_to_retain | ra.bit());
+ __ Ret();
+
+ // Backtrack code (branch target for conditional backtracks).
+ if (backtrack_label_.is_linked()) {
+ __ bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ Label exit_with_exception;
+
+ // Preempt-code.
+ if (check_preempt_label_.is_linked()) {
+ SafeCallTarget(&check_preempt_label_);
+ // Put regexp engine registers on stack.
+ RegList regexp_registers_to_retain = current_input_offset().bit() |
+ current_character().bit() | backtrack_stackpointer().bit();
+ __ MultiPush(regexp_registers_to_retain);
+ CallCheckStackGuardState(a0);
+ __ MultiPop(regexp_registers_to_retain);
+ // If the returned value is non-zero, end execution with it as the
+ // result.
+ __ Branch(&exit_label_, ne, v0, Operand(zero_reg));
+
+ // String might have moved: Reload end of string from frame.
+ __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
+ SafeReturn();
+ }
+
+ // Backtrack stack overflow code.
+ if (stack_overflow_label_.is_linked()) {
+ SafeCallTarget(&stack_overflow_label_);
+ // Reached if the backtrack-stack limit has been hit.
+ // Put regexp engine registers on stack first.
+ RegList regexp_registers = current_input_offset().bit() |
+ current_character().bit();
+ __ MultiPush(regexp_registers);
+ Label grow_failed;
+ // Call GrowStack(backtrack_stackpointer(), &stack_base)
+ static const int num_arguments = 3;
+ __ PrepareCallCFunction(num_arguments, a0);
+ __ mov(a0, backtrack_stackpointer());
+ __ Addu(a1, frame_pointer(), Operand(kStackHighEnd));
+ __ li(a2, Operand(ExternalReference::isolate_address()));
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(masm_->isolate());
+ __ CallCFunction(grow_stack, num_arguments);
+ // Restore regexp registers.
+ __ MultiPop(regexp_registers);
+ // If NULL is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
+ __ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
+ // Otherwise use return value as new stack pointer.
+ __ mov(backtrack_stackpointer(), v0);
+ // Restore saved registers and continue.
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
+ __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ SafeReturn();
+ }
+
+ if (exit_with_exception.is_linked()) {
+ // Reached if any of the code above needed to exit with an exception.
+ __ bind(&exit_with_exception);
+ // Exit with Result EXCEPTION(-1) to signal thrown exception.
+ __ li(v0, Operand(EXCEPTION));
+ __ jmp(&exit_label_);
+ }
+ }
+
+ CodeDesc code_desc;
+ masm_->GetCode(&code_desc);
+ Handle<Code> code = FACTORY->NewCode(code_desc,
+ Code::ComputeFlags(Code::REGEXP),
+ masm_->CodeObject());
+ LOG(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
+ return Handle<HeapObject>::cast(code);
+}
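
As a worked example of the capture copy-out in GetCode above (hypothetical numbers, UC16 mode assumed): capture registers hold negative byte offsets from the end of the input, and the generated code rebases them to character indices by adding the input length in characters plus the start index.

    #include <cstdio>

    int main() {
      // Hypothetical values, mirroring the success path in GetCode above.
      int input_length_in_chars = 20;  // (kInputEnd - kInputStart) >> 1 for UC16.
      int start_index = 3;             // The kStartIndex stack parameter.
      int base = input_length_in_chars + start_index;  // a1 after the Addu.

      int capture_offset_bytes = -14;  // Register value: offset back from the end.
      // Arithmetic shift assumed, matching the sra instruction for UC16 mode.
      int capture_offset_chars = capture_offset_bytes >> 1;
      std::printf("capture starts at character index %d\n",
                  base + capture_offset_chars);  // 23 - 7 = 16.
      return 0;
    }
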
+
+
+void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
+ if (to == NULL) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterGE(int reg,
+ int comparand,
+ Label* if_ge) {
+ __ lw(a0, register_location(reg));
+ BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg,
+ int comparand,
+ Label* if_lt) {
+ __ lw(a0, register_location(reg));
+ BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg,
+ Label* if_eq) {
+ __ lw(a0, register_location(reg));
+ BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+ RegExpMacroAssemblerMIPS::Implementation() {
+ return kMIPSImplementation;
+}
+
+
+void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds,
+ int characters) {
+ ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
+ ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works).
+ if (check_bounds) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ }
+ LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
+ Pop(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerMIPS::PopRegister(int register_index) {
+ Pop(a0);
+ __ sw(a0, register_location(register_index));
+}
+
+
+void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
+ if (label->is_bound()) {
+ int target = label->pos();
+ __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ Label after_constant;
+ __ Branch(&after_constant);
+ int offset = masm_->pc_offset();
+ int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
+ __ emit(0);
+ masm_->label_at_put(label, offset);
+ __ bind(&after_constant);
+ if (is_int16(cp_offset)) {
+ __ lw(a0, MemOperand(code_pointer(), cp_offset));
+ } else {
+ __ Addu(a0, code_pointer(), cp_offset);
+ __ lw(a0, MemOperand(a0, 0));
+ }
+ }
+ Push(a0);
+ CheckStackLimit();
+}
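
For an unbound label, PushBacktrack branches over a 4-byte hole emitted into the instruction stream, records the hole so that binding the label can patch it, and then loads the constant back out of the code object. A rough illustration of that bookkeeping, with a plain vector standing in for the assembler buffer (not the real Assembler API):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<uint32_t> code;  // Stand-in for the assembler's buffer.
      code.push_back(0xDEADBEEF);  // (1) Placeholder for the branch over the slot.
      size_t hole = code.size();
      code.push_back(0);           // (2) emit(0): reserve a 4-byte constant slot.
      // ... later, when the label is bound at (hypothetical) code offset 17:
      code[hole] = 17;             // (3) label_at_put patches the reserved slot.
      std::printf("backtrack target offset = %u\n", (unsigned)code[hole]);
      return 0;
    }
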
+
+
+void RegExpMacroAssemblerMIPS::PushCurrentPosition() {
+ Push(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerMIPS::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
+ __ lw(a0, register_location(register_index));
+ Push(a0);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
+ __ lw(current_input_offset(), register_location(reg));
+}
+
+
+void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
+ __ lw(backtrack_stackpointer(), register_location(reg));
+ __ lw(a0, MemOperand(frame_pointer(), kStackHighEnd));
+ __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
+}
+
+
+void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) {
+ Label after_position;
+ __ Branch(&after_position,
+ ge,
+ current_input_offset(),
+ Operand(-by * char_size()));
+ __ li(current_input_offset(), -by * char_size());
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&after_position);
+}
+
+
+void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
+ ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
+ __ li(a0, Operand(to));
+ __ sw(a0, register_location(register_index));
+}
+
+
+void RegExpMacroAssemblerMIPS::Succeed() {
+ __ jmp(&success_label_);
+}
+
+
+void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
+ int cp_offset) {
+ if (cp_offset == 0) {
+ __ sw(current_input_offset(), register_location(reg));
+ } else {
+ __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ __ sw(a0, register_location(reg));
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
+ ASSERT(reg_from <= reg_to);
+ __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ for (int reg = reg_from; reg <= reg_to; reg++) {
+ __ sw(a0, register_location(reg));
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
+ __ lw(a1, MemOperand(frame_pointer(), kStackHighEnd));
+ __ Subu(a0, backtrack_stackpointer(), a1);
+ __ sw(a0, register_location(reg));
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
+ static const int num_arguments = 3;
+ __ PrepareCallCFunction(num_arguments, scratch);
+ __ mov(a2, frame_pointer());
+ // Code* of self.
+ __ li(a1, Operand(masm_->CodeObject()));
+ // a0 becomes return address pointer.
+ ExternalReference stack_guard_check =
+ ExternalReference::re_check_stack_guard_state(masm_->isolate());
+ CallCFunctionUsingStub(stack_guard_check, num_arguments);
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+}
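
A standalone sketch of the frame_entry idiom (using a plain reinterpret_cast instead of V8's Memory::int32_at): returning a typed lvalue lets callers both read a frame slot and assign through it, which CheckStackGuardState below relies on when it rewrites kInputStart and kInputEnd.

    #include <cstdint>
    #include <cstdio>

    typedef uint8_t* Address;  // Hypothetical stand-in for V8's Address.

    template <typename T>
    static T& frame_entry(Address re_frame, int frame_offset) {
      return *reinterpret_cast<T*>(re_frame + frame_offset);
    }

    int main() {
      int32_t frame_storage[4] = { 0, 0, 42, 0 };
      Address frame = reinterpret_cast<Address>(frame_storage);
      std::printf("slot 2 = %d\n", frame_entry<int32_t>(frame, 2 * 4));
      frame_entry<int32_t>(frame, 2 * 4) = 7;  // Writable through the reference.
      std::printf("slot 2 = %d\n", frame_storage[2]);
      return 0;
    }
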
+
+
+int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame) {
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ ASSERT(isolate == Isolate::Current());
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ isolate->StackOverflow();
+ return EXCEPTION;
+ }
+
+ // If there is no real stack overflow, the stack guard was used to
+ // interrupt execution for another purpose.
+
+ // If this is a direct call from JavaScript retry the RegExp forcing the call
+ // through the runtime system. Currently the direct call cannot handle a GC.
+ if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+ return RETRY;
+ }
+
+ // Prepare for possible GC.
+ HandleScope handles;
+ Handle<Code> code_handle(re_code);
+
+ Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+ // Current string.
+ bool is_ascii = subject->IsAsciiRepresentation();
+
+ ASSERT(re_code->instruction_start() <= *return_address);
+ ASSERT(*return_address <=
+ re_code->instruction_start() + re_code->instruction_size());
+
+ MaybeObject* result = Execution::HandleStackGuardInterrupt();
+
+ if (*code_handle != re_code) { // Return address no longer valid.
+ int delta = code_handle->address() - re_code->address();
+ // Overwrite the return address on the stack.
+ *return_address += delta;
+ }
+
+ if (result->IsException()) {
+ return EXCEPTION;
+ }
+
+ // String might have changed.
+ if (subject->IsAsciiRepresentation() != is_ascii) {
+ // If we changed between an ASCII and a UC16 string, the specialized
+ // code cannot be used, and we need to restart regexp matching from
+ // scratch (including, potentially, compiling a new version of the code).
+ return RETRY;
+ }
+
+ // Otherwise, the content of the string might have moved. It must still
+ // be a sequential or external string with the same content.
+ // Update the start and end pointers in the stack frame to the current
+ // location (whether it has actually moved or not).
+ ASSERT(StringShape(*subject).IsSequential() ||
+ StringShape(*subject).IsExternal());
+
+ // The original start address of the characters to match.
+ const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+ // Find the current start address of the same character at the current string
+ // position.
+ int start_index = frame_entry<int>(re_frame, kStartIndex);
+ const byte* new_address = StringCharacterPosition(*subject, start_index);
+
+ if (start_address != new_address) {
+ // If there is a difference, update the object pointer and start and end
+ // addresses in the RegExp stack frame to match the new value.
+ const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
+ int byte_length = end_address - start_address;
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
+ frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+ frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+ }
+
+ return 0;
+}
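
The pointer-repair step at the end of CheckStackGuardState reduces to rebasing two cached pointers while preserving their distance. A minimal sketch, with a hypothetical frame struct in place of the raw frame slots:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct RegExpFrame {           // Hypothetical stand-in for the frame slots.
      const uint8_t* input_start;  // kInputStart
      const uint8_t* input_end;    // kInputEnd
    };

    // Rebase both pointers so their distance (the byte length) is preserved.
    static void RepairFrame(RegExpFrame* frame, const uint8_t* new_start) {
      std::ptrdiff_t byte_length = frame->input_end - frame->input_start;
      frame->input_start = new_start;
      frame->input_end = new_start + byte_length;
    }

    int main() {
      uint8_t old_buffer[16], new_buffer[16];
      RegExpFrame frame = { old_buffer + 2, old_buffer + 10 };
      RepairFrame(&frame, new_buffer + 2);  // As if GC moved the string body.
      std::printf("byte length still %d\n",
                  (int)(frame.input_end - frame.input_start));
      return 0;
    }
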
+
+
+MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
+ ASSERT(register_index < (1<<30));
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ return MemOperand(frame_pointer(),
+ kRegisterZero - register_index * kPointerSize);
+}
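
Note that register_location both computes an address and grows the register count as a side effect: regexp register i lives at a fixed negative offset below the frame pointer, and the largest index ever requested determines how much stack GetCode reserves. A tiny model, assuming a 32-bit target and the kRegisterZero value derived in the header below:

    #include <cstdio>

    static const int kPointerSize = 4;     // 32-bit MIPS assumed.
    static const int kRegisterZero = -28;  // Derived in the header below.
    static int num_registers_ = 0;

    static int register_offset(int register_index) {
      if (num_registers_ <= register_index) num_registers_ = register_index + 1;
      return kRegisterZero - register_index * kPointerSize;
    }

    int main() {
      std::printf("reg 0 at fp%d, reg 3 at fp%d, frame needs %d registers\n",
                  register_offset(0), register_offset(3), num_registers_);
      return 0;
    }
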
+
+
+void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ BranchOrBacktrack(on_outside_input,
+ ge,
+ current_input_offset(),
+ Operand(-cp_offset * char_size()));
+}
+
+
+void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to,
+ Condition condition,
+ Register rs,
+ const Operand& rt) {
+ if (condition == al) { // Unconditional.
+ if (to == NULL) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+ }
+ if (to == NULL) {
+ __ Branch(&backtrack_label_, condition, rs, rt);
+ return;
+ }
+ __ Branch(to, condition, rs, rt);
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeCall(Label* to, Condition cond, Register rs,
+ const Operand& rt) {
+ __ BranchAndLink(to, cond, rs, rt);
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeReturn() {
+ __ pop(ra);
+ __ Addu(t5, ra, Operand(masm_->CodeObject()));
+ __ Jump(t5);
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) {
+ __ bind(name);
+ __ Subu(ra, ra, Operand(masm_->CodeObject()));
+ __ push(ra);
+}
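
SafeCall, SafeReturn, and SafeCallTarget keep the backtrack machinery GC-safe by never leaving an absolute code address on a stack: the return address is converted to an offset from the code object on entry and rebased on exit. The idea in plain C++, with hypothetical addresses:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uintptr_t code_object = 0x10000;     // Hypothetical code object start.
      uintptr_t return_address = 0x10234;  // Absolute ra at call time.
      // SafeCallTarget: convert to an offset before it reaches any stack.
      uintptr_t saved_offset = return_address - code_object;

      code_object = 0x48000;               // GC relocated the code object.
      // SafeReturn: rebase against the (possibly new) code object address.
      uintptr_t resume = saved_offset + code_object;
      std::printf("resume at 0x%lx (offset 0x%lx)\n",
                  (unsigned long)resume, (unsigned long)saved_offset);
      return 0;
    }
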
+
+
+void RegExpMacroAssemblerMIPS::Push(Register source) {
+ ASSERT(!source.is(backtrack_stackpointer()));
+ __ Addu(backtrack_stackpointer(),
+ backtrack_stackpointer(),
+ Operand(-kPointerSize));
+ __ sw(source, MemOperand(backtrack_stackpointer()));
+}
+
+
+void RegExpMacroAssemblerMIPS::Pop(Register target) {
+ ASSERT(!target.is(backtrack_stackpointer()));
+ __ lw(target, MemOperand(backtrack_stackpointer()));
+ __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), kPointerSize);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckPreemption() {
+ // Check for preemption.
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ lw(a0, MemOperand(a0));
+ SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckStackLimit() {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
+
+ __ li(a0, Operand(stack_limit));
+ __ lw(a0, MemOperand(a0));
+ SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0));
+}
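
The two checks above guard different stacks against different limits: CheckPreemption compares the machine stack pointer sp against the VM stack limit, while CheckStackLimit compares the regexp's backtrack stack pointer against a separate regexp stack limit; both use the unsigned lower-or-same (ls) condition. A trivial sketch with made-up addresses:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Made-up addresses; "<=" plays the role of the ls condition.
      uintptr_t sp = 0x7ffe0000, vm_stack_limit = 0x7fff0000;
      uintptr_t backtrack_sp = 0x0800, regexp_stack_limit = 0x1000;
      bool preempt = sp <= vm_stack_limit;                 // CheckPreemption.
      bool overflow = backtrack_sp <= regexp_stack_limit;  // CheckStackLimit.
      std::printf("preempt=%d overflow=%d\n", preempt, overflow);
      return 0;
    }
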
+
+
+void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub(
+ ExternalReference function,
+ int num_arguments) {
+ // All arguments must be passed in registers; the stub stores the return
+ // address on the stack.
+ ASSERT(num_arguments <= 4);
+ __ li(code_pointer(), Operand(function));
+ RegExpCEntryStub stub;
+ __ CallStub(&stub);
+ if (OS::ActivationFrameAlignment() != 0) {
+ __ lw(sp, MemOperand(sp, 16));
+ }
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+
+void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
+ int characters) {
+ Register offset = current_input_offset();
+ if (cp_offset != 0) {
+ __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ offset = a0;
+ }
+ // We assume that we cannot do unaligned loads on MIPS, so this function
+ // must only be used to load a single character at a time.
+ ASSERT(characters == 1);
+ __ Addu(t5, end_of_input_address(), Operand(offset));
+ if (mode_ == ASCII) {
+ __ lbu(current_character(), MemOperand(t5, 0));
+ } else {
+ ASSERT(mode_ == UC16);
+ __ lhu(current_character(), MemOperand(t5, 0));
+ }
+}
+
+
+void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
+ int stack_alignment = OS::ActivationFrameAlignment();
+ if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
+ // The stack is already aligned for the call, so decrement sp by the
+ // alignment to make room for storing the return address.
+ __ Subu(sp, sp, Operand(stack_alignment));
+ __ sw(ra, MemOperand(sp, 0));
+ __ mov(a0, sp);
+ __ mov(t9, t1);
+ __ Call(t9);
+ __ lw(ra, MemOperand(sp, 0));
+ __ Addu(sp, sp, Operand(stack_alignment));
+ __ Jump(Operand(ra));
+}
+
+
+#undef __
+
+#endif // V8_INTERPRETED_REGEXP
+
+}} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.h b/deps/v8/src/mips/regexp-macro-assembler-mips.h
new file mode 100644
index 000000000..ad7ada547
--- /dev/null
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.h
@@ -0,0 +1,252 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+
+namespace v8 {
+namespace internal {
+
+#ifdef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerMIPS: public RegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerMIPS();
+ virtual ~RegExpMacroAssemblerMIPS();
+};
+#else // V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save);
+ virtual ~RegExpMacroAssemblerMIPS();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(Label* on_at_start);
+ virtual void CheckCharacter(uint32_t c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ virtual void CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string);
+ // A "greedy loop" is a loop that is both greedy and with a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ Label* on_no_match);
+ virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
+ virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal);
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual void Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ static int CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame);
+ private:
+ // Offsets from frame_pointer() of function parameters and stored registers.
+ static const int kFramePointer = 0;
+
+ // Above the frame pointer - Stored registers and stack passed parameters.
+ // Registers s0 to s7, fp, and ra.
+ static const int kStoredRegisters = kFramePointer;
+ // Return address (stored from link register, read into pc on return).
+ static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
+ static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
+ // Stack frame header.
+ static const int kStackFrameHeader = kReturnAddress + kPointerSize;
+ // Stack parameters placed by caller.
+ static const int kRegisterOutput = kStackFrameHeader + 20;
+ static const int kStackHighEnd = kRegisterOutput + kPointerSize;
+ static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kIsolate = kDirectCall + kPointerSize;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kInputEnd = kFramePointer - kPointerSize;
+ static const int kInputStart = kInputEnd - kPointerSize;
+ static const int kStartIndex = kInputStart - kPointerSize;
+ static const int kInputString = kStartIndex - kPointerSize;
+ // When adding local variables, remember to push space for them in
+ // the frame in GetCode.
+ static const int kInputStartMinusOne = kInputString - kPointerSize;
+ static const int kAtStart = kInputStartMinusOne - kPointerSize;
+ // First register address. Following registers are below it on the stack.
+ static const int kRegisterZero = kAtStart - kPointerSize;
+
+ // Initial size of code buffer.
+ static const size_t kRegExpCodeSize = 1024;
+
+ // Load a number of characters at the given offset from the
+ // current position, into the current-character register.
+ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+ // The frame-pointer-relative location of a regexp register.
+ MemOperand register_location(int register_index);
+
+ // Register holding the current input position as negative offset from
+ // the end of the string.
+ inline Register current_input_offset() { return t2; }
+
+ // The register containing the current character after LoadCurrentCharacter.
+ inline Register current_character() { return t3; }
+
+ // Register holding address of the end of the input string.
+ inline Register end_of_input_address() { return t6; }
+
+ // Register holding the frame address. Local variables, parameters and
+ // regexp registers are addressed relative to this.
+ inline Register frame_pointer() { return fp; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ inline Register backtrack_stackpointer() { return t4; }
+
+ // Register holding pointer to the current code object.
+ inline Register code_pointer() { return t1; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument).
+ inline int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is NULL, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Label* to,
+ Condition condition,
+ Register rs,
+ const Operand& rt);
+
+ // Call and return internally in the generated code in a way that
+ // is GC-safe (i.e., doesn't leave absolute code addresses on the stack).
+ inline void SafeCall(Label* to,
+ Condition cond,
+ Register rs,
+ const Operand& rt);
+ inline void SafeReturn();
+ inline void SafeCallTarget(Label* name);
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // and increments it by a word size.
+ inline void Pop(Register target);
+
+ // Calls a C function and cleans up the frame alignment done by
+ // FrameAlign. The called function *is* allowed to trigger a garbage
+ // collection, but may not take more than four arguments (no arguments
+ // passed on the stack), and the first argument will be a pointer to the
+ // return address.
+ inline void CallCFunctionUsingStub(ExternalReference function,
+ int num_arguments);
+
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (ASCII or UC16).
+ Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1).
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+ Label internal_failure_label_;
+};
+
+#endif // V8_INTERPRETED_REGEXP
+
+
+}} // namespace v8::internal
+
+#endif // V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+
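
Since the frame offsets in the header above are derived by chained arithmetic, a quick worked check helps when reading the GetCode prologue; this sketch assumes a 32-bit target (kPointerSize == 4) and simply recomputes the interesting constants:

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;  // 32-bit target assumed.
      const int kFramePointer = 0;
      const int kStoredRegisters = kFramePointer;
      const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;  // 36
      const int kStackFrameHeader = kReturnAddress + kPointerSize;     // 40
      const int kInputEnd = kFramePointer - kPointerSize;              // -4
      const int kInputStart = kInputEnd - kPointerSize;                // -8
      const int kStartIndex = kInputStart - kPointerSize;              // -12
      const int kInputString = kStartIndex - kPointerSize;             // -16
      const int kInputStartMinusOne = kInputString - kPointerSize;     // -20
      const int kAtStart = kInputStartMinusOne - kPointerSize;         // -24
      const int kRegisterZero = kAtStart - kPointerSize;               // -28
      std::printf("ra at fp+%d, header at fp+%d, register 0 at fp%d\n",
                  kReturnAddress, kStackFrameHeader, kRegisterZero);
      return 0;
    }
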
diff --git a/deps/v8/src/mips/register-allocator-mips-inl.h b/deps/v8/src/mips/register-allocator-mips-inl.h
deleted file mode 100644
index a876bee49..000000000
--- a/deps/v8/src/mips/register-allocator-mips-inl.h
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-#define V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-
-#include "v8.h"
-#include "mips/assembler-mips.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
- // The code for this test relies on the order of register codes.
- return reg.is(cp) || reg.is(s8_fp) || reg.is(sp);
-}
-
-
-int RegisterAllocator::ToNumber(Register reg) {
- ASSERT(reg.is_valid() && !IsReserved(reg));
- const int kNumbers[] = {
- 0, // zero_reg
- 1, // at
- 2, // v0
- 3, // v1
- 4, // a0
- 5, // a1
- 6, // a2
- 7, // a3
- 8, // t0
- 9, // t1
- 10, // t2
- 11, // t3
- 12, // t4
- 13, // t5
- 14, // t6
- 15, // t7
- 16, // t8
- 17, // t9
- 18, // s0
- 19, // s1
- 20, // s2
- 21, // s3
- 22, // s4
- 23, // s5
- 24, // s6
- 25, // s7
- 26, // k0
- 27, // k1
- 28, // gp
- 29, // sp
- 30, // s8_fp
- 31, // ra
- };
- return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
- ASSERT(num >= 0 && num < kNumRegisters);
- const Register kRegisters[] = {
- zero_reg,
- at,
- v0,
- v1,
- a0,
- a1,
- a2,
- a3,
- t0,
- t1,
- t2,
- t3,
- t4,
- t5,
- t6,
- t7,
- s0,
- s1,
- s2,
- s3,
- s4,
- s5,
- s6,
- s7,
- t8,
- t9,
- k0,
- k1,
- gp,
- sp,
- s8_fp,
- ra
- };
- return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
- // The non-reserved a1 and ra registers are live on JS function entry.
- Use(a1); // JS function.
- Use(ra); // Return address.
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-
diff --git a/deps/v8/src/mips/register-allocator-mips.cc b/deps/v8/src/mips/register-allocator-mips.cc
deleted file mode 100644
index 2c5d61bee..000000000
--- a/deps/v8/src/mips/register-allocator-mips.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Result::ToRegister(Register target) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
- // No byte registers on MIPS.
- UNREACHABLE();
- return Result();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 59a537324..30e12e75b 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -26,6 +26,8 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
+#include <math.h>
+#include <limits.h>
#include <cstdarg>
#include "v8.h"
@@ -33,27 +35,29 @@
#include "disasm.h"
#include "assembler.h"
-#include "globals.h" // Need the BitCast
+#include "globals.h" // Need the BitCast.
#include "mips/constants-mips.h"
#include "mips/simulator-mips.h"
-namespace v8i = v8::internal;
-
-#if !defined(__mips) || defined(USE_SIMULATOR)
// Only build the simulator if not compiling for real MIPS hardware.
-namespace assembler {
-namespace mips {
+#if defined(USE_SIMULATOR)
-using ::v8::internal::Object;
-using ::v8::internal::PrintF;
-using ::v8::internal::OS;
-using ::v8::internal::ReadLine;
-using ::v8::internal::DeleteArray;
+namespace v8 {
+namespace internal {
-// Utils functions
+// Utility functions.
bool HaveSameSign(int32_t a, int32_t b) {
- return ((a ^ b) > 0);
+ return ((a ^ b) >= 0);
+}
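
The upgrade also fixes HaveSameSign: a ^ b has its sign bit clear exactly when a and b agree in sign, and using >= rather than > correctly classifies equal values (whose XOR is zero) as same-signed. A quick check:

    #include <cstdint>
    #include <cstdio>

    static bool HaveSameSign(int32_t a, int32_t b) { return (a ^ b) >= 0; }

    int main() {
      std::printf("%d %d %d\n",
                  HaveSameSign(5, 9),    // 1: both positive.
                  HaveSameSign(-5, 9),   // 0: signs differ.
                  HaveSameSign(7, 7));   // 1: XOR is zero; ">" would say 0 here.
      return 0;
    }
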
+
+
+uint32_t get_fcsr_condition_bit(uint32_t cc) {
+ if (cc == 0) {
+ return 23;
+ } else {
+ return 24 + cc;
+ }
}
@@ -63,15 +67,18 @@ bool HaveSameSign(int32_t a, int32_t b) {
// Library does not provide vsscanf.
#define SScanF sscanf // NOLINT
-// The Debugger class is used by the simulator while debugging simulated MIPS
+// The MipsDebugger class is used by the simulator while debugging simulated
// code.
-class Debugger {
+class MipsDebugger {
public:
- explicit Debugger(Simulator* sim);
- ~Debugger();
+ explicit MipsDebugger(Simulator* sim);
+ ~MipsDebugger();
void Stop(Instruction* instr);
void Debug();
+ // Print all registers with a nice formatting.
+ void PrintAllRegs();
+ void PrintAllRegsIncludingFPU();
private:
// We set the breakpoint code to 0xfffff to easily recognize it.
@@ -81,6 +88,10 @@ class Debugger {
Simulator* sim_;
int32_t GetRegisterValue(int regnum);
+ int32_t GetFPURegisterValueInt(int regnum);
+ int64_t GetFPURegisterValueLong(int regnum);
+ float GetFPURegisterValueFloat(int regnum);
+ double GetFPURegisterValueDouble(int regnum);
bool GetValue(const char* desc, int32_t* value);
// Set or delete a breakpoint. Returns true if successful.
@@ -91,18 +102,17 @@ class Debugger {
// execution to skip past breakpoints when run from the debugger.
void UndoBreakpoints();
void RedoBreakpoints();
-
- // Print all registers with a nice formatting.
- void PrintAllRegs();
};
-Debugger::Debugger(Simulator* sim) {
+MipsDebugger::MipsDebugger(Simulator* sim) {
sim_ = sim;
}
-Debugger::~Debugger() {
+
+MipsDebugger::~MipsDebugger() {
}
+
#ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;
@@ -115,36 +125,58 @@ static void InitializeCoverage() {
}
-void Debugger::Stop(Instruction* instr) {
- UNIMPLEMENTED_MIPS();
- char* str = reinterpret_cast<char*>(instr->InstructionBits());
- if (strlen(str) > 0) {
+void MipsDebugger::Stop(Instruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->Bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char** msg_address =
+ reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
+ char* msg = *msg_address;
+ ASSERT(msg != NULL);
+
+ // Update this stop description.
+ if (!sim_->watched_stops[code].desc) {
+ sim_->watched_stops[code].desc = msg;
+ }
+
+ if (strlen(msg) > 0) {
if (coverage_log != NULL) {
fprintf(coverage_log, "%s\n", msg);
fflush(coverage_log);
}
- instr->SetInstructionBits(0x0); // Overwrite with nop.
+ // Overwrite the instruction and address with nops.
+ instr->SetInstructionBits(kNopInstr);
+ reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
}
- sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+ sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
}
-#else // ndef GENERATED_CODE_COVERAGE
+
+#else // GENERATED_CODE_COVERAGE
#define UNSUPPORTED() printf("Unsupported instruction.\n");
static void InitializeCoverage() {}
-void Debugger::Stop(Instruction* instr) {
- const char* str = reinterpret_cast<char*>(instr->InstructionBits());
- PrintF("Simulator hit %s\n", str);
- sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+void MipsDebugger::Stop(Instruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->Bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg = *reinterpret_cast<char**>(sim_->get_pc() +
+ Instruction::kInstrSize);
+ // Update this stop description.
+ if (!sim_->watched_stops[code].desc) {
+ sim_->watched_stops[code].desc = msg;
+ }
+ PrintF("Simulator hit %s (%u)\n", msg, code);
+ sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
Debug();
}
#endif // GENERATED_CODE_COVERAGE
-int32_t Debugger::GetRegisterValue(int regnum) {
+int32_t MipsDebugger::GetRegisterValue(int regnum) {
if (regnum == kNumSimuRegisters) {
return sim_->get_pc();
} else {
@@ -153,11 +185,54 @@ int32_t Debugger::GetRegisterValue(int regnum) {
}
-bool Debugger::GetValue(const char* desc, int32_t* value) {
+int32_t MipsDebugger::GetFPURegisterValueInt(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register(regnum);
+ }
+}
+
+
+int64_t MipsDebugger::GetFPURegisterValueLong(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_long(regnum);
+ }
+}
+
+
+float MipsDebugger::GetFPURegisterValueFloat(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_float(regnum);
+ }
+}
+
+
+double MipsDebugger::GetFPURegisterValueDouble(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_double(regnum);
+ }
+}
+
+
+bool MipsDebugger::GetValue(const char* desc, int32_t* value) {
int regnum = Registers::Number(desc);
+ int fpuregnum = FPURegisters::Number(desc);
+
if (regnum != kInvalidRegister) {
*value = GetRegisterValue(regnum);
return true;
+ } else if (fpuregnum != kInvalidFPURegister) {
+ *value = GetFPURegisterValueInt(fpuregnum);
+ return true;
+ } else if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
} else {
return SScanF(desc, "%i", value) == 1;
}
@@ -165,7 +240,7 @@ bool Debugger::GetValue(const char* desc, int32_t* value) {
}
-bool Debugger::SetBreakpoint(Instruction* breakpc) {
+bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
if (sim_->break_pc_ != NULL) {
return false;
@@ -180,7 +255,7 @@ bool Debugger::SetBreakpoint(Instruction* breakpc) {
}
-bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
+bool MipsDebugger::DeleteBreakpoint(Instruction* breakpc) {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
@@ -191,32 +266,33 @@ bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
}
-void Debugger::UndoBreakpoints() {
+void MipsDebugger::UndoBreakpoints() {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
-void Debugger::RedoBreakpoints() {
+void MipsDebugger::RedoBreakpoints() {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
-void Debugger::PrintAllRegs() {
+
+void MipsDebugger::PrintAllRegs() {
#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
PrintF("\n");
- // at, v0, a0
+ // at, v0, a0.
PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
REG_INFO(1), REG_INFO(2), REG_INFO(4));
- // v1, a1
+ // v1, a1.
PrintF("%26s\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
"", REG_INFO(3), REG_INFO(5));
- // a2
+ // a2.
PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(6));
- // a3
+ // a3.
PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(7));
PrintF("\n");
// t0-t7, s0-s7
@@ -225,22 +301,57 @@ void Debugger::PrintAllRegs() {
REG_INFO(8+i), REG_INFO(16+i));
}
PrintF("\n");
- // t8, k0, LO
+ // t8, k0, LO.
PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
REG_INFO(24), REG_INFO(26), REG_INFO(32));
- // t9, k1, HI
+ // t9, k1, HI.
PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
REG_INFO(25), REG_INFO(27), REG_INFO(33));
- // sp, fp, gp
+ // sp, fp, gp.
PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
REG_INFO(29), REG_INFO(30), REG_INFO(28));
- // pc
+ // pc.
PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
REG_INFO(31), REG_INFO(34));
+
#undef REG_INFO
+#undef FPU_REG_INFO
}
-void Debugger::Debug() {
+
+void MipsDebugger::PrintAllRegsIncludingFPU() {
+#define FPU_REG_INFO(n) FPURegisters::Name(n), FPURegisters::Name(n+1), \
+ GetFPURegisterValueInt(n+1), \
+ GetFPURegisterValueInt(n), \
+ GetFPURegisterValueDouble(n)
+
+ PrintAllRegs();
+
+ PrintF("\n\n");
+ // f0, f1, f2, ... f31.
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(0) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(2) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(4) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(6) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(8) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(10));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(12));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(14));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(16));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(18));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(20));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(22));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(24));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(26));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(28));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(30));
+
+#undef REG_INFO
+#undef FPU_REG_INFO
+}
+
+
+void MipsDebugger::Debug() {
intptr_t last_pc = -1;
bool done = false;
@@ -253,8 +364,9 @@ void Debugger::Debug() {
char cmd[COMMAND_SIZE + 1];
char arg1[ARG_SIZE + 1];
char arg2[ARG_SIZE + 1];
+ char* argv[3] = { cmd, arg1, arg2 };
- // make sure to have a proper terminating character if reaching the limit
+ // Make sure to have a proper terminating character if reaching the limit.
cmd[COMMAND_SIZE] = 0;
arg1[ARG_SIZE] = 0;
arg2[ARG_SIZE] = 0;
@@ -267,10 +379,10 @@ void Debugger::Debug() {
if (last_pc != sim_->get_pc()) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
+ // Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
dasm.InstructionDecode(buffer,
- reinterpret_cast<byte_*>(sim_->get_pc()));
+ reinterpret_cast<byte*>(sim_->get_pc()));
PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.start());
last_pc = sim_->get_pc();
}
@@ -280,19 +392,21 @@ void Debugger::Debug() {
} else {
// Use sscanf to parse the individual parts of the command line. At the
// moment no command expects more than two parameters.
- int args = SScanF(line,
+ int argc = SScanF(line,
"%" XSTR(COMMAND_SIZE) "s "
"%" XSTR(ARG_SIZE) "s "
"%" XSTR(ARG_SIZE) "s",
cmd, arg1, arg2);
if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
- if (!(reinterpret_cast<Instruction*>(sim_->get_pc())->IsTrap())) {
+ Instruction* instr = reinterpret_cast<Instruction*>(sim_->get_pc());
+ if (!(instr->IsTrap()) ||
+ instr->InstructionBits() == rtCallRedirInstr) {
sim_->InstructionDecode(
- reinterpret_cast<Instruction*>(sim_->get_pc()));
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
} else {
// Allow si to jump over generated breakpoints.
PrintF("/!\\ Jumping over generated breakpoint.\n");
- sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+ sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
}
} else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
// Execute the one instruction we broke at with breakpoints disabled.
@@ -300,23 +414,65 @@ void Debugger::Debug() {
// Leave the debugger shell.
done = true;
} else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
- if (args == 2) {
+ if (argc == 2) {
int32_t value;
+ float fvalue;
if (strcmp(arg1, "all") == 0) {
PrintAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ PrintAllRegsIncludingFPU();
} else {
- if (GetValue(arg1, &value)) {
+ int regnum = Registers::Number(arg1);
+ int fpuregnum = FPURegisters::Number(arg1);
+
+ if (regnum != kInvalidRegister) {
+ value = GetRegisterValue(regnum);
PrintF("%s: 0x%08x %d \n", arg1, value, value);
+ } else if (fpuregnum != kInvalidFPURegister) {
+ if (fpuregnum % 2 == 1) {
+ value = GetFPURegisterValueInt(fpuregnum);
+ fvalue = GetFPURegisterValueFloat(fpuregnum);
+ PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
+ } else {
+ double dfvalue;
+ int32_t lvalue1 = GetFPURegisterValueInt(fpuregnum);
+ int32_t lvalue2 = GetFPURegisterValueInt(fpuregnum + 1);
+ dfvalue = GetFPURegisterValueDouble(fpuregnum);
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n",
+ FPURegisters::Name(fpuregnum+1),
+ FPURegisters::Name(fpuregnum),
+ lvalue1,
+ lvalue2,
+ dfvalue);
+ }
} else {
PrintF("%s unrecognized\n", arg1);
}
}
} else {
- PrintF("print <register>\n");
+ if (argc == 3) {
+ if (strcmp(arg2, "single") == 0) {
+ int32_t value;
+ float fvalue;
+ int fpuregnum = FPURegisters::Number(arg1);
+
+ if (fpuregnum != kInvalidFPURegister) {
+ value = GetFPURegisterValueInt(fpuregnum);
+ fvalue = GetFPURegisterValueFloat(fpuregnum);
+ PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("print <fpu register> single\n");
+ }
+ } else {
+ PrintF("print <register> or print <fpu register> single\n");
+ }
}
} else if ((strcmp(cmd, "po") == 0)
|| (strcmp(cmd, "printobject") == 0)) {
- if (args == 2) {
+ if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
Object* obj = reinterpret_cast<Object*>(value);
@@ -333,45 +489,106 @@ void Debugger::Debug() {
} else {
PrintF("printobject <value>\n");
}
- } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0)) {
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int32_t* cur = NULL;
+ int32_t* end = NULL;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
+ } else { // Command "mem".
+ int32_t value;
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int32_t*>(value);
+ next_arg++;
+ }
+
+ int32_t words = 10; // Default, also used when no count is given.
+ if (argc == next_arg + 1) {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ PrintF(" 0x%08x: 0x%08x %10d",
+ reinterpret_cast<intptr_t>(cur), *cur, *cur);
+ HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+ int value = *cur;
+ Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ if (current_heap->Contains(obj) || ((value & 1) == 0)) {
+ PrintF(" (");
+ if ((value & 1) == 0) {
+ PrintF("smi %d", value / 2);
+ } else {
+ obj->ShortPrint();
+ }
+ PrintF(")");
+ }
+ PrintF("\n");
+ cur++;
+ }
+
+ } else if ((strcmp(cmd, "disasm") == 0) ||
+ (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
+ // Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
- byte_* cur = NULL;
- byte_* end = NULL;
-
- if (args == 1) {
- cur = reinterpret_cast<byte_*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstructionSize);
- } else if (args == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte_*>(value);
- // no length parameter passed, assume 10 instructions
- end = cur + (10 * Instruction::kInstructionSize);
+ byte* cur = NULL;
+ byte* end = NULL;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * Instruction::kInstrSize);
+ } else if (argc == 2) {
+ int regnum = Registers::Number(arg1);
+ if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * Instruction::kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * Instruction::kInstrSize);
+ }
}
} else {
int32_t value1;
int32_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
- cur = reinterpret_cast<byte_*>(value1);
- end = cur + (value2 * Instruction::kInstructionSize);
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * Instruction::kInstrSize);
}
}
while (cur < end) {
dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n", cur, buffer.start());
- cur += Instruction::kInstructionSize;
+ PrintF(" 0x%08x %s\n",
+ reinterpret_cast<intptr_t>(cur), buffer.start());
+ cur += Instruction::kInstrSize;
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
v8::internal::OS::DebugBreak();
PrintF("regaining control from gdb\n");
} else if (strcmp(cmd, "break") == 0) {
- if (args == 2) {
+ if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
@@ -389,44 +606,104 @@ void Debugger::Debug() {
}
} else if (strcmp(cmd, "flags") == 0) {
PrintF("No flags on MIPS !\n");
- } else if (strcmp(cmd, "unstop") == 0) {
- PrintF("Unstop command not implemented on MIPS.");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int32_t value;
+ intptr_t stop_pc = sim_->get_pc() -
+ 2 * Instruction::kInstrSize;
+ Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+ Instruction* msg_address =
+ reinterpret_cast<Instruction*>(stop_pc +
+ Instruction::kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->IsStopInstruction(stop_instr)) {
+ stop_instr->SetInstructionBits(kNopInstr);
+ msg_address->SetInstructionBits(kNopInstr);
+ } else {
+ PrintF("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ PrintF("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->PrintStopInfo(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->PrintStopInfo(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->EnableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->EnableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->DisableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->DisableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ PrintF("Wrong usage. Use help command for more information.\n");
+ }
} else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
- // Print registers and disassemble
+ // Print registers and disassemble.
PrintAllRegs();
PrintF("\n");
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
+ // Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
- byte_* cur = NULL;
- byte_* end = NULL;
+ byte* cur = NULL;
+ byte* end = NULL;
- if (args == 1) {
- cur = reinterpret_cast<byte_*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstructionSize);
- } else if (args == 2) {
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * Instruction::kInstrSize);
+ } else if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte_*>(value);
+ cur = reinterpret_cast<byte*>(value);
// no length parameter passed, assume 10 instructions
- end = cur + (10 * Instruction::kInstructionSize);
+ end = cur + (10 * Instruction::kInstrSize);
}
} else {
int32_t value1;
int32_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
- cur = reinterpret_cast<byte_*>(value1);
- end = cur + (value2 * Instruction::kInstructionSize);
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * Instruction::kInstrSize);
}
}
while (cur < end) {
dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n", cur, buffer.start());
- cur += Instruction::kInstructionSize;
+ PrintF(" 0x%08x %s\n",
+ reinterpret_cast<intptr_t>(cur), buffer.start());
+ cur += Instruction::kInstrSize;
}
} else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
PrintF("cont\n");
@@ -438,20 +715,43 @@ void Debugger::Debug() {
PrintF(" use register name 'all' to print all registers\n");
PrintF("printobject <register>\n");
PrintF(" print an object from a register (alias 'po')\n");
+ PrintF("stack [<words>]\n");
+ PrintF(" dump stack content, default dump 10 words)\n");
+ PrintF("mem <address> [<words>]\n");
+ PrintF(" dump memory content, default dump 10 words)\n");
PrintF("flags\n");
PrintF(" print flags\n");
PrintF("disasm [<instructions>]\n");
- PrintF("disasm [[<address>] <instructions>]\n");
- PrintF(" disassemble code, default is 10 instructions from pc\n");
+ PrintF("disasm [<address/register>]\n");
+ PrintF("disasm [[<address/register>] <instructions>]\n");
+ PrintF(" disassemble code, default is 10 instructions\n");
+ PrintF(" from pc (alias 'di')\n");
PrintF("gdb\n");
PrintF(" enter gdb\n");
PrintF("break <address>\n");
PrintF(" set a break point on the address\n");
PrintF("del\n");
PrintF(" delete the breakpoint\n");
- PrintF("unstop\n");
- PrintF(" ignore the stop instruction at the current location");
- PrintF(" from now on\n");
+ PrintF("stop feature:\n");
+ PrintF(" Description:\n");
+ PrintF(" Stops are debug instructions inserted by\n");
+ PrintF(" the Assembler::stop() function.\n");
+ PrintF(" When hitting a stop, the Simulator will\n");
+ PrintF(" stop and and give control to the Debugger.\n");
+ PrintF(" All stop codes are watched:\n");
+ PrintF(" - They can be enabled / disabled: the Simulator\n");
+ PrintF(" will / won't stop when hitting them.\n");
+ PrintF(" - The Simulator keeps track of how many times they \n");
+ PrintF(" are met. (See the info command.) Going over a\n");
+ PrintF(" disabled stop still increases its counter. \n");
+ PrintF(" Commands:\n");
+ PrintF(" stop info all/<code> : print infos about number <code>\n");
+ PrintF(" or all stop(s).\n");
+ PrintF(" stop enable/disable all/<code> : enables / disables\n");
+ PrintF(" all or number <code> stop(s)\n");
+ PrintF(" stop unstop\n");
+ PrintF(" ignore the stop instruction at the current location\n");
+ PrintF(" from now on\n");
} else {
PrintF("Unknown command: %s\n", cmd);
}
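// Editor's sketch: an illustrative "stop" debugging session, assuming the
// "sim> " prompt and a hypothetical stop code 64 (codes, counters, and exact
// column spacing are made up for illustration):
//
//   sim> stop info all
//   Stop information:
//   stop 64 - 0x40:   Enabled,   counter = 3
//   sim> stop disable 64
//   sim> stop info 64
//   stop 64 - 0x40:   Disabled,  counter = 3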
@@ -471,29 +771,120 @@ void Debugger::Debug() {
}
-// Create one simulator per thread and keep it in thread local storage.
-static v8::internal::Thread::LocalStorageKey simulator_key;
+static bool ICacheMatch(void* one, void* two) {
+ ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
+ ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+ return one == two;
+}
+
+
+static uint32_t ICacheHash(void* key) {
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
+}
+
+
+static bool AllOnOnePage(uintptr_t start, int size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
-bool Simulator::initialized_ = false;
+void Simulator::FlushICache(v8::internal::HashMap* i_cache,
+ void* start_addr,
+ size_t size) {
+ intptr_t start = reinterpret_cast<intptr_t>(start_addr);
+ int intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePage(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ ASSERT_EQ(0, start & CachePage::kPageMask);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePage(i_cache, start, size);
+ }
+}
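// Editor's worked example, assuming 4-byte cache lines and 4 KB pages as in
// CachePage (kLineMask == 3; the constants are not shown in this hunk):
// flushing start_addr = 0x1006 with size = 3 gives intra_line = 2, so start
// is rounded down to 0x1004 and size grows to 5, then up to the line
// multiple 8. Both ends fall on the page at 0x1000, so the while loop is
// skipped and a single FlushOnePage(i_cache, 0x1004, 8) invalidates the two
// validity bytes covering the lines at 0x1004 and 0x1008.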
-void Simulator::Initialize() {
- if (initialized_) return;
- simulator_key = v8::internal::Thread::CreateThreadLocalKey();
- initialized_ = true;
- ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
+CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
+ v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
+ ICacheHash(page),
+ true);
+ if (entry->value == NULL) {
+ CachePage* new_page = new CachePage();
+ entry->value = new_page;
+ }
+ return reinterpret_cast<CachePage*>(entry->value);
+}
+
+
+// Flush from start up to and not including start + size.
+void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
+ intptr_t start,
+ int size) {
+ ASSERT(size <= CachePage::kPageSize);
+ ASSERT(AllOnOnePage(start, size - 1));
+ ASSERT((start & CachePage::kLineMask) == 0);
+ ASSERT((size & CachePage::kLineMask) == 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* valid_bytemap = cache_page->ValidityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+
+void Simulator::CheckICache(v8::internal::HashMap* i_cache,
+ Instruction* instr) {
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* cache_valid_byte = cache_page->ValidityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ CHECK(memcmp(reinterpret_cast<void*>(instr),
+ cache_page->CachedData(offset),
+ Instruction::kInstrSize) == 0);
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
}
-Simulator::Simulator() {
- Initialize();
+void Simulator::Initialize(Isolate* isolate) {
+ if (isolate->simulator_initialized()) return;
+ isolate->set_simulator_initialized(true);
+ ::v8::internal::ExternalReference::set_redirector(isolate,
+ &RedirectExternalReference);
+}
+
+
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
+ i_cache_ = isolate_->simulator_i_cache();
+ if (i_cache_ == NULL) {
+ i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+ isolate_->set_simulator_i_cache(i_cache_);
+ }
+ Initialize(isolate);
// Setup simulator support first. Some of this information is needed to
// setup the architecture state.
- size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
- stack_ = reinterpret_cast<char*>(malloc(stack_size));
+ stack_ = reinterpret_cast<char*>(malloc(stack_size_));
pc_modified_ = false;
icount_ = 0;
+ break_count_ = 0;
break_pc_ = NULL;
break_instr_ = 0;
@@ -502,16 +893,23 @@ Simulator::Simulator() {
for (int i = 0; i < kNumSimuRegisters; i++) {
registers_[i] = 0;
}
+ for (int i = 0; i < kNumFPURegisters; i++) {
+ FPUregisters_[i] = 0;
+ }
+ FCSR_ = 0;
// The sp is initialized to point to the bottom (high address) of the
// allocated stack area. To be safe in potential stack underflows we leave
// some buffer below.
- registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64;
+ registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size_ - 64;
// The ra and pc are initialized to a known bad value that will cause an
// access violation if the simulator ever tries to execute it.
registers_[pc] = bad_ra;
registers_[ra] = bad_ra;
InitializeCoverage();
+ for (int i = 0; i < kNumExceptions; i++) {
+ exceptions[i] = 0;
+ }
}
@@ -524,12 +922,18 @@ Simulator::Simulator() {
// offset from the swi instruction so the simulator knows what to call.
class Redirection {
public:
- Redirection(void* external_function, bool fp_return)
+ Redirection(void* external_function, ExternalReference::Type type)
: external_function_(external_function),
swi_instruction_(rtCallRedirInstr),
- fp_return_(fp_return),
- next_(list_) {
- list_ = this;
+ type_(type),
+ next_(NULL) {
+ Isolate* isolate = Isolate::Current();
+ next_ = isolate->simulator_redirection();
+ Simulator::current(isolate)->
+ FlushICache(isolate->simulator_i_cache(),
+ reinterpret_cast<void*>(&swi_instruction_),
+ Instruction::kInstrSize);
+ isolate->set_simulator_redirection(this);
}
void* address_of_swi_instruction() {
@@ -537,14 +941,16 @@ class Redirection {
}
void* external_function() { return external_function_; }
- bool fp_return() { return fp_return_; }
+ ExternalReference::Type type() { return type_; }
- static Redirection* Get(void* external_function, bool fp_return) {
- Redirection* current;
- for (current = list_; current != NULL; current = current->next_) {
+ static Redirection* Get(void* external_function,
+ ExternalReference::Type type) {
+ Isolate* isolate = Isolate::Current();
+ Redirection* current = isolate->simulator_redirection();
+ for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) return current;
}
- return new Redirection(external_function, fp_return);
+ return new Redirection(external_function, type);
}
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -557,31 +963,30 @@ class Redirection {
private:
void* external_function_;
uint32_t swi_instruction_;
- bool fp_return_;
+ ExternalReference::Type type_;
Redirection* next_;
- static Redirection* list_;
};
-Redirection* Redirection::list_ = NULL;
-
-
void* Simulator::RedirectExternalReference(void* external_function,
- bool fp_return) {
- Redirection* redirection = Redirection::Get(external_function, fp_return);
+ ExternalReference::Type type) {
+ Redirection* redirection = Redirection::Get(external_function, type);
return redirection->address_of_swi_instruction();
}
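// Editor's sketch of the redirection mechanism above (annotation, not part
// of the patch):
//
//   RedirectExternalReference(F, type)
//     -> Redirection::Get(F, type)          // reuse or allocate a node
//     -> &redirection->swi_instruction_     // address handed to generated code
//
// Generated code "calls" that address; the simulator executes
// rtCallRedirInstr, and SoftwareInterrupt() maps the trapping pc back to the
// owning node via Redirection::FromSwiInstruction() (pointer arithmetic on
// the field offset), then invokes the real host function F according to
// type().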
// Get the active Simulator for the current thread.
-Simulator* Simulator::current() {
- Initialize();
- Simulator* sim = reinterpret_cast<Simulator*>(
- v8::internal::Thread::GetThreadLocal(simulator_key));
+Simulator* Simulator::current(Isolate* isolate) {
+ v8::internal::Isolate::PerIsolateThreadData* isolate_data =
+ isolate->FindOrAllocatePerThreadDataForThisThread();
+  ASSERT(isolate_data != NULL);
+
+ Simulator* sim = isolate_data->simulator();
if (sim == NULL) {
- // TODO(146): delete the simulator object when a thread goes away.
- sim = new Simulator();
- v8::internal::Thread::SetThreadLocal(simulator_key, sim);
+ // TODO(146): delete the simulator object when a thread/isolate goes away.
+ sim = new Simulator(isolate);
+ isolate_data->set_simulator(sim);
}
return sim;
}
@@ -595,18 +1000,26 @@ void Simulator::set_register(int reg, int32_t value) {
pc_modified_ = true;
}
- // zero register always hold 0.
+ // Zero register always holds 0.
registers_[reg] = (reg == 0) ? 0 : value;
}
+
void Simulator::set_fpu_register(int fpureg, int32_t value) {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
FPUregisters_[fpureg] = value;
}
+
+void Simulator::set_fpu_register_float(int fpureg, float value) {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ *BitCast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+
void Simulator::set_fpu_register_double(int fpureg, double value) {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- *v8i::BitCast<double*>(&FPUregisters_[fpureg]) = value;
+ *BitCast<double*>(&FPUregisters_[fpureg]) = value;
}
@@ -620,22 +1033,171 @@ int32_t Simulator::get_register(int reg) const {
return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
}
+
int32_t Simulator::get_fpu_register(int fpureg) const {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
return FPUregisters_[fpureg];
}
+
+int64_t Simulator::get_fpu_register_long(int fpureg) const {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
+ return *BitCast<int64_t*>(
+ const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+
+float Simulator::get_fpu_register_float(int fpureg) const {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return *BitCast<float*>(
+ const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+
double Simulator::get_fpu_register_double(int fpureg) const {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- return *v8i::BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
+ return *BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
}
+
+// For use in calls that take two double values, constructed either
+// from a0-a3 or f12 and f14.
+void Simulator::GetFpArgs(double* x, double* y) {
+ if (!IsMipsSoftFloatABI) {
+ *x = get_fpu_register_double(12);
+ *y = get_fpu_register_double(14);
+ } else {
+ // We use a char buffer to get around the strict-aliasing rules which
+ // otherwise allow the compiler to optimize away the copy.
+ char buffer[sizeof(*x)];
+ int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+
+ // Registers a0 and a1 -> x.
+ reg_buffer[0] = get_register(a0);
+ reg_buffer[1] = get_register(a1);
+ memcpy(x, buffer, sizeof(buffer));
+
+ // Registers a2 and a3 -> y.
+ reg_buffer[0] = get_register(a2);
+ reg_buffer[1] = get_register(a3);
+ memcpy(y, buffer, sizeof(buffer));
+ }
+}
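// Editor's note: the char buffer + memcpy above is the portable way to
// type-pun two 32-bit argument registers into a double. A direct cast such
// as
//
//   double x = *reinterpret_cast<double*>(&registers_[a0]);  // aliasing UB
//
// reads int32_t objects through a double lvalue, which violates the
// strict-aliasing rule and may be miscompiled at higher optimization levels.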
+
+
+// For use in calls that take one double value, constructed either
+// from a0 and a1 or f12.
+void Simulator::GetFpArgs(double* x) {
+ if (!IsMipsSoftFloatABI) {
+ *x = get_fpu_register_double(12);
+ } else {
+ // We use a char buffer to get around the strict-aliasing rules which
+ // otherwise allow the compiler to optimize away the copy.
+ char buffer[sizeof(*x)];
+ int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+ // Registers a0 and a1 -> x.
+ reg_buffer[0] = get_register(a0);
+ reg_buffer[1] = get_register(a1);
+ memcpy(x, buffer, sizeof(buffer));
+ }
+}
+
+
+// For use in calls that take one double value constructed either
+// from a0 and a1 or f12 and one integer value.
+void Simulator::GetFpArgs(double* x, int32_t* y) {
+ if (!IsMipsSoftFloatABI) {
+ *x = get_fpu_register_double(12);
+ *y = get_register(a2);
+ } else {
+ // We use a char buffer to get around the strict-aliasing rules which
+ // otherwise allow the compiler to optimize away the copy.
+ char buffer[sizeof(*x)];
+ int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+      // Registers a0 and a1 -> x.
+ reg_buffer[0] = get_register(a0);
+ reg_buffer[1] = get_register(a1);
+ memcpy(x, buffer, sizeof(buffer));
+
+      // Register a2 -> y.
+ reg_buffer[0] = get_register(a2);
+ memcpy(y, buffer, sizeof(*y));
+ }
+}
+
+
+// The return value is either in v0/v1 or f0.
+void Simulator::SetFpResult(const double& result) {
+ if (!IsMipsSoftFloatABI) {
+ set_fpu_register_double(0, result);
+ } else {
+ char buffer[2 * sizeof(registers_[0])];
+ int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+ memcpy(buffer, &result, sizeof(buffer));
+ // Copy result to v0 and v1.
+ set_register(v0, reg_buffer[0]);
+ set_register(v1, reg_buffer[1]);
+ }
+}
+
+
+// Helper functions for setting and testing the FCSR register's bits.
+void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
+ if (value) {
+ FCSR_ |= (1 << cc);
+ } else {
+ FCSR_ &= ~(1 << cc);
+ }
+}
+
+
+bool Simulator::test_fcsr_bit(uint32_t cc) {
+ return FCSR_ & (1 << cc);
+}
+
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round_error(double original, double rounded) {
+ bool ret = false;
+
+ if (!isfinite(original) || !isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+ ret = true;
+ }
+
+ if (rounded > INT_MAX || rounded < INT_MIN) {
+ set_fcsr_bit(kFCSROverflowFlagBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
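// Editor's example (illustrative values): converting fs = 3.0e9 to a word
// rounds to 3.0e9 > INT_MAX, so set_fcsr_round_error() raises the overflow
// and invalid-operation flags and returns true, and the caller substitutes
// kFPUInvalidResult, mirroring the ROUND_W_D handling further below:
//
//   double rounded = floor(fs + 0.5);
//   set_fpu_register(fd_reg, static_cast<int32_t>(rounded));
//   if (set_fcsr_round_error(fs, rounded)) {
//     set_fpu_register(fd_reg, kFPUInvalidResult);
//   }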
+
+
// Raw access to the PC register.
void Simulator::set_pc(int32_t value) {
pc_modified_ = true;
registers_[pc] = value;
}
+
+bool Simulator::has_bad_pc() const {
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+
// Raw access to the PC register without the special adjustment when reading.
int32_t Simulator::get_pc() const {
return registers_[pc];
@@ -651,24 +1213,40 @@ int32_t Simulator::get_pc() const {
// get the correct MIPS-like behaviour on unaligned accesses.
int Simulator::ReadW(int32_t addr, Instruction* instr) {
- if ((addr & v8i::kPointerAlignmentMask) == 0) {
+  if (addr >= 0 && addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ MipsDebugger dbg(this);
+ dbg.Debug();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
}
- PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
- OS::Abort();
+ PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MipsDebugger dbg(this);
+ dbg.Debug();
return 0;
}
void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
- if ((addr & v8i::kPointerAlignmentMask) == 0) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ MipsDebugger dbg(this);
+ dbg.Debug();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
return;
}
- PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
- OS::Abort();
+ PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MipsDebugger dbg(this);
+ dbg.Debug();
}
@@ -677,7 +1255,9 @@ double Simulator::ReadD(int32_t addr, Instruction* instr) {
double* ptr = reinterpret_cast<double*>(addr);
return *ptr;
}
- PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned (double) read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
return 0;
}
@@ -689,7 +1269,9 @@ void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
*ptr = value;
return;
}
- PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned (double) write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
}
@@ -699,7 +1281,9 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
}
- PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
return 0;
}
@@ -710,7 +1294,9 @@ int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
}
- PrintF("Unaligned signed halfword read at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned signed halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
return 0;
}
@@ -722,7 +1308,9 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
*ptr = value;
return;
}
- PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
}
@@ -733,7 +1321,9 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
*ptr = value;
return;
}
- PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
}
@@ -746,7 +1336,7 @@ uint32_t Simulator::ReadBU(int32_t addr) {
int32_t Simulator::ReadB(int32_t addr) {
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- return ((*ptr << 24) >> 24) & 0xff;
+ return *ptr;
}
@@ -773,7 +1363,7 @@ uintptr_t Simulator::StackLimit() const {
// Unsupported instructions use Format to print an error and stop execution.
void Simulator::Format(Instruction* instr, const char* format) {
PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
- instr, format);
+ reinterpret_cast<intptr_t>(instr), format);
UNIMPLEMENTED_MIPS();
}
@@ -782,19 +1372,36 @@ void Simulator::Format(Instruction* instr, const char* format) {
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair which is essentially two 32-bit values stuffed into a
// 64-bit value. With the code below we assume that all runtime calls return
-// 64 bits of result. If they don't, the r1 result register contains a bogus
+// 64 bits of result. If they don't, the v1 result register contains a bogus
// value, which is fine because it is caller-saved.
typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg1,
int32_t arg2,
- int32_t arg3);
-typedef double (*SimulatorRuntimeFPCall)(double fparg0,
- double fparg1);
-
+ int32_t arg3,
+ int32_t arg4,
+ int32_t arg5);
+typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
+ int32_t arg1,
+ int32_t arg2,
+ int32_t arg3);
+
+// This signature supports direct calls into an API function's native
+// callback (refer to InvocationCallback in v8.h).
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+
+// This signature supports direct call to accessor getter callback.
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
+ int32_t arg1);
// Software interrupt instructions are used by the simulator to call into the
-// C-based V8 runtime.
+// C-based V8 runtime. They are also used for debugging with simulator.
void Simulator::SoftwareInterrupt(Instruction* instr) {
+  // There are several instructions that could get us here: the break_
+  // instruction, or several variants of traps. All are "SPECIAL" class
+  // opcodes, distinguished by the function field.
+ int32_t func = instr->FunctionFieldRaw();
+ uint32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1;
+
// We first check if we met a call_rt_redirected.
if (instr->InstructionBits() == rtCallRedirInstr) {
Redirection* redirection = Redirection::FromSwiInstruction(instr);
@@ -802,55 +1409,257 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
int32_t arg1 = get_register(a1);
int32_t arg2 = get_register(a2);
int32_t arg3 = get_register(a3);
- // fp args are (not always) in f12 and f14.
- // See MIPS conventions for more details.
- double fparg0 = get_fpu_register_double(f12);
- double fparg1 = get_fpu_register_double(f14);
+ int32_t arg4 = 0;
+ int32_t arg5 = 0;
+
+ // Need to check if sp is valid before assigning arg4, arg5.
+ // This is a fix for cctest test-api/CatchStackOverflow which causes
+    // the stack to overflow. For some reason the ARM simulator does not
+    // need this stack check here.
+ int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
+ int32_t* stack = reinterpret_cast<int32_t*>(stack_);
+ if (stack_pointer >= stack && stack_pointer < stack + stack_size_ - 5) {
+ // Args 4 and 5 are on the stack after the reserved space for args 0..3.
+ arg4 = stack_pointer[4];
+ arg5 = stack_pointer[5];
+ }
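// Editor's note on the layout assumed above (MIPS O32 calling convention):
// the caller always reserves four words of home space for a0..a3, so the
// fifth and sixth arguments live just past it:
//
//   sp + 0x00 .. sp + 0x0c : reserved home slots for a0..a3
//   sp + 0x10              : arg4  (stack_pointer[4])
//   sp + 0x14              : arg5  (stack_pointer[5])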
+
+ bool fp_call =
+ (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+
+ if (!IsMipsSoftFloatABI) {
+ // With the hard floating point calling convention, double
+ // arguments are passed in FPU registers. Fetch the arguments
+ // from there and call the builtin using soft floating point
+ // convention.
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ arg0 = get_fpu_register(f12);
+ arg1 = get_fpu_register(f13);
+ arg2 = get_fpu_register(f14);
+ arg3 = get_fpu_register(f15);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ arg0 = get_fpu_register(f12);
+ arg1 = get_fpu_register(f13);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ arg0 = get_fpu_register(f12);
+ arg1 = get_fpu_register(f13);
+ arg2 = get_register(a2);
+ break;
+ default:
+ break;
+ }
+ }
+
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_ra = get_register(ra);
- if (redirection->fp_return()) {
- intptr_t external =
+
+ intptr_t external =
reinterpret_cast<intptr_t>(redirection->external_function());
+
+ // Based on CpuFeatures::IsSupported(FPU), Mips will use either hardware
+ // FPU, or gcc soft-float routines. Hardware FPU is simulated in this
+ // simulator. Soft-float has additional abstraction of ExternalReference,
+ // to support serialization.
+ if (fp_call) {
SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ double dval0, dval1;
+ int32_t ival;
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ GetFpArgs(&dval0, &dval1);
+ PrintF("Call to host function at %p with args %f, %f",
+ FUNCTION_ADDR(target), dval0, dval1);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ GetFpArgs(&dval0);
+ PrintF("Call to host function at %p with arg %f",
+ FUNCTION_ADDR(target), dval0);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ GetFpArgs(&dval0, &ival);
+ PrintF("Call to host function at %p with args %f, %d",
+ FUNCTION_ADDR(target), dval0, ival);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ double result = target(arg0, arg1, arg2, arg3);
+ if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
+ SetFpResult(result);
+ } else {
+ int32_t gpreg_pair[2];
+ memcpy(&gpreg_pair[0], &result, 2 * sizeof(int32_t));
+ set_register(v0, gpreg_pair[0]);
+ set_register(v1, gpreg_pair[1]);
+ }
+ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ // See DirectCEntryStub::GenerateCall for explanation of register usage.
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p with args %f, %f\n",
- FUNCTION_ADDR(target), fparg0, fparg1);
+ PrintF("Call to host function at %p args %08x\n",
+ FUNCTION_ADDR(target), arg1);
}
- double result = target(fparg0, fparg1);
- set_fpu_register_double(f0, result);
+ v8::Handle<v8::Value> result = target(arg1);
+ *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+ set_register(v0, arg0);
+ } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ // See DirectCEntryStub::GenerateCall for explanation of register usage.
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x %08x\n",
+ FUNCTION_ADDR(target), arg1, arg2);
+ }
+ v8::Handle<v8::Value> result = target(arg1, arg2);
+ *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+ set_register(v0, arg0);
} else {
- intptr_t external =
- reinterpret_cast<int32_t>(redirection->external_function());
SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
+ reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim) {
PrintF(
- "Call to host function at %p with args %08x, %08x, %08x, %08x\n",
+ "Call to host function at %p "
+ "args %08x, %08x, %08x, %08x, %08x, %08x\n",
FUNCTION_ADDR(target),
arg0,
arg1,
arg2,
- arg3);
+ arg3,
+ arg4,
+ arg5);
}
- int64_t result = target(arg0, arg1, arg2, arg3);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", lo_res);
- }
- set_register(v0, lo_res);
- set_register(v1, hi_res);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ set_register(v0, static_cast<int32_t>(result));
+ set_register(v1, static_cast<int32_t>(result >> 32));
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08x : %08x\n", get_register(v1), get_register(v0));
}
set_register(ra, saved_ra);
set_pc(get_register(ra));
+
+ } else if (func == BREAK && code <= kMaxStopCode) {
+ if (IsWatchpoint(code)) {
+ PrintWatchpoint(code);
+ } else {
+ IncreaseStopCounter(code);
+ HandleStop(code, instr);
+ }
} else {
- Debugger dbg(this);
+ // All remaining break_ codes, and all traps are handled here.
+ MipsDebugger dbg(this);
dbg.Debug();
}
}
+
+// Stop helper functions.
+bool Simulator::IsWatchpoint(uint32_t code) {
+ return (code <= kMaxWatchpointCode);
+}
+
+
+void Simulator::PrintWatchpoint(uint32_t code) {
+ MipsDebugger dbg(this);
+ ++break_count_;
+ PrintF("\n---- break %d marker: %3d (instr count: %8d) ----------"
+ "----------------------------------",
+ code, break_count_, icount_);
+ dbg.PrintAllRegs(); // Print registers and continue running.
+}
+
+
+void Simulator::HandleStop(uint32_t code, Instruction* instr) {
+ // Stop if it is enabled, otherwise go on jumping over the stop
+ // and the message address.
+ if (IsEnabledStop(code)) {
+ MipsDebugger dbg(this);
+ dbg.Stop(instr);
+ } else {
+ set_pc(get_pc() + 2 * Instruction::kInstrSize);
+ }
+}
+
+
+bool Simulator::IsStopInstruction(Instruction* instr) {
+ int32_t func = instr->FunctionFieldRaw();
+ uint32_t code = static_cast<uint32_t>(instr->Bits(25, 6));
+ return (func == BREAK) && code > kMaxWatchpointCode && code <= kMaxStopCode;
+}
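// Editor's note: a break instruction carries its code in bits 25..6, hence
// instr->Bits(25, 6) above. Assuming the constants-mips.h values
// kMaxWatchpointCode = 31 and kMaxStopCode = 127 (not shown in this hunk),
// codes 0..31 behave as watchpoints (print registers, keep running), codes
// 32..127 as stops (break into the debugger when enabled), and anything
// larger drops straight into MipsDebugger.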
+
+
+bool Simulator::IsEnabledStop(uint32_t code) {
+ ASSERT(code <= kMaxStopCode);
+ ASSERT(code > kMaxWatchpointCode);
+ return !(watched_stops[code].count & kStopDisabledBit);
+}
+
+
+void Simulator::EnableStop(uint32_t code) {
+ if (!IsEnabledStop(code)) {
+ watched_stops[code].count &= ~kStopDisabledBit;
+ }
+}
+
+
+void Simulator::DisableStop(uint32_t code) {
+ if (IsEnabledStop(code)) {
+ watched_stops[code].count |= kStopDisabledBit;
+ }
+}
+
+
+void Simulator::IncreaseStopCounter(uint32_t code) {
+ ASSERT(code <= kMaxStopCode);
+ if ((watched_stops[code].count & ~(1 << 31)) == 0x7fffffff) {
+ PrintF("Stop counter for code %i has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n", code);
+ watched_stops[code].count = 0;
+ EnableStop(code);
+ } else {
+ watched_stops[code].count++;
+ }
+}
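// Editor's sketch of watched_stops[code].count, assuming
// kStopDisabledBit == 1 << 31 as declared in simulator-mips.h:
//
//   bit 31           bits 30..0
//   +----------------+--------------------------+
//   | disabled flag  | times the stop was hit   |
//   +----------------+--------------------------+
//
// IsEnabledStop() tests the top bit; IncreaseStopCounter() resets the count
// (and re-enables the stop) once the low 31 bits reach 0x7fffffff.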
+
+
+// Print a stop status.
+void Simulator::PrintStopInfo(uint32_t code) {
+ if (code <= kMaxWatchpointCode) {
+ PrintF("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watched_stops[code].count & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watched_stops[code].desc) {
+ PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
+ code, code, state, count, watched_stops[code].desc);
+ } else {
+ PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n",
+ code, code, state, count);
+ }
+ }
+}
+
+
void Simulator::SignalExceptions() {
for (int i = 1; i < kNumExceptions; i++) {
if (exceptions[i] != 0) {
@@ -859,51 +1668,52 @@ void Simulator::SignalExceptions() {
}
}
-// Handle execution based on instruction types.
-void Simulator::DecodeTypeRegister(Instruction* instr) {
- // Instruction fields
- Opcode op = instr->OpcodeFieldRaw();
- int32_t rs_reg = instr->RsField();
- int32_t rs = get_register(rs_reg);
- uint32_t rs_u = static_cast<uint32_t>(rs);
- int32_t rt_reg = instr->RtField();
- int32_t rt = get_register(rt_reg);
- uint32_t rt_u = static_cast<uint32_t>(rt);
- int32_t rd_reg = instr->RdField();
- uint32_t sa = instr->SaField();
- int32_t fs_reg= instr->FsField();
-
- // ALU output
- // It should not be used as is. Instructions using it should always initialize
- // it first.
- int32_t alu_out = 0x12345678;
- // Output or temporary for floating point.
- double fp_out = 0.0;
-
- // For break and trap instructions.
- bool do_interrupt = false;
-
- // For jr and jalr
- // Get current pc.
- int32_t current_pc = get_pc();
- // Next pc
- int32_t next_pc = 0;
+// Handle execution based on instruction types.
- // ---------- Configuration
+void Simulator::ConfigureTypeRegister(Instruction* instr,
+ int32_t& alu_out,
+ int64_t& i64hilo,
+ uint64_t& u64hilo,
+ int32_t& next_pc,
+ bool& do_interrupt) {
+  // Every local variable declared here needs to be const, so that results
+  // can only flow out through the reference parameters; a write to a local
+  // copy would otherwise be lost to DecodeTypeRegister.
+
+ // Instruction fields.
+ const Opcode op = instr->OpcodeFieldRaw();
+ const int32_t rs_reg = instr->RsValue();
+ const int32_t rs = get_register(rs_reg);
+ const uint32_t rs_u = static_cast<uint32_t>(rs);
+ const int32_t rt_reg = instr->RtValue();
+ const int32_t rt = get_register(rt_reg);
+ const uint32_t rt_u = static_cast<uint32_t>(rt);
+ const int32_t rd_reg = instr->RdValue();
+ const uint32_t sa = instr->SaValue();
+
+ const int32_t fs_reg = instr->FsValue();
+
+
+ // ---------- Configuration.
switch (op) {
- case COP1: // Coprocessor instructions
+ case COP1: // Coprocessor instructions.
switch (instr->RsFieldRaw()) {
- case BC1: // branch on coprocessor condition
+      case BC1: // Handled in DecodeTypeImmediate, should never come here.
UNREACHABLE();
break;
+ case CFC1:
+ // At the moment only FCSR is supported.
+ ASSERT(fs_reg == kFCSRRegister);
+ alu_out = FCSR_;
+ break;
case MFC1:
alu_out = get_fpu_register(fs_reg);
break;
case MFHC1:
- fp_out = get_fpu_register_double(fs_reg);
- alu_out = *v8i::BitCast<int32_t*>(&fp_out);
+ UNIMPLEMENTED_MIPS();
break;
+ case CTC1:
case MTC1:
case MTHC1:
// Do the store in the execution step.
@@ -923,13 +1733,22 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case JR:
case JALR:
- next_pc = get_register(instr->RsField());
+ next_pc = get_register(instr->RsValue());
break;
case SLL:
alu_out = rt << sa;
break;
case SRL:
- alu_out = rt_u >> sa;
+ if (rs_reg == 0) {
+ // Regular logical right shift of a word by a fixed number of
+ // bits instruction. RS field is always equal to 0.
+ alu_out = rt_u >> sa;
+ } else {
+ // Logical right-rotate of a word by a fixed number of bits. This
+            // is a special case of the SRL instruction, added in MIPS32
+            // Release 2. The RS field is equal to 00001.
+ alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
+ }
break;
case SRA:
alu_out = rt >> sa;
@@ -938,7 +1757,16 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
alu_out = rt << rs;
break;
case SRLV:
- alu_out = rt_u >> rs;
+ if (sa == 0) {
+ // Regular logical right-shift of a word by a variable number of
+ // bits instruction. SA field is always equal to 0.
+ alu_out = rt_u >> rs;
+ } else {
+ // Logical right-rotate of a word by a variable number of bits.
+            // This is a special case of the SRLV instruction, added in
+            // MIPS32 Release 2. The SA field is equal to 00001.
+ alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
+ }
break;
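// Editor's worked example (illustrative): with rt_u = 0x80000001 and a shift
// amount of 1, the plain SRL encoding (rs field 0) yields 0x40000000, while
// the rotate encoding (rs field 00001) yields
// (0x80000001 >> 1) | (0x80000001 << 31) = 0xc0000000. SRLV above makes the
// same distinction on the sa field for variable shift amounts.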
case SRAV:
alu_out = rt >> rs;
@@ -950,14 +1778,10 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
alu_out = get_register(LO);
break;
case MULT:
- UNIMPLEMENTED_MIPS();
+ i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
break;
case MULTU:
- UNIMPLEMENTED_MIPS();
- break;
- case DIV:
- case DIVU:
- exceptions[kDivideByZero] = rt == 0;
+ u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
break;
case ADD:
if (HaveSameSign(rs, rt)) {
@@ -1003,8 +1827,9 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
case SLTU:
alu_out = rs_u < rt_u ? 1 : 0;
break;
- // Break and trap instructions
+ // Break and trap instructions.
case BREAK:
do_interrupt = true;
break;
case TGE:
@@ -1025,6 +1850,15 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
case TNE:
do_interrupt = rs != rt;
break;
+ case MOVN:
+ case MOVZ:
+ case MOVCI:
+ // No action taken on decode.
+ break;
+ case DIV:
+ case DIVU:
+ // div and divu never raise exceptions.
+ break;
default:
UNREACHABLE();
};
@@ -1034,43 +1868,130 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
case MUL:
alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
break;
+ case CLZ:
+ alu_out = __builtin_clz(rs_u);
+ break;
default:
UNREACHABLE();
- }
+ };
+ break;
+ case SPECIAL3:
+ switch (instr->FunctionFieldRaw()) {
+ case INS: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa;
+ uint16_t size = msb - lsb + 1;
+ uint32_t mask = (1 << size) - 1;
+ alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
+ break;
+ }
+ case EXT: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 1;
+ uint32_t mask = (1 << size) - 1;
+ alu_out = (rs_u & (mask << lsb)) >> lsb;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ };
break;
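// Editor's worked example (illustrative): INS with lsb = 8, msb = 15,
// rs_u = 0xab, rt_u = 0x12345678 gives size = 8, mask = 0xff and
// alu_out = (0x12345678 & ~0xff00) | ((0xab & 0xff) << 8) = 0x1234ab78.
// EXT with lsb = 4, msb = 7, rs_u = 0xabcd gives mask = 0xff and
// alu_out = (0xabcd & 0xff0) >> 4 = 0xbc.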
default:
UNREACHABLE();
};
+}
+
+
+void Simulator::DecodeTypeRegister(Instruction* instr) {
+ // Instruction fields.
+ const Opcode op = instr->OpcodeFieldRaw();
+ const int32_t rs_reg = instr->RsValue();
+ const int32_t rs = get_register(rs_reg);
+ const uint32_t rs_u = static_cast<uint32_t>(rs);
+ const int32_t rt_reg = instr->RtValue();
+ const int32_t rt = get_register(rt_reg);
+ const uint32_t rt_u = static_cast<uint32_t>(rt);
+ const int32_t rd_reg = instr->RdValue();
+
+ const int32_t fs_reg = instr->FsValue();
+ const int32_t ft_reg = instr->FtValue();
+ const int32_t fd_reg = instr->FdValue();
+ int64_t i64hilo = 0;
+ uint64_t u64hilo = 0;
+
+ // ALU output.
+ // It should not be used as is. Instructions using it should always
+ // initialize it first.
+ int32_t alu_out = 0x12345678;
+
+ // For break and trap instructions.
+ bool do_interrupt = false;
+
+ // For jr and jalr.
+ // Get current pc.
+ int32_t current_pc = get_pc();
+  // Next pc.
+ int32_t next_pc = 0;
+
+ // Setup the variables if needed before executing the instruction.
+ ConfigureTypeRegister(instr,
+ alu_out,
+ i64hilo,
+ u64hilo,
+ next_pc,
+ do_interrupt);
// ---------- Raise exceptions triggered.
SignalExceptions();
- // ---------- Execution
+ // ---------- Execution.
switch (op) {
case COP1:
switch (instr->RsFieldRaw()) {
- case BC1: // branch on coprocessor condition
+ case BC1: // Branch on coprocessor condition.
UNREACHABLE();
break;
+ case CFC1:
+        set_register(rt_reg, alu_out);
+        break;
case MFC1:
- case MFHC1:
set_register(rt_reg, alu_out);
break;
+ case MFHC1:
+ UNIMPLEMENTED_MIPS();
+ break;
+ case CTC1:
+ // At the moment only FCSR is supported.
+ ASSERT(fs_reg == kFCSRRegister);
+ FCSR_ = registers_[rt_reg];
+ break;
case MTC1:
- // We don't need to set the higher bits to 0, because MIPS ISA says
- // they are in an unpredictable state after executing MTC1.
FPUregisters_[fs_reg] = registers_[rt_reg];
- FPUregisters_[fs_reg+1] = Unpredictable;
break;
case MTHC1:
- // Here we need to keep the lower bits unchanged.
- FPUregisters_[fs_reg+1] = registers_[rt_reg];
+ UNIMPLEMENTED_MIPS();
break;
case S:
+ float f;
switch (instr->FunctionFieldRaw()) {
case CVT_D_S:
+ f = get_fpu_register_float(fs_reg);
+ set_fpu_register_double(fd_reg, static_cast<double>(f));
+ break;
case CVT_W_S:
case CVT_L_S:
+ case TRUNC_W_S:
+ case TRUNC_L_S:
+ case ROUND_W_S:
+ case ROUND_L_S:
+ case FLOOR_W_S:
+ case FLOOR_L_S:
+ case CEIL_W_S:
+ case CEIL_L_S:
case CVT_PS_S:
UNIMPLEMENTED_MIPS();
break;
@@ -1079,10 +2000,138 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
}
break;
case D:
+ double ft, fs;
+ uint32_t cc, fcsr_cc;
+ int64_t i64;
+ fs = get_fpu_register_double(fs_reg);
+ ft = get_fpu_register_double(ft_reg);
+ cc = instr->FCccValue();
+ fcsr_cc = get_fcsr_condition_bit(cc);
switch (instr->FunctionFieldRaw()) {
- case CVT_S_D:
- case CVT_W_D:
- case CVT_L_D:
+ case ADD_D:
+ set_fpu_register_double(fd_reg, fs + ft);
+ break;
+ case SUB_D:
+ set_fpu_register_double(fd_reg, fs - ft);
+ break;
+ case MUL_D:
+ set_fpu_register_double(fd_reg, fs * ft);
+ break;
+ case DIV_D:
+ set_fpu_register_double(fd_reg, fs / ft);
+ break;
+ case ABS_D:
+ set_fpu_register_double(fd_reg, fs < 0 ? -fs : fs);
+ break;
+ case MOV_D:
+ set_fpu_register_double(fd_reg, fs);
+ break;
+ case NEG_D:
+ set_fpu_register_double(fd_reg, -fs);
+ break;
+ case SQRT_D:
+ set_fpu_register_double(fd_reg, sqrt(fs));
+ break;
+ case C_UN_D:
+ set_fcsr_bit(fcsr_cc, isnan(fs) || isnan(ft));
+ break;
+ case C_EQ_D:
+ set_fcsr_bit(fcsr_cc, (fs == ft));
+ break;
+ case C_UEQ_D:
+ set_fcsr_bit(fcsr_cc, (fs == ft) || (isnan(fs) || isnan(ft)));
+ break;
+ case C_OLT_D:
+ set_fcsr_bit(fcsr_cc, (fs < ft));
+ break;
+ case C_ULT_D:
+ set_fcsr_bit(fcsr_cc, (fs < ft) || (isnan(fs) || isnan(ft)));
+ break;
+ case C_OLE_D:
+ set_fcsr_bit(fcsr_cc, (fs <= ft));
+ break;
+ case C_ULE_D:
+ set_fcsr_bit(fcsr_cc, (fs <= ft) || (isnan(fs) || isnan(ft)));
+ break;
+ case CVT_W_D: // Convert double to word.
+ // Rounding modes are not yet supported.
+ ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ case ROUND_W_D: // Round double to word.
+ {
+ double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ }
+ break;
+ case TRUNC_W_D: // Truncate double to word (round towards 0).
+ {
+ double rounded = trunc(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ }
+ break;
+ case FLOOR_W_D: // Round double to word towards negative infinity.
+ {
+ double rounded = floor(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ }
+ break;
+ case CEIL_W_D: // Round double to word towards positive infinity.
+ {
+ double rounded = ceil(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ }
+ break;
+ case CVT_S_D: // Convert double to float (single).
+ set_fpu_register_float(fd_reg, static_cast<float>(fs));
+ break;
+ case CVT_L_D: { // Mips32r2: Truncate double to 64-bit long-word.
+ double rounded = trunc(fs);
+ i64 = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, i64 & 0xffffffff);
+ set_fpu_register(fd_reg + 1, i64 >> 32);
+ break;
+ }
+ case TRUNC_L_D: { // Mips32r2 instruction.
+ double rounded = trunc(fs);
+ i64 = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, i64 & 0xffffffff);
+ set_fpu_register(fd_reg + 1, i64 >> 32);
+ break;
+ }
+ case ROUND_L_D: { // Mips32r2 instruction.
+ double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
+ i64 = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, i64 & 0xffffffff);
+ set_fpu_register(fd_reg + 1, i64 >> 32);
+ break;
+ }
+ case FLOOR_L_D: // Mips32r2 instruction.
+ i64 = static_cast<int64_t>(floor(fs));
+ set_fpu_register(fd_reg, i64 & 0xffffffff);
+ set_fpu_register(fd_reg + 1, i64 >> 32);
+ break;
+ case CEIL_L_D: // Mips32r2 instruction.
+ i64 = static_cast<int64_t>(ceil(fs));
+ set_fpu_register(fd_reg, i64 & 0xffffffff);
+ set_fpu_register(fd_reg + 1, i64 >> 32);
+ break;
+ case C_F_D:
UNIMPLEMENTED_MIPS();
break;
default:
@@ -1091,11 +2140,13 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
case W:
switch (instr->FunctionFieldRaw()) {
- case CVT_S_W:
- UNIMPLEMENTED_MIPS();
+ case CVT_S_W: // Convert word to float (single).
+ alu_out = get_fpu_register(fs_reg);
+ set_fpu_register_float(fd_reg, static_cast<float>(alu_out));
break;
case CVT_D_W: // Convert word to double.
- set_fpu_register(rd_reg, static_cast<double>(rs));
+ alu_out = get_fpu_register(fs_reg);
+ set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
break;
default:
UNREACHABLE();
@@ -1103,8 +2154,14 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
case L:
switch (instr->FunctionFieldRaw()) {
+ case CVT_D_L: // Mips32r2 instruction.
+          // Watch the signs here: the lower word is zero-extended and the
+          // upper word sign-extended to form a signed 64-bit value.
+ i64 = (uint32_t) get_fpu_register(fs_reg);
+ i64 |= ((int64_t) get_fpu_register(fs_reg + 1) << 32);
+ set_fpu_register_double(fd_reg, static_cast<double>(i64));
+ break;
case CVT_S_L:
- case CVT_D_L:
UNIMPLEMENTED_MIPS();
break;
default:
@@ -1121,7 +2178,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case JR: {
Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
- current_pc+Instruction::kInstructionSize);
+ current_pc+Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
set_pc(next_pc);
pc_modified_ = true;
@@ -1129,27 +2186,38 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
}
case JALR: {
Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
- current_pc+Instruction::kInstructionSize);
+ current_pc+Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
- set_register(31, current_pc + 2* Instruction::kInstructionSize);
+ set_register(31, current_pc + 2 * Instruction::kInstrSize);
set_pc(next_pc);
pc_modified_ = true;
break;
}
// Instructions using HI and LO registers.
case MULT:
+ set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(i64hilo >> 32));
+ break;
case MULTU:
+ set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(u64hilo >> 32));
break;
case DIV:
- // Divide by zero was checked in the configuration step.
- set_register(LO, rs / rt);
- set_register(HI, rs % rt);
+ // Divide by zero was not checked in the configuration step - div and
+ // divu do not raise exceptions. On division by 0, the result will
+ // be UNPREDICTABLE.
+ if (rt != 0) {
+ set_register(LO, rs / rt);
+ set_register(HI, rs % rt);
+ }
break;
case DIVU:
- set_register(LO, rs_u / rt_u);
- set_register(HI, rs_u % rt_u);
+ if (rt_u != 0) {
+ set_register(LO, rs_u / rt_u);
+ set_register(HI, rs_u % rt_u);
+ }
break;
- // Break and trap instructions
+ // Break and trap instructions.
case BREAK:
case TGE:
case TGEU:
@@ -1161,6 +2229,23 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
SoftwareInterrupt(instr);
}
break;
+ // Conditional moves.
+ case MOVN:
+ if (rt) set_register(rd_reg, rs);
+ break;
+ case MOVCI: {
+ uint32_t cc = instr->FBccValue();
+ uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
+ if (instr->Bit(16)) { // Read Tf bit.
+ if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
+ } else {
+ if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
+ }
+ break;
+ }
+ case MOVZ:
+ if (!rt) set_register(rd_reg, rs);
+ break;
default: // For other special opcodes we do the default operation.
set_register(rd_reg, alu_out);
};
@@ -1173,9 +2258,23 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
set_register(LO, Unpredictable);
set_register(HI, Unpredictable);
break;
+ default: // For other special2 opcodes we do the default operation.
+ set_register(rd_reg, alu_out);
+ }
+ break;
+ case SPECIAL3:
+ switch (instr->FunctionFieldRaw()) {
+ case INS:
+ // Ins instr leaves result in Rt, rather than Rd.
+ set_register(rt_reg, alu_out);
+ break;
+ case EXT:
+ // Ext instr leaves result in Rt, rather than Rd.
+ set_register(rt_reg, alu_out);
+ break;
default:
UNREACHABLE();
- }
+ };
break;
// Unimplemented opcodes raised an error in the configuration step before,
// so we can use the default here to set the destination register in common
@@ -1185,22 +2284,22 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
};
}
-// Type 2: instructions using a 16 bytes immediate. (eg: addi, beq)
+
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
void Simulator::DecodeTypeImmediate(Instruction* instr) {
- // Instruction fields
+ // Instruction fields.
Opcode op = instr->OpcodeFieldRaw();
- int32_t rs = get_register(instr->RsField());
+ int32_t rs = get_register(instr->RsValue());
uint32_t rs_u = static_cast<uint32_t>(rs);
- int32_t rt_reg = instr->RtField(); // destination register
+ int32_t rt_reg = instr->RtValue(); // Destination register.
int32_t rt = get_register(rt_reg);
- int16_t imm16 = instr->Imm16Field();
+ int16_t imm16 = instr->Imm16Value();
- int32_t ft_reg = instr->FtField(); // destination register
- int32_t ft = get_register(ft_reg);
+ int32_t ft_reg = instr->FtValue(); // Destination register.
- // zero extended immediate
+ // Zero extended immediate.
uint32_t oe_imm16 = 0xffff & imm16;
- // sign extended immediate
+ // Sign extended immediate.
int32_t se_imm16 = imm16;
// Get current pc.
@@ -1208,31 +2307,44 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Next pc.
int32_t next_pc = bad_ra;
- // Used for conditional branch instructions
+ // Used for conditional branch instructions.
bool do_branch = false;
bool execute_branch_delay_instruction = false;
- // Used for arithmetic instructions
+ // Used for arithmetic instructions.
int32_t alu_out = 0;
- // Floating point
+ // Floating point.
double fp_out = 0.0;
+ uint32_t cc, cc_value, fcsr_cc;
- // Used for memory instructions
+ // Used for memory instructions.
int32_t addr = 0x0;
+ // Value to be written in memory.
+ uint32_t mem_value = 0x0;
- // ---------- Configuration (and execution for REGIMM)
+ // ---------- Configuration (and execution for REGIMM).
switch (op) {
- // ------------- COP1. Coprocessor instructions
+ // ------------- COP1. Coprocessor instructions.
case COP1:
switch (instr->RsFieldRaw()) {
- case BC1: // branch on coprocessor condition
- UNIMPLEMENTED_MIPS();
+ case BC1: // Branch on coprocessor condition.
+ cc = instr->FBccValue();
+ fcsr_cc = get_fcsr_condition_bit(cc);
+ cc_value = test_fcsr_bit(fcsr_cc);
+ do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
break;
default:
UNREACHABLE();
};
break;
- // ------------- REGIMM class
+ // ------------- REGIMM class.
case REGIMM:
switch (instr->RtFieldRaw()) {
case BLTZ:
@@ -1257,9 +2369,9 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case BGEZAL:
// Branch instructions common part.
execute_branch_delay_instruction = true;
- // Set next_pc
+ // Set next_pc.
if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstructionSize;
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
if (instr->IsLinkingInstruction()) {
set_register(31, current_pc + kBranchReturnOffset);
}
@@ -1269,8 +2381,8 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
default:
break;
};
- break; // case REGIMM
- // ------------- Branch instructions
+ break; // case REGIMM.
+ // ------------- Branch instructions.
// When comparing to zero, the encoding of rt field is always 0, so we don't
// need to replace rt with zero.
case BEQ:
@@ -1285,7 +2397,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case BGTZ:
do_branch = rs > 0;
break;
- // ------------- Arithmetic instructions
+ // ------------- Arithmetic instructions.
case ADDI:
if (HaveSameSign(rs, se_imm16)) {
if (rs > 0) {
@@ -1318,11 +2430,26 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case LUI:
alu_out = (oe_imm16 << 16);
break;
- // ------------- Memory instructions
+ // ------------- Memory instructions.
case LB:
addr = rs + se_imm16;
alu_out = ReadB(addr);
break;
+ case LH:
+ addr = rs + se_imm16;
+ alu_out = ReadH(addr, instr);
+ break;
+ case LWL: {
+ // al_offset is offset of the effective address within an aligned word.
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = (1 << byte_shift * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = ReadW(addr, instr);
+ alu_out <<= byte_shift * 8;
+ alu_out |= rt & mask;
+ break;
+ }
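// Editor's worked example (illustrative): if rs + se_imm16 = 0x1002 then
// al_offset = 2, byte_shift = 1 and mask = 0xff, so LWL loads the aligned
// word at 0x1000, shifts it left by 8 bits, and keeps the lowest byte of rt:
// alu_out = (mem_word << 8) | (rt & 0xff). LWR below performs the mirror
// merge for the other end of an unaligned word.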
case LW:
addr = rs + se_imm16;
alu_out = ReadW(addr, instr);
@@ -1331,12 +2458,47 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
addr = rs + se_imm16;
alu_out = ReadBU(addr);
break;
+ case LHU:
+ addr = rs + se_imm16;
+ alu_out = ReadHU(addr, instr);
+ break;
+ case LWR: {
+ // al_offset is offset of the effective address within an aligned word.
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = ReadW(addr, instr);
+      alu_out = static_cast<uint32_t>(alu_out) >> al_offset * 8;
+ alu_out |= rt & mask;
+ break;
+ }
case SB:
addr = rs + se_imm16;
break;
+ case SH:
+ addr = rs + se_imm16;
+ break;
+ case SWL: {
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = ReadW(addr, instr) & mask;
+ mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
+ break;
+ }
case SW:
addr = rs + se_imm16;
break;
+ case SWR: {
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint32_t mask = (1 << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = ReadW(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ break;
+ }
case LWC1:
addr = rs + se_imm16;
alu_out = ReadW(addr, instr);
@@ -1356,26 +2518,26 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// ---------- Raise exceptions triggered.
SignalExceptions();
- // ---------- Execution
+ // ---------- Execution.
switch (op) {
- // ------------- Branch instructions
+ // ------------- Branch instructions.
case BEQ:
case BNE:
case BLEZ:
case BGTZ:
// Branch instructions common part.
execute_branch_delay_instruction = true;
- // Set next_pc
+ // Set next_pc.
if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstructionSize;
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + 2* Instruction::kInstructionSize);
+ set_register(31, current_pc + 2* Instruction::kInstrSize);
}
} else {
- next_pc = current_pc + 2 * Instruction::kInstructionSize;
+ next_pc = current_pc + 2 * Instruction::kInstrSize;
}
break;
- // ------------- Arithmetic instructions
+ // ------------- Arithmetic instructions.
case ADDI:
case ADDIU:
case SLTI:
@@ -1386,18 +2548,31 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case LUI:
set_register(rt_reg, alu_out);
break;
- // ------------- Memory instructions
+ // ------------- Memory instructions.
case LB:
+ case LH:
+ case LWL:
case LW:
case LBU:
+ case LHU:
+ case LWR:
set_register(rt_reg, alu_out);
break;
case SB:
WriteB(addr, static_cast<int8_t>(rt));
break;
+ case SH:
+ WriteH(addr, static_cast<uint16_t>(rt), instr);
+ break;
+ case SWL:
+ WriteW(addr, mem_value, instr);
+ break;
case SW:
WriteW(addr, rt, instr);
break;
+ case SWR:
+ WriteW(addr, mem_value, instr);
+ break;
case LWC1:
set_fpu_register(ft_reg, alu_out);
break;
@@ -1410,7 +2585,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
break;
case SDC1:
addr = rs + se_imm16;
- WriteD(addr, ft, instr);
+ WriteD(addr, get_fpu_register_double(ft_reg), instr);
break;
default:
break;
@@ -1422,7 +2597,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// We don't check for end_sim_pc. First it should not be met as the current
// pc is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc+Instruction::kInstructionSize);
+ reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
}
@@ -1432,42 +2607,47 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
}
}
-// Type 3: instructions using a 26 bytes immediate. (eg: j, jal)
+
+// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
void Simulator::DecodeTypeJump(Instruction* instr) {
// Get current pc.
int32_t current_pc = get_pc();
// Get unchanged bits of pc.
int32_t pc_high_bits = current_pc & 0xf0000000;
- // Next pc
- int32_t next_pc = pc_high_bits | (instr->Imm26Field() << 2);
+ // Next pc.
+ int32_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
- // Execute branch delay slot
+ // Execute branch delay slot.
// We don't check for end_sim_pc. First it should not be met as the current pc
// is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc+Instruction::kInstructionSize);
+ reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
// Update pc and ra if necessary.
// Do this after the branch delay execution.
if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + 2* Instruction::kInstructionSize);
+ set_register(31, current_pc + 2 * Instruction::kInstrSize);
}
set_pc(next_pc);
pc_modified_ = true;
}
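// Editor's worked example (illustrative): with current_pc = 0x4000f008 the
// preserved high bits are 0x40000000, so a j/jal whose Imm26Value() is
// 0x12345 jumps to 0x40000000 | (0x12345 << 2) = 0x40048d14, after first
// executing the branch delay slot at 0x4000f00c.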
+
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
+ if (v8::internal::FLAG_check_icache) {
+ CheckICache(isolate_->simulator_i_cache(), instr);
+ }
pc_modified_ = false;
if (::v8::internal::FLAG_trace_sim) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
+ // Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer,
- reinterpret_cast<byte_*>(instr));
- PrintF(" 0x%08x %s\n", instr, buffer.start());
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
+ PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr),
+ buffer.start());
}
switch (instr->InstructionType()) {
@@ -1485,7 +2665,7 @@ void Simulator::InstructionDecode(Instruction* instr) {
}
if (!pc_modified_) {
set_register(pc, reinterpret_cast<int32_t>(instr) +
- Instruction::kInstructionSize);
+ Instruction::kInstrSize);
}
}
@@ -1511,7 +2691,7 @@ void Simulator::Execute() {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;
if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
- Debugger dbg(this);
+ MipsDebugger dbg(this);
dbg.Debug();
} else {
InstructionDecode(instr);
@@ -1522,10 +2702,10 @@ void Simulator::Execute() {
}
-int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
va_list parameters;
va_start(parameters, argument_count);
- // Setup arguments
+ // Set up arguments.
// First four arguments passed in registers.
ASSERT(argument_count >= 4);
@@ -1538,7 +2718,7 @@ int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
int original_stack = get_register(sp);
// Compute position of stack on entry to generated code.
int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
- - kArgsSlotsSize);
+ - kCArgsSlotsSize);
if (OS::ActivationFrameAlignment() != 0) {
entry_stack &= -OS::ActivationFrameAlignment();
}
@@ -1550,7 +2730,7 @@ int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
va_end(parameters);
set_register(sp, entry_stack);
- // Prepare to execute the code at entry
+ // Prepare to execute the code at entry.
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
// when the PC reaches this value. By saving the "end simulation" value into
@@ -1586,7 +2766,7 @@ int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
set_register(gp, callee_saved_value);
set_register(fp, callee_saved_value);
- // Start the simulation
+ // Start the simulation.
Execute();
// Check that the callee-saved registers have been preserved.
@@ -1643,8 +2823,8 @@ uintptr_t Simulator::PopAddress() {
#undef UNSUPPORTED
-} } // namespace assembler::mips
+} } // namespace v8::internal
-#endif // !__mips || USE_SIMULATOR
+#endif // USE_SIMULATOR
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 6e42683a2..69dddfad3 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -37,19 +37,40 @@
#define V8_MIPS_SIMULATOR_MIPS_H_
#include "allocation.h"
+#include "constants-mips.h"
-#if defined(__mips) && !defined(USE_SIMULATOR)
+#if !defined(USE_SIMULATOR)
+// Running without a simulator on a native mips platform.
+
+namespace v8 {
+namespace internal {
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- entry(p0, p1, p2, p3, p4);
+ entry(p0, p1, p2, p3, p4)
+
+typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
+ void*, int*, Address, int, Isolate*);
+
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type mips_regexp_matcher.
+// The fifth argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
+ p0, p1, p2, p3, NULL, p4, p5, p6, p7))
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ reinterpret_cast<TryCatch*>(try_catch_address)
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
// just use the C stack limit.
class SimulatorStack : public v8::internal::AllStatic {
public:
- static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+ uintptr_t c_limit) {
return c_limit;
}
@@ -60,6 +81,8 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline void UnregisterCTryCatch() { }
};
+} } // namespace v8::internal
+
// Calculates the stack limit beyond which we will throw stack overflow errors.
// This macro must be called from a C++ method. It relies on being able to take
// the address of "this" to get a value on the current execution stack and then
@@ -70,39 +93,51 @@ class SimulatorStack : public v8::internal::AllStatic {
(reinterpret_cast<uintptr_t>(this) >= limit ? \
reinterpret_cast<uintptr_t>(this) - limit : 0)
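
The macro expansion above yields the bytes of stack remaining between the current C++ frame and the limit, clamped at zero once the frame is at or below it. A minimal sketch of the same computation as a plain function (illustrative only, not the macro itself):

#include <cstdint>

// Headroom between the current frame address and the stack limit; the stack
// grows down, so a frame address below the limit means no room is left.
uintptr_t StackHeadroom(uintptr_t frame_address, uintptr_t limit) {
  return frame_address >= limit ? frame_address - limit : 0;
}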
-// Call the generated regexp code directly. The entry function pointer should
-// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- entry(p0, p1, p2, p3, p4, p5, p6)
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- reinterpret_cast<TryCatch*>(try_catch_address)
+#else // !defined(USE_SIMULATOR)
+// Running with a simulator.
+#include "hashmap.h"
+#include "assembler.h"
-#else // #if !defined(__mips) || defined(USE_SIMULATOR)
+namespace v8 {
+namespace internal {
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(\
- assembler::mips::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
- p0, p1, p2, p3, p4))
+// -----------------------------------------------------------------------------
+// Utility functions
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- assembler::mips::Simulator::current()->Call(\
- FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() {
+ memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
+ }
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == NULL ? \
- NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+ char* ValidityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+ char* CachedData(int offset) {
+ return &data_[offset];
+ }
-namespace assembler {
-namespace mips {
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
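
A host-side toy version of CachePage may make the shift/mask layout easier to see: a 4 KB page of cached data with one validity byte per 4-byte line. This is an illustrative rewrite under those assumptions, not the simulator's class:

#include <cstring>

class ToyCachePage {
 public:
  static const int LINE_VALID = 0;
  static const int LINE_INVALID = 1;
  static const int kPageShift = 12;
  static const int kPageSize = 1 << kPageShift;  // 4096-byte pages.
  static const int kLineShift = 2;               // 4-byte cache lines.

  ToyCachePage() {
    std::memset(validity_map_, LINE_INVALID, sizeof(validity_map_));
  }

  // |offset| is a byte offset into the page; all four bytes of a line share
  // one validity byte, hence the right shift.
  char* ValidityByte(int offset) { return &validity_map_[offset >> kLineShift]; }
  char* CachedData(int offset) { return &data_[offset]; }

 private:
  char data_[kPageSize];                        // The cached data.
  char validity_map_[kPageSize >> kLineShift];  // One byte per line (1024).
};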
class Simulator {
public:
- friend class Debugger;
+ friend class MipsDebugger;
// Registers are declared in order. See SMRL chapter 2.
enum Register {
@@ -119,7 +154,7 @@ class Simulator {
sp,
s8,
ra,
- // LO, HI, and pc
+ // LO, HI, and pc.
LO,
HI,
pc, // pc must be the last register.
@@ -132,29 +167,35 @@ class Simulator {
// Generated code will always use doubles. So we will only use even registers.
enum FPURegister {
f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
- f12, f13, f14, f15, // f12 and f14 are arguments FPURegisters
+ f12, f13, f14, f15, // f12 and f14 are arguments FPURegisters.
f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
f26, f27, f28, f29, f30, f31,
kNumFPURegisters
};
- Simulator();
+ explicit Simulator(Isolate* isolate);
~Simulator();
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
- static Simulator* current();
+ static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state. Reading the pc value adheres to the MIPS
// architecture specification and is off by 8 from the currently executing
// instruction.
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
- // Same for FPURegisters
+ // Same for FPURegisters.
void set_fpu_register(int fpureg, int32_t value);
+ void set_fpu_register_float(int fpureg, float value);
void set_fpu_register_double(int fpureg, double value);
int32_t get_fpu_register(int fpureg) const;
+ int64_t get_fpu_register_long(int fpureg) const;
+ float get_fpu_register_float(int fpureg) const;
double get_fpu_register_double(int fpureg) const;
+ void set_fcsr_bit(uint32_t cc, bool value);
+ bool test_fcsr_bit(uint32_t cc);
+ bool set_fcsr_round_error(double original, double rounded);
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
@@ -167,12 +208,12 @@ class Simulator {
void Execute();
// Call on program start.
- static void Initialize();
+ static void Initialize(Isolate* isolate);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
- int32_t Call(byte_* entry, int argument_count, ...);
+ int32_t Call(byte* entry, int argument_count, ...);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -180,6 +221,14 @@ class Simulator {
// Pop an address from the JS stack.
uintptr_t PopAddress();
+ // ICache checking.
+ static void FlushICache(v8::internal::HashMap* i_cache, void* start,
+ size_t size);
+
+ // Returns true if pc register contains one of the 'special_values' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
@@ -223,15 +272,35 @@ class Simulator {
inline int32_t SetDoubleHIW(double* addr);
inline int32_t SetDoubleLOW(double* addr);
-
// Executing is handled based on the instruction type.
void DecodeTypeRegister(Instruction* instr);
+
+ // Helper function for DecodeTypeRegister.
+ void ConfigureTypeRegister(Instruction* instr,
+ int32_t& alu_out,
+ int64_t& i64hilo,
+ uint64_t& u64hilo,
+ int32_t& next_pc,
+ bool& do_interrupt);
+
void DecodeTypeImmediate(Instruction* instr);
void DecodeTypeJump(Instruction* instr);
// Used for breakpoints and traps.
void SoftwareInterrupt(Instruction* instr);
+ // Stop helper functions.
+ bool IsWatchpoint(uint32_t code);
+ void PrintWatchpoint(uint32_t code);
+ void HandleStop(uint32_t code, Instruction* instr);
+ bool IsStopInstruction(Instruction* instr);
+ bool IsEnabledStop(uint32_t code);
+ void EnableStop(uint32_t code);
+ void DisableStop(uint32_t code);
+ void IncreaseStopCounter(uint32_t code);
+
+
// Executes one instruction.
void InstructionDecode(Instruction* instr);
// Execute one instruction placed in a branch delay slot.
@@ -239,11 +308,17 @@ class Simulator {
if (instr->IsForbiddenInBranchDelay()) {
V8_Fatal(__FILE__, __LINE__,
"Eror:Unexpected %i opcode in a branch delay slot.",
- instr->OpcodeField());
+ instr->OpcodeValue());
}
InstructionDecode(instr);
}
+ // ICache.
+ static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
+ static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+ int size);
+ static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+
enum Exception {
none,
kIntegerOverflow,
@@ -258,30 +333,68 @@ class Simulator {
// Runtime call support.
static void* RedirectExternalReference(void* external_function,
- bool fp_return);
+ ExternalReference::Type type);
+
+ // For use in calls that take double value arguments.
+ void GetFpArgs(double* x, double* y);
+ void GetFpArgs(double* x);
+ void GetFpArgs(double* x, int32_t* y);
+ void SetFpResult(const double& result);
- // Used for real time calls that takes two double values as arguments and
- // returns a double.
- void SetFpResult(double result);
// Architecture state.
// Registers.
int32_t registers_[kNumSimuRegisters];
// Coprocessor Registers.
int32_t FPUregisters_[kNumFPURegisters];
+ // FPU control register.
+ uint32_t FCSR_;
// Simulator support.
+ // Allocate 1MB for stack.
+ static const size_t stack_size_ = 1 * 1024 * 1024;
char* stack_;
bool pc_modified_;
int icount_;
- static bool initialized_;
+ int break_count_;
+
+ // Icache simulation.
+ v8::internal::HashMap* i_cache_;
+
+ v8::internal::Isolate* isolate_;
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1 << 31;
+
+ // A stop is enabled, meaning the simulator will stop when meeting the
+ // instruction, if bit 31 of watched_stops[code].count is unset.
+ // The value watched_stops[code].count & ~(1 << 31) indicates how many times
+ // the breakpoint was hit or gone through.
+ struct StopCountAndDesc {
+ uint32_t count;
+ char* desc;
+ };
+ StopCountAndDesc watched_stops[kMaxStopCode + 1];
};
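
Bit 31 of watched_stops[code].count is doing double duty, as the comments above describe: it disables the stop, while the low 31 bits count hits. A sketch of that encoding as free functions (names are illustrative, not the simulator's):

#include <cstdint>

static const uint32_t kStopDisabledBit = 1u << 31;

bool StopIsEnabled(uint32_t count) { return (count & kStopDisabledBit) == 0; }
uint32_t StopHitCount(uint32_t count) { return count & ~kStopDisabledBit; }
uint32_t WithStopDisabled(uint32_t count) { return count | kStopDisabledBit; }
uint32_t WithStopEnabled(uint32_t count) { return count & ~kStopDisabledBit; }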
-} } // namespace assembler::mips
+
+// When running with the simulator transition into simulated execution at this
+// point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+ FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ Simulator::current(Isolate::Current())->Call( \
+ entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ try_catch_address == NULL ? \
+ NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
// The simulator has its own stack. Thus it has a different stack limit from
@@ -291,21 +404,22 @@ class Simulator {
// trouble down the line.
class SimulatorStack : public v8::internal::AllStatic {
public:
- static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
- return assembler::mips::Simulator::current()->StackLimit();
+ static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+ uintptr_t c_limit) {
+ return Simulator::current(isolate)->StackLimit();
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- assembler::mips::Simulator* sim = assembler::mips::Simulator::current();
+ Simulator* sim = Simulator::current(Isolate::Current());
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch() {
- assembler::mips::Simulator::current()->PopAddress();
+ Simulator::current(Isolate::Current())->PopAddress();
}
};
-#endif // !defined(__mips) || defined(USE_SIMULATOR)
+} } // namespace v8::internal
+#endif // !defined(USE_SIMULATOR)
#endif // V8_MIPS_SIMULATOR_MIPS_H_
-
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 683b8626e..3e5a0091c 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_MIPS)
#include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "stub-cache.h"
namespace v8 {
@@ -39,6 +39,124 @@ namespace internal {
#define __ ACCESS_MASM(masm)
+static void ProbeTable(Isolate* isolate,
+ MacroAssembler* masm,
+ Code::Flags flags,
+ StubCache::Table table,
+ Register name,
+ Register offset,
+ Register scratch,
+ Register scratch2) {
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+
+ uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
+ uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
+
+ // Check the relative positions of the address fields.
+ ASSERT(value_off_addr > key_off_addr);
+ ASSERT((value_off_addr - key_off_addr) % 4 == 0);
+ ASSERT((value_off_addr - key_off_addr) < (256 * 4));
+
+ Label miss;
+ Register offsets_base_addr = scratch;
+
+ // Check that the key in the entry matches the name.
+ __ li(offsets_base_addr, Operand(key_offset));
+ __ sll(scratch2, offset, 1);
+ __ addu(scratch2, offsets_base_addr, scratch2);
+ __ lw(scratch2, MemOperand(scratch2));
+ __ Branch(&miss, ne, name, Operand(scratch2));
+
+ // Get the code entry from the cache.
+ __ Addu(offsets_base_addr, offsets_base_addr,
+ Operand(value_off_addr - key_off_addr));
+ __ sll(scratch2, offset, 1);
+ __ addu(scratch2, offsets_base_addr, scratch2);
+ __ lw(scratch2, MemOperand(scratch2));
+
+ // Check that the flags match what we're looking for.
+ __ lw(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
+ __ And(scratch2, scratch2, Operand(~Code::kFlagsNotUsedInLookup));
+ __ Branch(&miss, ne, scratch2, Operand(flags));
+
+ // Re-load code entry from cache.
+ __ sll(offset, offset, 1);
+ __ addu(offset, offset, offsets_base_addr);
+ __ lw(offset, MemOperand(offset));
+
+ // Jump to the first instruction in the code stub.
+ __ Addu(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(offset);
+
+ // Miss: fall through.
+ __ bind(&miss);
+}
+
+
+// Helper function used to check that the dictionary doesn't contain
+// the property. This function may return false negatives, so miss_label
+// must always call a backup property check that is complete.
+// This function is safe to call if the receiver has fast properties.
+// Name must be a symbol and receiver must be a heap object.
+MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ String* name,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(name->IsSymbol());
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+ Label done;
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ Register map = scratch1;
+ __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ And(at, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+ __ Branch(miss_label, ne, at, Operand(zero_reg));
+
+ // Check that receiver is a JSObject.
+ __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ // Check that the properties array is a dictionary.
+ __ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+ Register tmp = properties;
+ __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
+ __ Branch(miss_label, ne, map, Operand(tmp));
+
+ // Restore the temporarily used register.
+ __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+ MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
+ masm,
+ miss_label,
+ &done,
+ receiver,
+ properties,
+ name,
+ scratch1);
+ if (result->IsFailure()) return result;
+
+ __ bind(&done);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+ return result;
+}
+
+
void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
@@ -46,14 +164,96 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register scratch,
Register extra,
Register extra2) {
- UNIMPLEMENTED_MIPS();
+ Isolate* isolate = masm->isolate();
+ Label miss;
+
+ // Make sure that code is valid. The shifting code relies on the
+ // entry size being 8.
+ ASSERT(sizeof(Entry) == 8);
+
+ // Make sure the flags do not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ ASSERT(!scratch.is(receiver));
+ ASSERT(!scratch.is(name));
+ ASSERT(!extra.is(receiver));
+ ASSERT(!extra.is(name));
+ ASSERT(!extra.is(scratch));
+ ASSERT(!extra2.is(receiver));
+ ASSERT(!extra2.is(name));
+ ASSERT(!extra2.is(scratch));
+ ASSERT(!extra2.is(extra));
+
+ // Check scratch, extra and extra2 registers are valid.
+ ASSERT(!scratch.is(no_reg));
+ ASSERT(!extra.is(no_reg));
+ ASSERT(!extra2.is(no_reg));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss, t0);
+
+ // Get the map of the receiver and compute the hash.
+ __ lw(scratch, FieldMemOperand(name, String::kHashFieldOffset));
+ __ lw(t8, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Addu(scratch, scratch, Operand(t8));
+ __ Xor(scratch, scratch, Operand(flags));
+ __ And(scratch,
+ scratch,
+ Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+
+ // Probe the primary table.
+ ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ Subu(scratch, scratch, Operand(name));
+ __ Addu(scratch, scratch, Operand(flags));
+ __ And(scratch,
+ scratch,
+ Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+
+ // Probe the secondary table.
+ ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
}
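
The probe sequence above hashes the name's hash field with the receiver's map and the code flags, masks into the primary table, and on a miss re-hashes into the secondary table. A plain C++ sketch of the two offset computations; the table sizes and tag shift below are stand-in values, not necessarily V8's:

#include <cstdint>

static const uint32_t kPrimaryTableSizeToy = 2048;
static const uint32_t kSecondaryTableSizeToy = 512;
static const int kHeapObjectTagSizeToy = 2;

// Primary probe: (name hash + receiver map) ^ flags, masked to table range.
uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map, uint32_t flags) {
  return ((name_hash + map) ^ flags) &
         ((kPrimaryTableSizeToy - 1) << kHeapObjectTagSizeToy);
}

// Secondary probe after a primary miss: subtract the name, re-add the flags.
uint32_t SecondaryOffset(uint32_t primary, uint32_t name, uint32_t flags) {
  return (primary - name + flags) &
         ((kSecondaryTableSizeToy - 1) << kHeapObjectTagSizeToy);
}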
void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
- UNIMPLEMENTED_MIPS();
+ // Load the global or builtins object from the current context.
+ __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Load the global context from the global or builtins object.
+ __ lw(prototype,
+ FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
+ // Load the function from the global context.
+ __ lw(prototype, MemOperand(prototype, Context::SlotOffset(index)));
+ // Load the initial map. The global functions all have initial maps.
+ __ lw(prototype,
+ FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the prototype from the initial map.
+ __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ Isolate* isolate = masm->isolate();
+ // Check we're still in the same context.
+ __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ ASSERT(!prototype.is(at));
+ __ li(at, isolate->global());
+ __ Branch(miss, ne, prototype, Operand(at));
+ // Get the global function with the given index.
+ JSFunction* function =
+ JSFunction::cast(isolate->global_context()->get(index));
+ // Load its initial map. The global functions all have initial maps.
+ __ li(prototype, Handle<Map>(function->initial_map()));
+ // Load the prototype from the initial map.
+ __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
}
@@ -63,7 +263,18 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst, Register src,
JSObject* holder, int index) {
- UNIMPLEMENTED_MIPS();
+ // Adjust for the number of properties stored in the holder.
+ index -= holder->map()->inobject_properties();
+ if (index < 0) {
+ // Get the property straight out of the holder.
+ int offset = holder->map()->instance_size() + (index * kPointerSize);
+ __ lw(dst, FieldMemOperand(src, offset));
+ } else {
+ // Calculate the offset into the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+ __ lw(dst, FieldMemOperand(dst, offset));
+ }
}
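
GenerateFastPropertyLoad distinguishes in-object properties (the adjusted index goes negative and the slot lives at the end of the object) from slots in the out-of-line properties array. A host-side sketch of the offset arithmetic; the sizes used here are stand-ins for illustration:

#include <cstdio>

static const int kPointerSizeToy = 4;
static const int kFixedArrayHeaderSizeToy = 8;

// Returns the byte offset of property |index| and whether it is stored
// inside the object itself or in the separate properties array.
int PropertyOffset(int index, int inobject_properties, int instance_size,
                   bool* in_object) {
  index -= inobject_properties;  // Negative => stored in the object itself.
  if (index < 0) {
    *in_object = true;
    return instance_size + index * kPointerSizeToy;
  }
  *in_object = false;
  return index * kPointerSizeToy + kFixedArrayHeaderSizeToy;
}

int main() {
  bool in_object;
  int offset = PropertyOffset(1, 4, 32, &in_object);  // 2nd of 4 in-object.
  printf("offset=%d in_object=%d\n", offset, in_object);  // offset=20, 1.
  return 0;
}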
@@ -71,7 +282,76 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
Register receiver,
Register scratch,
Label* miss_label) {
- UNIMPLEMENTED_MIPS();
+ // Check that the receiver isn't a smi.
+ __ And(scratch, receiver, Operand(kSmiTagMask));
+ __ Branch(miss_label, eq, scratch, Operand(zero_reg));
+
+ // Check that the object is a JS array.
+ __ GetObjectType(receiver, scratch, scratch);
+ __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+ // Load length directly from the JS array.
+ __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Ret();
+}
+
+
+// Generate code to check if an object is a string. If the object is a
+// heap object, its map's instance type is left in the scratch1 register.
+// If this is not needed, scratch1 and scratch2 may be the same register.
+static void GenerateStringCheck(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* smi,
+ Label* non_string_object) {
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, smi, t0);
+
+ // Check that the object is a string.
+ __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ And(scratch2, scratch1, Operand(kIsNotStringMask));
+ // The cast is to resolve the overload for the argument of 0x0.
+ __ Branch(non_string_object,
+ ne,
+ scratch2,
+ Operand(static_cast<int32_t>(kStringTag)));
+}
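
The test above works because every string instance type has the kIsNotStringMask bits clear, i.e. masking leaves kStringTag (zero). A one-function sketch of the predicate; the mask value below is a stand-in for illustration:

#include <cstdint>

static const uint32_t kIsNotStringMaskToy = 0x80;  // Stand-in mask value.
static const uint32_t kStringTagToy = 0x0;

bool IsStringInstanceType(uint32_t instance_type) {
  return (instance_type & kIsNotStringMaskToy) == kStringTagToy;
}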
+
+
+// Generate code to load the length from a string object and return the length.
+// If the receiver object is not a string or a wrapped string object the
+// execution continues at the miss label. The register containing the
+// receiver is potentially clobbered.
+void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss,
+ bool support_wrappers) {
+ Label check_wrapper;
+
+ // Check if the object is a string leaving the instance type in the
+ // scratch1 register.
+ GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
+ support_wrappers ? &check_wrapper : miss);
+
+ // Load length directly from the string.
+ __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
+ __ Ret();
+
+ if (support_wrappers) {
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
+
+ // Unwrap the value and check if the wrapped value is a string.
+ __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+ __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
+ __ Ret();
+ }
}
@@ -80,11 +360,13 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Label* miss_label) {
- UNIMPLEMENTED_MIPS();
+ __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+ __ mov(v0, scratch1);
+ __ Ret();
}
-// Generate StoreField code, value is passed in r0 register.
+// Generate StoreField code; the value is passed in the a0 register.
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
@@ -95,12 +377,652 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Register name_reg,
Register scratch,
Label* miss_label) {
- UNIMPLEMENTED_MIPS();
+ // a0 : value.
+ Label exit;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver_reg, miss_label, scratch);
+
+ // Check that the map of the receiver hasn't changed.
+ __ lw(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+ __ Branch(miss_label, ne, scratch, Operand(Handle<Map>(object->map())));
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ // Perform map transition for the receiver if necessary.
+ if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+ // The properties must be extended before we can store the value.
+ // We jump to a runtime call that extends the properties array.
+ __ push(receiver_reg);
+ __ li(a2, Operand(Handle<Map>(transition)));
+ __ Push(a2, a0);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ masm->isolate()),
+ 3, 1);
+ return;
+ }
+
+ if (transition != NULL) {
+ // Update the map of the object; no write barrier updating is
+ // needed because the map is never in new space.
+ __ li(t0, Operand(Handle<Map>(transition)));
+ __ sw(t0, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+ }
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ sw(a0, FieldMemOperand(receiver_reg, offset));
+
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(a0, &exit, scratch);
+
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array.
+ __ lw(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ sw(a0, FieldMemOperand(scratch, offset));
+
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(a0, &exit);
+
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
+ }
+
+ // Return the value (register v0).
+ __ bind(&exit);
+ __ mov(v0, a0);
+ __ Ret();
}
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
+ Code* code = NULL;
+ if (kind == Code::LOAD_IC) {
+ code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
+ } else {
+ code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
+ }
+
+ Handle<Code> ic(code);
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
+static void GenerateCallFunction(MacroAssembler* masm,
+ Object* object,
+ const ParameterCount& arguments,
+ Label* miss,
+ Code::ExtraICState extra_ic_state) {
+ // ----------- S t a t e -------------
+ // -- a0: receiver
+ // -- a1: function to call
+ // -----------------------------------
+ // Check that the function really is a function.
+ __ JumpIfSmi(a1, miss);
+ __ GetObjectType(a1, a3, a3);
+ __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ if (object->IsGlobalObject()) {
+ __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a3, MemOperand(sp, arguments.immediate() * kPointerSize));
+ }
+
+ // Invoke the function.
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(a1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ JSObject* holder_obj) {
+ __ push(name);
+ InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+ ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+ Register scratch = name;
+ __ li(scratch, Operand(Handle<Object>(interceptor)));
+ __ Push(scratch, receiver, holder);
+ __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
+ __ push(scratch);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ JSObject* holder_obj) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
+ masm->isolate());
+ __ li(a0, Operand(5));
+ __ li(a1, Operand(ref));
+
+ CEntryStub stub(1);
+ __ CallStub(&stub);
+}
+
+
+static const int kFastApiCallArguments = 3;
+
+
+// Reserves space for the extra arguments to FastHandleApiCall in the
+// caller's frame.
+//
+// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
+static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
+ Register scratch) {
+ ASSERT(Smi::FromInt(0) == 0);
+ for (int i = 0; i < kFastApiCallArguments; i++) {
+ __ push(zero_reg);
+ }
+}
+
+
+// Undoes the effects of ReserveSpaceForFastApiCall.
+static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
+ __ Drop(kFastApiCallArguments);
+}
+
+
+static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : holder (set by CheckPrototypes)
+ // -- sp[4] : callee js function
+ // -- sp[8] : call data
+ // -- sp[12] : last js argument
+ // -- ...
+ // -- sp[(argc + 3) * 4] : first js argument
+ // -- sp[(argc + 4) * 4] : receiver
+ // -----------------------------------
+ // Get the function and setup the context.
+ JSFunction* function = optimization.constant_function();
+ __ li(t1, Operand(Handle<JSFunction>(function)));
+ __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
+
+ // Pass the additional arguments FastHandleApiCall expects.
+ Object* call_data = optimization.api_call_info()->data();
+ Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
+ if (masm->isolate()->heap()->InNewSpace(call_data)) {
+ __ li(a0, api_call_info_handle);
+ __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
+ } else {
+ __ li(t2, Operand(Handle<Object>(call_data)));
+ }
+
+ // Store js function and call data.
+ __ sw(t1, MemOperand(sp, 1 * kPointerSize));
+ __ sw(t2, MemOperand(sp, 2 * kPointerSize));
+
+ // a2 points to call data as expected by Arguments
+ // (refer to layout above).
+ __ Addu(a2, sp, Operand(2 * kPointerSize));
+
+ Object* callback = optimization.api_call_info()->callback();
+ Address api_function_address = v8::ToCData<Address>(callback);
+ ApiFunction fun(api_function_address);
+
+ const int kApiStackSpace = 4;
+
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
+ // struct from the function (which is currently the case). This means we pass
+ // the first argument in a1 instead of a0. TryCallApiFunctionAndReturn
+ // will handle setting up a0.
+
+ // a1 = v8::Arguments&
+ // Arguments is built at sp + 1 (sp is a reserved spot for ra).
+ __ Addu(a1, sp, kPointerSize);
+
+ // v8::Arguments::implicit_args = data
+ __ sw(a2, MemOperand(a1, 0 * kPointerSize));
+ // v8::Arguments::values = last argument
+ __ Addu(t0, a2, Operand(argc * kPointerSize));
+ __ sw(t0, MemOperand(a1, 1 * kPointerSize));
+ // v8::Arguments::length_ = argc
+ __ li(t0, Operand(argc));
+ __ sw(t0, MemOperand(a1, 2 * kPointerSize));
+ // v8::Arguments::is_construct_call = 0
+ __ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));
+
+ // Emitting a stub call may try to allocate (if the code is not
+ // already generated). Do not allow the assembler to perform a
+ // garbage collection but instead return the allocation failure
+ // object.
+ const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+ ExternalReference ref =
+ ExternalReference(&fun,
+ ExternalReference::DIRECT_API_CALL,
+ masm->isolate());
+ return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+}
+
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+ CallInterceptorCompiler(StubCompiler* stub_compiler,
+ const ParameterCount& arguments,
+ Register name,
+ Code::ExtraICState extra_ic_state)
+ : stub_compiler_(stub_compiler),
+ arguments_(arguments),
+ name_(name),
+ extra_ic_state_(extra_ic_state) {}
+
+ MaybeObject* Compile(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ CallOptimization optimization(lookup);
+
+ if (optimization.is_constant_call()) {
+ return CompileCacheable(masm,
+ object,
+ receiver,
+ scratch1,
+ scratch2,
+ scratch3,
+ holder,
+ lookup,
+ name,
+ optimization,
+ miss);
+ } else {
+ CompileRegular(masm,
+ object,
+ receiver,
+ scratch1,
+ scratch2,
+ scratch3,
+ name,
+ holder,
+ miss);
+ return masm->isolate()->heap()->undefined_value();
+ }
+ }
+
+ private:
+ MaybeObject* CompileCacheable(MacroAssembler* masm,
+ JSObject* object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ JSObject* interceptor_holder,
+ LookupResult* lookup,
+ String* name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
+ ASSERT(optimization.is_constant_call());
+ ASSERT(!lookup->holder()->IsGlobalObject());
+
+ Counters* counters = masm->isolate()->counters();
+
+ int depth1 = kInvalidProtoDepth;
+ int depth2 = kInvalidProtoDepth;
+ bool can_do_fast_api_call = false;
+ if (optimization.is_simple_api_call() &&
+ !lookup->holder()->IsGlobalObject()) {
+ depth1 =
+ optimization.GetPrototypeDepthOfExpectedType(object,
+ interceptor_holder);
+ if (depth1 == kInvalidProtoDepth) {
+ depth2 =
+ optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
+ lookup->holder());
+ }
+ can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
+ (depth2 != kInvalidProtoDepth);
+ }
+
+ __ IncrementCounter(counters->call_const_interceptor(), 1,
+ scratch1, scratch2);
+
+ if (can_do_fast_api_call) {
+ __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
+ scratch1, scratch2);
+ ReserveSpaceForFastApiCall(masm, scratch1);
+ }
+
+ // Check that the maps from receiver to interceptor's holder
+ // haven't changed and thus we can invoke interceptor.
+ Label miss_cleanup;
+ Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
+ Register holder =
+ stub_compiler_->CheckPrototypes(object, receiver,
+ interceptor_holder, scratch1,
+ scratch2, scratch3, name, depth1, miss);
+
+ // Invoke an interceptor and if it provides a value,
+ // branch to |regular_invoke|.
+ Label regular_invoke;
+ LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
+ &regular_invoke);
+
+ // Interceptor returned nothing for this property. Try to use cached
+ // constant function.
+
+ // Check that the maps from interceptor's holder to constant function's
+ // holder haven't changed and thus we can use cached constant function.
+ if (interceptor_holder != lookup->holder()) {
+ stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
+ lookup->holder(), scratch1,
+ scratch2, scratch3, name, depth2, miss);
+ } else {
+ // CheckPrototypes has a side effect of fetching a 'holder'
+ // for API (object which is instanceof for the signature). It's
+ // safe to omit it here, as if present, it should be fetched
+ // by the previous CheckPrototypes.
+ ASSERT(depth2 == kInvalidProtoDepth);
+ }
+
+ // Invoke function.
+ if (can_do_fast_api_call) {
+ MaybeObject* result = GenerateFastApiDirectCall(masm,
+ optimization,
+ arguments_.immediate());
+ if (result->IsFailure()) return result;
+ } else {
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(optimization.constant_function(), arguments_,
+ JUMP_FUNCTION, call_kind);
+ }
+
+ // Deferred code for the fast API call case: clean up preallocated space.
+ if (can_do_fast_api_call) {
+ __ bind(&miss_cleanup);
+ FreeSpaceForFastApiCall(masm);
+ __ Branch(miss_label);
+ }
+
+ // Invoke a regular function.
+ __ bind(&regular_invoke);
+ if (can_do_fast_api_call) {
+ FreeSpaceForFastApiCall(masm);
+ }
+
+ return masm->isolate()->heap()->undefined_value();
+ }
+
+ void CompileRegular(MacroAssembler* masm,
+ JSObject* object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ String* name,
+ JSObject* interceptor_holder,
+ Label* miss_label) {
+ Register holder =
+ stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, scratch3, name,
+ miss_label);
+
+ // Call a runtime function to load the interceptor property.
+ __ EnterInternalFrame();
+ // Save the name_ register across the call.
+ __ push(name_);
+
+ PushInterceptorArguments(masm,
+ receiver,
+ holder,
+ name_,
+ interceptor_holder);
+
+ __ CallExternalReference(
+ ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
+ masm->isolate()),
+ 5);
+
+ // Restore the name_ register.
+ __ pop(name_);
+ __ LeaveInternalFrame();
+ }
+
+ void LoadWithInterceptor(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ JSObject* holder_obj,
+ Register scratch,
+ Label* interceptor_succeeded) {
+ __ EnterInternalFrame();
+
+ __ Push(holder, name_);
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ __ LeaveInternalFrame();
+
+ // If interceptor returns no-result sentinel, call the constant function.
+ __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
+ }
+
+ StubCompiler* stub_compiler_;
+ const ParameterCount& arguments_;
+ Register name_;
+ Code::ExtraICState extra_ic_state_;
+};
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
+ MacroAssembler* masm,
+ GlobalObject* global,
+ String* name,
+ Register scratch,
+ Label* miss) {
+ Object* probe;
+ { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
+ if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+ }
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+ ASSERT(cell->value()->IsTheHole());
+ __ li(scratch, Operand(Handle<Object>(cell)));
+ __ lw(scratch,
+ FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(miss, ne, scratch, Operand(at));
+ return cell;
+}
+
+
+// Calls GenerateCheckPropertyCell for each global object in the prototype chain
+// from object to (but not including) holder.
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
+ MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ Register scratch,
+ Label* miss) {
+ JSObject* current = object;
+ while (current != holder) {
+ if (current->IsGlobalObject()) {
+ // Returns a cell or a failure.
+ MaybeObject* result = GenerateCheckPropertyCell(
+ masm,
+ GlobalObject::cast(current),
+ name,
+ scratch,
+ miss);
+ if (result->IsFailure()) return result;
+ }
+ ASSERT(current->IsJSObject());
+ current = JSObject::cast(current->GetPrototype());
+ }
+ return NULL;
+}
+
+
+// Convert and store the int passed in register ival as an IEEE 754 single
+// precision floating point value at memory location (dst + 4 * wordoffset).
+// If FPU is available, use it for the conversion.
+static void StoreIntAsFloat(MacroAssembler* masm,
+ Register dst,
+ Register wordoffset,
+ Register ival,
+ Register fval,
+ Register scratch1,
+ Register scratch2) {
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(ival, f0);
+ __ cvt_s_w(f0, f0);
+ __ sll(scratch1, wordoffset, 2);
+ __ addu(scratch1, dst, scratch1);
+ __ swc1(f0, MemOperand(scratch1, 0));
+ } else {
+ // FPU is not available, do manual conversions.
+
+ Label not_special, done;
+ // Move sign bit from source to destination. This works because the sign
+ // bit in the exponent word of the double has the same position and polarity
+ // as the 2's complement sign bit in a Smi.
+ ASSERT(kBinary32SignMask == 0x80000000u);
+
+ __ And(fval, ival, Operand(kBinary32SignMask));
+ // Negate value if it is negative.
+ __ subu(scratch1, zero_reg, ival);
+ __ movn(ival, scratch1, fval);
+
+ // We have -1, 0 or 1, which we treat specially. Register ival contains
+ // absolute value: it is either equal to 1 (special case of -1 and 1),
+ // greater than 1 (not a special case) or less than 1 (special case of 0).
+ __ Branch(&not_special, gt, ival, Operand(1));
+
+ // For 1 or -1 we need to or in the 0 exponent (biased).
+ static const uint32_t exponent_word_for_1 =
+ kBinary32ExponentBias << kBinary32ExponentShift;
+
+ __ Xor(scratch1, ival, Operand(1));
+ __ li(scratch2, exponent_word_for_1);
+ __ or_(scratch2, fval, scratch2);
+ __ movz(fval, scratch2, scratch1); // Only if ival is equal to 1.
+ __ Branch(&done);
+
+ __ bind(&not_special);
+ // Count leading zeros.
+ // Gets the wrong answer for 0, but we already checked for that case above.
+ Register zeros = scratch2;
+ __ clz(zeros, ival);
+
+ // Compute exponent and or it into the exponent register.
+ __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias);
+ __ subu(scratch1, scratch1, zeros);
+
+ __ sll(scratch1, scratch1, kBinary32ExponentShift);
+ __ or_(fval, fval, scratch1);
+
+ // Shift up the source chopping the top bit off.
+ __ Addu(zeros, zeros, Operand(1));
+ // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
+ __ sllv(ival, ival, zeros);
+ // And the top (top 20 bits).
+ __ srl(scratch1, ival, kBitsPerInt - kBinary32MantissaBits);
+ __ or_(fval, fval, scratch1);
+
+ __ bind(&done);
+
+ __ sll(scratch1, wordoffset, 2);
+ __ addu(scratch1, dst, scratch1);
+ __ sw(fval, MemOperand(scratch1, 0));
+ }
+}
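
The non-FPU path builds the binary32 bit pattern by hand: extract the sign, special-case magnitudes of 0 and 1, then count leading zeros to derive the exponent and shift the remaining bits into the mantissa, truncating ("chopping") the low bits just as the assembly does. A host-side sketch of the same construction, assuming a GCC/Clang-style __builtin_clz:

#include <cstdint>

static const uint32_t kSignMask32 = 0x80000000u;
static const int kBinary32Bias = 127;
static const int kBinary32ExpShift = 23;
static const int kBinary32MantBits = 23;

uint32_t IntToBinary32Bits(int32_t v) {
  uint32_t sign = static_cast<uint32_t>(v) & kSignMask32;
  // Unsigned negation avoids overflow for INT_MIN.
  uint32_t mag = v < 0 ? 0u - static_cast<uint32_t>(v)
                       : static_cast<uint32_t>(v);
  if (mag == 0) return sign;  // +/-0.
  if (mag == 1) {             // |v| == 1: biased exponent only.
    return sign | (static_cast<uint32_t>(kBinary32Bias) << kBinary32ExpShift);
  }
  int zeros = __builtin_clz(mag);
  uint32_t exponent = static_cast<uint32_t>((31 - zeros) + kBinary32Bias);
  // Shift the implicit top bit out and keep the next 23 bits, truncating.
  uint32_t mantissa = (mag << (zeros + 1)) >> (32 - kBinary32MantBits);
  return sign | (exponent << kBinary32ExpShift) | mantissa;
}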
+
+
+// Convert unsigned integer with specified number of leading zeroes in binary
+// representation to IEEE 754 double.
+// Integer to convert is passed in register hiword.
+// Resulting double is returned in registers hiword:loword.
+// This function does not work correctly for 0.
+static void GenerateUInt2Double(MacroAssembler* masm,
+ Register hiword,
+ Register loword,
+ Register scratch,
+ int leading_zeroes) {
+ const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+ const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+ const int mantissa_shift_for_hi_word =
+ meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+
+ const int mantissa_shift_for_lo_word =
+ kBitsPerInt - mantissa_shift_for_hi_word;
+
+ __ li(scratch, biased_exponent << HeapNumber::kExponentShift);
+ if (mantissa_shift_for_hi_word > 0) {
+ __ sll(loword, hiword, mantissa_shift_for_lo_word);
+ __ srl(hiword, hiword, mantissa_shift_for_hi_word);
+ __ or_(hiword, scratch, hiword);
+ } else {
+ __ mov(loword, zero_reg);
+ __ sll(hiword, hiword, mantissa_shift_for_hi_word);
+ __ or_(hiword, scratch, hiword);
+ }
+
+ // If the least significant bit of the biased exponent was not 1, it was
+ // corrupted by the most significant bit of the mantissa, so fix that.
+ if (!(biased_exponent & 1)) {
+ __ li(scratch, 1 << HeapNumber::kExponentShift);
+ __ nor(scratch, scratch, scratch);
+ __ and_(hiword, hiword, scratch);
+ }
}
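
The routine packs the integer's significant bits into the double's mantissa given a compile-time leading-zero count, then clears the exponent's low bit where the integer's implicit top bit corrupted it. A host-side sketch computing the two 32-bit words; like the routine above, it does not handle 0:

#include <cstdint>

static const int kDoubleBias = 1023;
static const int kHiWordExpShift = 20;  // Exponent shift within the hi word.
static const int kMantissaBitsInHiWord = 20;

void UInt2DoubleWords(uint32_t value, int leading_zeroes,
                      uint32_t* hiword, uint32_t* loword) {
  const int meaningful_bits = 32 - leading_zeroes - 1;
  const int biased_exponent = kDoubleBias + meaningful_bits;
  const int hi_shift = meaningful_bits - kMantissaBitsInHiWord;
  const uint32_t exponent =
      static_cast<uint32_t>(biased_exponent) << kHiWordExpShift;
  if (hi_shift > 0) {
    *loword = value << (32 - hi_shift);
    *hiword = exponent | (value >> hi_shift);
  } else {
    *loword = 0;
    *hiword = exponent | (value << -hi_shift);
  }
  // The integer's implicit top bit landed on the exponent's low bit; if
  // that bit should be 0, mask it back off.
  if (!(biased_exponent & 1)) {
    *hiword &= ~(1u << kHiWordExpShift);
  }
}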
@@ -108,15 +1030,163 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
#define __ ACCESS_MASM(masm())
+Register StubCompiler::CheckPrototypes(JSObject* object,
+ Register object_reg,
+ JSObject* holder,
+ Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ String* name,
+ int save_at_depth,
+ Label* miss) {
+ // Make sure there's no overlap between holder and object registers.
+ ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
+ && !scratch2.is(scratch1));
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ if (save_at_depth == depth) {
+ __ sw(reg, MemOperand(sp));
+ }
+
+ // Check the maps in the prototype chain.
+ // Traverse the prototype chain from the object and do map checks.
+ JSObject* current = object;
+ while (current != holder) {
+ depth++;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+
+ ASSERT(current->GetPrototype()->IsJSObject());
+ JSObject* prototype = JSObject::cast(current->GetPrototype());
+ if (!current->HasFastProperties() &&
+ !current->IsJSGlobalObject() &&
+ !current->IsJSGlobalProxy()) {
+ if (!name->IsSymbol()) {
+ MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
+ Object* lookup_result = NULL; // Initialization to please compiler.
+ if (!maybe_lookup_result->ToObject(&lookup_result)) {
+ set_failure(Failure::cast(maybe_lookup_result));
+ return reg;
+ }
+ name = String::cast(lookup_result);
+ }
+ ASSERT(current->property_dictionary()->FindEntry(name) ==
+ StringDictionary::kNotFound);
+
+ MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+ miss,
+ reg,
+ name,
+ scratch1,
+ scratch2);
+ if (negative_lookup->IsFailure()) {
+ set_failure(Failure::cast(negative_lookup));
+ return reg;
+ }
+
+ __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // From now the object is in holder_reg.
+ __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else if (heap()->InNewSpace(prototype)) {
+ // Get the map of the current object.
+ __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+ // Branch on the result of the map check.
+ __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+
+ // Check access rights to the global object. This has to happen
+ // after the map check so that we know that the object is
+ // actually a global object.
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ // Restore scratch register to be the map of the object. In the
+ // new space case below, we load the prototype from the map in
+ // the scratch register.
+ __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
+
+ reg = holder_reg; // From now the object is in holder_reg.
+ // The prototype is in new space; we cannot store a reference
+ // to it in the code. Load it from the map.
+ __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ // Check the map of the current object.
+ __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ // Branch on the result of the map check.
+ __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+ // Check access rights to the global object. This has to happen
+ // after the map check so that we know that the object is
+ // actually a global object.
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ }
+ // The prototype is in old space; load it directly.
+ reg = holder_reg; // From now the object is in holder_reg.
+ __ li(reg, Operand(Handle<JSObject>(prototype)));
+ }
+
+ if (save_at_depth == depth) {
+ __ sw(reg, MemOperand(sp));
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ }
+
+ // Check the holder map.
+ __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+
+ // Log the check depth.
+ LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
+ // Perform security check for access to the global object.
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+ if (holder->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ }
+
+ // If we've skipped any global objects, it's not enough to verify
+ // that their maps haven't changed. We also need to check that the
+ // property cell for the property is still empty.
+
+ MaybeObject* result = GenerateCheckPropertyCells(masm(),
+ object,
+ holder,
+ name,
+ scratch1,
+ miss);
+ if (result->IsFailure()) set_failure(Failure::cast(result));
+
+ // Return the register containing the holder.
+ return reg;
+}
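
CheckPrototypes walks from the receiver to the holder one prototype at a time, guarding every hop with a map check and choosing per hop whether to reload the prototype from the map (new space, where the object can move) or embed it directly (old space). A much-simplified host-side sketch of that control flow, with stand-in types; the generated stub branches to the miss label where this returns false:

#include <cstddef>
#include <vector>

struct ToyMap;
struct ToyObject { const ToyMap* map; };
struct ToyMap { const ToyObject* prototype; };

bool CheckPrototypeChain(const ToyObject* object, const ToyObject* holder,
                         const std::vector<const ToyMap*>& expected_maps) {
  const ToyObject* current = object;
  size_t depth = 0;
  while (current != holder) {
    // Map check: a map that changed since compile time means a miss.
    if (depth >= expected_maps.size() || current->map != expected_maps[depth])
      return false;
    current = current->map->prototype;  // Advance down the chain.
    ++depth;
  }
  // Finally check the holder's own map.
  return depth < expected_maps.size() && current->map == expected_maps[depth];
}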
+
+
void StubCompiler::GenerateLoadField(JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
int index,
String* name,
Label* miss) {
- UNIMPLEMENTED_MIPS();
+ // Check that the receiver isn't a smi.
+ __ And(scratch1, receiver, Operand(kSmiTagMask));
+ __ Branch(miss, eq, scratch1, Operand(zero_reg));
+
+ // Check that the maps haven't changed.
+ Register reg =
+ CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+ name, miss);
+ GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
+ __ Ret();
}
@@ -125,289 +1195,3048 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Object* value,
String* name,
Label* miss) {
- UNIMPLEMENTED_MIPS();
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss, scratch1);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, scratch3, name, miss);
+
+ // Return the constant value.
+ __ li(v0, Operand(Handle<Object>(value)));
+ __ Ret();
}
-bool StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- AccessorInfo* callback,
- String* name,
- Label* miss,
- Failure** failure) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x470);
- return false; // UNIMPLEMENTED RETURN
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ AccessorInfo* callback,
+ String* name,
+ Label* miss) {
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss, scratch1);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+ name, miss);
+
+ // Build AccessorInfo::args_ list on the stack and push property name below
+ // the exit frame to make GC aware of them and store pointers to them.
+ __ push(receiver);
+ __ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
+ Handle<AccessorInfo> callback_handle(callback);
+ if (heap()->InNewSpace(callback_handle->data())) {
+ __ li(scratch3, callback_handle);
+ __ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
+ } else {
+ __ li(scratch3, Handle<Object>(callback_handle->data()));
+ }
+ __ Push(reg, scratch3, name_reg);
+ __ mov(a2, scratch2); // Saved in case scratch2 == a1.
+ __ mov(a1, sp); // a1 (first argument - see note below) = Handle<String>
+
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
+
+ // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
+ // struct from the function (which is currently the case). This means we pass
+ // the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
+ // will handle setting up a0.
+
+ const int kApiStackSpace = 1;
+
+ __ EnterExitFrame(false, kApiStackSpace);
+ // Create AccessorInfo instance on the stack above the exit frame with
+ // scratch2 (internal::Object **args_) as the data.
+ __ sw(a2, MemOperand(sp, kPointerSize));
+ // a2 (second argument - see note above) = AccessorInfo&
+ __ Addu(a2, sp, kPointerSize);
+
+ // Emitting a stub call may try to allocate (if the code is not
+ // already generated). Do not allow the assembler to perform a
+ // garbage collection but instead return the allocation failure
+ // object.
+ ExternalReference ref =
+ ExternalReference(&fun,
+ ExternalReference::DIRECT_GETTER_CALL,
+ masm()->isolate());
+ // 4 args - will be freed later by LeaveExitFrame.
+ return masm()->TryCallApiFunctionAndReturn(ref, 4);
}
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* holder,
+ JSObject* interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
Label* miss) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x505);
+ ASSERT(interceptor_holder->HasNamedInterceptor());
+ ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ // So far the most popular follow-ups for interceptor loads are FIELD
+ // and CALLBACKS, so inline only those; other cases may be added
+ // later.
+ bool compile_followup_inline = false;
+ if (lookup->IsProperty() && lookup->IsCacheable()) {
+ if (lookup->type() == FIELD) {
+ compile_followup_inline = true;
+ } else if (lookup->type() == CALLBACKS &&
+ lookup->GetCallbackObject()->IsAccessorInfo() &&
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
+ compile_followup_inline = true;
+ }
+ }
+
+ if (compile_followup_inline) {
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, scratch3,
+ name, miss);
+ ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ __ EnterInternalFrame();
+
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ Push(receiver, holder_reg, name_reg);
+ } else {
+ __ Push(holder_reg, name_reg);
+ }
+
+ // Invoke an interceptor. Note: the map checks from the receiver to the
+ // interceptor's holder have already been compiled (see the caller of
+ // this method).
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+ // Check if the interceptor provided a value for the property. If so,
+ // return immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
+ __ LeaveInternalFrame();
+ __ Ret();
+
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
+ }
+
+ __ LeaveInternalFrame();
+
+ // Check that the maps from the interceptor's holder to the lookup's
+ // holder haven't changed, and load the lookup's holder into holder_reg.
+ if (interceptor_holder != lookup->holder()) {
+ holder_reg = CheckPrototypes(interceptor_holder,
+ holder_reg,
+ lookup->holder(),
+ scratch1,
+ scratch2,
+ scratch3,
+ name,
+ miss);
+ }
+
+ if (lookup->type() == FIELD) {
+ // We found a FIELD property in the prototype chain of the interceptor's
+ // holder. Retrieve the field from the field's holder.
+ GenerateFastPropertyLoad(masm(), v0, holder_reg,
+ lookup->holder(), lookup->GetFieldIndex());
+ __ Ret();
+ } else {
+ // We found a CALLBACKS property in the prototype chain of the
+ // interceptor's holder.
+ ASSERT(lookup->type() == CALLBACKS);
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ ASSERT(callback != NULL);
+ ASSERT(callback->getter() != NULL);
+
+ // Tail call to runtime.
+ // Important invariant in CALLBACKS case: the code above must be
+ // structured to never clobber |receiver| register.
+ __ li(scratch2, Handle<AccessorInfo>(callback));
+ // holder_reg is either receiver or scratch1.
+ if (!receiver.is(holder_reg)) {
+ ASSERT(scratch1.is(holder_reg));
+ __ Push(receiver, holder_reg);
+ __ lw(scratch3,
+ FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
+ __ Push(scratch3, scratch2, name_reg);
+ } else {
+ __ push(receiver);
+ __ lw(scratch3,
+ FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
+ __ Push(holder_reg, scratch3, scratch2, name_reg);
+ }
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 5, 1);
+ }
+ } else { // !compile_followup_inline
+ // Call the runtime system to load the interceptor.
+ // Check that the maps haven't changed.
+ Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, scratch3,
+ name, miss);
+ PushInterceptorArguments(masm(), receiver, holder_reg,
+ name_reg, interceptor_holder);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate());
+ __ TailCallExternalReference(ref, 5, 1);
+ }
+}
+
+
+void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+ if (kind_ == Code::KEYED_CALL_IC) {
+ __ Branch(miss, ne, a2, Operand(Handle<String>(name)));
+ }
}
-Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
- // Registers:
- // a1: function
- // ra: return address
+void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
+ JSObject* holder,
+ String* name,
+ Label* miss) {
+ ASSERT(holder->IsGlobalObject());
+
+ // Get the number of arguments.
+ const int argc = arguments().immediate();
- // Enter an internal frame.
- __ EnterInternalFrame();
- // Preserve the function.
- __ Push(a1);
- // Setup aligned call.
- __ SetupAlignedCall(t0, 1);
- // Push the function on the stack as the argument to the runtime function.
- __ Push(a1);
- // Call the runtime function
- __ CallRuntime(Runtime::kLazyCompile, 1);
- __ ReturnFromAlignedCall();
- // Calculate the entry point.
- __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
- // Restore saved function.
- __ Pop(a1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
- // Do a tail-call of the compiled function.
- __ Jump(t9);
+ // Get the receiver from the stack.
+ __ lw(a0, MemOperand(sp, argc * kPointerSize));
- return GetCodeWithFlags(flags, "LazyCompileStub");
+ // If the object is the holder then we know that it's a global
+ // object which can only happen for contextual calls. In this case,
+ // the receiver cannot be a smi.
+ if (object != holder) {
+ __ JumpIfSmi(a0, miss);
+ }
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(object, a0, holder, a3, a1, t0, name, miss);
}
-Object* CallStubCompiler::CompileCallField(JSObject* object,
- JSObject* holder,
- int index,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ Label* miss) {
+ // Get the value from the cell.
+ __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
+
+ // Check that the cell contains the same function.
+ if (heap()->InNewSpace(function)) {
+ // We can't embed a pointer to a function in new space so we have
+ // to verify that the shared function info is unchanged. This has
+ // the nice side effect that multiple closures based on the same
+ // function can all use this call IC. Before we load through the
+ // function, we have to verify that it is still a function.
+ __ JumpIfSmi(a1, miss);
+ __ GetObjectType(a1, a3, a3);
+ __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
+
+ // Check the shared function info. Make sure it hasn't changed.
+ __ li(a3, Handle<SharedFunctionInfo>(function->shared()));
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Branch(miss, ne, t0, Operand(a3));
+ } else {
+ __ Branch(miss, ne, a1, Operand(Handle<JSFunction>(function)));
+ }
}
-Object* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::GenerateMissBranch() {
+ MaybeObject* maybe_obj =
+ isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
+ kind_,
+ extra_ic_state_);
+ Object* obj;
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
+ return obj;
}
-Object* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
+ JSObject* holder,
+ int index,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ GenerateNameCheck(name, &miss);
+
+ const int argc = arguments().immediate();
+
+ // Get the receiver of the function from the stack into a0.
+ __ lw(a0, MemOperand(sp, argc * kPointerSize));
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(a0, &miss, t0);
+
+ // Do the right check and compute the holder register.
+ Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
+ GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
+
+ GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(FIELD, name);
}
-Object* CallStubCompiler::CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // If the object is not an array, bail out to the regular call.
+ if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+
+ Label miss;
+
+ GenerateNameCheck(name, &miss);
+
+ Register receiver = a1;
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(JSObject::cast(object), receiver,
+ holder, a3, v0, t0, name, &miss);
+
+ if (argc == 0) {
+ // Nothing to do, just return the length.
+ __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Drop(argc + 1);
+ __ Ret();
+ } else {
+ Label call_builtin;
+
+ Register elements = a3;
+ Register end_elements = t1;
+
+ // Get the elements array of the object.
+ __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ v0,
+ Heap::kFixedArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+
+ if (argc == 1) { // Otherwise fall through to call the builtin.
+ Label exit, with_write_barrier, attempt_to_grow_elements;
+
+ // Get the array's length into v0 and calculate new length.
+ __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
+
+ // Get the elements' length.
+ __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
+
+ // Save new length.
+ __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Push the element.
+ __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
+ // We may need the end-of-elements address again below (for the write
+ // barrier), so compute it into end_elements now.
+ __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(end_elements, elements, end_elements);
+ const int kEndElementsOffset =
+ FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
+ __ sw(t0, MemOperand(end_elements, kEndElementsOffset));
+ __ Addu(end_elements, end_elements, kPointerSize);
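+
+ // Address arithmetic sketch (argc == 1): v0 holds the new length as a
+ // smi, i.e. length << 1. Shifting left by kPointerSizeLog2 - kSmiTagSize
+ // (= 1) turns it into length * kPointerSize, so the store above writes to
+ // elements + header - tag + (length - 1) * kPointerSize, the slot of the
+ // newly pushed element.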
+
+ // Check for a smi.
+ __ JumpIfNotSmi(t0, &with_write_barrier);
+ __ bind(&exit);
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&with_write_barrier);
+ __ InNewSpace(elements, t0, eq, &exit);
+ __ RecordWriteHelper(elements, end_elements, t0);
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&attempt_to_grow_elements);
+ // v0: array's length + 1.
+ // t0: elements' length.
+
+ if (!FLAG_inline_new) {
+ __ Branch(&call_builtin);
+ }
+
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(
+ masm()->isolate());
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address(
+ masm()->isolate());
+
+ const int kAllocationDelta = 4;
+ // Load top and check if it is the end of elements.
+ __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(end_elements, elements, end_elements);
+ __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
+ __ li(t3, Operand(new_space_allocation_top));
+ __ lw(t2, MemOperand(t3));
+ __ Branch(&call_builtin, ne, end_elements, Operand(t2));
+
+ __ li(t5, Operand(new_space_allocation_limit));
+ __ lw(t5, MemOperand(t5));
+ __ Addu(t2, t2, Operand(kAllocationDelta * kPointerSize));
+ __ Branch(&call_builtin, hi, t2, Operand(t5));
+
+ // We fit within the allocation limit, so grow the elements array in place.
+ // Update new_space_allocation_top.
+ __ sw(t2, MemOperand(t3));
+ // Push the argument.
+ __ lw(t2, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ sw(t2, MemOperand(end_elements));
+ // Fill the rest with holes.
+ __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
+ for (int i = 1; i < kAllocationDelta; i++) {
+ __ sw(t2, MemOperand(end_elements, i * kPointerSize));
+ }
+
+ // Update elements' and array's sizes.
+ __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
+ __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // Elements are in new space, so write barrier is not required.
+ __ Drop(argc + 1);
+ __ Ret();
+ }
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
+ masm()->isolate()),
+ argc + 1,
+ 1);
+ }
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
}
-Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x782);
- return GetCode(INTERCEPTOR, name);
+MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // If the object is not an array, bail out to the regular call.
+ if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+
+ Label miss, return_undefined, call_builtin;
+
+ Register receiver = a1;
+ Register elements = a3;
+
+ GenerateNameCheck(name, &miss);
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(JSObject::cast(object),
+ receiver, holder, elements, t0, v0, name, &miss);
+
+ // Get the elements array of the object.
+ __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ v0,
+ Heap::kFixedArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+
+ // Get the array's length into t0 and calculate new length.
+ __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Subu(t0, t0, Operand(Smi::FromInt(1)));
+ __ Branch(&return_undefined, lt, t0, Operand(zero_reg));
+
+ // Get the last element.
+ __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ // We can't address the last element in one operation. Compute the more
+ // expensive shift first, and use an offset later on.
+ __ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(elements, elements, t1);
+ __ lw(v0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Branch(&call_builtin, eq, v0, Operand(t2));
+
+ // Set the array's length.
+ __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Fill with the hole.
+ __ sw(t2, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&return_undefined);
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
+ masm()->isolate()),
+ argc + 1,
+ 1);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
}
-Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : function name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // If the object is not a string, bail out to the regular call.
+ if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label name_miss;
+ Label index_out_of_range;
+
+ Label* index_out_of_range_label = &index_out_of_range;
+
+ if (kind_ == Code::CALL_IC &&
+ (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ DEFAULT_STRING_STUB)) {
+ index_out_of_range_label = &miss;
+ }
+
+ GenerateNameCheck(name, &name_miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ v0,
+ &miss);
+ ASSERT(object != holder);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
+ a1, a3, t0, name, &miss);
+
+ Register receiver = a1;
+ Register index = t1;
+ Register scratch = a3;
+ Register result = v0;
+ __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+ if (argc > 0) {
+ __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
+ } else {
+ __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+ }
+
+ StringCharCodeAtGenerator char_code_at_generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ char_code_at_generator.GenerateFast(masm());
+ __ Drop(argc + 1);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_code_at_generator.GenerateSlow(masm(), call_helper);
+
+ if (index_out_of_range.is_linked()) {
+ __ bind(&index_out_of_range);
+ __ LoadRoot(v0, Heap::kNanValueRootIndex);
+ __ Drop(argc + 1);
+ __ Ret();
+ }
+
+ __ bind(&miss);
+ // Restore function name in a2.
+ __ li(a2, Handle<String>(name));
+ __ bind(&name_miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
}
-Object* StoreStubCompiler::CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileStringCharAtCall(
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : function name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // If the object is not a string, bail out to the regular call.
+ if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label name_miss;
+ Label index_out_of_range;
+ Label* index_out_of_range_label = &index_out_of_range;
+
+ if (kind_ == Code::CALL_IC &&
+ (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ DEFAULT_STRING_STUB)) {
+ index_out_of_range_label = &miss;
+ }
+
+ GenerateNameCheck(name, &name_miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ v0,
+ &miss);
+ ASSERT(object != holder);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
+ a1, a3, t0, name, &miss);
+
+ Register receiver = v0;
+ Register index = t1;
+ Register scratch1 = a1;
+ Register scratch2 = a3;
+ Register result = v0;
+ __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+ if (argc > 0) {
+ __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
+ } else {
+ __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+ }
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ char_at_generator.GenerateFast(masm());
+ __ Drop(argc + 1);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm(), call_helper);
+
+ if (index_out_of_range.is_linked()) {
+ __ bind(&index_out_of_range);
+ __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
+ __ Drop(argc + 1);
+ __ Ret();
+ }
+
+ __ bind(&miss);
+ // Restore function name in a2.
+ __ li(a2, Handle<String>(name));
+ __ bind(&name_miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
}
-Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
- AccessorInfo* callback,
- String* name) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x906);
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : function name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(a1, &miss);
+
+ CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the char code argument.
+ Register code = a1;
+ __ lw(code, MemOperand(sp, 0 * kPointerSize));
+
+ // Check that the code is a smi.
+ Label slow;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(code, &slow);
+
+ // Convert the smi code to uint16.
+ __ And(code, code, Operand(Smi::FromInt(0xffff)));
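+ // This mask works directly on the tagged value: with kSmiTag == 0 the tag
+ // bit stays zero, so the And is equivalent to truncating the untagged char
+ // code to uint16 (a small illustration of the smi encoding at work).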
+
+ StringCharFromCodeGenerator char_from_code_generator(code, v0);
+ char_from_code_generator.GenerateFast(masm());
+ __ Drop(argc + 1);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_from_code_generator.GenerateSlow(masm(), call_helper);
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+
+ __ bind(&miss);
+ // a2: function name.
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
}
-Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : function name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ if (!CpuFeatures::IsSupported(FPU)) return heap()->undefined_value();
+ CpuFeatures::Scope scope_fpu(FPU);
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+ Label miss, slow;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(a1, &miss);
+
+ CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into v0.
+ __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+ // If the argument is a smi, just return.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ And(t0, v0, Operand(kSmiTagMask));
+ __ Drop(argc + 1, eq, t0, Operand(zero_reg));
+ __ Ret(eq, t0, Operand(zero_reg));
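+ // Note that Drop and Ret above are the conditional macro-assembler
+ // variants: both take effect only when t0 == zero_reg, i.e. when the
+ // argument is a smi; otherwise execution falls through to the map check.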
+
+ __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
+
+ Label wont_fit_smi, no_fpu_error, restore_fcsr_and_return;
+
+ // The FPU is available (checked above), so use the floor instruction.
+
+ // Load the HeapNumber value.
+ __ ldc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+
+ // Backup FCSR.
+ __ cfc1(a3, FCSR);
+ // Clearing FCSR clears the exception mask with no side-effects.
+ __ ctc1(zero_reg, FCSR);
+ // Convert the argument to an integer.
+ __ floor_w_d(f0, f0);
+
+ // Start checking for special cases.
+ // Get the argument exponent and clear the sign bit.
+ __ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize));
+ __ And(t2, t1, Operand(~HeapNumber::kSignMask));
+ __ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord);
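+ // Layout sketch: the high word of an IEEE-754 double is
+ // sign(1) | exponent(11) | mantissa-top(20). After masking off the sign
+ // and shifting right by kMantissaBitsInTopWord (20), t2 holds the biased
+ // 11-bit exponent.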
+
+ // Retrieve FCSR and check for fpu errors.
+ __ cfc1(t5, FCSR);
+ __ And(t5, t5, Operand(kFCSRExceptionFlagMask));
+ __ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
+
+ // Check for NaN, Infinity, and -Infinity.
+ // They are invariant through a Math.floor call, so just
+ // return the original argument.
+ __ Subu(t3, t2, Operand(HeapNumber::kExponentMask
+ >> HeapNumber::kMantissaBitsInTopWord));
+ __ Branch(&restore_fcsr_and_return, eq, t3, Operand(zero_reg));
+ // We had an overflow or underflow in the conversion. Check if we
+ // have a big exponent.
+ // If greater or equal, the argument is already rounded and in v0.
+ __ Branch(&restore_fcsr_and_return, ge, t3,
+ Operand(HeapNumber::kMantissaBits));
+ __ Branch(&wont_fit_smi);
+
+ __ bind(&no_fpu_error);
+ // Move the result back to v0.
+ __ mfc1(v0, f0);
+ // Check if the result fits into a smi.
+ __ Addu(a1, v0, Operand(0x40000000));
+ __ Branch(&wont_fit_smi, lt, a1, Operand(zero_reg));
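+ // Range-check sketch: adding 0x40000000 makes the sum negative exactly
+ // when v0 lies outside [-2^30, 2^30 - 1], the range of values that
+ // survive the one-bit smi tag shift below.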
+ // Tag the result.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ sll(v0, v0, kSmiTagSize);
+
+ // Check for -0.
+ __ Branch(&restore_fcsr_and_return, ne, v0, Operand(zero_reg));
+ // t1 already holds the HeapNumber exponent.
+ __ And(t0, t1, Operand(HeapNumber::kSignMask));
+ // If the HeapNumber is negative, the input was -0, so reload the original
+ // argument from the stack and return it. Otherwise v0 already holds 0,
+ // so we can also just return.
+ __ Branch(&restore_fcsr_and_return, eq, t0, Operand(zero_reg));
+ __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+ __ bind(&restore_fcsr_and_return);
+ // Restore FCSR and return.
+ __ ctc1(a3, FCSR);
+
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&wont_fit_smi);
+ // Restore FCSR and fall to slow case.
+ __ ctc1(a3, FCSR);
+
+ __ bind(&slow);
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+
+ __ bind(&miss);
+ // a2: function name.
+ MaybeObject* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
}
-Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
- JSGlobalPropertyCell* cell,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : function name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(a1, &miss);
+
+ CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into v0.
+ __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+ // Check if the argument is a smi.
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(v0, &not_smi);
+
+ // Do bitwise not or do nothing depending on the sign of the
+ // argument.
+ __ sra(t0, v0, kBitsPerInt - 1);
+ __ Xor(a1, v0, t0);
+
+ // Add 1 or do nothing depending on the sign of the argument.
+ __ Subu(v0, a1, t0);
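+
+ // Worked example of the branchless abs above (values shown untagged for
+ // readability; the identity holds for the tagged smi as well since
+ // kSmiTag == 0): for v0 = -5, t0 = -5 >> 31 = -1, a1 = -5 ^ -1 = 4,
+ // v0 = 4 - (-1) = 5. For non-negative inputs t0 = 0 and nothing changes.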
+
+ // If the result is still negative, go to the slow case.
+ // This only happens for the most negative smi.
+ Label slow;
+ __ Branch(&slow, lt, v0, Operand(zero_reg));
+
+ // Smi case done.
+ __ Drop(argc + 1);
+ __ Ret();
+
+ // Check if the argument is a heap number and load its exponent and
+ // sign.
+ __ bind(&not_smi);
+ __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
+ __ lw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+
+ // Check the sign of the argument. If the argument is positive,
+ // just return it.
+ Label negative_sign;
+ __ And(t0, a1, Operand(HeapNumber::kSignMask));
+ __ Branch(&negative_sign, ne, t0, Operand(zero_reg));
+ __ Drop(argc + 1);
+ __ Ret();
+
+ // If the argument is negative, clear the sign, and return a new
+ // number.
+ __ bind(&negative_sign);
+ __ Xor(a1, a1, Operand(HeapNumber::kSignMask));
+ __ lw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t0, t1, t2, &slow);
+ __ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ Drop(argc + 1);
+ __ Ret();
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+
+ __ bind(&miss);
+ // a2: function name.
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
}
-Object* LoadStubCompiler::CompileLoadField(JSObject* object,
- JSObject* holder,
- int index,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileFastApiCall(
+ const CallOptimization& optimization,
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ Counters* counters = isolate()->counters();
+
+ ASSERT(optimization.is_simple_api_call());
+ // Bail out if the object is a global object, as we don't want to
+ // repatch it to the global receiver.
+ if (object->IsGlobalObject()) return heap()->undefined_value();
+ if (cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSObject()) return heap()->undefined_value();
+ int depth = optimization.GetPrototypeDepthOfExpectedType(
+ JSObject::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+
+ Label miss, miss_before_stack_reserved;
+
+ GenerateNameCheck(name, &miss_before_stack_reserved);
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(a1, &miss_before_stack_reserved);
+
+ __ IncrementCounter(counters->call_const(), 1, a0, a3);
+ __ IncrementCounter(counters->call_const_fast_api(), 1, a0, a3);
+
+ ReserveSpaceForFastApiCall(masm(), a0);
+
+ // Check that the maps haven't changed, and find the holder as a side effect.
+ CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+ depth, &miss);
+
+ MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
+ if (result->IsFailure()) return result;
+
+ __ bind(&miss);
+ FreeSpaceForFastApiCall(masm());
+
+ __ bind(&miss_before_stack_reserved);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
}
-Object* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ if (HasCustomCallGenerator(function)) {
+ MaybeObject* maybe_result = CompileCustomCall(
+ object, holder, NULL, function, name);
+ Object* result;
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ // Undefined means bail out to the regular compiler.
+ if (!result->IsUndefined()) return result;
+ }
+
+ Label miss;
+
+ GenerateNameCheck(name, &miss);
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ if (check != NUMBER_CHECK) {
+ __ And(t1, a1, Operand(kSmiTagMask));
+ __ Branch(&miss, eq, t1, Operand(zero_reg));
+ }
+
+ // Make sure that it's okay not to patch the on-stack receiver
+ // unless we're doing a receiver map check.
+ ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+
+ SharedFunctionInfo* function_info = function->shared();
+ switch (check) {
+ case RECEIVER_MAP_CHECK:
+ __ IncrementCounter(masm()->isolate()->counters()->call_const(),
+ 1, a0, a3);
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+ &miss);
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ if (object->IsGlobalObject()) {
+ __ lw(a3, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a3, MemOperand(sp, argc * kPointerSize));
+ }
+ break;
+
+ case STRING_CHECK:
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
+ } else {
+ // Check that the object is a string or a symbol.
+ __ GetObjectType(a1, a3, a3);
+ __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+ a1, t0, name, &miss);
+ }
+ break;
+
+ case NUMBER_CHECK: {
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ And(t1, a1, Operand(kSmiTagMask));
+ __ Branch(&fast, eq, t1, Operand(zero_reg));
+ __ GetObjectType(a1, a0, a0);
+ __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+ a1, t0, name, &miss);
+ }
+ break;
+ }
+
+ case BOOLEAN_CHECK: {
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a boolean.
+ __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+ __ Branch(&fast, eq, a1, Operand(t0));
+ __ LoadRoot(t0, Heap::kFalseValueRootIndex);
+ __ Branch(&miss, ne, a1, Operand(t0));
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+ a1, t0, name, &miss);
+ }
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
}
-Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
- JSObject* holder,
- Object* value,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ Label miss;
+
+ GenerateNameCheck(name, &miss);
+
+ // Get the number of arguments.
+ const int argc = arguments().immediate();
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+
+ // Get the receiver from the stack.
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+ CallInterceptorCompiler compiler(this, arguments(), a2, extra_ic_state_);
+ MaybeObject* result = compiler.Compile(masm(),
+ object,
+ holder,
+ name,
+ &lookup,
+ a1,
+ a3,
+ t0,
+ a0,
+ &miss);
+ if (result->IsFailure()) {
+ return result;
+ }
+
+ // Move the returned value (the function to call) to a1.
+ __ mov(a1, v0);
+ // Restore receiver.
+ __ lw(a0, MemOperand(sp, argc * kPointerSize));
+
+ GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
}
-Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
+MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ if (HasCustomCallGenerator(function)) {
+ MaybeObject* maybe_result = CompileCustomCall(
+ object, holder, cell, function, name);
+ Object* result;
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ // Undefined means bail out to the regular compiler.
+ if (!result->IsUndefined()) return result;
+ }
+
+ Label miss;
+
+ GenerateNameCheck(name, &miss);
+
+ // Get the number of arguments.
+ const int argc = arguments().immediate();
+
+ GenerateGlobalReceiverCheck(object, holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ if (object->IsGlobalObject()) {
+ __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a3, MemOperand(sp, argc * kPointerSize));
+ }
+
+ // Set up the context (function already in a1).
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Jump to the cached code (tail call).
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
+ ASSERT(function->is_compiled());
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ if (V8::UseCrankshaft()) {
+ UNIMPLEMENTED_MIPS();
+ } else {
+ __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
+ JUMP_FUNCTION, call_kind);
+ }
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ __ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
}
-Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- String* name,
- bool is_dont_delete) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Name register might be clobbered.
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ a1, a2, a3,
+ &miss);
+ __ bind(&miss);
+ __ li(a2, Operand(Handle<String>(name))); // Restore name.
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
}
-Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
- JSObject* receiver,
+MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+ AccessorInfo* callback,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(a1, &miss);
+
+ // Check that the map of the object hasn't changed.
+ __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(a1, a3, &miss);
+ }
+
+ // Stub is never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ __ push(a1); // Receiver.
+ __ li(a3, Operand(Handle<AccessorInfo>(callback))); // Callback info.
+ __ Push(a3, a2, a0);
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
+ masm()->isolate());
+ __ TailCallExternalReference(store_callback_property, 4, 1);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
+MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(a1, &miss);
+
+ // Check that the map of the object hasn't changed.
+ __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Branch(&miss, ne, a3, Operand(Handle<Map>(receiver->map())));
+
+ // Perform global security token check if needed.
+ if (receiver->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(a1, a3, &miss);
+ }
+
+ // Stub is never generated for non-global objects that require access
+ // checks.
+ ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+
+ __ Push(a1, a2, a0); // Receiver, name, value.
+
+ __ li(a0, Operand(Smi::FromInt(strict_mode_)));
+ __ push(a0); // Strict mode.
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property =
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
+ masm()->isolate());
+ __ TailCallExternalReference(store_ic_property, 4, 1);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
+}
+
+
+MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+ JSGlobalPropertyCell* cell,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map of the global has not changed.
+ __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
+
+ // Check that the value in the cell is not the hole. If it is, this
+ // cell could have been deleted, and reintroducing the global would need
+ // to update the property details in the property dictionary of the
+ // global object. We bail out to the runtime system to do that.
+ __ li(t0, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
+ __ lw(t2, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
+ __ Branch(&miss, eq, t1, Operand(t2));
+
+ // Store the value in the cell.
+ __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
+ __ mov(v0, a0); // Stored value must be returned in v0.
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
+ __ Ret();
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ IncrementCounter(counters->named_store_global_inline_miss(), 1, a1, a3);
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
+}
+
+
+MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
+ JSObject* object,
+ JSObject* last) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the receiver is not a smi.
+ __ JumpIfSmi(a0, &miss);
+
+ // Check the maps of the full prototype chain.
+ CheckPrototypes(object, a0, last, a3, a1, t0, name, &miss);
+
+ // If the last object in the prototype chain is a global object,
+ // check that the global property cell is empty.
+ if (last->IsGlobalObject()) {
+ MaybeObject* cell = GenerateCheckPropertyCell(masm(),
+ GlobalObject::cast(last),
+ name,
+ a1,
+ &miss);
+ if (cell->IsFailure()) {
+ miss.Unuse();
+ return cell;
+ }
+ }
+
+ // Return undefined if the maps of the full prototype chain are still the same.
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(NONEXISTENT, heap()->empty_string());
+}
+
+
+MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
JSObject* holder,
- int index) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ int index,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ __ mov(v0, a0);
+
+ GenerateLoadField(object, holder, v0, a3, a1, t0, index, name, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(FIELD, name);
}
-Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* receiver,
+MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
+ JSObject* object,
JSObject* holder,
AccessorInfo* callback) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ MaybeObject* result = GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0,
+ callback, name, &miss);
+ if (result->IsFailure()) {
+ miss.Unuse();
+ return result;
+ }
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
}
-Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
- JSObject* receiver,
+MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
JSObject* holder,
- Object* value) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ Object* value,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ GenerateLoadConstant(object, holder, a0, a3, a1, t0, value, name, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CONSTANT_FUNCTION, name);
}
-Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
JSObject* holder,
String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+ Label miss;
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+ GenerateLoadInterceptor(object,
+ holder,
+ &lookup,
+ a0,
+ a2,
+ a3,
+ a1,
+ t0,
+ name,
+ &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
}
-Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ String* name,
+ bool is_dont_delete) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // If the object is the holder then we know that it's a global
+ // object which can only happen for contextual calls. In this case,
+ // the receiver cannot be a smi.
+ if (object != holder) {
+ __ And(t0, a0, Operand(kSmiTagMask));
+ __ Branch(&miss, eq, t0, Operand(zero_reg));
+ }
+
+ // Check that the map of the global has not changed.
+ CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss);
+
+ // Get the value from the cell.
+ __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ lw(t0, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
+
+ // Check for a deleted property if the property can actually be deleted.
+ if (!is_dont_delete) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&miss, eq, t0, Operand(at));
+ }
+
+ __ mov(v0, t0);
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+ __ Ret();
+
+ __ bind(&miss);
+ __ IncrementCounter(counters->named_load_global_stub_miss(), 1, a1, a3);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
}
-Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int index) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ GenerateLoadField(receiver, holder, a1, a2, a3, t0, index, name, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(FIELD, name);
}
-// TODO(1224671): implement the fast case.
-Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
+ String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ AccessorInfo* callback) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ MaybeObject* result = GenerateLoadCallback(receiver, holder, a1, a0, a2, a3,
+ t0, callback, name, &miss);
+ if (result->IsFailure()) {
+ miss.Unuse();
+ return result;
+ }
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(CALLBACKS, name);
}
-Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ Object* value) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ GenerateLoadConstant(receiver, holder, a1, a2, a3, t0, value, name, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CONSTANT_FUNCTION, name);
}
-Object* ConstructStubCompiler::CompileConstructStub(
- SharedFunctionInfo* shared) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+ GenerateLoadInterceptor(receiver,
+ holder,
+ &lookup,
+ a1,
+ a0,
+ a2,
+ a3,
+ t0,
+ name,
+ &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(INTERCEPTOR, name);
}
-Object* ExternalArrayStubCompiler::CompileKeyedLoadStub(
- ExternalArrayType array_type, Code::Flags flags) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ GenerateLoadArrayLength(masm(), a1, a2, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(CALLBACKS, name);
}
-Object* ExternalArrayStubCompiler::CompileKeyedStoreStub(
- ExternalArrayType array_type, Code::Flags flags) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
+
+ // Check the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ GenerateLoadStringLength(masm(), a1, a2, a3, &miss, true);
+ __ bind(&miss);
+ __ DecrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
+
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(CALLBACKS, name);
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
+
+ // Check the name hasn't changed.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ GenerateLoadFunctionPrototype(masm(), a1, a2, a3, &miss);
+ __ bind(&miss);
+ __ DecrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(CALLBACKS, name);
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Code* stub;
+ MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map);
+ if (!maybe_stub->To(&stub)) return maybe_stub;
+ __ DispatchMap(a1,
+ a2,
+ Handle<Map>(receiver_map),
+ Handle<Code>(stub),
+ DO_SMI_CHECK);
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+ MapList* receiver_maps,
+ CodeList* handler_ics) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+ __ JumpIfSmi(a1, &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
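+ // Polymorphic dispatch: compare the receiver's map word against each known
+ // map and tail-jump to the matching handler; fall through to the miss
+ // handler if none matches.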
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<Map> map(receiver_maps->at(current));
+ Handle<Code> code(handler_ics->at(current));
+ __ Jump(code, RelocInfo::CODE_TARGET, eq, a2, Operand(map));
+ }
+
+ __ bind(&miss);
+ Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL, MEGAMORPHIC);
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ Label miss;
+
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->keyed_store_field(), 1, a3, t0);
+
+ // Check that the name has not changed.
+ __ Branch(&miss, ne, a1, Operand(Handle<String>(name)));
+
+ // a3 is used as a scratch register. a1 and a2 keep their values if a jump
+ // to the miss label is generated.
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ a2, a1, a3,
+ &miss);
+ __ bind(&miss);
+
+ __ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
+ Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -- a3 : scratch
+ // -----------------------------------
+ Code* stub;
+ MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map);
+ if (!maybe_stub->To(&stub)) return maybe_stub;
+ __ DispatchMap(a2,
+ a3,
+ Handle<Map>(receiver_map),
+ Handle<Code>(stub),
+ DO_SMI_CHECK);
+
+ Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+ MapList* receiver_maps,
+ CodeList* handler_ics) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -- a3 : scratch
+ // -----------------------------------
+ Label miss;
+ __ JumpIfSmi(a2, &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<Map> map(receiver_maps->at(current));
+ Handle<Code> code(handler_ics->at(current));
+ __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map));
+ }
+
+ __ bind(&miss);
+ Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL, MEGAMORPHIC);
+}
+
+
+MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+ // a0 : argc
+ // a1 : constructor
+ // ra : return address
+ // [sp] : last argument
+ Label generic_stub_call;
+
+ // Use t7 for holding undefined which is used in several places below.
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Check to see whether there are any break points in the function code. If
+ // there are, jump to the generic constructor stub, which calls the actual
+ // code for the function, thereby hitting the break points.
+ __ lw(t5, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(t5, SharedFunctionInfo::kDebugInfoOffset));
+ __ Branch(&generic_stub_call, ne, a2, Operand(t7));
+#endif
+
+ // Load the initial map and verify that it is in fact a map.
+ // a1: constructor function
+ // t7: undefined
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a2, Operand(kSmiTagMask));
+ __ Branch(&generic_stub_call, eq, t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, t0);
+ __ Branch(&generic_stub_call, ne, t0, Operand(MAP_TYPE));
+
+#ifdef DEBUG
+ // Cannot construct functions this way.
+ // a0: argc
+ // a1: constructor function
+ // a2: initial map
+ // t7: undefined
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Check(ne, "Function constructed by construct stub.",
+ a3, Operand(JS_FUNCTION_TYPE));
+#endif
+
+ // Now allocate the JSObject in new space.
+ // a0: argc
+ // a1: constructor function
+ // a2: initial map
+ // t7: undefined
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ AllocateInNewSpace(a3,
+ t4,
+ t5,
+ t6,
+ &generic_stub_call,
+ SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to initial
+ // map and properties and elements are set to empty fixed array.
+ // a0: argc
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size (in words)
+ // t4: JSObject (not tagged)
+ // t7: undefined
+ __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(t5, t4);
+ __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+ __ Addu(t5, t5, Operand(3 * kPointerSize));
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+ ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
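+ // t5 now points past the three header fields, at the first in-object
+ // property slot.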
+
+ // Calculate the location of the first argument. The stack contains only the
+ // argc arguments.
+ __ sll(a1, a0, kPointerSizeLog2);
+ __ Addu(a1, a1, sp);
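+ // a1 = sp + argc * kPointerSize points one slot above the first argument,
+ // so argument n is loaded below from a1 - (n + 1) * kPointerSize.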
+
+ // Fill all the in-object properties with undefined.
+ // a0: argc
+ // a1: first argument
+ // a3: object size (in words)
+ // t4: JSObject (not tagged)
+ // t5: First in-object property of JSObject (not tagged)
+ // t7: undefined
+ // Fill the initialized properties with a constant value or a passed argument
+ // depending on the this.x = ...; assignment in the function.
+ SharedFunctionInfo* shared = function->shared();
+ for (int i = 0; i < shared->this_property_assignments_count(); i++) {
+ if (shared->IsThisPropertyAssignmentArgument(i)) {
+ Label not_passed, next;
+ // Check if the argument assigned to the property is actually passed.
+ int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+ __ Branch(&not_passed, less_equal, a0, Operand(arg_number));
+ // Argument passed - find it on the stack.
+ __ lw(a2, MemOperand(a1, (arg_number + 1) * -kPointerSize));
+ __ sw(a2, MemOperand(t5));
+ __ Addu(t5, t5, kPointerSize);
+ __ jmp(&next);
+ __ bind(&not_passed);
+ // Set the property to undefined.
+ __ sw(t7, MemOperand(t5));
+ __ Addu(t5, t5, Operand(kPointerSize));
+ __ bind(&next);
+ } else {
+ // Set the property to the constant value.
+ Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+ __ li(a2, Operand(constant));
+ __ sw(a2, MemOperand(t5));
+ __ Addu(t5, t5, kPointerSize);
+ }
+ }
+
+ // Fill the unused in-object property fields with undefined.
+ ASSERT(function->has_initial_map());
+ for (int i = shared->this_property_assignments_count();
+ i < function->initial_map()->inobject_properties();
+ i++) {
+ __ sw(t7, MemOperand(t5));
+ __ Addu(t5, t5, kPointerSize);
+ }
+
+ // a0: argc
+ // t4: JSObject (not tagged)
+ // Move argc to a1 and the JSObject to return to v0 and tag it.
+ __ mov(a1, a0);
+ __ mov(v0, t4);
+ __ Or(v0, v0, Operand(kHeapObjectTag));
+
+ // v0: JSObject
+ // a1: argc
+ // Remove caller arguments and receiver from the stack and return.
+ __ sll(t0, a1, kPointerSizeLog2);
+ __ Addu(sp, sp, t0);
+ __ Addu(sp, sp, Operand(kPointerSize));
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->constructed_objects(), 1, a1, a2);
+ __ IncrementCounter(counters->constructed_objects_stub(), 1, a1, a2);
+ __ Ret();
+
+ // Jump to the generic stub in case the specialized code cannot handle the
+ // construction.
+ __ bind(&generic_stub_call);
+ Handle<Code> generic_construct_stub =
+ masm()->isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+static bool IsElementTypeSigned(JSObject::ElementsKind elements_kind) {
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ return true;
+
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ return false;
+
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ return false;
+ }
+ return false;
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+ MacroAssembler* masm,
+ JSObject::ElementsKind elements_kind) {
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss_force_generic, slow, failed_allocation;
+
+ Register key = a0;
+ Register receiver = a1;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &miss_force_generic);
+
+ __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // a3: elements array
+
+ // Check that the index is in range.
+ __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
+ __ sra(t2, key, kSmiTagSize);
+ // Unsigned comparison catches both negative and too-large values.
+ __ Branch(&miss_force_generic, Uless, t1, Operand(t2));
+
+ __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+ // a3: base pointer of external storage
+
+ // We do not untag the smi key; instead we work with it
+ // as if it were premultiplied by 2.
+ ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
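+ // For element index i the smi key equals (i << 1), so a byte access needs
+ // key >> 1, a 16-bit access can use the key directly as a byte offset, a
+ // 32-bit access needs key << 1, and a 64-bit access key << 2.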
+
+ Register value = a2;
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ __ srl(t2, key, 1);
+ __ addu(t3, a3, t2);
+ __ lb(value, MemOperand(t3, 0));
+ break;
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ srl(t2, key, 1);
+ __ addu(t3, a3, t2);
+ __ lbu(value, MemOperand(t3, 0));
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ __ addu(t3, a3, key);
+ __ lh(value, MemOperand(t3, 0));
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ addu(t3, a3, key);
+ __ lhu(value, MemOperand(t3, 0));
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ sll(t2, key, 1);
+ __ addu(t3, a3, t2);
+ __ lw(value, MemOperand(t3, 0));
+ break;
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ __ sll(t3, t2, 2);
+ __ addu(t3, a3, t3);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ lwc1(f0, MemOperand(t3, 0));
+ } else {
+ __ lw(value, MemOperand(t3, 0));
+ }
+ break;
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ __ sll(t2, key, 2);
+ __ addu(t3, a3, t2);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ ldc1(f0, MemOperand(t3, 0));
+ } else {
+ // t3: pointer to the beginning of the double we want to load.
+ __ lw(a2, MemOperand(t3, 0));
+ __ lw(a3, MemOperand(t3, Register::kSizeInBytes));
+ }
+ break;
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+
+ // For integer array types:
+ // a2: value
+ // For float array type:
+ // f0: value (if FPU is supported)
+ // a2: value (if FPU is not supported)
+ // For double array type:
+ // f0: value (if FPU is supported)
+ // a2/a3: value (if FPU is not supported)
+
+ if (elements_kind == JSObject::EXTERNAL_INT_ELEMENTS) {
+ // For the Int and UnsignedInt array types, we need to see whether
+ // the value can be represented in a Smi. If not, we need to convert
+ // it to a HeapNumber.
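+ // A 32-bit value fits in a smi iff it lies in [-2^30, 2^30 - 1]. Adding
+ // 2^30 (i.e. subtracting 0xC0000000) gives a non-negative result exactly
+ // on that range, so a negative result means the value must be boxed.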
+ Label box_int;
+ __ Subu(t3, value, Operand(0xC0000000)); // Non-smi value gives neg result.
+ __ Branch(&box_int, lt, t3, Operand(zero_reg));
+ // Tag integer as smi and return it.
+ __ sll(v0, value, kSmiTagSize);
+ __ Ret();
+
+ __ bind(&box_int);
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion.
+ // The arm version uses a temporary here to save r0, but we don't need to
+ // (a0 is not modified).
+ __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, a3, t0, t1, &slow);
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(value, f0);
+ __ cvt_d_w(f0, f0);
+ __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+ __ Ret();
+ } else {
+ Register dst1 = t2;
+ Register dst2 = t3;
+ FloatingPointHelper::Destination dest =
+ FloatingPointHelper::kCoreRegisters;
+ FloatingPointHelper::ConvertIntToDouble(masm,
+ value,
+ dest,
+ f0,
+ dst1,
+ dst2,
+ t1,
+ f2);
+ __ sw(dst1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ sw(dst2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ Ret();
+ }
+ } else if (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ // The test is different for unsigned int values. Since we need
+ // the value to be in the range of a positive smi, we can't
+ // handle either of the top two bits being set in the value.
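+ // A positive smi holds at most 2^30 - 1, so bit 31 and bit 30 must both
+ // be clear for the value to survive the one-bit smi tag shift.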
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ Label pl_box_int;
+ __ And(t2, value, Operand(0xC0000000));
+ __ Branch(&pl_box_int, ne, t2, Operand(zero_reg));
+
+ // It can fit in a Smi.
+ // Tag integer as smi and return it.
+ __ sll(v0, value, kSmiTagSize);
+ __ Ret();
+
+ __ bind(&pl_box_int);
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all
+ // registers - also when jumping due to exhausted young space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t2, t3, t6, &slow);
+
+ // The following instruction sequence is replaced by the Cvt_d_uw macro:
+ // __ mtc1(value, f0); // LS 32-bits.
+ // __ mtc1(zero_reg, f1); // MS 32-bits are all zero.
+ // __ cvt_d_l(f0, f0); // Use 64 bit conv to get correct unsigned 32-bit.
+
+ __ Cvt_d_uw(f0, value);
+
+ __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+
+ __ Ret();
+ } else {
+ // Check whether the unsigned integer fits into a smi.
+ Label box_int_0, box_int_1, done;
+ __ And(t2, value, Operand(0x80000000));
+ __ Branch(&box_int_0, ne, t2, Operand(zero_reg));
+ __ And(t2, value, Operand(0x40000000));
+ __ Branch(&box_int_1, ne, t2, Operand(zero_reg));
+
+ // Tag integer as smi and return it.
+ __ sll(v0, value, kSmiTagSize);
+ __ Ret();
+
+ Register hiword = value; // a2.
+ Register loword = a3;
+
+ __ bind(&box_int_0);
+ // Integer does not have leading zeros.
+ GenerateUInt2Double(masm, hiword, loword, t0, 0);
+ __ Branch(&done);
+
+ __ bind(&box_int_1);
+ // Integer has one leading zero.
+ GenerateUInt2Double(masm, hiword, loword, t0, 1);
+
+ __ bind(&done);
+ // Integer was converted to double in registers hiword:loword.
+ // Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber
+ // clobbers all registers - also when jumping due to exhausted young
+ // space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(t2, t3, t5, t6, &slow);
+
+ __ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset));
+ __ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset));
+
+ __ mov(v0, t2);
+ __ Ret();
+ }
+ } else if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ // For the floating-point array type, we need to always allocate a
+ // HeapNumber.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+ // The single-precision float value is already in FPU register f0.
+ __ cvt_d_s(f0, f0);
+ __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+ __ Ret();
+ } else {
+ // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+ // FPU is not available, do manual single to double conversion.
+
+ // a2: floating point value (binary32).
+ // v0: heap number for result
+
+ // Extract mantissa to t4.
+ __ And(t4, value, Operand(kBinary32MantissaMask));
+
+ // Extract exponent to t5.
+ __ srl(t5, value, kBinary32MantissaBits);
+ __ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+ Label exponent_rebiased;
+ __ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg));
+
+ __ li(t0, 0x7ff);
+ __ Xor(t1, t5, Operand(0xFF));
+ __ movz(t5, t0, t1); // Set t5 to 0x7ff only if t5 is equal to 0xff.
+ __ Branch(&exponent_rebiased, eq, t1, Operand(zero_reg)); // Exponent 0xff.
+
+ // Rebias exponent.
+ __ Addu(t5,
+ t5,
+ Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
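+ // That is, exponent' = exponent - 127 + 1023 (binary32 bias 127,
+ // binary64 bias 1023).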
+
+ __ bind(&exponent_rebiased);
+ __ And(a2, value, Operand(kBinary32SignMask));
+ value = no_reg;
+ __ sll(t0, t5, HeapNumber::kMantissaBitsInTopWord);
+ __ or_(a2, a2, t0);
+
+ // Shift mantissa.
+ static const int kMantissaShiftForHiWord =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaShiftForLoWord =
+ kBitsPerInt - kMantissaShiftForHiWord;
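+ // The binary64 hi word keeps only HeapNumber::kMantissaBitsInTopWord (20)
+ // mantissa bits, so the 23 binary32 mantissa bits are split: the top
+ // 23 - 20 = 3 bits go into the hi word and the remaining bits, shifted
+ // left by 32 - 3 = 29, into the lo word.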
+
+ __ srl(t0, t4, kMantissaShiftForHiWord);
+ __ or_(a2, a2, t0);
+ __ sll(a0, t4, kMantissaShiftForLoWord);
+
+ __ sw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ sw(a0, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ Ret();
+ }
+
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+ // The double value is already in f0
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+ } else {
+ // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+
+ __ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ Ret();
+ }
+
+ } else {
+ // Tag integer as smi and return it.
+ __ sll(v0, value, kSmiTagSize);
+ __ Ret();
+ }
+
+ // Slow case, key and receiver still in a0 and a1.
+ __ bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(),
+ 1, a2, a3);
+
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+
+ __ Push(a1, a0);
+
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+
+ __ bind(&miss_force_generic);
+ Code* stub = masm->isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_MissForceGeneric);
+ __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreExternalArray(
+ MacroAssembler* masm,
+ JSObject::ElementsKind elements_kind) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ Label slow, check_heap_number, miss_force_generic;
+
+ // Register usage.
+ Register value = a0;
+ Register key = a1;
+ Register receiver = a2;
+ // a3 mostly holds the elements array or the destination external array.
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
+
+ __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &miss_force_generic);
+
+ // Check that the index is in range.
+ __ SmiUntag(t0, key);
+ __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ Branch(&miss_force_generic, Ugreater_equal, t0, Operand(t1));
+
+ // Handle both smis and HeapNumbers in the fast path. Go to the
+ // runtime for all other kinds of values.
+ // a3: external array.
+ // t0: key (integer).
+
+ if (elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS) {
+ // Double to pixel conversion is only implemented in the runtime for now.
+ __ JumpIfNotSmi(value, &slow);
+ } else {
+ __ JumpIfNotSmi(value, &check_heap_number);
+ }
+ __ SmiUntag(t1, value);
+ __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+
+ // a3: base pointer of external storage.
+ // t0: key (integer).
+ // t1: value (integer).
+
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS: {
+ // Clamp the value to [0..255].
+ // v0 is used as a scratch register here.
+ Label done;
+ __ li(v0, Operand(255));
+ // Normal branch: nop in delay slot.
+ __ Branch(&done, gt, t1, Operand(v0));
+ // Use delay slot in this branch.
+ __ Branch(USE_DELAY_SLOT, &done, lt, t1, Operand(zero_reg));
+ __ mov(v0, zero_reg); // In delay slot.
+ __ mov(v0, t1); // Value is in range 0..255.
+ __ bind(&done);
+ __ mov(t1, v0);
+ __ addu(t8, a3, t0);
+ __ sb(t1, MemOperand(t8, 0));
+ }
+ break;
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ addu(t8, a3, t0);
+ __ sb(t1, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ sll(t8, t0, 1);
+ __ addu(t8, a3, t8);
+ __ sh(t1, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ sll(t8, t0, 2);
+ __ addu(t8, a3, t8);
+ __ sw(t1, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ // Perform int-to-float conversion and store to memory.
+ StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
+ break;
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ __ sll(t8, t0, 3);
+ __ addu(a3, a3, t8);
+ // a3: effective address of the double element
+ FloatingPointHelper::Destination destination;
+ if (CpuFeatures::IsSupported(FPU)) {
+ destination = FloatingPointHelper::kFPURegisters;
+ } else {
+ destination = FloatingPointHelper::kCoreRegisters;
+ }
+ FloatingPointHelper::ConvertIntToDouble(
+ masm, t1, destination,
+ f0, t2, t3, // These are: double_dst, dst1, dst2.
+ t0, f2); // These are: scratch2, single_scratch.
+ if (destination == FloatingPointHelper::kFPURegisters) {
+ CpuFeatures::Scope scope(FPU);
+ __ sdc1(f0, MemOperand(a3, 0));
+ } else {
+ __ sw(t2, MemOperand(a3, 0));
+ __ sw(t3, MemOperand(a3, Register::kSizeInBytes));
+ }
+ break;
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+
+ // Entry registers are intact, a0 holds the value which is the return value.
+ __ mov(v0, value);
+ __ Ret();
+
+ if (elements_kind != JSObject::EXTERNAL_PIXEL_ELEMENTS) {
+ // a3: external array.
+ // t0: index (integer).
+ __ bind(&check_heap_number);
+ __ GetObjectType(value, t1, t2);
+ __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
+
+ __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+
+ // a3: base pointer of external storage.
+ // t0: key (integer).
+
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays basically undefined. For more
+ // reproducible behavior, convert these to zero.
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+
+ __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));
+
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ __ cvt_s_d(f0, f0);
+ __ sll(t8, t0, 2);
+ __ addu(t8, a3, t8);
+ __ swc1(f0, MemOperand(t8, 0));
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ __ sll(t8, t0, 3);
+ __ addu(t8, a3, t8);
+ __ sdc1(f0, MemOperand(t8, 0));
+ } else {
+ __ EmitECMATruncate(t3, f0, f2, t2, t1, t5);
+
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ addu(t8, a3, t0);
+ __ sb(t3, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ sll(t8, t0, 1);
+ __ addu(t8, a3, t8);
+ __ sh(t3, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ sll(t8, t0, 2);
+ __ addu(t8, a3, t8);
+ __ sw(t3, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ // Entry registers are intact; a0 holds the value,
+ // which is the return value.
+ __ mov(v0, value);
+ __ Ret();
+ } else {
+ // FPU is not available, do manual conversions.
+
+ __ lw(t3, FieldMemOperand(value, HeapNumber::kExponentOffset));
+ __ lw(t4, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ Label done, nan_or_infinity_or_zero;
+ static const int kMantissaInHiWordShift =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaInLoWordShift =
+ kBitsPerInt - kMantissaInHiWordShift;
+
+ // Test for all special exponent values: zeros, subnormal numbers, NaNs
+ // and infinities. All these should be converted to 0.
+ __ li(t5, HeapNumber::kExponentMask);
+ __ and_(t6, t3, t5);
+ __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(zero_reg));
+
+ __ xor_(t1, t6, t5);
+ __ li(t2, kBinary32ExponentMask);
+ __ movz(t6, t2, t1); // Only if t6 is equal to t5.
+ __ Branch(&nan_or_infinity_or_zero, eq, t1, Operand(zero_reg)); // NaN/Inf.
+
+ // Rebias exponent.
+ __ srl(t6, t6, HeapNumber::kExponentShift);
+ __ Addu(t6,
+ t6,
+ Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
+
+ __ li(t1, Operand(kBinary32MaxExponent));
+ __ Slt(t1, t1, t6);
+ __ And(t2, t3, Operand(HeapNumber::kSignMask));
+ __ Or(t2, t2, Operand(kBinary32ExponentMask));
+ __ movn(t3, t2, t1); // Only if t6 is gt kBinary32MaxExponent.
+ __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent));
+
+ __ Slt(t1, t6, Operand(kBinary32MinExponent));
+ __ And(t2, t3, Operand(HeapNumber::kSignMask));
+ __ movn(t3, t2, t1); // Only if t6 is lt kBinary32MinExponent.
+ __ Branch(&done, lt, t6, Operand(kBinary32MinExponent));
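+ // The movn instructions above mapped exponents beyond the binary32 range
+ // to +/-Infinity (sign | exponent mask) and exponents below the minimum
+ // to a signed zero.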
+
+ __ And(t7, t3, Operand(HeapNumber::kSignMask));
+ __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+ __ sll(t3, t3, kMantissaInHiWordShift);
+ __ or_(t7, t7, t3);
+ __ srl(t4, t4, kMantissaInLoWordShift);
+ __ or_(t7, t7, t4);
+ __ sll(t6, t6, kBinary32ExponentShift);
+ __ or_(t3, t7, t6);
+
+ __ bind(&done);
+ // Store at a3 (external base) + untagged key * 4.
+ __ sll(t9, t0, 2);
+ __ addu(t9, a3, t9);
+ __ sw(t3, MemOperand(t9, 0));
+
+ // Entry registers are intact; a0 holds the value, which is the return
+ // value.
+ __ mov(v0, value);
+ __ Ret();
+
+ __ bind(&nan_or_infinity_or_zero);
+ __ And(t7, t3, Operand(HeapNumber::kSignMask));
+ __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+ __ or_(t6, t6, t7);
+ __ sll(t3, t3, kMantissaInHiWordShift);
+ __ or_(t6, t6, t3);
+ __ srl(t4, t4, kMantissaInLoWordShift);
+ __ or_(t3, t6, t4);
+ __ Branch(&done);
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ __ sll(t8, t0, 3);
+ __ addu(t8, a3, t8);
+ // t8: effective address of destination element.
+ __ sw(t4, MemOperand(t8, 0));
+ __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
+ __ Ret();
+ } else {
+ bool is_signed_type = IsElementTypeSigned(elements_kind);
+ int meaningful_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
+ int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
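+ // Signed element kinds reserve one bit for the sign, leaving 31 magnitude
+ // bits; doubles whose exponent is too large are stored as min_value below
+ // (0x80000000 for signed kinds, 0 for unsigned ones).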
+
+ Label done, sign;
+
+ // Test for all special exponent values: zeros, subnormal numbers, NaNs
+ // and infinities. All these should be converted to 0.
+ __ li(t5, HeapNumber::kExponentMask);
+ __ and_(t6, t3, t5);
+ __ movz(t3, zero_reg, t6); // Only if t6 is equal to zero.
+ __ Branch(&done, eq, t6, Operand(zero_reg));
+
+ __ xor_(t2, t6, t5);
+ __ movz(t3, zero_reg, t2); // Only if t6 is equal to t5.
+ __ Branch(&done, eq, t6, Operand(t5));
+
+ // Unbias exponent.
+ __ srl(t6, t6, HeapNumber::kExponentShift);
+ __ Subu(t6, t6, Operand(HeapNumber::kExponentBias));
+ // If exponent is negative then result is 0.
+ __ slt(t2, t6, zero_reg);
+ __ movn(t3, zero_reg, t2); // Only if exponent is negative.
+ __ Branch(&done, lt, t6, Operand(zero_reg));
+
+ // If exponent is too big then result is minimal value.
+ __ slti(t1, t6, meaningful_bits - 1);
+ __ li(t2, min_value);
+ __ movz(t3, t2, t1); // Only if t6 is ge meaningful_bits - 1.
+ __ Branch(&done, ge, t6, Operand(meaningful_bits - 1));
+
+ __ And(t5, t3, Operand(HeapNumber::kSignMask));
+ __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+ __ Or(t3, t3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
+
+ __ li(t9, HeapNumber::kMantissaBitsInTopWord);
+ __ subu(t6, t9, t6);
+ __ slt(t1, t6, zero_reg);
+ __ srlv(t2, t3, t6);
+ __ movz(t3, t2, t1); // Only if t6 is positive.
+ __ Branch(&sign, ge, t6, Operand(zero_reg));
+
+ __ subu(t6, zero_reg, t6);
+ __ sllv(t3, t3, t6);
+ __ li(t9, meaningful_bits);
+ __ subu(t6, t9, t6);
+ __ srlv(t4, t4, t6);
+ __ or_(t3, t3, t4);
+
+ __ bind(&sign);
+ __ subu(t2, t3, zero_reg);
+ __ movz(t3, t2, t5); // Only if t5 is zero.
+
+ __ bind(&done);
+
+ // Result is in t3.
+ // This switch block should be exactly the same as above (FPU mode).
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ addu(t8, a3, t0);
+ __ sb(t3, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ sll(t8, t0, 1);
+ __ addu(t8, a3, t8);
+ __ sh(t3, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ sll(t8, t0, 2);
+ __ addu(t8, a3, t8);
+ __ sw(t3, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+ }
+
+ // Slow case: value, key and receiver still in a0, a1 and a2.
+ __ bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(),
+ 1, a2, a3);
+ // Entry registers are intact.
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -----------------------------------
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+ // Miss case, call the runtime.
+ __ bind(&miss_force_generic);
+
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -----------------------------------
+
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss_force_generic;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(a0, &miss_force_generic);
+
+ // Get the elements array.
+ __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
+ __ AssertFastElements(a2);
+
+ // Check that the key is within bounds.
+ __ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ Branch(&miss_force_generic, hs, a0, Operand(a3));
+
+ // Load the result and make sure it's not the hole.
+ __ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
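+ // The smi key already carries a factor of 2 (kSmiTagSize == 1), so a
+ // shift by kPointerSizeLog2 - kSmiTagSize scales it to a byte offset.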
+ __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, t0, a3);
+ __ lw(t0, MemOperand(t0));
+ __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
+ __ Branch(&miss_force_generic, eq, t0, Operand(t1));
+ __ mov(v0, t0);
+ __ Ret();
+
+ __ bind(&miss_force_generic);
+ Code* stub = masm->isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_MissForceGeneric);
+ __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+ bool is_js_array) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -- a3 : scratch
+ // -- t0 : scratch (elements)
+ // -----------------------------------
+ Label miss_force_generic;
+
+ Register value_reg = a0;
+ Register key_reg = a1;
+ Register receiver_reg = a2;
+ Register scratch = a3;
+ Register elements_reg = t0;
+ Register scratch2 = t1;
+ Register scratch3 = t2;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+ // Get the elements array and make sure it is a fast element array, not a
+ // copy-on-write ('cow') array.
+ __ lw(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ CheckMap(elements_reg,
+ scratch,
+ Heap::kFixedArrayMapRootIndex,
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ // Check that the key is within bounds.
+ if (is_js_array) {
+ __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ } else {
+ __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ }
+ // Compare smis.
+ __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+
+ __ Addu(scratch,
+ elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(scratch3, scratch2, scratch);
+ __ sw(value_reg, MemOperand(scratch3));
+ __ RecordWrite(scratch, Operand(scratch2), receiver_reg, elements_reg);
+
+ // value_reg (a0) is preserved.
+ // Done.
+ __ Ret();
+
+ __ bind(&miss_force_generic);
+ Handle<Code> ic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/mips/virtual-frame-mips.cc b/deps/v8/src/mips/virtual-frame-mips.cc
deleted file mode 100644
index b61ce75bd..000000000
--- a/deps/v8/src/mips/virtual-frame-mips.cc
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::SyncElementBelowStackPointer(int index) {
- UNREACHABLE();
-}
-
-
-void VirtualFrame::SyncElementByPushing(int index) {
- UNREACHABLE();
-}
-
-
-void VirtualFrame::SyncRange(int begin, int end) {
- // All elements are in memory on MIPS (ie, synced).
-#ifdef DEBUG
- for (int i = begin; i <= end; i++) {
- ASSERT(elements_[i].is_synced());
- }
-#endif
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Enter() {
- // TODO(MIPS): Implement DEBUG
-
- // We are about to push four values to the frame.
- Adjust(4);
- __ MultiPush(ra.bit() | fp.bit() | cp.bit() | a1.bit());
- // Adjust FP to point to saved FP.
- __ addiu(fp, sp, 2 * kPointerSize);
-}
-
-
-void VirtualFrame::Exit() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
- int count = local_count();
- if (count > 0) {
- Comment cmnt(masm(), "[ Allocate space for locals");
- Adjust(count);
- // Initialize stack slots with 'undefined' value.
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ addiu(sp, sp, -count * kPointerSize);
- for (int i = 0; i < count; i++) {
- __ sw(t0, MemOperand(sp, (count - i - 1) * kPointerSize));
- }
- }
-}
-
-
-void VirtualFrame::SaveContextRegister() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::RestoreContextRegister() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-int VirtualFrame::InvalidateFrameSlotAt(int index) {
- return kIllegalIndex;
-}
-
-
-void VirtualFrame::TakeFrameSlotAt(int index) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::StoreToFrameSlotAt(int index) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::RawCallStub(CodeStub* stub) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(f, arg_count);
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(id, arg_count);
-}
-
-
-void VirtualFrame::CallAlignedRuntime(Runtime::Function* f, int arg_count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallAlignedRuntime(Runtime::FunctionId id, int arg_count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags,
- Result* arg_count_register,
- int arg_count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- int dropped_args) {
- switch (code->kind()) {
- case Code::CALL_IC:
- break;
- case Code::FUNCTION:
- UNIMPLEMENTED_MIPS();
- break;
- case Code::KEYED_LOAD_IC:
- UNIMPLEMENTED_MIPS();
- break;
- case Code::LOAD_IC:
- UNIMPLEMENTED_MIPS();
- break;
- case Code::KEYED_STORE_IC:
- UNIMPLEMENTED_MIPS();
- break;
- case Code::STORE_IC:
- UNIMPLEMENTED_MIPS();
- break;
- case Code::BUILTIN:
- UNIMPLEMENTED_MIPS();
- break;
- default:
- UNREACHABLE();
- break;
- }
- Forget(dropped_args);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ Call(code, rmode);
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
- int dropped_args,
- bool set_auto_args_slots) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Drop(int count) {
- ASSERT(count >= 0);
- ASSERT(height() >= count);
- int num_virtual_elements = (element_count() - 1) - stack_pointer_;
-
- // Emit code to lower the stack pointer if necessary.
- if (num_virtual_elements < count) {
- int num_dropped = count - num_virtual_elements;
- stack_pointer_ -= num_dropped;
- __ addiu(sp, sp, num_dropped * kPointerSize);
- }
-
- // Discard elements from the virtual frame and free any registers.
- for (int i = 0; i < count; i++) {
- FrameElement dropped = elements_.RemoveLast();
- if (dropped.is_register()) {
- Unuse(dropped.reg());
- }
- }
-}
-
-
-void VirtualFrame::DropFromVFrameOnly(int count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Result VirtualFrame::Pop() {
- UNIMPLEMENTED_MIPS();
- Result res = Result();
- return res; // UNIMPLEMENTED RETURN
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_--;
- elements_.RemoveLast();
- __ Pop(reg);
-}
-
-
-void VirtualFrame::EmitMultiPop(RegList regs) {
- ASSERT(stack_pointer_ == element_count() - 1);
- for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
- stack_pointer_--;
- elements_.RemoveLast();
- }
- }
- __ MultiPop(regs);
-}
-
-
-void VirtualFrame::EmitPush(Register reg) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
- stack_pointer_++;
- __ Push(reg);
-}
-
-
-void VirtualFrame::EmitMultiPush(RegList regs) {
- ASSERT(stack_pointer_ == element_count() - 1);
- for (int16_t i = kNumRegisters; i > 0; i--) {
- if ((regs & (1 << i)) != 0) {
- elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
- stack_pointer_++;
- }
- }
- __ MultiPush(regs);
-}
-
-
-void VirtualFrame::EmitArgumentSlots(RegList reglist) {
- UNIMPLEMENTED_MIPS();
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/virtual-frame-mips.h b/deps/v8/src/mips/virtual-frame-mips.h
deleted file mode 100644
index b32e2aeed..000000000
--- a/deps/v8/src/mips/virtual-frame-mips.h
+++ /dev/null
@@ -1,548 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-#define V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-
-#include "register-allocator.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame. It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
- // A utility class to introduce a scope where the virtual frame is
- // expected to remain spilled. The constructor spills the code
- // generator's current frame, but no attempt is made to require it
- // to stay spilled. It is intended as documentation while the code
- // generator is being transformed.
- class SpilledScope BASE_EMBEDDED {
- public:
- SpilledScope() {}
- };
-
- // An illegal index into the virtual frame.
- static const int kIllegalIndex = -1;
-
- // Construct an initial virtual frame on entry to a JS function.
- inline VirtualFrame();
-
- // Construct a virtual frame as a clone of an existing one.
- explicit inline VirtualFrame(VirtualFrame* original);
-
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
- MacroAssembler* masm() { return cgen()->masm(); }
-
- // Create a duplicate of an existing valid frame element.
- FrameElement CopyElementAt(int index,
- NumberInfo info = NumberInfo::Unknown());
-
- // The number of elements on the virtual frame.
- int element_count() { return elements_.length(); }
-
- // The height of the virtual expression stack.
- int height() {
- return element_count() - expression_base_index();
- }
-
- int register_location(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num];
- }
-
- int register_location(Register reg) {
- return register_locations_[RegisterAllocator::ToNumber(reg)];
- }
-
- void set_register_location(Register reg, int index) {
- register_locations_[RegisterAllocator::ToNumber(reg)] = index;
- }
-
- bool is_used(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num] != kIllegalIndex;
- }
-
- bool is_used(Register reg) {
- return register_locations_[RegisterAllocator::ToNumber(reg)]
- != kIllegalIndex;
- }
-
- // Add extra in-memory elements to the top of the frame to match an actual
- // frame (eg, the frame after an exception handler is pushed). No code is
- // emitted.
- void Adjust(int count);
-
- // Forget elements from the top of the frame to match an actual frame (eg,
- // the frame after a runtime call). No code is emitted.
- void Forget(int count) {
- ASSERT(count >= 0);
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_ -= count;
- // On mips, all elements are in memory, so there is no extra bookkeeping
- // (registers, copies, etc.) beyond dropping the elements.
- elements_.Rewind(stack_pointer_ + 1);
- }
-
- // Forget count elements from the top of the frame and adjust the stack
- // pointer downward. This is used, for example, before merging frames at
- // break, continue, and return targets.
- void ForgetElements(int count);
-
- // Spill all values from the frame to memory.
- void SpillAll();
-
- // Spill all occurrences of a specific register from the frame.
- void Spill(Register reg) {
- if (is_used(reg)) SpillElementAt(register_location(reg));
- }
-
- // Spill all occurrences of an arbitrary register if possible. Return the
- // register spilled or no_reg if it was not possible to free any register
- // (ie, they all have frame-external references).
- Register SpillAnyRegister();
-
- // Prepare this virtual frame for merging to an expected frame by
- // performing some state changes that do not require generating
- // code. It is guaranteed that no code will be generated.
- void PrepareMergeTo(VirtualFrame* expected);
-
- // Make this virtual frame have a state identical to an expected virtual
- // frame. As a side effect, code may be emitted to make this frame match
- // the expected one.
- void MergeTo(VirtualFrame* expected);
-
- // Detach a frame from its code generator, perhaps temporarily. This
- // tells the register allocator that it is free to use frame-internal
- // registers. Used when the code generator's frame is switched from this
- // one to NULL by an unconditional jump.
- void DetachFromCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Unuse(i);
- }
- }
-
- // (Re)attach a frame to its code generator. This informs the register
- // allocator that the frame-internal register references are active again.
- // Used when a code generator's frame is switched from NULL to this one by
- // binding a label.
- void AttachToCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Unuse(i);
- }
- }
-
- // Emit code for the physical JS entry and exit frame sequences. After
- // calling Enter, the virtual frame is ready for use; and after calling
- // Exit it should not be used. Note that Enter does not allocate space in
- // the physical frame for storing frame-allocated locals.
- void Enter();
- void Exit();
-
- // Prepare for returning from the frame by spilling locals and
- // dropping all non-locals elements in the virtual frame. This
- // avoids generating unnecessary merge code when jumping to the
- // shared return site. Emits code for spills.
- void PrepareForReturn();
-
- // Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots();
-
- // The current top of the expression stack as an assembly operand.
- MemOperand Top() { return MemOperand(sp, 0); }
-
- // An element of the expression stack as an assembly operand.
- MemOperand ElementAt(int index) {
- return MemOperand(sp, index * kPointerSize);
- }
-
- // Random-access store to a frame-top relative frame element. The result
- // becomes owned by the frame and is invalidated.
- void SetElementAt(int index, Result* value);
-
- // Set a frame element to a constant. The index is frame-top relative.
- void SetElementAt(int index, Handle<Object> value) {
- Result temp(value);
- SetElementAt(index, &temp);
- }
-
- void PushElementAt(int index) {
- PushFrameSlotAt(element_count() - index - 1);
- }
-
- // A frame-allocated local as an assembly operand.
- MemOperand LocalAt(int index) {
- ASSERT(0 <= index);
- ASSERT(index < local_count());
- return MemOperand(s8_fp, kLocal0Offset - index * kPointerSize);
- }
-
- // Push a copy of the value of a local frame slot on top of the frame.
- void PushLocalAt(int index) {
- PushFrameSlotAt(local0_index() + index);
- }
-
- // Push the value of a local frame slot on top of the frame and invalidate
- // the local slot. The slot should be written to before trying to read
- // from it again.
- void TakeLocalAt(int index) {
- TakeFrameSlotAt(local0_index() + index);
- }
-
- // Store the top value on the virtual frame into a local frame slot. The
- // value is left in place on top of the frame.
- void StoreToLocalAt(int index) {
- StoreToFrameSlotAt(local0_index() + index);
- }
-
- // Push the address of the receiver slot on the frame.
- void PushReceiverSlotAddress();
-
- // The function frame slot.
- MemOperand Function() { return MemOperand(s8_fp, kFunctionOffset); }
-
- // Push the function on top of the frame.
- void PushFunction() { PushFrameSlotAt(function_index()); }
-
- // The context frame slot.
- MemOperand Context() { return MemOperand(s8_fp, kContextOffset); }
-
- // Save the value of the cp register to the context frame slot.
- void SaveContextRegister();
-
- // Restore the cp register from the value of the context frame
- // slot.
- void RestoreContextRegister();
-
- // A parameter as an assembly operand.
- MemOperand ParameterAt(int index) {
- // Index -1 corresponds to the receiver.
- ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index <= parameter_count());
- uint16_t a = 0; // Number of argument slots.
- return MemOperand(s8_fp, (1 + parameter_count() + a - index) * kPointerSize);
- }
-
- // Push a copy of the value of a parameter frame slot on top of the frame.
- void PushParameterAt(int index) {
- PushFrameSlotAt(param0_index() + index);
- }
-
- // Push the value of a parameter frame slot on top of the frame and
- // invalidate the parameter slot. The slot should be written to before
- // trying to read from it again.
- void TakeParameterAt(int index) {
- TakeFrameSlotAt(param0_index() + index);
- }
-
- // Store the top value on the virtual frame into a parameter frame slot.
- // The value is left in place on top of the frame.
- void StoreToParameterAt(int index) {
- StoreToFrameSlotAt(param0_index() + index);
- }
-
- // The receiver frame slot.
- MemOperand Receiver() { return ParameterAt(-1); }
-
- // Push a try-catch or try-finally handler on top of the virtual frame.
- void PushTryHandler(HandlerType type);
-
- // Call stub given the number of arguments it expects on (and
- // removes from) the stack.
- void CallStub(CodeStub* stub, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- RawCallStub(stub);
- }
-
- void CallStub(CodeStub* stub, Result* arg);
-
- void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
- // Call runtime given the number of arguments expected on (and
- // removed from) the stack.
- void CallRuntime(Runtime::Function* f, int arg_count);
- void CallRuntime(Runtime::FunctionId id, int arg_count);
-
- // Call runtime with sp aligned to 8 bytes.
- void CallAlignedRuntime(Runtime::Function* f, int arg_count);
- void CallAlignedRuntime(Runtime::FunctionId id, int arg_count);
-
- // Invoke builtin given the number of arguments it expects on (and
- // removes from) the stack.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flag,
- Result* arg_count_register,
- int arg_count);
-
- // Call into an IC stub given the number of arguments it removes
- // from the stack. Register arguments are passed as results and
- // consumed by the call.
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- int dropped_args);
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args);
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
- int dropped_args,
- bool set_auto_args_slots = false);
-
- // Drop a number of elements from the top of the expression stack. May
- // emit code to affect the physical frame. Does not clobber any registers
- // excepting possibly the stack pointer.
- void Drop(int count);
- // Similar to VirtualFrame::Drop, but the actual stack is left untouched
- // because sp must later be restored to the correct position manually.
- void DropFromVFrameOnly(int count);
-
- // Drop one element.
- void Drop() { Drop(1); }
- void DropFromVFrameOnly() { DropFromVFrameOnly(1); }
-
- // Duplicate the top element of the frame.
- void Dup() { PushFrameSlotAt(element_count() - 1); }
-
- // Pop an element from the top of the expression stack. Returns a
- // Result, which may be a constant or a register.
- Result Pop();
-
- // Pop and save an element from the top of the expression stack and
- // emit a corresponding pop instruction.
- void EmitPop(Register reg);
- // Same but for multiple registers
- void EmitMultiPop(RegList regs);
- void EmitMultiPopReversed(RegList regs);
-
- // Push an element on top of the expression stack and emit a
- // corresponding push instruction.
- void EmitPush(Register reg);
- // Same but for multiple registers.
- void EmitMultiPush(RegList regs);
- void EmitMultiPushReversed(RegList regs);
-
- // Push an element on the virtual frame.
- inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
- inline void Push(Handle<Object> value);
- inline void Push(Smi* value);
-
- // Pushing a result invalidates it (its contents become owned by the frame).
- void Push(Result* result) {
- if (result->is_register()) {
- Push(result->reg());
- } else {
- ASSERT(result->is_constant());
- Push(result->handle());
- }
- result->Unuse();
- }
-
- // Nip removes zero or more elements from immediately below the top
- // of the frame, leaving the previous top-of-frame value on top of
- // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
- inline void Nip(int num_dropped);
-
- // Pushes 4 argument slots on the stack and saves the requested 'a'
- // registers; the 'a' registers are the argument registers a0 to a3.
- void EmitArgumentSlots(RegList reglist);
-
- inline void SetTypeForLocalAt(int index, NumberInfo info);
- inline void SetTypeForParamAt(int index, NumberInfo info);
-
- private:
- static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
- static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
- static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
- static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
-
- ZoneList<FrameElement> elements_;
-
- // The index of the element that is at the processor's stack pointer
- // (the sp register).
- int stack_pointer_;
-
- // The index of the register frame element using each register, or
- // kIllegalIndex if a register is not on the frame.
- int register_locations_[RegisterAllocator::kNumRegisters];
-
- // The number of parameters and frame-allocated locals, respectively.
- int parameter_count() { return cgen()->scope()->num_parameters(); }
- int local_count() { return cgen()->scope()->num_stack_slots(); }
-
- // The index of the element that is at the processor's frame pointer
- // (the fp register). The parameters, receiver, function, and context
- // are below the frame pointer.
- int frame_pointer() { return parameter_count() + 3; }
-
- // The index of the first parameter. The receiver lies below the first
- // parameter.
- int param0_index() { return 1; }
-
- // The index of the context slot in the frame. It is immediately
- // below the frame pointer.
- int context_index() { return frame_pointer() - 1; }
-
- // The index of the function slot in the frame. It is below the frame
- // pointer and context slot.
- int function_index() { return frame_pointer() - 2; }
-
- // The index of the first local. Between the frame pointer and the
- // locals lies the return address.
- int local0_index() { return frame_pointer() + 2; }
-
- // The index of the base of the expression stack.
- int expression_base_index() { return local0_index() + local_count(); }
-
- // Convert a frame index into a frame pointer relative offset into the
- // actual stack.
- int fp_relative(int index) {
- ASSERT(index < element_count());
- ASSERT(frame_pointer() < element_count()); // FP is on the frame.
- return (frame_pointer() - index) * kPointerSize;
- }
-
- // Record an occurrence of a register in the virtual frame. This has the
- // effect of incrementing the register's external reference count and
- // of updating the index of the register's location in the frame.
- void Use(Register reg, int index) {
- ASSERT(!is_used(reg));
- set_register_location(reg, index);
- cgen()->allocator()->Use(reg);
- }
-
- // Record that a register reference has been dropped from the frame. This
- // decrements the register's external reference count and invalidates the
- // index of the register's location in the frame.
- void Unuse(Register reg) {
- ASSERT(is_used(reg));
- set_register_location(reg, kIllegalIndex);
- cgen()->allocator()->Unuse(reg);
- }
-
- // Spill the element at a particular index---write it to memory if
- // necessary, free any associated register, and forget its value if
- // constant.
- void SpillElementAt(int index);
-
- // Sync the element at a particular index. If it is a register or
- // constant that disagrees with the value on the stack, write it to memory.
- // Keep the element type as register or constant, and clear the dirty bit.
- void SyncElementAt(int index);
-
- // Sync the range of elements in [begin, end] with memory.
- void SyncRange(int begin, int end);
-
- // Sync a single unsynced element that lies beneath or at the stack pointer.
- void SyncElementBelowStackPointer(int index);
-
- // Sync a single unsynced element that lies just above the stack pointer.
- void SyncElementByPushing(int index);
-
- // Push a copy of a frame slot (typically a local or parameter) on top of
- // the frame.
- inline void PushFrameSlotAt(int index);
-
- // Push the value of a frame slot (typically a local or parameter) on
- // top of the frame and invalidate the slot.
- void TakeFrameSlotAt(int index);
-
- // Store the value on top of the frame to a frame slot (typically a local
- // or parameter).
- void StoreToFrameSlotAt(int index);
-
- // Spill all elements in registers. Spill the top spilled_args elements
- // on the frame. Sync all other frame elements.
- // Then drop dropped_args elements from the virtual frame, to match
- // the effect of an upcoming call that will drop them from the stack.
- void PrepareForCall(int spilled_args, int dropped_args);
-
- // Move frame elements currently in registers or constants, that
- // should be in memory in the expected frame, to memory.
- void MergeMoveRegistersToMemory(VirtualFrame* expected);
-
- // Make the register-to-register moves necessary to
- // merge this frame with the expected frame.
- // Register to memory moves must already have been made,
- // and memory to register moves must follow this call.
- // This is because some new memory-to-register moves are
- // created in order to break cycles of register moves.
- // Used in the implementation of MergeTo().
- void MergeMoveRegistersToRegisters(VirtualFrame* expected);
-
- // Make the memory-to-register and constant-to-register moves
- // needed to make this frame equal the expected frame.
- // Called after all register-to-memory and register-to-register
- // moves have been made. After this function returns, the frames
- // should be equal.
- void MergeMoveMemoryToRegisters(VirtualFrame* expected);
-
- // Invalidates a frame slot (puts an invalid frame element in it).
- // Copies on the frame are correctly handled, and if this slot was
- // the backing store of copies, the index of the new backing store
- // is returned. Otherwise, returns kIllegalIndex.
- // Register counts are correctly updated.
- int InvalidateFrameSlotAt(int index);
-
- // Call a code stub that has already been prepared for calling (via
- // PrepareForCall).
- void RawCallStub(CodeStub* stub);
-
- // Calls a code object which has already been prepared for calling
- // (via PrepareForCall).
- void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
- inline bool Equals(VirtualFrame* other);
-
- // Classes that need raw access to the elements_ array.
- friend class DeferredCode;
- friend class JumpTarget;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-
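
[Editor's note: the deleted header above maps abstract frame indices to fp-relative stack offsets (frame_pointer() = parameter_count() + 3, local0_index() = frame_pointer() + 2, fp_relative()). The toy sketch below exercises that arithmetic in isolation; it is illustrative only, not V8 code, and assumes the layout described by the comments: receiver at index 0, then parameters, function, and context below the frame pointer, with the return address between fp and the first local.]

    // Toy model of the deleted MIPS virtual frame's index arithmetic;
    // illustration only, not part of the V8 tree.
    #include <cassert>
    #include <cstdio>

    struct ToyFrame {
      int parameter_count;
      int local_count;
      static const int kPointerSize = 4;

      // Layout by index: [receiver][params...][function][context] fp ...
      int frame_pointer() const { return parameter_count + 3; }
      int param0_index() const { return 1; }
      int function_index() const { return frame_pointer() - 2; }
      int context_index() const { return frame_pointer() - 1; }
      // The return address separates fp from the first local.
      int local0_index() const { return frame_pointer() + 2; }
      int expression_base_index() const { return local0_index() + local_count; }

      // Larger indices live at lower addresses, so the fp-relative byte
      // offset of an element is (fp_index - index) * kPointerSize.
      int fp_relative(int index) const {
        return (frame_pointer() - index) * kPointerSize;
      }
    };

    int main() {
      ToyFrame f = {2, 3};
      assert(f.fp_relative(f.frame_pointer()) == 0);
      std::printf("param 0 at fp%+d\n", f.fp_relative(f.param0_index()));  // fp+16
      std::printf("local 0 at fp%+d\n", f.fp_relative(f.local0_index()));  // fp-8
      return 0;
    }
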
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index 416f88794..3a0353515 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -170,14 +170,16 @@ PropertyKind.Indexed = 2;
// A copy of the PropertyType enum from global.h
PropertyType = {};
-PropertyType.Normal = 0;
-PropertyType.Field = 1;
-PropertyType.ConstantFunction = 2;
-PropertyType.Callbacks = 3;
-PropertyType.Interceptor = 4;
-PropertyType.MapTransition = 5;
-PropertyType.ConstantTransition = 6;
-PropertyType.NullDescriptor = 7;
+PropertyType.Normal = 0;
+PropertyType.Field = 1;
+PropertyType.ConstantFunction = 2;
+PropertyType.Callbacks = 3;
+PropertyType.Handler = 4;
+PropertyType.Interceptor = 5;
+PropertyType.MapTransition = 6;
+PropertyType.ExternalArrayTransition = 7;
+PropertyType.ConstantTransition = 8;
+PropertyType.NullDescriptor = 9;
// Different attributes for a property.
diff --git a/deps/v8/src/jump-target.cc b/deps/v8/src/misc-intrinsics.h
index 72aada8ab..5393de2c2 100644
--- a/deps/v8/src/jump-target.cc
+++ b/deps/v8/src/misc-intrinsics.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,67 +25,65 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
+#ifndef V8_MISC_INTRINSICS_H_
+#define V8_MISC_INTRINSICS_H_
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
+#include "../include/v8.h"
+#include "globals.h"
namespace v8 {
namespace internal {
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-void JumpTarget::Jump() {
- DoJump();
-}
+// Returns the index of the leading 1 bit, counting the least significant bit at
+// index 0. (1 << IntegerLog2(x)) is a mask for the most significant bit of x.
+// Result is undefined if input is zero.
+int IntegerLog2(uint32_t value);
+#if defined(__GNUC__)
-void JumpTarget::Branch(Condition cc, Hint hint) {
- DoBranch(cc, hint);
+inline int IntegerLog2(uint32_t value) {
+ return 31 - __builtin_clz(value);
}
+#elif defined(_MSC_VER)
+
+#pragma intrinsic(_BitScanReverse)
-void JumpTarget::Bind() {
- DoBind();
+inline int IntegerLog2(uint32_t value) {
+ unsigned long result; // NOLINT: MSVC intrinsic demands this type.
+ _BitScanReverse(&result, value);
+ return result;
}
+#else
-// -------------------------------------------------------------------------
-// ShadowTarget implementation.
+// Default version using regular operations. Code taken from:
+// http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog
+inline int IntegerLog2(uint32_t value) {
+ int result, shift;
-ShadowTarget::ShadowTarget(BreakTarget* shadowed) {
- ASSERT(shadowed != NULL);
- other_target_ = shadowed;
+ shift = (value > 0xFFFF) << 4;
+ value >>= shift;
+ result = shift;
-#ifdef DEBUG
- is_shadowing_ = true;
-#endif
- // While shadowing this shadow target saves the state of the original.
- shadowed->CopyTo(this);
+ shift = (value > 0xFF) << 3;
+ value >>= shift;
+ result |= shift;
- // The original's state is reset.
- shadowed->Unuse();
- ASSERT(cgen()->has_valid_frame());
- shadowed->set_expected_height(cgen()->frame()->height());
-}
+ shift = (value > 0xF) << 2;
+ value >>= shift;
+ result |= shift;
+ shift = (value > 0x3) << 1;
+ value >>= shift;
+ result |= shift;
-void ShadowTarget::StopShadowing() {
- ASSERT(is_shadowing_);
+ result |= (value >> 1);
- // The states of this target, which was shadowed, and the original
- // target, which was shadowing, are swapped.
- BreakTarget temp;
- other_target_->CopyTo(&temp);
- CopyTo(other_target_);
- temp.CopyTo(this);
- temp.Unuse();
-
-#ifdef DEBUG
- is_shadowing_ = false;
-#endif
+ return result;
}
+#endif
} } // namespace v8::internal
+
+#endif // V8_MISC_INTRINSICS_H_
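
[Editor's note: the portable fallback above computes the log in five mask-and-shift steps, halving the candidate range each time. A minimal standalone cross-check against a naive shift loop, as a sketch (not part of the tree):]

    // Cross-check of the branchless IntegerLog2 above against a naive
    // shift loop; illustration only.
    #include <cassert>
    #include <cstdint>

    static int IntegerLog2Branchless(uint32_t value) {
      int result, shift;
      shift = (value > 0xFFFF) << 4; value >>= shift; result = shift;
      shift = (value > 0xFF) << 3;   value >>= shift; result |= shift;
      shift = (value > 0xF) << 2;    value >>= shift; result |= shift;
      shift = (value > 0x3) << 1;    value >>= shift; result |= shift;
      result |= (value >> 1);
      return result;
    }

    static int IntegerLog2Naive(uint32_t value) {
      int result = -1;
      while (value != 0) { value >>= 1; result++; }
      return result;
    }

    int main() {
      for (uint32_t v = 1; v < (1u << 20); v++) {
        assert(IntegerLog2Branchless(v) == IntegerLog2Naive(v));
      }
      assert(IntegerLog2Branchless(0x80000000u) == 31);
      return 0;
    }
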
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc
index a30b45079..1ed610341 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/mksnapshot.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+#include <bzlib.h>
+#endif
#include <signal.h>
#include <string>
#include <map>
@@ -95,11 +98,54 @@ typedef std::map<std::string, int*>::iterator CounterMapIterator;
static CounterMap counter_table_;
-class CppByteSink : public i::SnapshotByteSink {
+class Compressor {
+ public:
+ virtual ~Compressor() {}
+ virtual bool Compress(i::Vector<char> input) = 0;
+ virtual i::Vector<char>* output() = 0;
+};
+
+
+class PartialSnapshotSink : public i::SnapshotByteSink {
public:
- explicit CppByteSink(const char* snapshot_file)
- : bytes_written_(0),
- partial_sink_(this) {
+ PartialSnapshotSink() : data_(), raw_size_(-1) { }
+ virtual ~PartialSnapshotSink() { data_.Free(); }
+ virtual void Put(int byte, const char* description) {
+ data_.Add(byte);
+ }
+ virtual int Position() { return data_.length(); }
+ void Print(FILE* fp) {
+ int length = Position();
+ for (int j = 0; j < length; j++) {
+ if ((j & 0x1f) == 0x1f) {
+ fprintf(fp, "\n");
+ }
+ if (j != 0) {
+ fprintf(fp, ",");
+ }
+ fprintf(fp, "%d", at(j));
+ }
+ }
+ char at(int i) { return data_[i]; }
+ bool Compress(Compressor* compressor) {
+ ASSERT_EQ(-1, raw_size_);
+ raw_size_ = data_.length();
+ if (!compressor->Compress(data_.ToVector())) return false;
+ data_.Clear();
+ data_.AddAll(*compressor->output());
+ return true;
+ }
+ int raw_size() { return raw_size_; }
+
+ private:
+ i::List<char> data_;
+ int raw_size_;
+};
+
+
+class CppByteSink : public PartialSnapshotSink {
+ public:
+ explicit CppByteSink(const char* snapshot_file) {
fp_ = i::OS::FOpen(snapshot_file, "wb");
if (fp_ == NULL) {
i::PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
@@ -114,7 +160,18 @@ class CppByteSink : public i::SnapshotByteSink {
}
virtual ~CppByteSink() {
- fprintf(fp_, "const int Snapshot::size_ = %d;\n\n", bytes_written_);
+ fprintf(fp_, "const int Snapshot::size_ = %d;\n", Position());
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+ fprintf(fp_, "const byte* Snapshot::raw_data_ = NULL;\n");
+ fprintf(fp_,
+ "const int Snapshot::raw_size_ = %d;\n\n",
+ raw_size());
+#else
+ fprintf(fp_,
+ "const byte* Snapshot::raw_data_ = Snapshot::data_;\n");
+ fprintf(fp_,
+ "const int Snapshot::raw_size_ = Snapshot::size_;\n\n");
+#endif
fprintf(fp_, "} } // namespace v8::internal\n");
fclose(fp_);
}
@@ -127,7 +184,6 @@ class CppByteSink : public i::SnapshotByteSink {
int map_space_used,
int cell_space_used,
int large_space_used) {
- fprintf(fp_, "};\n\n");
fprintf(fp_, "const int Snapshot::new_space_used_ = %d;\n", new_space_used);
fprintf(fp_,
"const int Snapshot::pointer_space_used_ = %d;\n",
@@ -151,59 +207,94 @@ class CppByteSink : public i::SnapshotByteSink {
int length = partial_sink_.Position();
fprintf(fp_, "};\n\n");
fprintf(fp_, "const int Snapshot::context_size_ = %d;\n", length);
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+ fprintf(fp_,
+ "const int Snapshot::context_raw_size_ = %d;\n",
+ partial_sink_.raw_size());
+#else
+ fprintf(fp_,
+ "const int Snapshot::context_raw_size_ = "
+ "Snapshot::context_size_;\n");
+#endif
fprintf(fp_, "const byte Snapshot::context_data_[] = {\n");
- for (int j = 0; j < length; j++) {
- if ((j & 0x1f) == 0x1f) {
- fprintf(fp_, "\n");
- }
- char byte = partial_sink_.at(j);
- if (j != 0) {
- fprintf(fp_, ",");
- }
- fprintf(fp_, "%d", byte);
- }
+ partial_sink_.Print(fp_);
+ fprintf(fp_, "};\n\n");
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+ fprintf(fp_, "const byte* Snapshot::context_raw_data_ = NULL;\n");
+#else
+ fprintf(fp_, "const byte* Snapshot::context_raw_data_ ="
+ " Snapshot::context_data_;\n");
+#endif
}
- virtual void Put(int byte, const char* description) {
- if (bytes_written_ != 0) {
- fprintf(fp_, ",");
- }
- fprintf(fp_, "%d", byte);
- bytes_written_++;
- if ((bytes_written_ & 0x1f) == 0) {
- fprintf(fp_, "\n");
- }
+ void WriteSnapshot() {
+ Print(fp_);
}
- virtual int Position() {
- return bytes_written_;
- }
+ PartialSnapshotSink* partial_sink() { return &partial_sink_; }
+
+ private:
+ FILE* fp_;
+ PartialSnapshotSink partial_sink_;
+};
- i::SnapshotByteSink* partial_sink() { return &partial_sink_; }
- class PartialSnapshotSink : public i::SnapshotByteSink {
- public:
- explicit PartialSnapshotSink(CppByteSink* parent)
- : parent_(parent),
- data_() { }
- virtual ~PartialSnapshotSink() { data_.Free(); }
- virtual void Put(int byte, const char* description) {
- data_.Add(byte);
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+class BZip2Compressor : public Compressor {
+ public:
+ BZip2Compressor() : output_(NULL) {}
+ virtual ~BZip2Compressor() {
+ delete output_;
+ }
+ virtual bool Compress(i::Vector<char> input) {
+ delete output_;
+ output_ = new i::ScopedVector<char>((input.length() * 101) / 100 + 1000);
+ unsigned int output_length_ = output_->length();
+ int result = BZ2_bzBuffToBuffCompress(output_->start(), &output_length_,
+ input.start(), input.length(),
+ 9, 1, 0);
+ if (result == BZ_OK) {
+ output_->Truncate(output_length_);
+ return true;
+ } else {
+ fprintf(stderr, "bzlib error code: %d\n", result);
+ return false;
}
- virtual int Position() { return data_.length(); }
- char at(int i) { return data_[i]; }
- private:
- CppByteSink* parent_;
- i::List<char> data_;
- };
+ }
+ virtual i::Vector<char>* output() { return output_; }
private:
- FILE* fp_;
- int bytes_written_;
- PartialSnapshotSink partial_sink_;
+ i::ScopedVector<char>* output_;
};
+class BZip2Decompressor : public StartupDataDecompressor {
+ public:
+ virtual ~BZip2Decompressor() { }
+
+ protected:
+ virtual int DecompressData(char* raw_data,
+ int* raw_data_size,
+ const char* compressed_data,
+ int compressed_data_size) {
+ ASSERT_EQ(StartupData::kBZip2,
+ V8::GetCompressedStartupDataAlgorithm());
+ unsigned int decompressed_size = *raw_data_size;
+ int result =
+ BZ2_bzBuffToBuffDecompress(raw_data,
+ &decompressed_size,
+ const_cast<char*>(compressed_data),
+ compressed_data_size,
+ 0, 1);
+ if (result == BZ_OK) {
+ *raw_data_size = decompressed_size;
+ }
+ return result;
+ }
+};
+#endif
+
+
int main(int argc, char** argv) {
#ifdef ENABLE_LOGGING_AND_PROFILING
// By default, log code create information in the snapshot.
@@ -217,18 +308,26 @@ int main(int argc, char** argv) {
i::FlagList::PrintHelp();
return !i::FLAG_help;
}
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+ BZip2Decompressor natives_decompressor;
+ int bz2_result = natives_decompressor.Decompress();
+ if (bz2_result != BZ_OK) {
+ fprintf(stderr, "bzip error code: %d\n", bz2_result);
+ exit(1);
+ }
+#endif
i::Serializer::Enable();
Persistent<Context> context = v8::Context::New();
ASSERT(!context.IsEmpty());
// Make sure all builtin scripts are cached.
{ HandleScope scope;
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
- i::Bootstrapper::NativesSourceLookup(i);
+ i::Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
}
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of the context.
- i::Heap::CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
context.Dispose();
CppByteSink sink(argv[1]);
@@ -242,6 +341,14 @@ int main(int argc, char** argv) {
ser.SerializeWeakReferences();
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+ BZip2Compressor compressor;
+ if (!sink.Compress(&compressor))
+ return 1;
+ if (!sink.partial_sink()->Compress(&compressor))
+ return 1;
+#endif
+ sink.WriteSnapshot();
sink.WritePartialSnapshot();
sink.WriteSpaceUsed(
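
[Editor's note: both sinks now record the uncompressed length before replacing their contents with compressed bytes, because bzlib's one-shot decompressor needs a caller-supplied output buffer of at least the raw size. A minimal round trip through the same two bzlib calls, as a sketch (link with -lbz2):]

    // bzlib round trip mirroring the mksnapshot flow: compress at build
    // time, record the raw size, decompress into a raw-size buffer at
    // startup. Illustration only.
    #include <bzlib.h>
    #include <cassert>
    #include <cstring>
    #include <vector>

    int main() {
      const char raw[] = "snapshot bytes snapshot bytes snapshot bytes";
      unsigned int raw_size = sizeof(raw);

      // Worst case per the bzip2 docs: input size + 1% + 600 bytes.
      std::vector<char> packed(raw_size + raw_size / 100 + 600);
      unsigned int packed_size = packed.size();
      int rc = BZ2_bzBuffToBuffCompress(&packed[0], &packed_size,
                                        const_cast<char*>(raw), raw_size,
                                        9 /* block size */, 0, 0);
      assert(rc == BZ_OK);

      // The recorded raw size tells the decompressor how big a buffer to
      // allocate, exactly like Snapshot::raw_size_ above.
      std::vector<char> unpacked(raw_size);
      unsigned int unpacked_size = raw_size;
      rc = BZ2_bzBuffToBuffDecompress(&unpacked[0], &unpacked_size,
                                      &packed[0], packed_size, 0, 0);
      assert(rc == BZ_OK && unpacked_size == raw_size);
      assert(std::memcmp(&unpacked[0], raw, raw_size) == 0);
      return 0;
    }
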
diff --git a/deps/v8/src/natives.h b/deps/v8/src/natives.h
index 639a2d37b..a2831863a 100644
--- a/deps/v8/src/natives.h
+++ b/deps/v8/src/natives.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -36,7 +36,7 @@ typedef bool (*NativeSourceCallback)(Vector<const char> name,
int index);
enum NativeType {
- CORE, D8
+ CORE, EXPERIMENTAL, D8
};
template <NativeType type>
@@ -52,11 +52,15 @@ class NativesCollection {
// non-debugger scripts have an index in the interval [GetDebuggerCount(),
// GetNativesCount()).
static int GetIndex(const char* name);
- static Vector<const char> GetScriptSource(int index);
+ static int GetRawScriptsSize();
+ static Vector<const char> GetRawScriptSource(int index);
static Vector<const char> GetScriptName(int index);
+ static Vector<const byte> GetScriptsSource();
+ static void SetRawScriptsSource(Vector<const char> raw_source);
};
typedef NativesCollection<CORE> Natives;
+typedef NativesCollection<EXPERIMENTAL> ExperimentalNatives;
} } // namespace v8::internal
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index c1caef2d9..d0f86713e 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -88,11 +88,14 @@ void HeapObject::HeapObjectVerify() {
case FIXED_ARRAY_TYPE:
FixedArray::cast(this)->FixedArrayVerify();
break;
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ FixedDoubleArray::cast(this)->FixedDoubleArrayVerify();
+ break;
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayVerify();
break;
- case PIXEL_ARRAY_TYPE:
- PixelArray::cast(this)->PixelArrayVerify();
+ case EXTERNAL_PIXEL_ARRAY_TYPE:
+ ExternalPixelArray::cast(this)->ExternalPixelArrayVerify();
break;
case EXTERNAL_BYTE_ARRAY_TYPE:
ExternalByteArray::cast(this)->ExternalByteArrayVerify();
@@ -116,6 +119,9 @@ void HeapObject::HeapObjectVerify() {
case EXTERNAL_FLOAT_ARRAY_TYPE:
ExternalFloatArray::cast(this)->ExternalFloatArrayVerify();
break;
+ case EXTERNAL_DOUBLE_ARRAY_TYPE:
+ ExternalDoubleArray::cast(this)->ExternalDoubleArrayVerify();
+ break;
case CODE_TYPE:
Code::cast(this)->CodeVerify();
break;
@@ -152,8 +158,11 @@ void HeapObject::HeapObjectVerify() {
break;
case FILLER_TYPE:
break;
- case PROXY_TYPE:
- Proxy::cast(this)->ProxyVerify();
+ case JS_PROXY_TYPE:
+ JSProxy::cast(this)->JSProxyVerify();
+ break;
+ case FOREIGN_TYPE:
+ Foreign::cast(this)->ForeignVerify();
break;
case SHARED_FUNCTION_INFO_TYPE:
SharedFunctionInfo::cast(this)->SharedFunctionInfoVerify();
@@ -178,7 +187,7 @@ void HeapObject::HeapObjectVerify() {
void HeapObject::VerifyHeapPointer(Object* p) {
ASSERT(p->IsHeapObject());
- ASSERT(Heap::Contains(HeapObject::cast(p)));
+ ASSERT(HEAP->Contains(HeapObject::cast(p)));
}
@@ -192,8 +201,8 @@ void ByteArray::ByteArrayVerify() {
}
-void PixelArray::PixelArrayVerify() {
- ASSERT(IsPixelArray());
+void ExternalPixelArray::ExternalPixelArrayVerify() {
+ ASSERT(IsExternalPixelArray());
}
@@ -232,6 +241,11 @@ void ExternalFloatArray::ExternalFloatArrayVerify() {
}
+void ExternalDoubleArray::ExternalDoubleArrayVerify() {
+ ASSERT(IsExternalDoubleArray());
+}
+
+
void JSObject::JSObjectVerify() {
VerifyHeapPointer(properties());
VerifyHeapPointer(elements());
@@ -241,18 +255,18 @@ void JSObject::JSObjectVerify() {
map()->NextFreePropertyIndex()));
}
ASSERT(map()->has_fast_elements() ==
- (elements()->map() == Heap::fixed_array_map() ||
- elements()->map() == Heap::fixed_cow_array_map()));
+ (elements()->map() == GetHeap()->fixed_array_map() ||
+ elements()->map() == GetHeap()->fixed_cow_array_map()));
ASSERT(map()->has_fast_elements() == HasFastElements());
}
void Map::MapVerify() {
- ASSERT(!Heap::InNewSpace(this));
+ ASSERT(!HEAP->InNewSpace(this));
ASSERT(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
ASSERT(instance_size() == kVariableSizeSentinel ||
(kPointerSize <= instance_size() &&
- instance_size() < Heap::Capacity()));
+ instance_size() < HEAP->Capacity()));
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
}
@@ -261,8 +275,7 @@ void Map::MapVerify() {
void Map::SharedMapVerify() {
MapVerify();
ASSERT(is_shared());
- ASSERT_EQ(Heap::empty_descriptor_array(), instance_descriptors());
- ASSERT_EQ(Heap::empty_fixed_array(), code_cache());
+ ASSERT(instance_descriptors()->IsEmpty());
ASSERT_EQ(0, pre_allocated_property_fields());
ASSERT_EQ(0, unused_property_fields());
ASSERT_EQ(StaticVisitorBase::GetVisitorId(instance_type(), instance_size()),
@@ -279,6 +292,12 @@ void CodeCache::CodeCacheVerify() {
}
+void PolymorphicCodeCache::PolymorphicCodeCacheVerify() {
+ VerifyHeapPointer(cache());
+ ASSERT(cache()->IsUndefined() || cache()->IsPolymorphicCodeCacheHashTable());
+}
+
+
void FixedArray::FixedArrayVerify() {
for (int i = 0; i < length(); i++) {
Object* e = get(i);
@@ -291,6 +310,17 @@ void FixedArray::FixedArrayVerify() {
}
+void FixedDoubleArray::FixedDoubleArrayVerify() {
+ for (int i = 0; i < length(); i++) {
+ if (!is_the_hole(i)) {
+ double value = get(i);
+ ASSERT(!isnan(value) ||
+ BitCast<uint64_t>(value) == kCanonicalNonHoleNanInt64);
+ }
+ }
+}
+
+
void JSValue::JSValueVerify() {
Object* v = value();
if (v->IsHeapObject()) {
@@ -316,7 +346,7 @@ void String::StringVerify() {
CHECK(IsString());
CHECK(length() >= 0 && length() <= Smi::kMaxValue);
if (IsSymbol()) {
- CHECK(!Heap::InNewSpace(this));
+ CHECK(!HEAP->InNewSpace(this));
}
}
@@ -380,7 +410,7 @@ void Oddball::OddballVerify() {
VerifyHeapPointer(to_string());
Object* number = to_number();
if (number->IsHeapObject()) {
- ASSERT(number == Heap::nan_value());
+ ASSERT(number == HEAP->nan_value());
} else {
ASSERT(number->IsSmi());
int value = Smi::cast(number)->value();
@@ -416,7 +446,9 @@ void Code::CodeVerify() {
void JSArray::JSArrayVerify() {
JSObjectVerify();
ASSERT(length()->IsNumber() || length()->IsUndefined());
- ASSERT(elements()->IsUndefined() || elements()->IsFixedArray());
+ ASSERT(elements()->IsUndefined() ||
+ elements()->IsFixedArray() ||
+ elements()->IsFixedDoubleArray());
}
@@ -454,8 +486,13 @@ void JSRegExp::JSRegExpVerify() {
}
-void Proxy::ProxyVerify() {
- ASSERT(IsProxy());
+void JSProxy::JSProxyVerify() {
+ ASSERT(IsJSProxy());
+ VerifyPointer(handler());
+}
+
+void Foreign::ForeignVerify() {
+ ASSERT(IsForeign());
}
@@ -591,16 +628,17 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
int holes = 0;
FixedArray* e = FixedArray::cast(elements());
int len = e->length();
+ Heap* heap = HEAP;
for (int i = 0; i < len; i++) {
- if (e->get(i) == Heap::the_hole_value()) holes++;
+ if (e->get(i) == heap->the_hole_value()) holes++;
}
info->number_of_fast_used_elements_ += len - holes;
info->number_of_fast_unused_elements_ += holes;
break;
}
- case PIXEL_ELEMENTS: {
+ case EXTERNAL_PIXEL_ELEMENTS: {
info->number_of_objects_with_fast_elements_++;
- PixelArray* e = PixelArray::cast(elements());
+ ExternalPixelArray* e = ExternalPixelArray::cast(elements());
info->number_of_fast_used_elements_ += e->length();
break;
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index dedb19956..0198dc153 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -39,9 +39,10 @@
#include "contexts.h"
#include "conversions-inl.h"
#include "heap.h"
-#include "memory.h"
+#include "isolate.h"
#include "property.h"
#include "spaces.h"
+#include "v8memory.h"
namespace v8 {
namespace internal {
@@ -78,7 +79,16 @@ PropertyDetails PropertyDetails::AsDeleted() {
type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
void holder::set_##name(type* value, WriteBarrierMode mode) { \
WRITE_FIELD(this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(this, offset, mode); \
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode); \
+ }
+
+
+// GC-safe accessors do not use HeapObject::GetHeap(), but access TLS instead.
+#define ACCESSORS_GCSAFE(holder, name, type, offset) \
+ type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
+ void holder::set_##name(type* value, WriteBarrierMode mode) { \
+ WRITE_FIELD(this, offset, value); \
+ CONDITIONAL_WRITE_BARRIER(HEAP, this, offset, mode); \
}
@@ -207,6 +217,10 @@ bool Object::IsExternalTwoByteString() {
String::cast(this)->IsTwoByteRepresentation();
}
+bool Object::HasValidElements() {
+ // Dictionary is covered under FixedArray.
+ return IsFixedArray() || IsFixedDoubleArray() || IsExternalArray();
+}
StringShape::StringShape(String* str)
: type_(str->map()->instance_type()) {
@@ -330,9 +344,10 @@ bool Object::IsByteArray() {
}
-bool Object::IsPixelArray() {
+bool Object::IsExternalPixelArray() {
return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() == PIXEL_ARRAY_TYPE;
+ HeapObject::cast(this)->map()->instance_type() ==
+ EXTERNAL_PIXEL_ARRAY_TYPE;
}
@@ -395,6 +410,13 @@ bool Object::IsExternalFloatArray() {
}
+bool Object::IsExternalDoubleArray() {
+ return Object::IsHeapObject() &&
+ HeapObject::cast(this)->map()->instance_type() ==
+ EXTERNAL_DOUBLE_ARRAY_TYPE;
+}
+
+
bool MaybeObject::IsFailure() {
return HAS_FAILURE_TAG(this);
}
@@ -418,7 +440,7 @@ bool MaybeObject::IsException() {
bool MaybeObject::IsTheHole() {
- return this == Heap::the_hole_value();
+ return !IsFailure() && ToObjectUnchecked()->IsTheHole();
}
@@ -428,9 +450,27 @@ Failure* Failure::cast(MaybeObject* obj) {
}
+bool Object::IsJSReceiver() {
+ return IsHeapObject() &&
+ HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
+}
+
+
bool Object::IsJSObject() {
- return IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
+ return IsJSReceiver() && !IsJSProxy();
+}
+
+
+bool Object::IsJSProxy() {
+ return Object::IsHeapObject() &&
+ (HeapObject::cast(this)->map()->instance_type() == JS_PROXY_TYPE ||
+ HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE);
+}
+
+
+bool Object::IsJSFunctionProxy() {
+ return Object::IsHeapObject() &&
+ HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE;
}
@@ -453,6 +493,13 @@ bool Object::IsFixedArray() {
}
+bool Object::IsFixedDoubleArray() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() ==
+ FIXED_DOUBLE_ARRAY_TYPE;
+}
+
+
bool Object::IsDescriptorArray() {
return IsFixedArray();
}
@@ -486,22 +533,22 @@ bool Object::IsDeoptimizationOutputData() {
bool Object::IsContext() {
- return Object::IsHeapObject()
- && (HeapObject::cast(this)->map() == Heap::context_map() ||
- HeapObject::cast(this)->map() == Heap::catch_context_map() ||
- HeapObject::cast(this)->map() == Heap::global_context_map());
-}
-
-
-bool Object::IsCatchContext() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map() == Heap::catch_context_map();
+ if (Object::IsHeapObject()) {
+ Map* map = HeapObject::cast(this)->map();
+ Heap* heap = map->GetHeap();
+ return (map == heap->function_context_map() ||
+ map == heap->catch_context_map() ||
+ map == heap->with_context_map() ||
+ map == heap->global_context_map());
+ }
+ return false;
}
bool Object::IsGlobalContext() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map() == Heap::global_context_map();
+ return Object::IsHeapObject() &&
+ HeapObject::cast(this)->map() ==
+ HeapObject::cast(this)->GetHeap()->global_context_map();
}
@@ -523,6 +570,7 @@ bool Object::IsCode() {
bool Object::IsOddball() {
+ ASSERT(HEAP->is_safe_to_read_maps());
return Object::IsHeapObject()
&& HeapObject::cast(this)->map()->instance_type() == ODDBALL_TYPE;
}
@@ -560,14 +608,15 @@ bool Object::IsStringWrapper() {
}
-bool Object::IsProxy() {
+bool Object::IsForeign() {
return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == PROXY_TYPE;
+ && HeapObject::cast(this)->map()->instance_type() == FOREIGN_TYPE;
}
bool Object::IsBoolean() {
- return IsTrue() || IsFalse();
+ return IsOddball() &&
+ ((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
}
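
[Editor's note: the rewritten predicates replace pointer comparisons against heap singletons with a kind byte stored on the oddball itself, which also makes IsBoolean a single mask test. A sketch of the encoding follows; the concrete kind values are assumptions chosen so that only kFalse and kTrue clear every bit of kNotBooleanMask, not V8's actual constants:]

    // Kind-byte encoding that makes IsBoolean one mask test; values are
    // illustrative assumptions.
    #include <cassert>

    enum OddballKind {
      kFalse = 0,
      kTrue = 1,
      kTheHole = 2,
      kNull = 3,
      kArgumentMarker = 4,
      kUndefined = 5,
      kNotBooleanMask = ~1  // Any bit outside the boolean pair.
    };

    static bool IsBoolean(int kind) {
      // True exactly for kFalse (0) and kTrue (1).
      return (kind & kNotBooleanMask) == 0;
    }

    int main() {
      assert(IsBoolean(kFalse) && IsBoolean(kTrue));
      assert(!IsBoolean(kTheHole) && !IsBoolean(kNull));
      assert(!IsBoolean(kArgumentMarker) && !IsBoolean(kUndefined));
      return 0;
    }
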
@@ -589,18 +638,21 @@ template <> inline bool Is<JSArray>(Object* obj) {
bool Object::IsHashTable() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map() == Heap::hash_table_map();
+ return Object::IsHeapObject() &&
+ HeapObject::cast(this)->map() ==
+ HeapObject::cast(this)->GetHeap()->hash_table_map();
}
bool Object::IsDictionary() {
- return IsHashTable() && this != Heap::symbol_table();
+ return IsHashTable() &&
+ this != HeapObject::cast(this)->GetHeap()->symbol_table();
}
bool Object::IsSymbolTable() {
- return IsHashTable() && this == Heap::raw_unchecked_symbol_table();
+ return IsHashTable() && this ==
+ HeapObject::cast(this)->GetHeap()->raw_unchecked_symbol_table();
}
@@ -642,6 +694,11 @@ bool Object::IsCodeCacheHashTable() {
}
+bool Object::IsPolymorphicCodeCacheHashTable() {
+ return IsHashTable();
+}
+
+
bool Object::IsMapCache() {
return IsHashTable();
}
@@ -717,27 +774,32 @@ bool Object::IsStruct() {
bool Object::IsUndefined() {
- return this == Heap::undefined_value();
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUndefined;
}
bool Object::IsNull() {
- return this == Heap::null_value();
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::kNull;
+}
+
+
+bool Object::IsTheHole() {
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTheHole;
}
bool Object::IsTrue() {
- return this == Heap::true_value();
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTrue;
}
bool Object::IsFalse() {
- return this == Heap::false_value();
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::kFalse;
}
bool Object::IsArgumentsMarker() {
- return this == Heap::arguments_marker();
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::kArgumentMarker;
}
@@ -749,7 +811,6 @@ double Object::Number() {
}
-
MaybeObject* Object::ToSmi() {
if (IsSmi()) return this;
if (IsHeapNumber()) {
@@ -772,7 +833,7 @@ MaybeObject* Object::GetElement(uint32_t index) {
// GetElement can trigger a getter which can cause allocation.
// This was not always the case. This ASSERT is here to catch
// leftover incorrect uses.
- ASSERT(Heap::IsAllocationAllowed());
+ ASSERT(HEAP->IsAllocationAllowed());
return GetElementWithReceiver(this, index);
}
@@ -806,28 +867,62 @@ MaybeObject* Object::GetProperty(String* key, PropertyAttributes* attributes) {
#define WRITE_FIELD(p, offset, value) \
(*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
-
+// TODO(isolates): Pass heap in to these macros.
#define WRITE_BARRIER(object, offset) \
- Heap::RecordWrite(object->address(), offset);
+ object->GetHeap()->RecordWrite(object->address(), offset);
// CONDITIONAL_WRITE_BARRIER must be issued after the actual
// write due to the assert validating the written value.
-#define CONDITIONAL_WRITE_BARRIER(object, offset, mode) \
+#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, mode) \
if (mode == UPDATE_WRITE_BARRIER) { \
- Heap::RecordWrite(object->address(), offset); \
+ heap->RecordWrite(object->address(), offset); \
} else { \
ASSERT(mode == SKIP_WRITE_BARRIER); \
- ASSERT(Heap::InNewSpace(object) || \
- !Heap::InNewSpace(READ_FIELD(object, offset)) || \
+ ASSERT(heap->InNewSpace(object) || \
+ !heap->InNewSpace(READ_FIELD(object, offset)) || \
Page::FromAddress(object->address())-> \
IsRegionDirty(object->address() + offset)); \
}
-#define READ_DOUBLE_FIELD(p, offset) \
- (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)))
+#ifndef V8_TARGET_ARCH_MIPS
+ #define READ_DOUBLE_FIELD(p, offset) \
+ (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)))
+#else // V8_TARGET_ARCH_MIPS
+ // Prevent gcc from using load-double (mips ldc1) on (possibly)
+ // non-64-bit aligned HeapNumber::value.
+ static inline double read_double_field(void* p, int offset) {
+ union conversion {
+ double d;
+ uint32_t u[2];
+ } c;
+ c.u[0] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)));
+ c.u[1] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4)));
+ return c.d;
+ }
+ #define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset)
+#endif // V8_TARGET_ARCH_MIPS
+
+
+#ifndef V8_TARGET_ARCH_MIPS
+ #define WRITE_DOUBLE_FIELD(p, offset, value) \
+ (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
+#else // V8_TARGET_ARCH_MIPS
+ // Prevent gcc from using store-double (mips sdc1) on (possibly)
+ // non-64-bit aligned HeapNumber::value.
+ static inline void write_double_field(void* p, int offset,
+ double value) {
+ union conversion {
+ double d;
+ uint32_t u[2];
+ } c;
+ c.d = value;
+ (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset))) = c.u[0];
+ (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4))) = c.u[1];
+ }
+ #define WRITE_DOUBLE_FIELD(p, offset, value) \
+ write_double_field(p, offset, value)
+#endif // V8_TARGET_ARCH_MIPS
-#define WRITE_DOUBLE_FIELD(p, offset, value) \
- (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
#define READ_INT_FIELD(p, offset) \
(*reinterpret_cast<int*>(FIELD_ADDR(p, offset)))
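
[Editor's note: the MIPS branch of the double-field macros above avoids ldc1/sdc1, which fault on addresses that are not 8-byte aligned, by funnelling the value through a union of one double and two 32-bit words. A standalone sketch of the same read path; copying the words in address order keeps the value correct on either endianness:]

    // Union-based unaligned double read, as in read_double_field above;
    // illustration only.
    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static double read_double_unaligned(const void* p) {
      union {
        double d;
        uint32_t u[2];
      } c;
      // Two 32-bit loads instead of one (possibly trapping) 64-bit load.
      c.u[0] = *reinterpret_cast<const uint32_t*>(p);
      c.u[1] = *reinterpret_cast<const uint32_t*>(
          static_cast<const char*>(p) + 4);
      return c.d;
    }

    int main() {
      // Place a double at a 4-byte (not necessarily 8-byte) offset.
      uint32_t buffer[4] = {0, 0, 0, 0};
      double value = 3.25;
      std::memcpy(&buffer[1], &value, sizeof(value));
      assert(read_double_unaligned(&buffer[1]) == 3.25);
      return 0;
    }
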
@@ -1098,6 +1193,21 @@ void HeapObject::VerifySmiField(int offset) {
#endif
+Heap* HeapObject::GetHeap() {
+ // During GC, the map pointer in HeapObject is used in various ways that
+ // prevent us from retrieving Heap from the map.
+ // Assert that we are not in GC, implement GC code in a way that it doesn't
+ // pull heap from the map.
+ ASSERT(HEAP->is_safe_to_read_maps());
+ return map()->heap();
+}
+
+
+Isolate* HeapObject::GetIsolate() {
+ return GetHeap()->isolate();
+}
+
+
Map* HeapObject::map() {
return map_word().ToMap();
}
@@ -1214,35 +1324,31 @@ ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
HeapObject* JSObject::elements() {
Object* array = READ_FIELD(this, kElementsOffset);
- // In the assert below Dictionary is covered under FixedArray.
- ASSERT(array->IsFixedArray() || array->IsPixelArray() ||
- array->IsExternalArray());
+ ASSERT(array->HasValidElements());
return reinterpret_cast<HeapObject*>(array);
}
void JSObject::set_elements(HeapObject* value, WriteBarrierMode mode) {
ASSERT(map()->has_fast_elements() ==
- (value->map() == Heap::fixed_array_map() ||
- value->map() == Heap::fixed_cow_array_map()));
- // In the assert below Dictionary is covered under FixedArray.
- ASSERT(value->IsFixedArray() || value->IsPixelArray() ||
- value->IsExternalArray());
+ (value->map() == GetHeap()->fixed_array_map() ||
+ value->map() == GetHeap()->fixed_cow_array_map()));
+ ASSERT(value->HasValidElements());
WRITE_FIELD(this, kElementsOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kElementsOffset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, mode);
}
void JSObject::initialize_properties() {
- ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
- WRITE_FIELD(this, kPropertiesOffset, Heap::empty_fixed_array());
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
+ WRITE_FIELD(this, kPropertiesOffset, GetHeap()->empty_fixed_array());
}
void JSObject::initialize_elements() {
ASSERT(map()->has_fast_elements());
- ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
- WRITE_FIELD(this, kElementsOffset, Heap::empty_fixed_array());
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
+ WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
}
@@ -1261,6 +1367,16 @@ ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
+byte Oddball::kind() {
+ return READ_BYTE_FIELD(this, kKindOffset);
+}
+
+
+void Oddball::set_kind(byte value) {
+ WRITE_BYTE_FIELD(this, kKindOffset, value);
+}
+
+
Object* JSGlobalPropertyCell::value() {
return READ_FIELD(this, kValueOffset);
}
@@ -1314,6 +1430,12 @@ int JSObject::GetInternalFieldCount() {
}
+int JSObject::GetInternalFieldOffset(int index) {
+ ASSERT(index < GetInternalFieldCount() && index >= 0);
+ return GetHeaderSize() + (kPointerSize * index);
+}
+
+
Object* JSObject::GetInternalField(int index) {
ASSERT(index < GetInternalFieldCount() && index >= 0);
// Internal objects do follow immediately after the header, whereas in-object
@@ -1365,6 +1487,14 @@ Object* JSObject::FastPropertyAtPut(int index, Object* value) {
}
+int JSObject::GetInObjectPropertyOffset(int index) {
+ // Adjust for the number of properties stored in the object.
+ index -= map()->inobject_properties();
+ ASSERT(index < 0);
+ return map()->instance_size() + (index * kPointerSize);
+}
+
+
Object* JSObject::InObjectPropertyAt(int index) {
// Adjust for the number of properties stored in the object.
index -= map()->inobject_properties();
@@ -1382,14 +1512,14 @@ Object* JSObject::InObjectPropertyAtPut(int index,
ASSERT(index < 0);
int offset = map()->instance_size() + (index * kPointerSize);
WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(this, offset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
return value;
}
void JSObject::InitializeBody(int object_size, Object* value) {
- ASSERT(!value->IsHeapObject() || !Heap::InNewSpace(value));
+ ASSERT(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
WRITE_FIELD(this, offset, value);
}
@@ -1412,7 +1542,7 @@ int JSObject::MaxFastProperties() {
void Struct::InitializeBody(int object_size) {
- Object* value = Heap::undefined_value();
+ Object* value = GetHeap()->undefined_value();
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
WRITE_FIELD(this, offset, value);
}
@@ -1451,6 +1581,12 @@ bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
}
+FixedArrayBase* FixedArrayBase::cast(Object* object) {
+ ASSERT(object->IsFixedArray() || object->IsFixedDoubleArray());
+ return reinterpret_cast<FixedArrayBase*>(object);
+}
+
+
Object* FixedArray::get(int index) {
ASSERT(index >= 0 && index < this->length());
return READ_FIELD(this, kHeaderSize + index * kPointerSize);
@@ -1458,7 +1594,7 @@ Object* FixedArray::get(int index) {
void FixedArray::set(int index, Smi* value) {
- ASSERT(map() != Heap::fixed_cow_array_map());
+ ASSERT(map() != HEAP->fixed_cow_array_map());
ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
@@ -1466,7 +1602,7 @@ void FixedArray::set(int index, Smi* value) {
void FixedArray::set(int index, Object* value) {
- ASSERT(map() != Heap::fixed_cow_array_map());
+ ASSERT(map() != HEAP->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
@@ -1474,8 +1610,90 @@ void FixedArray::set(int index, Object* value) {
}
+double FixedDoubleArray::get(int index) {
+ ASSERT(map() != HEAP->fixed_cow_array_map() &&
+ map() != HEAP->fixed_array_map());
+ ASSERT(index >= 0 && index < this->length());
+ double result = READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
+ ASSERT(!is_the_hole_nan(result));
+ return result;
+}
+
+
+void FixedDoubleArray::set(int index, double value) {
+ ASSERT(map() != HEAP->fixed_cow_array_map() &&
+ map() != HEAP->fixed_array_map());
+ int offset = kHeaderSize + index * kDoubleSize;
+ if (isnan(value)) value = canonical_not_the_hole_nan_as_double();
+ WRITE_DOUBLE_FIELD(this, offset, value);
+}
+
+
+void FixedDoubleArray::set_the_hole(int index) {
+ ASSERT(map() != HEAP->fixed_cow_array_map() &&
+ map() != HEAP->fixed_array_map());
+ int offset = kHeaderSize + index * kDoubleSize;
+ WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
+}
+
+
+bool FixedDoubleArray::is_the_hole(int index) {
+ int offset = kHeaderSize + index * kDoubleSize;
+ return is_the_hole_nan(READ_DOUBLE_FIELD(this, offset));
+}
+
+
+void FixedDoubleArray::Initialize(FixedDoubleArray* from) {
+ int old_length = from->length();
+ ASSERT(old_length < length());
+ OS::MemCopy(FIELD_ADDR(this, kHeaderSize),
+ FIELD_ADDR(from, kHeaderSize),
+ old_length * kDoubleSize);
+ int offset = kHeaderSize + old_length * kDoubleSize;
+ for (int current = from->length(); current < length(); ++current) {
+ WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
+ offset += kDoubleSize;
+ }
+}
+
+
+void FixedDoubleArray::Initialize(FixedArray* from) {
+ int old_length = from->length();
+ ASSERT(old_length < length());
+ for (int i = 0; i < old_length; i++) {
+ Object* hole_or_object = from->get(i);
+ if (hole_or_object->IsTheHole()) {
+ set_the_hole(i);
+ } else {
+ set(i, hole_or_object->Number());
+ }
+ }
+ int offset = kHeaderSize + old_length * kDoubleSize;
+ for (int current = from->length(); current < length(); ++current) {
+ WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
+ offset += kDoubleSize;
+ }
+}
+
+
+void FixedDoubleArray::Initialize(NumberDictionary* from) {
+ int offset = kHeaderSize;
+ for (int current = 0; current < length(); ++current) {
+ WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
+ offset += kDoubleSize;
+ }
+ for (int i = 0; i < from->Capacity(); i++) {
+ Object* key = from->KeyAt(i);
+ if (key->IsNumber()) {
+ uint32_t entry = static_cast<uint32_t>(key->Number());
+ set(entry, from->ValueAt(i)->Number());
+ }
+ }
+}
+
+
WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
- if (Heap::InNewSpace(this)) return SKIP_WRITE_BARRIER;
+ if (GetHeap()->InNewSpace(this)) return SKIP_WRITE_BARRIER;
return UPDATE_WRITE_BARRIER;
}
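
[Editor's note: FixedDoubleArray can store holes inline because it reserves one specific NaN bit pattern for the hole and rewrites every incoming NaN to a different canonical pattern (see the isnan check in set() and the kCanonicalNonHoleNanInt64 assert in the verifier). A sketch of the scheme follows; the two payloads below are illustrative assumptions, not V8's actual constants:]

    // NaN-boxing the hole: a reserved NaN pattern marks a hole, and user
    // NaNs are canonicalized so they can never alias it.
    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    static const uint64_t kHoleNanBits      = 0x7FF7FFFFFFFFFFFFULL;
    static const uint64_t kCanonicalNanBits = 0x7FF8000000000000ULL;

    static uint64_t BitsOf(double d) { uint64_t b; std::memcpy(&b, &d, 8); return b; }
    static double FromBits(uint64_t b) { double d; std::memcpy(&d, &b, 8); return d; }

    struct ToySlot {
      double raw;
      void set(double v) {
        // Canonicalize NaNs on the way in; no stored value can be the hole.
        raw = std::isnan(v) ? FromBits(kCanonicalNanBits) : v;
      }
      void set_the_hole() { raw = FromBits(kHoleNanBits); }
      bool is_the_hole() const { return BitsOf(raw) == kHoleNanBits; }
    };

    int main() {
      ToySlot slot;
      slot.set_the_hole();
      assert(slot.is_the_hole());
      slot.set(std::numeric_limits<double>::quiet_NaN());  // user NaN
      assert(std::isnan(slot.raw) && !slot.is_the_hole());
      slot.set(1.5);
      assert(slot.raw == 1.5 && !slot.is_the_hole());
      return 0;
    }
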
@@ -1483,44 +1701,55 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
void FixedArray::set(int index,
Object* value,
WriteBarrierMode mode) {
- ASSERT(map() != Heap::fixed_cow_array_map());
+ ASSERT(map() != HEAP->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(this, offset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
}
void FixedArray::fast_set(FixedArray* array, int index, Object* value) {
- ASSERT(array->map() != Heap::raw_unchecked_fixed_cow_array_map());
+ ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
- ASSERT(!Heap::InNewSpace(value));
+ ASSERT(!HEAP->InNewSpace(value));
WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
}
void FixedArray::set_undefined(int index) {
- ASSERT(map() != Heap::fixed_cow_array_map());
+ ASSERT(map() != HEAP->fixed_cow_array_map());
+ set_undefined(GetHeap(), index);
+}
+
+
+void FixedArray::set_undefined(Heap* heap, int index) {
ASSERT(index >= 0 && index < this->length());
- ASSERT(!Heap::InNewSpace(Heap::undefined_value()));
+ ASSERT(!heap->InNewSpace(heap->undefined_value()));
WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
- Heap::undefined_value());
+ heap->undefined_value());
}
void FixedArray::set_null(int index) {
- ASSERT(map() != Heap::fixed_cow_array_map());
+ set_null(GetHeap(), index);
+}
+
+
+void FixedArray::set_null(Heap* heap, int index) {
ASSERT(index >= 0 && index < this->length());
- ASSERT(!Heap::InNewSpace(Heap::null_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize, Heap::null_value());
+ ASSERT(!heap->InNewSpace(heap->null_value()));
+ WRITE_FIELD(this, kHeaderSize + index * kPointerSize, heap->null_value());
}
void FixedArray::set_the_hole(int index) {
- ASSERT(map() != Heap::fixed_cow_array_map());
+ ASSERT(map() != HEAP->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
- ASSERT(!Heap::InNewSpace(Heap::the_hole_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize, Heap::the_hole_value());
+ ASSERT(!HEAP->InNewSpace(HEAP->the_hole_value()));
+ WRITE_FIELD(this,
+ kHeaderSize + index * kPointerSize,
+ GetHeap()->the_hole_value());
}
@@ -1531,19 +1760,20 @@ void FixedArray::set_unchecked(int index, Smi* value) {
}
-void FixedArray::set_unchecked(int index,
+void FixedArray::set_unchecked(Heap* heap,
+ int index,
Object* value,
WriteBarrierMode mode) {
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(this, offset, mode);
+ CONDITIONAL_WRITE_BARRIER(heap, this, offset, mode);
}
-void FixedArray::set_null_unchecked(int index) {
+void FixedArray::set_null_unchecked(Heap* heap, int index) {
ASSERT(index >= 0 && index < this->length());
- ASSERT(!Heap::InNewSpace(Heap::null_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize, Heap::null_value());
+ ASSERT(!HEAP->InNewSpace(heap->null_value()));
+ WRITE_FIELD(this, kHeaderSize + index * kPointerSize, heap->null_value());
}
@@ -1553,9 +1783,21 @@ Object** FixedArray::data_start() {
bool DescriptorArray::IsEmpty() {
- ASSERT(this == Heap::empty_descriptor_array() ||
- this->length() > 2);
- return this == Heap::empty_descriptor_array();
+ ASSERT(this->IsSmi() ||
+ this->length() > kFirstIndex ||
+ this == HEAP->empty_descriptor_array());
+ return this->IsSmi() || length() <= kFirstIndex;
+}
+
+
+int DescriptorArray::bit_field3_storage() {
+ Object* storage = READ_FIELD(this, kBitField3StorageOffset);
+ return Smi::cast(storage)->value();
+}
+
+void DescriptorArray::set_bit_field3_storage(int value) {
+ ASSERT(!IsEmpty());
+ WRITE_FIELD(this, kBitField3StorageOffset, Smi::FromInt(value));
}
@@ -1585,10 +1827,10 @@ int DescriptorArray::Search(String* name) {
int DescriptorArray::SearchWithCache(String* name) {
- int number = DescriptorLookupCache::Lookup(this, name);
+ int number = GetIsolate()->descriptor_lookup_cache()->Lookup(this, name);
if (number == DescriptorLookupCache::kAbsent) {
number = Search(name);
- DescriptorLookupCache::Update(this, name, number);
+ GetIsolate()->descriptor_lookup_cache()->Update(this, name, number);
}
return number;
}
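
[Editor's note: SearchWithCache wraps the linear Search in the standard lookup-cache idiom: probe a small direct-mapped cache keyed on (array, name), and on a miss run the slow search and install the answer. A compact sketch of that idiom; the hash function, cache size, and stand-in slow path are assumptions:]

    // Direct-mapped lookup cache in front of a slow search; illustrative
    // assumptions throughout.
    #include <cassert>
    #include <cstdint>

    static const int kAbsent = -2;
    static const int kCacheSize = 64;

    struct Entry { const void* array; const void* name; int result; };
    static Entry cache[kCacheSize];  // Zero-initialized: all misses.

    static int HashPair(const void* a, const void* n) {
      uintptr_t h = reinterpret_cast<uintptr_t>(a) ^
                    reinterpret_cast<uintptr_t>(n);
      return static_cast<int>(h >> 2) & (kCacheSize - 1);
    }

    static int Lookup(const void* array, const void* name) {
      const Entry& e = cache[HashPair(array, name)];
      return (e.array == array && e.name == name) ? e.result : kAbsent;
    }

    static void Update(const void* array, const void* name, int result) {
      Entry e = {array, name, result};
      cache[HashPair(array, name)] = e;
    }

    static int SearchWithCache(const void* array, const void* name) {
      int number = Lookup(array, name);
      if (number == kAbsent) {
        number = 7;  // Stand-in for the slow linear Search().
        Update(array, name, number);
      }
      return number;
    }

    int main() {
      int x = 0, y = 0;
      assert(Lookup(&x, &y) == kAbsent);
      assert(SearchWithCache(&x, &y) == 7);  // Slow path, then cached.
      assert(Lookup(&x, &y) == 7);           // Now a hit.
      return 0;
    }
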
@@ -1636,8 +1878,8 @@ Object* DescriptorArray::GetCallbacksObject(int descriptor_number) {
AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
ASSERT(GetType(descriptor_number) == CALLBACKS);
- Proxy* p = Proxy::cast(GetCallbacksObject(descriptor_number));
- return reinterpret_cast<AccessorDescriptor*>(p->proxy());
+ Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
+ return reinterpret_cast<AccessorDescriptor*>(p->address());
}
@@ -1648,7 +1890,8 @@ bool DescriptorArray::IsProperty(int descriptor_number) {
bool DescriptorArray::IsTransition(int descriptor_number) {
PropertyType t = GetType(descriptor_number);
- return t == MAP_TRANSITION || t == CONSTANT_TRANSITION;
+ return t == MAP_TRANSITION || t == CONSTANT_TRANSITION ||
+ t == EXTERNAL_ARRAY_TRANSITION;
}
@@ -1665,7 +1908,7 @@ bool DescriptorArray::IsDontEnum(int descriptor_number) {
void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
desc->Init(GetKey(descriptor_number),
GetValue(descriptor_number),
- GetDetails(descriptor_number));
+ PropertyDetails(GetDetails(descriptor_number)));
}
@@ -1674,8 +1917,8 @@ void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
ASSERT(descriptor_number < number_of_descriptors());
// Make sure none of the elements in desc are in new space.
- ASSERT(!Heap::InNewSpace(desc->GetKey()));
- ASSERT(!Heap::InNewSpace(desc->GetValue()));
+ ASSERT(!HEAP->InNewSpace(desc->GetKey()));
+ ASSERT(!HEAP->InNewSpace(desc->GetValue()));
fast_set(this, ToKeyIndex(descriptor_number), desc->GetKey());
FixedArray* content_array = GetContentArray();
@@ -1700,6 +1943,30 @@ void DescriptorArray::Swap(int first, int second) {
}
+template<typename Shape, typename Key>
+int HashTable<Shape, Key>::FindEntry(Key key) {
+ return FindEntry(GetIsolate(), key);
+}
+
+
+// Find entry for key otherwise return kNotFound.
+template<typename Shape, typename Key>
+int HashTable<Shape, Key>::FindEntry(Isolate* isolate, Key key) {
+ uint32_t capacity = Capacity();
+ uint32_t entry = FirstProbe(Shape::Hash(key), capacity);
+ uint32_t count = 1;
+ // EnsureCapacity will guarantee the hash table is never full.
+ while (true) {
+ Object* element = KeyAt(entry);
+ if (element == isolate->heap()->undefined_value()) break; // Empty entry.
+ if (element != isolate->heap()->null_value() &&
+ Shape::IsMatch(key, element)) return entry;
+ entry = NextProbe(entry, count++, capacity);
+ }
+ return kNotFound;
+}
+
+
bool NumberDictionary::requires_slow_elements() {
Object* max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi()) return false;
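
[Editor's note: the new HashTable::FindEntry above is an open-addressing probe over a power-of-two table where one sentinel (undefined) ends the probe sequence and another (null, a tombstone) is skipped but does not end it; EnsureCapacity guarantees an empty slot always exists, so the loop terminates. A standalone sketch with integer keys and assumed sentinel values, using the same (entry + count) probe step as NextProbe:]

    // Open-addressed FindEntry with empty/deleted sentinels; sentinel
    // values are illustrative assumptions.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    static const int32_t kEmpty = -1;    // Stands in for undefined_value().
    static const int32_t kDeleted = -2;  // Stands in for null_value().
    static const int kNotFound = -1;

    static int FindEntry(const std::vector<int32_t>& table, uint32_t hash,
                         int32_t key) {
      uint32_t capacity = table.size();        // Must be a power of two.
      uint32_t entry = hash & (capacity - 1);  // FirstProbe.
      uint32_t count = 1;
      // The table is never full, so an empty slot is always reachable.
      while (true) {
        int32_t element = table[entry];
        if (element == kEmpty) break;          // End of probe sequence.
        if (element != kDeleted && element == key) return entry;
        entry = (entry + count++) & (capacity - 1);  // NextProbe.
      }
      return kNotFound;
    }

    int main() {
      std::vector<int32_t> table(8, kEmpty);
      table[3] = 42;        // Pretend 42 hashes to bucket 3.
      table[4] = kDeleted;  // A tombstone must not stop the probe.
      table[6] = 7;         // 7 also hashed to 3, displaced: 3 -> 4 -> 6.
      assert(FindEntry(table, 3, 42) == 3);
      assert(FindEntry(table, 3, 7) == 6);
      assert(FindEntry(table, 3, 99) == kNotFound);
      return 0;
    }
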
@@ -1725,6 +1992,7 @@ void NumberDictionary::set_requires_slow_elements() {
CAST_ACCESSOR(FixedArray)
+CAST_ACCESSOR(FixedDoubleArray)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
@@ -1733,6 +2001,7 @@ CAST_ACCESSOR(JSFunctionResultCache)
CAST_ACCESSOR(NormalizedMapCache)
CAST_ACCESSOR(CompilationCacheTable)
CAST_ACCESSOR(CodeCacheHashTable)
+CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
CAST_ACCESSOR(MapCache)
CAST_ACCESSOR(String)
CAST_ACCESSOR(SeqString)
@@ -1742,6 +2011,7 @@ CAST_ACCESSOR(ConsString)
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalAsciiString)
CAST_ACCESSOR(ExternalTwoByteString)
+CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSObject)
CAST_ACCESSOR(Smi)
CAST_ACCESSOR(HeapObject)
@@ -1758,9 +2028,10 @@ CAST_ACCESSOR(JSBuiltinsObject)
CAST_ACCESSOR(Code)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSRegExp)
-CAST_ACCESSOR(Proxy)
+CAST_ACCESSOR(JSProxy)
+CAST_ACCESSOR(JSFunctionProxy)
+CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(ByteArray)
-CAST_ACCESSOR(PixelArray)
CAST_ACCESSOR(ExternalArray)
CAST_ACCESSOR(ExternalByteArray)
CAST_ACCESSOR(ExternalUnsignedByteArray)
@@ -1769,6 +2040,8 @@ CAST_ACCESSOR(ExternalUnsignedShortArray)
CAST_ACCESSOR(ExternalIntArray)
CAST_ACCESSOR(ExternalUnsignedIntArray)
CAST_ACCESSOR(ExternalFloatArray)
+CAST_ACCESSOR(ExternalDoubleArray)
+CAST_ACCESSOR(ExternalPixelArray)
CAST_ACCESSOR(Struct)
@@ -1784,10 +2057,11 @@ HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) {
}
-SMI_ACCESSORS(FixedArray, length, kLengthOffset)
+SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
SMI_ACCESSORS(ByteArray, length, kLengthOffset)
-INT_ACCESSORS(PixelArray, length, kLengthOffset)
+// TODO(1493): Investigate if it's possible to s/INT/SMI/ here (and
+// subsequently unify H{Fixed,External}ArrayLength).
INT_ACCESSORS(ExternalArray, length, kLengthOffset)
@@ -1947,7 +2221,7 @@ Object* ConsString::unchecked_first() {
void ConsString::set_first(String* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kFirstOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kFirstOffset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, mode);
}
@@ -1963,7 +2237,7 @@ Object* ConsString::unchecked_second() {
void ConsString::set_second(String* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kSecondOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kSecondOffset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, mode);
}
@@ -1999,7 +2273,7 @@ void JSFunctionResultCache::Clear() {
int cache_size = size();
Object** entries_start = RawField(this, OffsetOfElementAt(kEntriesIndex));
MemsetPointer(entries_start,
- Heap::the_hole_value(),
+ GetHeap()->the_hole_value(),
cache_size - kEntriesIndex);
MakeZeroSize();
}
@@ -2054,28 +2328,21 @@ Address ByteArray::GetDataStartAddress() {
}
-uint8_t* PixelArray::external_pointer() {
- intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
- return reinterpret_cast<uint8_t*>(ptr);
-}
-
-
-void PixelArray::set_external_pointer(uint8_t* value, WriteBarrierMode mode) {
- intptr_t ptr = reinterpret_cast<intptr_t>(value);
- WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
+uint8_t* ExternalPixelArray::external_pixel_pointer() {
+ return reinterpret_cast<uint8_t*>(external_pointer());
}
-uint8_t PixelArray::get(int index) {
+uint8_t ExternalPixelArray::get(int index) {
ASSERT((index >= 0) && (index < this->length()));
- uint8_t* ptr = external_pointer();
+ uint8_t* ptr = external_pixel_pointer();
return ptr[index];
}
-void PixelArray::set(int index, uint8_t value) {
+void ExternalPixelArray::set(int index, uint8_t value) {
ASSERT((index >= 0) && (index < this->length()));
- uint8_t* ptr = external_pointer();
+ uint8_t* ptr = external_pixel_pointer();
ptr[index] = value;
}
@@ -2190,6 +2457,20 @@ void ExternalFloatArray::set(int index, float value) {
}
+double ExternalDoubleArray::get(int index) {
+ ASSERT((index >= 0) && (index < this->length()));
+ double* ptr = static_cast<double*>(external_pointer());
+ return ptr[index];
+}
+
+
+void ExternalDoubleArray::set(int index, double value) {
+ ASSERT((index >= 0) && (index < this->length()));
+ double* ptr = static_cast<double*>(external_pointer());
+ ptr[index] = value;
+}
+
+
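ExternalDoubleArray::get/set above index straight into off-heap memory through the pointer stored on the object. A hypothetical stand-alone view class showing the same shape (not V8's API; the backing store is owned elsewhere):

    #include <cassert>

    class ExternalDoubleView {
     public:
      ExternalDoubleView(double* data, int length)
          : data_(data), length_(length) {}
      double get(int index) const {
        assert(index >= 0 && index < length_);
        return data_[index];
      }
      void set(int index, double value) {
        assert(index >= 0 && index < length_);
        data_[index] = value;
      }
     private:
      double* data_;   // backing store lives outside the GC heap
      int length_;
    };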
int Map::visitor_id() {
return READ_BYTE_FIELD(this, kVisitorIdOffset);
}
@@ -2237,6 +2518,10 @@ int HeapObject::SizeFromMap(Map* map) {
return SeqTwoByteString::SizeFor(
reinterpret_cast<SeqTwoByteString*>(this)->length());
}
+ if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
+ return FixedDoubleArray::SizeFor(
+ reinterpret_cast<FixedDoubleArray*>(this)->length());
+ }
ASSERT(instance_type == CODE_TYPE);
return reinterpret_cast<Code*>(this)->CodeSize();
}
@@ -2374,14 +2659,14 @@ bool Map::attached_to_shared_function_info() {
void Map::set_is_shared(bool value) {
if (value) {
- set_bit_field2(bit_field2() | (1 << kIsShared));
+ set_bit_field3(bit_field3() | (1 << kIsShared));
} else {
- set_bit_field2(bit_field2() & ~(1 << kIsShared));
+ set_bit_field3(bit_field3() & ~(1 << kIsShared));
}
}
bool Map::is_shared() {
- return ((1 << kIsShared) & bit_field2()) != 0;
+ return ((1 << kIsShared) & bit_field3()) != 0;
}
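
set_is_shared/is_shared above move the flag from bit_field2 to the new bit_field3, but the idiom is the ordinary shift-and-mask single-bit accessor. A minimal stand-alone version (kIsShared is just a bit index):

    static const int kIsShared = 0;  // bit position within the field

    static int SetFlag(int field, int bit, bool value) {
      return value ? (field | (1 << bit)) : (field & ~(1 << bit));
    }

    static bool GetFlag(int field, int bit) {
      return ((1 << bit) & field) != 0;
    }

    // Usage: bit_field3 = SetFlag(bit_field3, kIsShared, true);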
@@ -2390,6 +2675,12 @@ JSFunction* Map::unchecked_constructor() {
}
+FixedArray* Map::unchecked_prototype_transitions() {
+ return reinterpret_cast<FixedArray*>(
+ READ_FIELD(this, kPrototypeTransitionsOffset));
+}
+
+
Code::Flags Code::flags() {
return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
}
@@ -2435,7 +2726,6 @@ Code::ExtraICState Code::extra_ic_state() {
PropertyType Code::type() {
- ASSERT(ic_state() == MONOMORPHIC);
return ExtractTypeFromFlags(flags());
}
@@ -2448,8 +2738,8 @@ int Code::arguments_count() {
int Code::major_key() {
ASSERT(kind() == STUB ||
+ kind() == UNARY_OP_IC ||
kind() == BINARY_OP_IC ||
- kind() == TYPE_RECORDING_BINARY_OP_IC ||
kind() == COMPARE_IC);
return READ_BYTE_FIELD(this, kStubMajorKeyOffset);
}
@@ -2457,8 +2747,8 @@ int Code::major_key() {
void Code::set_major_key(int major) {
ASSERT(kind() == STUB ||
+ kind() == UNARY_OP_IC ||
kind() == BINARY_OP_IC ||
- kind() == TYPE_RECORDING_BINARY_OP_IC ||
kind() == COMPARE_IC);
ASSERT(0 <= major && major < 256);
WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
@@ -2553,38 +2843,38 @@ void Code::set_check_type(CheckType value) {
}
-byte Code::binary_op_type() {
- ASSERT(is_binary_op_stub());
- return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
+byte Code::unary_op_type() {
+ ASSERT(is_unary_op_stub());
+ return READ_BYTE_FIELD(this, kUnaryOpTypeOffset);
}
-void Code::set_binary_op_type(byte value) {
- ASSERT(is_binary_op_stub());
- WRITE_BYTE_FIELD(this, kBinaryOpTypeOffset, value);
+void Code::set_unary_op_type(byte value) {
+ ASSERT(is_unary_op_stub());
+ WRITE_BYTE_FIELD(this, kUnaryOpTypeOffset, value);
}
-byte Code::type_recording_binary_op_type() {
- ASSERT(is_type_recording_binary_op_stub());
+byte Code::binary_op_type() {
+ ASSERT(is_binary_op_stub());
return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
}
-void Code::set_type_recording_binary_op_type(byte value) {
- ASSERT(is_type_recording_binary_op_stub());
+void Code::set_binary_op_type(byte value) {
+ ASSERT(is_binary_op_stub());
WRITE_BYTE_FIELD(this, kBinaryOpTypeOffset, value);
}
-byte Code::type_recording_binary_op_result_type() {
- ASSERT(is_type_recording_binary_op_stub());
+byte Code::binary_op_result_type() {
+ ASSERT(is_binary_op_stub());
return READ_BYTE_FIELD(this, kBinaryOpReturnTypeOffset);
}
-void Code::set_type_recording_binary_op_result_type(byte value) {
- ASSERT(is_type_recording_binary_op_stub());
+void Code::set_binary_op_result_type(byte value) {
+ ASSERT(is_binary_op_stub());
WRITE_BYTE_FIELD(this, kBinaryOpReturnTypeOffset, value);
}
@@ -2614,11 +2904,10 @@ Code::Flags Code::ComputeFlags(Kind kind,
PropertyType type,
int argc,
InlineCacheHolderFlag holder) {
- // Extra IC state is only allowed for monomorphic call IC stubs
- // or for store IC stubs.
+ // Extra IC state is only allowed for call IC stubs or for store IC
+ // stubs.
ASSERT(extra_ic_state == kNoExtraICState ||
- (kind == CALL_IC && (ic_state == MONOMORPHIC ||
- ic_state == MONOMORPHIC_PROTOTYPE_FAILURE)) ||
+ (kind == CALL_IC) ||
(kind == STORE_IC) ||
(kind == KEYED_STORE_IC));
// Compute the bit mask.
@@ -2710,6 +2999,48 @@ Code* Code::GetCodeFromTargetAddress(Address address) {
}
+Isolate* Map::isolate() {
+ return heap()->isolate();
+}
+
+
+Heap* Map::heap() {
+ // NOTE: The address() helper is deliberately not used, to save one
+ // instruction.
+ Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
+ ASSERT(heap != NULL);
+ ASSERT(heap->isolate() == Isolate::Current());
+ return heap;
+}
+
+
+Heap* Code::heap() {
+ // NOTE: The address() helper is deliberately not used, to save one
+ // instruction.
+ Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
+ ASSERT(heap != NULL);
+ ASSERT(heap->isolate() == Isolate::Current());
+ return heap;
+}
+
+
+Isolate* Code::isolate() {
+ return heap()->isolate();
+}
+
+
+Heap* JSGlobalPropertyCell::heap() {
+ // NOTE: The address() helper is deliberately not used, to save one
+ // instruction.
+ Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
+ ASSERT(heap != NULL);
+ ASSERT(heap->isolate() == Isolate::Current());
+ return heap;
+}
+
+
+Isolate* JSGlobalPropertyCell::isolate() {
+ return heap()->isolate();
+}
+
+
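The new Map::heap/Code::heap accessors above recover the owning Heap by masking the object's own address down to its page, where a back-pointer is stored. A sketch of that trick under an assumed page size and layout (neither matches V8's exact values):

    #include <cstdint>

    struct Heap;

    struct Page {
      Heap* heap_;  // back-pointer written when the page is created
      static const uintptr_t kPageSize = 1u << 20;  // assumed alignment
      static Page* FromAddress(uintptr_t addr) {
        // Pages are kPageSize-aligned, so masking the low bits of any
        // interior address yields the page header.
        return reinterpret_cast<Page*>(addr & ~(kPageSize - 1));
      }
    };

    Heap* HeapOf(uintptr_t object_address) {
      return Page::FromAddress(object_address)->heap_;
    }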
Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
return HeapObject::
FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize);
@@ -2722,9 +3053,9 @@ Object* Map::prototype() {
void Map::set_prototype(Object* value, WriteBarrierMode mode) {
- ASSERT(value->IsNull() || value->IsJSObject());
+ ASSERT(value->IsNull() || value->IsJSReceiver());
WRITE_FIELD(this, kPrototypeOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kPrototypeOffset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, mode);
}
@@ -2735,49 +3066,122 @@ MaybeObject* Map::GetFastElementsMap() {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
Map* new_map = Map::cast(obj);
- new_map->set_has_fast_elements(true);
- Counters::map_slow_to_fast_elements.Increment();
+ new_map->set_elements_kind(JSObject::FAST_ELEMENTS);
+ isolate()->counters()->map_to_fast_elements()->Increment();
return new_map;
}
-MaybeObject* Map::GetSlowElementsMap() {
- if (!has_fast_elements()) return this;
+MaybeObject* Map::GetFastDoubleElementsMap() {
+ if (has_fast_double_elements()) return this;
Object* obj;
{ MaybeObject* maybe_obj = CopyDropTransitions();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
Map* new_map = Map::cast(obj);
- new_map->set_has_fast_elements(false);
- Counters::map_fast_to_slow_elements.Increment();
+ new_map->set_elements_kind(JSObject::FAST_DOUBLE_ELEMENTS);
+ isolate()->counters()->map_to_fast_double_elements()->Increment();
return new_map;
}
-MaybeObject* Map::GetPixelArrayElementsMap() {
- if (has_pixel_array_elements()) return this;
- // TODO(danno): Special case empty object map (or most common case)
- // to return a pre-canned pixel array map.
+MaybeObject* Map::GetSlowElementsMap() {
+ if (!has_fast_elements() && !has_fast_double_elements()) return this;
Object* obj;
{ MaybeObject* maybe_obj = CopyDropTransitions();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
Map* new_map = Map::cast(obj);
- new_map->set_has_fast_elements(false);
- new_map->set_has_pixel_array_elements(true);
- Counters::map_to_pixel_array_elements.Increment();
+ new_map->set_elements_kind(JSObject::DICTIONARY_ELEMENTS);
+ isolate()->counters()->map_to_slow_elements()->Increment();
return new_map;
}
-ACCESSORS(Map, instance_descriptors, DescriptorArray,
- kInstanceDescriptorsOffset)
+DescriptorArray* Map::instance_descriptors() {
+ Object* object = READ_FIELD(this, kInstanceDescriptorsOrBitField3Offset);
+ if (object->IsSmi()) {
+ return HEAP->empty_descriptor_array();
+ } else {
+ return DescriptorArray::cast(object);
+ }
+}
+
+
+void Map::init_instance_descriptors() {
+ WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, Smi::FromInt(0));
+}
+
+
+void Map::clear_instance_descriptors() {
+ Object* object = READ_FIELD(this,
+ kInstanceDescriptorsOrBitField3Offset);
+ if (!object->IsSmi()) {
+ WRITE_FIELD(
+ this,
+ kInstanceDescriptorsOrBitField3Offset,
+ Smi::FromInt(DescriptorArray::cast(object)->bit_field3_storage()));
+ }
+}
+
+
+void Map::set_instance_descriptors(DescriptorArray* value,
+ WriteBarrierMode mode) {
+ Object* object = READ_FIELD(this,
+ kInstanceDescriptorsOrBitField3Offset);
+ if (value == isolate()->heap()->empty_descriptor_array()) {
+ clear_instance_descriptors();
+ return;
+ } else {
+ if (object->IsSmi()) {
+ value->set_bit_field3_storage(Smi::cast(object)->value());
+ } else {
+ value->set_bit_field3_storage(
+ DescriptorArray::cast(object)->bit_field3_storage());
+ }
+ }
+ ASSERT(!is_shared());
+ WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(),
+ this,
+ kInstanceDescriptorsOrBitField3Offset,
+ mode);
+}
+
+
+int Map::bit_field3() {
+ Object* object = READ_FIELD(this,
+ kInstanceDescriptorsOrBitField3Offset);
+ if (object->IsSmi()) {
+ return Smi::cast(object)->value();
+ } else {
+ return DescriptorArray::cast(object)->bit_field3_storage();
+ }
+}
+
+
+void Map::set_bit_field3(int value) {
+ ASSERT(Smi::IsValid(value));
+ Object* object = READ_FIELD(this,
+ kInstanceDescriptorsOrBitField3Offset);
+ if (object->IsSmi()) {
+ WRITE_FIELD(this,
+ kInstanceDescriptorsOrBitField3Offset,
+ Smi::FromInt(value));
+ } else {
+ DescriptorArray::cast(object)->set_bit_field3_storage(value);
+ }
+}
+
+
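The accessors above overload a single Map slot: it holds either a Smi-encoded bit_field3 (when the map has no instance descriptors) or a pointer to the DescriptorArray, which then keeps bit_field3 in its spare storage slot. A hypothetical stand-alone model of that dispatch, with low-bit tagging standing in for V8's Smi/pointer distinction:

    #include <cstdint>

    struct DescriptorArray { int bit_field3_storage; };

    // One word that is either an inline integer (low bit 0) or a pointer
    // to a DescriptorArray (low bit 1), a simplification of V8's tagging.
    class DescriptorsOrBitField3 {
     public:
      bool holds_int() const { return (word_ & 1) == 0; }
      int bit_field3() const {
        if (holds_int()) {
          return static_cast<int>(static_cast<intptr_t>(word_) >> 1);
        }
        return array()->bit_field3_storage;
      }
      void set_bit_field3(int value) {
        if (holds_int()) word_ = static_cast<uintptr_t>(value) << 1;
        else array()->bit_field3_storage = value;
      }
      void set_array(DescriptorArray* a) {
        a->bit_field3_storage = bit_field3();  // migrate the bits first
        word_ = reinterpret_cast<uintptr_t>(a) | 1;
      }
     private:
      DescriptorArray* array() const {
        return reinterpret_cast<DescriptorArray*>(word_ & ~uintptr_t(1));
      }
      uintptr_t word_ = 0;
    };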
ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
+ACCESSORS(Map, prototype_transitions, FixedArray, kPrototypeTransitionsOffset)
ACCESSORS(Map, constructor, Object, kConstructorOffset)
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
ACCESSORS(JSFunction, literals, FixedArray, kLiteralsOffset)
-ACCESSORS(JSFunction, next_function_link, Object, kNextFunctionLinkOffset)
+ACCESSORS_GCSAFE(JSFunction, next_function_link, Object,
+ kNextFunctionLinkOffset)
ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
@@ -2828,6 +3232,8 @@ ACCESSORS(FunctionTemplateInfo, instance_call_handler, Object,
ACCESSORS(FunctionTemplateInfo, access_check_info, Object,
kAccessCheckInfoOffset)
ACCESSORS(FunctionTemplateInfo, flag, Smi, kFlagOffset)
+ACCESSORS(FunctionTemplateInfo, prototype_attributes, Smi,
+ kPrototypeAttributesOffset)
ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
@@ -2845,7 +3251,7 @@ ACCESSORS(Script, line_offset, Smi, kLineOffsetOffset)
ACCESSORS(Script, column_offset, Smi, kColumnOffsetOffset)
ACCESSORS(Script, data, Object, kDataOffset)
ACCESSORS(Script, context_data, Object, kContextOffset)
-ACCESSORS(Script, wrapper, Proxy, kWrapperOffset)
+ACCESSORS(Script, wrapper, Foreign, kWrapperOffset)
ACCESSORS(Script, type, Smi, kTypeOffset)
ACCESSORS(Script, compilation_type, Smi, kCompilationTypeOffset)
ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
@@ -2866,8 +3272,8 @@ ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
#endif
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
-ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
+ACCESSORS_GCSAFE(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
+ACCESSORS_GCSAFE(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
kInstanceClassNameOffset)
ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
@@ -2886,17 +3292,22 @@ BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
kIsExpressionBit)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
kIsTopLevelBit)
-BOOL_GETTER(SharedFunctionInfo, compiler_hints,
+BOOL_GETTER(SharedFunctionInfo,
+ compiler_hints,
has_only_simple_this_property_assignments,
kHasOnlySimpleThisPropertyAssignments)
BOOL_ACCESSORS(SharedFunctionInfo,
compiler_hints,
- try_full_codegen,
- kTryFullCodegen)
-BOOL_ACCESSORS(SharedFunctionInfo,
- compiler_hints,
allows_lazy_compilation,
kAllowLazyCompilation)
+BOOL_ACCESSORS(SharedFunctionInfo,
+ compiler_hints,
+ uses_arguments,
+ kUsesArguments)
+BOOL_ACCESSORS(SharedFunctionInfo,
+ compiler_hints,
+ has_duplicate_parameters,
+ kHasDuplicateParameters)
#if V8_HOST_ARCH_32_BIT
@@ -2980,28 +3391,21 @@ void SharedFunctionInfo::set_construction_count(int value) {
}
-bool SharedFunctionInfo::live_objects_may_exist() {
- return (compiler_hints() & (1 << kLiveObjectsMayExist)) != 0;
-}
-
-
-void SharedFunctionInfo::set_live_objects_may_exist(bool value) {
- if (value) {
- set_compiler_hints(compiler_hints() | (1 << kLiveObjectsMayExist));
- } else {
- set_compiler_hints(compiler_hints() & ~(1 << kLiveObjectsMayExist));
- }
-}
+BOOL_ACCESSORS(SharedFunctionInfo,
+ compiler_hints,
+ live_objects_may_exist,
+ kLiveObjectsMayExist)
bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() {
- return initial_map() != Heap::undefined_value();
+ return initial_map() != HEAP->undefined_value();
}
-bool SharedFunctionInfo::optimization_disabled() {
- return BooleanBit::get(compiler_hints(), kOptimizationDisabled);
-}
+BOOL_GETTER(SharedFunctionInfo,
+ compiler_hints,
+ optimization_disabled,
+ kOptimizationDisabled)
void SharedFunctionInfo::set_optimization_disabled(bool disable) {
@@ -3016,14 +3420,32 @@ void SharedFunctionInfo::set_optimization_disabled(bool disable) {
}
-bool SharedFunctionInfo::strict_mode() {
- return BooleanBit::get(compiler_hints(), kStrictModeFunction);
+BOOL_ACCESSORS(SharedFunctionInfo,
+ compiler_hints,
+ strict_mode,
+ kStrictModeFunction)
+
+
+bool SharedFunctionInfo::native() {
+ return BooleanBit::get(compiler_hints(), kNative);
+}
+
+
+void SharedFunctionInfo::set_native(bool value) {
+ set_compiler_hints(BooleanBit::set(compiler_hints(),
+ kNative,
+ value));
+}
+
+
+bool SharedFunctionInfo::bound() {
+ return BooleanBit::get(compiler_hints(), kBoundFunction);
}
-void SharedFunctionInfo::set_strict_mode(bool value) {
+void SharedFunctionInfo::set_bound(bool value) {
set_compiler_hints(BooleanBit::set(compiler_hints(),
- kStrictModeFunction,
+ kBoundFunction,
value));
}
@@ -3031,6 +3453,8 @@ void SharedFunctionInfo::set_strict_mode(bool value) {
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
+ACCESSORS(PolymorphicCodeCache, cache, Object, kCacheOffset)
+
bool Script::HasValidSource() {
Object* src = this->source();
if (!src->IsString()) return true;
@@ -3074,7 +3498,7 @@ Code* SharedFunctionInfo::unchecked_code() {
void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kCodeOffset, value);
- CONDITIONAL_WRITE_BARRIER(this, kCodeOffset, mode);
+ ASSERT(!Isolate::Current()->heap()->InNewSpace(value));
}
@@ -3087,7 +3511,7 @@ SerializedScopeInfo* SharedFunctionInfo::scope_info() {
void SharedFunctionInfo::set_scope_info(SerializedScopeInfo* value,
WriteBarrierMode mode) {
WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value));
- CONDITIONAL_WRITE_BARRIER(this, kScopeInfoOffset, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kScopeInfoOffset, mode);
}
@@ -3102,7 +3526,8 @@ void SharedFunctionInfo::set_deopt_counter(Smi* value) {
bool SharedFunctionInfo::is_compiled() {
- return code() != Builtins::builtin(Builtins::LazyCompile);
+ return code() !=
+ Isolate::Current()->builtins()->builtin(Builtins::kLazyCompile);
}
@@ -3161,8 +3586,13 @@ bool JSFunction::IsOptimized() {
}
+bool JSFunction::IsOptimizable() {
+ return code()->kind() == Code::FUNCTION && code()->optimizable();
+}
+
+
bool JSFunction::IsMarkedForLazyRecompilation() {
- return code() == Builtins::builtin(Builtins::LazyRecompile);
+ return code() == GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile);
}
@@ -3179,7 +3609,7 @@ Code* JSFunction::unchecked_code() {
void JSFunction::set_code(Code* value) {
// Skip the write barrier because code is never in new space.
- ASSERT(!Heap::InNewSpace(value));
+ ASSERT(!HEAP->InNewSpace(value));
Address entry = value->entry();
WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
}
@@ -3219,7 +3649,7 @@ SharedFunctionInfo* JSFunction::unchecked_shared() {
void JSFunction::set_context(Object* value) {
- ASSERT(value == Heap::undefined_value() || value->IsContext());
+ ASSERT(value->IsUndefined() || value->IsContext());
WRITE_FIELD(this, kContextOffset, value);
WRITE_BARRIER(this, kContextOffset);
}
@@ -3276,7 +3706,7 @@ bool JSFunction::should_have_prototype() {
bool JSFunction::is_compiled() {
- return code() != Builtins::builtin(Builtins::LazyCompile);
+ return code() != GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
}
@@ -3309,17 +3739,20 @@ void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id,
Code* value) {
ASSERT(id < kJSBuiltinsCount); // id is unsigned.
WRITE_FIELD(this, OffsetOfCodeWithId(id), value);
- ASSERT(!Heap::InNewSpace(value));
+ ASSERT(!HEAP->InNewSpace(value));
}
-Address Proxy::proxy() {
- return AddressFrom<Address>(READ_INTPTR_FIELD(this, kProxyOffset));
+ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
+
+
+Address Foreign::address() {
+ return AddressFrom<Address>(READ_INTPTR_FIELD(this, kAddressOffset));
}
-void Proxy::set_proxy(Address value) {
- WRITE_INTPTR_FIELD(this, kProxyOffset, OffsetFrom(value));
+void Foreign::set_address(Address value) {
+ WRITE_INTPTR_FIELD(this, kAddressOffset, OffsetFrom(value));
}
@@ -3352,6 +3785,8 @@ JSMessageObject* JSMessageObject::cast(Object* obj) {
INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
+ACCESSORS(Code, next_code_flushing_candidate,
+ Object, kNextCodeFlushingCandidateOffset)
byte* Code::instruction_start() {
@@ -3458,39 +3893,17 @@ void JSRegExp::SetDataAt(int index, Object* value) {
JSObject::ElementsKind JSObject::GetElementsKind() {
- if (map()->has_fast_elements()) {
- ASSERT(elements()->map() == Heap::fixed_array_map() ||
- elements()->map() == Heap::fixed_cow_array_map());
- return FAST_ELEMENTS;
- }
- HeapObject* array = elements();
- if (array->IsFixedArray()) {
- // FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a
- // FixedArray, but FAST_ELEMENTS is already handled above.
- ASSERT(array->IsDictionary());
- return DICTIONARY_ELEMENTS;
- }
- if (array->IsExternalArray()) {
- switch (array->map()->instance_type()) {
- case EXTERNAL_BYTE_ARRAY_TYPE:
- return EXTERNAL_BYTE_ELEMENTS;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- return EXTERNAL_SHORT_ELEMENTS;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
- case EXTERNAL_INT_ARRAY_TYPE:
- return EXTERNAL_INT_ELEMENTS;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_INT_ELEMENTS;
- default:
- ASSERT(array->map()->instance_type() == EXTERNAL_FLOAT_ARRAY_TYPE);
- return EXTERNAL_FLOAT_ELEMENTS;
- }
- }
- ASSERT(array->IsPixelArray());
- return PIXEL_ELEMENTS;
+ ElementsKind kind = map()->elements_kind();
+ ASSERT((kind == FAST_ELEMENTS &&
+ (elements()->map() == GetHeap()->fixed_array_map() ||
+ elements()->map() == GetHeap()->fixed_cow_array_map())) ||
+ (kind == FAST_DOUBLE_ELEMENTS &&
+ elements()->IsFixedDoubleArray()) ||
+ (kind == DICTIONARY_ELEMENTS &&
+ elements()->IsFixedArray() &&
+ elements()->IsDictionary()) ||
+ (kind > DICTIONARY_ELEMENTS));
+ return kind;
}
@@ -3499,60 +3912,46 @@ bool JSObject::HasFastElements() {
}
-bool JSObject::HasDictionaryElements() {
- return GetElementsKind() == DICTIONARY_ELEMENTS;
+bool JSObject::HasFastDoubleElements() {
+ return GetElementsKind() == FAST_DOUBLE_ELEMENTS;
}
-bool JSObject::HasPixelElements() {
- return GetElementsKind() == PIXEL_ELEMENTS;
+bool JSObject::HasDictionaryElements() {
+ return GetElementsKind() == DICTIONARY_ELEMENTS;
}
bool JSObject::HasExternalArrayElements() {
- return (HasExternalByteElements() ||
- HasExternalUnsignedByteElements() ||
- HasExternalShortElements() ||
- HasExternalUnsignedShortElements() ||
- HasExternalIntElements() ||
- HasExternalUnsignedIntElements() ||
- HasExternalFloatElements());
-}
-
-
-bool JSObject::HasExternalByteElements() {
- return GetElementsKind() == EXTERNAL_BYTE_ELEMENTS;
-}
-
-
-bool JSObject::HasExternalUnsignedByteElements() {
- return GetElementsKind() == EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
-}
-
-
-bool JSObject::HasExternalShortElements() {
- return GetElementsKind() == EXTERNAL_SHORT_ELEMENTS;
-}
-
-
-bool JSObject::HasExternalUnsignedShortElements() {
- return GetElementsKind() == EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
-}
-
-
-bool JSObject::HasExternalIntElements() {
- return GetElementsKind() == EXTERNAL_INT_ELEMENTS;
+ HeapObject* array = elements();
+ ASSERT(array != NULL);
+ return array->IsExternalArray();
}
-bool JSObject::HasExternalUnsignedIntElements() {
- return GetElementsKind() == EXTERNAL_UNSIGNED_INT_ELEMENTS;
+#define EXTERNAL_ELEMENTS_CHECK(name, type) \
+bool JSObject::HasExternal##name##Elements() { \
+ HeapObject* array = elements(); \
+ ASSERT(array != NULL); \
+ if (!array->IsHeapObject()) \
+ return false; \
+ return array->map()->instance_type() == type; \
}
-bool JSObject::HasExternalFloatElements() {
- return GetElementsKind() == EXTERNAL_FLOAT_ELEMENTS;
-}
+EXTERNAL_ELEMENTS_CHECK(Byte, EXTERNAL_BYTE_ARRAY_TYPE)
+EXTERNAL_ELEMENTS_CHECK(UnsignedByte, EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE)
+EXTERNAL_ELEMENTS_CHECK(Short, EXTERNAL_SHORT_ARRAY_TYPE)
+EXTERNAL_ELEMENTS_CHECK(UnsignedShort,
+ EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE)
+EXTERNAL_ELEMENTS_CHECK(Int, EXTERNAL_INT_ARRAY_TYPE)
+EXTERNAL_ELEMENTS_CHECK(UnsignedInt,
+ EXTERNAL_UNSIGNED_INT_ARRAY_TYPE)
+EXTERNAL_ELEMENTS_CHECK(Float,
+ EXTERNAL_FLOAT_ARRAY_TYPE)
+EXTERNAL_ELEMENTS_CHECK(Double,
+ EXTERNAL_DOUBLE_ARRAY_TYPE)
+EXTERNAL_ELEMENTS_CHECK(Pixel, EXTERNAL_PIXEL_ARRAY_TYPE)
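The EXTERNAL_ELEMENTS_CHECK macro above generates one Has...Elements predicate per external array type in place of seven hand-written bodies. A toy stand-alone version of the same macro-generation technique (types invented):

    enum InstanceType { BYTE_ARRAY_T, SHORT_ARRAY_T, INT_ARRAY_T };
    struct Elements { InstanceType type; };

    #define ELEMENTS_CHECK(Name, TYPE) \
      static bool Has##Name##Elements(const Elements& e) { \
        return e.type == TYPE; \
      }

    ELEMENTS_CHECK(Byte, BYTE_ARRAY_T)
    ELEMENTS_CHECK(Short, SHORT_ARRAY_T)
    ELEMENTS_CHECK(Int, INT_ARRAY_T)
    #undef ELEMENTS_CHECK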
bool JSObject::HasNamedInterceptor() {
@@ -3567,7 +3966,7 @@ bool JSObject::HasIndexedInterceptor() {
bool JSObject::AllowsSetElementsLength() {
bool result = elements()->IsFixedArray();
- ASSERT(result == (!HasPixelElements() && !HasExternalArrayElements()));
+ ASSERT(result == !HasExternalArrayElements());
return result;
}
@@ -3575,16 +3974,17 @@ bool JSObject::AllowsSetElementsLength() {
MaybeObject* JSObject::EnsureWritableFastElements() {
ASSERT(HasFastElements());
FixedArray* elems = FixedArray::cast(elements());
- if (elems->map() != Heap::fixed_cow_array_map()) return elems;
+ Isolate* isolate = GetIsolate();
+ if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
Object* writable_elems;
- { MaybeObject* maybe_writable_elems =
- Heap::CopyFixedArrayWithMap(elems, Heap::fixed_array_map());
+ { MaybeObject* maybe_writable_elems = isolate->heap()->CopyFixedArrayWithMap(
+ elems, isolate->heap()->fixed_array_map());
if (!maybe_writable_elems->ToObject(&writable_elems)) {
return maybe_writable_elems;
}
}
set_elements(FixedArray::cast(writable_elems));
- Counters::cow_arrays_converted.Increment();
+ isolate->counters()->cow_arrays_converted()->Increment();
return writable_elems;
}
@@ -3685,6 +4085,22 @@ uint32_t StringHasher::GetHash() {
}
+template <typename schar>
+uint32_t HashSequentialString(const schar* chars, int length) {
+ StringHasher hasher(length);
+ if (!hasher.has_trivial_hash()) {
+ int i;
+ for (i = 0; hasher.is_array_index() && (i < length); i++) {
+ hasher.AddCharacter(chars[i]);
+ }
+ for (; i < length; i++) {
+ hasher.AddCharacterNoIndex(chars[i]);
+ }
+ }
+ return hasher.GetHashField();
+}
+
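HashSequentialString above makes two passes over the same buffer: characters are fed through the array-index path until the hasher decides the string can no longer be a decimal index, and the rest take the plain hashing path. A toy version with the same two-phase control flow (the mixing function is invented; V8's hasher also packs length and index bits into the final hash field):

    #include <cstdint>

    template <typename Char>
    uint32_t HashTwoPhase(const Char* chars, int length) {
      uint32_t hash = 0;
      bool is_index = length > 0;
      int i = 0;
      // Phase 1: stay on the "could still be an array index" path.
      for (; is_index && i < length; i++) {
        is_index = chars[i] >= '0' && chars[i] <= '9';
        hash = hash * 31 + static_cast<uint32_t>(chars[i]);
      }
      // Phase 2: remaining characters take the plain hashing path.
      for (; i < length; i++) {
        hash = hash * 31 + static_cast<uint32_t>(chars[i]);
      }
      return hash;
    }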
+
bool String::AsArrayIndex(uint32_t* index) {
uint32_t field = hash_field();
if (IsHashFieldComputed(field) && (field & kIsNotArrayIndexMask)) {
@@ -3694,12 +4110,12 @@ bool String::AsArrayIndex(uint32_t* index) {
}
-Object* JSObject::GetPrototype() {
- return JSObject::cast(this)->map()->prototype();
+Object* JSReceiver::GetPrototype() {
+ return HeapObject::cast(this)->map()->prototype();
}
-PropertyAttributes JSObject::GetPropertyAttribute(String* key) {
+PropertyAttributes JSReceiver::GetPropertyAttribute(String* key) {
return GetPropertyAttributeWithReceiver(this, key);
}
@@ -3708,7 +4124,7 @@ PropertyAttributes JSObject::GetPropertyAttribute(String* key) {
Object* JSObject::BypassGlobalProxy() {
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return Heap::undefined_value();
+ if (proto->IsNull()) return GetHeap()->undefined_value();
ASSERT(proto->IsJSGlobalObject());
return proto;
}
@@ -3719,7 +4135,7 @@ Object* JSObject::BypassGlobalProxy() {
bool JSObject::HasHiddenPropertiesObject() {
ASSERT(!IsJSGlobalProxy());
return GetPropertyAttributePostInterceptor(this,
- Heap::hidden_symbol(),
+ GetHeap()->hidden_symbol(),
false) != ABSENT;
}
@@ -3732,7 +4148,7 @@ Object* JSObject::GetHiddenPropertiesObject() {
// object.
Object* result =
GetLocalPropertyPostInterceptor(this,
- Heap::hidden_symbol(),
+ GetHeap()->hidden_symbol(),
&attributes)->ToObjectUnchecked();
return result;
}
@@ -3740,7 +4156,7 @@ Object* JSObject::GetHiddenPropertiesObject() {
MaybeObject* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) {
ASSERT(!IsJSGlobalProxy());
- return SetPropertyPostInterceptor(Heap::hidden_symbol(),
+ return SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
hidden_obj,
DONT_ENUM,
kNonStrictMode);
@@ -3793,6 +4209,15 @@ void AccessorInfo::set_property_attributes(PropertyAttributes attributes) {
set_flag(Smi::FromInt(rest_value | AttributesField::encode(attributes)));
}
+
+template<typename Shape, typename Key>
+void Dictionary<Shape, Key>::SetEntry(int entry,
+ Object* key,
+ Object* value) {
+ SetEntry(entry, key, value, PropertyDetails(Smi::FromInt(0)));
+}
+
+
template<typename Shape, typename Key>
void Dictionary<Shape, Key>::SetEntry(int entry,
Object* key,
@@ -3808,12 +4233,57 @@ void Dictionary<Shape, Key>::SetEntry(int entry,
}
-void Map::ClearCodeCache() {
+bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) {
+ ASSERT(other->IsNumber());
+ return key == static_cast<uint32_t>(other->Number());
+}
+
+
+uint32_t NumberDictionaryShape::Hash(uint32_t key) {
+ return ComputeIntegerHash(key);
+}
+
+
+uint32_t NumberDictionaryShape::HashForObject(uint32_t key, Object* other) {
+ ASSERT(other->IsNumber());
+ return ComputeIntegerHash(static_cast<uint32_t>(other->Number()));
+}
+
+
+MaybeObject* NumberDictionaryShape::AsObject(uint32_t key) {
+ return Isolate::Current()->heap()->NumberFromUint32(key);
+}
+
+
+bool StringDictionaryShape::IsMatch(String* key, Object* other) {
+ // We know that all entries in a hash table had their hash keys computed.
+ // Use that knowledge to fail fast on a hash mismatch.
+ if (key->Hash() != String::cast(other)->Hash()) return false;
+ return key->Equals(String::cast(other));
+}
+
+
+uint32_t StringDictionaryShape::Hash(String* key) {
+ return key->Hash();
+}
+
+
+uint32_t StringDictionaryShape::HashForObject(String* key, Object* other) {
+ return String::cast(other)->Hash();
+}
+
+
+MaybeObject* StringDictionaryShape::AsObject(String* key) {
+ return key;
+}
+
+
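StringDictionaryShape::IsMatch above compares cached hashes before paying for a character-by-character Equals. The same fast-reject shape in stand-alone form (std::string stands in for V8 strings, with hashes assumed precomputed):

    #include <cstdint>
    #include <string>

    struct Key {
      uint32_t hash;        // precomputed, as for V8 symbols
      std::string chars;
    };

    bool IsMatch(const Key& a, const Key& b) {
      if (a.hash != b.hash) return false;  // cheap reject on hash mismatch
      return a.chars == b.chars;           // full compare only on hash match
    }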
+void Map::ClearCodeCache(Heap* heap) {
// No write barrier is needed since empty_fixed_array is not in new space.
// Please note this function is used during marking:
// - MarkCompactCollector::MarkUnmarkedObject
- ASSERT(!Heap::InNewSpace(Heap::raw_unchecked_empty_fixed_array()));
- WRITE_FIELD(this, kCodeCacheOffset, Heap::raw_unchecked_empty_fixed_array());
+ ASSERT(!heap->InNewSpace(heap->raw_unchecked_empty_fixed_array()));
+ WRITE_FIELD(this, kCodeCacheOffset, heap->raw_unchecked_empty_fixed_array());
}
@@ -3826,7 +4296,7 @@ void JSArray::EnsureSize(int required_size) {
// constantly growing.
Expand(required_size + (required_size >> 3));
// It's a performance benefit to keep a frequently used array in new-space.
- } else if (!Heap::new_space()->Contains(elts) &&
+ } else if (!GetHeap()->new_space()->Contains(elts) &&
required_size < kArraySizeThatFitsComfortablyInNewSpace) {
// Expand will allocate a new backing store in new space even if the size
// we asked for isn't larger than what we had before.
@@ -3848,7 +4318,22 @@ void JSArray::SetContent(FixedArray* storage) {
MaybeObject* FixedArray::Copy() {
if (length() == 0) return this;
- return Heap::CopyFixedArray(this);
+ return GetHeap()->CopyFixedArray(this);
+}
+
+
+Relocatable::Relocatable(Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ isolate_ = isolate;
+ prev_ = isolate->relocatable_top();
+ isolate->set_relocatable_top(this);
+}
+
+
+Relocatable::~Relocatable() {
+ ASSERT(isolate_ == Isolate::Current());
+ ASSERT_EQ(isolate_->relocatable_top(), this);
+ isolate_->set_relocatable_top(prev_);
}
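
Relocatable's constructor and destructor above maintain a per-isolate intrusive stack: construction pushes the object, destruction asserts LIFO order and pops it. The same RAII pattern in miniature (Context stands in for Isolate):

    #include <cassert>

    struct Node;

    struct Context {
      Node* top = nullptr;
    };

    struct Node {
      explicit Node(Context* ctx) : ctx_(ctx), prev_(ctx->top) {
        ctx->top = this;  // push on construction
      }
      ~Node() {
        assert(ctx_->top == this);  // destruction must be LIFO
        ctx_->top = prev_;          // pop on destruction
      }
      Context* ctx_;
      Node* prev_;
    };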
@@ -3857,16 +4342,16 @@ int JSObject::BodyDescriptor::SizeOf(Map* map, HeapObject* object) {
}
-void Proxy::ProxyIterateBody(ObjectVisitor* v) {
+void Foreign::ForeignIterateBody(ObjectVisitor* v) {
v->VisitExternalReference(
- reinterpret_cast<Address *>(FIELD_ADDR(this, kProxyOffset)));
+ reinterpret_cast<Address *>(FIELD_ADDR(this, kAddressOffset)));
}
template<typename StaticVisitor>
-void Proxy::ProxyIterateBody() {
+void Foreign::ForeignIterateBody() {
StaticVisitor::VisitExternalReference(
- reinterpret_cast<Address *>(FIELD_ADDR(this, kProxyOffset)));
+ reinterpret_cast<Address *>(FIELD_ADDR(this, kAddressOffset)));
}
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 237358dcc..158789e0d 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -88,8 +88,8 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayPrint(out);
break;
- case PIXEL_ARRAY_TYPE:
- PixelArray::cast(this)->PixelArrayPrint(out);
+ case EXTERNAL_PIXEL_ARRAY_TYPE:
+ ExternalPixelArray::cast(this)->ExternalPixelArrayPrint(out);
break;
case EXTERNAL_BYTE_ARRAY_TYPE:
ExternalByteArray::cast(this)->ExternalByteArrayPrint(out);
@@ -114,6 +114,9 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case EXTERNAL_FLOAT_ARRAY_TYPE:
ExternalFloatArray::cast(this)->ExternalFloatArrayPrint(out);
break;
+ case EXTERNAL_DOUBLE_ARRAY_TYPE:
+ ExternalDoubleArray::cast(this)->ExternalDoubleArrayPrint(out);
+ break;
case FILLER_TYPE:
PrintF(out, "filler");
break;
@@ -145,8 +148,11 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case CODE_TYPE:
Code::cast(this)->CodePrint(out);
break;
- case PROXY_TYPE:
- Proxy::cast(this)->ProxyPrint(out);
+ case JS_PROXY_TYPE:
+ JSProxy::cast(this)->JSProxyPrint(out);
+ break;
+ case FOREIGN_TYPE:
+ Foreign::cast(this)->ForeignPrint(out);
break;
case SHARED_FUNCTION_INFO_TYPE:
SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint(out);
@@ -177,8 +183,8 @@ void ByteArray::ByteArrayPrint(FILE* out) {
}
-void PixelArray::PixelArrayPrint(FILE* out) {
- PrintF(out, "pixel array");
+void ExternalPixelArray::ExternalPixelArrayPrint(FILE* out) {
+ PrintF(out, "external pixel array");
}
@@ -217,6 +223,11 @@ void ExternalFloatArray::ExternalFloatArrayPrint(FILE* out) {
}
+void ExternalDoubleArray::ExternalDoubleArrayPrint(FILE* out) {
+ PrintF(out, "external double array");
+}
+
+
void JSObject::PrintProperties(FILE* out) {
if (HasFastProperties()) {
DescriptorArray* descs = map()->instance_descriptors();
@@ -271,8 +282,8 @@ void JSObject::PrintElements(FILE* out) {
}
break;
}
- case PIXEL_ELEMENTS: {
- PixelArray* p = PixelArray::cast(elements());
+ case EXTERNAL_PIXEL_ELEMENTS: {
+ ExternalPixelArray* p = ExternalPixelArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
PrintF(out, " %d: %d\n", i, p->get(i));
}
@@ -330,9 +341,25 @@ void JSObject::PrintElements(FILE* out) {
}
break;
}
+ case EXTERNAL_DOUBLE_ELEMENTS: {
+ ExternalDoubleArray* p = ExternalDoubleArray::cast(elements());
+ for (int i = 0; i < p->length(); i++) {
+ PrintF(out, " %d: %f\n", i, p->get(i));
+ }
+ break;
+ }
case DICTIONARY_ELEMENTS:
elements()->Print(out);
break;
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ FixedArray* p = FixedArray::cast(elements());
+ for (int i = 2; i < p->length(); i++) {
+ PrintF(out, " %d: ", i);
+ p->get(i)->ShortPrint(out);
+ PrintF(out, "\n");
+ }
+ break;
+ }
default:
UNREACHABLE();
break;
@@ -372,7 +399,7 @@ static const char* TypeToString(InstanceType type) {
case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
- case PIXEL_ARRAY_TYPE: return "PIXEL_ARRAY";
+ case EXTERNAL_PIXEL_ARRAY_TYPE: return "EXTERNAL_PIXEL_ARRAY";
case EXTERNAL_BYTE_ARRAY_TYPE: return "EXTERNAL_BYTE_ARRAY";
case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
return "EXTERNAL_UNSIGNED_BYTE_ARRAY";
@@ -383,6 +410,7 @@ static const char* TypeToString(InstanceType type) {
case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
return "EXTERNAL_UNSIGNED_INT_ARRAY";
case EXTERNAL_FLOAT_ARRAY_TYPE: return "EXTERNAL_FLOAT_ARRAY";
+ case EXTERNAL_DOUBLE_ARRAY_TYPE: return "EXTERNAL_DOUBLE_ARRAY";
case FILLER_TYPE: return "FILLER";
case JS_OBJECT_TYPE: return "JS_OBJECT";
case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";
@@ -392,19 +420,19 @@ static const char* TypeToString(InstanceType type) {
case JS_FUNCTION_TYPE: return "JS_FUNCTION";
case CODE_TYPE: return "CODE";
case JS_ARRAY_TYPE: return "JS_ARRAY";
+ case JS_PROXY_TYPE: return "JS_PROXY";
case JS_REGEXP_TYPE: return "JS_REGEXP";
case JS_VALUE_TYPE: return "JS_VALUE";
case JS_GLOBAL_OBJECT_TYPE: return "JS_GLOBAL_OBJECT";
case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
- case PROXY_TYPE: return "PROXY";
- case LAST_STRING_TYPE: return "LAST_STRING_TYPE";
+ case FOREIGN_TYPE: return "FOREIGN";
case JS_MESSAGE_OBJECT_TYPE: return "JS_MESSAGE_OBJECT_TYPE";
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
+ default: return "UNKNOWN";
}
- return "UNKNOWN";
}
@@ -453,6 +481,13 @@ void CodeCache::CodeCachePrint(FILE* out) {
}
+void PolymorphicCodeCache::PolymorphicCodeCachePrint(FILE* out) {
+ HeapObject::PrintHeader(out, "PolymorphicCodeCache");
+ PrintF(out, "\n - cache: ");
+ cache()->ShortPrint(out);
+}
+
+
void FixedArray::FixedArrayPrint(FILE* out) {
HeapObject::PrintHeader(out, "FixedArray");
PrintF(out, " - length: %d", length());
@@ -515,6 +550,15 @@ void String::StringPrint(FILE* out) {
}
+void JSProxy::JSProxyPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "JSProxy");
+ PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - handler = ");
+ handler()->Print(out);
+ PrintF(out, "\n");
+}
+
+
void JSFunction::JSFunctionPrint(FILE* out) {
HeapObject::PrintHeader(out, "Function");
PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
@@ -607,8 +651,8 @@ void Code::CodePrint(FILE* out) {
}
-void Proxy::ProxyPrint(FILE* out) {
- PrintF(out, "proxy to %p", proxy());
+void Foreign::ForeignPrint(FILE* out) {
+ PrintF(out, "foreign address : %p", address());
}
diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc
index 5f054bd32..3b36cbe93 100644
--- a/deps/v8/src/objects-visiting.cc
+++ b/deps/v8/src/objects-visiting.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -73,6 +73,9 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case FIXED_ARRAY_TYPE:
return kVisitFixedArray;
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ return kVisitFixedDoubleArray;
+
case ODDBALL_TYPE:
return kVisitOddball;
@@ -88,10 +91,15 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case SHARED_FUNCTION_INFO_TYPE:
return kVisitSharedFunctionInfo;
- case PROXY_TYPE:
+ case JS_PROXY_TYPE:
+ return GetVisitorIdForSize(kVisitStruct,
+ kVisitStructGeneric,
+ JSProxy::kSize);
+
+ case FOREIGN_TYPE:
return GetVisitorIdForSize(kVisitDataObject,
kVisitDataObjectGeneric,
- Proxy::kSize);
+ Foreign::kSize);
case FILLER_TYPE:
return kVisitDataObjectGeneric;
@@ -113,7 +121,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
return kVisitJSFunction;
case HEAP_NUMBER_TYPE:
- case PIXEL_ARRAY_TYPE:
+ case EXTERNAL_PIXEL_ARRAY_TYPE:
case EXTERNAL_BYTE_ARRAY_TYPE:
case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
case EXTERNAL_SHORT_ARRAY_TYPE:
@@ -121,6 +129,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case EXTERNAL_INT_ARRAY_TYPE:
case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
case EXTERNAL_FLOAT_ARRAY_TYPE:
+ case EXTERNAL_DOUBLE_ARRAY_TYPE:
return GetVisitorIdForSize(kVisitDataObject,
kVisitDataObjectGeneric,
instance_size);
diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h
index ea6d7954e..f2b85869f 100644
--- a/deps/v8/src/objects-visiting.h
+++ b/deps/v8/src/objects-visiting.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,8 @@
#ifndef V8_OBJECTS_VISITING_H_
#define V8_OBJECTS_VISITING_H_
+#include "allocation.h"
+
// This file provides base classes and auxiliary methods for defining
// static object visitors used during GC.
// Visiting HeapObject body with a normal ObjectVisitor requires performing
@@ -50,6 +52,7 @@ class StaticVisitorBase : public AllStatic {
kVisitShortcutCandidate,
kVisitByteArray,
kVisitFixedArray,
+ kVisitFixedDoubleArray,
kVisitGlobalContext,
// For data objects, JS objects and structs along with generic visitor which
@@ -141,13 +144,22 @@ class StaticVisitorBase : public AllStatic {
template<typename Callback>
class VisitorDispatchTable {
public:
+ void CopyFrom(VisitorDispatchTable* other) {
+ // We deliberately avoid memcpy here: during the update, every element
+ // of the callbacks_ array must remain a valid pointer at all times
+ // (memcpy might be implemented as a byte-copying loop).
+ for (int i = 0; i < StaticVisitorBase::kVisitorIdCount; i++) {
+ NoBarrier_Store(&callbacks_[i], other->callbacks_[i]);
+ }
+ }
+
inline Callback GetVisitor(Map* map) {
- return callbacks_[map->visitor_id()];
+ return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
}
void Register(StaticVisitorBase::VisitorId id, Callback callback) {
ASSERT(id < StaticVisitorBase::kVisitorIdCount); // id is unsigned.
- callbacks_[id] = callback;
+ callbacks_[id] = reinterpret_cast<AtomicWord>(callback);
}
template<typename Visitor,
@@ -179,21 +191,22 @@ class VisitorDispatchTable {
}
private:
- Callback callbacks_[StaticVisitorBase::kVisitorIdCount];
+ AtomicWord callbacks_[StaticVisitorBase::kVisitorIdCount];
};
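
CopyFrom above stores each table slot as an AtomicWord and writes it with NoBarrier_Store, so a concurrent reader of the dispatch table sees either the old or the new callback, never a torn pointer. A sketch with std::atomic standing in for V8's atomic ops:

    #include <atomic>
    #include <cstdint>

    static const int kVisitorIdCount = 4;  // assumed table size

    struct DispatchTable {
      std::atomic<intptr_t> callbacks[kVisitorIdCount];

      void CopyFrom(const DispatchTable& other) {
        // Element-wise atomic stores: every slot holds a valid pointer
        // value at every instant, unlike a byte-wise memcpy.
        for (int i = 0; i < kVisitorIdCount; i++) {
          callbacks[i].store(
              other.callbacks[i].load(std::memory_order_relaxed),
              std::memory_order_relaxed);
        }
      }
    };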
template<typename StaticVisitor>
class BodyVisitorBase : public AllStatic {
public:
- INLINE(static void IteratePointers(HeapObject* object,
+ INLINE(static void IteratePointers(Heap* heap,
+ HeapObject* object,
int start_offset,
int end_offset)) {
Object** start_slot = reinterpret_cast<Object**>(object->address() +
start_offset);
Object** end_slot = reinterpret_cast<Object**>(object->address() +
end_offset);
- StaticVisitor::VisitPointers(start_slot, end_slot);
+ StaticVisitor::VisitPointers(heap, start_slot, end_slot);
}
};
@@ -204,7 +217,10 @@ class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
static inline ReturnType Visit(Map* map, HeapObject* object) {
int object_size = BodyDescriptor::SizeOf(map, object);
BodyVisitorBase<StaticVisitor>::IteratePointers(
- object, BodyDescriptor::kStartOffset, object_size);
+ map->heap(),
+ object,
+ BodyDescriptor::kStartOffset,
+ object_size);
return static_cast<ReturnType>(object_size);
}
@@ -212,7 +228,10 @@ class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
ASSERT(BodyDescriptor::SizeOf(map, object) == object_size);
BodyVisitorBase<StaticVisitor>::IteratePointers(
- object, BodyDescriptor::kStartOffset, object_size);
+ map->heap(),
+ object,
+ BodyDescriptor::kStartOffset,
+ object_size);
return static_cast<ReturnType>(object_size);
}
};
@@ -223,7 +242,10 @@ class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
public:
static inline ReturnType Visit(Map* map, HeapObject* object) {
BodyVisitorBase<StaticVisitor>::IteratePointers(
- object, BodyDescriptor::kStartOffset, BodyDescriptor::kEndOffset);
+ map->heap(),
+ object,
+ BodyDescriptor::kStartOffset,
+ BodyDescriptor::kEndOffset);
return static_cast<ReturnType>(BodyDescriptor::kSize);
}
};
@@ -264,6 +286,8 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
FixedArray::BodyDescriptor,
int>::Visit);
+ table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
+
table_.Register(kVisitGlobalContext,
&FixedBodyVisitor<StaticVisitor,
Context::ScavengeBodyDescriptor,
@@ -299,8 +323,8 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return table_.GetVisitor(map)(map, obj);
}
- static inline void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(p);
+ static inline void VisitPointers(Heap* heap, Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
}
private:
@@ -308,6 +332,11 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
}
+ static inline int VisitFixedDoubleArray(Map* map, HeapObject* object) {
+ int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
+ return FixedDoubleArray::SizeFor(length);
+ }
+
static inline int VisitSeqAsciiString(Map* map, HeapObject* object) {
return SeqAsciiString::cast(object)->
SeqAsciiStringSize(map->instance_type());
@@ -372,7 +401,7 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
template<typename StaticVisitor>
-void Code::CodeIterateBody() {
+void Code::CodeIterateBody(Heap* heap) {
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
@@ -386,12 +415,14 @@ void Code::CodeIterateBody() {
RelocIterator it(this, mode_mask);
StaticVisitor::VisitPointer(
+ heap,
reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
StaticVisitor::VisitPointer(
+ heap,
reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
for (; !it.done(); it.next()) {
- it.rinfo()->template Visit<StaticVisitor>();
+ it.rinfo()->template Visit<StaticVisitor>(heap);
}
}
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 0b7f60a90..92a2ed494 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -41,7 +41,6 @@
#include "macro-assembler.h"
#include "safepoint-table.h"
#include "scanner-base.h"
-#include "scopeinfo.h"
#include "string-stream.h"
#include "utils.h"
#include "vm-state-inl.h"
@@ -51,7 +50,6 @@
#include "disassembler.h"
#endif
-
namespace v8 {
namespace internal {
@@ -60,11 +58,16 @@ namespace internal {
const int kGetterIndex = 0;
const int kSetterIndex = 1;
+uint64_t FixedDoubleArray::kHoleNanInt64 = -1;
+uint64_t FixedDoubleArray::kCanonicalNonHoleNanLower32 = 0x7FF00000;
+uint64_t FixedDoubleArray::kCanonicalNonHoleNanInt64 =
+ kCanonicalNonHoleNanLower32 << 32;
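The constants above reserve a specific NaN bit pattern (all 64 bits set) to mark holes in a FixedDoubleArray, so a hole can be told apart from an ordinary NaN only by comparing raw bits; floating-point comparison cannot work, since NaN != NaN. A minimal sketch of that raw-bits check (assuming IEEE-754 doubles):

    #include <cstdint>
    #include <cstring>

    static const uint64_t kHoleNanInt64 = ~uint64_t(0);  // all bits set

    bool IsHole(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // inspect the raw bit pattern
      return bits == kHoleNanInt64;
    }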
MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
Object* value) {
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateJSObject(constructor);
+ { MaybeObject* maybe_result =
+ constructor->GetHeap()->AllocateJSObject(constructor);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
JSValue::cast(result)->set_value(value);
@@ -86,14 +89,19 @@ MaybeObject* Object::ToObject(Context* global_context) {
MaybeObject* Object::ToObject() {
- Context* global_context = Top::context()->global_context();
if (IsJSObject()) {
return this;
} else if (IsNumber()) {
+ Isolate* isolate = Isolate::Current();
+ Context* global_context = isolate->context()->global_context();
return CreateJSValue(global_context->number_function(), this);
} else if (IsBoolean()) {
+ Isolate* isolate = HeapObject::cast(this)->GetIsolate();
+ Context* global_context = isolate->context()->global_context();
return CreateJSValue(global_context->boolean_function(), this);
} else if (IsString()) {
+ Isolate* isolate = HeapObject::cast(this)->GetIsolate();
+ Context* global_context = isolate->context()->global_context();
return CreateJSValue(global_context->string_function(), this);
}
@@ -103,36 +111,50 @@ MaybeObject* Object::ToObject() {
Object* Object::ToBoolean() {
- if (IsTrue()) return Heap::true_value();
- if (IsFalse()) return Heap::false_value();
+ if (IsTrue()) return this;
+ if (IsFalse()) return this;
if (IsSmi()) {
- return Heap::ToBoolean(Smi::cast(this)->value() != 0);
+ return Isolate::Current()->heap()->ToBoolean(Smi::cast(this)->value() != 0);
+ }
+ HeapObject* heap_object = HeapObject::cast(this);
+ if (heap_object->IsUndefined() || heap_object->IsNull()) {
+ return heap_object->GetHeap()->false_value();
}
- if (IsUndefined() || IsNull()) return Heap::false_value();
// An undetectable object converts to false.
- if (IsUndetectableObject()) {
- return Heap::false_value();
+ if (heap_object->IsUndetectableObject()) {
+ return heap_object->GetHeap()->false_value();
}
- if (IsString()) {
- return Heap::ToBoolean(String::cast(this)->length() != 0);
+ if (heap_object->IsString()) {
+ return heap_object->GetHeap()->ToBoolean(
+ String::cast(this)->length() != 0);
}
- if (IsHeapNumber()) {
+ if (heap_object->IsHeapNumber()) {
return HeapNumber::cast(this)->HeapNumberToBoolean();
}
- return Heap::true_value();
+ return heap_object->GetHeap()->true_value();
}
void Object::Lookup(String* name, LookupResult* result) {
- if (IsJSObject()) return JSObject::cast(this)->Lookup(name, result);
Object* holder = NULL;
- Context* global_context = Top::context()->global_context();
- if (IsString()) {
- holder = global_context->string_function()->instance_prototype();
- } else if (IsNumber()) {
+ if (IsSmi()) {
+ Context* global_context = Isolate::Current()->context()->global_context();
holder = global_context->number_function()->instance_prototype();
- } else if (IsBoolean()) {
- holder = global_context->boolean_function()->instance_prototype();
+ } else {
+ HeapObject* heap_object = HeapObject::cast(this);
+ if (heap_object->IsJSObject()) {
+ return JSObject::cast(this)->Lookup(name, result);
+ } else if (heap_object->IsJSProxy()) {
+ return result->HandlerResult();
+ }
+ Context* global_context = Isolate::Current()->context()->global_context();
+ if (heap_object->IsString()) {
+ holder = global_context->string_function()->instance_prototype();
+ } else if (heap_object->IsHeapNumber()) {
+ holder = global_context->number_function()->instance_prototype();
+ } else if (heap_object->IsBoolean()) {
+ holder = global_context->boolean_function()->instance_prototype();
+ }
}
ASSERT(holder != NULL); // Cannot handle null or undefined.
JSObject::cast(holder)->Lookup(name, result);
@@ -154,14 +176,16 @@ MaybeObject* Object::GetPropertyWithCallback(Object* receiver,
Object* structure,
String* name,
Object* holder) {
+ Isolate* isolate = name->GetIsolate();
// To accommodate both the old and the new api we switch on the
- // data structure used to store the callbacks. Eventually proxy
+ // data structure used to store the callbacks. Eventually foreign
// callbacks should be phased out.
- if (structure->IsProxy()) {
+ if (structure->IsForeign()) {
AccessorDescriptor* callback =
- reinterpret_cast<AccessorDescriptor*>(Proxy::cast(structure)->proxy());
+ reinterpret_cast<AccessorDescriptor*>(
+ Foreign::cast(structure)->address());
MaybeObject* value = (callback->getter)(receiver, callback->data);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return value;
}
@@ -174,17 +198,19 @@ MaybeObject* Object::GetPropertyWithCallback(Object* receiver,
JSObject* self = JSObject::cast(receiver);
JSObject* holder_handle = JSObject::cast(holder);
Handle<String> key(name);
- LOG(ApiNamedPropertyAccess("load", self, name));
- CustomArguments args(data->data(), self, holder_handle);
+ LOG(isolate, ApiNamedPropertyAccess("load", self, name));
+ CustomArguments args(isolate, data->data(), self, holder_handle);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = call_fun(v8::Utils::ToLocal(key), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
- if (result.IsEmpty()) return Heap::undefined_value();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ if (result.IsEmpty()) {
+ return isolate->heap()->undefined_value();
+ }
return *v8::Utils::OpenHandle(*result);
}
@@ -196,7 +222,7 @@ MaybeObject* Object::GetPropertyWithCallback(Object* receiver,
JSFunction::cast(getter));
}
// Getter is not a function.
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
UNREACHABLE();
@@ -204,15 +230,44 @@ MaybeObject* Object::GetPropertyWithCallback(Object* receiver,
}
+MaybeObject* Object::GetPropertyWithHandler(Object* receiver_raw,
+ String* name_raw,
+ Object* handler_raw) {
+ Isolate* isolate = name_raw->GetIsolate();
+ HandleScope scope;
+ Handle<Object> receiver(receiver_raw);
+ Handle<Object> name(name_raw);
+ Handle<Object> handler(handler_raw);
+
+ // Extract trap function.
+ Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("get");
+ Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+ if (trap->IsUndefined()) {
+ // Get the derived `get' property.
+ trap = isolate->derived_get_trap();
+ }
+
+ // Call trap function.
+ Object** args[] = { receiver.location(), name.location() };
+ bool has_exception;
+ Handle<Object> result =
+ Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+ if (has_exception) return Failure::Exception();
+
+ return *result;
+}
+
+
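GetPropertyWithHandler above implements the proxy `get' trap: look up a trap function on the handler, fall back to the derived default when absent, then call it with the receiver and property name. A much-simplified stand-alone model of that dispatch (Value and Trap are invented stand-ins; the real code goes through Execution::Call and propagates exceptions):

    #include <functional>
    #include <map>
    #include <string>

    using Value = std::string;  // stand-in for a JS value
    using Trap =
        std::function<Value(const Value& receiver, const std::string& name)>;

    Value GetWithHandler(const std::map<std::string, Trap>& handler,
                         const Value& receiver, const std::string& name,
                         const Trap& derived_get_trap) {
      auto it = handler.find("get");
      // Fall back to the derived trap when the handler defines none.
      const Trap& trap = (it == handler.end()) ? derived_get_trap : it->second;
      return trap(receiver, name);  // invoke trap(receiver, name)
    }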
MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
JSFunction* getter) {
HandleScope scope;
Handle<JSFunction> fun(JSFunction::cast(getter));
Handle<Object> self(receiver);
#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debug* debug = fun->GetHeap()->isolate()->debug();
// Handle stepping into a getter if step into is active.
- if (Debug::StepInActive()) {
- Debug::HandleStepIn(fun, Handle<Object>::null(), 0, false);
+ if (debug->StepInActive()) {
+ debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
}
#endif
bool has_pending_exception;
@@ -281,8 +336,9 @@ MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
// No accessible property found.
*attributes = ABSENT;
- Top::ReportFailedAccessCheck(this, v8::ACCESS_GET);
- return Heap::undefined_value();
+ Heap* heap = name->GetHeap();
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
+ return heap->undefined_value();
}
@@ -344,7 +400,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
}
}
- Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ GetHeap()->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
return ABSENT;
}
@@ -382,12 +438,10 @@ MaybeObject* JSObject::SetNormalizedProperty(String* name,
if (entry == StringDictionary::kNotFound) {
Object* store_value = value;
if (IsGlobalObject()) {
- { MaybeObject* maybe_store_value =
- Heap::AllocateJSGlobalPropertyCell(value);
- if (!maybe_store_value->ToObject(&store_value)) {
- return maybe_store_value;
- }
- }
+ Heap* heap = name->GetHeap();
+ MaybeObject* maybe_store_value =
+ heap->AllocateJSGlobalPropertyCell(value);
+ if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
}
Object* dict;
{ MaybeObject* maybe_dict =
@@ -423,7 +477,7 @@ MaybeObject* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) {
if (IsGlobalObject()) {
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.IsDontDelete()) {
- if (mode != FORCE_DELETION) return Heap::false_value();
+ if (mode != FORCE_DELETION) return GetHeap()->false_value();
// When forced to delete global properties, we have to make a
// map change to invalidate any ICs that think they can load
// from the DontDelete cell without checking if it contains
@@ -436,13 +490,22 @@ MaybeObject* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) {
}
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
- cell->set_value(Heap::the_hole_value());
+ cell->set_value(cell->heap()->the_hole_value());
dictionary->DetailsAtPut(entry, details.AsDeleted());
} else {
- return dictionary->DeleteProperty(entry, mode);
+ Object* deleted = dictionary->DeleteProperty(entry, mode);
+ if (deleted == GetHeap()->true_value()) {
+ FixedArray* new_properties = NULL;
+ MaybeObject* maybe_properties = dictionary->Shrink(name);
+ if (!maybe_properties->To(&new_properties)) {
+ return maybe_properties;
+ }
+ set_properties(new_properties);
+ }
+ return deleted;
}
}
- return Heap::true_value();
+ return GetHeap()->true_value();
}
@@ -468,37 +531,42 @@ MaybeObject* Object::GetProperty(Object* receiver,
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
+ Heap* heap = name->GetHeap();
// Traverse the prototype chain from the current object (this) to
- // the holder and check for access rights. This avoid traversing the
+ // the holder and check for access rights. This avoids traversing the
// objects more than once in case of interceptors, because the
// holder will always be the interceptor holder and the search may
// only continue with a current object just after the interceptor
// holder in the prototype chain.
- Object* last = result->IsProperty() ? result->holder() : Heap::null_value();
- for (Object* current = this; true; current = current->GetPrototype()) {
- if (current->IsAccessCheckNeeded()) {
- // Check if we're allowed to read from the current object. Note
- // that even though we may not actually end up loading the named
- // property from the current object, we still check that we have
- // access to it.
- JSObject* checked = JSObject::cast(current);
- if (!Top::MayNamedAccess(checked, name, v8::ACCESS_GET)) {
- return checked->GetPropertyWithFailedAccessCheck(receiver,
- result,
- name,
- attributes);
- }
- }
- // Stop traversing the chain once we reach the last object in the
- // chain; either the holder of the result or null in case of an
- // absent property.
- if (current == last) break;
+ // Proxy handlers do not use the proxy's prototype, so we can skip this.
+ if (!result->IsHandler()) {
+ Object* last = result->IsProperty() ? result->holder() : heap->null_value();
+ ASSERT(this != this->GetPrototype());
+ for (Object* current = this; true; current = current->GetPrototype()) {
+ if (current->IsAccessCheckNeeded()) {
+ // Check if we're allowed to read from the current object. Note
+ // that even though we may not actually end up loading the named
+ // property from the current object, we still check that we have
+ // access to it.
+ JSObject* checked = JSObject::cast(current);
+ if (!heap->isolate()->MayNamedAccess(checked, name, v8::ACCESS_GET)) {
+ return checked->GetPropertyWithFailedAccessCheck(receiver,
+ result,
+ name,
+ attributes);
+ }
+ }
+ // Stop traversing the chain once we reach the last object in the
+ // chain; either the holder of the result or null in case of an
+ // absent property.
+ if (current == last) break;
+ }
}
if (!result->IsProperty()) {
*attributes = ABSENT;
- return Heap::undefined_value();
+ return heap->undefined_value();
}
*attributes = result->GetAttributes();
Object* value;
@@ -507,11 +575,11 @@ MaybeObject* Object::GetProperty(Object* receiver,
case NORMAL:
value = holder->GetNormalizedProperty(result);
ASSERT(!value->IsTheHole() || result->IsReadOnly());
- return value->IsTheHole() ? Heap::undefined_value() : value;
+ return value->IsTheHole() ? heap->undefined_value() : value;
case FIELD:
value = holder->FastPropertyAt(result->GetFieldIndex());
ASSERT(!value->IsTheHole() || result->IsReadOnly());
- return value->IsTheHole() ? Heap::undefined_value() : value;
+ return value->IsTheHole() ? heap->undefined_value() : value;
case CONSTANT_FUNCTION:
return result->GetConstantFunction();
case CALLBACKS:
@@ -519,34 +587,53 @@ MaybeObject* Object::GetProperty(Object* receiver,
result->GetCallbackObject(),
name,
holder);
+ case HANDLER: {
+ JSProxy* proxy = JSProxy::cast(this);
+ return GetPropertyWithHandler(receiver, name, proxy->handler());
+ }
case INTERCEPTOR: {
JSObject* recvr = JSObject::cast(receiver);
return holder->GetPropertyWithInterceptor(recvr, name, attributes);
}
- default:
- UNREACHABLE();
- return NULL;
+ case MAP_TRANSITION:
+ case EXTERNAL_ARRAY_TRANSITION:
+ case CONSTANT_TRANSITION:
+ case NULL_DESCRIPTOR:
+ break;
}
+ UNREACHABLE();
+ return NULL;
}
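
Note the shape change at the end of the switch above: instead of a default: arm, the new code lists every remaining PropertyType, breaks, and puts UNREACHABLE() after the switch. With no default arm, a compiler (e.g. GCC/Clang under -Wswitch, part of -Wall) warns when a later patch adds an enumerator without handling it. A small illustration with made-up enumerators:

  enum PropertyType { NORMAL, FIELD, CALLBACKS, HANDLER, MAP_TRANSITION };

  const char* Describe(PropertyType type) {
    switch (type) {
      case NORMAL:    return "data property";
      case FIELD:     return "in-object field";
      case CALLBACKS: return "accessor pair";
      case HANDLER:   return "proxy handler";
      case MAP_TRANSITION:
        break;  // transitions are bookkeeping, not real properties
    }
    // Only the break arms reach this point; a new enumerator without a case
    // now draws a compile-time warning instead of failing at runtime.
    return "not a property";
  }
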
MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
- if (IsJSObject()) {
- return JSObject::cast(this)->GetElementWithReceiver(receiver, index);
- }
-
Object* holder = NULL;
- Context* global_context = Top::context()->global_context();
- if (IsString()) {
- holder = global_context->string_function()->instance_prototype();
- } else if (IsNumber()) {
+ if (IsSmi()) {
+ Context* global_context = Isolate::Current()->context()->global_context();
holder = global_context->number_function()->instance_prototype();
- } else if (IsBoolean()) {
- holder = global_context->boolean_function()->instance_prototype();
} else {
- // Undefined and null have no indexed properties.
- ASSERT(IsUndefined() || IsNull());
- return Heap::undefined_value();
+ HeapObject* heap_object = HeapObject::cast(this);
+
+ if (heap_object->IsJSObject()) {
+ return JSObject::cast(this)->GetElementWithReceiver(receiver, index);
+ }
+ Heap* heap = heap_object->GetHeap();
+ Isolate* isolate = heap->isolate();
+
+ Context* global_context = isolate->context()->global_context();
+ if (heap_object->IsString()) {
+ holder = global_context->string_function()->instance_prototype();
+ } else if (heap_object->IsHeapNumber()) {
+ holder = global_context->number_function()->instance_prototype();
+ } else if (heap_object->IsBoolean()) {
+ holder = global_context->boolean_function()->instance_prototype();
+ } else if (heap_object->IsJSProxy()) {
+ return heap->undefined_value(); // For now...
+ } else {
+ // Undefined and null have no indexed properties.
+ ASSERT(heap_object->IsUndefined() || heap_object->IsNull());
+ return heap->undefined_value();
+ }
}
return JSObject::cast(holder)->GetElementWithReceiver(receiver, index);
@@ -554,16 +641,32 @@ MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
Object* Object::GetPrototype() {
- // The object is either a number, a string, a boolean, or a real JS object.
- if (IsJSObject()) return JSObject::cast(this)->map()->prototype();
- Context* context = Top::context()->global_context();
+ if (IsSmi()) {
+ Heap* heap = Isolate::Current()->heap();
+ Context* context = heap->isolate()->context()->global_context();
+ return context->number_function()->instance_prototype();
+ }
+
+ HeapObject* heap_object = HeapObject::cast(this);
- if (IsNumber()) return context->number_function()->instance_prototype();
- if (IsString()) return context->string_function()->instance_prototype();
- if (IsBoolean()) {
+ // The object is either a number, a string, a boolean,
+ // a real JS object, or a Harmony proxy.
+ if (heap_object->IsJSReceiver()) {
+ return heap_object->map()->prototype();
+ }
+ Heap* heap = heap_object->GetHeap();
+ Context* context = heap->isolate()->context()->global_context();
+
+ if (heap_object->IsHeapNumber()) {
+ return context->number_function()->instance_prototype();
+ }
+ if (heap_object->IsString()) {
+ return context->string_function()->instance_prototype();
+ }
+ if (heap_object->IsBoolean()) {
return context->boolean_function()->instance_prototype();
} else {
- return Heap::null_value();
+ return heap->null_value();
}
}
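
Both GetElementWithReceiver and GetPrototype now test IsSmi() before touching a map or heap, because a small integer is encoded in the tagged word itself and has no owning heap to ask; only genuine heap objects can answer GetHeap(). A standalone sketch of 32-bit-style one-bit Smi tagging (the helper names and encoding details are illustrative, not V8 declarations):

  #include <cstdint>
  #include <iostream>

  // Low bit 0 = small integer (Smi), low bit 1 = heap object pointer.
  bool IsSmi(uintptr_t tagged) { return (tagged & 1) == 0; }
  intptr_t SmiValue(uintptr_t tagged) {
    return static_cast<intptr_t>(tagged) >> 1;  // shift out the tag bit
  }
  uintptr_t SmiFromInt(intptr_t value) {
    return static_cast<uintptr_t>(value) << 1;  // tag bit stays 0
  }

  int main() {
    uintptr_t tagged = SmiFromInt(42);
    std::cout << IsSmi(tagged) << " " << SmiValue(tagged) << std::endl;  // 1 42
  }
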
@@ -637,9 +740,10 @@ MaybeObject* String::SlowTryFlatten(PretenureFlag pretenure) {
// allowed. This is to avoid an assertion failure when allocating.
// Flattening strings is the only case where we always allow
// allocation because no GC is performed if the allocation fails.
- if (!Heap::IsAllocationAllowed()) return this;
+ if (!HEAP->IsAllocationAllowed()) return this;
#endif
+ Heap* heap = GetHeap();
switch (StringShape(this).representation_tag()) {
case kConsStringTag: {
ConsString* cs = ConsString::cast(this);
@@ -649,12 +753,12 @@ MaybeObject* String::SlowTryFlatten(PretenureFlag pretenure) {
// There's little point in putting the flat string in new space if the
// cons string is in old space. It can never get GCed until there is
// an old space GC.
- PretenureFlag tenure = Heap::InNewSpace(this) ? pretenure : TENURED;
+ PretenureFlag tenure = heap->InNewSpace(this) ? pretenure : TENURED;
int len = length();
Object* object;
String* result;
if (IsAsciiRepresentation()) {
- { MaybeObject* maybe_object = Heap::AllocateRawAsciiString(len, tenure);
+ { MaybeObject* maybe_object = heap->AllocateRawAsciiString(len, tenure);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
result = String::cast(object);
@@ -669,7 +773,7 @@ MaybeObject* String::SlowTryFlatten(PretenureFlag pretenure) {
len - first_length);
} else {
{ MaybeObject* maybe_object =
- Heap::AllocateRawTwoByteString(len, tenure);
+ heap->AllocateRawTwoByteString(len, tenure);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
result = String::cast(object);
@@ -684,7 +788,7 @@ MaybeObject* String::SlowTryFlatten(PretenureFlag pretenure) {
len - first_length);
}
cs->set_first(result);
- cs->set_second(Heap::empty_string());
+ cs->set_second(heap->empty_string());
return result;
}
default:
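
The cons-string arm above copies both halves into one freshly allocated flat string, then rewrites the cons cell so that first points at the flat result and second becomes the empty string; later reads therefore hit the flat copy without re-walking the tree. The same idea over std::string (a toy model; the real code allocates raw one- or two-byte strings and honors pretenuring):

  #include <iostream>
  #include <string>

  struct ConsString {  // two pieces that logically concatenate
    std::string first;
    std::string second;
  };

  std::string Flatten(ConsString* cs) {
    std::string flat;
    flat.reserve(cs->first.size() + cs->second.size());
    flat.append(cs->first).append(cs->second);
    cs->first = flat;    // cs->set_first(result)
    cs->second.clear();  // cs->set_second(heap->empty_string())
    return flat;
  }

  int main() {
    ConsString cs = {"flat", "tened"};
    std::cout << Flatten(&cs) << std::endl;  // flattened
  }
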
@@ -708,7 +812,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
resource->length() * sizeof(smart_chars[0])) == 0);
}
#endif // DEBUG
-
+ Heap* heap = GetHeap();
int size = this->Size(); // Byte size of the original string.
if (size < ExternalString::kSize) {
// The string is too small to fit an external String in its place. This can
@@ -724,8 +828,8 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Morph the object to an external string by adjusting the map and
// reinitializing the fields.
this->set_map(is_ascii ?
- Heap::external_string_with_ascii_data_map() :
- Heap::external_string_map());
+ heap->external_string_with_ascii_data_map() :
+ heap->external_string_map());
ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
self->set_length(length);
self->set_hash_field(hash_field);
@@ -736,13 +840,13 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
self->Hash(); // Force regeneration of the hash value.
// Now morph this external string into an external symbol.
this->set_map(is_ascii ?
- Heap::external_symbol_with_ascii_data_map() :
- Heap::external_symbol_map());
+ heap->external_symbol_with_ascii_data_map() :
+ heap->external_symbol_map());
}
// Fill the remainder of the string with dead wood.
int new_size = this->Size(); // Byte size of the external String object.
- Heap::CreateFillerObjectAt(this->address() + new_size, size - new_size);
+ heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
return true;
}
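
MakeExternal morphs the string in place: swap the map, reinitialize the ExternalString fields, and overwrite the now-unused tail of the original allocation with a filler so the heap still parses as a sequence of valid objects. A byte-buffer model of that last step (the marker byte is invented; V8 writes real filler maps, not a byte pattern):

  #include <cstdio>
  #include <cstring>

  const unsigned char kFillerMarker = 0xFE;  // stand-in for a filler object

  void CreateFillerObjectAt(unsigned char* address, int size) {
    std::memset(address, kFillerMarker, size);  // cover the "dead wood"
  }

  int main() {
    unsigned char object[32];
    std::memset(object, 0xAB, sizeof(object));       // original string payload
    int new_size = 12;                               // external form is smaller
    CreateFillerObjectAt(object + new_size, sizeof(object) - new_size);
    std::printf("tail byte: 0x%02X\n", object[31]);  // 0xFE
  }
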
@@ -759,7 +863,7 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
resource->length() * sizeof(smart_chars[0])) == 0);
}
#endif // DEBUG
-
+ Heap* heap = GetHeap();
int size = this->Size(); // Byte size of the original string.
if (size < ExternalString::kSize) {
// The string is too small to fit an external String in its place. This can
@@ -773,7 +877,7 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
// Morph the object to an external string by adjusting the map and
// reinitializing the fields.
- this->set_map(Heap::external_ascii_string_map());
+ this->set_map(heap->external_ascii_string_map());
ExternalAsciiString* self = ExternalAsciiString::cast(this);
self->set_length(length);
self->set_hash_field(hash_field);
@@ -783,12 +887,12 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
if (is_symbol) {
self->Hash(); // Force regeneration of the hash value.
// Now morph this external string into an external symbol.
- this->set_map(Heap::external_ascii_symbol_map());
+ this->set_map(heap->external_ascii_symbol_map());
}
// Fill the remainder of the string with dead wood.
int new_size = this->Size(); // Byte size of the external String object.
- Heap::CreateFillerObjectAt(this->address() + new_size, size - new_size);
+ heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
return true;
}
@@ -887,15 +991,17 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
// All other JSObjects are rather similar to each other (JSObject,
// JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
default: {
- Object* constructor = map()->constructor();
+ Map* map_of_this = map();
+ Heap* heap = map_of_this->heap();
+ Object* constructor = map_of_this->constructor();
bool printed = false;
if (constructor->IsHeapObject() &&
- !Heap::Contains(HeapObject::cast(constructor))) {
+ !heap->Contains(HeapObject::cast(constructor))) {
accumulator->Add("!!!INVALID CONSTRUCTOR!!!");
} else {
bool global_object = IsJSGlobalProxy();
if (constructor->IsJSFunction()) {
- if (!Heap::Contains(JSFunction::cast(constructor)->shared())) {
+ if (!heap->Contains(JSFunction::cast(constructor)->shared())) {
accumulator->Add("!!!INVALID SHARED ON CONSTRUCTOR!!!");
} else {
Object* constructor_name =
@@ -930,12 +1036,13 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
- // if (!Heap::InNewSpace(this)) PrintF("*", this);
- if (!Heap::Contains(this)) {
+ // if (!HEAP->InNewSpace(this)) PrintF("*", this);
+ Heap* heap = GetHeap();
+ if (!heap->Contains(this)) {
accumulator->Add("!!!INVALID POINTER!!!");
return;
}
- if (!Heap::Contains(map())) {
+ if (!heap->Contains(map())) {
accumulator->Add("!!!INVALID MAP!!!");
return;
}
@@ -960,8 +1067,9 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
case BYTE_ARRAY_TYPE:
accumulator->Add("<ByteArray[%u]>", ByteArray::cast(this)->length());
break;
- case PIXEL_ARRAY_TYPE:
- accumulator->Add("<PixelArray[%u]>", PixelArray::cast(this)->length());
+ case EXTERNAL_PIXEL_ARRAY_TYPE:
+ accumulator->Add("<ExternalPixelArray[%u]>",
+ ExternalPixelArray::cast(this)->length());
break;
case EXTERNAL_BYTE_ARRAY_TYPE:
accumulator->Add("<ExternalByteArray[%u]>",
@@ -991,6 +1099,10 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<ExternalFloatArray[%u]>",
ExternalFloatArray::cast(this)->length());
break;
+ case EXTERNAL_DOUBLE_ARRAY_TYPE:
+ accumulator->Add("<ExternalDoubleArray[%u]>",
+ ExternalDoubleArray::cast(this)->length());
+ break;
case SHARED_FUNCTION_INFO_TYPE:
accumulator->Add("<SharedFunctionInfo>");
break;
@@ -1028,8 +1140,8 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
HeapNumber::cast(this)->HeapNumberPrint(accumulator);
accumulator->Put('>');
break;
- case PROXY_TYPE:
- accumulator->Add("<Proxy>");
+ case FOREIGN_TYPE:
+ accumulator->Add("<Foreign>");
break;
case JS_GLOBAL_PROPERTY_CELL_TYPE:
accumulator->Add("Cell for ");
@@ -1079,6 +1191,8 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case FIXED_ARRAY_TYPE:
FixedArray::BodyDescriptor::IterateBody(this, object_size, v);
break;
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ break;
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_VALUE_TYPE:
@@ -1097,8 +1211,11 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case ODDBALL_TYPE:
Oddball::BodyDescriptor::IterateBody(this, v);
break;
- case PROXY_TYPE:
- reinterpret_cast<Proxy*>(this)->ProxyIterateBody(v);
+ case JS_PROXY_TYPE:
+ JSProxy::BodyDescriptor::IterateBody(this, v);
+ break;
+ case FOREIGN_TYPE:
+ reinterpret_cast<Foreign*>(this)->ForeignIterateBody(v);
break;
case MAP_TYPE:
Map::BodyDescriptor::IterateBody(this, v);
@@ -1112,7 +1229,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case HEAP_NUMBER_TYPE:
case FILLER_TYPE:
case BYTE_ARRAY_TYPE:
- case PIXEL_ARRAY_TYPE:
+ case EXTERNAL_PIXEL_ARRAY_TYPE:
case EXTERNAL_BYTE_ARRAY_TYPE:
case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
case EXTERNAL_SHORT_ARRAY_TYPE:
@@ -1120,6 +1237,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case EXTERNAL_INT_ARRAY_TYPE:
case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
case EXTERNAL_FLOAT_ARRAY_TYPE:
+ case EXTERNAL_DOUBLE_ARRAY_TYPE:
break;
case SHARED_FUNCTION_INFO_TYPE:
SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
@@ -1149,14 +1267,14 @@ Object* HeapNumber::HeapNumberToBoolean() {
if (u.bits.exp == 2047) {
// Detect NaN for IEEE double precision floating point.
if ((u.bits.man_low | u.bits.man_high) != 0)
- return Heap::false_value();
+ return GetHeap()->false_value();
}
if (u.bits.exp == 0) {
// Detect +0, and -0 for IEEE double precision floating point.
if ((u.bits.man_low | u.bits.man_high) == 0)
- return Heap::false_value();
+ return GetHeap()->false_value();
}
- return Heap::true_value();
+ return GetHeap()->true_value();
}
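
HeapNumberToBoolean answers ToBoolean for a boxed double by reading the IEEE-754 fields directly: an all-ones exponent with a non-zero mantissa is NaN, a zero exponent with a zero mantissa is +0 or -0, and those are the only false cases. The same test in portable C++, using memcpy instead of the union:

  #include <cstdint>
  #include <cstring>
  #include <iostream>

  bool DoubleToBoolean(double d) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);             // well-defined type pun
    uint64_t exponent = (bits >> 52) & 0x7FF;        // 11 exponent bits
    uint64_t mantissa = bits & 0xFFFFFFFFFFFFFull;   // 52 mantissa bits
    if (exponent == 2047 && mantissa != 0) return false;  // NaN
    if (exponent == 0 && mantissa == 0) return false;     // +0 or -0
    return true;  // everything else, including infinities and denormals
  }

  int main() {
    std::cout << DoubleToBoolean(0.0 / 0.0)   // 0 (NaN)
              << DoubleToBoolean(-0.0)        // 0
              << DoubleToBoolean(3.14)        // 1
              << std::endl;
  }
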
@@ -1178,20 +1296,20 @@ void HeapNumber::HeapNumberPrint(StringStream* accumulator) {
}
-String* JSObject::class_name() {
- if (IsJSFunction()) {
- return Heap::function_class_symbol();
+String* JSReceiver::class_name() {
+ if (IsJSFunction() || IsJSFunctionProxy()) {
+ return GetHeap()->function_class_symbol();
}
if (map()->constructor()->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(map()->constructor());
return String::cast(constructor->shared()->instance_class_name());
}
// If the constructor is not present, return "Object".
- return Heap::Object_symbol();
+ return GetHeap()->Object_symbol();
}
-String* JSObject::constructor_name() {
+String* JSReceiver::constructor_name() {
if (map()->constructor()->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(map()->constructor());
String* name = String::cast(constructor->shared()->name());
@@ -1201,8 +1319,9 @@ String* JSObject::constructor_name() {
Object* proto = GetPrototype();
if (proto->IsJSObject()) return JSObject::cast(proto)->constructor_name();
}
+ // TODO(rossberg): what about proxies?
// If the constructor is not present, return "Object".
- return Heap::Object_symbol();
+ return GetHeap()->Object_symbol();
}
@@ -1225,6 +1344,22 @@ MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
}
+static bool IsIdentifier(UnicodeCache* cache,
+ unibrow::CharacterStream* buffer) {
+ // Checks whether the buffer contains an identifier (no escape).
+ if (!buffer->has_more()) return false;
+ if (!cache->IsIdentifierStart(buffer->GetNext())) {
+ return false;
+ }
+ while (buffer->has_more()) {
+ if (!cache->IsIdentifierPart(buffer->GetNext())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
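
The file-local IsIdentifier above replaces the old static ScannerConstants check with one that consults the per-isolate UnicodeCache: one predicate for the first character, another for every following character. An ASCII-only analogue of the same scan (V8's cache answers these questions over full Unicode, which this sketch does not attempt):

  #include <cctype>
  #include <string>

  bool IsAsciiIdentifier(const std::string& s) {
    if (s.empty()) return false;                       // !buffer->has_more()
    unsigned char c0 = static_cast<unsigned char>(s[0]);
    if (!std::isalpha(c0) && c0 != '$' && c0 != '_') return false;
    for (size_t i = 1; i < s.size(); ++i) {            // IsIdentifierPart loop
      unsigned char c = static_cast<unsigned char>(s[i]);
      if (!std::isalnum(c) && c != '$' && c != '_') return false;
    }
    return true;
  }
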
MaybeObject* JSObject::AddFastProperty(String* name,
Object* value,
PropertyAttributes attributes) {
@@ -1232,9 +1367,10 @@ MaybeObject* JSObject::AddFastProperty(String* name,
// Normalize the object if the name is an actual string (not the
// hidden symbols) and is not a real identifier.
+ Isolate* isolate = GetHeap()->isolate();
StringInputBuffer buffer(name);
- if (!ScannerConstants::IsIdentifier(&buffer)
- && name != Heap::hidden_symbol()) {
+ if (!IsIdentifier(isolate->unicode_cache(), &buffer)
+ && name != isolate->heap()->hidden_symbol()) {
Object* obj;
{ MaybeObject* maybe_obj =
NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
@@ -1257,11 +1393,21 @@ MaybeObject* JSObject::AddFastProperty(String* name,
}
}
- // Only allow map transition if the object's map is NOT equal to the
- // global object_function's map and there is not a transition for name.
+ // Only allow map transition if the object isn't the global object and there
+ // is not a transition for the name, or there's a transition for the name but
+ // it's unrelated to properties.
+ int descriptor_index = old_descriptors->Search(name);
+
+ // External array transitions are stored in the descriptor for property "",
+ // which is not an identifier and should have forced a switch to slow
+ // properties above.
+ ASSERT(descriptor_index == DescriptorArray::kNotFound ||
+ old_descriptors->GetType(descriptor_index) != EXTERNAL_ARRAY_TRANSITION);
+ bool can_insert_transition = descriptor_index == DescriptorArray::kNotFound ||
+ old_descriptors->GetType(descriptor_index) == EXTERNAL_ARRAY_TRANSITION;
bool allow_map_transition =
- !old_descriptors->Contains(name) &&
- (Top::context()->global_context()->object_function()->map() != map());
+ can_insert_transition &&
+ (isolate->context()->global_context()->object_function()->map() != map());
ASSERT(index < map()->inobject_properties() ||
(index - map()->inobject_properties()) < properties()->length() ||
@@ -1315,7 +1461,7 @@ MaybeObject* JSObject::AddConstantFunctionProperty(
String* name,
JSFunction* function,
PropertyAttributes attributes) {
- ASSERT(!Heap::InNewSpace(function));
+ ASSERT(!GetHeap()->InNewSpace(function));
// Allocate new instance descriptors with (name, function) added
ConstantFunctionDescriptor d(name, function, attributes);
@@ -1340,7 +1486,9 @@ MaybeObject* JSObject::AddConstantFunctionProperty(
// If the old map is the global object map (from new Object()),
// then transitions are not added to it, so we are done.
- if (old_map == Top::context()->global_context()->object_function()->map()) {
+ Heap* heap = old_map->heap();
+ if (old_map == heap->isolate()->context()->global_context()->
+ object_function()->map()) {
return function;
}
@@ -1391,8 +1539,9 @@ MaybeObject* JSObject::AddSlowProperty(String* name,
dict->SetEntry(entry, name, store_value, details);
return value;
}
+ Heap* heap = GetHeap();
{ MaybeObject* maybe_store_value =
- Heap::AllocateJSGlobalPropertyCell(value);
+ heap->AllocateJSGlobalPropertyCell(value);
if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
}
JSGlobalPropertyCell::cast(store_value)->set_value(value);
@@ -1409,18 +1558,26 @@ MaybeObject* JSObject::AddSlowProperty(String* name,
MaybeObject* JSObject::AddProperty(String* name,
Object* value,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode) {
ASSERT(!IsJSGlobalProxy());
- if (!map()->is_extensible()) {
- Handle<Object> args[1] = {Handle<String>(name)};
- return Top::Throw(*Factory::NewTypeError("object_not_extensible",
- HandleVector(args, 1)));
+ Map* map_of_this = map();
+ Heap* heap = map_of_this->heap();
+ if (!map_of_this->is_extensible()) {
+ if (strict_mode == kNonStrictMode) {
+ return heap->undefined_value();
+ } else {
+ Handle<Object> args[1] = {Handle<String>(name)};
+ return heap->isolate()->Throw(
+ *FACTORY->NewTypeError("object_not_extensible",
+ HandleVector(args, 1)));
+ }
}
if (HasFastProperties()) {
// Ensure the descriptor array does not get too big.
- if (map()->instance_descriptors()->number_of_descriptors() <
+ if (map_of_this->instance_descriptors()->number_of_descriptors() <
DescriptorArray::kMaxNumberOfDescriptors) {
- if (value->IsJSFunction() && !Heap::InNewSpace(value)) {
+ if (value->IsJSFunction() && !heap->InNewSpace(value)) {
return AddConstantFunctionProperty(name,
JSFunction::cast(value),
attributes);
@@ -1445,17 +1602,17 @@ MaybeObject* JSObject::SetPropertyPostInterceptor(
String* name,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict) {
+ StrictModeFlag strict_mode) {
// Check local property, ignore interceptor.
LookupResult result;
LocalLookupRealNamedProperty(name, &result);
if (result.IsFound()) {
// An existing property, a map transition or a null descriptor was
// found. Use set property to handle all these cases.
- return SetProperty(&result, name, value, attributes, strict);
+ return SetProperty(&result, name, value, attributes, strict_mode);
}
// Add a new real property.
- return AddProperty(name, value, attributes);
+ return AddProperty(name, value, attributes, strict_mode);
}
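
AddProperty's new strict_mode parameter decides what a write to a non-extensible object does: in sloppy mode it fails silently (the caller gets undefined back), in strict mode it throws a TypeError. That split in miniature, with a plain std::map playing the object (the error text reuses the object_not_extensible message key from above):

  #include <map>
  #include <stdexcept>
  #include <string>

  enum StrictModeFlag { kNonStrictMode, kStrictMode };

  void AddProperty(std::map<std::string, int>* object, bool extensible,
                   const std::string& name, int value, StrictModeFlag mode) {
    if (!extensible) {
      if (mode == kNonStrictMode) return;  // silent failure, value dropped
      throw std::runtime_error("TypeError: object_not_extensible: " + name);
    }
    (*object)[name] = value;
  }

Interceptor and callback paths such as SetPropertyPostInterceptor simply thread strict_mode through to this decision point.
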
@@ -1492,7 +1649,8 @@ MaybeObject* JSObject::ConvertDescriptorToFieldAndMapTransition(
return result;
}
// Do not add transitions to the map of "new Object()".
- if (map() == Top::context()->global_context()->object_function()->map()) {
+ if (map() == old_map->heap()->isolate()->context()->global_context()->
+ object_function()->map()) {
return result;
}
@@ -1578,71 +1736,76 @@ MaybeObject* JSObject::SetPropertyWithInterceptor(
String* name,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict) {
- HandleScope scope;
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
Handle<JSObject> this_handle(this);
Handle<String> name_handle(name);
- Handle<Object> value_handle(value);
+ Handle<Object> value_handle(value, isolate);
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
if (!interceptor->setter()->IsUndefined()) {
- LOG(ApiNamedPropertyAccess("interceptor-named-set", this, name));
- CustomArguments args(interceptor->data(), this, this);
+ LOG(isolate, ApiNamedPropertyAccess("interceptor-named-set", this, name));
+ CustomArguments args(isolate, interceptor->data(), this, this);
v8::AccessorInfo info(args.end());
v8::NamedPropertySetter setter =
v8::ToCData<v8::NamedPropertySetter>(interceptor->setter());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
Handle<Object> value_unhole(value->IsTheHole() ?
- Heap::undefined_value() :
- value);
+ isolate->heap()->undefined_value() :
+ value,
+ isolate);
result = setter(v8::Utils::ToLocal(name_handle),
v8::Utils::ToLocal(value_unhole),
info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) return *value_handle;
}
MaybeObject* raw_result =
this_handle->SetPropertyPostInterceptor(*name_handle,
*value_handle,
attributes,
- strict);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ strict_mode);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return raw_result;
}
-MaybeObject* JSObject::SetProperty(String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict) {
+MaybeObject* JSReceiver::SetProperty(String* name,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode) {
LookupResult result;
LocalLookup(name, &result);
- return SetProperty(&result, name, value, attributes, strict);
+ return SetProperty(&result, name, value, attributes, strict_mode);
}
MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
String* name,
Object* value,
- JSObject* holder) {
- HandleScope scope;
+ JSObject* holder,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
// We should never get here to initialize a const with the hole
// value since a const declaration would conflict with the setter.
ASSERT(!value->IsTheHole());
- Handle<Object> value_handle(value);
+ Handle<Object> value_handle(value, isolate);
// To accommodate both the old and the new api we switch on the
- // data structure used to store the callbacks. Eventually proxy
+ // data structure used to store the callbacks. Eventually foreign
// callbacks should be phased out.
- if (structure->IsProxy()) {
+ if (structure->IsForeign()) {
AccessorDescriptor* callback =
- reinterpret_cast<AccessorDescriptor*>(Proxy::cast(structure)->proxy());
+ reinterpret_cast<AccessorDescriptor*>(
+ Foreign::cast(structure)->address());
MaybeObject* obj = (callback->setter)(this, value, callback->data);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (obj->IsFailure()) return obj;
return *value_handle;
}
@@ -1654,17 +1817,17 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
if (call_fun == NULL) return value;
Handle<String> key(name);
- LOG(ApiNamedPropertyAccess("store", this, name));
- CustomArguments args(data->data(), this, JSObject::cast(holder));
+ LOG(isolate, ApiNamedPropertyAccess("store", this, name));
+ CustomArguments args(isolate, data->data(), this, JSObject::cast(holder));
v8::AccessorInfo info(args.end());
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
call_fun(v8::Utils::ToLocal(key),
v8::Utils::ToLocal(value_handle),
info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *value_handle;
}
@@ -1673,11 +1836,15 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
if (setter->IsJSFunction()) {
return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
} else {
+ if (strict_mode == kNonStrictMode) {
+ return value;
+ }
Handle<String> key(name);
- Handle<Object> holder_handle(holder);
+ Handle<Object> holder_handle(holder, isolate);
Handle<Object> args[2] = { key, holder_handle };
- return Top::Throw(*Factory::NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
+ return isolate->Throw(
+ *isolate->factory()->NewTypeError("no_setter_in_callback",
+ HandleVector(args, 2)));
}
}
@@ -1688,13 +1855,15 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
MaybeObject* JSObject::SetPropertyWithDefinedSetter(JSFunction* setter,
Object* value) {
- Handle<Object> value_handle(value);
- Handle<JSFunction> fun(JSFunction::cast(setter));
- Handle<JSObject> self(this);
+ Isolate* isolate = GetIsolate();
+ Handle<Object> value_handle(value, isolate);
+ Handle<JSFunction> fun(JSFunction::cast(setter), isolate);
+ Handle<JSObject> self(this, isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debug* debug = isolate->debug();
// Handle stepping into a setter if step into is active.
- if (Debug::StepInActive()) {
- Debug::HandleStepIn(fun, Handle<Object>::null(), 0, false);
+ if (debug->StepInActive()) {
+ debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
}
#endif
bool has_pending_exception;
@@ -1708,8 +1877,9 @@ MaybeObject* JSObject::SetPropertyWithDefinedSetter(JSFunction* setter,
void JSObject::LookupCallbackSetterInPrototypes(String* name,
LookupResult* result) {
+ Heap* heap = GetHeap();
for (Object* pt = GetPrototype();
- pt != Heap::null_value();
+ pt != heap->null_value();
pt = pt->GetPrototype()) {
JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
if (result->IsProperty()) {
@@ -1726,14 +1896,17 @@ void JSObject::LookupCallbackSetterInPrototypes(String* name,
}
-MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(uint32_t index,
- Object* value,
- bool* found) {
+MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
+ uint32_t index,
+ Object* value,
+ bool* found,
+ StrictModeFlag strict_mode) {
+ Heap* heap = GetHeap();
for (Object* pt = GetPrototype();
- pt != Heap::null_value();
+ pt != heap->null_value();
pt = pt->GetPrototype()) {
if (!JSObject::cast(pt)->HasDictionaryElements()) {
- continue;
+ continue;
}
NumberDictionary* dictionary = JSObject::cast(pt)->element_dictionary();
int entry = dictionary->FindEntry(index);
@@ -1741,13 +1914,16 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(uint32_t index,
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
*found = true;
- return SetElementWithCallback(
- dictionary->ValueAt(entry), index, value, JSObject::cast(pt));
+ return SetElementWithCallback(dictionary->ValueAt(entry),
+ index,
+ value,
+ JSObject::cast(pt),
+ strict_mode);
}
}
}
*found = false;
- return Heap::the_hole_value();
+ return heap->the_hole_value();
}
@@ -1766,10 +1942,11 @@ void Map::LookupInDescriptors(JSObject* holder,
String* name,
LookupResult* result) {
DescriptorArray* descriptors = instance_descriptors();
- int number = DescriptorLookupCache::Lookup(descriptors, name);
+ DescriptorLookupCache* cache = heap()->isolate()->descriptor_lookup_cache();
+ int number = cache->Lookup(descriptors, name);
if (number == DescriptorLookupCache::kAbsent) {
number = descriptors->Search(name);
- DescriptorLookupCache::Update(descriptors, name, number);
+ cache->Update(descriptors, name, number);
}
if (number != DescriptorArray::kNotFound) {
result->DescriptorResult(holder, descriptors->GetDetails(number), number);
@@ -1779,6 +1956,114 @@ void Map::LookupInDescriptors(JSObject* holder,
}
+static JSObject::ElementsKind GetElementsKindFromExternalArrayType(
+ ExternalArrayType array_type) {
+ switch (array_type) {
+ case kExternalByteArray:
+ return JSObject::EXTERNAL_BYTE_ELEMENTS;
+ case kExternalUnsignedByteArray:
+ return JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
+ case kExternalShortArray:
+ return JSObject::EXTERNAL_SHORT_ELEMENTS;
+ case kExternalUnsignedShortArray:
+ return JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
+ case kExternalIntArray:
+ return JSObject::EXTERNAL_INT_ELEMENTS;
+ case kExternalUnsignedIntArray:
+ return JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS;
+ case kExternalFloatArray:
+ return JSObject::EXTERNAL_FLOAT_ELEMENTS;
+ case kExternalDoubleArray:
+ return JSObject::EXTERNAL_DOUBLE_ELEMENTS;
+ case kExternalPixelArray:
+ return JSObject::EXTERNAL_PIXEL_ELEMENTS;
+ }
+ UNREACHABLE();
+ return JSObject::DICTIONARY_ELEMENTS;
+}
+
+
+MaybeObject* Map::GetExternalArrayElementsMap(ExternalArrayType array_type,
+ bool safe_to_add_transition) {
+ Heap* current_heap = heap();
+ DescriptorArray* descriptors = instance_descriptors();
+ String* external_array_sentinel_name = current_heap->empty_symbol();
+
+ if (safe_to_add_transition) {
+ // It's only safe to manipulate the descriptor array if it would be
+ // safe to add a transition.
+
+ ASSERT(!is_shared()); // no transitions can be added to shared maps.
+ // Check if the external array transition already exists.
+ DescriptorLookupCache* cache =
+ current_heap->isolate()->descriptor_lookup_cache();
+ int index = cache->Lookup(descriptors, external_array_sentinel_name);
+ if (index == DescriptorLookupCache::kAbsent) {
+ index = descriptors->Search(external_array_sentinel_name);
+ cache->Update(descriptors,
+ external_array_sentinel_name,
+ index);
+ }
+
+ // If the transition already exists, check the type. If there is a match,
+ // return it.
+ if (index != DescriptorArray::kNotFound) {
+ PropertyDetails details(PropertyDetails(descriptors->GetDetails(index)));
+ if (details.type() == EXTERNAL_ARRAY_TRANSITION &&
+ details.array_type() == array_type) {
+ return descriptors->GetValue(index);
+ } else {
+ safe_to_add_transition = false;
+ }
+ }
+ }
+
+ // No transition to an existing external array map. Make a new one.
+ Object* obj;
+ { MaybeObject* maybe_map = CopyDropTransitions();
+ if (!maybe_map->ToObject(&obj)) return maybe_map;
+ }
+ Map* new_map = Map::cast(obj);
+
+ new_map->set_elements_kind(GetElementsKindFromExternalArrayType(array_type));
+ GetIsolate()->counters()->map_to_external_array_elements()->Increment();
+
+ // Only remember the map transition if the object's map is NOT equal to the
+ // global object_function's map and there is not an already existing
+ // non-matching external array transition.
+ bool allow_map_transition =
+ safe_to_add_transition &&
+ (GetIsolate()->context()->global_context()->object_function()->map() !=
+ this);
+ if (allow_map_transition) {
+ // Allocate new instance descriptors for the old map with map transition.
+ ExternalArrayTransitionDescriptor desc(external_array_sentinel_name,
+ Map::cast(new_map),
+ array_type);
+ Object* new_descriptors;
+ MaybeObject* maybe_new_descriptors = descriptors->CopyInsert(
+ &desc,
+ KEEP_TRANSITIONS);
+ if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
+ return maybe_new_descriptors;
+ }
+ descriptors = DescriptorArray::cast(new_descriptors);
+ set_instance_descriptors(descriptors);
+ }
+
+ return new_map;
+}
+
+
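
Map::GetExternalArrayElementsMap is a transition cache: probe the descriptor array (through the per-isolate DescriptorLookupCache) for a transition stored under the empty-symbol sentinel, reuse the target map when its array type matches, and otherwise copy the map and, when safe, record the new edge so the next object migrating the same way finds it. The caching skeleton in miniature (every type here is a stand-in, not V8's):

  #include <map>

  enum ArrayType { kByte, kFloat, kDouble };

  struct MiniMap {
    ArrayType elements_kind;
    std::map<ArrayType, MiniMap*> transitions;  // recorded edges
  };

  MiniMap* GetExternalArrayMap(MiniMap* current, ArrayType type,
                               bool safe_to_add_transition) {
    if (safe_to_add_transition) {
      std::map<ArrayType, MiniMap*>::iterator it =
          current->transitions.find(type);
      if (it != current->transitions.end()) return it->second;  // cache hit
    }
    MiniMap* fresh = new MiniMap();    // stands in for CopyDropTransitions()
    fresh->elements_kind = type;
    if (safe_to_add_transition) {
      current->transitions[type] = fresh;  // remember the transition edge
    }
    return fresh;
  }

Keying the sketch's map by ArrayType makes the "same name, wrong array type" collision from the real code impossible by construction; V8 stores all such transitions under one property name and must compare details.array_type() explicitly.
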
void JSObject::LocalLookupRealNamedProperty(String* name,
LookupResult* result) {
if (IsJSGlobalProxy()) {
@@ -1837,8 +2122,9 @@ void JSObject::LookupRealNamedProperty(String* name, LookupResult* result) {
void JSObject::LookupRealNamedPropertyInPrototypes(String* name,
LookupResult* result) {
+ Heap* heap = GetHeap();
for (Object* pt = GetPrototype();
- pt != Heap::null_value();
+ pt != heap->null_value();
pt = JSObject::cast(pt)->GetPrototype()) {
JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
if (result->IsProperty() && (result->type() != INTERCEPTOR)) return;
@@ -1848,10 +2134,12 @@ void JSObject::LookupRealNamedPropertyInPrototypes(String* name,
// We only need to deal with CALLBACKS and INTERCEPTORS
-MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(LookupResult* result,
- String* name,
- Object* value,
- bool check_prototype) {
+MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
+ LookupResult* result,
+ String* name,
+ Object* value,
+ bool check_prototype,
+ StrictModeFlag strict_mode) {
if (check_prototype && !result->IsProperty()) {
LookupCallbackSetterInPrototypes(name, result);
}
@@ -1867,7 +2155,8 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(LookupResult* result,
return SetPropertyWithCallback(result->GetCallbackObject(),
name,
value,
- result->holder());
+ result->holder(),
+ strict_mode);
}
}
break;
@@ -1878,8 +2167,11 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(LookupResult* result,
LookupResult r;
LookupRealNamedProperty(name, &r);
if (r.IsProperty()) {
- return SetPropertyWithFailedAccessCheck(&r, name, value,
- check_prototype);
+ return SetPropertyWithFailedAccessCheck(&r,
+ name,
+ value,
+ check_prototype,
+ strict_mode);
}
break;
}
@@ -1892,16 +2184,100 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(LookupResult* result,
HandleScope scope;
Handle<Object> value_handle(value);
- Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ Heap* heap = GetHeap();
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
return *value_handle;
}
-MaybeObject* JSObject::SetProperty(LookupResult* result,
+MaybeObject* JSReceiver::SetProperty(LookupResult* result,
+ String* key,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode) {
+ if (result->IsFound() && result->type() == HANDLER) {
+ return JSProxy::cast(this)->SetPropertyWithHandler(
+ key, value, attributes, strict_mode);
+ } else {
+ return JSObject::cast(this)->SetPropertyForResult(
+ result, key, value, attributes, strict_mode);
+ }
+}
+
+
+MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler(
+ String* name_raw,
+ Object* value_raw,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = GetIsolate();
+ HandleScope scope;
+ Handle<Object> receiver(this);
+ Handle<Object> name(name_raw);
+ Handle<Object> value(value_raw);
+ Handle<Object> handler(this->handler());
+
+ // Extract trap function.
+ Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("set");
+ Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+ if (trap->IsUndefined()) {
+ trap = isolate->derived_set_trap();
+ }
+
+ // Call trap function.
+ Object** args[] = {
+ receiver.location(), name.location(), value.location()
+ };
+ bool has_exception;
+ Handle<Object> result =
+ Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+ if (has_exception) return Failure::Exception();
+
+ return *value;
+}
+
+
+MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
+ JSReceiver* receiver_raw,
+ String* name_raw,
+ bool* has_exception) {
+ Isolate* isolate = GetIsolate();
+ HandleScope scope;
+ Handle<JSReceiver> receiver(receiver_raw);
+ Handle<Object> name(name_raw);
+ Handle<Object> handler(this->handler());
+
+ // Extract trap function.
+ Handle<String> trap_name =
+ isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+ Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+ if (trap->IsUndefined()) {
+ Handle<Object> args[] = { handler, trap_name };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ *has_exception = true;
+ return NONE;
+ }
+
+ // Call trap function.
+ Object** args[] = { name.location() };
+ Handle<Object> result =
+ Execution::Call(trap, handler, ARRAY_SIZE(args), args, has_exception);
+ if (*has_exception) return NONE;
+
+ // TODO(rossberg): convert result to PropertyAttributes
+ USE(result);
+ return NONE;
+}
+
+
+MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
String* name,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict) {
+ StrictModeFlag strict_mode) {
+ Heap* heap = GetHeap();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
@@ -1911,7 +2287,7 @@ MaybeObject* JSObject::SetProperty(LookupResult* result,
// reallocating them.
if (!name->IsSymbol() && name->length() <= 2) {
Object* symbol_version;
- { MaybeObject* maybe_symbol_version = Heap::LookupSymbol(name);
+ { MaybeObject* maybe_symbol_version = heap->LookupSymbol(name);
if (maybe_symbol_version->ToObject(&symbol_version)) {
name = String::cast(symbol_version);
}
@@ -1920,8 +2296,12 @@ MaybeObject* JSObject::SetProperty(LookupResult* result,
// Check access rights if needed.
if (IsAccessCheckNeeded()
- && !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(result, name, value, true);
+ && !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ return SetPropertyWithFailedAccessCheck(result,
+ name,
+ value,
+ true,
+ strict_mode);
}
if (IsJSGlobalProxy()) {
@@ -1929,7 +2309,7 @@ MaybeObject* JSObject::SetProperty(LookupResult* result,
if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->SetProperty(
- result, name, value, attributes, strict);
+ result, name, value, attributes, strict_mode);
}
if (!result->IsProperty() && !IsJSContextExtensionObject()) {
@@ -1941,22 +2321,22 @@ MaybeObject* JSObject::SetProperty(LookupResult* result,
return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
name,
value,
- accessor_result.holder());
+ accessor_result.holder(),
+ strict_mode);
}
}
if (!result->IsFound()) {
// Neither properties nor transitions found.
- return AddProperty(name, value, attributes);
+ return AddProperty(name, value, attributes, strict_mode);
}
if (result->IsReadOnly() && result->IsProperty()) {
- if (strict == kStrictMode) {
+ if (strict_mode == kStrictMode) {
HandleScope scope;
Handle<String> key(name);
Handle<Object> holder(this);
Handle<Object> args[2] = { key, holder };
- return Top::Throw(*Factory::NewTypeError("strict_read_only_property",
- HandleVector(args, 2)));
-
+ return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError(
+ "strict_read_only_property", HandleVector(args, 2)));
} else {
return value;
}
@@ -1986,9 +2366,10 @@ MaybeObject* JSObject::SetProperty(LookupResult* result,
return SetPropertyWithCallback(result->GetCallbackObject(),
name,
value,
- result->holder());
+ result->holder(),
+ strict_mode);
case INTERCEPTOR:
- return SetPropertyWithInterceptor(name, value, attributes, strict);
+ return SetPropertyWithInterceptor(name, value, attributes, strict_mode);
case CONSTANT_TRANSITION: {
// If the same constant function is being added we can simply
// transition to the target map.
@@ -1999,7 +2380,7 @@ MaybeObject* JSObject::SetProperty(LookupResult* result,
ASSERT(target_descriptors->GetType(number) == CONSTANT_FUNCTION);
JSFunction* function =
JSFunction::cast(target_descriptors->GetValue(number));
- ASSERT(!Heap::InNewSpace(function));
+ ASSERT(!HEAP->InNewSpace(function));
if (value == function) {
set_map(target_map);
return value;
@@ -2009,6 +2390,7 @@ MaybeObject* JSObject::SetProperty(LookupResult* result,
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
}
case NULL_DESCRIPTOR:
+ case EXTERNAL_ARRAY_TRANSITION:
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
default:
UNREACHABLE();
@@ -2028,15 +2410,22 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
String* name,
Object* value,
PropertyAttributes attributes) {
+
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
LookupResult result;
LocalLookup(name, &result);
// Check access rights if needed.
- if (IsAccessCheckNeeded()
- && !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(&result, name, value, false);
+ if (IsAccessCheckNeeded()) {
+ Heap* heap = GetHeap();
+ if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ return SetPropertyWithFailedAccessCheck(&result,
+ name,
+ value,
+ false,
+ kNonStrictMode);
+ }
}
if (IsJSGlobalProxy()) {
@@ -2052,7 +2441,7 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
// Check for accessor in prototype chain removed here in clone.
if (!result.IsFound()) {
// Neither properties nor transitions found.
- return AddProperty(name, value, attributes);
+ return AddProperty(name, value, attributes, kNonStrictMode);
}
PropertyDetails details = PropertyDetails(attributes, NORMAL);
@@ -2086,6 +2475,7 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
// if the value is a function.
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
case NULL_DESCRIPTOR:
+ case EXTERNAL_ARRAY_TRANSITION:
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
default:
UNREACHABLE();
@@ -2107,7 +2497,7 @@ PropertyAttributes JSObject::GetPropertyAttributePostInterceptor(
if (continue_search) {
// Continue searching via the prototype chain.
Object* pt = GetPrototype();
- if (pt != Heap::null_value()) {
+ if (!pt->IsNull()) {
return JSObject::cast(pt)->
GetPropertyAttributeWithReceiver(receiver, name);
}
@@ -2120,25 +2510,28 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
JSObject* receiver,
String* name,
bool continue_search) {
+ Isolate* isolate = GetIsolate();
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(name);
- CustomArguments args(interceptor->data(), receiver, this);
+ CustomArguments args(isolate, interceptor->data(), receiver, this);
v8::AccessorInfo info(args.end());
if (!interceptor->query()->IsUndefined()) {
v8::NamedPropertyQuery query =
v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
- LOG(ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
v8::Handle<v8::Integer> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = query(v8::Utils::ToLocal(name_handle), info);
}
if (!result.IsEmpty()) {
@@ -2148,11 +2541,12 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
} else if (!interceptor->getter()->IsUndefined()) {
v8::NamedPropertyGetter getter =
v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
- LOG(ApiNamedPropertyAccess("interceptor-named-get-has", this, name));
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-get-has", this, name));
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = getter(v8::Utils::ToLocal(name_handle), info);
}
if (!result.IsEmpty()) return DONT_ENUM;
@@ -2163,12 +2557,13 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
}
-PropertyAttributes JSObject::GetPropertyAttributeWithReceiver(
- JSObject* receiver,
+PropertyAttributes JSReceiver::GetPropertyAttributeWithReceiver(
+ JSReceiver* receiver,
String* key) {
uint32_t index = 0;
- if (key->AsArrayIndex(&index)) {
- if (HasElementWithReceiver(receiver, index)) return NONE;
+ if (IsJSObject() && key->AsArrayIndex(&index)) {
+ if (JSObject::cast(this)->HasElementWithReceiver(receiver, index))
+ return NONE;
return ABSENT;
}
// Named property.
@@ -2178,17 +2573,18 @@ PropertyAttributes JSObject::GetPropertyAttributeWithReceiver(
}
-PropertyAttributes JSObject::GetPropertyAttribute(JSObject* receiver,
- LookupResult* result,
- String* name,
- bool continue_search) {
+PropertyAttributes JSReceiver::GetPropertyAttribute(JSReceiver* receiver,
+ LookupResult* result,
+ String* name,
+ bool continue_search) {
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(this, name, v8::ACCESS_HAS)) {
- return GetPropertyAttributeWithFailedAccessCheck(receiver,
- result,
- name,
- continue_search);
+ if (IsAccessCheckNeeded()) {
+ JSObject* this_obj = JSObject::cast(this);
+ Heap* heap = GetHeap();
+ if (!heap->isolate()->MayNamedAccess(this_obj, name, v8::ACCESS_HAS)) {
+ return this_obj->GetPropertyAttributeWithFailedAccessCheck(
+ receiver, result, name, continue_search);
+ }
}
if (result->IsProperty()) {
switch (result->type()) {
@@ -2197,9 +2593,15 @@ PropertyAttributes JSObject::GetPropertyAttribute(JSObject* receiver,
case CONSTANT_FUNCTION:
case CALLBACKS:
return result->GetAttributes();
+ case HANDLER: {
+ // TODO(rossberg): propagate exceptions properly.
+ bool has_exception = false;
+ return JSProxy::cast(this)->GetPropertyAttributeWithHandler(
+ receiver, name, &has_exception);
+ }
case INTERCEPTOR:
- return result->holder()->
- GetPropertyAttributeWithInterceptor(receiver, name, continue_search);
+ return result->holder()->GetPropertyAttributeWithInterceptor(
+ JSObject::cast(receiver), name, continue_search);
default:
UNREACHABLE();
}
@@ -2208,11 +2610,11 @@ PropertyAttributes JSObject::GetPropertyAttribute(JSObject* receiver,
}
-PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) {
+PropertyAttributes JSReceiver::GetLocalPropertyAttribute(String* name) {
// Check whether the name is an array index.
uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- if (HasLocalElement(index)) return NONE;
+ if (IsJSObject() && name->AsArrayIndex(&index)) {
+ if (JSObject::cast(this)->HasLocalElement(index)) return NONE;
return ABSENT;
}
// Named property.
@@ -2224,11 +2626,14 @@ PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) {
MaybeObject* NormalizedMapCache::Get(JSObject* obj,
PropertyNormalizationMode mode) {
+ Isolate* isolate = obj->GetIsolate();
Map* fast = obj->map();
- int index = Hash(fast) % kEntries;
+ int index = fast->Hash() % kEntries;
Object* result = get(index);
- if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
+ if (result->IsMap() &&
+ Map::cast(result)->EquivalentToForNormalization(fast, mode)) {
#ifdef DEBUG
+ Map::cast(result)->SharedMapVerify();
if (FLAG_enable_slow_asserts) {
// The cached map should match newly created normalized map bit-by-bit.
Object* fresh;
@@ -2250,7 +2655,7 @@ MaybeObject* NormalizedMapCache::Get(JSObject* obj,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
set(index, result);
- Counters::normalized_maps.Increment();
+ isolate->counters()->normalized_maps()->Increment();
return result;
}
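
NormalizedMapCache::Get is a direct-mapped cache: the map's hash picks one of kEntries slots, the occupant is reused only if it is still equivalent to the lookup key under the requested normalization mode, and on a miss the freshly built value overwrites the slot. The structure in miniature, keyed by strings (DirectMappedCache and its names are invented for this sketch):

  #include <array>
  #include <functional>
  #include <string>

  template <typename V, int kEntries = 64>
  class DirectMappedCache {
   public:
    V* Get(const std::string& key) {
      Entry& e = entries_[Index(key)];
      return (e.valid && e.key == key) ? &e.value : nullptr;  // hit or miss
    }
    void Put(const std::string& key, const V& value) {
      Entry& e = entries_[Index(key)];  // colliding keys simply overwrite
      e.valid = true;
      e.key = key;
      e.value = value;
    }
   private:
    struct Entry { bool valid = false; std::string key; V value; };
    size_t Index(const std::string& key) const {
      return std::hash<std::string>()(key) % kEntries;
    }
    std::array<Entry, kEntries> entries_;
  };
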
@@ -2264,42 +2669,6 @@ void NormalizedMapCache::Clear() {
}
-int NormalizedMapCache::Hash(Map* fast) {
- // For performance reasons we only hash the 3 most variable fields of a map:
- // constructor, prototype and bit_field2.
-
- // Shift away the tag.
- int hash = (static_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(fast->constructor())) >> 2);
-
- // XOR-ing the prototype and constructor directly yields too many zero bits
- // when the two pointers are close (which is fairly common).
- // To avoid this we shift the prototype 4 bits relatively to the constructor.
- hash ^= (static_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(fast->prototype())) << 2);
-
- return hash ^ (hash >> 16) ^ fast->bit_field2();
-}
-
-
-bool NormalizedMapCache::CheckHit(Map* slow,
- Map* fast,
- PropertyNormalizationMode mode) {
-#ifdef DEBUG
- slow->SharedMapVerify();
-#endif
- return
- slow->constructor() == fast->constructor() &&
- slow->prototype() == fast->prototype() &&
- slow->inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ?
- 0 :
- fast->inobject_properties()) &&
- slow->instance_type() == fast->instance_type() &&
- slow->bit_field() == fast->bit_field() &&
- (slow->bit_field2() & ~(1<<Map::kIsShared)) == fast->bit_field2();
-}
-
-
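
The hand-rolled hash deleted above (its job now done by Map::Hash() and EquivalentToForNormalization) mixed the three most variable map fields: shift the constructor pointer right to shed the low tag bits, shift the prototype pointer left so two nearby heap pointers do not cancel under XOR, then fold in the upper half word and bit_field2. Preserved here as a standalone function for reference, with uintptr_t arguments replacing the Map* getters:

  #include <cstdint>

  int NormalizedMapHash(uintptr_t constructor, uintptr_t prototype,
                        int bit_field2) {
    int hash = static_cast<uint32_t>(constructor) >> 2;  // shift away the tag
    hash ^= static_cast<uint32_t>(prototype) << 2;       // de-correlate pointers
    return hash ^ (hash >> 16) ^ bit_field2;             // fold in high bits
  }
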
MaybeObject* JSObject::UpdateMapCodeCache(String* name, Code* code) {
if (map()->is_shared()) {
// Fast case maps are never marked as shared.
@@ -2310,7 +2679,7 @@ MaybeObject* JSObject::UpdateMapCodeCache(String* name, Code* code) {
UNIQUE_NORMALIZED_MAP);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- Counters::normalized_maps.Increment();
+ GetIsolate()->counters()->normalized_maps()->Increment();
set_map(Map::cast(obj));
}
@@ -2324,12 +2693,13 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
// The global object is always normalized.
ASSERT(!IsGlobalObject());
-
// JSGlobalProxy must never be normalized
ASSERT(!IsJSGlobalProxy());
+ Map* map_of_this = map();
+
// Allocate new content.
- int property_count = map()->NumberOfDescribedProperties();
+ int property_count = map_of_this->NumberOfDescribedProperties();
if (expected_additional_properties > 0) {
property_count += expected_additional_properties;
} else {
@@ -2342,9 +2712,9 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
}
StringDictionary* dictionary = StringDictionary::cast(obj);
- DescriptorArray* descs = map()->instance_descriptors();
+ DescriptorArray* descs = map_of_this->instance_descriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PropertyDetails details = descs->GetDetails(i);
+ PropertyDetails details(descs->GetDetails(i));
switch (details.type()) {
case CONSTANT_FUNCTION: {
PropertyDetails d =
@@ -2386,18 +2756,22 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
case INTERCEPTOR:
+ case EXTERNAL_ARRAY_TRANSITION:
break;
default:
UNREACHABLE();
}
}
+ Heap* current_heap = map_of_this->heap();
+
// Copy the next enumeration index from instance descriptor.
- int index = map()->instance_descriptors()->NextEnumerationIndex();
+ int index = map_of_this->instance_descriptors()->NextEnumerationIndex();
dictionary->SetNextEnumerationIndex(index);
- { MaybeObject* maybe_obj = Top::context()->global_context()->
- normalized_map_cache()->Get(this, mode);
+ { MaybeObject* maybe_obj =
+ current_heap->isolate()->context()->global_context()->
+ normalized_map_cache()->Get(this, mode);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
Map* new_map = Map::cast(obj);
@@ -2407,16 +2781,17 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
// Resize the object in the heap if necessary.
int new_instance_size = new_map->instance_size();
- int instance_size_delta = map()->instance_size() - new_instance_size;
+ int instance_size_delta = map_of_this->instance_size() - new_instance_size;
ASSERT(instance_size_delta >= 0);
- Heap::CreateFillerObjectAt(this->address() + new_instance_size,
- instance_size_delta);
+ current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
+ instance_size_delta);
set_map(new_map);
+ new_map->clear_instance_descriptors();
set_properties(dictionary);
- Counters::props_to_dictionary.Increment();
+ current_heap->isolate()->counters()->props_to_dictionary()->Increment();
#ifdef DEBUG
if (FLAG_trace_normalization) {
@@ -2437,47 +2812,75 @@ MaybeObject* JSObject::TransformToFastProperties(int unused_property_fields) {
MaybeObject* JSObject::NormalizeElements() {
- ASSERT(!HasPixelElements() && !HasExternalArrayElements());
- if (HasDictionaryElements()) return this;
- ASSERT(map()->has_fast_elements());
-
- Object* obj;
- { MaybeObject* maybe_obj = map()->GetSlowElementsMap();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- Map* new_map = Map::cast(obj);
+ ASSERT(!HasExternalArrayElements());
- // Get number of entries.
+ // Find the backing store.
FixedArray* array = FixedArray::cast(elements());
-
- // Compute the effective length.
- int length = IsJSArray() ?
- Smi::cast(JSArray::cast(this)->length())->value() :
- array->length();
- { MaybeObject* maybe_obj = NumberDictionary::Allocate(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- NumberDictionary* dictionary = NumberDictionary::cast(obj);
- // Copy entries.
+ Map* old_map = array->map();
+ bool is_arguments =
+ (old_map == old_map->heap()->non_strict_arguments_elements_map());
+ if (is_arguments) {
+ array = FixedArray::cast(array->get(1));
+ }
+ if (array->IsDictionary()) return array;
+
+ ASSERT(HasFastElements() || HasFastArgumentsElements());
+ // Compute the effective length and allocate a new backing store.
+ int length = IsJSArray()
+ ? Smi::cast(JSArray::cast(this)->length())->value()
+ : array->length();
+ NumberDictionary* dictionary = NULL;
+ { Object* object;
+ MaybeObject* maybe = NumberDictionary::Allocate(length);
+ if (!maybe->ToObject(&object)) return maybe;
+ dictionary = NumberDictionary::cast(object);
+ }
+
+ // Copy the elements to the new backing store.
+ bool has_double_elements = old_map->has_fast_double_elements();
for (int i = 0; i < length; i++) {
- Object* value = array->get(i);
+ Object* value = NULL;
+ if (has_double_elements) {
+ FixedDoubleArray* double_array = FixedDoubleArray::cast(array);
+ if (double_array->is_the_hole(i)) {
+ value = GetIsolate()->heap()->the_hole_value();
+ } else {
+ // Objects must be allocated in the old object space, since the
+ // overall number of HeapNumbers needed for the conversion might
+ // exceed the capacity of new space, and we would fail repeatedly
+ // trying to convert the FixedDoubleArray.
+ MaybeObject* maybe_value_object =
+ GetHeap()->AllocateHeapNumber(double_array->get(i), TENURED);
+ if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
+ }
+ } else {
+ ASSERT(old_map->has_fast_elements());
+ value = array->get(i);
+ }
+ PropertyDetails details = PropertyDetails(NONE, NORMAL);
if (!value->IsTheHole()) {
- PropertyDetails details = PropertyDetails(NONE, NORMAL);
Object* result;
- { MaybeObject* maybe_result =
- dictionary->AddNumberEntry(i, array->get(i), details);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result =
+ dictionary->AddNumberEntry(i, value, details);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
dictionary = NumberDictionary::cast(result);
}
}
- // Switch to using the dictionary as the backing storage for
- // elements. Set the new map first to satify the elements type
- // assert in set_elements().
- set_map(new_map);
- set_elements(dictionary);
- Counters::elements_to_dictionary.Increment();
+ // Switch to using the dictionary as the backing storage for elements.
+ if (is_arguments) {
+ FixedArray::cast(elements())->set(1, dictionary);
+ } else {
+    // Set the new map first to satisfy the elements type assert in
+ // set_elements().
+ Object* new_map;
+ MaybeObject* maybe = map()->GetSlowElementsMap();
+ if (!maybe->ToObject(&new_map)) return maybe;
+ set_map(Map::cast(new_map));
+ set_elements(dictionary);
+ }
+
+ old_map->isolate()->counters()->elements_to_dictionary()->Increment();
#ifdef DEBUG
if (FLAG_trace_normalization) {
@@ -2486,7 +2889,8 @@ MaybeObject* JSObject::NormalizeElements() {
}
#endif
- return this;
+ ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+ return dictionary;
}
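
The rewritten NormalizeElements folds fast, fast-double, and arguments backing stores into one dictionary-building loop, and it now returns the dictionary itself rather than the receiver. A minimal sketch of the core conversion in standard C++ (std::unordered_map stands in for V8's NumberDictionary; the separate hole flags are an assumption of the sketch):

#include <cstdint>
#include <unordered_map>
#include <vector>

std::unordered_map<uint32_t, double> NormalizeSketch(
    const std::vector<double>& fast_store,
    const std::vector<bool>& is_hole) {
  std::unordered_map<uint32_t, double> dictionary;
  for (uint32_t i = 0; i < fast_store.size(); ++i) {
    // Holes are never copied; in dictionary mode, absence is the hole.
    if (!is_hole[i]) dictionary.emplace(i, fast_store[i]);
  }
  return dictionary;
}
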
@@ -2495,7 +2899,7 @@ MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name,
// Check local property, ignore interceptor.
LookupResult result;
LocalLookupRealNamedProperty(name, &result);
- if (!result.IsProperty()) return Heap::true_value();
+ if (!result.IsProperty()) return GetHeap()->true_value();
// Normalize object if needed.
Object* obj;
@@ -2508,23 +2912,25 @@ MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name,
MaybeObject* JSObject::DeletePropertyWithInterceptor(String* name) {
- HandleScope scope;
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
Handle<String> name_handle(name);
Handle<JSObject> this_handle(this);
if (!interceptor->deleter()->IsUndefined()) {
v8::NamedPropertyDeleter deleter =
v8::ToCData<v8::NamedPropertyDeleter>(interceptor->deleter());
- LOG(ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
- CustomArguments args(interceptor->data(), this, this);
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
+ CustomArguments args(isolate, interceptor->data(), this, this);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Boolean> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = deleter(v8::Utils::ToLocal(name_handle), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
return *v8::Utils::OpenHandle(*result);
@@ -2532,14 +2938,14 @@ MaybeObject* JSObject::DeletePropertyWithInterceptor(String* name) {
}
MaybeObject* raw_result =
this_handle->DeletePropertyPostInterceptor(*name_handle, NORMAL_DELETION);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return raw_result;
}
MaybeObject* JSObject::DeleteElementPostInterceptor(uint32_t index,
DeleteMode mode) {
- ASSERT(!HasPixelElements() && !HasExternalArrayElements());
+ ASSERT(!HasExternalArrayElements());
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
Object* obj;
@@ -2558,7 +2964,16 @@ MaybeObject* JSObject::DeleteElementPostInterceptor(uint32_t index,
NumberDictionary* dictionary = element_dictionary();
int entry = dictionary->FindEntry(index);
if (entry != NumberDictionary::kNotFound) {
- return dictionary->DeleteProperty(entry, mode);
+ Object* deleted = dictionary->DeleteProperty(entry, mode);
+ if (deleted == GetHeap()->true_value()) {
+ MaybeObject* maybe_elements = dictionary->Shrink(index);
+ FixedArray* new_elements = NULL;
+ if (!maybe_elements->To(&new_elements)) {
+ return maybe_elements;
+ }
+ set_elements(new_elements);
+ }
+ return deleted;
}
break;
}
@@ -2566,79 +2981,143 @@ MaybeObject* JSObject::DeleteElementPostInterceptor(uint32_t index,
UNREACHABLE();
break;
}
- return Heap::true_value();
+ return GetHeap()->true_value();
}
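
Note the new shrink-on-delete step above: once an entry is successfully removed, Shrink can hand back a smaller backing store instead of leaving the dictionary at peak capacity. A hedged sketch of such a policy (the thresholds are illustrative assumptions, not V8's actual HashTable heuristic):

#include <unordered_map>

template <typename K, typename V>
void MaybeShrinkSketch(std::unordered_map<K, V>& table) {
  // Rebuild with about half the buckets once the table is mostly empty.
  // (The standard lets rehash keep more buckets than requested.)
  if (table.load_factor() < 0.25f && table.bucket_count() > 8) {
    table.rehash(table.bucket_count() / 2);
  }
}
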
MaybeObject* JSObject::DeleteElementWithInterceptor(uint32_t index) {
+ Isolate* isolate = GetIsolate();
+ Heap* heap = isolate->heap();
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- if (interceptor->deleter()->IsUndefined()) return Heap::false_value();
+ if (interceptor->deleter()->IsUndefined()) return heap->false_value();
v8::IndexedPropertyDeleter deleter =
v8::ToCData<v8::IndexedPropertyDeleter>(interceptor->deleter());
Handle<JSObject> this_handle(this);
- LOG(ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
- CustomArguments args(interceptor->data(), this, this);
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
+ CustomArguments args(isolate, interceptor->data(), this, this);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Boolean> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = deleter(index, info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
return *v8::Utils::OpenHandle(*result);
}
MaybeObject* raw_result =
this_handle->DeleteElementPostInterceptor(index, NORMAL_DELETION);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return raw_result;
}
+MaybeObject* JSObject::DeleteFastElement(uint32_t index) {
+ ASSERT(HasFastElements() || HasFastArgumentsElements());
+ Heap* heap = GetHeap();
+ FixedArray* backing_store = FixedArray::cast(elements());
+ if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
+ backing_store = FixedArray::cast(backing_store->get(1));
+ } else {
+ Object* writable;
+ MaybeObject* maybe = EnsureWritableFastElements();
+ if (!maybe->ToObject(&writable)) return maybe;
+ backing_store = FixedArray::cast(writable);
+ }
+ int length = IsJSArray()
+ ? Smi::cast(JSArray::cast(this)->length())->value()
+ : backing_store->length();
+ if (index < static_cast<uint32_t>(length)) {
+ backing_store->set_the_hole(index);
+ }
+ return heap->true_value();
+}
+
+
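
DeleteFastElement deletes by writing the hole sentinel into the slot; it never compacts the backing store. A minimal sketch under that model (the NaN sentinel is an assumption standing in for V8's tagged hole value, though FixedDoubleArray does reserve a NaN pattern):

#include <cstdint>
#include <limits>
#include <vector>

const double kHoleSketch = std::numeric_limits<double>::quiet_NaN();

void DeleteFastSketch(std::vector<double>& store, uint32_t index,
                      uint32_t length) {
  // Mark the slot as a hole; fast deletion never moves other elements.
  if (index < length) store[index] = kHoleSketch;
}
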
+MaybeObject* JSObject::DeleteDictionaryElement(uint32_t index,
+ DeleteMode mode) {
+ Isolate* isolate = GetIsolate();
+ Heap* heap = isolate->heap();
+ FixedArray* backing_store = FixedArray::cast(elements());
+ if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
+ backing_store = FixedArray::cast(backing_store->get(1));
+ }
+ NumberDictionary* dictionary = NumberDictionary::cast(backing_store);
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
+ Object* result = dictionary->DeleteProperty(entry, mode);
+ if (result == heap->true_value()) {
+ MaybeObject* maybe_elements = dictionary->Shrink(index);
+ FixedArray* new_elements = NULL;
+ if (!maybe_elements->To(&new_elements)) {
+ return maybe_elements;
+ }
+ set_elements(new_elements);
+ }
+ if (mode == STRICT_DELETION && result == heap->false_value()) {
+ // In strict mode, attempting to delete a non-configurable property
+ // throws an exception.
+ HandleScope scope(isolate);
+ Handle<Object> holder(this);
+ Handle<Object> name = isolate->factory()->NewNumberFromUint(index);
+ Handle<Object> args[2] = { name, holder };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("strict_delete_property",
+ HandleVector(args, 2));
+ return isolate->Throw(*error);
+ }
+ }
+ return heap->true_value();
+}
+
+
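
The strict-mode branch above implements the ECMA-262 ed. 5 rule for delete: removing a non-configurable property silently returns false in sloppy mode but must throw a TypeError in strict mode. A compact sketch of that decision table:

enum class DeleteModeSketch { kSloppy, kStrict };

// Returns whether the delete succeeded; *threw mirrors the TypeError the
// code above raises through isolate->Throw().
bool DeleteSketch(bool configurable, DeleteModeSketch mode, bool* threw) {
  *threw = false;
  if (configurable) return true;  // entry removed, dictionary may shrink
  if (mode == DeleteModeSketch::kStrict) *threw = true;  // TypeError
  return false;  // sloppy mode: silently refuse
}
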
MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
+ Isolate* isolate = GetIsolate();
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayIndexedAccess(this, index, v8::ACCESS_DELETE)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
- return Heap::false_value();
+ !isolate->MayIndexedAccess(this, index, v8::ACCESS_DELETE)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
+ return isolate->heap()->false_value();
}
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return Heap::false_value();
+ if (proto->IsNull()) return isolate->heap()->false_value();
ASSERT(proto->IsJSGlobalObject());
return JSGlobalObject::cast(proto)->DeleteElement(index, mode);
}
if (HasIndexedInterceptor()) {
// Skip interceptor if forcing deletion.
- if (mode == FORCE_DELETION) {
- return DeleteElementPostInterceptor(index, mode);
- }
- return DeleteElementWithInterceptor(index);
+ return (mode == FORCE_DELETION)
+ ? DeleteElementPostInterceptor(index, FORCE_DELETION)
+ : DeleteElementWithInterceptor(index);
}
switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- Object* obj;
- { MaybeObject* maybe_obj = EnsureWritableFastElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if (index < length) {
- FixedArray::cast(elements())->set_the_hole(index);
+ case FAST_ELEMENTS:
+ return DeleteFastElement(index);
+
+ case DICTIONARY_ELEMENTS:
+ return DeleteDictionaryElement(index, mode);
+
+ case FAST_DOUBLE_ELEMENTS: {
+ int length = IsJSArray()
+ ? Smi::cast(JSArray::cast(this)->length())->value()
+ : FixedArray::cast(elements())->length();
+ if (index < static_cast<uint32_t>(length)) {
+ FixedDoubleArray::cast(elements())->set_the_hole(index);
}
break;
}
- case PIXEL_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
@@ -2646,49 +3125,51 @@ MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
// Pixel and external array elements cannot be deleted. Just
// silently ignore here.
break;
- case DICTIONARY_ELEMENTS: {
- NumberDictionary* dictionary = element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* result = dictionary->DeleteProperty(entry, mode);
- if (mode == STRICT_DELETION && result == Heap::false_value()) {
- // In strict mode, deleting a non-configurable property throws
- // exception. dictionary->DeleteProperty will return false_value()
- // if a non-configurable property is being deleted.
- HandleScope scope;
- Handle<Object> i = Factory::NewNumberFromUint(index);
- Handle<Object> args[2] = { i, Handle<Object>(this) };
- return Top::Throw(*Factory::NewTypeError("strict_delete_property",
- HandleVector(args, 2)));
+
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ uint32_t length = parameter_map->length();
+ Object* probe =
+ index < (length - 2) ? parameter_map->get(index + 2) : NULL;
+ if (probe != NULL && !probe->IsTheHole()) {
+ // TODO(kmillikin): We could check if this was the last aliased
+ // parameter, and revert to normal elements in that case. That
+ // would enable GC of the context.
+ parameter_map->set_the_hole(index + 2);
+ } else {
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ if (arguments->IsDictionary()) {
+ return DeleteDictionaryElement(index, mode);
+ } else {
+ return DeleteFastElement(index);
}
}
break;
}
- default:
- UNREACHABLE();
- break;
}
- return Heap::true_value();
+ return isolate->heap()->true_value();
}
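
The NON_STRICT_ARGUMENTS_ELEMENTS case reads a "parameter map" layout: judging from the accesses above (and V8's aliased-arguments design), slot 0 holds the context, slot 1 the real backing store, and slots 2..n one alias per formal parameter, with the hole marking an unmapped index. A sketch of the alias probe:

#include <cstdint>
#include <vector>

struct SlotSketch { bool is_hole; };

// parameter_map layout: [context, backing_store, alias_0, ..., alias_n].
bool HasMappedParameterSketch(const std::vector<SlotSketch>& parameter_map,
                              uint32_t index) {
  // Slots 0 and 1 are not aliases; compare index + 2 against the length,
  // mirroring "index < (length - 2)" above.
  if (index + 2 >= parameter_map.size()) return false;
  return !parameter_map[index + 2].is_hole;
}
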
MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
+ Isolate* isolate = GetIsolate();
// ECMA-262, 3rd, 8.6.2.5
ASSERT(name->IsString());
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(this, name, v8::ACCESS_DELETE)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
- return Heap::false_value();
+ !isolate->MayNamedAccess(this, name, v8::ACCESS_DELETE)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
+ return isolate->heap()->false_value();
}
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return Heap::false_value();
+ if (proto->IsNull()) return isolate->heap()->false_value();
ASSERT(proto->IsJSGlobalObject());
return JSGlobalObject::cast(proto)->DeleteProperty(name, mode);
}
@@ -2699,17 +3180,17 @@ MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
} else {
LookupResult result;
LocalLookup(name, &result);
- if (!result.IsProperty()) return Heap::true_value();
+ if (!result.IsProperty()) return isolate->heap()->true_value();
// Ignore attributes if forcing a deletion.
if (result.IsDontDelete() && mode != FORCE_DELETION) {
if (mode == STRICT_DELETION) {
// Deleting a non-configurable property in strict mode.
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<Object> args[2] = { Handle<Object>(name), Handle<Object>(this) };
- return Top::Throw(*Factory::NewTypeError("strict_delete_property",
- HandleVector(args, 2)));
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "strict_delete_property", HandleVector(args, 2)));
}
- return Heap::false_value();
+ return isolate->heap()->false_value();
}
// Check for interceptor.
if (result.type() == INTERCEPTOR) {
@@ -2731,29 +3212,52 @@ MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
}
+bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
+ ElementsKind kind,
+ Object* object) {
+ ASSERT(kind == FAST_ELEMENTS || kind == DICTIONARY_ELEMENTS);
+ if (kind == FAST_ELEMENTS) {
+ int length = IsJSArray()
+ ? Smi::cast(JSArray::cast(this)->length())->value()
+ : elements->length();
+ for (int i = 0; i < length; ++i) {
+ Object* element = elements->get(i);
+ if (!element->IsTheHole() && element == object) return true;
+ }
+ } else {
+ Object* key = NumberDictionary::cast(elements)->SlowReverseLookup(object);
+ if (!key->IsUndefined()) return true;
+ }
+ return false;
+}
+
+
// Check whether this object references another object.
bool JSObject::ReferencesObject(Object* obj) {
+ Map* map_of_this = map();
+ Heap* heap = map_of_this->heap();
AssertNoAllocation no_alloc;
// Is the object the constructor for this object?
- if (map()->constructor() == obj) {
+ if (map_of_this->constructor() == obj) {
return true;
}
// Is the object the prototype for this object?
- if (map()->prototype() == obj) {
+ if (map_of_this->prototype() == obj) {
return true;
}
// Check if the object is among the named properties.
Object* key = SlowReverseLookup(obj);
- if (key != Heap::undefined_value()) {
+ if (!key->IsUndefined()) {
return true;
}
// Check if the object is among the indexed properties.
- switch (GetElementsKind()) {
- case PIXEL_ELEMENTS:
+ ElementsKind kind = GetElementsKind();
+ switch (kind) {
+ case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
@@ -2761,38 +3265,39 @@ bool JSObject::ReferencesObject(Object* obj) {
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
// Raw pixels and external arrays do not reference other
// objects.
break;
- case FAST_ELEMENTS: {
- int length = IsJSArray() ?
- Smi::cast(JSArray::cast(this)->length())->value() :
- FixedArray::cast(elements())->length();
- for (int i = 0; i < length; i++) {
- Object* element = FixedArray::cast(elements())->get(i);
- if (!element->IsTheHole() && element == obj) {
- return true;
- }
- }
- break;
- }
+ case FAST_ELEMENTS:
case DICTIONARY_ELEMENTS: {
- key = element_dictionary()->SlowReverseLookup(obj);
- if (key != Heap::undefined_value()) {
- return true;
- }
+ FixedArray* elements = FixedArray::cast(this->elements());
+ if (ReferencesObjectFromElements(elements, kind, obj)) return true;
break;
}
- default:
- UNREACHABLE();
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ // Check the mapped parameters.
+ int length = parameter_map->length();
+ for (int i = 2; i < length; ++i) {
+ Object* value = parameter_map->get(i);
+ if (!value->IsTheHole() && value == obj) return true;
+ }
+ // Check the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS : FAST_ELEMENTS;
+ if (ReferencesObjectFromElements(arguments, kind, obj)) return true;
break;
+ }
}
// For functions check the context.
if (IsJSFunction()) {
// Get the constructor function for arguments array.
JSObject* arguments_boilerplate =
- Top::context()->global_context()->arguments_boilerplate();
+ heap->isolate()->context()->global_context()->
+ arguments_boilerplate();
JSFunction* arguments_function =
JSFunction::cast(arguments_boilerplate->map()->constructor());
@@ -2819,9 +3324,9 @@ bool JSObject::ReferencesObject(Object* obj) {
}
}
- // Check the context extension if any.
- if (context->has_extension()) {
- return context->extension()->ReferencesObject(obj);
+ // Check the context extension (if any) if it can have references.
+ if (context->has_extension() && !context->IsCatchContext()) {
+ return JSObject::cast(context->extension())->ReferencesObject(obj);
}
}
@@ -2831,10 +3336,13 @@ bool JSObject::ReferencesObject(Object* obj) {
MaybeObject* JSObject::PreventExtensions() {
+ Isolate* isolate = GetIsolate();
if (IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(this, Heap::undefined_value(), v8::ACCESS_KEYS)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
- return Heap::false_value();
+ !isolate->MayNamedAccess(this,
+ isolate->heap()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
+ return isolate->heap()->false_value();
}
if (IsJSGlobalProxy()) {
@@ -2846,11 +3354,11 @@ MaybeObject* JSObject::PreventExtensions() {
// If there are fast elements we normalize.
if (HasFastElements()) {
- Object* ok;
- { MaybeObject* maybe_ok = NormalizeElements();
- if (!maybe_ok->ToObject(&ok)) return maybe_ok;
- }
+ MaybeObject* result = NormalizeElements();
+ if (result->IsFailure()) return result;
}
+ // TODO(kmillikin): Handle arguments object with dictionary elements.
+ ASSERT(HasDictionaryElements());
// Make sure that we never go back to fast case.
element_dictionary()->set_requires_slow_elements();
@@ -2873,8 +3381,9 @@ MaybeObject* JSObject::PreventExtensions() {
// - This object has no elements.
// - No prototype has enumerable properties/elements.
bool JSObject::IsSimpleEnum() {
+ Heap* heap = GetHeap();
for (Object* o = this;
- o != Heap::null_value();
+ o != heap->null_value();
o = JSObject::cast(o)->GetPrototype()) {
JSObject* curr = JSObject::cast(o);
if (!curr->map()->instance_descriptors()->HasEnumCache()) return false;
@@ -2937,9 +3446,20 @@ AccessorDescriptor* Map::FindAccessor(String* name) {
}
+void JSReceiver::LocalLookup(String* name, LookupResult* result) {
+ if (IsJSProxy()) {
+ result->HandlerResult();
+ } else {
+ JSObject::cast(this)->LocalLookup(name, result);
+ }
+}
+
+
void JSObject::LocalLookup(String* name, LookupResult* result) {
ASSERT(name->IsString());
+ Heap* heap = GetHeap();
+
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
if (proto->IsNull()) return result->NotFound();
@@ -2954,13 +3474,13 @@ void JSObject::LocalLookup(String* name, LookupResult* result) {
}
// Check __proto__ before interceptor.
- if (name->Equals(Heap::Proto_symbol()) && !IsJSContextExtensionObject()) {
+ if (name->Equals(heap->Proto_symbol()) && !IsJSContextExtensionObject()) {
result->ConstantResult(this);
return;
}
// Check for lookup interceptor except when bootstrapping.
- if (HasNamedInterceptor() && !Bootstrapper::IsActive()) {
+ if (HasNamedInterceptor() && !heap->isolate()->bootstrapper()->IsActive()) {
result->InterceptorResult(this);
return;
}
@@ -2969,10 +3489,11 @@ void JSObject::LocalLookup(String* name, LookupResult* result) {
}
-void JSObject::Lookup(String* name, LookupResult* result) {
+void JSReceiver::Lookup(String* name, LookupResult* result) {
// Ecma-262 3rd 8.6.2.4
+ Heap* heap = GetHeap();
for (Object* current = this;
- current != Heap::null_value();
+ current != heap->null_value();
current = JSObject::cast(current)->GetPrototype()) {
JSObject::cast(current)->LocalLookup(name, result);
if (result->IsProperty()) return;
@@ -2983,8 +3504,9 @@ void JSObject::Lookup(String* name, LookupResult* result) {
// Search the object and its prototype chain for callback properties.
void JSObject::LookupCallback(String* name, LookupResult* result) {
+ Heap* heap = GetHeap();
for (Object* current = this;
- current != Heap::null_value();
+ current != heap->null_value();
current = JSObject::cast(current)->GetPrototype()) {
JSObject::cast(current)->LocalLookupRealNamedProperty(name, result);
if (result->IsProperty() && result->type() == CALLBACKS) return;
@@ -2993,8 +3515,27 @@ void JSObject::LookupCallback(String* name, LookupResult* result) {
}
+// Search for a getter or setter in an elements dictionary. Returns either
+// undefined if the element is read-only, or the getter/setter pair (fixed
+// array) if there is an existing one, or the hole value if the element does
+// not exist or is a normal non-getter/setter data element.
+static Object* FindGetterSetterInDictionary(NumberDictionary* dictionary,
+ uint32_t index,
+ Heap* heap) {
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
+ Object* result = dictionary->ValueAt(entry);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ if (details.IsReadOnly()) return heap->undefined_value();
+ if (details.type() == CALLBACKS && result->IsFixedArray()) return result;
+ }
+ return heap->the_hole_value();
+}
+
+
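
The helper encodes a three-way contract: undefined blocks the definition (read-only element), an existing getter/setter FixedArray is reused, and the hole means a fresh pair must be created (plain data elements are simply overridden). A sketch of that contract as a decision function:

enum class ProbeResultSketch { kBlocked, kReusePair, kCreateNew };

ProbeResultSketch ProbeSketch(bool found, bool read_only, bool is_pair) {
  if (!found) return ProbeResultSketch::kCreateNew;   // the hole result
  if (read_only) return ProbeResultSketch::kBlocked;  // undefined result
  // A plain data element gets overridden by a freshly allocated pair.
  return is_pair ? ProbeResultSketch::kReusePair
                 : ProbeResultSketch::kCreateNew;
}
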
MaybeObject* JSObject::DefineGetterSetter(String* name,
PropertyAttributes attributes) {
+ Heap* heap = GetHeap();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
@@ -3003,7 +3544,7 @@ MaybeObject* JSObject::DefineGetterSetter(String* name,
name->TryFlatten();
if (!CanSetCallback(name)) {
- return Heap::undefined_value();
+ return heap->undefined_value();
}
uint32_t index = 0;
@@ -3013,7 +3554,7 @@ MaybeObject* JSObject::DefineGetterSetter(String* name,
switch (GetElementsKind()) {
case FAST_ELEMENTS:
break;
- case PIXEL_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
@@ -3021,36 +3562,43 @@ MaybeObject* JSObject::DefineGetterSetter(String* name,
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
// Ignore getters and setters on pixel and external array
// elements.
- return Heap::undefined_value();
+ return heap->undefined_value();
case DICTIONARY_ELEMENTS: {
- // Lookup the index.
- NumberDictionary* dictionary = element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* result = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.IsReadOnly()) return Heap::undefined_value();
- if (details.type() == CALLBACKS) {
- if (result->IsFixedArray()) {
- return result;
- }
- // Otherwise allow to override it.
+ Object* probe =
+ FindGetterSetterInDictionary(element_dictionary(), index, heap);
+ if (!probe->IsTheHole()) return probe;
+      // Otherwise allow overriding it.
+ break;
+ }
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ // Ascertain whether we have read-only properties or an existing
+ // getter/setter pair in an arguments elements dictionary backing
+ // store.
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ uint32_t length = parameter_map->length();
+ Object* probe =
+ index < (length - 2) ? parameter_map->get(index + 2) : NULL;
+ if (probe == NULL || probe->IsTheHole()) {
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ if (arguments->IsDictionary()) {
+ NumberDictionary* dictionary = NumberDictionary::cast(arguments);
+ probe = FindGetterSetterInDictionary(dictionary, index, heap);
+ if (!probe->IsTheHole()) return probe;
}
}
break;
}
- default:
- UNREACHABLE();
- break;
}
} else {
// Lookup the name.
LookupResult result;
LocalLookup(name, &result);
if (result.IsProperty()) {
- if (result.IsReadOnly()) return Heap::undefined_value();
+ if (result.IsReadOnly()) return heap->undefined_value();
if (result.type() == CALLBACKS) {
Object* obj = result.GetCallbackObject();
// Need to preserve old getters/setters.
@@ -3064,7 +3612,7 @@ MaybeObject* JSObject::DefineGetterSetter(String* name,
// Allocate the fixed array to hold getter and setter.
Object* structure;
- { MaybeObject* maybe_structure = Heap::AllocateFixedArray(2, TENURED);
+ { MaybeObject* maybe_structure = heap->AllocateFixedArray(2, TENURED);
if (!maybe_structure->ToObject(&structure)) return maybe_structure;
}
@@ -3078,7 +3626,7 @@ MaybeObject* JSObject::DefineGetterSetter(String* name,
bool JSObject::CanSetCallback(String* name) {
ASSERT(!IsAccessCheckNeeded()
- || Top::MayNamedAccess(this, name, v8::ACCESS_SET));
+ || Isolate::Current()->MayNamedAccess(this, name, v8::ACCESS_SET));
// Check if there is an API defined callback object which prohibits
// callback overwriting in this object or its prototype chain.
@@ -3106,23 +3654,39 @@ MaybeObject* JSObject::SetElementCallback(uint32_t index,
PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
// Normalize elements to make this operation simple.
- Object* ok;
- { MaybeObject* maybe_ok = NormalizeElements();
- if (!maybe_ok->ToObject(&ok)) return maybe_ok;
+ NumberDictionary* dictionary = NULL;
+ { Object* result;
+ MaybeObject* maybe = NormalizeElements();
+ if (!maybe->ToObject(&result)) return maybe;
+ dictionary = NumberDictionary::cast(result);
}
+ ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
// Update the dictionary with the new CALLBACKS property.
- Object* dict;
- { MaybeObject* maybe_dict =
- element_dictionary()->Set(index, structure, details);
- if (!maybe_dict->ToObject(&dict)) return maybe_dict;
+ { Object* result;
+ MaybeObject* maybe = dictionary->Set(index, structure, details);
+ if (!maybe->ToObject(&result)) return maybe;
+ dictionary = NumberDictionary::cast(result);
+ }
+
+ dictionary->set_requires_slow_elements();
+ // Update the dictionary backing store on the object.
+ if (elements()->map() == GetHeap()->non_strict_arguments_elements_map()) {
+ // Also delete any parameter alias.
+ //
+ // TODO(kmillikin): when deleting the last parameter alias we could
+ // switch to a direct backing store without the parameter map. This
+ // would allow GC of the context.
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ uint32_t length = parameter_map->length();
+ if (index < length - 2) {
+ parameter_map->set(index + 2, GetHeap()->the_hole_value());
+ }
+ parameter_map->set(1, dictionary);
+ } else {
+ set_elements(dictionary);
}
- NumberDictionary* elements = NumberDictionary::cast(dict);
- elements->set_requires_slow_elements();
- // Set the potential new dictionary on the object.
- set_elements(elements);
-
return structure;
}
@@ -3175,11 +3739,12 @@ MaybeObject* JSObject::DefineAccessor(String* name,
Object* fun,
PropertyAttributes attributes) {
ASSERT(fun->IsJSFunction() || fun->IsUndefined());
+ Isolate* isolate = GetIsolate();
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return Heap::undefined_value();
+ !isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ return isolate->heap()->undefined_value();
}
if (IsJSGlobalProxy()) {
@@ -3201,12 +3766,13 @@ MaybeObject* JSObject::DefineAccessor(String* name,
MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
+ Isolate* isolate = GetIsolate();
String* name = String::cast(info->name());
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return Heap::undefined_value();
+ !isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ return isolate->heap()->undefined_value();
}
if (IsJSGlobalProxy()) {
@@ -3224,20 +3790,20 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
name->TryFlatten();
if (!CanSetCallback(name)) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
if (is_element) {
- if (IsJSArray()) return Heap::undefined_value();
+ if (IsJSArray()) return isolate->heap()->undefined_value();
// Accessors overwrite previous callbacks (cf. with getters/setters).
switch (GetElementsKind()) {
case FAST_ELEMENTS:
break;
- case PIXEL_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
@@ -3245,13 +3811,15 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
// Ignore getters and setters on pixel and external array
// elements.
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
case DICTIONARY_ELEMENTS:
break;
- default:
- UNREACHABLE();
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
break;
}
@@ -3267,7 +3835,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
// ES5 forbids turning a property into an accessor if it's not
// configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
if (result.IsProperty() && (result.IsReadOnly() || result.IsDontDelete())) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
Object* ok;
{ MaybeObject* maybe_ok =
@@ -3281,15 +3849,17 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
Object* JSObject::LookupAccessor(String* name, bool is_getter) {
+ Heap* heap = GetHeap();
+
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(this, name, v8::ACCESS_HAS)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return Heap::undefined_value();
+ !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ return heap->undefined_value();
}
// Make the lookup and include prototypes.
@@ -3297,7 +3867,7 @@ Object* JSObject::LookupAccessor(String* name, bool is_getter) {
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
for (Object* obj = this;
- obj != Heap::null_value();
+ obj != heap->null_value();
obj = JSObject::cast(obj)->GetPrototype()) {
JSObject* js_object = JSObject::cast(obj);
if (js_object->HasDictionaryElements()) {
@@ -3316,12 +3886,12 @@ Object* JSObject::LookupAccessor(String* name, bool is_getter) {
}
} else {
for (Object* obj = this;
- obj != Heap::null_value();
+ obj != heap->null_value();
obj = JSObject::cast(obj)->GetPrototype()) {
LookupResult result;
JSObject::cast(obj)->LocalLookup(name, &result);
if (result.IsProperty()) {
- if (result.IsReadOnly()) return Heap::undefined_value();
+ if (result.IsReadOnly()) return heap->undefined_value();
if (result.type() == CALLBACKS) {
Object* obj = result.GetCallbackObject();
if (obj->IsFixedArray()) {
@@ -3331,7 +3901,7 @@ Object* JSObject::LookupAccessor(String* name, bool is_getter) {
}
}
}
- return Heap::undefined_value();
+ return heap->undefined_value();
}
@@ -3349,7 +3919,7 @@ Object* JSObject::SlowReverseLookup(Object* value) {
}
}
}
- return Heap::undefined_value();
+ return GetHeap()->undefined_value();
} else {
return property_dictionary()->SlowReverseLookup(value);
}
@@ -3357,9 +3927,10 @@ Object* JSObject::SlowReverseLookup(Object* value) {
MaybeObject* Map::CopyDropDescriptors() {
+ Heap* heap = GetHeap();
Object* result;
{ MaybeObject* maybe_result =
- Heap::AllocateMap(instance_type(), instance_size());
+ heap->AllocateMap(instance_type(), instance_size());
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Map::cast(result)->set_prototype(prototype());
@@ -3369,7 +3940,7 @@ MaybeObject* Map::CopyDropDescriptors() {
// pointing to the same transition which is bad because the garbage
// collector relies on being able to reverse pointers from transitions
// to maps. If properties need to be retained use CopyDropTransitions.
- Map::cast(result)->set_instance_descriptors(Heap::empty_descriptor_array());
+ Map::cast(result)->clear_instance_descriptors();
// Please note instance_type and instance_size are set when allocated.
Map::cast(result)->set_inobject_properties(inobject_properties());
Map::cast(result)->set_unused_property_fields(unused_property_fields());
@@ -3391,8 +3962,9 @@ MaybeObject* Map::CopyDropDescriptors() {
}
Map::cast(result)->set_bit_field(bit_field());
Map::cast(result)->set_bit_field2(bit_field2());
+ Map::cast(result)->set_bit_field3(bit_field3());
Map::cast(result)->set_is_shared(false);
- Map::cast(result)->ClearCodeCache();
+ Map::cast(result)->ClearCodeCache(heap);
return result;
}
@@ -3406,7 +3978,7 @@ MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
Object* result;
{ MaybeObject* maybe_result =
- Heap::AllocateMap(instance_type(), new_instance_size);
+ GetHeap()->AllocateMap(instance_type(), new_instance_size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -3419,6 +3991,7 @@ MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
Map::cast(result)->set_bit_field(bit_field());
Map::cast(result)->set_bit_field2(bit_field2());
+ Map::cast(result)->set_bit_field3(bit_field3());
Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
@@ -3451,7 +4024,7 @@ MaybeObject* Map::UpdateCodeCache(String* name, Code* code) {
// Allocate the code cache if not present.
if (code_cache()->IsFixedArray()) {
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateCodeCache();
+ { MaybeObject* maybe_result = code->heap()->AllocateCodeCache();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
set_code_cache(result);
@@ -3467,7 +4040,7 @@ Object* Map::FindInCodeCache(String* name, Code::Flags flags) {
if (!code_cache()->IsFixedArray()) {
return CodeCache::cast(code_cache())->Lookup(name, flags);
} else {
- return Heap::undefined_value();
+ return GetHeap()->undefined_value();
}
}
@@ -3490,40 +4063,70 @@ void Map::RemoveFromCodeCache(String* name, Code* code, int index) {
void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
+ // Traverse the transition tree without using a stack. We do this by
+ // reversing the pointers in the maps and descriptor arrays.
Map* current = this;
- while (current != Heap::meta_map()) {
+ Map* meta_map = heap()->meta_map();
+ Object** map_or_index_field = NULL;
+ while (current != meta_map) {
DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
- *RawField(current, Map::kInstanceDescriptorsOffset));
- if (d == Heap::empty_descriptor_array()) {
- Map* prev = current->map();
- current->set_map(Heap::meta_map());
- callback(current, data);
- current = prev;
- continue;
- }
-
- FixedArray* contents = reinterpret_cast<FixedArray*>(
- d->get(DescriptorArray::kContentArrayIndex));
- Object** map_or_index_field = RawField(contents, HeapObject::kMapOffset);
- Object* map_or_index = *map_or_index_field;
- bool map_done = true;
- for (int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : 0;
- i < contents->length();
- i += 2) {
- PropertyDetails details(Smi::cast(contents->get(i + 1)));
- if (details.IsTransition()) {
- Map* next = reinterpret_cast<Map*>(contents->get(i));
+ *RawField(current, Map::kInstanceDescriptorsOrBitField3Offset));
+ if (!d->IsEmpty()) {
+ FixedArray* contents = reinterpret_cast<FixedArray*>(
+ d->get(DescriptorArray::kContentArrayIndex));
+ map_or_index_field = RawField(contents, HeapObject::kMapOffset);
+ Object* map_or_index = *map_or_index_field;
+ bool map_done = true; // Controls a nested continue statement.
+ for (int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : 0;
+ i < contents->length();
+ i += 2) {
+ PropertyDetails details(Smi::cast(contents->get(i + 1)));
+ if (details.IsTransition()) {
+ // Found a map in the transition array. We record our progress in
+ // the transition array by recording the current map in the map field
+ // of the next map and recording the index in the transition array in
+ // the map field of the array.
+ Map* next = Map::cast(contents->get(i));
+ next->set_map(current);
+ *map_or_index_field = Smi::FromInt(i + 2);
+ current = next;
+ map_done = false;
+ break;
+ }
+ }
+ if (!map_done) continue;
+ }
+    // Those were the regular transitions; now for the prototype transitions.
+ FixedArray* prototype_transitions =
+ current->unchecked_prototype_transitions();
+ Object** proto_map_or_index_field =
+ RawField(prototype_transitions, HeapObject::kMapOffset);
+ Object* map_or_index = *proto_map_or_index_field;
+ const int start = kProtoTransitionHeaderSize + kProtoTransitionMapOffset;
+ int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : start;
+ if (i < prototype_transitions->length()) {
+ // Found a map in the prototype transition array. Record progress in
+ // an analogous way to the regular transitions array above.
+ Object* perhaps_map = prototype_transitions->get(i);
+ if (perhaps_map->IsMap()) {
+ Map* next = Map::cast(perhaps_map);
next->set_map(current);
- *map_or_index_field = Smi::FromInt(i + 2);
+ *proto_map_or_index_field =
+ Smi::FromInt(i + kProtoTransitionElementsPerEntry);
current = next;
- map_done = false;
- break;
+ continue;
}
}
- if (!map_done) continue;
- *map_or_index_field = Heap::fixed_array_map();
+ *proto_map_or_index_field = heap()->fixed_array_map();
+ if (map_or_index_field != NULL) {
+ *map_or_index_field = heap()->fixed_array_map();
+ }
+
+ // The callback expects a map to have a real map as its map, so we save
+  // the map field (which is being used to track the traversal) and put the
+ // correct map (the meta_map) in place while we do the callback.
Map* prev = current->map();
- current->set_map(Heap::meta_map());
+ current->set_map(meta_map);
callback(current, data);
current = prev;
}
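
This traversal is a Deutsch-Schorr-Waite-style walk: rather than keeping an explicit stack, it temporarily repurposes map fields as back-pointers and progress cursors, restoring them on the way back up. A self-contained sketch of the same idea on a plain n-ary tree, using dedicated fields where V8 borrows the map slots:

#include <cstddef>
#include <vector>

struct NodeSketch {
  std::vector<NodeSketch*> children;
  NodeSketch* back_pointer = nullptr;  // stands in for the borrowed map field
  std::size_t next_child = 0;          // stands in for the Smi progress index
};

template <typename Visit>
void TraverseNoStackSketch(NodeSketch* root, Visit visit) {
  NodeSketch* current = root;
  while (current != nullptr) {
    if (current->next_child < current->children.size()) {
      // Descend: thread a back-pointer through the child, record progress.
      NodeSketch* next = current->children[current->next_child++];
      next->back_pointer = current;
      current = next;
    } else {
      // Ascend: restore the borrowed fields and report the node post-order.
      current->next_child = 0;
      visit(current);
      NodeSketch* prev = current->back_pointer;
      current->back_pointer = nullptr;
      current = prev;
    }
  }
}
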
@@ -3531,8 +4134,6 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
MaybeObject* CodeCache::Update(String* name, Code* code) {
- ASSERT(code->ic_state() == MONOMORPHIC);
-
// The number of monomorphic stubs for normal load/store/call IC's can grow to
// a large number and therefore they need to go into a hash table. They are
// used to load global properties from cells.
@@ -3650,7 +4251,7 @@ Object* CodeCache::LookupDefaultCache(String* name, Code::Flags flags) {
}
}
}
- return Heap::undefined_value();
+ return GetHeap()->undefined_value();
}
@@ -3659,7 +4260,7 @@ Object* CodeCache::LookupNormalTypeCache(String* name, Code::Flags flags) {
CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
return cache->Lookup(name, flags);
} else {
- return Heap::undefined_value();
+ return GetHeap()->undefined_value();
}
}
@@ -3741,7 +4342,7 @@ class CodeCacheHashTableKey : public HashTableKey {
MUST_USE_RESULT MaybeObject* AsObject() {
ASSERT(code_ != NULL);
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(2);
+ { MaybeObject* maybe_obj = code_->heap()->AllocateFixedArray(2);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* pair = FixedArray::cast(obj);
@@ -3753,6 +4354,7 @@ class CodeCacheHashTableKey : public HashTableKey {
private:
String* name_;
Code::Flags flags_;
+ // TODO(jkummerow): We should be able to get by without this.
Code* code_;
};
@@ -3760,7 +4362,7 @@ class CodeCacheHashTableKey : public HashTableKey {
Object* CodeCacheHashTable::Lookup(String* name, Code::Flags flags) {
CodeCacheHashTableKey key(name, flags);
int entry = FindEntry(&key);
- if (entry == kNotFound) return Heap::undefined_value();
+ if (entry == kNotFound) return GetHeap()->undefined_value();
return get(EntryToIndex(entry) + 1);
}
@@ -3772,7 +4374,7 @@ MaybeObject* CodeCacheHashTable::Put(String* name, Code* code) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- // Don't use this, as the table might have grown.
+ // Don't use |this|, as the table might have grown.
CodeCacheHashTable* cache = reinterpret_cast<CodeCacheHashTable*>(obj);
int entry = cache->FindInsertionEntry(key.Hash());
@@ -3797,8 +4399,9 @@ int CodeCacheHashTable::GetIndex(String* name, Code::Flags flags) {
void CodeCacheHashTable::RemoveByIndex(int index) {
ASSERT(index >= 0);
- set(EntryToIndex(index), Heap::null_value());
- set(EntryToIndex(index) + 1, Heap::null_value());
+ Heap* heap = GetHeap();
+ set(EntryToIndex(index), heap->null_value());
+ set(EntryToIndex(index) + 1, heap->null_value());
ElementRemoved();
}
@@ -3817,8 +4420,166 @@ static bool HasKey(FixedArray* array, Object* key) {
}
+MaybeObject* PolymorphicCodeCache::Update(MapList* maps,
+ Code::Flags flags,
+ Code* code) {
+ // Initialize cache if necessary.
+ if (cache()->IsUndefined()) {
+ Object* result;
+ { MaybeObject* maybe_result =
+ PolymorphicCodeCacheHashTable::Allocate(
+ PolymorphicCodeCacheHashTable::kInitialSize);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ set_cache(result);
+ } else {
+ // This entry shouldn't be contained in the cache yet.
+ ASSERT(PolymorphicCodeCacheHashTable::cast(cache())
+ ->Lookup(maps, flags)->IsUndefined());
+ }
+ PolymorphicCodeCacheHashTable* hash_table =
+ PolymorphicCodeCacheHashTable::cast(cache());
+ Object* new_cache;
+ { MaybeObject* maybe_new_cache = hash_table->Put(maps, flags, code);
+ if (!maybe_new_cache->ToObject(&new_cache)) return maybe_new_cache;
+ }
+ set_cache(new_cache);
+ return this;
+}
+
+
+Object* PolymorphicCodeCache::Lookup(MapList* maps, Code::Flags flags) {
+ if (!cache()->IsUndefined()) {
+ PolymorphicCodeCacheHashTable* hash_table =
+ PolymorphicCodeCacheHashTable::cast(cache());
+ return hash_table->Lookup(maps, flags);
+ } else {
+ return GetHeap()->undefined_value();
+ }
+}
+
+
+// Despite their name, objects of this class are not stored in the actual
+// hash table; instead they're temporarily used for lookups. It is therefore
+// safe to have a weak (non-owning) pointer to a MapList as a member field.
+class PolymorphicCodeCacheHashTableKey : public HashTableKey {
+ public:
+ // Callers must ensure that |maps| outlives the newly constructed object.
+ PolymorphicCodeCacheHashTableKey(MapList* maps, int code_flags)
+ : maps_(maps),
+ code_flags_(code_flags) {}
+
+ bool IsMatch(Object* other) {
+ MapList other_maps(kDefaultListAllocationSize);
+ int other_flags;
+ FromObject(other, &other_flags, &other_maps);
+ if (code_flags_ != other_flags) return false;
+ if (maps_->length() != other_maps.length()) return false;
+ // Compare just the hashes first because it's faster.
+ int this_hash = MapsHashHelper(maps_, code_flags_);
+ int other_hash = MapsHashHelper(&other_maps, other_flags);
+ if (this_hash != other_hash) return false;
+
+ // Full comparison: for each map in maps_, look for an equivalent map in
+ // other_maps. This implementation is slow, but probably good enough for
+ // now because the lists are short (<= 4 elements currently).
+ for (int i = 0; i < maps_->length(); ++i) {
+ bool match_found = false;
+ for (int j = 0; j < other_maps.length(); ++j) {
+ if (maps_->at(i)->EquivalentTo(other_maps.at(j))) {
+ match_found = true;
+ break;
+ }
+ }
+ if (!match_found) return false;
+ }
+ return true;
+ }
+
+ static uint32_t MapsHashHelper(MapList* maps, int code_flags) {
+ uint32_t hash = code_flags;
+ for (int i = 0; i < maps->length(); ++i) {
+ hash ^= maps->at(i)->Hash();
+ }
+ return hash;
+ }
+
+ uint32_t Hash() {
+ return MapsHashHelper(maps_, code_flags_);
+ }
+
+ uint32_t HashForObject(Object* obj) {
+ MapList other_maps(kDefaultListAllocationSize);
+ int other_flags;
+ FromObject(obj, &other_flags, &other_maps);
+ return MapsHashHelper(&other_maps, other_flags);
+ }
+
+ MUST_USE_RESULT MaybeObject* AsObject() {
+ Object* obj;
+ // The maps in |maps_| must be copied to a newly allocated FixedArray,
+ // both because the referenced MapList is short-lived, and because C++
+ // objects can't be stored in the heap anyway.
+ { MaybeObject* maybe_obj =
+ HEAP->AllocateUninitializedFixedArray(maps_->length() + 1);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ FixedArray* list = FixedArray::cast(obj);
+ list->set(0, Smi::FromInt(code_flags_));
+ for (int i = 0; i < maps_->length(); ++i) {
+ list->set(i + 1, maps_->at(i));
+ }
+ return list;
+ }
+
+ private:
+ static MapList* FromObject(Object* obj, int* code_flags, MapList* maps) {
+ FixedArray* list = FixedArray::cast(obj);
+ maps->Rewind(0);
+ *code_flags = Smi::cast(list->get(0))->value();
+ for (int i = 1; i < list->length(); ++i) {
+ maps->Add(Map::cast(list->get(i)));
+ }
+ return maps;
+ }
+
+ MapList* maps_; // weak.
+ int code_flags_;
+ static const int kDefaultListAllocationSize = kMaxKeyedPolymorphism + 1;
+};
+
+
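
MapsHashHelper folds the map hashes with XOR, which is commutative and associative, so the hash (and, matching it, IsMatch) is insensitive to the order of the maps in the list. A standalone illustration:

#include <cstdint>
#include <vector>

uint32_t FoldHashesSketch(const std::vector<uint32_t>& map_hashes,
                          uint32_t code_flags) {
  uint32_t hash = code_flags;
  for (uint32_t h : map_hashes) hash ^= h;  // order-independent fold
  return hash;
}

// FoldHashesSketch yields the same value for any permutation of map_hashes.
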
+Object* PolymorphicCodeCacheHashTable::Lookup(MapList* maps, int code_flags) {
+ PolymorphicCodeCacheHashTableKey key(maps, code_flags);
+ int entry = FindEntry(&key);
+ if (entry == kNotFound) return GetHeap()->undefined_value();
+ return get(EntryToIndex(entry) + 1);
+}
+
+
+MaybeObject* PolymorphicCodeCacheHashTable::Put(MapList* maps,
+ int code_flags,
+ Code* code) {
+ PolymorphicCodeCacheHashTableKey key(maps, code_flags);
+ Object* obj;
+ { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ PolymorphicCodeCacheHashTable* cache =
+ reinterpret_cast<PolymorphicCodeCacheHashTable*>(obj);
+ int entry = cache->FindInsertionEntry(key.Hash());
+ { MaybeObject* maybe_obj = key.AsObject();
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ cache->set(EntryToIndex(entry), obj);
+ cache->set(EntryToIndex(entry) + 1, code);
+ cache->ElementAdded();
+ return cache;
+}
+
+
MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
- ASSERT(!array->HasPixelElements() && !array->HasExternalArrayElements());
+ ASSERT(!array->HasExternalArrayElements());
switch (array->GetElementsKind()) {
case JSObject::FAST_ELEMENTS:
return UnionOfKeys(FixedArray::cast(array->elements()));
@@ -3828,7 +4589,7 @@ MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
// Allocate a temporary fixed array.
Object* object;
- { MaybeObject* maybe_object = Heap::AllocateFixedArray(size);
+ { MaybeObject* maybe_object = GetHeap()->AllocateFixedArray(size);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
FixedArray* key_array = FixedArray::cast(object);
@@ -3844,11 +4605,23 @@ MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
// Compute the union of this and the temporary fixed array.
return UnionOfKeys(key_array);
}
- default:
- UNREACHABLE();
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
+ break;
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ break;
}
UNREACHABLE();
- return Heap::null_value(); // Failure case needs to "return" a value.
+ return GetHeap()->null_value(); // Failure case needs to "return" a value.
}
@@ -3878,7 +4651,7 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
// Allocate the result
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(len0 + extra);
+ { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(len0 + extra);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
// Fill in the content
@@ -3907,9 +4680,10 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
MaybeObject* FixedArray::CopySize(int new_length) {
- if (new_length == 0) return Heap::empty_fixed_array();
+ Heap* heap = GetHeap();
+ if (new_length == 0) return heap->empty_fixed_array();
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(new_length);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArray(new_length);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* result = FixedArray::cast(obj);
@@ -3947,13 +4721,14 @@ bool FixedArray::IsEqualTo(FixedArray* other) {
MaybeObject* DescriptorArray::Allocate(int number_of_descriptors) {
+ Heap* heap = Isolate::Current()->heap();
if (number_of_descriptors == 0) {
- return Heap::empty_descriptor_array();
+ return heap->empty_descriptor_array();
}
// Allocate the array of keys.
Object* array;
{ MaybeObject* maybe_array =
- Heap::AllocateFixedArray(ToKeyIndex(number_of_descriptors));
+ heap->AllocateFixedArray(ToKeyIndex(number_of_descriptors));
if (!maybe_array->ToObject(&array)) return maybe_array;
}
// Do not use DescriptorArray::cast on incomplete object.
@@ -3961,9 +4736,10 @@ MaybeObject* DescriptorArray::Allocate(int number_of_descriptors) {
// Allocate the content array and set it in the descriptor array.
{ MaybeObject* maybe_array =
- Heap::AllocateFixedArray(number_of_descriptors << 1);
+ heap->AllocateFixedArray(number_of_descriptors << 1);
if (!maybe_array->ToObject(&array)) return maybe_array;
}
+ result->set(kBitField3StorageIndex, Smi::FromInt(0));
result->set(kContentArrayIndex, array);
result->set(kEnumerationIndexIndex,
Smi::FromInt(PropertyDetails::kInitialIndex));
@@ -4230,15 +5006,15 @@ int DescriptorArray::LinearSearch(String* name, int len) {
MaybeObject* DeoptimizationInputData::Allocate(int deopt_entry_count,
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
- return Heap::AllocateFixedArray(LengthFor(deopt_entry_count),
+ return HEAP->AllocateFixedArray(LengthFor(deopt_entry_count),
pretenure);
}
MaybeObject* DeoptimizationOutputData::Allocate(int number_of_deopt_points,
PretenureFlag pretenure) {
- if (number_of_deopt_points == 0) return Heap::empty_fixed_array();
- return Heap::AllocateFixedArray(LengthOfFixedArray(number_of_deopt_points),
+ if (number_of_deopt_points == 0) return HEAP->empty_fixed_array();
+ return HEAP->AllocateFixedArray(LengthOfFixedArray(number_of_deopt_points),
pretenure);
}
@@ -4256,11 +5032,8 @@ bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
#endif
-static StaticResource<StringInputBuffer> string_input_buffer;
-
-
bool String::LooksValid() {
- if (!Heap::Contains(this)) return false;
+ if (!Isolate::Current()->heap()->Contains(this)) return false;
return true;
}
@@ -4271,8 +5044,10 @@ int String::Utf8Length() {
// doesn't make Utf8Length faster, but it is very likely that
// the string will be accessed later (for example by WriteUtf8)
// so it's still a good idea.
+ Heap* heap = GetHeap();
TryFlatten();
- Access<StringInputBuffer> buffer(&string_input_buffer);
+ Access<StringInputBuffer> buffer(
+ heap->isolate()->objects_string_input_buffer());
buffer->Reset(0, this);
int result = 0;
while (buffer->has_more())
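
The loop (truncated by this hunk) accumulates a per-character UTF-8 byte count over the whole string. A sketch of the standard size table it effectively applies, assuming the input buffer yields whole code points rather than UTF-16 code units:

#include <cstdint>

int Utf8SizeSketch(uint32_t code_point) {
  if (code_point < 0x80) return 1;    // ASCII
  if (code_point < 0x800) return 2;
  if (code_point < 0x10000) return 3;
  return 4;                           // supplementary planes
}
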
@@ -4338,16 +5113,17 @@ SmartPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
int offset,
int length,
int* length_return) {
- ASSERT(NativeAllocationChecker::allocation_allowed());
if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
return SmartPointer<char>(NULL);
}
+ Heap* heap = GetHeap();
// Negative length means to the end of the string.
if (length < 0) length = kMaxInt - offset;
// Compute the size of the UTF-8 string. Start at the specified offset.
- Access<StringInputBuffer> buffer(&string_input_buffer);
+ Access<StringInputBuffer> buffer(
+ heap->isolate()->objects_string_input_buffer());
buffer->Reset(offset, this);
int character_position = offset;
int utf8_bytes = 0;
@@ -4416,13 +5192,13 @@ const uc16* String::GetTwoByteData(unsigned start) {
SmartPointer<uc16> String::ToWideCString(RobustnessFlag robust_flag) {
- ASSERT(NativeAllocationChecker::allocation_allowed());
-
if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
return SmartPointer<uc16>();
}
+ Heap* heap = GetHeap();
- Access<StringInputBuffer> buffer(&string_input_buffer);
+ Access<StringInputBuffer> buffer(
+ heap->isolate()->objects_string_input_buffer());
buffer->Reset(this);
uc16* result = NewArray<uc16>(length() + 1);
@@ -4705,11 +5481,9 @@ const unibrow::byte* String::ReadBlock(String* input,
}
-Relocatable* Relocatable::top_ = NULL;
-
-
void Relocatable::PostGarbageCollectionProcessing() {
- Relocatable* current = top_;
+ Isolate* isolate = Isolate::Current();
+ Relocatable* current = isolate->relocatable_top();
while (current != NULL) {
current->PostGarbageCollection();
current = current->prev_;
@@ -4719,21 +5493,21 @@ void Relocatable::PostGarbageCollectionProcessing() {
// Reserve space for statics needing saving and restoring.
int Relocatable::ArchiveSpacePerThread() {
- return sizeof(top_);
+ return sizeof(Isolate::Current()->relocatable_top());
}
// Archive statics that are thread local.
-char* Relocatable::ArchiveState(char* to) {
- *reinterpret_cast<Relocatable**>(to) = top_;
- top_ = NULL;
+char* Relocatable::ArchiveState(Isolate* isolate, char* to) {
+ *reinterpret_cast<Relocatable**>(to) = isolate->relocatable_top();
+ isolate->set_relocatable_top(NULL);
return to + ArchiveSpacePerThread();
}
// Restore statics that are thread local.
-char* Relocatable::RestoreState(char* from) {
- top_ = *reinterpret_cast<Relocatable**>(from);
+char* Relocatable::RestoreState(Isolate* isolate, char* from) {
+ isolate->set_relocatable_top(*reinterpret_cast<Relocatable**>(from));
return from + ArchiveSpacePerThread();
}
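
ArchiveState and RestoreState follow the usual thread-switch pattern in this codebase: each field is copied into a flat per-thread buffer and the cursor advanced by its size, then copied back when the thread resumes. A minimal sketch of that pattern with a stand-in isolate type:

#include <cstring>

struct IsolateStateSketch { void* relocatable_top; };

char* ArchiveSketch(IsolateStateSketch* isolate, char* to) {
  std::memcpy(to, &isolate->relocatable_top, sizeof(void*));
  isolate->relocatable_top = nullptr;  // detach the list from this thread
  return to + sizeof(void*);           // advance the cursor past the field
}

char* RestoreSketch(IsolateStateSketch* isolate, char* from) {
  std::memcpy(&isolate->relocatable_top, from, sizeof(void*));
  return from + sizeof(void*);
}
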
@@ -4746,7 +5520,8 @@ char* Relocatable::Iterate(ObjectVisitor* v, char* thread_storage) {
void Relocatable::Iterate(ObjectVisitor* v) {
- Iterate(v, top_);
+ Isolate* isolate = Isolate::Current();
+ Iterate(v, isolate->relocatable_top());
}
@@ -4759,15 +5534,17 @@ void Relocatable::Iterate(ObjectVisitor* v, Relocatable* top) {
}
-FlatStringReader::FlatStringReader(Handle<String> str)
- : str_(str.location()),
+FlatStringReader::FlatStringReader(Isolate* isolate, Handle<String> str)
+ : Relocatable(isolate),
+ str_(str.location()),
length_(str->length()) {
PostGarbageCollection();
}
-FlatStringReader::FlatStringReader(Vector<const char> input)
- : str_(0),
+FlatStringReader::FlatStringReader(Isolate* isolate, Vector<const char> input)
+ : Relocatable(isolate),
+ str_(0),
is_ascii_(true),
length_(input.length()),
start_(input.start()) { }
@@ -5097,11 +5874,10 @@ static inline bool CompareRawStringContents(Vector<Char> a, Vector<Char> b) {
}
-static StringInputBuffer string_compare_buffer_b;
-
-
template <typename IteratorA>
-static inline bool CompareStringContentsPartial(IteratorA* ia, String* b) {
+static inline bool CompareStringContentsPartial(Isolate* isolate,
+ IteratorA* ia,
+ String* b) {
if (b->IsFlat()) {
if (b->IsAsciiRepresentation()) {
VectorIterator<char> ib(b->ToAsciiVector());
@@ -5111,15 +5887,13 @@ static inline bool CompareStringContentsPartial(IteratorA* ia, String* b) {
return CompareStringContents(ia, &ib);
}
} else {
- string_compare_buffer_b.Reset(0, b);
- return CompareStringContents(ia, &string_compare_buffer_b);
+ isolate->objects_string_compare_buffer_b()->Reset(0, b);
+ return CompareStringContents(ia,
+ isolate->objects_string_compare_buffer_b());
}
}
-static StringInputBuffer string_compare_buffer_a;
-
-
bool String::SlowEquals(String* other) {
// Fast check: negative check with lengths.
int len = length();
@@ -5147,6 +5921,7 @@ bool String::SlowEquals(String* other) {
Vector<const char>(str2, len));
}
+ Isolate* isolate = GetIsolate();
if (lhs->IsFlat()) {
if (lhs->IsAsciiRepresentation()) {
Vector<const char> vec1 = lhs->ToAsciiVector();
@@ -5161,8 +5936,9 @@ bool String::SlowEquals(String* other) {
}
} else {
VectorIterator<char> buf1(vec1);
- string_compare_buffer_b.Reset(0, rhs);
- return CompareStringContents(&buf1, &string_compare_buffer_b);
+ isolate->objects_string_compare_buffer_b()->Reset(0, rhs);
+ return CompareStringContents(&buf1,
+ isolate->objects_string_compare_buffer_b());
}
} else {
Vector<const uc16> vec1 = lhs->ToUC16Vector();
@@ -5177,13 +5953,15 @@ bool String::SlowEquals(String* other) {
}
} else {
VectorIterator<uc16> buf1(vec1);
- string_compare_buffer_b.Reset(0, rhs);
- return CompareStringContents(&buf1, &string_compare_buffer_b);
+ isolate->objects_string_compare_buffer_b()->Reset(0, rhs);
+ return CompareStringContents(&buf1,
+ isolate->objects_string_compare_buffer_b());
}
}
} else {
- string_compare_buffer_a.Reset(0, lhs);
- return CompareStringContentsPartial(&string_compare_buffer_a, rhs);
+ isolate->objects_string_compare_buffer_a()->Reset(0, lhs);
+ return CompareStringContentsPartial(isolate,
+ isolate->objects_string_compare_buffer_a(), rhs);
}
}
@@ -5192,11 +5970,12 @@ bool String::MarkAsUndetectable() {
if (StringShape(this).IsSymbol()) return false;
Map* map = this->map();
- if (map == Heap::string_map()) {
- this->set_map(Heap::undetectable_string_map());
+ Heap* heap = map->heap();
+ if (map == heap->string_map()) {
+ this->set_map(heap->undetectable_string_map());
return true;
- } else if (map == Heap::ascii_string_map()) {
- this->set_map(Heap::undetectable_ascii_string_map());
+ } else if (map == heap->ascii_string_map()) {
+ this->set_map(heap->undetectable_ascii_string_map());
return true;
}
  // The rest cannot be marked as undetectable.
@@ -5205,9 +5984,10 @@ bool String::MarkAsUndetectable() {
bool String::IsEqualTo(Vector<const char> str) {
+ Isolate* isolate = GetIsolate();
int slen = length();
- Access<ScannerConstants::Utf8Decoder>
- decoder(ScannerConstants::utf8_decoder());
+ Access<UnicodeCache::Utf8Decoder>
+ decoder(isolate->unicode_cache()->utf8_decoder());
decoder->Reset(str.start(), str.length());
int i;
for (i = 0; i < slen && decoder->has_more(); i++) {
@@ -5221,6 +6001,9 @@ bool String::IsEqualTo(Vector<const char> str) {
bool String::IsAsciiEqualTo(Vector<const char> str) {
int slen = length();
if (str.length() != slen) return false;
+ if (IsFlat() && IsAsciiRepresentation()) {
+ return CompareChars(ToAsciiVector().start(), str.start(), slen) == 0;
+ }
for (int i = 0; i < slen; i++) {
if (Get(i) != static_cast<uint16_t>(str[i])) return false;
}
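// [Editor's sketch] The flat fast path above (and the two-byte one in the
// next hunk) hand both ranges to CompareChars, assumed here to behave like
// memcmp over same-width characters:
template <typename Char>
static int CompareCharsSketch(const Char* a, const Char* b, int length) {
  for (int i = 0; i < length; i++) {
    if (a[i] != b[i]) return a[i] - b[i];  // first mismatch decides the order
  }
  return 0;  // ranges compare equal
}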
@@ -5231,6 +6014,9 @@ bool String::IsAsciiEqualTo(Vector<const char> str) {
bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
int slen = length();
if (str.length() != slen) return false;
+ if (IsFlat() && IsTwoByteRepresentation()) {
+ return CompareChars(ToUC16Vector().start(), str.start(), slen) == 0;
+ }
for (int i = 0; i < slen; i++) {
if (Get(i) != str[i]) return false;
}
@@ -5238,22 +6024,6 @@ bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
}
-template <typename schar>
-static inline uint32_t HashSequentialString(const schar* chars, int length) {
- StringHasher hasher(length);
- if (!hasher.has_trivial_hash()) {
- int i;
- for (i = 0; hasher.is_array_index() && (i < length); i++) {
- hasher.AddCharacter(chars[i]);
- }
- for (; i < length; i++) {
- hasher.AddCharacterNoIndex(chars[i]);
- }
- }
- return hasher.GetHashField();
-}
-
-
uint32_t String::ComputeAndSetHash() {
// Should only be called if hash code has not yet been computed.
ASSERT(!HasHashCode());
@@ -5385,8 +6155,9 @@ uint32_t String::ComputeHashField(unibrow::CharacterStream* buffer,
MaybeObject* String::SubString(int start, int end, PretenureFlag pretenure) {
+ Heap* heap = GetHeap();
if (start == 0 && end == length()) return this;
- MaybeObject* result = Heap::AllocateSubString(this, start, end, pretenure);
+ MaybeObject* result = heap->AllocateSubString(this, start, end, pretenure);
return result;
}
@@ -5403,6 +6174,7 @@ void Map::CreateBackPointers() {
DescriptorArray* descriptors = instance_descriptors();
for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
if (descriptors->GetType(i) == MAP_TRANSITION ||
+ descriptors->GetType(i) == EXTERNAL_ARRAY_TRANSITION ||
descriptors->GetType(i) == CONSTANT_TRANSITION) {
// Get target.
Map* target = Map::cast(descriptors->GetValue(i));
@@ -5426,12 +6198,12 @@ void Map::CreateBackPointers() {
}
-void Map::ClearNonLiveTransitions(Object* real_prototype) {
+void Map::ClearNonLiveTransitions(Heap* heap, Object* real_prototype) {
// Live DescriptorArray objects will be marked, so we must use
// low-level accessors to get and modify their data.
DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
- *RawField(this, Map::kInstanceDescriptorsOffset));
- if (d == Heap::raw_unchecked_empty_descriptor_array()) return;
+ *RawField(this, Map::kInstanceDescriptorsOrBitField3Offset));
+ if (d->IsEmpty()) return;
Smi* NullDescriptorDetails =
PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi();
FixedArray* contents = reinterpret_cast<FixedArray*>(
@@ -5445,13 +6217,14 @@ void Map::ClearNonLiveTransitions(Object* real_prototype) {
// non-live object.
PropertyDetails details(Smi::cast(contents->get(i + 1)));
if (details.type() == MAP_TRANSITION ||
+ details.type() == EXTERNAL_ARRAY_TRANSITION ||
details.type() == CONSTANT_TRANSITION) {
Map* target = reinterpret_cast<Map*>(contents->get(i));
ASSERT(target->IsHeapObject());
if (!target->IsMarked()) {
ASSERT(target->IsMap());
contents->set_unchecked(i + 1, NullDescriptorDetails);
- contents->set_null_unchecked(i);
+ contents->set_null_unchecked(heap, i);
ASSERT(target->prototype() == this ||
target->prototype() == real_prototype);
// Getter prototype() is read-only, set_prototype() has side effects.
@@ -5462,6 +6235,40 @@ void Map::ClearNonLiveTransitions(Object* real_prototype) {
}
+int Map::Hash() {
+ // For performance reasons we only hash the 3 most variable fields of a map:
+ // constructor, prototype and bit_field2.
+
+ // Shift away the tag.
+ int hash = (static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(constructor())) >> 2);
+
+ // XOR-ing the prototype and constructor directly yields too many zero bits
+ // when the two pointers are close (which is fairly common).
+  // To avoid this we shift the prototype 4 bits relative to the constructor.
+ hash ^= (static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(prototype())) << 2);
+
+ return hash ^ (hash >> 16) ^ bit_field2();
+}
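// [Editor's note] Worked example of the mixing above: for constructor 0x10
// and prototype 0x18 (two pointers 8 bytes apart), a plain XOR gives
// 0x10 ^ 0x18 == 0x08, while (0x10 >> 2) ^ (0x18 << 2) == 0x04 ^ 0x60 ==
// 0x64, keeping bits from both inputs visible in the hash.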
+
+
+bool Map::EquivalentToForNormalization(Map* other,
+ PropertyNormalizationMode mode) {
+ return
+ constructor() == other->constructor() &&
+ prototype() == other->prototype() &&
+ inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ?
+ 0 :
+ other->inobject_properties()) &&
+ instance_type() == other->instance_type() &&
+ bit_field() == other->bit_field() &&
+ bit_field2() == other->bit_field2() &&
+ (bit_field3() & ~(1<<Map::kIsShared)) ==
+ (other->bit_field3() & ~(1<<Map::kIsShared));
+}
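// [Editor's note] The bit_field3 comparison masks out kIsShared before
// comparing, so a shared map and its non-shared twin still normalize to the
// same map; e.g. with kIsShared == 0: 0xB & ~1 == 0xA == 0xA & ~1.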
+
+
void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
// Iterate over all fields in the body but take care in dealing with
// the code entry.
@@ -5475,7 +6282,8 @@ void JSFunction::MarkForLazyRecompilation() {
ASSERT(is_compiled() && !IsOptimized());
ASSERT(shared()->allows_lazy_compilation() ||
code()->optimizable());
- ReplaceCode(Builtins::builtin(Builtins::LazyRecompile));
+ Builtins* builtins = GetIsolate()->builtins();
+ ReplaceCode(builtins->builtin(Builtins::kLazyRecompile));
}
@@ -5508,7 +6316,7 @@ bool JSFunction::IsInlineable() {
Object* JSFunction::SetInstancePrototype(Object* value) {
ASSERT(value->IsJSObject());
-
+ Heap* heap = GetHeap();
if (has_initial_map()) {
initial_map()->set_prototype(value);
} else {
@@ -5517,7 +6325,7 @@ Object* JSFunction::SetInstancePrototype(Object* value) {
// prototype is put into the initial map where it belongs.
set_prototype_or_initial_map(value);
}
- Heap::ClearInstanceofCache();
+ heap->ClearInstanceofCache();
return value;
}
@@ -5534,15 +6342,18 @@ MaybeObject* JSFunction::SetPrototype(Object* value) {
// Copy the map so this does not affect unrelated functions.
// Remove map transitions because they point to maps with a
// different prototype.
- Object* new_map;
+ Object* new_object;
{ MaybeObject* maybe_new_map = map()->CopyDropTransitions();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
+ if (!maybe_new_map->ToObject(&new_object)) return maybe_new_map;
}
- set_map(Map::cast(new_map));
- map()->set_constructor(value);
- map()->set_non_instance_prototype(true);
+ Map* new_map = Map::cast(new_object);
+ Heap* heap = new_map->heap();
+ set_map(new_map);
+ new_map->set_constructor(value);
+ new_map->set_non_instance_prototype(true);
construct_prototype =
- Top::context()->global_context()->initial_object_prototype();
+ heap->isolate()->context()->global_context()->
+ initial_object_prototype();
} else {
map()->set_non_instance_prototype(false);
}
@@ -5552,9 +6363,22 @@ MaybeObject* JSFunction::SetPrototype(Object* value) {
Object* JSFunction::RemovePrototype() {
- ASSERT(map() == context()->global_context()->function_map());
- set_map(context()->global_context()->function_without_prototype_map());
- set_prototype_or_initial_map(Heap::the_hole_value());
+ Context* global_context = context()->global_context();
+ Map* no_prototype_map = shared()->strict_mode()
+ ? global_context->strict_mode_function_without_prototype_map()
+ : global_context->function_without_prototype_map();
+
+ if (map() == no_prototype_map) {
+ // Be idempotent.
+ return this;
+ }
+
+ ASSERT(!shared()->strict_mode() ||
+ map() == global_context->strict_mode_function_map());
+ ASSERT(shared()->strict_mode() || map() == global_context->function_map());
+
+ set_map(no_prototype_map);
+ set_prototype_or_initial_map(no_prototype_map->heap()->the_hole_value());
return this;
}
@@ -5576,13 +6400,17 @@ Context* JSFunction::GlobalContextFromLiterals(FixedArray* literals) {
}
-MaybeObject* Oddball::Initialize(const char* to_string, Object* to_number) {
+MaybeObject* Oddball::Initialize(const char* to_string,
+ Object* to_number,
+ byte kind) {
Object* symbol;
- { MaybeObject* maybe_symbol = Heap::LookupAsciiSymbol(to_string);
+ { MaybeObject* maybe_symbol =
+ Isolate::Current()->heap()->LookupAsciiSymbol(to_string);
if (!maybe_symbol->ToObject(&symbol)) return maybe_symbol;
}
set_to_string(String::cast(symbol));
set_to_number(to_number);
+ set_kind(kind);
return this;
}
@@ -5601,10 +6429,11 @@ bool SharedFunctionInfo::HasSourceCode() {
Object* SharedFunctionInfo::GetSourceCode() {
- if (!HasSourceCode()) return Heap::undefined_value();
- HandleScope scope;
+ Isolate* isolate = GetIsolate();
+ if (!HasSourceCode()) return isolate->heap()->undefined_value();
+ HandleScope scope(isolate);
Object* source = Script::cast(script())->source();
- return *SubString(Handle<String>(String::cast(source)),
+ return *SubString(Handle<String>(String::cast(source), isolate),
start_position(), end_position());
}
@@ -5644,10 +6473,12 @@ bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
return true;
}
+ Heap* heap = GetHeap();
+
// Traverse the proposed prototype chain looking for setters for properties of
// the same names as are set by the inline constructor.
for (Object* obj = prototype;
- obj != Heap::null_value();
+ obj != heap->null_value();
obj = obj->GetPrototype()) {
JSObject* js_object = JSObject::cast(obj);
for (int i = 0; i < this_property_assignments_count(); i++) {
@@ -5683,10 +6514,11 @@ void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
void SharedFunctionInfo::ClearThisPropertyAssignmentsInfo() {
+ Heap* heap = GetHeap();
set_compiler_hints(BooleanBit::set(compiler_hints(),
kHasOnlySimpleThisPropertyAssignments,
false));
- set_this_property_assignments(Heap::undefined_value());
+ set_this_property_assignments(heap->undefined_value());
set_this_property_assignments_count(0);
}
@@ -5799,6 +6631,29 @@ void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
}
+void SharedFunctionInfo::DisableOptimization(JSFunction* function) {
+ // Disable optimization for the shared function info and mark the
+ // code as non-optimizable. The marker on the shared function info
+  // is there because we flush non-optimized code, thereby losing the
+ // non-optimizable information for the code. When the code is
+ // regenerated and set on the shared function info it is marked as
+ // non-optimizable if optimization is disabled for the shared
+ // function info.
+ set_optimization_disabled(true);
+ // Code should be the lazy compilation stub or else unoptimized. If the
+ // latter, disable optimization for the code too.
+ ASSERT(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN);
+ if (code()->kind() == Code::FUNCTION) {
+ code()->set_optimizable(false);
+ }
+ if (FLAG_trace_opt) {
+ PrintF("[disabled optimization for: ");
+ function->PrintName();
+ PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+ }
+}
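// [Editor's note] With --trace-opt, the PrintF calls above produce a line of
// the form (name and address illustrative):
//
//   [disabled optimization for: foo / 7f3a24c81e59]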
+
+
bool SharedFunctionInfo::VerifyBailoutId(int id) {
// TODO(srdjan): debugging ARM crashes in hydrogen. OK to disable while
// we are always bailing out on ARM.
@@ -5831,9 +6686,10 @@ void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
set_construction_count(kGenerousAllocationCount);
}
set_initial_map(map);
- ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubGeneric),
+ Builtins* builtins = map->heap()->isolate()->builtins();
+ ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
construct_stub());
- set_construct_stub(Builtins::builtin(Builtins::JSConstructStubCountdown));
+ set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
}
@@ -5850,10 +6706,11 @@ void SharedFunctionInfo::DetachInitialMap() {
// then StartInobjectTracking will be called again the next time the
// constructor is called. The countdown will continue and (possibly after
// several more GCs) CompleteInobjectSlackTracking will eventually be called.
- set_initial_map(Heap::raw_unchecked_undefined_value());
- ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubCountdown),
+ set_initial_map(map->heap()->raw_unchecked_undefined_value());
+ Builtins* builtins = map->heap()->isolate()->builtins();
+ ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
*RawField(this, kConstructStubOffset));
- set_construct_stub(Builtins::builtin(Builtins::JSConstructStubGeneric));
+ set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric));
// It is safe to clear the flag: it will be set again if the map is live.
set_live_objects_may_exist(false);
}
@@ -5866,9 +6723,10 @@ void SharedFunctionInfo::AttachInitialMap(Map* map) {
// Resume inobject slack tracking.
set_initial_map(map);
- ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubGeneric),
+ Builtins* builtins = map->heap()->isolate()->builtins();
+ ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
*RawField(this, kConstructStubOffset));
- set_construct_stub(Builtins::builtin(Builtins::JSConstructStubCountdown));
+ set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
// The map survived the gc, so there may be objects referencing it.
set_live_objects_may_exist(true);
}
@@ -5897,16 +6755,19 @@ void SharedFunctionInfo::CompleteInobjectSlackTracking() {
ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress());
Map* map = Map::cast(initial_map());
- set_initial_map(Heap::undefined_value());
- ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubCountdown),
+ Heap* heap = map->heap();
+ set_initial_map(heap->undefined_value());
+ Builtins* builtins = heap->isolate()->builtins();
+ ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
construct_stub());
- set_construct_stub(Builtins::builtin(Builtins::JSConstructStubGeneric));
+ set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric));
int slack = map->unused_property_fields();
map->TraverseTransitionTree(&GetMinInobjectSlack, &slack);
if (slack != 0) {
// Resize the initial map and all maps in its transition tree.
map->TraverseTransitionTree(&ShrinkInstanceSize, &slack);
+
// Give the correct expected_nof_properties to initial maps created later.
ASSERT(expected_nof_properties() >= slack);
set_expected_nof_properties(expected_nof_properties() - slack);
@@ -5957,8 +6818,7 @@ void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
void Code::InvalidateRelocation() {
- HandleScope scope;
- set_relocation_info(Heap::empty_byte_array());
+ set_relocation_info(heap()->empty_byte_array());
}
@@ -6090,9 +6950,7 @@ Map* Code::FindFirstMap() {
}
-#ifdef ENABLE_DISASSEMBLER
-
-#ifdef OBJECT_PRINT
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
disasm::NameConverter converter;
@@ -6240,8 +7098,10 @@ void DeoptimizationOutputData::DeoptimizationOutputDataPrint(FILE* out) {
}
}
-#endif
+#endif // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
+
+#ifdef ENABLE_DISASSEMBLER
// Identify kind of code.
const char* Code::Kind2String(Kind kind) {
@@ -6256,8 +7116,8 @@ const char* Code::Kind2String(Kind kind) {
case KEYED_STORE_IC: return "KEYED_STORE_IC";
case CALL_IC: return "CALL_IC";
case KEYED_CALL_IC: return "KEYED_CALL_IC";
+ case UNARY_OP_IC: return "UNARY_OP_IC";
case BINARY_OP_IC: return "BINARY_OP_IC";
- case TYPE_RECORDING_BINARY_OP_IC: return "TYPE_RECORDING_BINARY_OP_IC";
case COMPARE_IC: return "COMPARE_IC";
}
UNREACHABLE();
@@ -6286,8 +7146,10 @@ const char* Code::PropertyType2String(PropertyType type) {
case FIELD: return "FIELD";
case CONSTANT_FUNCTION: return "CONSTANT_FUNCTION";
case CALLBACKS: return "CALLBACKS";
+ case HANDLER: return "HANDLER";
case INTERCEPTOR: return "INTERCEPTOR";
case MAP_TRANSITION: return "MAP_TRANSITION";
+ case EXTERNAL_ARRAY_TRANSITION: return "EXTERNAL_ARRAY_TRANSITION";
case CONSTANT_TRANSITION: return "CONSTANT_TRANSITION";
case NULL_DESCRIPTOR: return "NULL_DESCRIPTOR";
}
@@ -6316,7 +7178,7 @@ void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
if (name != NULL) {
PrintF(out, "extra_ic_state = %s\n", name);
} else {
- PrintF(out, "etra_ic_state = %d\n", extra);
+ PrintF(out, "extra_ic_state = %d\n", extra);
}
}
@@ -6342,7 +7204,6 @@ void Code::Disassemble(const char* name, FILE* out) {
Disassembler::Decode(out, this);
PrintF(out, "\n");
-#ifdef DEBUG
if (kind() == FUNCTION) {
DeoptimizationOutputData* data =
DeoptimizationOutputData::cast(this->deoptimization_data());
@@ -6353,7 +7214,6 @@ void Code::Disassemble(const char* name, FILE* out) {
data->DeoptimizationInputDataPrint(out);
}
PrintF("\n");
-#endif
if (kind() == OPTIMIZED_FUNCTION) {
SafepointTable table(this);
@@ -6400,43 +7260,158 @@ void Code::Disassemble(const char* name, FILE* out) {
#endif // ENABLE_DISASSEMBLER
+static void CopyFastElementsToFast(FixedArray* source,
+ FixedArray* destination,
+ WriteBarrierMode mode) {
+ uint32_t count = static_cast<uint32_t>(source->length());
+ for (uint32_t i = 0; i < count; ++i) {
+ destination->set(i, source->get(i), mode);
+ }
+}
+
+
+static void CopySlowElementsToFast(NumberDictionary* source,
+ FixedArray* destination,
+ WriteBarrierMode mode) {
+ for (int i = 0; i < source->Capacity(); ++i) {
+ Object* key = source->KeyAt(i);
+ if (key->IsNumber()) {
+ uint32_t entry = static_cast<uint32_t>(key->Number());
+ destination->set(entry, source->ValueAt(i), mode);
+ }
+ }
+}
+
+
MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
int length) {
+ Heap* heap = GetHeap();
+  // We should never end up here with a pixel or external array.
+ ASSERT(!HasExternalArrayElements());
+
+ // Allocate a new fast elements backing store.
+ FixedArray* new_elements = NULL;
+ { Object* object;
+ MaybeObject* maybe = heap->AllocateFixedArrayWithHoles(capacity);
+ if (!maybe->ToObject(&object)) return maybe;
+ new_elements = FixedArray::cast(object);
+ }
+
+ // Find the new map to use for this object if there is a map change.
+ Map* new_map = NULL;
+ if (elements()->map() != heap->non_strict_arguments_elements_map()) {
+ Object* object;
+ MaybeObject* maybe = map()->GetFastElementsMap();
+ if (!maybe->ToObject(&object)) return maybe;
+ new_map = Map::cast(object);
+ }
+
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS:
+ CopyFastElementsToFast(FixedArray::cast(elements()), new_elements, mode);
+ set_map(new_map);
+ set_elements(new_elements);
+ break;
+ case DICTIONARY_ELEMENTS:
+ CopySlowElementsToFast(NumberDictionary::cast(elements()),
+ new_elements,
+ mode);
+ set_map(new_map);
+ set_elements(new_elements);
+ break;
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ // The object's map and the parameter map are unchanged, the unaliased
+ // arguments are copied to the new backing store.
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ if (arguments->IsDictionary()) {
+ CopySlowElementsToFast(NumberDictionary::cast(arguments),
+ new_elements,
+ mode);
+ } else {
+ CopyFastElementsToFast(arguments, new_elements, mode);
+ }
+ parameter_map->set(1, new_elements);
+ break;
+ }
+ case FAST_DOUBLE_ELEMENTS: {
+ FixedDoubleArray* old_elements = FixedDoubleArray::cast(elements());
+ uint32_t old_length = static_cast<uint32_t>(old_elements->length());
+      // Fill the new array with the old contents, leaving holes in place.
+ for (uint32_t i = 0; i < old_length; i++) {
+ if (!old_elements->is_the_hole(i)) {
+ Object* obj;
+ // Objects must be allocated in the old object space, since the
+ // overall number of HeapNumbers needed for the conversion might
+ // exceed the capacity of new space, and we would fail repeatedly
+ // trying to convert the FixedDoubleArray.
+ MaybeObject* maybe_value_object =
+ GetHeap()->AllocateHeapNumber(old_elements->get(i), TENURED);
+ if (!maybe_value_object->ToObject(&obj)) return maybe_value_object;
+ // Force write barrier. It's not worth trying to exploit
+          // new_elements->GetWriteBarrierMode(), since it requires an
+ // AssertNoAllocation stack object that would have to be positioned
+ // after the HeapNumber allocation anyway.
+ new_elements->set(i, obj, UPDATE_WRITE_BARRIER);
+ }
+ }
+ break;
+ }
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+
+ // Update the length if necessary.
+ if (IsJSArray()) {
+ JSArray::cast(this)->set_length(Smi::FromInt(length));
+ }
+
+ return new_elements;
+}
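// [Editor's note] The allocation idiom used above recurs throughout this
// file: a MaybeObject* carries either a real Object* or a Failure (for
// example retry-after-GC), and callers unwrap it as
//
//   Object* obj;
//   { MaybeObject* maybe = heap->AllocateFixedArrayWithHoles(capacity);
//     if (!maybe->ToObject(&obj)) return maybe;  // propagate the failure
//   }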
+
+
+MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
+ int capacity,
+ int length) {
+ Heap* heap = GetHeap();
  // We should never end up here with a pixel or external array.
- ASSERT(!HasPixelElements() && !HasExternalArrayElements());
+ ASSERT(!HasExternalArrayElements());
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArrayWithHoles(capacity);
+ { MaybeObject* maybe_obj =
+ heap->AllocateUninitializedFixedDoubleArray(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- FixedArray* elems = FixedArray::cast(obj);
+ FixedDoubleArray* elems = FixedDoubleArray::cast(obj);
- { MaybeObject* maybe_obj = map()->GetFastElementsMap();
+ { MaybeObject* maybe_obj = map()->GetFastDoubleElementsMap();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
Map* new_map = Map::cast(obj);
AssertNoAllocation no_gc;
- WriteBarrierMode mode = elems->GetWriteBarrierMode(no_gc);
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
- FixedArray* old_elements = FixedArray::cast(elements());
- uint32_t old_length = static_cast<uint32_t>(old_elements->length());
- // Fill out the new array with this content and array holes.
- for (uint32_t i = 0; i < old_length; i++) {
- elems->set(i, old_elements->get(i), mode);
- }
+ elems->Initialize(FixedArray::cast(elements()));
+ break;
+ }
+ case FAST_DOUBLE_ELEMENTS: {
+ elems->Initialize(FixedDoubleArray::cast(elements()));
break;
}
case DICTIONARY_ELEMENTS: {
- NumberDictionary* dictionary = NumberDictionary::cast(elements());
- for (int i = 0; i < dictionary->Capacity(); i++) {
- Object* key = dictionary->KeyAt(i);
- if (key->IsNumber()) {
- uint32_t entry = static_cast<uint32_t>(key->Number());
- elems->set(entry, dictionary->ValueAt(i), mode);
- }
- }
+ elems->Initialize(NumberDictionary::cast(elements()));
break;
}
default:
@@ -6457,7 +7432,7 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
MaybeObject* JSObject::SetSlowElements(Object* len) {
  // We should never end up here with a pixel or external array.
- ASSERT(!HasPixelElements() && !HasExternalArrayElements());
+ ASSERT(!HasExternalArrayElements());
uint32_t new_length = static_cast<uint32_t>(len->Number());
@@ -6466,10 +7441,8 @@ MaybeObject* JSObject::SetSlowElements(Object* len) {
// Make sure we never try to shrink dense arrays into sparse arrays.
ASSERT(static_cast<uint32_t>(FixedArray::cast(elements())->length()) <=
new_length);
- Object* obj;
- { MaybeObject* maybe_obj = NormalizeElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* result = NormalizeElements();
+ if (result->IsFailure()) return result;
// Update length for JSArrays.
if (IsJSArray()) JSArray::cast(this)->set_length(len);
@@ -6484,7 +7457,19 @@ MaybeObject* JSObject::SetSlowElements(Object* len) {
}
break;
}
- default:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
+ break;
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
UNREACHABLE();
break;
}
@@ -6493,14 +7478,15 @@ MaybeObject* JSObject::SetSlowElements(Object* len) {
MaybeObject* JSArray::Initialize(int capacity) {
+ Heap* heap = GetHeap();
ASSERT(capacity >= 0);
set_length(Smi::FromInt(0));
FixedArray* new_elements;
if (capacity == 0) {
- new_elements = Heap::empty_fixed_array();
+ new_elements = heap->empty_fixed_array();
} else {
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArrayWithHoles(capacity);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
new_elements = FixedArray::cast(obj);
@@ -6515,24 +7501,18 @@ void JSArray::Expand(int required_size) {
Handle<FixedArray> old_backing(FixedArray::cast(elements()));
int old_size = old_backing->length();
int new_size = required_size > old_size ? required_size : old_size;
- Handle<FixedArray> new_backing = Factory::NewFixedArray(new_size);
+ Handle<FixedArray> new_backing = FACTORY->NewFixedArray(new_size);
  // Can't use 'this' anymore because we may have had a GC!
for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
self->SetContent(*new_backing);
}
-// Computes the new capacity when expanding the elements of a JSObject.
-static int NewElementsCapacity(int old_capacity) {
- // (old_capacity + 50%) + 16
- return old_capacity + (old_capacity >> 1) + 16;
-}
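// [Editor's note] NewElementsCapacity is deleted here (presumably relocated
// elsewhere in this upgrade); its growth policy, old + old/2 + 16, yields
// e.g. 0 -> 16, 16 -> 40, 40 -> 76, 100 -> 166.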
-
-
-static Failure* ArrayLengthRangeError() {
+static Failure* ArrayLengthRangeError(Heap* heap) {
HandleScope scope;
- return Top::Throw(*Factory::NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
+ return heap->isolate()->Throw(
+ *FACTORY->NewRangeError("invalid_array_length",
+ HandleVector<Object>(NULL, 0)));
}
@@ -6544,7 +7524,7 @@ MaybeObject* JSObject::SetElementsLength(Object* len) {
Object* smi_length = Smi::FromInt(0);
if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
const int value = Smi::cast(smi_length)->value();
- if (value < 0) return ArrayLengthRangeError();
+ if (value < 0) return ArrayLengthRangeError(GetHeap());
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
int old_capacity = FixedArray::cast(elements())->length();
@@ -6554,12 +7534,24 @@ MaybeObject* JSObject::SetElementsLength(Object* len) {
{ MaybeObject* maybe_obj = EnsureWritableFastElements();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- int old_length = FastD2I(JSArray::cast(this)->length()->Number());
- // NOTE: We may be able to optimize this by removing the
- // last part of the elements backing storage array and
- // setting the capacity to the new size.
- for (int i = value; i < old_length; i++) {
- FixedArray::cast(elements())->set_the_hole(i);
+ FixedArray* fast_elements = FixedArray::cast(elements());
+ if (2 * value <= old_capacity) {
+ // If more than half the elements won't be used, trim the array.
+ if (value == 0) {
+ initialize_elements();
+ } else {
+ fast_elements->set_length(value);
+ Address filler_start = fast_elements->address() +
+ FixedArray::OffsetOfElementAt(value);
+ int filler_size = (old_capacity - value) * kPointerSize;
+ GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
+ }
+ } else {
+ // Otherwise, fill the unused tail with holes.
+ int old_length = FastD2I(JSArray::cast(this)->length()->Number());
+ for (int i = value; i < old_length; i++) {
+ fast_elements->set_the_hole(i);
+ }
}
JSArray::cast(this)->set_length(Smi::cast(smi_length));
}
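// [Editor's note] Worked example of the in-place trim above: with
// old_capacity == 8 and value == 2, set_length(2) keeps elements 0..1 and
// CreateFillerObjectAt overwrites the remaining 6 * kPointerSize bytes, so
// the heap stays linearly iterable after the shrink.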
@@ -6569,11 +7561,9 @@ MaybeObject* JSObject::SetElementsLength(Object* len) {
int new_capacity = value > min ? value : min;
if (new_capacity <= kMaxFastElementsLength ||
!ShouldConvertToSlowElements(new_capacity)) {
- Object* obj;
- { MaybeObject* maybe_obj =
- SetFastElementsCapacityAndLength(new_capacity, value);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* result =
+ SetFastElementsCapacityAndLength(new_capacity, value);
+ if (result->IsFailure()) return result;
return this;
}
break;
@@ -6598,7 +7588,17 @@ MaybeObject* JSObject::SetElementsLength(Object* len) {
}
return this;
}
- default:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
UNREACHABLE();
break;
}
@@ -6610,14 +7610,14 @@ MaybeObject* JSObject::SetElementsLength(Object* len) {
if (len->ToArrayIndex(&length)) {
return SetSlowElements(len);
} else {
- return ArrayLengthRangeError();
+ return ArrayLengthRangeError(GetHeap());
}
}
// len is not a number so make the array size one and
// set only element to len.
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(1);
+ { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(1);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray::cast(obj)->set(0, len);
@@ -6627,26 +7627,107 @@ MaybeObject* JSObject::SetElementsLength(Object* len) {
}
-MaybeObject* JSObject::SetPrototype(Object* value,
- bool skip_hidden_prototypes) {
+Object* Map::GetPrototypeTransition(Object* prototype) {
+ FixedArray* cache = prototype_transitions();
+ int number_of_transitions = NumberOfProtoTransitions();
+ const int proto_offset =
+ kProtoTransitionHeaderSize + kProtoTransitionPrototypeOffset;
+ const int map_offset = kProtoTransitionHeaderSize + kProtoTransitionMapOffset;
+ const int step = kProtoTransitionElementsPerEntry;
+ for (int i = 0; i < number_of_transitions; i++) {
+ if (cache->get(proto_offset + i * step) == prototype) {
+ Object* map = cache->get(map_offset + i * step);
+ ASSERT(map->IsMap());
+ return map;
+ }
+ }
+ return NULL;
+}
+
+
+MaybeObject* Map::PutPrototypeTransition(Object* prototype, Map* map) {
+ ASSERT(map->IsMap());
+ ASSERT(HeapObject::cast(prototype)->map()->IsMap());
+ // Don't cache prototype transition if this map is shared.
+ if (is_shared() || !FLAG_cache_prototype_transitions) return this;
+
+ FixedArray* cache = prototype_transitions();
+
+ const int step = kProtoTransitionElementsPerEntry;
+ const int header = kProtoTransitionHeaderSize;
+
+ int capacity = (cache->length() - header) / step;
+
+ int transitions = NumberOfProtoTransitions() + 1;
+
+ if (transitions > capacity) {
+ if (capacity > kMaxCachedPrototypeTransitions) return this;
+
+ FixedArray* new_cache;
+  // Grow the cache to twice the space we currently need.
+ { MaybeObject* maybe_cache =
+ heap()->AllocateFixedArray(transitions * 2 * step + header);
+ if (!maybe_cache->To<FixedArray>(&new_cache)) return maybe_cache;
+ }
+
+ for (int i = 0; i < capacity * step; i++) {
+ new_cache->set(i + header, cache->get(i + header));
+ }
+ cache = new_cache;
+ set_prototype_transitions(cache);
+ }
+
+ int last = transitions - 1;
+
+ cache->set(header + last * step + kProtoTransitionPrototypeOffset, prototype);
+ cache->set(header + last * step + kProtoTransitionMapOffset, map);
+ SetNumberOfProtoTransitions(transitions);
+
+ return cache;
+}
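// [Editor's sketch] Cache layout assumed by the two functions above: a
// FixedArray with a small bookkeeping header followed by fixed-size
// [prototype, map] entries,
//
//   [ header... | proto_0, map_0 | proto_1, map_1 | ... ]
//
// Lookup is a linear scan over the entries; insertion appends, doubling the
// backing array when full and giving up beyond
// kMaxCachedPrototypeTransitions entries.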
+
+
+MaybeObject* JSReceiver::SetPrototype(Object* value,
+ bool skip_hidden_prototypes) {
+#ifdef DEBUG
+ int size = Size();
+#endif
+
+ Heap* heap = GetHeap();
// Silently ignore the change if value is not a JSObject or null.
// SpiderMonkey behaves this way.
- if (!value->IsJSObject() && !value->IsNull()) return value;
+ if (!value->IsJSReceiver() && !value->IsNull()) return value;
+
+ // From 8.6.2 Object Internal Methods
+ // ...
+ // In addition, if [[Extensible]] is false the value of the [[Class]] and
+ // [[Prototype]] internal properties of the object may not be modified.
+ // ...
+ // Implementation specific extensions that modify [[Class]], [[Prototype]]
+ // or [[Extensible]] must not violate the invariants defined in the preceding
+ // paragraph.
+ if (!this->map()->is_extensible()) {
+ HandleScope scope;
+ Handle<Object> handle(this, heap->isolate());
+ return heap->isolate()->Throw(
+ *FACTORY->NewTypeError("non_extensible_proto",
+ HandleVector<Object>(&handle, 1)));
+ }
// Before we can set the prototype we need to be sure
// prototype cycles are prevented.
// It is sufficient to validate that the receiver is not in the new prototype
// chain.
- for (Object* pt = value; pt != Heap::null_value(); pt = pt->GetPrototype()) {
+ for (Object* pt = value; pt != heap->null_value(); pt = pt->GetPrototype()) {
if (JSObject::cast(pt) == this) {
// Cycle detected.
HandleScope scope;
- return Top::Throw(*Factory::NewError("cyclic_proto",
- HandleVector<Object>(NULL, 0)));
+ return heap->isolate()->Throw(
+ *FACTORY->NewError("cyclic_proto", HandleVector<Object>(NULL, 0)));
}
}
- JSObject* real_receiver = this;
+ JSReceiver* real_receiver = this;
if (skip_hidden_prototypes) {
// Find the first object in the chain whose prototype object is not
@@ -6660,20 +7741,34 @@ MaybeObject* JSObject::SetPrototype(Object* value,
}
// Set the new prototype of the object.
- Object* new_map;
- { MaybeObject* maybe_new_map = real_receiver->map()->CopyDropTransitions();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
+ Map* map = real_receiver->map();
+
+ // Nothing to do if prototype is already set.
+ if (map->prototype() == value) return value;
+
+ Object* new_map = map->GetPrototypeTransition(value);
+ if (new_map == NULL) {
+ { MaybeObject* maybe_new_map = map->CopyDropTransitions();
+ if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
+ }
+
+ { MaybeObject* maybe_new_cache =
+ map->PutPrototypeTransition(value, Map::cast(new_map));
+ if (maybe_new_cache->IsFailure()) return maybe_new_cache;
+ }
+
+ Map::cast(new_map)->set_prototype(value);
}
- Map::cast(new_map)->set_prototype(value);
+ ASSERT(Map::cast(new_map)->prototype() == value);
real_receiver->set_map(Map::cast(new_map));
- Heap::ClearInstanceofCache();
-
+ heap->ClearInstanceofCache();
+ ASSERT(size == Size());
return value;
}
-bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
+bool JSObject::HasElementPostInterceptor(JSReceiver* receiver, uint32_t index) {
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
uint32_t length = IsJSArray() ?
@@ -6686,8 +7781,8 @@ bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
}
break;
}
- case PIXEL_ELEMENTS: {
- PixelArray* pixels = PixelArray::cast(elements());
+ case EXTERNAL_PIXEL_ELEMENTS: {
+ ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
if (index < static_cast<uint32_t>(pixels->length())) {
return true;
}
@@ -6699,7 +7794,9 @@ bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS: {
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
ExternalArray* array = ExternalArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
return true;
@@ -6713,7 +7810,7 @@ bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
}
break;
}
- default:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -6722,29 +7819,31 @@ bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
if (this->IsStringObjectWithCharacterAt(index)) return true;
Object* pt = GetPrototype();
- if (pt == Heap::null_value()) return false;
+ if (pt->IsNull()) return false;
return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
}
-bool JSObject::HasElementWithInterceptor(JSObject* receiver, uint32_t index) {
+bool JSObject::HasElementWithInterceptor(JSReceiver* receiver, uint32_t index) {
+ Isolate* isolate = GetIsolate();
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- Handle<JSObject> receiver_handle(receiver);
+ Handle<JSReceiver> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
- CustomArguments args(interceptor->data(), receiver, this);
+ CustomArguments args(isolate, interceptor->data(), receiver, this);
v8::AccessorInfo info(args.end());
if (!interceptor->query()->IsUndefined()) {
v8::IndexedPropertyQuery query =
v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
- LOG(ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
v8::Handle<v8::Integer> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = query(index, info);
}
if (!result.IsEmpty()) {
@@ -6754,11 +7853,12 @@ bool JSObject::HasElementWithInterceptor(JSObject* receiver, uint32_t index) {
} else if (!interceptor->getter()->IsUndefined()) {
v8::IndexedPropertyGetter getter =
v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
- LOG(ApiIndexedPropertyAccess("interceptor-indexed-has-get", this, index));
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-has-get", this, index));
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = getter(index, info);
}
if (!result.IsEmpty()) return true;
@@ -6769,10 +7869,12 @@ bool JSObject::HasElementWithInterceptor(JSObject* receiver, uint32_t index) {
JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !Top::MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return UNDEFINED_ELEMENT;
+ if (IsAccessCheckNeeded()) {
+ Heap* heap = GetHeap();
+ if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ return UNDEFINED_ELEMENT;
+ }
}
if (IsJSGlobalProxy()) {
@@ -6805,8 +7907,8 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
}
break;
}
- case PIXEL_ELEMENTS: {
- PixelArray* pixels = PixelArray::cast(elements());
+ case EXTERNAL_PIXEL_ELEMENTS: {
+ ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
if (index < static_cast<uint32_t>(pixels->length())) return FAST_ELEMENT;
break;
}
@@ -6816,33 +7918,81 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS: {
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS: {
ExternalArray* array = ExternalArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) return FAST_ELEMENT;
break;
}
+ case FAST_DOUBLE_ELEMENTS:
+ UNREACHABLE();
+ break;
case DICTIONARY_ELEMENTS: {
if (element_dictionary()->FindEntry(index) !=
- NumberDictionary::kNotFound) {
+ NumberDictionary::kNotFound) {
return DICTIONARY_ELEMENT;
}
break;
}
- default:
- UNREACHABLE();
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ // Aliased parameters and non-aliased elements in a fast backing store
+ // behave as FAST_ELEMENT. Non-aliased elements in a dictionary
+ // backing store behave as DICTIONARY_ELEMENT.
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ uint32_t length = parameter_map->length();
+ Object* probe =
+ index < (length - 2) ? parameter_map->get(index + 2) : NULL;
+ if (probe != NULL && !probe->IsTheHole()) return FAST_ELEMENT;
+ // If not aliased, check the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ if (arguments->IsDictionary()) {
+ NumberDictionary* dictionary = NumberDictionary::cast(arguments);
+ if (dictionary->FindEntry(index) != NumberDictionary::kNotFound) {
+ return DICTIONARY_ELEMENT;
+ }
+ } else {
+ length = arguments->length();
+ probe = (index < length) ? arguments->get(index) : NULL;
+ if (probe != NULL && !probe->IsTheHole()) return FAST_ELEMENT;
+ }
break;
+ }
}
return UNDEFINED_ELEMENT;
}
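// [Editor's sketch] Layout of NON_STRICT_ARGUMENTS_ELEMENTS as assumed by
// the case above: elements() is a "parameter map" FixedArray whose slot 1
// holds the real backing store and whose slots 2..n alias mapped parameters
// (slot 0 is assumed to hold the context):
//
//   parameter_map: [ context | backing store | param_0 | param_1 | ... ]
//          index:       0           1             2         3
//
// A hole at slot i + 2 means argument i is unmapped and must be looked up in
// the backing store, which is either a FixedArray or a NumberDictionary.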
-bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
+bool JSObject::HasElementInElements(FixedArray* elements,
+ ElementsKind kind,
+ uint32_t index) {
+ ASSERT(kind == FAST_ELEMENTS || kind == DICTIONARY_ELEMENTS);
+ if (kind == FAST_ELEMENTS) {
+ int length = IsJSArray()
+ ? Smi::cast(JSArray::cast(this)->length())->value()
+ : elements->length();
+ if (index < static_cast<uint32_t>(length) &&
+ !elements->get(index)->IsTheHole()) {
+ return true;
+ }
+ } else {
+ if (NumberDictionary::cast(elements)->FindEntry(index) !=
+ NumberDictionary::kNotFound) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool JSObject::HasElementWithReceiver(JSReceiver* receiver, uint32_t index) {
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !Top::MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return false;
+ if (IsAccessCheckNeeded()) {
+ Heap* heap = GetHeap();
+ if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ return false;
+ }
}
// Check for lookup interceptor
@@ -6850,7 +8000,8 @@ bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
return HasElementWithInterceptor(receiver, index);
}
- switch (GetElementsKind()) {
+ ElementsKind kind = GetElementsKind();
+ switch (kind) {
case FAST_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>
@@ -6860,8 +8011,8 @@ bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
!FixedArray::cast(elements())->get(index)->IsTheHole()) return true;
break;
}
- case PIXEL_ELEMENTS: {
- PixelArray* pixels = PixelArray::cast(elements());
+ case EXTERNAL_PIXEL_ELEMENTS: {
+ ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
if (index < static_cast<uint32_t>(pixels->length())) {
return true;
}
@@ -6873,13 +8024,17 @@ bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS: {
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS: {
ExternalArray* array = ExternalArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
return true;
}
break;
}
+ case FAST_DOUBLE_ELEMENTS:
+ UNREACHABLE();
+ break;
case DICTIONARY_ELEMENTS: {
if (element_dictionary()->FindEntry(index)
!= NumberDictionary::kNotFound) {
@@ -6887,50 +8042,64 @@ bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
}
break;
}
- default:
- UNREACHABLE();
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ uint32_t length = parameter_map->length();
+ Object* probe =
+ (index < length - 2) ? parameter_map->get(index + 2) : NULL;
+ if (probe != NULL && !probe->IsTheHole()) return true;
+
+ // Not a mapped parameter, check the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS : FAST_ELEMENTS;
+ if (HasElementInElements(arguments, kind, index)) return true;
break;
+ }
}
// Handle [] on String objects.
if (this->IsStringObjectWithCharacterAt(index)) return true;
Object* pt = GetPrototype();
- if (pt == Heap::null_value()) return false;
+ if (pt->IsNull()) return false;
return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
}
MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
Object* value,
+ StrictModeFlag strict_mode,
bool check_prototype) {
+ Isolate* isolate = GetIsolate();
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSObject> this_handle(this);
- Handle<Object> value_handle(value);
+ Handle<Object> value_handle(value, isolate);
if (!interceptor->setter()->IsUndefined()) {
v8::IndexedPropertySetter setter =
v8::ToCData<v8::IndexedPropertySetter>(interceptor->setter());
- LOG(ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
- CustomArguments args(interceptor->data(), this, this);
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
+ CustomArguments args(isolate, interceptor->data(), this, this);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = setter(index, v8::Utils::ToLocal(value_handle), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) return *value_handle;
}
MaybeObject* raw_result =
this_handle->SetElementWithoutInterceptor(index,
*value_handle,
+ strict_mode,
check_prototype);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return raw_result;
}
@@ -6939,29 +8108,30 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
Object* structure,
uint32_t index,
Object* holder) {
- ASSERT(!structure->IsProxy());
+ Isolate* isolate = GetIsolate();
+ ASSERT(!structure->IsForeign());
// api style callbacks.
if (structure->IsAccessorInfo()) {
- AccessorInfo* data = AccessorInfo::cast(structure);
+ Handle<AccessorInfo> data(AccessorInfo::cast(structure));
Object* fun_obj = data->getter();
v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<JSObject> self(JSObject::cast(receiver));
Handle<JSObject> holder_handle(JSObject::cast(holder));
- Handle<Object> number = Factory::NewNumberFromUint(index);
- Handle<String> key(Factory::NumberToString(number));
- LOG(ApiNamedPropertyAccess("load", *self, *key));
- CustomArguments args(data->data(), *self, *holder_handle);
+ Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
+ Handle<String> key = isolate->factory()->NumberToString(number);
+ LOG(isolate, ApiNamedPropertyAccess("load", *self, *key));
+ CustomArguments args(isolate, data->data(), *self, *holder_handle);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = call_fun(v8::Utils::ToLocal(key), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
- if (result.IsEmpty()) return Heap::undefined_value();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ if (result.IsEmpty()) return isolate->heap()->undefined_value();
return *v8::Utils::OpenHandle(*result);
}
@@ -6973,7 +8143,7 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
JSFunction::cast(getter));
}
// Getter is not a function.
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
UNREACHABLE();
@@ -6984,51 +8154,59 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
MaybeObject* JSObject::SetElementWithCallback(Object* structure,
uint32_t index,
Object* value,
- JSObject* holder) {
- HandleScope scope;
+ JSObject* holder,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
// We should never get here to initialize a const with the hole
// value since a const declaration would conflict with the setter.
ASSERT(!value->IsTheHole());
- Handle<Object> value_handle(value);
+ Handle<Object> value_handle(value, isolate);
// To accommodate both the old and the new api we switch on the
- // data structure used to store the callbacks. Eventually proxy
+ // data structure used to store the callbacks. Eventually foreign
// callbacks should be phased out.
- ASSERT(!structure->IsProxy());
+ ASSERT(!structure->IsForeign());
if (structure->IsAccessorInfo()) {
// api style callbacks
- AccessorInfo* data = AccessorInfo::cast(structure);
+ Handle<JSObject> self(this);
+ Handle<JSObject> holder_handle(JSObject::cast(holder));
+ Handle<AccessorInfo> data(AccessorInfo::cast(structure));
Object* call_obj = data->setter();
v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
if (call_fun == NULL) return value;
- Handle<Object> number = Factory::NewNumberFromUint(index);
- Handle<String> key(Factory::NumberToString(number));
- LOG(ApiNamedPropertyAccess("store", this, *key));
- CustomArguments args(data->data(), this, JSObject::cast(holder));
+ Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
+ Handle<String> key(isolate->factory()->NumberToString(number));
+ LOG(isolate, ApiNamedPropertyAccess("store", *self, *key));
+ CustomArguments args(isolate, data->data(), *self, *holder_handle);
v8::AccessorInfo info(args.end());
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
call_fun(v8::Utils::ToLocal(key),
v8::Utils::ToLocal(value_handle),
info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *value_handle;
}
if (structure->IsFixedArray()) {
- Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
+ Handle<Object> setter(FixedArray::cast(structure)->get(kSetterIndex));
if (setter->IsJSFunction()) {
- return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
+ return SetPropertyWithDefinedSetter(JSFunction::cast(*setter), value);
} else {
- Handle<Object> holder_handle(holder);
- Handle<Object> key(Factory::NewNumberFromUint(index));
+ if (strict_mode == kNonStrictMode) {
+ return value;
+ }
+ Handle<Object> holder_handle(holder, isolate);
+ Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
Handle<Object> args[2] = { key, holder_handle };
- return Top::Throw(*Factory::NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
+ return isolate->Throw(
+ *isolate->factory()->NewTypeError("no_setter_in_callback",
+ HandleVector(args, 2)));
}
}
@@ -7037,33 +8215,245 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure,
}
+bool JSObject::HasFastArgumentsElements() {
+ Heap* heap = GetHeap();
+ if (!elements()->IsFixedArray()) return false;
+ FixedArray* elements = FixedArray::cast(this->elements());
+ if (elements->map() != heap->non_strict_arguments_elements_map()) {
+ return false;
+ }
+ FixedArray* arguments = FixedArray::cast(elements->get(1));
+ return !arguments->IsDictionary();
+}
+
+
+bool JSObject::HasDictionaryArgumentsElements() {
+ Heap* heap = GetHeap();
+ if (!elements()->IsFixedArray()) return false;
+ FixedArray* elements = FixedArray::cast(this->elements());
+ if (elements->map() != heap->non_strict_arguments_elements_map()) {
+ return false;
+ }
+ FixedArray* arguments = FixedArray::cast(elements->get(1));
+ return arguments->IsDictionary();
+}
+
+
// Adding n elements in fast case is O(n*n).
// Note: revisit design to have dual undefined values to capture absent
// elements.
MaybeObject* JSObject::SetFastElement(uint32_t index,
Object* value,
+ StrictModeFlag strict_mode,
bool check_prototype) {
- ASSERT(HasFastElements());
+ ASSERT(HasFastElements() || HasFastArgumentsElements());
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj = EnsureWritableFastElements();
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
+ FixedArray* backing_store = FixedArray::cast(elements());
+ if (backing_store->map() == GetHeap()->non_strict_arguments_elements_map()) {
+ backing_store = FixedArray::cast(backing_store->get(1));
+ } else {
+ Object* writable;
+ MaybeObject* maybe = EnsureWritableFastElements();
+ if (!maybe->ToObject(&writable)) return maybe;
+ backing_store = FixedArray::cast(writable);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
- uint32_t elms_length = static_cast<uint32_t>(elms->length());
+ uint32_t length = static_cast<uint32_t>(backing_store->length());
if (check_prototype &&
- (index >= elms_length || elms->get(index)->IsTheHole())) {
+ (index >= length || backing_store->get(index)->IsTheHole())) {
bool found;
+ MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
+ value,
+ &found,
+ strict_mode);
+ if (found) return result;
+ }
+
+ // Check whether there is extra space in fixed array.
+ if (index < length) {
+ backing_store->set(index, value);
+ if (IsJSArray()) {
+ // Update the length of the array if needed.
+ uint32_t array_length = 0;
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
+ if (index >= array_length) {
+ JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
+ }
+ }
+ return value;
+ }
+
+ // Allow gap in fast case.
+ if ((index - length) < kMaxGap) {
+ // Try allocating extra space.
+ int new_capacity = NewElementsCapacity(index + 1);
+ if (new_capacity <= kMaxFastElementsLength ||
+ !ShouldConvertToSlowElements(new_capacity)) {
+ ASSERT(static_cast<uint32_t>(new_capacity) > index);
+ Object* new_elements;
+ MaybeObject* maybe =
+ SetFastElementsCapacityAndLength(new_capacity, index + 1);
+ if (!maybe->ToObject(&new_elements)) return maybe;
+ FixedArray::cast(new_elements)->set(index, value);
+ return value;
+ }
+ }
+
+ // Otherwise default to slow case.
+ MaybeObject* result = NormalizeElements();
+ if (result->IsFailure()) return result;
+ return SetDictionaryElement(index, value, strict_mode, check_prototype);
+}
+
+
+MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
+ Object* value,
+ StrictModeFlag strict_mode,
+ bool check_prototype) {
+ ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+ Isolate* isolate = GetIsolate();
+ Heap* heap = isolate->heap();
+
+ // Insert element in the dictionary.
+ FixedArray* elements = FixedArray::cast(this->elements());
+ bool is_arguments =
+ (elements->map() == heap->non_strict_arguments_elements_map());
+ NumberDictionary* dictionary = NULL;
+ if (is_arguments) {
+ dictionary = NumberDictionary::cast(elements->get(1));
+ } else {
+ dictionary = NumberDictionary::cast(elements);
+ }
+
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
+ Object* element = dictionary->ValueAt(entry);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ if (details.type() == CALLBACKS) {
+ return SetElementWithCallback(element, index, value, this, strict_mode);
+ } else {
+ dictionary->UpdateMaxNumberKey(index);
+ // If put fails in strict mode, throw an exception.
+ if (!dictionary->ValueAtPut(entry, value) && strict_mode == kStrictMode) {
+ Handle<Object> holder(this);
+ Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
+ Handle<Object> args[2] = { number, holder };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("strict_read_only_property",
+ HandleVector(args, 2));
+ return isolate->Throw(*error);
+ }
+ }
+ } else {
+ // Index not already used. Look for an accessor in the prototype chain.
+ if (check_prototype) {
+ bool found;
+ MaybeObject* result =
+ SetElementWithCallbackSetterInPrototypes(
+ index, value, &found, strict_mode);
+ if (found) return result;
+ }
+ // When we set the is_extensible flag to false we always force
+ // elements into dictionary mode (and force them to stay there).
+ if (!map()->is_extensible()) {
+ if (strict_mode == kNonStrictMode) {
+ return isolate->heap()->undefined_value();
+ } else {
+ Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
+ Handle<String> name = isolate->factory()->NumberToString(number);
+ Handle<Object> args[1] = { name };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("object_not_extensible",
+ HandleVector(args, 1));
+ return isolate->Throw(*error);
+ }
+ }
+ Object* new_dictionary;
+ MaybeObject* maybe = dictionary->AtNumberPut(index, value);
+ if (!maybe->ToObject(&new_dictionary)) return maybe;
+ if (dictionary != NumberDictionary::cast(new_dictionary)) {
+ if (is_arguments) {
+ elements->set(1, new_dictionary);
+ } else {
+ set_elements(HeapObject::cast(new_dictionary));
+ }
+ dictionary = NumberDictionary::cast(new_dictionary);
+ }
+ }
+
+ // Update the array length if this JSObject is an array.
+ if (IsJSArray()) {
+ MaybeObject* result =
+ JSArray::cast(this)->JSArrayUpdateLengthFromIndex(index, value);
+ if (result->IsFailure()) return result;
+ }
+
+ // Attempt to put this object back into the fast case.
+ if (ShouldConvertToFastElements()) {
+ uint32_t new_length = 0;
+ if (IsJSArray()) {
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
+ } else {
+ new_length = dictionary->max_number_key() + 1;
+ }
MaybeObject* result =
- SetElementWithCallbackSetterInPrototypes(index, value, &found);
+ SetFastElementsCapacityAndLength(new_length, new_length);
+ if (result->IsFailure()) return result;
+#ifdef DEBUG
+ if (FLAG_trace_normalization) {
+ PrintF("Object elements are fast case again:\n");
+ Print();
+ }
+#endif
+ }
+ return value;
+}
+
+
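For an index that already has a dictionary entry, the write above resolves one of four ways: a CALLBACKS entry delegates to its setter, a writable data entry is updated, and a failed data write throws strict_read_only_property in strict mode but is silently dropped in non-strict mode. A loose restatement of that routing, not the diff's actual types:

enum DictionaryWriteOutcome {
  kDelegatedToCallback,  // CALLBACKS entry: the stored setter decides
  kStored,               // writable data entry updated
  kThrewReadOnly,        // read-only data entry, strict mode
  kSilentlyIgnored       // read-only data entry, non-strict mode
};

DictionaryWriteOutcome ClassifyDictionaryWrite(bool entry_is_callback,
                                               bool put_succeeded,
                                               bool strict_mode) {
  if (entry_is_callback) return kDelegatedToCallback;
  if (put_succeeded) return kStored;
  return strict_mode ? kThrewReadOnly : kSilentlyIgnored;
}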
+MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
+ uint32_t index,
+ Object* value,
+ StrictModeFlag strict_mode,
+ bool check_prototype) {
+ ASSERT(HasFastDoubleElements());
+
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
+ uint32_t elms_length = static_cast<uint32_t>(elms->length());
+
+ // If storing to an element that isn't in the array, pass the store request
+ // up the prototype chain before storing in the receiver's elements.
+ if (check_prototype &&
+ (index >= elms_length || elms->is_the_hole(index))) {
+ bool found;
+ MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
+ value,
+ &found,
+ strict_mode);
if (found) return result;
}
+ // If the value object is not a number, switch to fast elements and try
+ // again.
+ bool value_is_smi = value->IsSmi();
+ if (!value->IsNumber()) {
+ Object* obj;
+ uint32_t length = elms_length;
+ if (IsJSArray()) {
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+ }
+ MaybeObject* maybe_obj =
+ SetFastElementsCapacityAndLength(elms_length, length);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ return SetFastElement(index, value, strict_mode, check_prototype);
+ }
+
+ double double_value = value_is_smi
+ ? static_cast<double>(Smi::cast(value)->value())
+ : HeapNumber::cast(value)->value();
- // Check whether there is extra space in fixed array..
+ // Check whether there is extra space in the fixed array.
if (index < elms_length) {
- elms->set(index, value);
+ elms->set(index, double_value);
if (IsJSArray()) {
// Update the length of the array if needed.
uint32_t array_length = 0;
@@ -7084,10 +8474,11 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
ASSERT(static_cast<uint32_t>(new_capacity) > index);
Object* obj;
{ MaybeObject* maybe_obj =
- SetFastElementsCapacityAndLength(new_capacity, index + 1);
+ SetFastDoubleElementsCapacityAndLength(new_capacity,
+ index + 1);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- FixedArray::cast(elements())->set(index, value);
+ FixedDoubleArray::cast(elements())->set(index, double_value);
return value;
}
}
@@ -7098,47 +8489,62 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
ASSERT(HasDictionaryElements());
- return SetElement(index, value, check_prototype);
+ return SetElement(index, value, strict_mode, check_prototype);
}
MaybeObject* JSObject::SetElement(uint32_t index,
Object* value,
+ StrictModeFlag strict_mode,
bool check_prototype) {
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !Top::MayIndexedAccess(this, index, v8::ACCESS_SET)) {
- HandleScope scope;
- Handle<Object> value_handle(value);
- Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return *value_handle;
+ if (IsAccessCheckNeeded()) {
+ Heap* heap = GetHeap();
+ if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
+ HandleScope scope;
+ Handle<Object> value_handle(value);
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ return *value_handle;
+ }
}
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetElement(index, value, check_prototype);
+ return JSObject::cast(proto)->SetElement(index,
+ value,
+ strict_mode,
+ check_prototype);
}
// Check for lookup interceptor
if (HasIndexedInterceptor()) {
- return SetElementWithInterceptor(index, value, check_prototype);
+ return SetElementWithInterceptor(index,
+ value,
+ strict_mode,
+ check_prototype);
}
- return SetElementWithoutInterceptor(index, value, check_prototype);
+ return SetElementWithoutInterceptor(index,
+ value,
+ strict_mode,
+ check_prototype);
}
MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
Object* value,
+ StrictModeFlag strict_mode,
bool check_prototype) {
+ Isolate* isolate = GetIsolate();
switch (GetElementsKind()) {
case FAST_ELEMENTS:
- // Fast case.
- return SetFastElement(index, value, check_prototype);
- case PIXEL_ELEMENTS: {
- PixelArray* pixels = PixelArray::cast(elements());
+ return SetFastElement(index, value, strict_mode, check_prototype);
+ case FAST_DOUBLE_ELEMENTS:
+ return SetFastDoubleElement(index, value, strict_mode, check_prototype);
+ case EXTERNAL_PIXEL_ELEMENTS: {
+ ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
return pixels->SetValue(index, value);
}
case EXTERNAL_BYTE_ELEMENTS: {
@@ -7172,90 +8578,39 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
ExternalFloatArray* array = ExternalFloatArray::cast(elements());
return array->SetValue(index, value);
}
- case DICTIONARY_ELEMENTS: {
- // Insert element in the dictionary.
- FixedArray* elms = FixedArray::cast(elements());
- NumberDictionary* dictionary = NumberDictionary::cast(elms);
-
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS) {
- return SetElementWithCallback(element, index, value, this);
- } else {
- dictionary->UpdateMaxNumberKey(index);
- dictionary->ValueAtPut(entry, value);
- }
+ case EXTERNAL_DOUBLE_ELEMENTS: {
+ ExternalDoubleArray* array = ExternalDoubleArray::cast(elements());
+ return array->SetValue(index, value);
+ }
+ case DICTIONARY_ELEMENTS:
+ return SetDictionaryElement(index, value, strict_mode, check_prototype);
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ uint32_t length = parameter_map->length();
+ Object* probe =
+ (index < length - 2) ? parameter_map->get(index + 2) : NULL;
+ if (probe != NULL && !probe->IsTheHole()) {
+ Context* context = Context::cast(parameter_map->get(0));
+ int context_index = Smi::cast(probe)->value();
+ ASSERT(!context->get(context_index)->IsTheHole());
+ context->set(context_index, value);
+ return value;
} else {
- // Index not already used. Look for an accessor in the prototype chain.
- if (check_prototype) {
- bool found;
- MaybeObject* result =
- SetElementWithCallbackSetterInPrototypes(index, value, &found);
- if (found) return result;
- }
- // When we set the is_extensible flag to false we always force
- // the element into dictionary mode (and force them to stay there).
- if (!map()->is_extensible()) {
- Handle<Object> number(Factory::NewNumberFromUint(index));
- Handle<String> index_string(Factory::NumberToString(number));
- Handle<Object> args[1] = { index_string };
- return Top::Throw(*Factory::NewTypeError("object_not_extensible",
- HandleVector(args, 1)));
- }
- Object* result;
- { MaybeObject* maybe_result = dictionary->AtNumberPut(index, value);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- if (elms != FixedArray::cast(result)) {
- set_elements(FixedArray::cast(result));
- }
- }
-
- // Update the array length if this JSObject is an array.
- if (IsJSArray()) {
- JSArray* array = JSArray::cast(this);
- Object* return_value;
- { MaybeObject* maybe_return_value =
- array->JSArrayUpdateLengthFromIndex(index, value);
- if (!maybe_return_value->ToObject(&return_value)) {
- return maybe_return_value;
- }
- }
- }
-
- // Attempt to put this object back in fast case.
- if (ShouldConvertToFastElements()) {
- uint32_t new_length = 0;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
+ // Object is not mapped; defer to the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ if (arguments->IsDictionary()) {
+ return SetDictionaryElement(index, value, strict_mode,
+ check_prototype);
} else {
- new_length = NumberDictionary::cast(elements())->max_number_key() + 1;
+ return SetFastElement(index, value, strict_mode, check_prototype);
}
- Object* obj;
- { MaybeObject* maybe_obj =
- SetFastElementsCapacityAndLength(new_length, new_length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-#ifdef DEBUG
- if (FLAG_trace_normalization) {
- PrintF("Object elements are fast case again:\n");
- Print();
- }
-#endif
}
-
- return value;
}
- default:
- UNREACHABLE();
- break;
}
// All possible cases have been handled above. Add a return to avoid
// complaints from the compiler.
UNREACHABLE();
- return Heap::null_value();
+ return isolate->heap()->null_value();
}
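Both NON_STRICT_ARGUMENTS_ELEMENTS cases in this file consult the same two-part structure: slot 0 of the parameter map holds the context, slot 1 the unmapped backing store (a FixedArray or a NumberDictionary), and slot i + 2 either the hole or a Smi naming the context slot that argument i aliases. A rough sketch of the lookup over plain arrays; the kHole marker is an assumption standing in for the hole value:

// parameter_map[0] -> context, parameter_map[1] -> unmapped backing store,
// parameter_map[i + 2] -> context slot index for argument i, or kHole.
const int kHole = -1;  // stand-in for the hole value

int LookupMappedArgument(const int* parameter_map, int map_length,
                         const int* context, const int* backing_store,
                         int backing_length, int index) {
  if (index + 2 < map_length && parameter_map[index + 2] != kHole) {
    return context[parameter_map[index + 2]];  // aliased: read the context slot
  }
  if (index < backing_length) return backing_store[index];  // unmapped element
  return kHole;  // absent here; the caller falls back to the prototype chain
}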
@@ -7268,7 +8623,7 @@ MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
if (index >= old_len && index != 0xffffffff) {
Object* len;
{ MaybeObject* maybe_len =
- Heap::NumberFromDouble(static_cast<double>(index) + 1);
+ GetHeap()->NumberFromDouble(static_cast<double>(index) + 1);
if (!maybe_len->ToObject(&len)) return maybe_len;
}
set_length(len);
@@ -7290,14 +8645,24 @@ MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
}
break;
}
- case PIXEL_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
+ if (index < static_cast<uint32_t>(elms->length())) {
+ if (!elms->is_the_hole(index)) {
+ return GetHeap()->NumberFromDouble(elms->get(index));
+ }
+ }
+ break;
+ }
+ case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS: {
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS: {
MaybeObject* maybe_value = GetExternalElement(index);
Object* value;
if (!maybe_value->ToObject(&value)) return maybe_value;
@@ -7320,47 +8685,48 @@ MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
}
break;
}
- default:
- UNREACHABLE();
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
break;
}
// Continue searching via the prototype chain.
Object* pt = GetPrototype();
- if (pt == Heap::null_value()) return Heap::undefined_value();
+ if (pt->IsNull()) return GetHeap()->undefined_value();
return pt->GetElementWithReceiver(receiver, index);
}
MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
uint32_t index) {
+ Isolate* isolate = GetIsolate();
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope;
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- Handle<Object> this_handle(receiver);
- Handle<JSObject> holder_handle(this);
-
+ HandleScope scope(isolate);
+ Handle<InterceptorInfo> interceptor(GetIndexedInterceptor(), isolate);
+ Handle<Object> this_handle(receiver, isolate);
+ Handle<JSObject> holder_handle(this, isolate);
if (!interceptor->getter()->IsUndefined()) {
v8::IndexedPropertyGetter getter =
v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
- LOG(ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
- CustomArguments args(interceptor->data(), receiver, this);
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
+ CustomArguments args(isolate, interceptor->data(), receiver, this);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = getter(index, info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
}
MaybeObject* raw_result =
holder_handle->GetElementPostInterceptor(*this_handle, index);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return raw_result;
}
@@ -7368,10 +8734,12 @@ MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
uint32_t index) {
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !Top::MayIndexedAccess(this, index, v8::ACCESS_GET)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_GET);
- return Heap::undefined_value();
+ if (IsAccessCheckNeeded()) {
+ Heap* heap = GetHeap();
+ if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_GET)) {
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
+ return heap->undefined_value();
+ }
}
if (HasIndexedInterceptor()) {
@@ -7389,14 +8757,25 @@ MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
}
break;
}
- case PIXEL_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
+ if (index < static_cast<uint32_t>(elms->length())) {
+ if (!elms->is_the_hole(index)) {
+ double double_value = elms->get(index);
+ return GetHeap()->NumberFromDouble(double_value);
+ }
+ }
+ break;
+ }
+ case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS: {
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS: {
MaybeObject* maybe_value = GetExternalElement(index);
Object* value;
if (!maybe_value->ToObject(&value)) return maybe_value;
@@ -7419,10 +8798,45 @@ MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
}
break;
}
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ uint32_t length = parameter_map->length();
+ Object* probe =
+ (index < length - 2) ? parameter_map->get(index + 2) : NULL;
+ if (probe != NULL && !probe->IsTheHole()) {
+ Context* context = Context::cast(parameter_map->get(0));
+ int context_index = Smi::cast(probe)->value();
+ ASSERT(!context->get(context_index)->IsTheHole());
+ return context->get(context_index);
+ } else {
+ // Object is not mapped; defer to the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ if (arguments->IsDictionary()) {
+ NumberDictionary* dictionary = NumberDictionary::cast(arguments);
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
+ Object* element = dictionary->ValueAt(entry);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ if (details.type() == CALLBACKS) {
+ return GetElementWithCallback(receiver,
+ element,
+ index,
+ this);
+ }
+ return element;
+ }
+ } else if (index < static_cast<uint32_t>(arguments->length())) {
+ Object* value = arguments->get(index);
+ if (!value->IsTheHole()) return value;
+ }
+ }
+ break;
+ }
}
Object* pt = GetPrototype();
- if (pt == Heap::null_value()) return Heap::undefined_value();
+ Heap* heap = GetHeap();
+ if (pt == heap->null_value()) return heap->undefined_value();
return pt->GetElementWithReceiver(receiver, index);
}
@@ -7431,8 +8845,8 @@ MaybeObject* JSObject::GetExternalElement(uint32_t index) {
// Get element works for both JSObject and JSArray since
// JSArray::length cannot change.
switch (GetElementsKind()) {
- case PIXEL_ELEMENTS: {
- PixelArray* pixels = PixelArray::cast(elements());
+ case EXTERNAL_PIXEL_ELEMENTS: {
+ ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
if (index < static_cast<uint32_t>(pixels->length())) {
uint8_t value = pixels->get(index);
return Smi::FromInt(value);
@@ -7477,7 +8891,7 @@ MaybeObject* JSObject::GetExternalElement(uint32_t index) {
ExternalIntArray* array = ExternalIntArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
int32_t value = array->get(index);
- return Heap::NumberFromInt32(value);
+ return GetHeap()->NumberFromInt32(value);
}
break;
}
@@ -7486,7 +8900,7 @@ MaybeObject* JSObject::GetExternalElement(uint32_t index) {
ExternalUnsignedIntArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
uint32_t value = array->get(index);
- return Heap::NumberFromUint32(value);
+ return GetHeap()->NumberFromUint32(value);
}
break;
}
@@ -7494,16 +8908,28 @@ MaybeObject* JSObject::GetExternalElement(uint32_t index) {
ExternalFloatArray* array = ExternalFloatArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
float value = array->get(index);
- return Heap::AllocateHeapNumber(value);
+ return GetHeap()->AllocateHeapNumber(value);
+ }
+ break;
+ }
+ case EXTERNAL_DOUBLE_ELEMENTS: {
+ ExternalDoubleArray* array = ExternalDoubleArray::cast(elements());
+ if (index < static_cast<uint32_t>(array->length())) {
+ double value = array->get(index);
+ return GetHeap()->AllocateHeapNumber(value);
}
break;
}
+ case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case DICTIONARY_ELEMENTS:
UNREACHABLE();
break;
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
+ break;
}
- return Heap::undefined_value();
+ return GetHeap()->undefined_value();
}
@@ -7511,62 +8937,90 @@ bool JSObject::HasDenseElements() {
int capacity = 0;
int number_of_elements = 0;
+ FixedArray* backing_store = FixedArray::cast(elements());
switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- FixedArray* elms = FixedArray::cast(elements());
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ backing_store = FixedArray::cast(backing_store->get(1));
+ if (backing_store->IsDictionary()) {
+ NumberDictionary* dictionary = NumberDictionary::cast(backing_store);
+ capacity = dictionary->Capacity();
+ number_of_elements = dictionary->NumberOfElements();
+ break;
+ }
+ // Fall through.
+ case FAST_ELEMENTS:
+ capacity = backing_store->length();
+ for (int i = 0; i < capacity; ++i) {
+ if (!backing_store->get(i)->IsTheHole()) ++number_of_elements;
+ }
+ break;
+ case DICTIONARY_ELEMENTS: {
+ NumberDictionary* dictionary = NumberDictionary::cast(backing_store);
+ capacity = dictionary->Capacity();
+ number_of_elements = dictionary->NumberOfElements();
+ break;
+ }
+ case FAST_DOUBLE_ELEMENTS: {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
capacity = elms->length();
for (int i = 0; i < capacity; i++) {
- if (!elms->get(i)->IsTheHole()) number_of_elements++;
+ if (!elms->is_the_hole(i)) number_of_elements++;
}
break;
}
- case PIXEL_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS: {
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS: {
return true;
}
- case DICTIONARY_ELEMENTS: {
- NumberDictionary* dictionary = NumberDictionary::cast(elements());
- capacity = dictionary->Capacity();
- number_of_elements = dictionary->NumberOfElements();
- break;
- }
- default:
- UNREACHABLE();
- break;
}
-
- if (capacity == 0) return true;
- return (number_of_elements > (capacity / 2));
+ return (capacity == 0) || (number_of_elements > (capacity / 2));
}
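HasDenseElements now funnels every backing-store kind into a single rule, restated below: an empty store counts as dense, and otherwise more than half of the capacity must be occupied. ShouldConvertToSlowElements and ShouldConvertToFastElements both lean on this predicate.

bool IsDense(int capacity, int number_of_elements) {
  return capacity == 0 || number_of_elements > capacity / 2;
}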
bool JSObject::ShouldConvertToSlowElements(int new_capacity) {
- ASSERT(HasFastElements());
// Keep the array in fast case if the current backing storage is
// almost filled and if the new capacity is no more than twice the
// old capacity.
- int elements_length = FixedArray::cast(elements())->length();
+ int elements_length = 0;
+ if (elements()->map() == GetHeap()->non_strict_arguments_elements_map()) {
+ FixedArray* backing_store = FixedArray::cast(elements());
+ elements_length = FixedArray::cast(backing_store->get(1))->length();
+ } else if (HasFastElements()) {
+ elements_length = FixedArray::cast(elements())->length();
+ } else if (HasFastDoubleElements()) {
+ elements_length = FixedDoubleArray::cast(elements())->length();
+ } else {
+ UNREACHABLE();
+ }
return !HasDenseElements() || ((new_capacity / 2) > elements_length);
}
bool JSObject::ShouldConvertToFastElements() {
- ASSERT(HasDictionaryElements());
- NumberDictionary* dictionary = NumberDictionary::cast(elements());
+ ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
// If the elements are sparse, we should not go back to fast case.
if (!HasDenseElements()) return false;
- // If an element has been added at a very high index in the elements
- // dictionary, we cannot go back to fast case.
- if (dictionary->requires_slow_elements()) return false;
// An object requiring access checks is never allowed to have fast
// elements. If it had fast elements we would skip security checks.
if (IsAccessCheckNeeded()) return false;
+
+ FixedArray* elements = FixedArray::cast(this->elements());
+ NumberDictionary* dictionary = NULL;
+ if (elements->map() == GetHeap()->non_strict_arguments_elements_map()) {
+ dictionary = NumberDictionary::cast(elements->get(1));
+ } else {
+ dictionary = NumberDictionary::cast(elements);
+ }
+ // If an element has been added at a very high index in the elements
+ // dictionary, we cannot go back to fast case.
+ if (dictionary->requires_slow_elements()) return false;
// If the dictionary backing storage takes up roughly half as much
// space as a fast-case backing storage would, the array should have
// fast elements.
@@ -7581,6 +9035,23 @@ bool JSObject::ShouldConvertToFastElements() {
}
+bool JSObject::ShouldConvertToFastDoubleElements() {
+ if (FLAG_unbox_double_arrays) {
+ ASSERT(HasDictionaryElements());
+ NumberDictionary* dictionary = NumberDictionary::cast(elements());
+ for (int i = 0; i < dictionary->Capacity(); i++) {
+ Object* key = dictionary->KeyAt(i);
+ if (key->IsNumber()) {
+ if (!dictionary->ValueAt(i)->IsNumber()) return false;
+ }
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+
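ShouldConvertToFastDoubleElements answers true only when every value stored under a numeric key is itself a number, since an unboxed FixedDoubleArray has no slot format for a tagged pointer. A sketch of that scan over a plain value list; the is_number flag stands in for Object::IsNumber():

struct ElementValue { bool is_number; double number; };

bool CanUnboxToDoubleArray(const ElementValue* values, int count) {
  for (int i = 0; i < count; i++) {
    if (!values[i].is_number) return false;  // a tagged value blocks unboxing
  }
  return true;
}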
// Certain compilers request function template instantiation when they
// see the definition of the other template functions in the
// class. This requires us to have the template functions put
@@ -7645,7 +9116,7 @@ InterceptorInfo* JSObject::GetIndexedInterceptor() {
MaybeObject* JSObject::GetPropertyPostInterceptor(
- JSObject* receiver,
+ JSReceiver* receiver,
String* name,
PropertyAttributes* attributes) {
// Check local property in holder, ignore interceptor.
@@ -7657,13 +9128,13 @@ MaybeObject* JSObject::GetPropertyPostInterceptor(
// Continue searching via the prototype chain.
Object* pt = GetPrototype();
*attributes = ABSENT;
- if (pt == Heap::null_value()) return Heap::undefined_value();
+ if (pt->IsNull()) return GetHeap()->undefined_value();
return pt->GetPropertyWithReceiver(receiver, name, attributes);
}
MaybeObject* JSObject::GetLocalPropertyPostInterceptor(
- JSObject* receiver,
+ JSReceiver* receiver,
String* name,
PropertyAttributes* attributes) {
// Check local property in holder, ignore interceptor.
@@ -7672,33 +9143,35 @@ MaybeObject* JSObject::GetLocalPropertyPostInterceptor(
if (result.IsProperty()) {
return GetProperty(receiver, &result, name, attributes);
}
- return Heap::undefined_value();
+ return GetHeap()->undefined_value();
}
MaybeObject* JSObject::GetPropertyWithInterceptor(
- JSObject* receiver,
+ JSReceiver* receiver,
String* name,
PropertyAttributes* attributes) {
+ Isolate* isolate = GetIsolate();
InterceptorInfo* interceptor = GetNamedInterceptor();
- HandleScope scope;
- Handle<JSObject> receiver_handle(receiver);
+ HandleScope scope(isolate);
+ Handle<JSReceiver> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(name);
if (!interceptor->getter()->IsUndefined()) {
v8::NamedPropertyGetter getter =
v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
- LOG(ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
- CustomArguments args(interceptor->data(), receiver, this);
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
+ CustomArguments args(isolate, interceptor->data(), receiver, this);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
result = getter(v8::Utils::ToLocal(name_handle), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
*attributes = NONE;
return *v8::Utils::OpenHandle(*result);
@@ -7709,17 +9182,19 @@ MaybeObject* JSObject::GetPropertyWithInterceptor(
*receiver_handle,
*name_handle,
attributes);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return result;
}
bool JSObject::HasRealNamedProperty(String* key) {
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(this, key, v8::ACCESS_HAS)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return false;
+ if (IsAccessCheckNeeded()) {
+ Heap* heap = GetHeap();
+ if (!heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ return false;
+ }
}
LookupResult result;
@@ -7730,10 +9205,12 @@ bool JSObject::HasRealNamedProperty(String* key) {
bool JSObject::HasRealElementProperty(uint32_t index) {
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !Top::MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return false;
+ if (IsAccessCheckNeeded()) {
+ Heap* heap = GetHeap();
+ if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ return false;
+ }
}
// Handle [] on String objects.
@@ -7748,8 +9225,8 @@ bool JSObject::HasRealElementProperty(uint32_t index) {
return (index < length) &&
!FixedArray::cast(elements())->get(index)->IsTheHole();
}
- case PIXEL_ELEMENTS: {
- PixelArray* pixels = PixelArray::cast(elements());
+ case EXTERNAL_PIXEL_ELEMENTS: {
+ ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
return index < static_cast<uint32_t>(pixels->length());
}
case EXTERNAL_BYTE_ELEMENTS:
@@ -7758,30 +9235,36 @@ bool JSObject::HasRealElementProperty(uint32_t index) {
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS: {
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS: {
ExternalArray* array = ExternalArray::cast(elements());
return index < static_cast<uint32_t>(array->length());
}
+ case FAST_DOUBLE_ELEMENTS:
+ UNREACHABLE();
+ break;
case DICTIONARY_ELEMENTS: {
return element_dictionary()->FindEntry(index)
!= NumberDictionary::kNotFound;
}
- default:
- UNREACHABLE();
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
break;
}
// All possibilities have already been handled above.
UNREACHABLE();
- return Heap::null_value();
+ return GetHeap()->null_value();
}
bool JSObject::HasRealNamedCallbackProperty(String* key) {
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(this, key, v8::ACCESS_HAS)) {
- Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return false;
+ if (IsAccessCheckNeeded()) {
+ Heap* heap = GetHeap();
+ if (!heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ return false;
+ }
}
LookupResult result;
@@ -7795,7 +9278,7 @@ int JSObject::NumberOfLocalProperties(PropertyAttributes filter) {
DescriptorArray* descs = map()->instance_descriptors();
int result = 0;
for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PropertyDetails details = descs->GetDetails(i);
+ PropertyDetails details(descs->GetDetails(i));
if (details.IsProperty() && (details.attributes() & filter) == 0) {
result++;
}
@@ -7980,8 +9463,8 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
ASSERT(!storage || storage->length() >= counter);
break;
}
- case PIXEL_ELEMENTS: {
- int length = PixelArray::cast(elements())->length();
+ case EXTERNAL_PIXEL_ELEMENTS: {
+ int length = ExternalPixelArray::cast(elements())->length();
while (counter < length) {
if (storage != NULL) {
storage->set(counter, Smi::FromInt(counter));
@@ -7997,7 +9480,8 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS: {
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS: {
int length = ExternalArray::cast(elements())->length();
while (counter < length) {
if (storage != NULL) {
@@ -8008,16 +9492,41 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
ASSERT(!storage || storage->length() >= counter);
break;
}
+ case FAST_DOUBLE_ELEMENTS:
+ UNREACHABLE();
+ break;
case DICTIONARY_ELEMENTS: {
if (storage != NULL) {
element_dictionary()->CopyKeysTo(storage, filter);
}
- counter = element_dictionary()->NumberOfElementsFilterAttributes(filter);
+ counter += element_dictionary()->NumberOfElementsFilterAttributes(filter);
break;
}
- default:
- UNREACHABLE();
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ int length = parameter_map->length();
+ for (int i = 2; i < length; ++i) {
+ if (!parameter_map->get(i)->IsTheHole()) {
+ if (storage != NULL) storage->set(i - 2, Smi::FromInt(i - 2));
+ ++counter;
+ }
+ }
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ if (arguments->IsDictionary()) {
+ NumberDictionary* dictionary = NumberDictionary::cast(arguments);
+ if (storage != NULL) dictionary->CopyKeysTo(storage, filter);
+ counter += dictionary->NumberOfElementsFilterAttributes(filter);
+ } else {
+ int length = arguments->length();
+ for (int i = 0; i < length; ++i) {
+ if (!arguments->get(i)->IsTheHole()) {
+ if (storage != NULL) storage->set(i, Smi::FromInt(i));
+ ++counter;
+ }
+ }
+ }
break;
+ }
}
if (this->IsJSValue()) {
@@ -8043,51 +9552,6 @@ int JSObject::GetEnumElementKeys(FixedArray* storage) {
}
-bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) {
- ASSERT(other->IsNumber());
- return key == static_cast<uint32_t>(other->Number());
-}
-
-
-uint32_t NumberDictionaryShape::Hash(uint32_t key) {
- return ComputeIntegerHash(key);
-}
-
-
-uint32_t NumberDictionaryShape::HashForObject(uint32_t key, Object* other) {
- ASSERT(other->IsNumber());
- return ComputeIntegerHash(static_cast<uint32_t>(other->Number()));
-}
-
-
-MaybeObject* NumberDictionaryShape::AsObject(uint32_t key) {
- return Heap::NumberFromUint32(key);
-}
-
-
-bool StringDictionaryShape::IsMatch(String* key, Object* other) {
- // We know that all entries in a hash table had their hash keys created.
- // Use that knowledge to have fast failure.
- if (key->Hash() != String::cast(other)->Hash()) return false;
- return key->Equals(String::cast(other));
-}
-
-
-uint32_t StringDictionaryShape::Hash(String* key) {
- return key->Hash();
-}
-
-
-uint32_t StringDictionaryShape::HashForObject(String* key, Object* other) {
- return String::cast(other)->Hash();
-}
-
-
-MaybeObject* StringDictionaryShape::AsObject(String* key) {
- return key;
-}
-
-
// StringKey simply carries a string object as key.
class StringKey : public HashTableKey {
public:
@@ -8170,7 +9634,7 @@ class StringSharedKey : public HashTableKey {
MUST_USE_RESULT MaybeObject* AsObject() {
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(3);
+ { MaybeObject* maybe_obj = source_->GetHeap()->AllocateFixedArray(3);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* pair = FixedArray::cast(obj);
@@ -8254,7 +9718,8 @@ class Utf8SymbolKey : public HashTableKey {
MaybeObject* AsObject() {
if (hash_field_ == 0) Hash();
- return Heap::AllocateSymbol(string_, chars_, hash_field_);
+ return Isolate::Current()->heap()->AllocateSymbol(
+ string_, chars_, hash_field_);
}
Vector<const char> string_;
@@ -8321,11 +9786,76 @@ class AsciiSymbolKey : public SequentialSymbolKey<char> {
MaybeObject* AsObject() {
if (hash_field_ == 0) Hash();
- return Heap::AllocateAsciiSymbol(string_, hash_field_);
+ return HEAP->AllocateAsciiSymbol(string_, hash_field_);
}
};
+class SubStringAsciiSymbolKey : public HashTableKey {
+ public:
+ explicit SubStringAsciiSymbolKey(Handle<SeqAsciiString> string,
+ int from,
+ int length)
+ : string_(string), from_(from), length_(length) { }
+
+ uint32_t Hash() {
+ ASSERT(length_ >= 0);
+ ASSERT(from_ + length_ <= string_->length());
+ StringHasher hasher(length_);
+
+ // Very long strings have a trivial hash that doesn't inspect the
+ // string contents.
+ if (hasher.has_trivial_hash()) {
+ hash_field_ = hasher.GetHashField();
+ } else {
+ int i = 0;
+ // Do the iterative array index computation as long as there is a
+ // chance this is an array index.
+ while (i < length_ && hasher.is_array_index()) {
+ hasher.AddCharacter(static_cast<uc32>(
+ string_->SeqAsciiStringGet(i + from_)));
+ i++;
+ }
+
+ // Process the remaining characters without updating the array
+ // index.
+ while (i < length_) {
+ hasher.AddCharacterNoIndex(static_cast<uc32>(
+ string_->SeqAsciiStringGet(i + from_)));
+ i++;
+ }
+ hash_field_ = hasher.GetHashField();
+ }
+
+ uint32_t result = hash_field_ >> String::kHashShift;
+ ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
+ return result;
+ }
+
+ uint32_t HashForObject(Object* other) {
+ return String::cast(other)->Hash();
+ }
+
+ bool IsMatch(Object* string) {
+ Vector<const char> chars(string_->GetChars() + from_, length_);
+ return String::cast(string)->IsAsciiEqualTo(chars);
+ }
+
+ MaybeObject* AsObject() {
+ if (hash_field_ == 0) Hash();
+ Vector<const char> chars(string_->GetChars() + from_, length_);
+ return HEAP->AllocateAsciiSymbol(chars, hash_field_);
+ }
+
+ private:
+ Handle<SeqAsciiString> string_;
+ int from_;
+ int length_;
+ uint32_t hash_field_;
+};
+
+
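SubStringAsciiSymbolKey hashes a slice of the parent string without materializing a substring first. Like the other sequential keys, it feeds characters to the hasher in two phases, tracking a possible array index only while one is still possible. The index-tracking condition can be restated as a plain digit scan; the 10-digit cap approximates V8's array-index size limit, and the real code also guards against uint32 overflow:

bool CouldBeArrayIndex(const char* chars, int length) {
  if (length == 0 || length > 10) return false;     // too long for a uint32 index
  if (chars[0] == '0' && length > 1) return false;  // no leading zeros
  for (int i = 0; i < length; i++) {
    if (chars[i] < '0' || chars[i] > '9') return false;
  }
  return true;
}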
class TwoByteSymbolKey : public SequentialSymbolKey<uc16> {
public:
explicit TwoByteSymbolKey(Vector<const uc16> str)
@@ -8337,7 +9867,7 @@ class TwoByteSymbolKey : public SequentialSymbolKey<uc16> {
MaybeObject* AsObject() {
if (hash_field_ == 0) Hash();
- return Heap::AllocateTwoByteSymbol(string_, hash_field_);
+ return HEAP->AllocateTwoByteSymbol(string_, hash_field_);
}
};
@@ -8345,7 +9875,8 @@ class TwoByteSymbolKey : public SequentialSymbolKey<uc16> {
// SymbolKey carries a string/symbol object as key.
class SymbolKey : public HashTableKey {
public:
- explicit SymbolKey(String* string) : string_(string) { }
+ explicit SymbolKey(String* string)
+ : string_(string) { }
bool IsMatch(Object* string) {
return String::cast(string)->Equals(string_);
@@ -8361,8 +9892,9 @@ class SymbolKey : public HashTableKey {
// Attempt to flatten the string, so that symbols will most often
// be flat strings.
string_ = string_->TryFlattenGetString();
+ Heap* heap = string_->GetHeap();
// Transform string to symbol if possible.
- Map* map = Heap::SymbolMapForString(string_);
+ Map* map = heap->SymbolMapForString(string_);
if (map != NULL) {
string_->set_map(map);
ASSERT(string_->IsSymbol());
@@ -8370,7 +9902,7 @@ class SymbolKey : public HashTableKey {
}
// Otherwise allocate a new symbol.
StringInputBuffer buffer(string_);
- return Heap::AllocateInternalSymbol(&buffer,
+ return heap->AllocateInternalSymbol(&buffer,
string_->length(),
string_->hash_field());
}
@@ -8409,8 +9941,8 @@ MaybeObject* HashTable<Shape, Key>::Allocate(int at_least_space_for,
}
Object* obj;
- { MaybeObject* maybe_obj =
- Heap::AllocateHashTable(EntryToIndex(capacity), pretenure);
+ { MaybeObject* maybe_obj = Isolate::Current()->heap()->
+ AllocateHashTable(EntryToIndex(capacity), pretenure);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
HashTable::cast(obj)->SetNumberOfElements(0);
@@ -8421,23 +9953,6 @@ MaybeObject* HashTable<Shape, Key>::Allocate(int at_least_space_for,
// Find entry for key otherwise return kNotFound.
-template<typename Shape, typename Key>
-int HashTable<Shape, Key>::FindEntry(Key key) {
- uint32_t capacity = Capacity();
- uint32_t entry = FirstProbe(Shape::Hash(key), capacity);
- uint32_t count = 1;
- // EnsureCapacity will guarantee the hash table is never full.
- while (true) {
- Object* element = KeyAt(entry);
- if (element->IsUndefined()) break; // Empty entry.
- if (!element->IsNull() && Shape::IsMatch(key, element)) return entry;
- entry = NextProbe(entry, count++, capacity);
- }
- return kNotFound;
-}
-
-
-// Find entry for key otherwise return kNotFound.
int StringDictionary::FindEntry(String* key) {
if (!key->IsSymbol()) {
return HashTable<StringDictionaryShape, String*>::FindEntry(key);
@@ -8478,6 +9993,40 @@ int StringDictionary::FindEntry(String* key) {
template<typename Shape, typename Key>
+MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
+ ASSERT(NumberOfElements() < new_table->Capacity());
+
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc);
+
+ // Copy prefix to new array.
+ for (int i = kPrefixStartIndex;
+ i < kPrefixStartIndex + Shape::kPrefixSize;
+ i++) {
+ new_table->set(i, get(i), mode);
+ }
+
+ // Rehash the elements.
+ int capacity = Capacity();
+ for (int i = 0; i < capacity; i++) {
+ uint32_t from_index = EntryToIndex(i);
+ Object* k = get(from_index);
+ if (IsKey(k)) {
+ uint32_t hash = Shape::HashForObject(key, k);
+ uint32_t insertion_index =
+ EntryToIndex(new_table->FindInsertionEntry(hash));
+ for (int j = 0; j < Shape::kEntrySize; j++) {
+ new_table->set(insertion_index + j, get(from_index + j), mode);
+ }
+ }
+ }
+ new_table->SetNumberOfElements(NumberOfElements());
+ new_table->SetNumberOfDeletedElements(0);
+ return new_table;
+}
+
+
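Rehash is the loop factored out of EnsureCapacity (below) so that Shrink can reuse it: copy the prefix, then reinsert every live key into the destination table at a freshly probed slot. A compact restatement with linear probing over a power-of-two table; V8's actual probe sequence goes through FirstProbe()/NextProbe():

#include <cstdint>
#include <vector>

const int kEmptySlot = -1;

void RehashInto(const std::vector<int>& old_table, std::vector<int>* new_table,
                uint32_t (*hash)(int)) {
  const uint32_t mask = static_cast<uint32_t>(new_table->size()) - 1;
  for (int key : old_table) {
    if (key == kEmptySlot) continue;   // skip empty slots
    uint32_t entry = hash(key) & mask;
    while ((*new_table)[entry] != kEmptySlot) {
      entry = (entry + 1) & mask;      // probe until a free slot turns up
    }
    (*new_table)[entry] = key;
  }
}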
+template<typename Shape, typename Key>
MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
int capacity = Capacity();
int nof = NumberOfElements() + n;
@@ -8492,39 +10041,43 @@ MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
const int kMinCapacityForPretenure = 256;
bool pretenure =
- (capacity > kMinCapacityForPretenure) && !Heap::InNewSpace(this);
+ (capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this);
Object* obj;
{ MaybeObject* maybe_obj =
Allocate(nof * 2, pretenure ? TENURED : NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- AssertNoAllocation no_gc;
- HashTable* table = HashTable::cast(obj);
- WriteBarrierMode mode = table->GetWriteBarrierMode(no_gc);
+ return Rehash(HashTable::cast(obj), key);
+}
- // Copy prefix to new array.
- for (int i = kPrefixStartIndex;
- i < kPrefixStartIndex + Shape::kPrefixSize;
- i++) {
- table->set(i, get(i), mode);
- }
- // Rehash the elements.
- for (int i = 0; i < capacity; i++) {
- uint32_t from_index = EntryToIndex(i);
- Object* k = get(from_index);
- if (IsKey(k)) {
- uint32_t hash = Shape::HashForObject(key, k);
- uint32_t insertion_index =
- EntryToIndex(table->FindInsertionEntry(hash));
- for (int j = 0; j < Shape::kEntrySize; j++) {
- table->set(insertion_index + j, get(from_index + j), mode);
- }
- }
+
+template<typename Shape, typename Key>
+MaybeObject* HashTable<Shape, Key>::Shrink(Key key) {
+ int capacity = Capacity();
+ int nof = NumberOfElements();
+
+ // Shrink to fit the number of elements if only a quarter of the
+ // capacity is filled with elements.
+ if (nof > (capacity >> 2)) return this;
+ // Allocate a new dictionary with room for at least the current
+ // number of elements. The allocation method will make sure that
+ // there is extra room in the dictionary for additions. Don't go
+ // lower than room for 16 elements.
+ int at_least_room_for = nof;
+ if (at_least_room_for < 16) return this;
+
+ const int kMinCapacityForPretenure = 256;
+ bool pretenure =
+ (at_least_room_for > kMinCapacityForPretenure) &&
+ !GetHeap()->InNewSpace(this);
+ Object* obj;
+ { MaybeObject* maybe_obj =
+ Allocate(at_least_room_for, pretenure ? TENURED : NOT_TENURED);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- table->SetNumberOfElements(NumberOfElements());
- table->SetNumberOfDeletedElements(0);
- return table;
+
+ return Rehash(HashTable::cast(obj), key);
}
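The shrink policy above, restated: a table shrinks only when at most a quarter of its capacity is in use and at least 16 elements remain, which keeps small dictionaries from thrashing between sizes:

bool ShouldShrinkTable(int capacity, int number_of_elements) {
  return number_of_elements <= (capacity >> 2) && number_of_elements >= 16;
}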
@@ -8579,6 +10132,12 @@ template Object* Dictionary<StringDictionaryShape, String*>::DeleteProperty(
template Object* Dictionary<NumberDictionaryShape, uint32_t>::DeleteProperty(
int, JSObject::DeleteMode);
+template MaybeObject* Dictionary<StringDictionaryShape, String*>::Shrink(
+ String*);
+
+template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::Shrink(
+ uint32_t);
+
template void Dictionary<StringDictionaryShape, String*>::CopyKeysTo(
FixedArray*);
@@ -8633,7 +10192,7 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
// Allocate space for result before we start mutating the object.
Object* new_double;
- { MaybeObject* maybe_new_double = Heap::AllocateHeapNumber(0.0);
+ { MaybeObject* maybe_new_double = GetHeap()->AllocateHeapNumber(0.0);
if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double;
}
result_double = HeapNumber::cast(new_double);
@@ -8693,13 +10252,14 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
uint32_t result = pos;
PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
+ Heap* heap = GetHeap();
while (undefs > 0) {
if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
// Adding an entry with the key beyond smi-range requires
// allocation. Bailout.
return Smi::FromInt(-1);
}
- new_dict->AddNumberEntry(pos, Heap::undefined_value(), no_details)->
+ new_dict->AddNumberEntry(pos, heap->undefined_value(), no_details)->
ToObjectUnchecked();
pos++;
undefs--;
@@ -8722,7 +10282,9 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
// If the object is in dictionary mode, it is converted to fast elements
// mode.
MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
- ASSERT(!HasPixelElements() && !HasExternalArrayElements());
+ ASSERT(!HasExternalArrayElements());
+
+ Heap* heap = GetHeap();
if (HasDictionaryElements()) {
// Convert to fast elements containing only the existing properties.
@@ -8740,10 +10302,10 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
}
Map* new_map = Map::cast(obj);
- PretenureFlag tenure = Heap::InNewSpace(this) ? NOT_TENURED: TENURED;
+ PretenureFlag tenure = heap->InNewSpace(this) ? NOT_TENURED: TENURED;
Object* new_array;
{ MaybeObject* maybe_new_array =
- Heap::AllocateFixedArray(dict->NumberOfElements(), tenure);
+ heap->AllocateFixedArray(dict->NumberOfElements(), tenure);
if (!maybe_new_array->ToObject(&new_array)) return maybe_new_array;
}
FixedArray* fast_elements = FixedArray::cast(new_array);
@@ -8776,7 +10338,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
// Pessimistically allocate space for return value before
// we start mutating the array.
Object* new_double;
- { MaybeObject* maybe_new_double = Heap::AllocateHeapNumber(0.0);
+ { MaybeObject* maybe_new_double = heap->AllocateHeapNumber(0.0);
if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double;
}
result_double = HeapNumber::cast(new_double);
@@ -8834,7 +10396,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
}
-Object* PixelArray::SetValue(uint32_t index, Object* value) {
+Object* ExternalPixelArray::SetValue(uint32_t index, Object* value) {
uint8_t clamped_value = 0;
if (index < static_cast<uint32_t>(length())) {
if (value->IsSmi()) {
@@ -8870,7 +10432,8 @@ Object* PixelArray::SetValue(uint32_t index, Object* value) {
template<typename ExternalArrayClass, typename ValueType>
-static MaybeObject* ExternalArrayIntSetter(ExternalArrayClass* receiver,
+static MaybeObject* ExternalArrayIntSetter(Heap* heap,
+ ExternalArrayClass* receiver,
uint32_t index,
Object* value) {
ValueType cast_value = 0;
@@ -8888,45 +10451,46 @@ static MaybeObject* ExternalArrayIntSetter(ExternalArrayClass* receiver,
}
receiver->set(index, cast_value);
}
- return Heap::NumberFromInt32(cast_value);
+ return heap->NumberFromInt32(cast_value);
}
MaybeObject* ExternalByteArray::SetValue(uint32_t index, Object* value) {
return ExternalArrayIntSetter<ExternalByteArray, int8_t>
- (this, index, value);
+ (GetHeap(), this, index, value);
}
MaybeObject* ExternalUnsignedByteArray::SetValue(uint32_t index,
Object* value) {
return ExternalArrayIntSetter<ExternalUnsignedByteArray, uint8_t>
- (this, index, value);
+ (GetHeap(), this, index, value);
}
MaybeObject* ExternalShortArray::SetValue(uint32_t index,
Object* value) {
return ExternalArrayIntSetter<ExternalShortArray, int16_t>
- (this, index, value);
+ (GetHeap(), this, index, value);
}
MaybeObject* ExternalUnsignedShortArray::SetValue(uint32_t index,
Object* value) {
return ExternalArrayIntSetter<ExternalUnsignedShortArray, uint16_t>
- (this, index, value);
+ (GetHeap(), this, index, value);
}
MaybeObject* ExternalIntArray::SetValue(uint32_t index, Object* value) {
return ExternalArrayIntSetter<ExternalIntArray, int32_t>
- (this, index, value);
+ (GetHeap(), this, index, value);
}
MaybeObject* ExternalUnsignedIntArray::SetValue(uint32_t index, Object* value) {
uint32_t cast_value = 0;
+ Heap* heap = GetHeap();
if (index < static_cast<uint32_t>(length())) {
if (value->IsSmi()) {
int int_value = Smi::cast(value)->value();
@@ -8941,12 +10505,13 @@ MaybeObject* ExternalUnsignedIntArray::SetValue(uint32_t index, Object* value) {
}
set(index, cast_value);
}
- return Heap::NumberFromUint32(cast_value);
+ return heap->NumberFromUint32(cast_value);
}
MaybeObject* ExternalFloatArray::SetValue(uint32_t index, Object* value) {
float cast_value = 0;
+ Heap* heap = GetHeap();
if (index < static_cast<uint32_t>(length())) {
if (value->IsSmi()) {
int int_value = Smi::cast(value)->value();
@@ -8961,7 +10526,27 @@ MaybeObject* ExternalFloatArray::SetValue(uint32_t index, Object* value) {
}
set(index, cast_value);
}
- return Heap::AllocateHeapNumber(cast_value);
+ return heap->AllocateHeapNumber(cast_value);
+}
+
+
+MaybeObject* ExternalDoubleArray::SetValue(uint32_t index, Object* value) {
+ double double_value = 0;
+ Heap* heap = GetHeap();
+ if (index < static_cast<uint32_t>(length())) {
+ if (value->IsSmi()) {
+ int int_value = Smi::cast(value)->value();
+ double_value = static_cast<double>(int_value);
+ } else if (value->IsHeapNumber()) {
+ double_value = HeapNumber::cast(value)->value();
+ } else {
+ // Treat undefined as zero (the default). All other types have been
+ // converted to a number type further up in the call chain.
+ ASSERT(value->IsUndefined());
+ }
+ set(index, double_value);
+ }
+ return heap->AllocateHeapNumber(double_value);
}
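ExternalDoubleArray stores raw doubles, so SetValue widens the incoming value before the store and boxes a fresh HeapNumber for the return. The widening rule, restated; callers are assumed to have converted anything else to a number or undefined further up the chain:

double WidenForDoubleStore(bool is_smi, int smi_value,
                           bool is_heap_number, double heap_number_value) {
  if (is_smi) return static_cast<double>(smi_value);
  if (is_heap_number) return heap_number_value;
  return 0.0;  // undefined defaults to zero
}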
@@ -8976,9 +10561,10 @@ MaybeObject* GlobalObject::EnsurePropertyCell(String* name) {
ASSERT(!HasFastProperties());
int entry = property_dictionary()->FindEntry(name);
if (entry == StringDictionary::kNotFound) {
+ Heap* heap = GetHeap();
Object* cell;
{ MaybeObject* maybe_cell =
- Heap::AllocateJSGlobalPropertyCell(Heap::the_hole_value());
+ heap->AllocateJSGlobalPropertyCell(heap->the_hole_value());
if (!maybe_cell->ToObject(&cell)) return maybe_cell;
}
PropertyDetails details(NONE, NORMAL);
@@ -9058,6 +10644,7 @@ class TwoCharHashTableKey : public HashTableKey {
UNREACHABLE();
return NULL;
}
+
private:
uint32_t c1_;
uint32_t c2_;
@@ -9108,6 +10695,15 @@ MaybeObject* SymbolTable::LookupAsciiSymbol(Vector<const char> str,
}
+MaybeObject* SymbolTable::LookupSubStringAsciiSymbol(Handle<SeqAsciiString> str,
+ int from,
+ int length,
+ Object** s) {
+ SubStringAsciiSymbolKey key(str, from, length);
+ return LookupKey(&key, s);
+}
+
+
MaybeObject* SymbolTable::LookupTwoByteSymbol(Vector<const uc16> str,
Object** s) {
TwoByteSymbolKey key(str);
@@ -9152,7 +10748,7 @@ MaybeObject* SymbolTable::LookupKey(HashTableKey* key, Object** s) {
Object* CompilationCacheTable::Lookup(String* src) {
StringKey key(src);
int entry = FindEntry(&key);
- if (entry == kNotFound) return Heap::undefined_value();
+ if (entry == kNotFound) return GetHeap()->undefined_value();
return get(EntryToIndex(entry) + 1);
}
@@ -9162,7 +10758,7 @@ Object* CompilationCacheTable::LookupEval(String* src,
StrictModeFlag strict_mode) {
StringSharedKey key(src, context->closure()->shared(), strict_mode);
int entry = FindEntry(&key);
- if (entry == kNotFound) return Heap::undefined_value();
+ if (entry == kNotFound) return GetHeap()->undefined_value();
return get(EntryToIndex(entry) + 1);
}
@@ -9171,7 +10767,7 @@ Object* CompilationCacheTable::LookupRegExp(String* src,
JSRegExp::Flags flags) {
RegExpKey key(src, flags);
int entry = FindEntry(&key);
- if (entry == kNotFound) return Heap::undefined_value();
+ if (entry == kNotFound) return GetHeap()->undefined_value();
return get(EntryToIndex(entry) + 1);
}
@@ -9242,12 +10838,13 @@ MaybeObject* CompilationCacheTable::PutRegExp(String* src,
void CompilationCacheTable::Remove(Object* value) {
+ Object* null_value = GetHeap()->null_value();
for (int entry = 0, size = Capacity(); entry < size; entry++) {
int entry_index = EntryToIndex(entry);
int value_index = entry_index + 1;
if (get(value_index) == value) {
- fast_set(this, entry_index, Heap::null_value());
- fast_set(this, value_index, Heap::null_value());
+ fast_set(this, entry_index, null_value);
+ fast_set(this, value_index, null_value);
ElementRemoved();
}
}
@@ -9292,7 +10889,7 @@ class SymbolsKey : public HashTableKey {
Object* MapCache::Lookup(FixedArray* array) {
SymbolsKey key(array);
int entry = FindEntry(&key);
- if (entry == kNotFound) return Heap::undefined_value();
+ if (entry == kNotFound) return GetHeap()->undefined_value();
return get(EntryToIndex(entry) + 1);
}
@@ -9329,11 +10926,12 @@ MaybeObject* Dictionary<Shape, Key>::Allocate(int at_least_space_for) {
template<typename Shape, typename Key>
MaybeObject* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
+ Heap* heap = Dictionary<Shape, Key>::GetHeap();
int length = HashTable<Shape, Key>::NumberOfElements();
// Allocate and initialize iteration order array.
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(length);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArray(length);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* iteration_order = FixedArray::cast(obj);
@@ -9342,7 +10940,7 @@ MaybeObject* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
}
// Allocate array with enumeration order.
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(length);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArray(length);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* enumeration_order = FixedArray::cast(obj);
@@ -9403,15 +11001,16 @@ void NumberDictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
// Do nothing if the interval [from, to) is empty.
if (from >= to) return;
+ Heap* heap = GetHeap();
int removed_entries = 0;
- Object* sentinel = Heap::null_value();
+ Object* sentinel = heap->null_value();
int capacity = Capacity();
for (int i = 0; i < capacity; i++) {
Object* key = KeyAt(i);
if (key->IsNumber()) {
uint32_t number = static_cast<uint32_t>(key->Number());
if (from <= number && number < to) {
- SetEntry(i, sentinel, sentinel, Smi::FromInt(0));
+ SetEntry(i, sentinel, sentinel);
removed_entries++;
}
}
@@ -9425,14 +11024,21 @@ void NumberDictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
template<typename Shape, typename Key>
Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
JSObject::DeleteMode mode) {
+ Heap* heap = Dictionary<Shape, Key>::GetHeap();
PropertyDetails details = DetailsAt(entry);
// Ignore attributes if forcing a deletion.
if (details.IsDontDelete() && mode != JSObject::FORCE_DELETION) {
- return Heap::false_value();
+ return heap->false_value();
}
- SetEntry(entry, Heap::null_value(), Heap::null_value(), Smi::FromInt(0));
+ SetEntry(entry, heap->null_value(), heap->null_value());
HashTable<Shape, Key>::ElementRemoved();
- return Heap::true_value();
+ return heap->true_value();
+}
+
+
+template<typename Shape, typename Key>
+MaybeObject* Dictionary<Shape, Key>::Shrink(Key key) {
+ return HashTable<Shape, Key>::Shrink(key);
}
@@ -9656,7 +11262,8 @@ Object* Dictionary<Shape, Key>::SlowReverseLookup(Object* value) {
if (e == value) return k;
}
}
- return Heap::undefined_value();
+ Heap* heap = Dictionary<Shape, Key>::GetHeap();
+ return heap->undefined_value();
}
@@ -9681,6 +11288,8 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
int instance_descriptor_length = 0;
int number_of_fields = 0;
+ Heap* heap = GetHeap();
+
// Compute the length of the instance descriptor.
int capacity = Capacity();
for (int i = 0; i < capacity; i++) {
@@ -9691,7 +11300,7 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
ASSERT(type != FIELD);
instance_descriptor_length++;
if (type == NORMAL &&
- (!value->IsJSFunction() || Heap::InNewSpace(value))) {
+ (!value->IsJSFunction() || heap->InNewSpace(value))) {
number_of_fields += 1;
}
}
@@ -9719,7 +11328,7 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
// Allocate the fixed array for the fields.
Object* fields;
{ MaybeObject* maybe_fields =
- Heap::AllocateFixedArray(number_of_allocated_fields);
+ heap->AllocateFixedArray(number_of_allocated_fields);
if (!maybe_fields->ToObject(&fields)) return maybe_fields;
}
@@ -9732,13 +11341,13 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
Object* value = ValueAt(i);
// Ensure the key is a symbol before writing into the instance descriptor.
Object* key;
- { MaybeObject* maybe_key = Heap::LookupSymbol(String::cast(k));
+ { MaybeObject* maybe_key = heap->LookupSymbol(String::cast(k));
if (!maybe_key->ToObject(&key)) return maybe_key;
}
PropertyDetails details = DetailsAt(i);
PropertyType type = details.type();
- if (value->IsJSFunction() && !Heap::InNewSpace(value)) {
+ if (value->IsJSFunction() && !heap->InNewSpace(value)) {
ConstantFunctionDescriptor d(String::cast(key),
JSFunction::cast(value),
details.attributes(),
@@ -9813,7 +11422,7 @@ Object* DebugInfo::GetBreakPointInfo(int code_position) {
int index = GetBreakPointInfoIndex(code_position);
// Return the break point info object if any.
- if (index == kNoBreakPointInfo) return Heap::undefined_value();
+ if (index == kNoBreakPointInfo) return GetHeap()->undefined_value();
return BreakPointInfo::cast(break_points()->get(index));
}
@@ -9835,6 +11444,7 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
int source_position,
int statement_position,
Handle<Object> break_point_object) {
+ Isolate* isolate = Isolate::Current();
Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position));
if (!break_point_info->IsUndefined()) {
BreakPointInfo::SetBreakPoint(
@@ -9857,8 +11467,9 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
Handle<FixedArray> old_break_points =
Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
Handle<FixedArray> new_break_points =
- Factory::NewFixedArray(old_break_points->length() +
- Debug::kEstimatedNofBreakPointsInFunction);
+ isolate->factory()->NewFixedArray(
+ old_break_points->length() +
+ Debug::kEstimatedNofBreakPointsInFunction);
debug_info->set_break_points(*new_break_points);
for (int i = 0; i < old_break_points->length(); i++) {
@@ -9869,13 +11480,14 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
ASSERT(index != kNoBreakPointInfo);
// Allocate new BreakPointInfo object and set the break point.
- Handle<BreakPointInfo> new_break_point_info =
- Handle<BreakPointInfo>::cast(Factory::NewStruct(BREAK_POINT_INFO_TYPE));
+ Handle<BreakPointInfo> new_break_point_info = Handle<BreakPointInfo>::cast(
+ isolate->factory()->NewStruct(BREAK_POINT_INFO_TYPE));
new_break_point_info->set_code_position(Smi::FromInt(code_position));
new_break_point_info->set_source_position(Smi::FromInt(source_position));
new_break_point_info->
set_statement_position(Smi::FromInt(statement_position));
- new_break_point_info->set_break_point_objects(Heap::undefined_value());
+ new_break_point_info->set_break_point_objects(
+ isolate->heap()->undefined_value());
BreakPointInfo::SetBreakPoint(new_break_point_info, break_point_object);
debug_info->break_points()->set(index, *new_break_point_info);
}
@@ -9885,7 +11497,7 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
Object* DebugInfo::GetBreakPointObjects(int code_position) {
Object* break_point_info = GetBreakPointInfo(code_position);
if (break_point_info->IsUndefined()) {
- return Heap::undefined_value();
+ return GetHeap()->undefined_value();
}
return BreakPointInfo::cast(break_point_info)->break_point_objects();
}
@@ -9908,7 +11520,8 @@ int DebugInfo::GetBreakPointCount() {
Object* DebugInfo::FindBreakPointInfo(Handle<DebugInfo> debug_info,
Handle<Object> break_point_object) {
- if (debug_info->break_points()->IsUndefined()) return Heap::undefined_value();
+ Heap* heap = debug_info->GetHeap();
+ if (debug_info->break_points()->IsUndefined()) return heap->undefined_value();
for (int i = 0; i < debug_info->break_points()->length(); i++) {
if (!debug_info->break_points()->get(i)->IsUndefined()) {
Handle<BreakPointInfo> break_point_info =
@@ -9920,7 +11533,7 @@ Object* DebugInfo::FindBreakPointInfo(Handle<DebugInfo> debug_info,
}
}
}
- return Heap::undefined_value();
+ return heap->undefined_value();
}
@@ -9944,12 +11557,14 @@ int DebugInfo::GetBreakPointInfoIndex(int code_position) {
// Remove the specified break point object.
void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
Handle<Object> break_point_object) {
+ Isolate* isolate = Isolate::Current();
// If there are no break points just ignore.
if (break_point_info->break_point_objects()->IsUndefined()) return;
// If there is a single break point clear it if it is the same.
if (!break_point_info->break_point_objects()->IsFixedArray()) {
if (break_point_info->break_point_objects() == *break_point_object) {
- break_point_info->set_break_point_objects(Heap::undefined_value());
+ break_point_info->set_break_point_objects(
+ isolate->heap()->undefined_value());
}
return;
}
@@ -9959,7 +11574,7 @@ void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
Handle<FixedArray>(
FixedArray::cast(break_point_info->break_point_objects()));
Handle<FixedArray> new_array =
- Factory::NewFixedArray(old_array->length() - 1);
+ isolate->factory()->NewFixedArray(old_array->length() - 1);
int found_count = 0;
for (int i = 0; i < old_array->length(); i++) {
if (old_array->get(i) == *break_point_object) {
@@ -9986,7 +11601,7 @@ void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
if (break_point_info->break_point_objects() == *break_point_object) return;
// If there was one break point object before replace with array.
if (!break_point_info->break_point_objects()->IsFixedArray()) {
- Handle<FixedArray> array = Factory::NewFixedArray(2);
+ Handle<FixedArray> array = FACTORY->NewFixedArray(2);
array->set(0, break_point_info->break_point_objects());
array->set(1, *break_point_object);
break_point_info->set_break_point_objects(*array);
@@ -9997,7 +11612,7 @@ void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
Handle<FixedArray>(
FixedArray::cast(break_point_info->break_point_objects()));
Handle<FixedArray> new_array =
- Factory::NewFixedArray(old_array->length() + 1);
+ FACTORY->NewFixedArray(old_array->length() + 1);
for (int i = 0; i < old_array->length(); i++) {
// If the break point was there before just ignore.
if (old_array->get(i) == *break_point_object) return;
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 406895a4e..9deee23fc 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,7 +28,9 @@
#ifndef V8_OBJECTS_H_
#define V8_OBJECTS_H_
+#include "allocation.h"
#include "builtins.h"
+#include "list.h"
#include "smart-pointer.h"
#include "unicode-inl.h"
#if V8_TARGET_ARCH_ARM
@@ -46,19 +48,22 @@
// - Object
// - Smi (immediate small integer)
// - HeapObject (superclass for everything allocated in the heap)
-// - JSObject
-// - JSArray
-// - JSRegExp
-// - JSFunction
-// - GlobalObject
-// - JSGlobalObject
-// - JSBuiltinsObject
-// - JSGlobalProxy
-// - JSValue
-// - JSMessageObject
+// - JSReceiver (suitable for property access)
+// - JSObject
+// - JSArray
+// - JSRegExp
+// - JSFunction
+// - GlobalObject
+// - JSGlobalObject
+// - JSBuiltinsObject
+// - JSGlobalProxy
+// - JSValue
+// - JSMessageObject
+// - JSProxy
+// - JSFunctionProxy
// - ByteArray
-// - PixelArray
// - ExternalArray
+// - ExternalPixelArray
// - ExternalByteArray
// - ExternalUnsignedByteArray
// - ExternalShortArray
@@ -89,7 +94,7 @@
// - Code
// - Map
// - Oddball
-// - Proxy
+// - Foreign
// - SharedFunctionInfo
// - Struct
// - AccessorInfo
@@ -131,25 +136,42 @@ namespace internal {
// They are used both in property dictionaries and instance descriptors.
class PropertyDetails BASE_EMBEDDED {
public:
-
PropertyDetails(PropertyAttributes attributes,
PropertyType type,
int index = 0) {
+ ASSERT(type != EXTERNAL_ARRAY_TRANSITION);
ASSERT(TypeField::is_valid(type));
ASSERT(AttributesField::is_valid(attributes));
- ASSERT(IndexField::is_valid(index));
+ ASSERT(StorageField::is_valid(index));
value_ = TypeField::encode(type)
| AttributesField::encode(attributes)
- | IndexField::encode(index);
+ | StorageField::encode(index);
ASSERT(type == this->type());
ASSERT(attributes == this->attributes());
ASSERT(index == this->index());
}
+ PropertyDetails(PropertyAttributes attributes,
+ PropertyType type,
+ ExternalArrayType array_type) {
+ ASSERT(type == EXTERNAL_ARRAY_TRANSITION);
+ ASSERT(TypeField::is_valid(type));
+ ASSERT(AttributesField::is_valid(attributes));
+ ASSERT(StorageField::is_valid(static_cast<int>(array_type)));
+
+ value_ = TypeField::encode(type)
+ | AttributesField::encode(attributes)
+ | StorageField::encode(static_cast<int>(array_type));
+
+ ASSERT(type == this->type());
+ ASSERT(attributes == this->attributes());
+ ASSERT(array_type == this->array_type());
+ }
+
// Conversion for storing details as Object*.
- inline PropertyDetails(Smi* smi);
+ explicit inline PropertyDetails(Smi* smi);
inline Smi* AsSmi();
PropertyType type() { return TypeField::decode(value_); }
@@ -157,7 +179,8 @@ class PropertyDetails BASE_EMBEDDED {
bool IsTransition() {
PropertyType t = type();
ASSERT(t != INTERCEPTOR);
- return t == MAP_TRANSITION || t == CONSTANT_TRANSITION;
+ return t == MAP_TRANSITION || t == CONSTANT_TRANSITION ||
+ t == EXTERNAL_ARRAY_TRANSITION;
}
bool IsProperty() {
@@ -166,11 +189,18 @@ class PropertyDetails BASE_EMBEDDED {
PropertyAttributes attributes() { return AttributesField::decode(value_); }
- int index() { return IndexField::decode(value_); }
+ int index() { return StorageField::decode(value_); }
+
+ ExternalArrayType array_type() {
+ ASSERT(type() == EXTERNAL_ARRAY_TRANSITION);
+ return static_cast<ExternalArrayType>(StorageField::decode(value_));
+ }
inline PropertyDetails AsDeleted();
- static bool IsValidIndex(int index) { return IndexField::is_valid(index); }
+ static bool IsValidIndex(int index) {
+ return StorageField::is_valid(index);
+ }
bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; }
@@ -179,12 +209,13 @@ class PropertyDetails BASE_EMBEDDED {
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
- class TypeField: public BitField<PropertyType, 0, 3> {};
- class AttributesField: public BitField<PropertyAttributes, 3, 3> {};
- class DeletedField: public BitField<uint32_t, 6, 1> {};
- class IndexField: public BitField<uint32_t, 7, 32-7> {};
+ class TypeField: public BitField<PropertyType, 0, 4> {};
+ class AttributesField: public BitField<PropertyAttributes, 4, 3> {};
+ class DeletedField: public BitField<uint32_t, 7, 1> {};
+ class StorageField: public BitField<uint32_t, 8, 32-8> {};
static const int kInitialIndex = 1;
+
private:
uint32_t value_;
};
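
A minimal standalone sketch of the widened layout above: the type field grows from three to four bits, pushing attributes, the deleted bit, and the renamed storage field up one position each. Plain shifts stand in for V8's BitField template; all names and sample values are illustrative.

#include <cassert>
#include <cstdint>
#include <cstdio>

static const int kTypeBits = 4, kAttrBits = 3, kDeletedBits = 1;
static const int kAttrShift = kTypeBits;                        // 4
static const int kDeletedShift = kAttrShift + kAttrBits;        // 7
static const int kStorageShift = kDeletedShift + kDeletedBits;  // 8

uint32_t Encode(uint32_t type, uint32_t attrs, uint32_t storage) {
  assert(type < (1u << kTypeBits) && attrs < (1u << kAttrBits));
  return type | (attrs << kAttrShift) | (storage << kStorageShift);
}

int main() {
  uint32_t v = Encode(/* type */ 9, /* attrs */ 5, /* storage */ 42);
  std::printf("type=%u attrs=%u storage=%u\n",
              v & ((1u << kTypeBits) - 1),
              (v >> kAttrShift) & ((1u << kAttrBits) - 1),
              v >> kStorageShift);  // prints: type=9 attrs=5 storage=42
  return 0;
}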
@@ -260,9 +291,8 @@ static const int kVariableSizeSentinel = 0;
V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
\
V(HEAP_NUMBER_TYPE) \
- V(PROXY_TYPE) \
+ V(FOREIGN_TYPE) \
V(BYTE_ARRAY_TYPE) \
- V(PIXEL_ARRAY_TYPE) \
/* Note: the order of these external array */ \
/* types is relied upon in */ \
/* Object::IsExternalArray(). */ \
@@ -273,6 +303,7 @@ static const int kVariableSizeSentinel = 0;
V(EXTERNAL_INT_ARRAY_TYPE) \
V(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE) \
V(EXTERNAL_FLOAT_ARRAY_TYPE) \
+ V(EXTERNAL_PIXEL_ARRAY_TYPE) \
V(FILLER_TYPE) \
\
V(ACCESSOR_INFO_TYPE) \
@@ -285,6 +316,7 @@ static const int kVariableSizeSentinel = 0;
V(TYPE_SWITCH_INFO_TYPE) \
V(SCRIPT_TYPE) \
V(CODE_CACHE_TYPE) \
+ V(POLYMORPHIC_CODE_CACHE_TYPE) \
\
V(FIXED_ARRAY_TYPE) \
V(SHARED_FUNCTION_INFO_TYPE) \
@@ -298,9 +330,11 @@ static const int kVariableSizeSentinel = 0;
V(JS_BUILTINS_OBJECT_TYPE) \
V(JS_GLOBAL_PROXY_TYPE) \
V(JS_ARRAY_TYPE) \
+ V(JS_PROXY_TYPE) \
V(JS_REGEXP_TYPE) \
\
V(JS_FUNCTION_TYPE) \
+ V(JS_FUNCTION_PROXY_TYPE) \
#ifdef ENABLE_DEBUGGER_SUPPORT
#define INSTANCE_TYPE_LIST_DEBUGGER(V) \
@@ -394,7 +428,8 @@ static const int kVariableSizeSentinel = 0;
V(SIGNATURE_INFO, SignatureInfo, signature_info) \
V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \
V(SCRIPT, Script, script) \
- V(CODE_CACHE, CodeCache, code_cache)
+ V(CODE_CACHE, CodeCache, code_cache) \
+ V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache)
#ifdef ENABLE_DEBUGGER_SUPPORT
#define STRUCT_LIST_DEBUGGER(V) \
@@ -458,7 +493,6 @@ const uint32_t kShortcutTypeTag = kConsStringTag;
enum InstanceType {
// String types.
- // FIRST_STRING_TYPE
SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag,
ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
@@ -488,16 +522,18 @@ enum InstanceType {
// "Data", objects that cannot contain non-map-word pointers to heap
// objects.
HEAP_NUMBER_TYPE,
- PROXY_TYPE,
+ FOREIGN_TYPE,
BYTE_ARRAY_TYPE,
- PIXEL_ARRAY_TYPE,
EXTERNAL_BYTE_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE
EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
EXTERNAL_SHORT_ARRAY_TYPE,
EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
EXTERNAL_INT_ARRAY_TYPE,
EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
- EXTERNAL_FLOAT_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
+ EXTERNAL_FLOAT_ARRAY_TYPE,
+ EXTERNAL_DOUBLE_ARRAY_TYPE,
+ EXTERNAL_PIXEL_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
+ FIXED_DOUBLE_ARRAY_TYPE,
FILLER_TYPE, // LAST_DATA_TYPE
// Structs.
@@ -511,6 +547,7 @@ enum InstanceType {
TYPE_SWITCH_INFO_TYPE,
SCRIPT_TYPE,
CODE_CACHE_TYPE,
+ POLYMORPHIC_CODE_CACHE_TYPE,
// The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT
// is defined. However as include/v8.h contain some of the instance type
// constants always having them avoids them getting different numbers
@@ -523,44 +560,54 @@ enum InstanceType {
JS_MESSAGE_OBJECT_TYPE,
- JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
+  JS_VALUE_TYPE,  // FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, FIRST_JS_RECEIVER_TYPE
JS_OBJECT_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JS_GLOBAL_OBJECT_TYPE,
JS_BUILTINS_OBJECT_TYPE,
JS_GLOBAL_PROXY_TYPE,
JS_ARRAY_TYPE,
+ JS_PROXY_TYPE,
- JS_REGEXP_TYPE, // LAST_JS_OBJECT_TYPE, FIRST_FUNCTION_CLASS_TYPE
+ JS_REGEXP_TYPE, // LAST_NONCALLABLE_SPEC_OBJECT_TYPE
- JS_FUNCTION_TYPE,
+ JS_FUNCTION_TYPE, // FIRST_CALLABLE_SPEC_OBJECT_TYPE
+ JS_FUNCTION_PROXY_TYPE, // LAST_CALLABLE_SPEC_OBJECT_TYPE
// Pseudo-types
FIRST_TYPE = 0x0,
- LAST_TYPE = JS_FUNCTION_TYPE,
+ LAST_TYPE = JS_FUNCTION_PROXY_TYPE,
INVALID_TYPE = FIRST_TYPE - 1,
FIRST_NONSTRING_TYPE = MAP_TYPE,
- FIRST_STRING_TYPE = FIRST_TYPE,
- LAST_STRING_TYPE = FIRST_NONSTRING_TYPE - 1,
// Boundaries for testing for an external array.
FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE,
- LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_FLOAT_ARRAY_TYPE,
+ LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_PIXEL_ARRAY_TYPE,
// Boundary for promotion to old data space/old pointer space.
LAST_DATA_TYPE = FILLER_TYPE,
- // Boundaries for testing the type is a JavaScript "object". Note that
- // function objects are not counted as objects, even though they are
- // implemented as such; only values whose typeof is "object" are included.
- FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
- LAST_JS_OBJECT_TYPE = JS_REGEXP_TYPE,
- // RegExp objects have [[Class]] "function" because they are callable.
- // All types from this type and above are objects with [[Class]] "function".
- FIRST_FUNCTION_CLASS_TYPE = JS_REGEXP_TYPE
+ // Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy).
+  // Note that there is no range for JSObject or JSProxy, since their subtypes
+  // are not contiguous in this enum! The enum ranges instead reflect the
+  // external class names, where proxies are treated as either ordinary
+  // objects or functions.
+ FIRST_JS_RECEIVER_TYPE = JS_VALUE_TYPE,
+ LAST_JS_RECEIVER_TYPE = LAST_TYPE,
+ // Boundaries for testing the types for which typeof is "object".
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_VALUE_TYPE,
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE,
+ // Boundaries for testing the types for which typeof is "function".
+ FIRST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_TYPE,
+ LAST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_PROXY_TYPE,
+ // Boundaries for testing whether the type is a JavaScript object.
+ FIRST_SPEC_OBJECT_TYPE = FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
+ LAST_SPEC_OBJECT_TYPE = LAST_CALLABLE_SPEC_OBJECT_TYPE
};
+static const int kExternalArrayTypeCount = LAST_EXTERNAL_ARRAY_TYPE -
+ FIRST_EXTERNAL_ARRAY_TYPE + 1;
STATIC_CHECK(JS_OBJECT_TYPE == Internals::kJSObjectType);
STATIC_CHECK(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
-STATIC_CHECK(PROXY_TYPE == Internals::kProxyType);
+STATIC_CHECK(FOREIGN_TYPE == Internals::kForeignType);
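
The payoff of these pseudo-type boundaries is that classifying a value for typeof becomes a pair of unsigned range checks. A toy rendition; the enum below mirrors only the ordering, not the real InstanceType values.

#include <cstdio>

enum ToyType { JS_VALUE, JS_OBJECT, JS_ARRAY, JS_PROXY, JS_REGEXP,
               JS_FUNCTION, JS_FUNCTION_PROXY };
static const ToyType FIRST_NONCALLABLE = JS_VALUE;
static const ToyType LAST_NONCALLABLE = JS_REGEXP;
static const ToyType FIRST_CALLABLE = JS_FUNCTION;
static const ToyType LAST_CALLABLE = JS_FUNCTION_PROXY;

const char* TypeofFor(ToyType t) {
  if (t >= FIRST_CALLABLE && t <= LAST_CALLABLE) return "function";
  if (t >= FIRST_NONCALLABLE && t <= LAST_NONCALLABLE) return "object";
  return "other";
}

int main() {
  // prints: object function
  std::printf("%s %s\n", TypeofFor(JS_REGEXP), TypeofFor(JS_FUNCTION_PROXY));
  return 0;
}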
enum CompareResult {
@@ -585,7 +632,6 @@ enum CompareResult {
class StringStream;
class ObjectVisitor;
-class Failure;
struct ValueInfo : public Malloced {
ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
@@ -599,6 +645,7 @@ struct ValueInfo : public Malloced {
// A template-ized version of the IsXXX functions.
template <class C> static inline bool Is(Object* obj);
+class Failure;
class MaybeObject BASE_EMBEDDED {
public:
@@ -625,6 +672,13 @@ class MaybeObject BASE_EMBEDDED {
return reinterpret_cast<Object*>(this);
}
+ template<typename T>
+ inline bool To(T** obj) {
+ if (IsFailure()) return false;
+ *obj = T::cast(reinterpret_cast<Object*>(this));
+ return true;
+ }
+
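
The new To<T>() helper folds the failure test and the downcast of a ToObject()-style call site into one step. A self-contained imitation with toy classes; none of these types are the real V8 ones.

#include <cstdio>

struct Obj { virtual ~Obj() {} };
struct Arr : Obj {
  int len = 3;
  static Arr* cast(Obj* o) { return static_cast<Arr*>(o); }
};

struct Maybe {
  Obj* obj;  // null encodes a Failure in this toy
  bool IsFailure() const { return obj == nullptr; }
  template <typename T>
  bool To(T** out) {
    if (IsFailure()) return false;
    *out = T::cast(obj);  // cast and store in one step
    return true;
  }
};

int main() {
  Arr a;
  Maybe ok{&a}, fail{nullptr};
  Arr* out;
  if (ok.To(&out)) std::printf("len=%d\n", out->len);   // prints: len=3
  std::printf("failure handled=%d\n", !fail.To(&out));  // prints: 1
  return 0;
}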
#ifdef OBJECT_PRINT
// Prints this object with details.
inline void Print() {
@@ -660,7 +714,6 @@ class MaybeObject BASE_EMBEDDED {
V(SeqTwoByteString) \
V(SeqAsciiString) \
\
- V(PixelArray) \
V(ExternalArray) \
V(ExternalByteArray) \
V(ExternalUnsignedByteArray) \
@@ -669,7 +722,10 @@ class MaybeObject BASE_EMBEDDED {
V(ExternalIntArray) \
V(ExternalUnsignedIntArray) \
V(ExternalFloatArray) \
+ V(ExternalDoubleArray) \
+ V(ExternalPixelArray) \
V(ByteArray) \
+ V(JSReceiver) \
V(JSObject) \
V(JSContextExtensionObject) \
V(Map) \
@@ -677,8 +733,8 @@ class MaybeObject BASE_EMBEDDED {
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
V(FixedArray) \
+ V(FixedDoubleArray) \
V(Context) \
- V(CatchContext) \
V(GlobalContext) \
V(JSFunction) \
V(Code) \
@@ -687,9 +743,11 @@ class MaybeObject BASE_EMBEDDED {
V(JSValue) \
V(JSMessageObject) \
V(StringWrapper) \
- V(Proxy) \
+ V(Foreign) \
V(Boolean) \
V(JSArray) \
+ V(JSProxy) \
+ V(JSFunctionProxy) \
V(JSRegExp) \
V(HashTable) \
V(Dictionary) \
@@ -698,6 +756,7 @@ class MaybeObject BASE_EMBEDDED {
V(NormalizedMapCache) \
V(CompilationCacheTable) \
V(CodeCacheHashTable) \
+ V(PolymorphicCodeCacheHashTable) \
V(MapCache) \
V(Primitive) \
V(GlobalObject) \
@@ -734,6 +793,7 @@ class Object : public MaybeObject {
// Oddball testing.
INLINE(bool IsUndefined());
INLINE(bool IsNull());
+ INLINE(bool IsTheHole()); // Shadows MaybeObject's implementation.
INLINE(bool IsTrue());
INLINE(bool IsFalse());
inline bool IsArgumentsMarker();
@@ -741,6 +801,10 @@ class Object : public MaybeObject {
// Extract the number.
inline double Number();
+  // Returns true if the object is of the correct type to be used as an
+  // implementation of a JSObject's elements.
+ inline bool HasValidElements();
+
inline bool HasSpecificClassOf(String* name);
MUST_USE_RESULT MaybeObject* ToObject(); // ECMA-262 9.9.
@@ -773,6 +837,9 @@ class Object : public MaybeObject {
Object* structure,
String* name,
Object* holder);
+ MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(Object* receiver,
+ String* name,
+ Object* handler);
MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
JSFunction* getter);
@@ -890,7 +957,7 @@ class Failure: public MaybeObject {
enum Type {
RETRY_AFTER_GC = 0,
    EXCEPTION = 1,       // Returning this marker tells that the real exception
- // is in Top::pending_exception.
+ // is in Isolate::pending_exception.
INTERNAL_ERROR = 2,
OUT_OF_MEMORY_EXCEPTION = 3
};
@@ -1078,6 +1145,14 @@ class HeapObject: public Object {
inline MapWord map_word();
inline void set_map_word(MapWord map_word);
+  // The Heap the object was allocated in. Also used to access the Isolate.
+  // This method cannot be used during GC; it ASSERTs this.
+ inline Heap* GetHeap();
+  // Convenience method to get the current isolate. It may be used only
+  // when its result is the same as Isolate::Current(); it ASSERTs this.
+  // See also the comment for GetHeap.
+ inline Isolate* GetIsolate();
+
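
These two accessors are what let the rest of this patch turn static Heap::foo() calls into heap->foo(): every heap object can reach its own Heap and, through it, its Isolate. A toy model of that chain; all names are stand-ins.

#include <cstdio>

struct Isolate { const char* name; };
struct Heap { Isolate* isolate; int undefined_value; };
struct HeapObject {
  Heap* heap;  // the real code derives this from the object's address
  Heap* GetHeap() { return heap; }
  Isolate* GetIsolate() { return heap->isolate; }
};

int main() {
  Isolate isolate{"main"};
  Heap heap{&isolate, 42};
  HeapObject obj{&heap};
  // prints: main 42
  std::printf("%s %d\n", obj.GetIsolate()->name, obj.GetHeap()->undefined_value);
  return 0;
}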
// Converts an address to a HeapObject pointer.
static inline HeapObject* FromAddress(Address address);
@@ -1261,14 +1336,9 @@ class HeapNumber: public HeapObject {
// is a mixture of sign, exponent and mantissa. Our current platforms are all
// little endian apart from non-EABI arm which is little endian with big
// endian floating point word ordering!
-#if !defined(V8_HOST_ARCH_ARM) || defined(USE_ARM_EABI)
static const int kMantissaOffset = kValueOffset;
static const int kExponentOffset = kValueOffset + 4;
-#else
- static const int kMantissaOffset = kValueOffset + 4;
- static const int kExponentOffset = kValueOffset;
-# define BIG_ENDIAN_FLOATING_POINT 1
-#endif
+
static const int kSize = kValueOffset + kDoubleSize;
static const uint32_t kSignMask = 0x80000000u;
static const uint32_t kExponentMask = 0x7ff00000u;
@@ -1285,11 +1355,72 @@ class HeapNumber: public HeapObject {
};
+// JSReceiver includes types on which properties can be defined, i.e.,
+// JSObject and JSProxy.
+class JSReceiver: public HeapObject {
+ public:
+ // Casting.
+ static inline JSReceiver* cast(Object* obj);
+
+ // Can cause GC.
+ MUST_USE_RESULT MaybeObject* SetProperty(String* key,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
+ MUST_USE_RESULT MaybeObject* SetProperty(LookupResult* result,
+ String* key,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
+
+ // Returns the class name ([[Class]] property in the specification).
+ String* class_name();
+
+  // Returns the constructor name (the name, possibly inferred, of the
+  // function that was used to instantiate the object).
+ String* constructor_name();
+
+ inline PropertyAttributes GetPropertyAttribute(String* name);
+ PropertyAttributes GetPropertyAttributeWithReceiver(JSReceiver* receiver,
+ String* name);
+ PropertyAttributes GetLocalPropertyAttribute(String* name);
+
+ // Can cause a GC.
+ bool HasProperty(String* name) {
+ return GetPropertyAttribute(name) != ABSENT;
+ }
+
+ // Can cause a GC.
+ bool HasLocalProperty(String* name) {
+ return GetLocalPropertyAttribute(name) != ABSENT;
+ }
+
+ // Return the object's prototype (might be Heap::null_value()).
+ inline Object* GetPrototype();
+
+ // Set the object's prototype (only JSReceiver and null are allowed).
+ MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
+ bool skip_hidden_prototypes);
+
+ // Lookup a property. If found, the result is valid and has
+ // detailed information.
+ void LocalLookup(String* name, LookupResult* result);
+ void Lookup(String* name, LookupResult* result);
+
+ private:
+ PropertyAttributes GetPropertyAttribute(JSReceiver* receiver,
+ LookupResult* result,
+ String* name,
+ bool continue_search);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
+};
+
// The JSObject describes real heap allocated JavaScript objects with
// properties.
// Note that the map of JSObject changes during execution to enable inline
// caching.
-class JSObject: public HeapObject {
+class JSObject: public JSReceiver {
public:
enum DeleteMode {
NORMAL_DELETION,
@@ -1298,20 +1429,37 @@ class JSObject: public HeapObject {
};
enum ElementsKind {
- // The only "fast" kind.
+ // The "fast" kind for tagged values. Must be first to make it possible
+ // to efficiently check maps if they have fast elements.
FAST_ELEMENTS,
- // All the kinds below are "slow".
+
+ // The "fast" kind for unwrapped, non-tagged double values.
+ FAST_DOUBLE_ELEMENTS,
+
+ // The "slow" kind.
DICTIONARY_ELEMENTS,
- PIXEL_ELEMENTS,
+ NON_STRICT_ARGUMENTS_ELEMENTS,
+ // The "fast" kind for external arrays
EXTERNAL_BYTE_ELEMENTS,
EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
EXTERNAL_SHORT_ELEMENTS,
EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
EXTERNAL_INT_ELEMENTS,
EXTERNAL_UNSIGNED_INT_ELEMENTS,
- EXTERNAL_FLOAT_ELEMENTS
+ EXTERNAL_FLOAT_ELEMENTS,
+ EXTERNAL_DOUBLE_ELEMENTS,
+ EXTERNAL_PIXEL_ELEMENTS,
+
+ // Derived constants from ElementsKind
+ FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
+ LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
+ FIRST_ELEMENTS_KIND = FAST_ELEMENTS,
+ LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS
};
+ static const int kElementsKindCount =
+ LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
+
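
Two properties of this ordering matter: FAST_ELEMENTS is zero, so the most common check compiles to a test against zero, and the external kinds form one contiguous run. A toy demonstration; the enum mirrors only the ordering, not the real constants.

#include <cstdio>

enum Kind { FAST, FAST_DOUBLE, DICTIONARY, NON_STRICT_ARGUMENTS,
            EXT_BYTE, EXT_FLOAT, EXT_PIXEL,
            FIRST_EXT = EXT_BYTE, LAST_EXT = EXT_PIXEL };

bool HasFastElements(Kind k) { return k == FAST; }  // single compare vs. 0
bool IsExternal(Kind k) { return k >= FIRST_EXT && k <= LAST_EXT; }

int main() {
  std::printf("%d %d\n", HasFastElements(FAST), IsExternal(EXT_FLOAT));  // 1 1
  return 0;
}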
// [properties]: Backing storage for properties.
// properties is a FixedArray in the fast case and a Dictionary in the
// slow case.
@@ -1328,21 +1476,24 @@ class JSObject: public HeapObject {
//
// In the fast mode elements is a FixedArray and so each element can
// be quickly accessed. This fact is used in the generated code. The
- // elements array can have one of the two maps in this mode:
- // fixed_array_map or fixed_cow_array_map (for copy-on-write
- // arrays). In the latter case the elements array may be shared by a
- // few objects and so before writing to any element the array must
- // be copied. Use EnsureWritableFastElements in this case.
+ // elements array can have one of three maps in this mode:
+ // fixed_array_map, non_strict_arguments_elements_map or
+ // fixed_cow_array_map (for copy-on-write arrays). In the latter case
+ // the elements array may be shared by a few objects and so before
+ // writing to any element the array must be copied. Use
+ // EnsureWritableFastElements in this case.
//
- // In the slow mode elements is either a NumberDictionary or a
- // PixelArray or an ExternalArray.
+ // In the slow mode the elements is either a NumberDictionary, an
+ // ExternalArray, or a FixedArray parameter map for a (non-strict)
+ // arguments object.
DECL_ACCESSORS(elements, HeapObject)
inline void initialize_elements();
MUST_USE_RESULT inline MaybeObject* ResetElements();
inline ElementsKind GetElementsKind();
inline bool HasFastElements();
+ inline bool HasFastDoubleElements();
inline bool HasDictionaryElements();
- inline bool HasPixelElements();
+ inline bool HasExternalPixelElements();
inline bool HasExternalArrayElements();
inline bool HasExternalByteElements();
inline bool HasExternalUnsignedByteElements();
@@ -1351,9 +1502,13 @@ class JSObject: public HeapObject {
inline bool HasExternalIntElements();
inline bool HasExternalUnsignedIntElements();
inline bool HasExternalFloatElements();
+ inline bool HasExternalDoubleElements();
+ bool HasFastArgumentsElements();
+ bool HasDictionaryArgumentsElements();
inline bool AllowsSetElementsLength();
inline NumberDictionary* element_dictionary(); // Gets slow elements.
- // Requires: this->HasFastElements().
+
+ // Requires: HasFastElements().
MUST_USE_RESULT inline MaybeObject* EnsureWritableFastElements();
// Collects elements starting at index 0.
@@ -1364,36 +1519,35 @@ class JSObject: public HeapObject {
// a dictionary, and it will stay a dictionary.
MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit);
- MUST_USE_RESULT MaybeObject* SetProperty(String* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict);
- MUST_USE_RESULT MaybeObject* SetProperty(LookupResult* result,
+ MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result,
String* key,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict);
+ StrictModeFlag strict_mode);
MUST_USE_RESULT MaybeObject* SetPropertyWithFailedAccessCheck(
LookupResult* result,
String* name,
Object* value,
- bool check_prototype);
- MUST_USE_RESULT MaybeObject* SetPropertyWithCallback(Object* structure,
- String* name,
- Object* value,
- JSObject* holder);
+ bool check_prototype,
+ StrictModeFlag strict_mode);
+ MUST_USE_RESULT MaybeObject* SetPropertyWithCallback(
+ Object* structure,
+ String* name,
+ Object* value,
+ JSObject* holder,
+ StrictModeFlag strict_mode);
MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSFunction* setter,
Object* value);
MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
String* name,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict);
+ StrictModeFlag strict_mode);
MUST_USE_RESULT MaybeObject* SetPropertyPostInterceptor(
String* name,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict);
+ StrictModeFlag strict_mode);
MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
String* key,
Object* value,
@@ -1417,21 +1571,22 @@ class JSObject: public HeapObject {
MUST_USE_RESULT MaybeObject* DeleteNormalizedProperty(String* name,
DeleteMode mode);
- // Returns the class name ([[Class]] property in the specification).
- String* class_name();
-
- // Returns the constructor name (the name (possibly, inferred name) of the
- // function that was used to instantiate the object).
- String* constructor_name();
-
// Retrieve interceptors.
InterceptorInfo* GetNamedInterceptor();
InterceptorInfo* GetIndexedInterceptor();
- inline PropertyAttributes GetPropertyAttribute(String* name);
- PropertyAttributes GetPropertyAttributeWithReceiver(JSObject* receiver,
- String* name);
- PropertyAttributes GetLocalPropertyAttribute(String* name);
+ // Used from JSReceiver.
+ PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
+ String* name,
+ bool continue_search);
+ PropertyAttributes GetPropertyAttributeWithInterceptor(JSObject* receiver,
+ String* name,
+ bool continue_search);
+ PropertyAttributes GetPropertyAttributeWithFailedAccessCheck(
+ Object* receiver,
+ LookupResult* result,
+ String* name,
+ bool continue_search);
MUST_USE_RESULT MaybeObject* DefineAccessor(String* name,
bool is_getter,
@@ -1448,14 +1603,14 @@ class JSObject: public HeapObject {
String* name,
PropertyAttributes* attributes);
MaybeObject* GetPropertyWithInterceptor(
- JSObject* receiver,
+ JSReceiver* receiver,
String* name,
PropertyAttributes* attributes);
MaybeObject* GetPropertyPostInterceptor(
- JSObject* receiver,
+ JSReceiver* receiver,
String* name,
PropertyAttributes* attributes);
- MaybeObject* GetLocalPropertyPostInterceptor(JSObject* receiver,
+ MaybeObject* GetLocalPropertyPostInterceptor(JSReceiver* receiver,
String* name,
PropertyAttributes* attributes);
@@ -1463,15 +1618,6 @@ class JSObject: public HeapObject {
// been modified since it was created. May give false positives.
bool IsDirty();
- bool HasProperty(String* name) {
- return GetPropertyAttribute(name) != ABSENT;
- }
-
- // Can cause a GC if it hits an interceptor.
- bool HasLocalProperty(String* name) {
- return GetLocalPropertyAttribute(name) != ABSENT;
- }
-
// If the receiver is a JSGlobalProxy this method will return its prototype,
// otherwise the result is the receiver itself.
inline Object* BypassGlobalProxy();
@@ -1508,17 +1654,19 @@ class JSObject: public HeapObject {
// storage would. In that case the JSObject should have fast
// elements.
bool ShouldConvertToFastElements();
-
- // Return the object's prototype (might be Heap::null_value()).
- inline Object* GetPrototype();
-
- // Set the object's prototype (only JSObject and null are allowed).
- MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
- bool skip_hidden_prototypes);
+  // Returns true if the elements of the JSObject contain only values that
+  // can be represented in a FixedDoubleArray.
+ bool ShouldConvertToFastDoubleElements();
// Tells whether the index'th element is present.
inline bool HasElement(uint32_t index);
- bool HasElementWithReceiver(JSObject* receiver, uint32_t index);
+ bool HasElementWithReceiver(JSReceiver* receiver, uint32_t index);
+
+ // Computes the new capacity when expanding the elements of a JSObject.
+ static int NewElementsCapacity(int old_capacity) {
+ // (old_capacity + 50%) + 16
+ return old_capacity + (old_capacity >> 1) + 16;
+ }
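
A quick runnable check of the growth formula just above, new = old + old/2 + 16:

#include <cstdio>
#include <initializer_list>

int NewElementsCapacity(int old_capacity) {
  return old_capacity + (old_capacity >> 1) + 16;
}

int main() {
  // prints: 0 -> 16, 16 -> 40, 40 -> 76, 100 -> 166
  for (int c : {0, 16, 40, 100})
    std::printf("%d -> %d\n", c, NewElementsCapacity(c));
  return 0;
}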
// Tells whether the index'th element is present and how it is stored.
enum LocalElementType {
@@ -1540,18 +1688,30 @@ class JSObject: public HeapObject {
LocalElementType HasLocalElement(uint32_t index);
- bool HasElementWithInterceptor(JSObject* receiver, uint32_t index);
- bool HasElementPostInterceptor(JSObject* receiver, uint32_t index);
+ bool HasElementWithInterceptor(JSReceiver* receiver, uint32_t index);
+ bool HasElementPostInterceptor(JSReceiver* receiver, uint32_t index);
MUST_USE_RESULT MaybeObject* SetFastElement(uint32_t index,
Object* value,
- bool check_prototype = true);
+ StrictModeFlag strict_mode,
+ bool check_prototype);
+ MUST_USE_RESULT MaybeObject* SetDictionaryElement(uint32_t index,
+ Object* value,
+ StrictModeFlag strict_mode,
+ bool check_prototype);
+
+ MUST_USE_RESULT MaybeObject* SetFastDoubleElement(
+ uint32_t index,
+ Object* value,
+ StrictModeFlag strict_mode,
+ bool check_prototype = true);
// Set the index'th array element.
// A Failure object is returned if GC is needed.
MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
Object* value,
- bool check_prototype = true);
+ StrictModeFlag strict_mode,
+ bool check_prototype);
// Returns the index'th element.
// The undefined object if index is out of bounds.
@@ -1563,8 +1723,14 @@ class JSObject: public HeapObject {
// failed.
MaybeObject* GetExternalElement(uint32_t index);
+ // Replace the elements' backing store with fast elements of the given
+ // capacity. Update the length for JSArrays. Returns the new backing
+ // store.
MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity,
int length);
+ MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
+ int capacity,
+ int length);
MUST_USE_RESULT MaybeObject* SetSlowElements(Object* length);
// Lookup interceptors are used for handling properties controlled by host
@@ -1585,13 +1751,13 @@ class JSObject: public HeapObject {
inline int GetHeaderSize();
inline int GetInternalFieldCount();
+ inline int GetInternalFieldOffset(int index);
inline Object* GetInternalField(int index);
inline void SetInternalField(int index, Object* value);
// Lookup a property. If found, the result is valid and has
// detailed information.
void LocalLookup(String* name, LookupResult* result);
- void Lookup(String* name, LookupResult* result);
// The following lookup functions skip interceptors.
void LocalLookupRealNamedProperty(String* name, LookupResult* result);
@@ -1599,7 +1765,7 @@ class JSObject: public HeapObject {
void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result);
void LookupCallbackSetterInPrototypes(String* name, LookupResult* result);
MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
- uint32_t index, Object* value, bool* found);
+ uint32_t index, Object* value, bool* found, StrictModeFlag strict_mode);
void LookupCallback(String* name, LookupResult* result);
// Returns the number of properties on this object filtering out properties
@@ -1679,7 +1845,8 @@ class JSObject: public HeapObject {
// Add a property to an object.
MUST_USE_RESULT MaybeObject* AddProperty(String* name,
Object* value,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
// Convert the object to use the canonical dictionary
// representation. If the object is expected to have additional properties
@@ -1688,6 +1855,9 @@ class JSObject: public HeapObject {
MUST_USE_RESULT MaybeObject* NormalizeProperties(
PropertyNormalizationMode mode,
int expected_additional_properties);
+
+  // Convert and update the elements backing store to be a NumberDictionary.
+  // Returns the new backing store after conversion.
MUST_USE_RESULT MaybeObject* NormalizeElements();
MUST_USE_RESULT MaybeObject* UpdateMapCodeCache(String* name, Code* code);
@@ -1702,6 +1872,7 @@ class JSObject: public HeapObject {
inline Object* FastPropertyAtPut(int index, Object* value);
// Access to in object properties.
+ inline int GetInObjectPropertyOffset(int index);
inline Object* InObjectPropertyAt(int index);
inline Object* InObjectPropertyAtPut(int index,
Object* value,
@@ -1809,13 +1980,17 @@ class JSObject: public HeapObject {
MaybeObject* SetElementWithCallback(Object* structure,
uint32_t index,
Object* value,
- JSObject* holder);
- MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(uint32_t index,
- Object* value,
- bool check_prototype);
+ JSObject* holder,
+ StrictModeFlag strict_mode);
+ MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(
+ uint32_t index,
+ Object* value,
+ StrictModeFlag strict_mode,
+ bool check_prototype);
MUST_USE_RESULT MaybeObject* SetElementWithoutInterceptor(
uint32_t index,
Object* value,
+ StrictModeFlag strict_mode,
bool check_prototype);
MaybeObject* GetElementPostInterceptor(Object* receiver, uint32_t index);
@@ -1828,21 +2003,16 @@ class JSObject: public HeapObject {
DeleteMode mode);
MUST_USE_RESULT MaybeObject* DeleteElementWithInterceptor(uint32_t index);
- PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
- String* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttributeWithInterceptor(JSObject* receiver,
- String* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttributeWithFailedAccessCheck(
- Object* receiver,
- LookupResult* result,
- String* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttribute(JSObject* receiver,
- LookupResult* result,
- String* name,
- bool continue_search);
+ MUST_USE_RESULT MaybeObject* DeleteFastElement(uint32_t index);
+ MUST_USE_RESULT MaybeObject* DeleteDictionaryElement(uint32_t index,
+ DeleteMode mode);
+
+ bool ReferencesObjectFromElements(FixedArray* elements,
+ ElementsKind kind,
+ Object* object);
+ bool HasElementInElements(FixedArray* elements,
+ ElementsKind kind,
+ uint32_t index);
// Returns true if most of the elements backing storage is used.
bool HasDenseElements();
@@ -1866,13 +2036,26 @@ class JSObject: public HeapObject {
};
-// FixedArray describes fixed-sized arrays with element type Object*.
-class FixedArray: public HeapObject {
+// Common superclass for FixedArrays that allow implementations to share
+// common accessors and some code paths.
+class FixedArrayBase: public HeapObject {
public:
// [length]: length of the array.
inline int length();
inline void set_length(int value);
+ inline static FixedArrayBase* cast(Object* object);
+
+ // Layout description.
+ // Length is smi tagged when it is stored.
+ static const int kLengthOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = kLengthOffset + kPointerSize;
+};
+
+
+// FixedArray describes fixed-sized arrays with element type Object*.
+class FixedArray: public FixedArrayBase {
+ public:
// Setter and getter for elements.
inline Object* get(int index);
// Setter that uses write barrier.
@@ -1885,13 +2068,18 @@ class FixedArray: public HeapObject {
// Setters for frequently used oddballs located in old space.
inline void set_undefined(int index);
+ // TODO(isolates): duplicate.
+ inline void set_undefined(Heap* heap, int index);
inline void set_null(int index);
+ // TODO(isolates): duplicate.
+ inline void set_null(Heap* heap, int index);
inline void set_the_hole(int index);
// Setters with less debug checks for the GC to use.
inline void set_unchecked(int index, Smi* value);
- inline void set_null_unchecked(int index);
- inline void set_unchecked(int index, Object* value, WriteBarrierMode mode);
+ inline void set_null_unchecked(Heap* heap, int index);
+ inline void set_unchecked(Heap* heap, int index, Object* value,
+ WriteBarrierMode mode);
// Gives access to raw memory which stores the array's data.
inline Object** data_start();
@@ -1918,11 +2106,6 @@ class FixedArray: public HeapObject {
// Casting.
static inline FixedArray* cast(Object* obj);
- // Layout description.
- // Length is smi tagged when it is stored.
- static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = kLengthOffset + kPointerSize;
-
// Maximal allowed size, in bytes, of a single FixedArray.
// Prevents overflowing size computations, as well as extreme memory
// consumption.
@@ -1970,23 +2153,96 @@ class FixedArray: public HeapObject {
};
+// FixedDoubleArray describes fixed-sized arrays with element type double.
+class FixedDoubleArray: public FixedArrayBase {
+ public:
+ inline void Initialize(FixedArray* from);
+ inline void Initialize(FixedDoubleArray* from);
+ inline void Initialize(NumberDictionary* from);
+
+ // Setter and getter for elements.
+ inline double get(int index);
+ inline void set(int index, double value);
+ inline void set_the_hole(int index);
+
+ // Checking for the hole.
+ inline bool is_the_hole(int index);
+
+ // Garbage collection support.
+ inline static int SizeFor(int length) {
+ return kHeaderSize + length * kDoubleSize;
+ }
+
+ // The following can't be declared inline as const static
+ // because they're 64-bit.
+ static uint64_t kCanonicalNonHoleNanLower32;
+ static uint64_t kCanonicalNonHoleNanInt64;
+ static uint64_t kHoleNanInt64;
+
+ inline static bool is_the_hole_nan(double value) {
+ return BitCast<uint64_t, double>(value) == kHoleNanInt64;
+ }
+
+ inline static double hole_nan_as_double() {
+ return BitCast<double, uint64_t>(kHoleNanInt64);
+ }
+
+ inline static double canonical_not_the_hole_nan_as_double() {
+ return BitCast<double, uint64_t>(kCanonicalNonHoleNanInt64);
+ }
+
+ // Casting.
+ static inline FixedDoubleArray* cast(Object* obj);
+
+ // Maximal allowed size, in bytes, of a single FixedDoubleArray.
+ // Prevents overflowing size computations, as well as extreme memory
+ // consumption.
+ static const int kMaxSize = 512 * MB;
+  // Maximal allowed length of a FixedDoubleArray.
+ static const int kMaxLength = (kMaxSize - kHeaderSize) / kDoubleSize;
+
+ // Dispatched behavior.
+#ifdef OBJECT_PRINT
+ inline void FixedDoubleArrayPrint() {
+ FixedDoubleArrayPrint(stdout);
+ }
+ void FixedDoubleArrayPrint(FILE* out);
+#endif
+
+#ifdef DEBUG
+ void FixedDoubleArrayVerify();
+#endif
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FixedDoubleArray);
+};
+
+
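
The hole constants above reserve one specific NaN bit pattern to mean "no element here"; every other NaN is canonicalized so it can never collide with it. A standalone illustration; the bit pattern below is a placeholder, the real constants are defined in objects.cc.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint64_t ToBits(double d) { uint64_t b; std::memcpy(&b, &d, 8); return b; }
static double FromBits(uint64_t b) { double d; std::memcpy(&d, &b, 8); return d; }

static const uint64_t kHoleNan = 0x7FF7FFFFFFFFFFFFull;  // placeholder pattern

bool IsTheHole(double v) { return ToBits(v) == kHoleNan; }

int main() {
  double hole = FromBits(kHoleNan);
  double plain_nan = std::nan("");  // a different NaN bit pattern
  std::printf("%d %d\n", IsTheHole(hole), IsTheHole(plain_nan));  // prints: 1 0
  return 0;
}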
// DescriptorArrays are fixed arrays used to hold instance descriptors.
// The format of the these objects is:
-// [0]: point to a fixed array with (value, detail) pairs.
-// [1]: next enumeration index (Smi), or pointer to small fixed array:
+// TODO(1399): It should be possible to make room for bit_field3 in the map
+// without overloading the instance descriptors field in the map
+// (and storing it in the DescriptorArray when the map has one).
+// [0]: storage for bit_field3 for Map owning this object (Smi)
+// [1]: point to a fixed array with (value, detail) pairs.
+// [2]: next enumeration index (Smi), or pointer to small fixed array:
// [0]: next enumeration index (Smi)
// [1]: pointer to fixed array with enum cache
-// [2]: first key
+// [3]: first key
// [length() - 1]: last key
//
class DescriptorArray: public FixedArray {
public:
- // Is this the singleton empty_descriptor_array?
+  // Returns true both for the shared empty_descriptor_array and for Smis,
+  // which the map uses to encode additional bit fields when the descriptor
+  // array is not yet used.
inline bool IsEmpty();
// Returns the number of descriptors in the array.
int number_of_descriptors() {
- return IsEmpty() ? 0 : length() - kFirstIndex;
+ ASSERT(length() > kFirstIndex || IsEmpty());
+ int len = length();
+ return len <= kFirstIndex ? 0 : len - kFirstIndex;
}
int NextEnumerationIndex() {
@@ -2016,6 +2272,12 @@ class DescriptorArray: public FixedArray {
return bridge->get(kEnumCacheBridgeCacheIndex);
}
+ // TODO(1399): It should be possible to make room for bit_field3 in the map
+ // without overloading the instance descriptors field in the map
+ // (and storing it in the DescriptorArray when the map has one).
+ inline int bit_field3_storage();
+ inline void set_bit_field3_storage(int value);
+
// Initialize or change the enum cache,
// using the supplied storage for the small "bridge".
void SetEnumCache(FixedArray* bridge_storage, FixedArray* new_cache);
@@ -2094,9 +2356,10 @@ class DescriptorArray: public FixedArray {
// Constant for denoting key was not found.
static const int kNotFound = -1;
- static const int kContentArrayIndex = 0;
- static const int kEnumerationIndexIndex = 1;
- static const int kFirstIndex = 2;
+ static const int kBitField3StorageIndex = 0;
+ static const int kContentArrayIndex = 1;
+ static const int kEnumerationIndexIndex = 2;
+ static const int kFirstIndex = 3;
// The length of the "bridge" to the enum cache.
static const int kEnumCacheBridgeLength = 2;
@@ -2104,7 +2367,8 @@ class DescriptorArray: public FixedArray {
static const int kEnumCacheBridgeCacheIndex = 1;
// Layout description.
- static const int kContentArrayOffset = FixedArray::kHeaderSize;
+ static const int kBitField3StorageOffset = FixedArray::kHeaderSize;
+ static const int kContentArrayOffset = kBitField3StorageOffset + kPointerSize;
static const int kEnumerationIndexOffset = kContentArrayOffset + kPointerSize;
static const int kFirstOffset = kEnumerationIndexOffset + kPointerSize;
@@ -2278,10 +2542,10 @@ class HashTable: public FixedArray {
(FixedArray::kMaxLength - kElementsStartOffset) / kEntrySize;
// Find entry for key otherwise return kNotFound.
- int FindEntry(Key key);
+ inline int FindEntry(Key key);
+ int FindEntry(Isolate* isolate, Key key);
protected:
-
// Find the entry at which to insert element with the given key that
// has the given hash value.
uint32_t FindInsertionEntry(uint32_t hash);
@@ -2326,6 +2590,12 @@ class HashTable: public FixedArray {
return (last + number) & (size - 1);
}
+ // Rehashes this hash-table into the new table.
+ MUST_USE_RESULT MaybeObject* Rehash(HashTable* new_table, Key key);
+
+ // Attempt to shrink hash table after removal of key.
+ MUST_USE_RESULT MaybeObject* Shrink(Key key);
+
// Ensure enough space for n additional elements.
MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
};
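
The Shrink(key) hook added above enables shrink-on-removal for hash tables. A hedged sketch of what such a policy can look like; the quarter-full trigger and the floor of 16 are illustrative assumptions, not V8's actual thresholds.

#include <cstdio>

int ShrunkCapacity(int capacity, int elements) {
  // Halve while the table is at most a quarter full, keeping a small floor.
  while (capacity > 16 && elements <= capacity / 4) capacity /= 2;
  return capacity;
}

int main() {
  std::printf("%d\n", ShrunkCapacity(256, 20));  // 256 -> 128 -> 64
  return 0;
}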
@@ -2350,16 +2620,16 @@ class HashTableKey {
class SymbolTableShape {
public:
- static bool IsMatch(HashTableKey* key, Object* value) {
+ static inline bool IsMatch(HashTableKey* key, Object* value) {
return key->IsMatch(value);
}
- static uint32_t Hash(HashTableKey* key) {
+ static inline uint32_t Hash(HashTableKey* key) {
return key->Hash();
}
- static uint32_t HashForObject(HashTableKey* key, Object* object) {
+ static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
return key->HashForObject(object);
}
- MUST_USE_RESULT static MaybeObject* AsObject(HashTableKey* key) {
+ MUST_USE_RESULT static inline MaybeObject* AsObject(HashTableKey* key) {
return key->AsObject();
}
@@ -2367,6 +2637,8 @@ class SymbolTableShape {
static const int kEntrySize = 1;
};
+class SeqAsciiString;
+
// SymbolTable.
//
// No special elements in the prefix and the element size is 1
@@ -2380,6 +2652,11 @@ class SymbolTable: public HashTable<SymbolTableShape, HashTableKey*> {
MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str, Object** s);
MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str,
Object** s);
+ MUST_USE_RESULT MaybeObject* LookupSubStringAsciiSymbol(
+ Handle<SeqAsciiString> str,
+ int from,
+ int length,
+ Object** s);
MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(Vector<const uc16> str,
Object** s);
MUST_USE_RESULT MaybeObject* LookupString(String* key, Object** s);
@@ -2402,18 +2679,18 @@ class SymbolTable: public HashTable<SymbolTableShape, HashTableKey*> {
class MapCacheShape {
public:
- static bool IsMatch(HashTableKey* key, Object* value) {
+ static inline bool IsMatch(HashTableKey* key, Object* value) {
return key->IsMatch(value);
}
- static uint32_t Hash(HashTableKey* key) {
+ static inline uint32_t Hash(HashTableKey* key) {
return key->Hash();
}
- static uint32_t HashForObject(HashTableKey* key, Object* object) {
+ static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
return key->HashForObject(object);
}
- MUST_USE_RESULT static MaybeObject* AsObject(HashTableKey* key) {
+ MUST_USE_RESULT static inline MaybeObject* AsObject(HashTableKey* key) {
return key->AsObject();
}
@@ -2441,7 +2718,6 @@ class MapCache: public HashTable<MapCacheShape, HashTableKey*> {
template <typename Shape, typename Key>
class Dictionary: public HashTable<Shape, Key> {
public:
-
static inline Dictionary<Shape, Key>* cast(Object* obj) {
return reinterpret_cast<Dictionary<Shape, Key>*>(obj);
}
@@ -2452,13 +2728,18 @@ class Dictionary: public HashTable<Shape, Key> {
}
// Set the value for entry.
- void ValueAtPut(int entry, Object* value) {
+  // Returns false if the put wasn't performed because the property is
+  // read-only. Returns true on a successful put.
+ bool ValueAtPut(int entry, Object* value) {
// Check that this value can actually be written.
PropertyDetails details = DetailsAt(entry);
    // If a value has not been initialized, we allow writing to it even if
// it is read only (a declared const that has not been initialized).
- if (details.IsReadOnly() && !ValueAt(entry)->IsTheHole()) return;
- this->set(HashTable<Shape, Key>::EntryToIndex(entry)+1, value);
+ if (details.IsReadOnly() && !ValueAt(entry)->IsTheHole()) {
+ return false;
+ }
+ this->set(HashTable<Shape, Key>::EntryToIndex(entry) + 1, value);
+ return true;
}
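
A toy rendition of the contract just above: a write to a read-only slot succeeds only while the slot still holds the hole (a declared const awaiting initialization), and refusal is now signalled by returning false.

#include <cstdio>

struct Slot { bool read_only; bool is_hole; int value; };

bool ValueAtPut(Slot* s, int v) {
  if (s->read_only && !s->is_hole) return false;  // refuse the write
  s->value = v;
  s->is_hole = false;
  return true;
}

int main() {
  Slot uninitialized_const{true, true, 0}, initialized_const{true, false, 1};
  std::printf("%d %d\n",
              ValueAtPut(&uninitialized_const, 7),  // 1: const not yet set
              ValueAtPut(&initialized_const, 7));   // 0: write refused
  return 0;
}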
// Returns the property details for the property at entry.
@@ -2479,6 +2760,9 @@ class Dictionary: public HashTable<Shape, Key> {
// Delete a property from the dictionary.
Object* DeleteProperty(int entry, JSObject::DeleteMode mode);
+ // Attempt to shrink the dictionary after deletion of key.
+ MUST_USE_RESULT MaybeObject* Shrink(Key key);
+
// Returns the number of elements in the dictionary filtering out properties
// with the specified attributes.
int NumberOfElementsFilterAttributes(PropertyAttributes filter);
@@ -2518,6 +2802,9 @@ class Dictionary: public HashTable<Shape, Key> {
// Sets the entry to (key, value) pair.
inline void SetEntry(int entry,
Object* key,
+ Object* value);
+ inline void SetEntry(int entry,
+ Object* key,
Object* value,
PropertyDetails details);
@@ -2687,11 +2974,6 @@ class NormalizedMapCache: public FixedArray {
#ifdef DEBUG
void NormalizedMapCacheVerify();
#endif
-
- private:
- static int Hash(Map* fast);
-
- static bool CheckHit(Map* slow, Map* fast, PropertyNormalizationMode mode);
};
@@ -2764,59 +3046,6 @@ class ByteArray: public HeapObject {
};
-// A PixelArray represents a fixed-size byte array with special semantics
-// used for implementing the CanvasPixelArray object. Please see the
-// specification at:
-// http://www.whatwg.org/specs/web-apps/current-work/
-// multipage/the-canvas-element.html#canvaspixelarray
-// In particular, write access clamps the value written to 0 or 255 if the
-// value written is outside this range.
-class PixelArray: public HeapObject {
- public:
- // [length]: length of the array.
- inline int length();
- inline void set_length(int value);
-
- // [external_pointer]: The pointer to the external memory area backing this
- // pixel array.
- DECL_ACCESSORS(external_pointer, uint8_t) // Pointer to the data store.
-
- // Setter and getter.
- inline uint8_t get(int index);
- inline void set(int index, uint8_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber and
- // undefined and clamps the converted value between 0 and 255.
- Object* SetValue(uint32_t index, Object* value);
-
- // Casting.
- static inline PixelArray* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void PixelArrayPrint() {
- PixelArrayPrint(stdout);
- }
- void PixelArrayPrint(FILE* out);
-#endif
-#ifdef DEBUG
- void PixelArrayVerify();
-#endif // DEBUG
-
- // Maximal acceptable length for a pixel array.
- static const int kMaxLength = 0x3fffffff;
-
- // PixelArray headers are not quadword aligned.
- static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kExternalPointerOffset =
- POINTER_SIZE_ALIGN(kLengthOffset + kIntSize);
- static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
- static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PixelArray);
-};
-
-
// An ExternalArray represents a fixed-size array of primitive values
// which live outside the JavaScript heap. Its subclasses are used to
// implement the CanvasArray types being defined in the WebGL
@@ -2856,6 +3085,44 @@ class ExternalArray: public HeapObject {
};
+// An ExternalPixelArray represents a fixed-size byte array with special
+// semantics used for implementing the CanvasPixelArray object. Please see the
+// specification at:
+// http://www.whatwg.org/specs/web-apps/current-work/
+// multipage/the-canvas-element.html#canvaspixelarray
+// In particular, write access clamps the value written to 0 or 255 if the
+// value written is outside this range.
+class ExternalPixelArray: public ExternalArray {
+ public:
+ inline uint8_t* external_pixel_pointer();
+
+ // Setter and getter.
+ inline uint8_t get(int index);
+ inline void set(int index, uint8_t value);
+
+ // This accessor applies the correct conversion from Smi, HeapNumber and
+ // undefined and clamps the converted value between 0 and 255.
+ Object* SetValue(uint32_t index, Object* value);
+
+ // Casting.
+ static inline ExternalPixelArray* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+ inline void ExternalPixelArrayPrint() {
+ ExternalPixelArrayPrint(stdout);
+ }
+ void ExternalPixelArrayPrint(FILE* out);
+#endif
+#ifdef DEBUG
+ void ExternalPixelArrayVerify();
+#endif // DEBUG
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalPixelArray);
+};
+
+
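
A minimal sketch of the clamping rule described above for pixel writes; the round-to-nearest step is an assumption here, and the real conversion sits behind SetValue().

#include <cstdint>
#include <cstdio>

uint8_t ClampPixel(double value) {
  if (!(value > 0)) return 0;  // NaN and negative values clamp to 0
  if (value > 255) return 255;
  return static_cast<uint8_t>(value + 0.5);  // assumed round-to-nearest
}

int main() {
  // prints: 0 255 128
  std::printf("%d %d %d\n", ClampPixel(-5), ClampPixel(300), ClampPixel(127.6));
  return 0;
}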
class ExternalByteArray: public ExternalArray {
public:
// Setter and getter.
@@ -3052,6 +3319,34 @@ class ExternalFloatArray: public ExternalArray {
};
+class ExternalDoubleArray: public ExternalArray {
+ public:
+ // Setter and getter.
+ inline double get(int index);
+ inline void set(int index, double value);
+
+ // This accessor applies the correct conversion from Smi, HeapNumber
+ // and undefined.
+ MaybeObject* SetValue(uint32_t index, Object* value);
+
+ // Casting.
+ static inline ExternalDoubleArray* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+ inline void ExternalDoubleArrayPrint() {
+ ExternalDoubleArrayPrint(stdout);
+ }
+ void ExternalDoubleArrayPrint(FILE* out);
+#endif // OBJECT_PRINT
+#ifdef DEBUG
+ void ExternalDoubleArrayVerify();
+#endif // DEBUG
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalDoubleArray);
+};
+
+
// DeoptimizationInputData is a fixed array used to hold the deoptimization
// data for code generated by the Hydrogen/Lithium compiler. It also
// contains information about functions that were inlined. If N different
@@ -3123,7 +3418,7 @@ class DeoptimizationInputData: public FixedArray {
// Casting.
static inline DeoptimizationInputData* cast(Object* obj);
-#ifdef OBJECT_PRINT
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
void DeoptimizationInputDataPrint(FILE* out);
#endif
@@ -3162,7 +3457,7 @@ class DeoptimizationOutputData: public FixedArray {
// Casting.
static inline DeoptimizationOutputData* cast(Object* obj);
-#ifdef OBJECT_PRINT
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
void DeoptimizationOutputDataPrint(FILE* out);
#endif
};
@@ -3194,8 +3489,8 @@ class Code: public HeapObject {
KEYED_CALL_IC,
STORE_IC,
KEYED_STORE_IC,
+ UNARY_OP_IC,
BINARY_OP_IC,
- TYPE_RECORDING_BINARY_OP_IC,
COMPARE_IC,
// No more than 16 kinds. The value currently encoded in four bits in
// Flags.
@@ -3237,6 +3532,12 @@ class Code: public HeapObject {
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
+  // [next_code_flushing_candidate]: Field only used during garbage
+  // collection to hold code flushing candidates. The contents of this
+  // field do not have to be traced during garbage collection since
+  // it is only used by the garbage collector itself.
+ DECL_ACCESSORS(next_code_flushing_candidate, Object)
+
// Unchecked accessors to be used during GC.
inline ByteArray* unchecked_relocation_info();
inline FixedArray* unchecked_deoptimization_data();
@@ -3263,9 +3564,11 @@ class Code: public HeapObject {
inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
inline bool is_call_stub() { return kind() == CALL_IC; }
inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
- inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; }
- inline bool is_type_recording_binary_op_stub() {
- return kind() == TYPE_RECORDING_BINARY_OP_IC;
+ inline bool is_unary_op_stub() {
+ return kind() == UNARY_OP_IC;
+ }
+ inline bool is_binary_op_stub() {
+ return kind() == BINARY_OP_IC;
}
inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; }
@@ -3309,15 +3612,15 @@ class Code: public HeapObject {
inline CheckType check_type();
inline void set_check_type(CheckType value);
- // [binary op type]: For all BINARY_OP_IC.
- inline byte binary_op_type();
- inline void set_binary_op_type(byte value);
+ // [type-recording unary op type]: For all UNARY_OP_IC.
+ inline byte unary_op_type();
+ inline void set_unary_op_type(byte value);
// [type-recording binary op type]: For all TYPE_RECORDING_BINARY_OP_IC.
- inline byte type_recording_binary_op_type();
- inline void set_type_recording_binary_op_type(byte value);
- inline byte type_recording_binary_op_result_type();
- inline void set_type_recording_binary_op_result_type(byte value);
+ inline byte binary_op_type();
+ inline void set_binary_op_type(byte value);
+ inline byte binary_op_result_type();
+ inline void set_binary_op_result_type(byte value);
// [compare state]: For kind compare IC stubs, tells what state the
// stub is in.
@@ -3419,7 +3722,7 @@ class Code: public HeapObject {
inline void CodeIterateBody(ObjectVisitor* v);
template<typename StaticVisitor>
- inline void CodeIterateBody();
+ inline void CodeIterateBody(Heap* heap);
#ifdef OBJECT_PRINT
inline void CodePrint() {
CodePrint(stdout);
@@ -3430,6 +3733,10 @@ class Code: public HeapObject {
void CodeVerify();
#endif
+ // Returns the isolate/heap this code object belongs to.
+ inline Isolate* isolate();
+ inline Heap* heap();
+
// Max loop nesting marker used to postpose OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
@@ -3439,9 +3746,12 @@ class Code: public HeapObject {
static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
static const int kDeoptimizationDataOffset =
kRelocationInfoOffset + kPointerSize;
- static const int kFlagsOffset = kDeoptimizationDataOffset + kPointerSize;
- static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
+ static const int kNextCodeFlushingCandidateOffset =
+ kDeoptimizationDataOffset + kPointerSize;
+ static const int kFlagsOffset =
+ kNextCodeFlushingCandidateOffset + kPointerSize;
+ static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
static const int kKindSpecificFlagsSize = 2 * kIntSize;
static const int kHeaderPaddingStart = kKindSpecificFlagsOffset +
@@ -3459,6 +3769,7 @@ class Code: public HeapObject {
static const int kCheckTypeOffset = kKindSpecificFlagsOffset;
static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
+ static const int kUnaryOpTypeOffset = kStubMajorKeyOffset + 1;
static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
static const int kHasDeoptimizationSupportOffset = kOptimizableOffset + 1;
@@ -3473,18 +3784,18 @@ class Code: public HeapObject {
static const int kFlagsICStateShift = 0;
static const int kFlagsICInLoopShift = 3;
static const int kFlagsTypeShift = 4;
- static const int kFlagsKindShift = 7;
- static const int kFlagsICHolderShift = 11;
- static const int kFlagsExtraICStateShift = 12;
- static const int kFlagsArgumentsCountShift = 14;
+ static const int kFlagsKindShift = 8;
+ static const int kFlagsICHolderShift = 12;
+ static const int kFlagsExtraICStateShift = 13;
+ static const int kFlagsArgumentsCountShift = 15;
static const int kFlagsICStateMask = 0x00000007; // 00000000111
static const int kFlagsICInLoopMask = 0x00000008; // 00000001000
- static const int kFlagsTypeMask = 0x00000070; // 00001110000
- static const int kFlagsKindMask = 0x00000780; // 11110000000
- static const int kFlagsCacheInPrototypeMapMask = 0x00000800;
- static const int kFlagsExtraICStateMask = 0x00003000;
- static const int kFlagsArgumentsCountMask = 0xFFFFC000;
+ static const int kFlagsTypeMask = 0x000000F0; // 000011110000
+ static const int kFlagsKindMask = 0x00000F00; // 111100000000
+ static const int kFlagsCacheInPrototypeMapMask = 0x00001000;
+ static const int kFlagsExtraICStateMask = 0x00006000;
+ static const int kFlagsArgumentsCountMask = 0xFFFF8000;
static const int kFlagsNotUsedInLookup =
(kFlagsICInLoopMask | kFlagsTypeMask | kFlagsCacheInPrototypeMapMask);
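
The widened layout above is easy to sanity-check in isolation. The sketch below mirrors the new shift and mask constants; EncodeFlags and DecodeKind are hypothetical helpers for illustration, not members of the Code class.

#include <cassert>
#include <stdint.h>

namespace {

const int kFlagsICStateShift = 0;
const int kFlagsTypeShift = 4;
const int kFlagsKindShift = 8;  // moved up from 7: kind now gets four bits
const uint32_t kFlagsICStateMask = 0x00000007;
const uint32_t kFlagsTypeMask = 0x000000F0;
const uint32_t kFlagsKindMask = 0x00000F00;

// Illustrative helpers, not V8 API.
uint32_t EncodeFlags(uint32_t ic_state, uint32_t type, uint32_t kind) {
  return (ic_state << kFlagsICStateShift) |
         (type << kFlagsTypeShift) |
         (kind << kFlagsKindShift);
}

uint32_t DecodeKind(uint32_t flags) {
  return (flags & kFlagsKindMask) >> kFlagsKindShift;
}

}  // namespace

int main() {
  // Sixteen kinds now fit: kind 15 survives a round trip without
  // clobbering the neighbouring type and IC-state fields.
  uint32_t flags = EncodeFlags(5, 9, 15);
  assert(DecodeKind(flags) == 15);
  assert((flags & kFlagsICStateMask) == 5);
  assert(((flags & kFlagsTypeMask) >> kFlagsTypeShift) == 9);
  return 0;
}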
@@ -3531,6 +3842,13 @@ class Map: public HeapObject {
inline byte bit_field2();
inline void set_bit_field2(byte value);
+ // Bit field 3.
+ // TODO(1399): It should be possible to make room for bit_field3 in the map
+ // without overloading the instance descriptors field (and storing it in the
+ // DescriptorArray when the map has one).
+ inline int bit_field3();
+ inline void set_bit_field3(int value);
+
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
// property is set to a value that is not a JSObject, the prototype
@@ -3599,31 +3917,33 @@ class Map: public HeapObject {
inline void set_is_extensible(bool value);
inline bool is_extensible();
- // Tells whether the instance has fast elements.
- // Equivalent to instance->GetElementsKind() == FAST_ELEMENTS.
- inline void set_has_fast_elements(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kHasFastElements));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kHasFastElements));
- }
+ inline void set_elements_kind(JSObject::ElementsKind elements_kind) {
+ ASSERT(elements_kind < JSObject::kElementsKindCount);
+ ASSERT(JSObject::kElementsKindCount <= (1 << kElementsKindBitCount));
+ set_bit_field2((bit_field2() & ~kElementsKindMask) |
+ (elements_kind << kElementsKindShift));
+ ASSERT(this->elements_kind() == elements_kind);
+ }
+
+ inline JSObject::ElementsKind elements_kind() {
+ return static_cast<JSObject::ElementsKind>(
+ (bit_field2() & kElementsKindMask) >> kElementsKindShift);
}
+ // Tells whether the instance has fast elements.
+ // Equivalent to instance->GetElementsKind() == FAST_ELEMENTS.
inline bool has_fast_elements() {
- return ((1 << kHasFastElements) & bit_field2()) != 0;
+ return elements_kind() == JSObject::FAST_ELEMENTS;
}
- // Tells whether an instance has pixel array elements.
- inline void set_has_pixel_array_elements(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kHasPixelArrayElements));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kHasPixelArrayElements));
- }
+ inline bool has_fast_double_elements() {
+ return elements_kind() == JSObject::FAST_DOUBLE_ELEMENTS;
}
- inline bool has_pixel_array_elements() {
- return ((1 << kHasPixelArrayElements) & bit_field2()) != 0;
+ inline bool has_external_array_elements() {
+ JSObject::ElementsKind kind(elements_kind());
+ return kind >= JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
+ kind <= JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
}
// Tells whether the map is attached to SharedFunctionInfo
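
As a standalone illustration of the new encoding, the sketch below packs a placeholder ElementsKind enum into four bits of bit_field2 using the same shift/mask arithmetic as above; the enum values and free functions are stand-ins, not the real JSObject and Map members.

#include <cassert>

// Sketch of packing an elements kind into bit_field2, mirroring the
// kElementsKindShift/kElementsKindBitCount scheme. Placeholder values.
enum ElementsKind { FAST_ELEMENTS = 0, FAST_DOUBLE_ELEMENTS = 1,
                    DICTIONARY_ELEMENTS = 2, kElementsKindCount = 3 };

const int kElementsKindShift = 4;
const int kElementsKindBitCount = 4;
const int kElementsKindMask = (-1 << kElementsKindShift) &
    ((1 << (kElementsKindShift + kElementsKindBitCount)) - 1);

int set_elements_kind(int bit_field2, ElementsKind kind) {
  assert(kind < kElementsKindCount);
  return (bit_field2 & ~kElementsKindMask) | (kind << kElementsKindShift);
}

ElementsKind elements_kind(int bit_field2) {
  return static_cast<ElementsKind>(
      (bit_field2 & kElementsKindMask) >> kElementsKindShift);
}

int main() {
  int bf2 = 0x03;  // unrelated low bits must stay untouched
  bf2 = set_elements_kind(bf2, FAST_DOUBLE_ELEMENTS);
  assert(elements_kind(bf2) == FAST_DOUBLE_ELEMENTS);
  assert((bf2 & 0x0F) == 0x03);
  return 0;
}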
@@ -3652,12 +3972,50 @@ class Map: public HeapObject {
inline JSFunction* unchecked_constructor();
+ // Should only be called by code that initializes the map, to set the
+ // initial valid value of the instance descriptors member.
+ inline void init_instance_descriptors();
+
// [instance descriptors]: describes the object.
DECL_ACCESSORS(instance_descriptors, DescriptorArray)
+ // Sets the instance descriptor array for the map to be an empty descriptor
+ // array.
+ inline void clear_instance_descriptors();
+
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, Object)
+ // [prototype transitions]: cache of prototype transitions.
+ // A prototype transition is a transition that happens
+ // when we change an object's prototype to a new one.
+ // Cache format:
+ // 0: finger - index of the first free cell in the cache
+ // 1 + 2 * i: prototype
+ // 2 + 2 * i: target map
+ DECL_ACCESSORS(prototype_transitions, FixedArray)
+ inline FixedArray* unchecked_prototype_transitions();
+
+ static const int kProtoTransitionHeaderSize = 1;
+ static const int kProtoTransitionNumberOfEntriesOffset = 0;
+ static const int kProtoTransitionElementsPerEntry = 2;
+ static const int kProtoTransitionPrototypeOffset = 0;
+ static const int kProtoTransitionMapOffset = 1;
+
+ inline int NumberOfProtoTransitions() {
+ FixedArray* cache = unchecked_prototype_transitions();
+ if (cache->length() == 0) return 0;
+ return
+ Smi::cast(cache->get(kProtoTransitionNumberOfEntriesOffset))->value();
+ }
+
+ inline void SetNumberOfProtoTransitions(int value) {
+ FixedArray* cache = unchecked_prototype_transitions();
+ ASSERT(cache->length() != 0);
+ cache->set_unchecked(kProtoTransitionNumberOfEntriesOffset,
+ Smi::FromInt(value));
+ }
+
// Lookup in the map's instance descriptors and fill out the result
// with the given holder if the name is found. The holder may be
// NULL when this function is used from the compiler.
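
The cache format documented above (an entry count in slot 0, then prototype/map pairs) can be modelled directly. The sketch below is a hypothetical linear-scan version using plain pointers; it shows the layout, not V8's actual GetPrototypeTransition/PutPrototypeTransition.

#include <cassert>
#include <stdint.h>
#include <vector>

// Plain pointers stand in for heap objects.
struct Proto {};
struct MapObj {};

static const int kHeaderSize = 1;  // slot 0: number of entries
static const int kEntrySize = 2;   // (prototype, map) pairs

static int NumberOfEntries(const std::vector<void*>& cache) {
  if (cache.empty()) return 0;
  return static_cast<int>(reinterpret_cast<intptr_t>(cache[0]));
}

static MapObj* Get(const std::vector<void*>& cache, Proto* prototype) {
  for (int i = 0; i < NumberOfEntries(cache); i++) {
    int base = kHeaderSize + i * kEntrySize;
    if (cache[base] == prototype) return static_cast<MapObj*>(cache[base + 1]);
  }
  return NULL;
}

static void Put(std::vector<void*>* cache, Proto* prototype, MapObj* map) {
  if (cache->empty()) cache->push_back(reinterpret_cast<void*>(0));
  cache->push_back(prototype);
  cache->push_back(map);
  intptr_t count = NumberOfEntries(*cache) + 1;
  (*cache)[0] = reinterpret_cast<void*>(count);
}

int main() {
  std::vector<void*> cache;
  Proto p;
  MapObj m;
  Put(&cache, &p, &m);
  assert(NumberOfEntries(cache) == 1);
  assert(Get(cache, &p) == &m);
  assert(Get(cache, NULL) == NULL);
  return 0;
}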
@@ -3674,20 +4032,26 @@ class Map: public HeapObject {
// instance descriptors.
MUST_USE_RESULT MaybeObject* CopyDropTransitions();
- // Returns this map if it has the fast elements bit set, otherwise
+ // Returns this map if it already has elements that are fast, otherwise
// returns a copy of the map, with all transitions dropped from the
- // descriptors and the fast elements bit set.
+ // descriptors and the ElementsKind set to FAST_ELEMENTS.
MUST_USE_RESULT inline MaybeObject* GetFastElementsMap();
- // Returns this map if it has the fast elements bit cleared,
- // otherwise returns a copy of the map, with all transitions dropped
- // from the descriptors and the fast elements bit cleared.
+ // Returns this map if it already has fast elements that are doubles,
+ // otherwise returns a copy of the map, with all transitions dropped from the
+ // descriptors and the ElementsKind set to FAST_DOUBLE_ELEMENTS.
+ MUST_USE_RESULT inline MaybeObject* GetFastDoubleElementsMap();
+
+ // Returns this map if it already has dictionary elements, otherwise returns
+ // a copy of the map, with all transitions dropped from the descriptors and
+ // the
+ // copy of the map, with all transitions dropped from the descriptors and the
+ // ElementsKind set to DICTIONARY_ELEMENTS.
MUST_USE_RESULT inline MaybeObject* GetSlowElementsMap();
- // Returns this map if it has the pixel array elements bit is set, otherwise
- // returns a copy of the map, with all transitions dropped from the
- // descriptors and the pixel array elements bit set.
- MUST_USE_RESULT inline MaybeObject* GetPixelArrayElementsMap();
+ // Returns a new map with all transitions dropped from the descriptors and the
+ // ElementsKind set to the value corresponding to array_type.
+ MUST_USE_RESULT MaybeObject* GetExternalArrayElementsMap(
+ ExternalArrayType array_type,
+ bool safe_to_add_transition);
// Returns the property index for name (only valid for FAST MODE).
int PropertyIndexFor(String* name);
@@ -3707,7 +4071,7 @@ class Map: public HeapObject {
// Code cache operations.
// Clears the code cache.
- inline void ClearCodeCache();
+ inline void ClearCodeCache(Heap* heap);
// Update code cache.
MUST_USE_RESULT MaybeObject* UpdateCodeCache(String* name, Code* code);
@@ -3731,7 +4095,22 @@ class Map: public HeapObject {
// Also, restore the original prototype on the targets of these
// transitions, so that we do not process this map again while
// following back pointers.
- void ClearNonLiveTransitions(Object* real_prototype);
+ void ClearNonLiveTransitions(Heap* heap, Object* real_prototype);
+
+ // Computes a hash value for this map, to be used in HashTables and such.
+ int Hash();
+
+ // Compares this map to another to see if they describe equivalent objects.
+ // If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
+ // it had exactly zero inobject properties.
+ // The "shared" flags of both this map and |other| are ignored.
+ bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);
+
+ // Returns true if this map and |other| describe equivalent objects.
+ // The "shared" flags of both this map and |other| are ignored.
+ bool EquivalentTo(Map* other) {
+ return EquivalentToForNormalization(other, KEEP_INOBJECT_PROPERTIES);
+ }
// Dispatched behavior.
#ifdef OBJECT_PRINT
@@ -3748,10 +4127,20 @@ class Map: public HeapObject {
inline int visitor_id();
inline void set_visitor_id(int visitor_id);
+ // Returns the isolate/heap this map belongs to.
+ inline Isolate* isolate();
+ inline Heap* heap();
+
typedef void (*TraverseCallback)(Map* map, void* data);
void TraverseTransitionTree(TraverseCallback callback, void* data);
+ static const int kMaxCachedPrototypeTransitions = 256;
+
+ Object* GetPrototypeTransition(Object* prototype);
+
+ MaybeObject* PutPrototypeTransition(Object* prototype, Map* map);
+
static const int kMaxPreAllocatedPropertyFields = 255;
// Layout description.
@@ -3759,17 +4148,29 @@ class Map: public HeapObject {
static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
static const int kPrototypeOffset = kInstanceAttributesOffset + kIntSize;
static const int kConstructorOffset = kPrototypeOffset + kPointerSize;
- static const int kInstanceDescriptorsOffset =
+ // Storage for instance descriptors is overloaded to also contain additional
+ // map flags when unused (bit_field3). When the map has instance descriptors,
+ // the flags are transferred to the instance descriptor array and accessed
+ // through an extra indirection.
+ // TODO(1399): It should be possible to make room for bit_field3 in the map
+ // without overloading the instance descriptors field, but the map is
+ // currently perfectly aligned to 32 bytes and extending it at all would
+ // double its size. After the incremental GC work lands, this size restriction
+ // could be loosened and bit_field3 moved directly back in the map.
+ static const int kInstanceDescriptorsOrBitField3Offset =
kConstructorOffset + kPointerSize;
- static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
- static const int kPadStart = kCodeCacheOffset + kPointerSize;
+ static const int kCodeCacheOffset =
+ kInstanceDescriptorsOrBitField3Offset + kPointerSize;
+ static const int kPrototypeTransitionsOffset =
+ kCodeCacheOffset + kPointerSize;
+ static const int kPadStart = kPrototypeTransitionsOffset + kPointerSize;
static const int kSize = MAP_POINTER_ALIGN(kPadStart);
// Layout of pointer fields. Heap iteration code relies on them
// being contiguously allocated.
static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
static const int kPointerFieldsEndOffset =
- Map::kCodeCacheOffset + kPointerSize;
+ Map::kPrototypeTransitionsOffset + kPointerSize;
// Byte offsets within kInstanceSizesOffset.
static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
@@ -3803,11 +4204,21 @@ class Map: public HeapObject {
// Bit positions for bit field 2
static const int kIsExtensible = 0;
static const int kFunctionWithPrototype = 1;
- static const int kHasFastElements = 2;
- static const int kStringWrapperSafeForDefaultValueOf = 3;
- static const int kAttachedToSharedFunctionInfo = 4;
- static const int kIsShared = 5;
- static const int kHasPixelArrayElements = 6;
+ static const int kStringWrapperSafeForDefaultValueOf = 2;
+ static const int kAttachedToSharedFunctionInfo = 3;
+ // No bits can be used after kElementsKindShift; they are all reserved
+ // for storing the ElementsKind.
+ static const int kElementsKindShift = 4;
+ static const int kElementsKindBitCount = 4;
+
+ // Derived values from bit field 2
+ static const int kElementsKindMask = (-1 << kElementsKindShift) &
+ ((1 << (kElementsKindShift + kElementsKindBitCount)) - 1);
+ static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>(
+ (JSObject::FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1;
+
+ // Bit positions for bit field 3
+ static const int kIsShared = 0;
// Layout of the default cache. It holds alternating name and code objects.
static const int kCodeCacheEntrySize = 2;
@@ -3824,7 +4235,7 @@ class Map: public HeapObject {
// An abstract superclass, a marker class really, for simple structure classes.
-// It doesn't carry much functionality but allows struct classes to me
+// It doesn't carry much functionality but allows struct classes to be
// identified in the type system.
class Struct: public HeapObject {
public:
@@ -3872,7 +4283,7 @@ class Script: public Struct {
DECL_ACCESSORS(context_data, Object)
// [wrapper]: the wrapper cache.
- DECL_ACCESSORS(wrapper, Proxy)
+ DECL_ACCESSORS(wrapper, Foreign)
// [type]: the script type.
DECL_ACCESSORS(type, Smi)
@@ -4102,9 +4513,7 @@ class SharedFunctionInfo: public HeapObject {
// False if there are definitely no live objects created from this function.
// True if live objects _may_ exist (existence not guaranteed).
// May go back from true to false after GC.
- inline bool live_objects_may_exist();
-
- inline void set_live_objects_may_exist(bool value);
+ DECL_BOOLEAN_ACCESSORS(live_objects_may_exist)
// [instance class name]: class name for instances.
DECL_ACCESSORS(instance_class_name, Object)
@@ -4192,14 +4601,10 @@ class SharedFunctionInfo: public HeapObject {
// this.x = y; where y is either a constant or refers to an argument.
inline bool has_only_simple_this_property_assignments();
- inline bool try_full_codegen();
- inline void set_try_full_codegen(bool flag);
-
// Indicates if this function can be lazy compiled.
// This is used to determine if we can safely flush code from a function
// when doing GC if we expect that the function will no longer be used.
- inline bool allows_lazy_compilation();
- inline void set_allows_lazy_compilation(bool flag);
+ DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation)
// Indicates how many full GCs this function has survived with assigned
// code object. Used to determine when it is relatively safe to flush
@@ -4213,12 +4618,28 @@ class SharedFunctionInfo: public HeapObject {
// shared function info. If a function is repeatedly optimized or if
// we cannot optimize the function we disable optimization to avoid
// spending time attempting to optimize it again.
- inline bool optimization_disabled();
- inline void set_optimization_disabled(bool value);
+ DECL_BOOLEAN_ACCESSORS(optimization_disabled)
// Indicates whether the function is a strict mode function.
- inline bool strict_mode();
- inline void set_strict_mode(bool value);
+ DECL_BOOLEAN_ACCESSORS(strict_mode)
+
+ // False if the function definitely does not allocate an arguments object.
+ DECL_BOOLEAN_ACCESSORS(uses_arguments)
+
+ // True if the function has any duplicated parameter names.
+ DECL_BOOLEAN_ACCESSORS(has_duplicate_parameters)
+
+ // Indicates whether the function is a native function.
+ // These need special treatment in .call and .apply since
+ // null passed as the receiver should not be translated to the
+ // global object.
+ inline bool native();
+ inline void set_native(bool value);
+
+ // Indicates whether the function is a bound function created using
+ // the bind function.
+ inline bool bound();
+ inline void set_bound(bool value);
// Indicates whether or not the code in the shared function support
// deoptimization.
@@ -4227,6 +4648,11 @@ class SharedFunctionInfo: public HeapObject {
// Enable deoptimization support through recompiled code.
void EnableDeoptimizationSupport(Code* recompiled);
+ // Disable (further) attempted optimization of all functions sharing this
+ // shared function info. The function is the one we actually tried to
+ // optimize.
+ void DisableOptimization(JSFunction* function);
+
// Lookup the bailout ID and ASSERT that it exists in the non-optimized
// code, returns whether it asserted (i.e., always true if assertions are
// disabled).
@@ -4393,14 +4819,21 @@ class SharedFunctionInfo: public HeapObject {
static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
// Bit positions in compiler_hints.
- static const int kHasOnlySimpleThisPropertyAssignments = 0;
- static const int kTryFullCodegen = 1;
- static const int kAllowLazyCompilation = 2;
- static const int kLiveObjectsMayExist = 3;
- static const int kCodeAgeShift = 4;
- static const int kCodeAgeMask = 0x7;
- static const int kOptimizationDisabled = 7;
- static const int kStrictModeFunction = 8;
+ static const int kCodeAgeSize = 3;
+ static const int kCodeAgeMask = (1 << kCodeAgeSize) - 1;
+ static const int kBoundFunction = 9;
+
+ enum CompilerHints {
+ kHasOnlySimpleThisPropertyAssignments,
+ kAllowLazyCompilation,
+ kLiveObjectsMayExist,
+ kCodeAgeShift,
+ kOptimizationDisabled = kCodeAgeShift + kCodeAgeSize,
+ kStrictModeFunction,
+ kUsesArguments,
+ kHasDuplicateParameters,
+ kNative
+ };
private:
#if V8_HOST_ARCH_32_BIT
@@ -4414,18 +4847,27 @@ class SharedFunctionInfo: public HeapObject {
#endif
public:
- // Constants for optimizing codegen for strict mode function tests.
+ // Constants for optimizing codegen for strict mode function and
+ // native tests.
// Allows the use of byte-width instructions.
static const int kStrictModeBitWithinByte =
(kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
+ static const int kNativeBitWithinByte =
+ (kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;
+
#if __BYTE_ORDER == __LITTLE_ENDIAN
static const int kStrictModeByteOffset = kCompilerHintsOffset +
- (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
+ (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
+ static const int kNativeByteOffset = kCompilerHintsOffset +
+ (kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
#elif __BYTE_ORDER == __BIG_ENDIAN
static const int kStrictModeByteOffset = kCompilerHintsOffset +
- (kCompilerHintsSize - 1) -
- ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
+ (kCompilerHintsSize - 1) -
+ ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
+ static const int kNativeByteOffset = kCompilerHintsOffset +
+ (kCompilerHintsSize - 1) -
+ ((kNative + kCompilerHintsSmiTagSize) / kBitsPerByte);
#else
#error Unknown byte ordering
#endif
@@ -4471,6 +4913,9 @@ class JSFunction: public JSObject {
// Tells whether or not this function has been optimized.
inline bool IsOptimized();
+ // Tells whether or not this function can be optimized.
+ inline bool IsOptimizable();
+
// Mark this function for lazy recompilation. The function will be
// recompiled the next time it is executed.
void MarkForLazyRecompilation();
@@ -4581,6 +5026,7 @@ class JSFunction: public JSObject {
// Layout of the literals array.
static const int kLiteralsPrefixSize = 1;
static const int kLiteralGlobalContextIndex = 0;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
};
@@ -4596,7 +5042,7 @@ class JSFunction: public JSObject {
class JSGlobalProxy : public JSObject {
public:
- // [context]: the owner global context of this proxy object.
+ // [context]: the owner global context of this global proxy object.
// It is null value if this object is not used by any context.
DECL_ACCESSORS(context, Object)
@@ -4619,7 +5065,6 @@ class JSGlobalProxy : public JSObject {
static const int kSize = kContextOffset + kPointerSize;
private:
-
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalProxy);
};
@@ -4675,7 +5120,6 @@ class GlobalObject: public JSObject {
// JavaScript global object.
class JSGlobalObject: public GlobalObject {
public:
-
// Casting.
static inline JSGlobalObject* cast(Object* obj);
@@ -5089,13 +5533,56 @@ class CodeCacheHashTable: public HashTable<CodeCacheHashTableShape,
};
+class PolymorphicCodeCache: public Struct {
+ public:
+ DECL_ACCESSORS(cache, Object)
+
+ MUST_USE_RESULT MaybeObject* Update(MapList* maps,
+ Code::Flags flags,
+ Code* code);
+ Object* Lookup(MapList* maps, Code::Flags flags);
+
+ static inline PolymorphicCodeCache* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+ inline void PolymorphicCodeCachePrint() {
+ PolymorphicCodeCachePrint(stdout);
+ }
+ void PolymorphicCodeCachePrint(FILE* out);
+#endif
+#ifdef DEBUG
+ void PolymorphicCodeCacheVerify();
+#endif
+
+ static const int kCacheOffset = HeapObject::kHeaderSize;
+ static const int kSize = kCacheOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PolymorphicCodeCache);
+};
+
+
+class PolymorphicCodeCacheHashTable
+ : public HashTable<CodeCacheHashTableShape, HashTableKey*> {
+ public:
+ Object* Lookup(MapList* maps, int code_kind);
+ MUST_USE_RESULT MaybeObject* Put(MapList* maps, int code_kind, Code* code);
+
+ static inline PolymorphicCodeCacheHashTable* cast(Object* obj);
+
+ static const int kInitialSize = 64;
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PolymorphicCodeCacheHashTable);
+};
+
+
enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
class StringHasher {
public:
- inline StringHasher(int length);
+ explicit inline StringHasher(int length);
// Returns true if the hash of this string can be computed without
// looking at the contents.
@@ -5127,7 +5614,6 @@ class StringHasher {
static uint32_t MakeArrayIndexHash(uint32_t value, int length);
private:
-
uint32_t array_index() {
ASSERT(is_array_index());
return array_index_;
@@ -5145,6 +5631,11 @@ class StringHasher {
};
+// Calculates string hash.
+template <typename schar>
+inline uint32_t HashSequentialString(const schar* chars, int length);
+
+
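
As an illustration of what a sequential string hash of this shape looks like, the sketch below uses the classic Jenkins one-at-a-time mixing steps; treat it as a stand-in for, not a copy of, V8's StringHasher.

#include <cassert>
#include <stdint.h>

// Hashes a character sequence in one pass, templated on character width
// like the declaration above. Jenkins one-at-a-time mixing, for
// illustration only.
template <typename schar>
inline uint32_t HashSequentialString(const schar* chars, int length) {
  uint32_t hash = 0;
  for (int i = 0; i < length; i++) {
    hash += static_cast<uint32_t>(chars[i]);
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash;
}

int main() {
  const char* narrow = "hello";
  uint16_t wide[] = { 'h', 'e', 'l', 'l', 'o' };
  // Same characters hash the same whether stored as 8-bit or 16-bit.
  assert(HashSequentialString(narrow, 5) == HashSequentialString(wide, 5));
  return 0;
}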
// The characteristics of a string are stored in its map. Retrieving these
// few bits of information is moderately expensive, involving two memory
// loads where the second is dependent on the first. To improve efficiency
@@ -5179,6 +5670,7 @@ class StringShape BASE_EMBEDDED {
#else
inline void invalidate() { }
#endif
+
private:
uint32_t type_;
#ifdef DEBUG
@@ -5516,7 +6008,6 @@ class String: public HeapObject {
// The SeqString abstract class captures sequential string values.
class SeqString: public String {
public:
-
// Casting.
static inline SeqString* cast(Object* obj);
@@ -5788,23 +6279,20 @@ class ExternalTwoByteString: public ExternalString {
// iterating or updating after gc.
class Relocatable BASE_EMBEDDED {
public:
- inline Relocatable() : prev_(top_) { top_ = this; }
- virtual ~Relocatable() {
- ASSERT_EQ(top_, this);
- top_ = prev_;
- }
+ explicit inline Relocatable(Isolate* isolate);
+ inline virtual ~Relocatable();
virtual void IterateInstance(ObjectVisitor* v) { }
virtual void PostGarbageCollection() { }
static void PostGarbageCollectionProcessing();
static int ArchiveSpacePerThread();
- static char* ArchiveState(char* to);
- static char* RestoreState(char* from);
+ static char* ArchiveState(Isolate* isolate, char* to);
+ static char* RestoreState(Isolate* isolate, char* from);
static void Iterate(ObjectVisitor* v);
static void Iterate(ObjectVisitor* v, Relocatable* top);
static char* Iterate(ObjectVisitor* v, char* t);
private:
- static Relocatable* top_;
+ Isolate* isolate_;
Relocatable* prev_;
};
@@ -5814,8 +6302,8 @@ class Relocatable BASE_EMBEDDED {
// must be valid as long as the reader is being used.
class FlatStringReader : public Relocatable {
public:
- explicit FlatStringReader(Handle<String> str);
- explicit FlatStringReader(Vector<const char> input);
+ FlatStringReader(Isolate* isolate, Handle<String> str);
+ FlatStringReader(Isolate* isolate, Vector<const char> input);
void PostGarbageCollection();
inline uc32 Get(int index);
int length() { return length_; }
@@ -5840,7 +6328,7 @@ class StringInputBuffer: public unibrow::InputBuffer<String, String*, 1024> {
public:
virtual void Seek(unsigned pos);
inline StringInputBuffer(): unibrow::InputBuffer<String, String*, 1024>() {}
- inline StringInputBuffer(String* backing):
+ explicit inline StringInputBuffer(String* backing):
unibrow::InputBuffer<String, String*, 1024>(backing) {}
};
@@ -5851,7 +6339,7 @@ class SafeStringInputBuffer
virtual void Seek(unsigned pos);
inline SafeStringInputBuffer()
: unibrow::InputBuffer<String, String**, 256>() {}
- inline SafeStringInputBuffer(String** backing)
+ explicit inline SafeStringInputBuffer(String** backing)
: unibrow::InputBuffer<String, String**, 256>(backing) {}
};
@@ -5878,6 +6366,9 @@ class Oddball: public HeapObject {
// [to_number]: Cached to_number computed at startup.
DECL_ACCESSORS(to_number, Object)
+ inline byte kind();
+ inline void set_kind(byte kind);
+
// Casting.
static inline Oddball* cast(Object* obj);
@@ -5888,12 +6379,23 @@ class Oddball: public HeapObject {
// Initialize the fields.
MUST_USE_RESULT MaybeObject* Initialize(const char* to_string,
- Object* to_number);
+ Object* to_number,
+ byte kind);
// Layout description.
static const int kToStringOffset = HeapObject::kHeaderSize;
static const int kToNumberOffset = kToStringOffset + kPointerSize;
- static const int kSize = kToNumberOffset + kPointerSize;
+ static const int kKindOffset = kToNumberOffset + kPointerSize;
+ static const int kSize = kKindOffset + kPointerSize;
+
+ static const byte kFalse = 0;
+ static const byte kTrue = 1;
+ static const byte kNotBooleanMask = ~1;
+ static const byte kTheHole = 2;
+ static const byte kNull = 3;
+ static const byte kArgumentMarker = 4;
+ static const byte kUndefined = 5;
+ static const byte kOther = 6;
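
Since kFalse and kTrue are 0 and 1, kNotBooleanMask (~1) lets a single AND decide whether an oddball is a boolean. A minimal sketch, with IsBoolean as a hypothetical helper:

#include <cassert>

typedef unsigned char byte;
static const byte kFalse = 0;
static const byte kTrue = 1;
static const byte kNotBooleanMask = static_cast<byte>(~1);
static const byte kTheHole = 2;
static const byte kUndefined = 5;

// Only kinds 0 and 1 vanish under the mask, so one AND suffices.
static bool IsBoolean(byte kind) { return (kind & kNotBooleanMask) == 0; }

int main() {
  assert(IsBoolean(kFalse));
  assert(IsBoolean(kTrue));
  assert(!IsBoolean(kTheHole));
  assert(!IsBoolean(kUndefined));
  return 0;
}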
typedef FixedBodyDescriptor<kToStringOffset,
kToNumberOffset + kPointerSize,
@@ -5930,49 +6432,107 @@ class JSGlobalPropertyCell: public HeapObject {
kValueOffset + kPointerSize,
kSize> BodyDescriptor;
+ // Returns the isolate/heap this cell object belongs to.
+ inline Isolate* isolate();
+ inline Heap* heap();
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
};
+// The JSProxy describes ECMAScript Harmony proxies.
+class JSProxy: public JSReceiver {
+ public:
+ // [handler]: The handler property.
+ DECL_ACCESSORS(handler, Object)
+
+ // Casting.
+ static inline JSProxy* cast(Object* obj);
+
+ MUST_USE_RESULT MaybeObject* SetPropertyWithHandler(
+ String* name_raw,
+ Object* value_raw,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
+
+ MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler(
+ JSReceiver* receiver,
+ String* name_raw,
+ bool* has_exception);
+
+ // Dispatched behavior.
+#ifdef OBJECT_PRINT
+ inline void JSProxyPrint() {
+ JSProxyPrint(stdout);
+ }
+ void JSProxyPrint(FILE* out);
+#endif
+#ifdef DEBUG
+ void JSProxyVerify();
+#endif
+
+ // Layout description.
+ static const int kHandlerOffset = HeapObject::kHeaderSize;
+ static const int kSize = kHandlerOffset + kPointerSize;
+
+ typedef FixedBodyDescriptor<kHandlerOffset,
+ kHandlerOffset + kPointerSize,
+ kSize> BodyDescriptor;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
+};
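
A hypothetical sketch of the control flow SetPropertyWithHandler's signature implies: every property store on a proxy is forwarded to a user-supplied handler rather than hitting storage directly. The types below are illustrative stand-ins, not V8's trap semantics.

#include <cassert>
#include <map>
#include <string>

struct Handler {
  virtual ~Handler() {}
  virtual bool Set(const std::string& name, int value) = 0;
};

struct ProxySketch {
  Handler* handler;
  bool SetProperty(const std::string& name, int value) {
    return handler->Set(name, value);  // every store goes to the trap
  }
};

struct RecordingHandler : Handler {
  std::map<std::string, int> store;
  virtual bool Set(const std::string& name, int value) {
    store[name] = value;
    return true;
  }
};

int main() {
  RecordingHandler h;
  ProxySketch p = { &h };
  assert(p.SetProperty("x", 42));
  assert(h.store["x"] == 42);  // the handler, not the proxy, holds state
  return 0;
}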
+
+
+// TODO(rossberg): Only a stub for now.
+class JSFunctionProxy: public JSProxy {
+ public:
+ // Casting.
+ static inline JSFunctionProxy* cast(Object* obj);
-// Proxy describes objects pointing from JavaScript to C structures.
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunctionProxy);
+};
+
+
+// Foreign describes objects pointing from JavaScript to C structures.
// Since they cannot contain references to JS HeapObjects they can be
// placed in old_data_space.
-class Proxy: public HeapObject {
+class Foreign: public HeapObject {
public:
- // [proxy]: field containing the address.
- inline Address proxy();
- inline void set_proxy(Address value);
+ // [address]: field containing the address.
+ inline Address address();
+ inline void set_address(Address value);
// Casting.
- static inline Proxy* cast(Object* obj);
+ static inline Foreign* cast(Object* obj);
// Dispatched behavior.
- inline void ProxyIterateBody(ObjectVisitor* v);
+ inline void ForeignIterateBody(ObjectVisitor* v);
template<typename StaticVisitor>
- inline void ProxyIterateBody();
+ inline void ForeignIterateBody();
#ifdef OBJECT_PRINT
- inline void ProxyPrint() {
- ProxyPrint(stdout);
+ inline void ForeignPrint() {
+ ForeignPrint(stdout);
}
- void ProxyPrint(FILE* out);
+ void ForeignPrint(FILE* out);
#endif
#ifdef DEBUG
- void ProxyVerify();
+ void ForeignVerify();
#endif
// Layout description.
- static const int kProxyOffset = HeapObject::kHeaderSize;
- static const int kSize = kProxyOffset + kPointerSize;
+ static const int kAddressOffset = HeapObject::kHeaderSize;
+ static const int kSize = kAddressOffset + kPointerSize;
- STATIC_CHECK(kProxyOffset == Internals::kProxyProxyOffset);
+ STATIC_CHECK(kAddressOffset == Internals::kForeignAddressOffset);
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Proxy);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
};
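
A minimal model of the renamed class: the only payload is a raw address into C/C++ memory, which is exactly why the GC never needs to trace through it. The type and accessors below mirror the declarations above in shape only.

#include <cassert>

typedef unsigned char* Address;

// Sketch of a Foreign-like cell: one untraced raw address, nothing else.
struct ForeignSketch {
  Address address_;
  Address address() const { return address_; }
  void set_address(Address value) { address_ = value; }
};

int main() {
  int native_data = 7;  // some C structure the VM wants to point at
  ForeignSketch f;
  f.set_address(reinterpret_cast<Address>(&native_data));
  assert(*reinterpret_cast<int*>(f.address()) == 7);
  return 0;
}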
@@ -6235,6 +6795,7 @@ class FunctionTemplateInfo: public TemplateInfo {
DECL_ACCESSORS(instance_call_handler, Object)
DECL_ACCESSORS(access_check_info, Object)
DECL_ACCESSORS(flag, Smi)
+ DECL_ACCESSORS(prototype_attributes, Smi)
// Following properties use flag bits.
DECL_BOOLEAN_ACCESSORS(hidden_prototype)
@@ -6274,7 +6835,8 @@ class FunctionTemplateInfo: public TemplateInfo {
static const int kAccessCheckInfoOffset =
kInstanceCallHandlerOffset + kPointerSize;
static const int kFlagOffset = kAccessCheckInfoOffset + kPointerSize;
- static const int kSize = kFlagOffset + kPointerSize;
+ static const int kPrototypeAttributesOffset = kFlagOffset + kPointerSize;
+ static const int kSize = kPrototypeAttributesOffset + kPointerSize;
private:
// Bit position in the flag, from least significant bit position.
@@ -6526,6 +7088,9 @@ class ObjectVisitor BASE_EMBEDDED {
VisitExternalReferences(p, p + 1);
}
+ // Visits a handle that has an embedder-assigned class ID.
+ virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {}
+
#ifdef DEBUG
// Intended for serialization/deserialization checking: insert, or
// check for the presence of, a tag at this position in the stream.
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 6d462bc53..4e5ba1389 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,7 +28,7 @@
#include "v8.h"
#include "api.h"
-#include "ast.h"
+#include "ast-inl.h"
#include "bootstrapper.h"
#include "codegen.h"
#include "compiler.h"
@@ -41,9 +41,6 @@
#include "scopeinfo.h"
#include "string-stream.h"
-#include "ast-inl.h"
-#include "jump-target-inl.h"
-
namespace v8 {
namespace internal {
@@ -88,12 +85,13 @@ class PositionStack {
RegExpBuilder::RegExpBuilder()
- : pending_empty_(false),
- characters_(NULL),
- terms_(),
- alternatives_()
+ : zone_(Isolate::Current()->zone()),
+ pending_empty_(false),
+ characters_(NULL),
+ terms_(),
+ alternatives_()
#ifdef DEBUG
- , last_added_(ADD_NONE)
+ , last_added_(ADD_NONE)
#endif
{}
@@ -101,7 +99,7 @@ RegExpBuilder::RegExpBuilder()
void RegExpBuilder::FlushCharacters() {
pending_empty_ = false;
if (characters_ != NULL) {
- RegExpTree* atom = new RegExpAtom(characters_->ToConstVector());
+ RegExpTree* atom = new(zone()) RegExpAtom(characters_->ToConstVector());
characters_ = NULL;
text_.Add(atom);
LAST(ADD_ATOM);
@@ -117,7 +115,7 @@ void RegExpBuilder::FlushText() {
} else if (num_text == 1) {
terms_.Add(text_.last());
} else {
- RegExpText* text = new RegExpText();
+ RegExpText* text = new(zone()) RegExpText();
for (int i = 0; i < num_text; i++)
text_.Get(i)->AppendToText(text);
terms_.Add(text);
@@ -129,7 +127,7 @@ void RegExpBuilder::FlushText() {
void RegExpBuilder::AddCharacter(uc16 c) {
pending_empty_ = false;
if (characters_ == NULL) {
- characters_ = new ZoneList<uc16>(4);
+ characters_ = new(zone()) ZoneList<uc16>(4);
}
characters_->Add(c);
LAST(ADD_CHAR);
@@ -178,7 +176,7 @@ void RegExpBuilder::FlushTerms() {
} else if (num_terms == 1) {
alternative = terms_.last();
} else {
- alternative = new RegExpAlternative(terms_.GetList());
+ alternative = new(zone()) RegExpAlternative(terms_.GetList());
}
alternatives_.Add(alternative);
terms_.Clear();
@@ -195,7 +193,7 @@ RegExpTree* RegExpBuilder::ToRegExp() {
if (num_alternatives == 1) {
return alternatives_.last();
}
- return new RegExpDisjunction(alternatives_.GetList());
+ return new(zone()) RegExpDisjunction(alternatives_.GetList());
}
@@ -214,11 +212,11 @@ void RegExpBuilder::AddQuantifierToAtom(int min,
int num_chars = char_vector.length();
if (num_chars > 1) {
Vector<const uc16> prefix = char_vector.SubVector(0, num_chars - 1);
- text_.Add(new RegExpAtom(prefix));
+ text_.Add(new(zone()) RegExpAtom(prefix));
char_vector = char_vector.SubVector(num_chars - 1, num_chars);
}
characters_ = NULL;
- atom = new RegExpAtom(char_vector);
+ atom = new(zone()) RegExpAtom(char_vector);
FlushText();
} else if (text_.length() > 0) {
ASSERT(last_added_ == ADD_ATOM);
@@ -241,98 +239,11 @@ void RegExpBuilder::AddQuantifierToAtom(int min,
UNREACHABLE();
return;
}
- terms_.Add(new RegExpQuantifier(min, max, type, atom));
+ terms_.Add(new(zone()) RegExpQuantifier(min, max, type, atom));
LAST(ADD_TERM);
}
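
All of the new(zone()) edits above route node allocation through a placement operator new on a bump-pointer zone, so an entire parse's worth of nodes can be freed at once. A simplified standalone sketch, without the chunk growth and alignment handling of the real Zone:

#include <cassert>
#include <cstddef>

class Zone {
 public:
  Zone() : top_(buffer_) {}
  void* New(size_t size) {
    void* result = top_;
    top_ += size;
    assert(top_ <= buffer_ + sizeof(buffer_));  // no growth in this sketch
    return result;
  }
 private:
  char buffer_[4096];
  char* top_;
};

// The placement form that makes "new(zone) T(...)" work.
inline void* operator new(size_t size, Zone* zone) { return zone->New(size); }

struct RegExpAtomSketch {
  explicit RegExpAtomSketch(int len) : length(len) {}
  int length;
};

int main() {
  Zone zone;
  // Mirrors: RegExpTree* atom = new(zone()) RegExpAtom(...);
  RegExpAtomSketch* atom = new(&zone) RegExpAtomSketch(3);
  assert(atom->length == 3);
  return 0;  // zone memory is reclaimed wholesale, no per-node delete
}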
-// A temporary scope stores information during parsing, just like
-// a plain scope. However, temporary scopes are not kept around
-// after parsing or referenced by syntax trees so they can be stack-
-// allocated and hence used by the pre-parser.
-class TemporaryScope BASE_EMBEDDED {
- public:
- explicit TemporaryScope(TemporaryScope** variable);
- ~TemporaryScope();
-
- int NextMaterializedLiteralIndex() {
- int next_index =
- materialized_literal_count_ + JSFunction::kLiteralsPrefixSize;
- materialized_literal_count_++;
- return next_index;
- }
- int materialized_literal_count() { return materialized_literal_count_; }
-
- void SetThisPropertyAssignmentInfo(
- bool only_simple_this_property_assignments,
- Handle<FixedArray> this_property_assignments) {
- only_simple_this_property_assignments_ =
- only_simple_this_property_assignments;
- this_property_assignments_ = this_property_assignments;
- }
- bool only_simple_this_property_assignments() {
- return only_simple_this_property_assignments_;
- }
- Handle<FixedArray> this_property_assignments() {
- return this_property_assignments_;
- }
-
- void AddProperty() { expected_property_count_++; }
- int expected_property_count() { return expected_property_count_; }
-
- void AddLoop() { loop_count_++; }
- bool ContainsLoops() const { return loop_count_ > 0; }
-
- bool StrictMode() { return strict_mode_; }
- void EnableStrictMode() {
- strict_mode_ = FLAG_strict_mode;
- }
-
- private:
- // Captures the number of literals that need materialization in the
- // function. Includes regexp literals, and boilerplate for object
- // and array literals.
- int materialized_literal_count_;
-
- // Properties count estimation.
- int expected_property_count_;
-
- // Keeps track of assignments to properties of this. Used for
- // optimizing constructors.
- bool only_simple_this_property_assignments_;
- Handle<FixedArray> this_property_assignments_;
-
- // Captures the number of loops inside the scope.
- int loop_count_;
-
- // Parsing strict mode code.
- bool strict_mode_;
-
- // Bookkeeping
- TemporaryScope** variable_;
- TemporaryScope* parent_;
-};
-
-
-TemporaryScope::TemporaryScope(TemporaryScope** variable)
- : materialized_literal_count_(0),
- expected_property_count_(0),
- only_simple_this_property_assignments_(false),
- this_property_assignments_(Factory::empty_fixed_array()),
- loop_count_(0),
- variable_(variable),
- parent_(*variable) {
- // Inherit the strict mode from the parent scope.
- strict_mode_ = (parent_ != NULL) && parent_->strict_mode_;
- *variable = this;
-}
-
-
-TemporaryScope::~TemporaryScope() {
- *variable_ = parent_;
-}
-
-
Handle<String> Parser::LookupSymbol(int symbol_id) {
// Length of symbol cache is the number of identified symbols.
// If we are larger than that, or negative, it's not a cached symbol.
@@ -341,9 +252,11 @@ Handle<String> Parser::LookupSymbol(int symbol_id) {
if (static_cast<unsigned>(symbol_id)
>= static_cast<unsigned>(symbol_cache_.length())) {
if (scanner().is_literal_ascii()) {
- return Factory::LookupAsciiSymbol(scanner().literal_ascii_string());
+ return isolate()->factory()->LookupAsciiSymbol(
+ scanner().literal_ascii_string());
} else {
- return Factory::LookupTwoByteSymbol(scanner().literal_uc16_string());
+ return isolate()->factory()->LookupTwoByteSymbol(
+ scanner().literal_uc16_string());
}
}
return LookupCachedSymbol(symbol_id);
@@ -360,14 +273,16 @@ Handle<String> Parser::LookupCachedSymbol(int symbol_id) {
Handle<String> result = symbol_cache_.at(symbol_id);
if (result.is_null()) {
if (scanner().is_literal_ascii()) {
- result = Factory::LookupAsciiSymbol(scanner().literal_ascii_string());
+ result = isolate()->factory()->LookupAsciiSymbol(
+ scanner().literal_ascii_string());
} else {
- result = Factory::LookupTwoByteSymbol(scanner().literal_uc16_string());
+ result = isolate()->factory()->LookupTwoByteSymbol(
+ scanner().literal_uc16_string());
}
symbol_cache_.at(symbol_id) = result;
return result;
}
- Counters::total_preparse_symbols_skipped.Increment();
+ isolate()->counters()->total_preparse_symbols_skipped()->Increment();
return result;
}
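
The pattern in these hunks, replacing static Factory and Counters calls with isolate()->factory() and isolate()->counters(), is what makes multiple isolates possible: each isolate owns its own copies of the formerly global state. A toy sketch with stand-in types:

#include <cassert>
#include <string>

struct Counters {
  int total_preparse_symbols_skipped;
  Counters() : total_preparse_symbols_skipped(0) {}
};

struct Factory {
  std::string LookupAsciiSymbol(const std::string& s) { return s; }
};

class Isolate {
 public:
  Factory* factory() { return &factory_; }
  Counters* counters() { return &counters_; }
 private:
  Factory factory_;
  Counters counters_;
};

int main() {
  Isolate a, b;
  a.counters()->total_preparse_symbols_skipped++;
  // Per-isolate state: bumping a counter in one isolate leaves the
  // other untouched, which process-wide statics could not guarantee.
  assert(b.counters()->total_preparse_symbols_skipped == 0);
  assert(a.factory()->LookupAsciiSymbol("x") == "x");
  return 0;
}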
@@ -491,7 +406,7 @@ unsigned* ScriptDataImpl::ReadAddress(int position) {
Scope* Parser::NewScope(Scope* parent, Scope::Type type, bool inside_with) {
- Scope* result = new Scope(parent, type);
+ Scope* result = new(zone()) Scope(parent, type);
result->Initialize(inside_with);
return result;
}
@@ -544,33 +459,90 @@ class TargetScope BASE_EMBEDDED {
// LexicalScope is a support class to facilitate manipulation of the
// Parser's scope stack. The constructor sets the parser's top scope
// to the incoming scope, and the destructor resets it.
+//
+// Additionally, it stores transient information used during parsing.
+// These scopes are not kept around after parsing or referenced by syntax
+// trees so they can be stack-allocated and hence used by the pre-parser.
class LexicalScope BASE_EMBEDDED {
public:
- LexicalScope(Scope** scope_variable,
- int* with_nesting_level_variable,
- Scope* scope)
- : scope_variable_(scope_variable),
- with_nesting_level_variable_(with_nesting_level_variable),
- prev_scope_(*scope_variable),
- prev_level_(*with_nesting_level_variable) {
- *scope_variable = scope;
- *with_nesting_level_variable = 0;
+ LexicalScope(Parser* parser, Scope* scope, Isolate* isolate);
+ ~LexicalScope();
+
+ int NextMaterializedLiteralIndex() {
+ int next_index =
+ materialized_literal_count_ + JSFunction::kLiteralsPrefixSize;
+ materialized_literal_count_++;
+ return next_index;
}
+ int materialized_literal_count() { return materialized_literal_count_; }
- ~LexicalScope() {
- (*scope_variable_)->Leave();
- *scope_variable_ = prev_scope_;
- *with_nesting_level_variable_ = prev_level_;
+ void SetThisPropertyAssignmentInfo(
+ bool only_simple_this_property_assignments,
+ Handle<FixedArray> this_property_assignments) {
+ only_simple_this_property_assignments_ =
+ only_simple_this_property_assignments;
+ this_property_assignments_ = this_property_assignments;
+ }
+ bool only_simple_this_property_assignments() {
+ return only_simple_this_property_assignments_;
+ }
+ Handle<FixedArray> this_property_assignments() {
+ return this_property_assignments_;
}
+ void AddProperty() { expected_property_count_++; }
+ int expected_property_count() { return expected_property_count_; }
+
private:
- Scope** scope_variable_;
- int* with_nesting_level_variable_;
- Scope* prev_scope_;
- int prev_level_;
+ // Captures the number of literals that need materialization in the
+ // function. Includes regexp literals, and boilerplate for object
+ // and array literals.
+ int materialized_literal_count_;
+
+ // Properties count estimation.
+ int expected_property_count_;
+
+ // Keeps track of assignments to properties of this. Used for
+ // optimizing constructors.
+ bool only_simple_this_property_assignments_;
+ Handle<FixedArray> this_property_assignments_;
+
+ // Bookkeeping
+ Parser* parser_;
+ // Previous values
+ LexicalScope* lexical_scope_parent_;
+ Scope* previous_scope_;
+ int previous_with_nesting_level_;
+ unsigned previous_ast_node_id_;
};
+
+LexicalScope::LexicalScope(Parser* parser, Scope* scope, Isolate* isolate)
+ : materialized_literal_count_(0),
+ expected_property_count_(0),
+ only_simple_this_property_assignments_(false),
+ this_property_assignments_(isolate->factory()->empty_fixed_array()),
+ parser_(parser),
+ lexical_scope_parent_(parser->lexical_scope_),
+ previous_scope_(parser->top_scope_),
+ previous_with_nesting_level_(parser->with_nesting_level_),
+ previous_ast_node_id_(isolate->ast_node_id()) {
+ parser->top_scope_ = scope;
+ parser->lexical_scope_ = this;
+ parser->with_nesting_level_ = 0;
+ isolate->set_ast_node_id(AstNode::kFunctionEntryId + 1);
+}
+
+
+LexicalScope::~LexicalScope() {
+ parser_->top_scope_ = previous_scope_;
+ parser_->lexical_scope_ = lexical_scope_parent_;
+ parser_->with_nesting_level_ = previous_with_nesting_level_;
+ parser_->isolate()->set_ast_node_id(previous_ast_node_id_);
+}
+
+
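
The constructor/destructor pair above is a save-and-restore (RAII) idiom: parser state is stashed on construction and reinstated on destruction, even on early returns. A generic standalone sketch of the same idiom:

#include <cassert>

struct ParserState {
  int with_nesting_level;
  unsigned ast_node_id;
};

class ScopedParserState {
 public:
  explicit ScopedParserState(ParserState* state)
      : state_(state), saved_(*state) {
    state->with_nesting_level = 0;  // fresh function scope
  }
  ~ScopedParserState() { *state_ = saved_; }
 private:
  ParserState* state_;
  ParserState saved_;
};

int main() {
  ParserState state = { 2, 17 };
  {
    ScopedParserState scope(&state);
    assert(state.with_nesting_level == 0);  // reset inside the scope
    state.ast_node_id = 99;                 // mutated while parsing
  }
  assert(state.with_nesting_level == 2);    // restored on exit
  assert(state.ast_node_id == 17);
  return 0;
}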
// ----------------------------------------------------------------------------
// The CHECK_OK macro is a convenient macro to enforce error
// handling for functions that may fail (by returning !*ok).
@@ -598,12 +570,13 @@ Parser::Parser(Handle<Script> script,
bool allow_natives_syntax,
v8::Extension* extension,
ScriptDataImpl* pre_data)
- : symbol_cache_(pre_data ? pre_data->symbol_count() : 0),
+ : isolate_(script->GetIsolate()),
+ symbol_cache_(pre_data ? pre_data->symbol_count() : 0),
script_(script),
- scanner_(),
+ scanner_(isolate_->unicode_cache()),
top_scope_(NULL),
with_nesting_level_(0),
- temp_scope_(NULL),
+ lexical_scope_(NULL),
target_stack_(NULL),
allow_natives_syntax_(allow_natives_syntax),
extension_(extension),
@@ -618,11 +591,11 @@ Parser::Parser(Handle<Script> script,
FunctionLiteral* Parser::ParseProgram(Handle<String> source,
bool in_global_context,
StrictModeFlag strict_mode) {
- CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
+ ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
- HistogramTimerScope timer(&Counters::parse);
- Counters::total_parse_size.Increment(source->length());
- fni_ = new FuncNameInferrer();
+ HistogramTimerScope timer(isolate()->counters()->parse());
+ isolate()->counters()->total_parse_size()->Increment(source->length());
+ fni_ = new(zone()) FuncNameInferrer(isolate());
// Initialize parser state.
source->TryFlatten();
@@ -657,40 +630,37 @@ FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
in_global_context
? Scope::GLOBAL_SCOPE
: Scope::EVAL_SCOPE;
- Handle<String> no_name = Factory::empty_symbol();
+ Handle<String> no_name = isolate()->factory()->empty_symbol();
FunctionLiteral* result = NULL;
{ Scope* scope = NewScope(top_scope_, type, inside_with());
- LexicalScope lexical_scope(&this->top_scope_, &this->with_nesting_level_,
- scope);
- TemporaryScope temp_scope(&this->temp_scope_);
+ LexicalScope lexical_scope(this, scope, isolate());
if (strict_mode == kStrictMode) {
- temp_scope.EnableStrictMode();
+ top_scope_->EnableStrictMode();
}
- ZoneList<Statement*>* body = new ZoneList<Statement*>(16);
+ ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16);
bool ok = true;
int beg_loc = scanner().location().beg_pos;
ParseSourceElements(body, Token::EOS, &ok);
- if (ok && temp_scope_->StrictMode()) {
+ if (ok && top_scope_->is_strict_mode()) {
CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
}
if (ok) {
- result = new FunctionLiteral(
+ result = new(zone()) FunctionLiteral(
no_name,
top_scope_,
body,
- temp_scope.materialized_literal_count(),
- temp_scope.expected_property_count(),
- temp_scope.only_simple_this_property_assignments(),
- temp_scope.this_property_assignments(),
+ lexical_scope.materialized_literal_count(),
+ lexical_scope.expected_property_count(),
+ lexical_scope.only_simple_this_property_assignments(),
+ lexical_scope.this_property_assignments(),
0,
0,
source->length(),
false,
- temp_scope.ContainsLoops(),
- temp_scope.StrictMode());
+ false);
} else if (stack_overflow_) {
- Top::StackOverflow();
+ isolate()->StackOverflow();
}
}
@@ -704,10 +674,10 @@ FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
}
FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
- CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
- HistogramTimerScope timer(&Counters::parse_lazy);
+ ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
+ HistogramTimerScope timer(isolate()->counters()->parse_lazy());
Handle<String> source(String::cast(script_->source()));
- Counters::total_parse_size.Increment(source->length());
+ isolate()->counters()->total_parse_size()->Increment(source->length());
Handle<SharedFunctionInfo> shared_info = info->shared_info();
// Initialize parser state.
@@ -737,7 +707,7 @@ FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
ASSERT(target_stack_ == NULL);
Handle<String> name(String::cast(shared_info->name()));
- fni_ = new FuncNameInferrer();
+ fni_ = new(zone()) FuncNameInferrer(isolate());
fni_->PushEnclosingName(name);
mode_ = PARSE_EAGERLY;
@@ -747,17 +717,15 @@ FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
{
// Parse the function literal.
- Handle<String> no_name = Factory::empty_symbol();
+ Handle<String> no_name = isolate()->factory()->empty_symbol();
Scope* scope = NewScope(top_scope_, Scope::GLOBAL_SCOPE, inside_with());
if (!info->closure().is_null()) {
scope = Scope::DeserializeScopeChain(info, scope);
}
- LexicalScope lexical_scope(&this->top_scope_, &this->with_nesting_level_,
- scope);
- TemporaryScope temp_scope(&this->temp_scope_);
+ LexicalScope lexical_scope(this, scope, isolate());
if (shared_info->strict_mode()) {
- temp_scope.EnableStrictMode();
+ top_scope_->EnableStrictMode();
}
FunctionLiteralType type =
@@ -777,7 +745,7 @@ FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
// not safe to do before scope has been deleted.
if (result == NULL) {
zone_scope->DeleteOnExit();
- if (stack_overflow_) Top::StackOverflow();
+ if (stack_overflow_) isolate()->StackOverflow();
} else {
Handle<String> inferred_name(shared_info->inferred_name());
result->set_inferred_name(inferred_name);
@@ -807,12 +775,15 @@ void Parser::ReportMessageAt(Scanner::Location source_location,
MessageLocation location(script_,
source_location.beg_pos,
source_location.end_pos);
- Handle<JSArray> array = Factory::NewJSArray(args.length());
+ Factory* factory = isolate()->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
- SetElement(array, i, Factory::NewStringFromUtf8(CStrVector(args[i])));
+ Handle<String> arg_string = factory->NewStringFromUtf8(CStrVector(args[i]));
+ elements->set(i, *arg_string);
}
- Handle<Object> result = Factory::NewSyntaxError(type, array);
- Top::Throw(*result, &location);
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+ Handle<Object> result = factory->NewSyntaxError(type, array);
+ isolate()->Throw(*result, &location);
}
@@ -822,12 +793,14 @@ void Parser::ReportMessageAt(Scanner::Location source_location,
MessageLocation location(script_,
source_location.beg_pos,
source_location.end_pos);
- Handle<JSArray> array = Factory::NewJSArray(args.length());
+ Factory* factory = isolate()->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
- SetElement(array, i, args[i]);
+ elements->set(i, *args[i]);
}
- Handle<Object> result = Factory::NewSyntaxError(type, array);
- Top::Throw(*result, &location);
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+ Handle<Object> result = factory->NewSyntaxError(type, array);
+ isolate()->Throw(*result, &location);
}
@@ -952,8 +925,9 @@ class InitializationBlockFinder : public ParserFinder {
// function contains only assignments of this type.
class ThisNamedPropertyAssigmentFinder : public ParserFinder {
public:
- ThisNamedPropertyAssigmentFinder()
- : only_simple_this_property_assignments_(true),
+ explicit ThisNamedPropertyAssigmentFinder(Isolate* isolate)
+ : isolate_(isolate),
+ only_simple_this_property_assignments_(true),
names_(NULL),
assigned_arguments_(NULL),
assigned_constants_(NULL) {}
@@ -984,14 +958,14 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder {
// form this.x = y;
Handle<FixedArray> GetThisPropertyAssignments() {
if (names_ == NULL) {
- return Factory::empty_fixed_array();
+ return isolate_->factory()->empty_fixed_array();
}
ASSERT(names_ != NULL);
ASSERT(assigned_arguments_ != NULL);
ASSERT_EQ(names_->length(), assigned_arguments_->length());
ASSERT_EQ(names_->length(), assigned_constants_->length());
Handle<FixedArray> assignments =
- Factory::NewFixedArray(names_->length() * 3);
+ isolate_->factory()->NewFixedArray(names_->length() * 3);
for (int i = 0; i < names_->length(); i++) {
assignments->set(i * 3, *names_->at(i));
assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_->at(i)));
@@ -1021,7 +995,8 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder {
uint32_t dummy;
if (literal != NULL &&
literal->handle()->IsString() &&
- !String::cast(*(literal->handle()))->Equals(Heap::Proto_symbol()) &&
+ !String::cast(*(literal->handle()))->Equals(
+ isolate_->heap()->Proto_symbol()) &&
!String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
Handle<String> key = Handle<String>::cast(literal->handle());
@@ -1055,7 +1030,7 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder {
EnsureAllocation();
names_->Add(name);
assigned_arguments_->Add(index);
- assigned_constants_->Add(Factory::undefined_value());
+ assigned_constants_->Add(isolate_->factory()->undefined_value());
}
void AssignmentFromConstant(Handle<String> name, Handle<Object> value) {
@@ -1074,12 +1049,14 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder {
if (names_ == NULL) {
ASSERT(assigned_arguments_ == NULL);
ASSERT(assigned_constants_ == NULL);
- names_ = new ZoneStringList(4);
- assigned_arguments_ = new ZoneList<int>(4);
- assigned_constants_ = new ZoneObjectList(4);
+ Zone* zone = isolate_->zone();
+ names_ = new(zone) ZoneStringList(4);
+ assigned_arguments_ = new(zone) ZoneList<int>(4);
+ assigned_constants_ = new(zone) ZoneObjectList(4);
}
}
+ Isolate* isolate_;
bool only_simple_this_property_assignments_;
ZoneStringList* names_;
ZoneList<int>* assigned_arguments_;
@@ -1101,7 +1078,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
ASSERT(processor != NULL);
InitializationBlockFinder block_finder;
- ThisNamedPropertyAssigmentFinder this_property_assignment_finder;
+ ThisNamedPropertyAssigmentFinder this_property_assignment_finder(isolate());
bool directive_prologue = true; // Parsing directive prologue.
while (peek() != end_token) {
@@ -1141,11 +1118,11 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
Handle<String> directive = Handle<String>::cast(literal->handle());
// Check "use strict" directive (ES5 14.1).
- if (!temp_scope_->StrictMode() &&
- directive->Equals(Heap::use_strict()) &&
+ if (!top_scope_->is_strict_mode() &&
+ directive->Equals(isolate()->heap()->use_strict()) &&
token_loc.end_pos - token_loc.beg_pos ==
- Heap::use_strict()->length() + 2) {
- temp_scope_->EnableStrictMode();
+ isolate()->heap()->use_strict()->length() + 2) {
+ top_scope_->EnableStrictMode();
// "use strict" is the only directive for now.
directive_prologue = false;
}
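
The length comparison above is what rejects escaped spellings: only a literal written exactly as "use strict" spans its cooked length plus the two quote characters. A standalone sketch of the check, with IsUseStrictDirective as a hypothetical helper:

#include <cassert>
#include <cstring>
#include <string>

// A string literal counts as the directive only if its source span is
// exactly the literal text plus two quotes, ruling out spellings that
// use escape sequences (e.g. "use\x20strict").
static bool IsUseStrictDirective(const std::string& cooked_value,
                                 int beg_pos, int end_pos) {
  return cooked_value == "use strict" &&
         end_pos - beg_pos == static_cast<int>(strlen("use strict")) + 2;
}

int main() {
  // Source: "use strict" -> 12-character span, 10-character cooked value.
  assert(IsUseStrictDirective("use strict", 0, 12));
  // Source: "use\x20strict" -> same cooked value but a 15-character
  // span, so it is not treated as the directive.
  assert(!IsUseStrictDirective("use strict", 0, 15));
  return 0;
}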
@@ -1174,7 +1151,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
this_property_assignment_finder.only_simple_this_property_assignments()
&& top_scope_->declarations()->length() == 0;
if (only_simple_this_property_assignments) {
- temp_scope_->SetThisPropertyAssignmentInfo(
+ lexical_scope_->SetThisPropertyAssignmentInfo(
only_simple_this_property_assignments,
this_property_assignment_finder.GetThisPropertyAssignments());
}
@@ -1270,7 +1247,7 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
// one must take great care not to treat it as a
// fall-through. It is much easier just to wrap the entire
// try-statement in a statement block and put the labels there
- Block* result = new Block(labels, 1, false);
+ Block* result = new(zone()) Block(labels, 1, false);
Target target(&this->target_stack_, result);
TryStatement* statement = ParseTryStatement(CHECK_OK);
if (statement) {
@@ -1283,7 +1260,7 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
case Token::FUNCTION: {
// In strict mode, FunctionDeclaration is only allowed in the context
// of SourceElements.
- if (temp_scope_->StrictMode()) {
+ if (top_scope_->is_strict_mode()) {
ReportMessageAt(scanner().peek_location(), "strict_function",
Vector<const char*>::empty());
*ok = false;
@@ -1292,9 +1269,6 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
return ParseFunctionDeclaration(ok);
}
- case Token::NATIVE:
- return ParseNativeDeclaration(ok);
-
case Token::DEBUGGER:
stmt = ParseDebuggerStatement(ok);
break;
@@ -1325,7 +1299,10 @@ VariableProxy* Parser::Declare(Handle<String> name,
// to the corresponding activation frame at runtime if necessary.
// For instance declarations inside an eval scope need to be added
// to the calling function context.
- if (top_scope_->is_function_scope()) {
+ // Similarly, strict mode eval scope does not leak variable declarations to
+ // the caller's scope so we declare all locals, too.
+ if (top_scope_->is_function_scope() ||
+ top_scope_->is_strict_mode_eval_scope()) {
// Declare the variable in the function scope.
var = top_scope_->LocalLookup(name);
if (var == NULL) {
@@ -1342,9 +1319,9 @@ VariableProxy* Parser::Declare(Handle<String> name,
var->mode() == Variable::CONST);
const char* type = (var->mode() == Variable::VAR) ? "var" : "const";
Handle<String> type_string =
- Factory::NewStringFromUtf8(CStrVector(type), TENURED);
+ isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED);
Expression* expression =
- NewThrowTypeError(Factory::redeclaration_symbol(),
+ NewThrowTypeError(isolate()->factory()->redeclaration_symbol(),
type_string, name);
top_scope_->SetIllegalRedeclaration(expression);
}
@@ -1368,13 +1345,13 @@ VariableProxy* Parser::Declare(Handle<String> name,
// a performance issue since it may lead to repeated
// Runtime::DeclareContextSlot() calls.
VariableProxy* proxy = top_scope_->NewUnresolved(name, inside_with());
- top_scope_->AddDeclaration(new Declaration(proxy, mode, fun));
+ top_scope_->AddDeclaration(new(zone()) Declaration(proxy, mode, fun));
// For global const variables we bind the proxy to a variable.
if (mode == Variable::CONST && top_scope_->is_global_scope()) {
ASSERT(resolve); // should be set by all callers
Variable::Kind kind = Variable::NORMAL;
- var = new Variable(top_scope_, name, Variable::CONST, true, kind);
+ var = new(zone()) Variable(top_scope_, name, Variable::CONST, true, kind);
}
// If requested and we have a local variable, bind the proxy to the variable
@@ -1412,13 +1389,6 @@ VariableProxy* Parser::Declare(Handle<String> name,
// declaration is resolved by looking up the function through a
// callback provided by the extension.
Statement* Parser::ParseNativeDeclaration(bool* ok) {
- if (extension_ == NULL) {
- ReportUnexpectedToken(Token::NATIVE);
- *ok = false;
- return NULL;
- }
-
- Expect(Token::NATIVE, CHECK_OK);
Expect(Token::FUNCTION, CHECK_OK);
Handle<String> name = ParseIdentifier(CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
@@ -1450,7 +1420,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
Handle<Code> code = Handle<Code>(fun->shared()->code());
Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
Handle<SharedFunctionInfo> shared =
- Factory::NewSharedFunctionInfo(name, literals, code,
+ isolate()->factory()->NewSharedFunctionInfo(name, literals, code,
Handle<SerializedScopeInfo>(fun->shared()->scope_info()));
shared->set_construct_stub(*construct_stub);
@@ -1462,10 +1432,11 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// TODO(1240846): It's weird that native function declarations are
// introduced dynamically when we meet their declarations, whereas
  // other functions are set up when entering the surrounding scope.
- SharedFunctionInfoLiteral* lit = new SharedFunctionInfoLiteral(shared);
+ SharedFunctionInfoLiteral* lit =
+ new(zone()) SharedFunctionInfoLiteral(shared);
VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK);
- return new ExpressionStatement(
- new Assignment(Token::INIT_VAR, var, lit, RelocInfo::kNoPosition));
+ return new(zone()) ExpressionStatement(new(zone()) Assignment(
+ Token::INIT_VAR, var, lit, RelocInfo::kNoPosition));
}
@@ -1474,10 +1445,11 @@ Statement* Parser::ParseFunctionDeclaration(bool* ok) {
// 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
int function_token_position = scanner().location().beg_pos;
- bool is_reserved = false;
- Handle<String> name = ParseIdentifierOrReservedWord(&is_reserved, CHECK_OK);
+ bool is_strict_reserved = false;
+ Handle<String> name = ParseIdentifierOrStrictReservedWord(
+ &is_strict_reserved, CHECK_OK);
FunctionLiteral* fun = ParseFunctionLiteral(name,
- is_reserved,
+ is_strict_reserved,
function_token_position,
DECLARATION,
CHECK_OK);
@@ -1497,7 +1469,7 @@ Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
// (ECMA-262, 3rd, 12.2)
//
// Construct block expecting 16 statements.
- Block* result = new Block(labels, 16, false);
+ Block* result = new(zone()) Block(labels, 16, false);
Target target(&this->target_stack_, result);
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
@@ -1519,11 +1491,13 @@ Block* Parser::ParseVariableStatement(bool* ok) {
return result;
}
-static bool IsEvalOrArguments(Handle<String> string) {
- return string.is_identical_to(Factory::eval_symbol()) ||
- string.is_identical_to(Factory::arguments_symbol());
+
+bool Parser::IsEvalOrArguments(Handle<String> string) {
+ return string.is_identical_to(isolate()->factory()->eval_symbol()) ||
+ string.is_identical_to(isolate()->factory()->arguments_symbol());
}
+
// If the variable declaration declares exactly one non-const
// variable, then *var is set to that variable. In all other cases,
// *var is untouched; in particular, it is the caller's responsibility
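
IsEvalOrArguments can use is_identical_to, a handle identity comparison, instead of comparing characters because parser symbols are interned: the factory hands out one heap object per distinct text. The invariant, sketched with a std::unordered_set standing in for the symbol table:

    #include <cassert>
    #include <string>
    #include <unordered_set>

    // Interning: equal text yields the same address, so identity suffices.
    const std::string* Intern(const std::string& text) {
      static std::unordered_set<std::string> table;
      return &*table.insert(text).first;  // node addresses are stable
    }

    bool IsEvalOrArguments(const std::string* symbol) {
      return symbol == Intern("eval") || symbol == Intern("arguments");
    }

    int main() {
      assert(IsEvalOrArguments(Intern("eval")));
      assert(!IsEvalOrArguments(Intern("x")));
      return 0;
    }
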
@@ -1541,7 +1515,7 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
Consume(Token::VAR);
} else if (peek() == Token::CONST) {
Consume(Token::CONST);
- if (temp_scope_->StrictMode()) {
+ if (top_scope_->is_strict_mode()) {
ReportMessage("strict_const", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -1565,7 +1539,7 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
// is inside an initializer block, it is ignored.
//
// Create new block with one expected declaration.
- Block* block = new Block(NULL, 1, true);
+ Block* block = new(zone()) Block(NULL, 1, true);
VariableProxy* last_var = NULL; // the last variable declared
int nvars = 0; // the number of variables declared
do {
@@ -1577,7 +1551,7 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
if (fni_ != NULL) fni_->PushVariableName(name);
// Strict mode variables may not be named eval or arguments
- if (temp_scope_->StrictMode() && IsEvalOrArguments(name)) {
+ if (top_scope_->is_strict_mode() && IsEvalOrArguments(name)) {
ReportMessage("strict_var_name", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -1599,6 +1573,12 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
is_const /* always bound for CONST! */,
CHECK_OK);
nvars++;
+ if (top_scope_->num_var_or_const() > kMaxNumFunctionLocals) {
+ ReportMessageAt(scanner().location(), "too_many_variables",
+ Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
// Parse initialization expression if present and/or needed. A
// declaration of the form:
@@ -1634,7 +1614,11 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
position = scanner().location().beg_pos;
value = ParseAssignmentExpression(accept_IN, CHECK_OK);
    // Don't infer if it is an "a = function(){...}();"-like expression.
- if (fni_ != NULL && value->AsCall() == NULL) fni_->Infer();
+ if (fni_ != NULL &&
+ value->AsCall() == NULL &&
+ value->AsCallNew() == NULL) {
+ fni_->Infer();
+ }
}
// Make sure that 'const c' actually initializes 'c' to undefined
@@ -1665,8 +1649,9 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
if (top_scope_->is_global_scope()) {
// Compute the arguments for the runtime call.
- ZoneList<Expression*>* arguments = new ZoneList<Expression*>(3);
- arguments->Add(new Literal(name)); // we have at least 1 parameter
+ ZoneList<Expression*>* arguments = new(zone()) ZoneList<Expression*>(3);
+ // We have at least 1 parameter.
+ arguments->Add(new(zone()) Literal(name));
CallRuntime* initialize;
if (is_const) {
@@ -1678,15 +1663,15 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
// Note that the function does different things depending on
// the number of arguments (1 or 2).
initialize =
- new CallRuntime(
- Factory::InitializeConstGlobal_symbol(),
+ new(zone()) CallRuntime(
+ isolate()->factory()->InitializeConstGlobal_symbol(),
Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
arguments);
} else {
// Add strict mode.
        // We may want to pass a singleton to avoid Literal allocations.
arguments->Add(NewNumberLiteral(
- temp_scope_->StrictMode() ? kStrictMode : kNonStrictMode));
+ top_scope_->is_strict_mode() ? kStrictMode : kNonStrictMode));
// Be careful not to assign a value to the global variable if
// we're in a with. The initialization value should not
@@ -1702,13 +1687,13 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
// Note that the function does different things depending on
// the number of arguments (2 or 3).
initialize =
- new CallRuntime(
- Factory::InitializeVarGlobal_symbol(),
+ new(zone()) CallRuntime(
+ isolate()->factory()->InitializeVarGlobal_symbol(),
Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
arguments);
}
- block->AddStatement(new ExpressionStatement(initialize));
+ block->AddStatement(new(zone()) ExpressionStatement(initialize));
}
// Add an assignment node to the initialization statement block if
@@ -1723,8 +1708,11 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
// the top context for variables). Sigh...
if (value != NULL) {
Token::Value op = (is_const ? Token::INIT_CONST : Token::INIT_VAR);
- Assignment* assignment = new Assignment(op, last_var, value, position);
- if (block) block->AddStatement(new ExpressionStatement(assignment));
+ Assignment* assignment =
+ new(zone()) Assignment(op, last_var, value, position);
+ if (block) {
+ block->AddStatement(new(zone()) ExpressionStatement(assignment));
+ }
}
if (fni_ != NULL) fni_->Leave();
@@ -1758,7 +1746,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// Identifier ':' Statement
bool starts_with_idenfifier = peek_any_identifier();
Expression* expr = ParseExpression(true, CHECK_OK);
- if (peek() == Token::COLON && starts_with_idenfifier && expr &&
+ if (peek() == Token::COLON && starts_with_idenfifier && expr != NULL &&
expr->AsVariableProxy() != NULL &&
!expr->AsVariableProxy()->is_this()) {
// Expression is a single identifier, and not, e.g., a parenthesized
@@ -1778,7 +1766,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
*ok = false;
return NULL;
}
- if (labels == NULL) labels = new ZoneStringList(4);
+ if (labels == NULL) labels = new(zone()) ZoneStringList(4);
labels->Add(label);
// Remove the "ghost" variable that turned out to be a label
// from the top scope. This way, we don't try to resolve it
@@ -1788,9 +1776,23 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
return ParseStatement(labels, ok);
}
+ // If we have an extension, we allow a native function declaration.
+ // A native function declaration starts with "native function" with
+ // no line-terminator between the two words.
+ if (extension_ != NULL &&
+ peek() == Token::FUNCTION &&
+ !scanner().HasAnyLineTerminatorBeforeNext() &&
+ expr != NULL &&
+ expr->AsVariableProxy() != NULL &&
+ expr->AsVariableProxy()->name()->Equals(
+ isolate()->heap()->native_symbol()) &&
+ !scanner().literal_contains_escapes()) {
+ return ParseNativeDeclaration(ok);
+ }
+
// Parsed expression statement.
ExpectSemicolon(CHECK_OK);
- return new ExpressionStatement(expr);
+ return new(zone()) ExpressionStatement(expr);
}
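
This gate replaces the removed Token::NATIVE case: 'native' now scans as an ordinary identifier, and the parser only reroutes to ParseNativeDeclaration when every condition holds. Condensed into a sketch, with a hypothetical lookahead struct in place of the real scanner:

    #include <string>

    // Hypothetical one-token lookahead state for the gate above.
    struct Lookahead {
      bool have_extension;            // an embedder extension is registered
      std::string parsed_identifier;  // the expression already parsed
      bool identifier_had_escapes;    // "\u006eative" must not qualify
      bool next_is_function;          // peek() == Token::FUNCTION
      bool newline_before_next;       // line terminator between the words?
    };

    bool IsNativeDeclaration(const Lookahead& s) {
      return s.have_extension &&
             s.next_is_function &&
             !s.newline_before_next &&
             !s.identifier_had_escapes &&
             s.parsed_identifier == "native";
    }

    int main() {
      Lookahead s = {true, "native", false, true, false};
      return IsNativeDeclaration(s) ? 0 : 1;
    }
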
@@ -1810,7 +1812,7 @@ IfStatement* Parser::ParseIfStatement(ZoneStringList* labels, bool* ok) {
} else {
else_statement = EmptyStatement();
}
- return new IfStatement(condition, then_statement, else_statement);
+ return new(zone()) IfStatement(condition, then_statement, else_statement);
}
@@ -1821,7 +1823,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
Expect(Token::CONTINUE, CHECK_OK);
Handle<String> label = Handle<String>::null();
Token::Value tok = peek();
- if (!scanner().has_line_terminator_before_next() &&
+ if (!scanner().HasAnyLineTerminatorBeforeNext() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
label = ParseIdentifier(CHECK_OK);
}
@@ -1840,7 +1842,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
return NULL;
}
ExpectSemicolon(CHECK_OK);
- return new ContinueStatement(target);
+ return new(zone()) ContinueStatement(target);
}
@@ -1851,7 +1853,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::BREAK, CHECK_OK);
Handle<String> label;
Token::Value tok = peek();
- if (!scanner().has_line_terminator_before_next() &&
+ if (!scanner().HasAnyLineTerminatorBeforeNext() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
label = ParseIdentifier(CHECK_OK);
}
@@ -1875,7 +1877,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
return NULL;
}
ExpectSemicolon(CHECK_OK);
- return new BreakStatement(target);
+ return new(zone()) BreakStatement(target);
}
@@ -1894,33 +1896,29 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
//
// To be consistent with KJS we report the syntax error at runtime.
if (!top_scope_->is_function_scope()) {
- Handle<String> type = Factory::illegal_return_symbol();
+ Handle<String> type = isolate()->factory()->illegal_return_symbol();
Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
- return new ExpressionStatement(throw_error);
+ return new(zone()) ExpressionStatement(throw_error);
}
Token::Value tok = peek();
- if (scanner().has_line_terminator_before_next() ||
+ if (scanner().HasAnyLineTerminatorBeforeNext() ||
tok == Token::SEMICOLON ||
tok == Token::RBRACE ||
tok == Token::EOS) {
ExpectSemicolon(CHECK_OK);
- return new ReturnStatement(GetLiteralUndefined());
+ return new(zone()) ReturnStatement(GetLiteralUndefined());
}
Expression* expr = ParseExpression(true, CHECK_OK);
ExpectSemicolon(CHECK_OK);
- return new ReturnStatement(expr);
+ return new(zone()) ReturnStatement(expr);
}
-Block* Parser::WithHelper(Expression* obj,
- ZoneStringList* labels,
- bool is_catch_block,
- bool* ok) {
+Block* Parser::WithHelper(Expression* obj, ZoneStringList* labels, bool* ok) {
// Parse the statement and collect escaping labels.
- ZoneList<BreakTarget*>* target_list = new ZoneList<BreakTarget*>(0);
- TargetCollector collector(target_list);
+ TargetCollector collector;
Statement* stat;
{ Target target(&this->target_stack_, &collector);
with_nesting_level_++;
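
ParseReturnStatement above handles one of the grammar's restricted productions: a line terminator directly after 'return' forces automatic semicolon insertion, so the operand test must consult HasAnyLineTerminatorBeforeNext before looking at the next token's kind. The rule reduced to a sketch (the Scanner here is hypothetical):

    #include <cassert>
    #include <string>

    struct Scanner {
      std::string peeked;        // next token's text ("" at end of stream)
      bool newline_before_next;  // any line terminator before 'peeked'
    };

    // 'return [no LineTerminator here] Expression?'
    bool ReturnTakesOperand(const Scanner& s) {
      if (s.newline_before_next) return false;  // ASI: the return ends here
      return s.peeked != ";" && s.peeked != "}" && !s.peeked.empty();
    }

    int main() {
      assert(!ReturnTakesOperand({"x", true}));  // 'return \n x' => 'return;'
      assert(ReturnTakesOperand({"x", false}));  // 'return x' has an operand
      return 0;
    }
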
@@ -1931,21 +1929,21 @@ Block* Parser::WithHelper(Expression* obj,
// Create resulting block with two statements.
// 1: Evaluate the with expression.
// 2: The try-finally block evaluating the body.
- Block* result = new Block(NULL, 2, false);
+ Block* result = new(zone()) Block(NULL, 2, false);
if (result != NULL) {
- result->AddStatement(new WithEnterStatement(obj, is_catch_block));
+ result->AddStatement(new(zone()) EnterWithContextStatement(obj));
// Create body block.
- Block* body = new Block(NULL, 1, false);
+ Block* body = new(zone()) Block(NULL, 1, false);
body->AddStatement(stat);
// Create exit block.
- Block* exit = new Block(NULL, 1, false);
- exit->AddStatement(new WithExitStatement());
+ Block* exit = new(zone()) Block(NULL, 1, false);
+ exit->AddStatement(new(zone()) ExitContextStatement());
// Return a try-finally statement.
- TryFinallyStatement* wrapper = new TryFinallyStatement(body, exit);
+ TryFinallyStatement* wrapper = new(zone()) TryFinallyStatement(body, exit);
wrapper->set_escaping_targets(collector.targets());
result->AddStatement(wrapper);
}
@@ -1959,7 +1957,7 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::WITH, CHECK_OK);
- if (temp_scope_->StrictMode()) {
+ if (top_scope_->is_strict_mode()) {
ReportMessage("strict_mode_with", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -1969,7 +1967,7 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
Expression* expr = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- return WithHelper(expr, labels, false, CHECK_OK);
+ return WithHelper(expr, labels, CHECK_OK);
}
@@ -1994,7 +1992,7 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
}
Expect(Token::COLON, CHECK_OK);
int pos = scanner().location().beg_pos;
- ZoneList<Statement*>* statements = new ZoneList<Statement*>(5);
+ ZoneList<Statement*>* statements = new(zone()) ZoneList<Statement*>(5);
while (peek() != Token::CASE &&
peek() != Token::DEFAULT &&
peek() != Token::RBRACE) {
@@ -2002,7 +2000,7 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
statements->Add(stat);
}
- return new CaseClause(label, statements, pos);
+ return new(zone()) CaseClause(label, statements, pos);
}
@@ -2011,7 +2009,7 @@ SwitchStatement* Parser::ParseSwitchStatement(ZoneStringList* labels,
// SwitchStatement ::
// 'switch' '(' Expression ')' '{' CaseClause* '}'
- SwitchStatement* statement = new SwitchStatement(labels);
+ SwitchStatement* statement = new(zone()) SwitchStatement(labels);
Target target(&this->target_stack_, statement);
Expect(Token::SWITCH, CHECK_OK);
@@ -2020,7 +2018,7 @@ SwitchStatement* Parser::ParseSwitchStatement(ZoneStringList* labels,
Expect(Token::RPAREN, CHECK_OK);
bool default_seen = false;
- ZoneList<CaseClause*>* cases = new ZoneList<CaseClause*>(4);
+ ZoneList<CaseClause*>* cases = new(zone()) ZoneList<CaseClause*>(4);
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
CaseClause* clause = ParseCaseClause(&default_seen, CHECK_OK);
@@ -2039,7 +2037,7 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
Expect(Token::THROW, CHECK_OK);
int pos = scanner().location().beg_pos;
- if (scanner().has_line_terminator_before_next()) {
+ if (scanner().HasAnyLineTerminatorBeforeNext()) {
ReportMessage("newline_after_throw", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -2047,7 +2045,7 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
Expression* exception = ParseExpression(true, CHECK_OK);
ExpectSemicolon(CHECK_OK);
- return new ExpressionStatement(new Throw(exception, pos));
+ return new(zone()) ExpressionStatement(new(zone()) Throw(exception, pos));
}
@@ -2065,18 +2063,13 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Expect(Token::TRY, CHECK_OK);
- ZoneList<BreakTarget*>* target_list = new ZoneList<BreakTarget*>(0);
- TargetCollector collector(target_list);
+ TargetCollector try_collector;
Block* try_block;
- { Target target(&this->target_stack_, &collector);
+ { Target target(&this->target_stack_, &try_collector);
try_block = ParseBlock(NULL, CHECK_OK);
}
- Block* catch_block = NULL;
- Variable* catch_var = NULL;
- Block* finally_block = NULL;
-
Token::Value tok = peek();
if (tok != Token::CATCH && tok != Token::FINALLY) {
ReportMessage("no_catch_or_finally", Vector<const char*>::empty());
@@ -2085,20 +2078,19 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
}
// If we can break out from the catch block and there is a finally block,
- // then we will need to collect jump targets from the catch block. Since
- // we don't know yet if there will be a finally block, we always collect
- // the jump targets.
- ZoneList<BreakTarget*>* catch_target_list = new ZoneList<BreakTarget*>(0);
- TargetCollector catch_collector(catch_target_list);
- bool has_catch = false;
+ // then we will need to collect escaping targets from the catch
+ // block. Since we don't know yet if there will be a finally block, we
+ // always collect the targets.
+ TargetCollector catch_collector;
+ Block* catch_block = NULL;
+ Handle<String> name;
if (tok == Token::CATCH) {
- has_catch = true;
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
- Handle<String> name = ParseIdentifier(CHECK_OK);
+ name = ParseIdentifier(CHECK_OK);
- if (temp_scope_->StrictMode() && IsEvalOrArguments(name)) {
+ if (top_scope_->is_strict_mode() && IsEvalOrArguments(name)) {
ReportMessage("strict_catch_variable", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -2107,15 +2099,33 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Expect(Token::RPAREN, CHECK_OK);
if (peek() == Token::LBRACE) {
- // Allocate a temporary for holding the finally state while
- // executing the finally block.
- catch_var = top_scope_->NewTemporary(Factory::catch_var_symbol());
- Literal* name_literal = new Literal(name);
- VariableProxy* catch_var_use = new VariableProxy(catch_var);
- Expression* obj = new CatchExtensionObject(name_literal, catch_var_use);
+ // Rewrite the catch body B to a single statement block
+ // { try B finally { PopContext }}.
+ Block* inner_body;
+ // We need to collect escapes from the body for both the inner
+ // try/finally used to pop the catch context and any possible outer
+ // try/finally.
+ TargetCollector inner_collector;
{ Target target(&this->target_stack_, &catch_collector);
- catch_block = WithHelper(obj, NULL, true, CHECK_OK);
+ { Target target(&this->target_stack_, &inner_collector);
+ ++with_nesting_level_;
+ top_scope_->RecordWithStatement();
+ inner_body = ParseBlock(NULL, CHECK_OK);
+ --with_nesting_level_;
+ }
}
+
+ // Create exit block.
+ Block* inner_finally = new(zone()) Block(NULL, 1, false);
+ inner_finally->AddStatement(new(zone()) ExitContextStatement());
+
+ // Create a try/finally statement.
+ TryFinallyStatement* inner_try_finally =
+ new(zone()) TryFinallyStatement(inner_body, inner_finally);
+ inner_try_finally->set_escaping_targets(inner_collector.targets());
+
+ catch_block = new(zone()) Block(NULL, 1, false);
+ catch_block->AddStatement(inner_try_finally);
} else {
Expect(Token::LBRACE, CHECK_OK);
}
@@ -2123,24 +2133,22 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
tok = peek();
}
- if (tok == Token::FINALLY || !has_catch) {
+ Block* finally_block = NULL;
+ if (tok == Token::FINALLY || catch_block == NULL) {
Consume(Token::FINALLY);
- // Declare a variable for holding the finally state while
- // executing the finally block.
finally_block = ParseBlock(NULL, CHECK_OK);
}
// Simplify the AST nodes by converting:
- // 'try { } catch { } finally { }'
+ // 'try B0 catch B1 finally B2'
// to:
- // 'try { try { } catch { } } finally { }'
+ // 'try { try B0 catch B1 } finally B2'
if (catch_block != NULL && finally_block != NULL) {
- VariableProxy* catch_var_defn = new VariableProxy(catch_var);
TryCatchStatement* statement =
- new TryCatchStatement(try_block, catch_var_defn, catch_block);
- statement->set_escaping_targets(collector.targets());
- try_block = new Block(NULL, 1, false);
+ new(zone()) TryCatchStatement(try_block, name, catch_block);
+ statement->set_escaping_targets(try_collector.targets());
+ try_block = new(zone()) Block(NULL, 1, false);
try_block->AddStatement(statement);
catch_block = NULL;
}
@@ -2148,19 +2156,16 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
TryStatement* result = NULL;
if (catch_block != NULL) {
ASSERT(finally_block == NULL);
- VariableProxy* catch_var_defn = new VariableProxy(catch_var);
- result = new TryCatchStatement(try_block, catch_var_defn, catch_block);
- result->set_escaping_targets(collector.targets());
+ result =
+ new(zone()) TryCatchStatement(try_block, name, catch_block);
} else {
ASSERT(finally_block != NULL);
- result = new TryFinallyStatement(try_block, finally_block);
- // Add the jump targets of the try block and the catch block.
- for (int i = 0; i < collector.targets()->length(); i++) {
- catch_collector.AddTarget(collector.targets()->at(i));
- }
- result->set_escaping_targets(catch_collector.targets());
+ result = new(zone()) TryFinallyStatement(try_block, finally_block);
+ // Combine the jump targets of the try block and the possible catch block.
+ try_collector.targets()->AddAll(*catch_collector.targets());
}
+ result->set_escaping_targets(try_collector.targets());
return result;
}
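
Note the shape of the result: the parser never builds a three-way node, so a full 'try B0 catch B1 finally B2' always becomes a TryFinally whose body wraps a TryCatch. A structural sketch with hypothetical node types (the real nodes are zone-allocated and carry escaping targets):

    // Hypothetical AST nodes; leaks are fine here, real nodes live in a zone.
    struct Stmt { virtual ~Stmt() {} };

    struct TryCatch : Stmt {
      TryCatch(Stmt* t, Stmt* c) : try_block(t), catch_block(c) {}
      Stmt* try_block;
      Stmt* catch_block;
    };

    struct TryFinally : Stmt {
      TryFinally(Stmt* t, Stmt* f) : try_block(t), finally_block(f) {}
      Stmt* try_block;
      Stmt* finally_block;
    };

    // 'try B0 catch B1 finally B2' => 'try { try B0 catch B1 } finally B2'.
    // Callers guarantee at least one of b1, b2 is non-null.
    Stmt* BuildTry(Stmt* b0, Stmt* b1, Stmt* b2) {
      Stmt* inner = b1 ? static_cast<Stmt*>(new TryCatch(b0, b1)) : b0;
      return b2 ? new TryFinally(inner, b2) : inner;
    }

    int main() {
      Stmt* full = BuildTry(new Stmt, new Stmt, new Stmt);
      return dynamic_cast<TryFinally*>(full) ? 0 : 1;
    }
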
@@ -2170,8 +2175,7 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
// DoStatement ::
// 'do' Statement 'while' '(' Expression ')' ';'
- temp_scope_->AddLoop();
- DoWhileStatement* loop = new DoWhileStatement(labels);
+ DoWhileStatement* loop = new(zone()) DoWhileStatement(labels);
Target target(&this->target_stack_, loop);
Expect(Token::DO, CHECK_OK);
@@ -2185,7 +2189,6 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
}
Expression* cond = ParseExpression(true, CHECK_OK);
- if (cond != NULL) cond->set_is_loop_condition(true);
Expect(Token::RPAREN, CHECK_OK);
// Allow do-statements to be terminated with and without
@@ -2203,14 +2206,12 @@ WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
// WhileStatement ::
// 'while' '(' Expression ')' Statement
- temp_scope_->AddLoop();
- WhileStatement* loop = new WhileStatement(labels);
+ WhileStatement* loop = new(zone()) WhileStatement(labels);
Target target(&this->target_stack_, loop);
Expect(Token::WHILE, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
Expression* cond = ParseExpression(true, CHECK_OK);
- if (cond != NULL) cond->set_is_loop_condition(true);
Expect(Token::RPAREN, CHECK_OK);
Statement* body = ParseStatement(NULL, CHECK_OK);
@@ -2223,7 +2224,6 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// ForStatement ::
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
- temp_scope_->AddLoop();
Statement* init = NULL;
Expect(Token::FOR, CHECK_OK);
@@ -2234,7 +2234,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Block* variable_statement =
ParseVariableDeclarations(false, &each, CHECK_OK);
if (peek() == Token::IN && each != NULL) {
- ForInStatement* loop = new ForInStatement(labels);
+ ForInStatement* loop = new(zone()) ForInStatement(labels);
Target target(&this->target_stack_, loop);
Expect(Token::IN, CHECK_OK);
@@ -2243,7 +2243,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* body = ParseStatement(NULL, CHECK_OK);
loop->Initialize(each, enumerable, body);
- Block* result = new Block(NULL, 2, false);
+ Block* result = new(zone()) Block(NULL, 2, false);
result->AddStatement(variable_statement);
result->AddStatement(loop);
// Parsed for-in loop w/ variable/const declaration.
@@ -2260,10 +2260,11 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// error here but for compatibility with JSC we choose to report
// the error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type = Factory::invalid_lhs_in_for_in_symbol();
+ Handle<String> type =
+ isolate()->factory()->invalid_lhs_in_for_in_symbol();
expression = NewThrowReferenceError(type);
}
- ForInStatement* loop = new ForInStatement(labels);
+ ForInStatement* loop = new(zone()) ForInStatement(labels);
Target target(&this->target_stack_, loop);
Expect(Token::IN, CHECK_OK);
@@ -2276,13 +2277,13 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
return loop;
} else {
- init = new ExpressionStatement(expression);
+ init = new(zone()) ExpressionStatement(expression);
}
}
}
// Standard 'for' loop
- ForStatement* loop = new ForStatement(labels);
+ ForStatement* loop = new(zone()) ForStatement(labels);
Target target(&this->target_stack_, loop);
// Parsed initializer at this point.
@@ -2291,14 +2292,13 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expression* cond = NULL;
if (peek() != Token::SEMICOLON) {
cond = ParseExpression(true, CHECK_OK);
- if (cond != NULL) cond->set_is_loop_condition(true);
}
Expect(Token::SEMICOLON, CHECK_OK);
Statement* next = NULL;
if (peek() != Token::RPAREN) {
Expression* exp = ParseExpression(true, CHECK_OK);
- next = new ExpressionStatement(exp);
+ next = new(zone()) ExpressionStatement(exp);
}
Expect(Token::RPAREN, CHECK_OK);
@@ -2319,7 +2319,7 @@ Expression* Parser::ParseExpression(bool accept_IN, bool* ok) {
Expect(Token::COMMA, CHECK_OK);
int position = scanner().location().beg_pos;
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- result = new BinaryOperation(Token::COMMA, result, right, position);
+ result = new(zone()) BinaryOperation(Token::COMMA, result, right, position);
}
return result;
}
@@ -2345,11 +2345,12 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
// for compatibility with JSC we choose to report the error at
// runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type = Factory::invalid_lhs_in_assignment_symbol();
+ Handle<String> type =
+ isolate()->factory()->invalid_lhs_in_assignment_symbol();
expression = NewThrowReferenceError(type);
}
- if (temp_scope_->StrictMode()) {
+ if (top_scope_->is_strict_mode()) {
// Assignment to eval or arguments is disallowed in strict mode.
CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
}
@@ -2368,7 +2369,7 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
property != NULL &&
property->obj()->AsVariableProxy() != NULL &&
property->obj()->AsVariableProxy()->is_this()) {
- temp_scope_->AddProperty();
+ lexical_scope_->AddProperty();
}
// If we assign a function literal to a property we pretenure the
@@ -2384,13 +2385,13 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
if ((op == Token::INIT_VAR
|| op == Token::INIT_CONST
|| op == Token::ASSIGN)
- && (right->AsCall() == NULL)) {
+ && (right->AsCall() == NULL && right->AsCallNew() == NULL)) {
fni_->Infer();
}
fni_->Leave();
}
- return new Assignment(op, expression, right, pos);
+ return new(zone()) Assignment(op, expression, right, pos);
}
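
The added AsCallNew() test closes a gap in function name inference: 'f = new G()' must not name anything after 'f', just as 'f = g()' does not; only a function literal on the right-hand side should pick up the name. A toy restatement of the effective rule (the real inferrer batches collected names against collected anonymous literals):

    #include <string>

    enum RhsKind { FUNCTION_LITERAL, CALL, CALL_NEW, OTHER_EXPRESSION };

    // Name an anonymous closure after the assignment target only when the
    // right-hand side is itself a function literal.
    std::string InferredName(const std::string& lhs_name, RhsKind rhs) {
      if (rhs == FUNCTION_LITERAL) return lhs_name;  // f = function () {}
      return "";  // f = g(), f = new G(), ...: inference must not fire
    }

    int main() {
      return (InferredName("f", FUNCTION_LITERAL) == "f" &&
              InferredName("f", CALL_NEW).empty()) ? 0 : 1;
    }
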
@@ -2412,7 +2413,7 @@ Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
Expect(Token::COLON, CHECK_OK);
int right_position = scanner().peek_location().beg_pos;
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- return new Conditional(expression, left, right,
+ return new(zone()) Conditional(expression, left, right,
left_position, right_position);
}
@@ -2500,12 +2501,12 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
x = NewCompareNode(cmp, x, y, position);
if (cmp != op) {
// The comparison was negated - add a NOT.
- x = new UnaryOperation(Token::NOT, x);
+ x = new(zone()) UnaryOperation(Token::NOT, x, position);
}
} else {
// We have a "normal" binary operation.
- x = new BinaryOperation(op, x, y, position);
+ x = new(zone()) BinaryOperation(op, x, y, position);
}
}
}
@@ -2522,15 +2523,15 @@ Expression* Parser::NewCompareNode(Token::Value op,
bool is_strict = (op == Token::EQ_STRICT);
Literal* x_literal = x->AsLiteral();
if (x_literal != NULL && x_literal->IsNull()) {
- return new CompareToNull(is_strict, y);
+ return new(zone()) CompareToNull(is_strict, y);
}
Literal* y_literal = y->AsLiteral();
if (y_literal != NULL && y_literal->IsNull()) {
- return new CompareToNull(is_strict, x);
+ return new(zone()) CompareToNull(is_strict, x);
}
}
- return new CompareOperation(op, x, y, position);
+ return new(zone()) CompareOperation(op, x, y, position);
}
@@ -2550,25 +2551,34 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
Token::Value op = peek();
if (Token::IsUnaryOp(op)) {
op = Next();
+ int position = scanner().location().beg_pos;
Expression* expression = ParseUnaryExpression(CHECK_OK);
- // Compute some expressions involving only number literals.
- if (expression != NULL && expression->AsLiteral() &&
- expression->AsLiteral()->handle()->IsNumber()) {
- double value = expression->AsLiteral()->handle()->Number();
- switch (op) {
- case Token::ADD:
- return expression;
- case Token::SUB:
- return NewNumberLiteral(-value);
- case Token::BIT_NOT:
- return NewNumberLiteral(~DoubleToInt32(value));
- default: break;
+ if (expression != NULL && (expression->AsLiteral() != NULL)) {
+ Handle<Object> literal = expression->AsLiteral()->handle();
+ if (op == Token::NOT) {
+ // Convert the literal to a boolean condition and negate it.
+ bool condition = literal->ToBoolean()->IsTrue();
+ Handle<Object> result(isolate()->heap()->ToBoolean(!condition));
+ return new(zone()) Literal(result);
+ } else if (literal->IsNumber()) {
+ // Compute some expressions involving only number literals.
+ double value = literal->Number();
+ switch (op) {
+ case Token::ADD:
+ return expression;
+ case Token::SUB:
+ return NewNumberLiteral(-value);
+ case Token::BIT_NOT:
+ return NewNumberLiteral(~DoubleToInt32(value));
+ default:
+ break;
+ }
}
}
// "delete identifier" is a syntax error in strict mode.
- if (op == Token::DELETE && temp_scope_->StrictMode()) {
+ if (op == Token::DELETE && top_scope_->is_strict_mode()) {
VariableProxy* operand = expression->AsVariableProxy();
if (operand != NULL && !operand->is_this()) {
ReportMessage("strict_delete", Vector<const char*>::empty());
@@ -2577,7 +2587,7 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
}
}
- return new UnaryOperation(op, expression);
+ return new(zone()) UnaryOperation(op, expression, position);
} else if (Token::IsCountOp(op)) {
op = Next();
@@ -2587,18 +2597,21 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
// error here but for compatibility with JSC we choose to report the
// error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
+ Handle<String> type =
+ isolate()->factory()->invalid_lhs_in_prefix_op_symbol();
expression = NewThrowReferenceError(type);
}
- if (temp_scope_->StrictMode()) {
+ if (top_scope_->is_strict_mode()) {
// Prefix expression operand in strict mode may not be eval or arguments.
CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
}
int position = scanner().location().beg_pos;
- IncrementOperation* increment = new IncrementOperation(op, expression);
- return new CountOperation(true /* prefix */, increment, position);
+ return new(zone()) CountOperation(op,
+ true /* prefix */,
+ expression,
+ position);
} else {
return ParsePostfixExpression(ok);
@@ -2611,26 +2624,30 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
// LeftHandSideExpression ('++' | '--')?
Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner().has_line_terminator_before_next() &&
+ if (!scanner().HasAnyLineTerminatorBeforeNext() &&
Token::IsCountOp(peek())) {
// Signal a reference error if the expression is an invalid
// left-hand side expression. We could report this as a syntax
// error here but for compatibility with JSC we choose to report the
// error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> type = Factory::invalid_lhs_in_postfix_op_symbol();
+ Handle<String> type =
+ isolate()->factory()->invalid_lhs_in_postfix_op_symbol();
expression = NewThrowReferenceError(type);
}
- if (temp_scope_->StrictMode()) {
+ if (top_scope_->is_strict_mode()) {
// Postfix expression operand in strict mode may not be eval or arguments.
CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
}
Token::Value next = Next();
int position = scanner().location().beg_pos;
- IncrementOperation* increment = new IncrementOperation(next, expression);
- expression = new CountOperation(false /* postfix */, increment, position);
+ expression =
+ new(zone()) CountOperation(next,
+ false /* postfix */,
+ expression,
+ position);
}
return expression;
}
@@ -2653,7 +2670,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
Consume(Token::LBRACK);
int pos = scanner().location().beg_pos;
Expression* index = ParseExpression(true, CHECK_OK);
- result = new Property(result, index, pos);
+ result = new(zone()) Property(result, index, pos);
Expect(Token::RBRACK, CHECK_OK);
break;
}
@@ -2675,7 +2692,8 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
// is called without a receiver and it refers to the original eval
// function.
VariableProxy* callee = result->AsVariableProxy();
- if (callee != NULL && callee->IsVariable(Factory::eval_symbol())) {
+ if (callee != NULL &&
+ callee->IsVariable(isolate()->factory()->eval_symbol())) {
Handle<String> name = callee->name();
Variable* var = top_scope_->Lookup(name);
if (var == NULL) {
@@ -2690,7 +2708,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
Consume(Token::PERIOD);
int pos = scanner().location().beg_pos;
Handle<String> name = ParseIdentifierName(CHECK_OK);
- result = new Property(result, new Literal(name), pos);
+ result = new(zone()) Property(result, new(zone()) Literal(name), pos);
if (fni_ != NULL) fni_->PushLiteralName(name);
break;
}
@@ -2726,7 +2744,9 @@ Expression* Parser::ParseNewPrefix(PositionStack* stack, bool* ok) {
if (!stack->is_empty()) {
int last = stack->pop();
- result = new CallNew(result, new ZoneList<Expression*>(0), last);
+ result = new(zone()) CallNew(result,
+ new(zone()) ZoneList<Expression*>(0),
+ last);
}
return result;
}
@@ -2755,11 +2775,12 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
Expect(Token::FUNCTION, CHECK_OK);
int function_token_position = scanner().location().beg_pos;
Handle<String> name;
- bool is_reserved_name = false;
+ bool is_strict_reserved_name = false;
if (peek_any_identifier()) {
- name = ParseIdentifierOrReservedWord(&is_reserved_name, CHECK_OK);
+ name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
+ CHECK_OK);
}
- result = ParseFunctionLiteral(name, is_reserved_name,
+ result = ParseFunctionLiteral(name, is_strict_reserved_name,
function_token_position, NESTED, CHECK_OK);
} else {
result = ParsePrimaryExpression(CHECK_OK);
@@ -2771,7 +2792,15 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
Consume(Token::LBRACK);
int pos = scanner().location().beg_pos;
Expression* index = ParseExpression(true, CHECK_OK);
- result = new Property(result, index, pos);
+ result = new(zone()) Property(result, index, pos);
+ if (fni_ != NULL) {
+ if (index->IsPropertyName()) {
+ fni_->PushLiteralName(index->AsLiteral()->AsPropertyName());
+ } else {
+ fni_->PushLiteralName(
+ isolate()->factory()->anonymous_function_symbol());
+ }
+ }
Expect(Token::RBRACK, CHECK_OK);
break;
}
@@ -2779,7 +2808,7 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
Consume(Token::PERIOD);
int pos = scanner().location().beg_pos;
Handle<String> name = ParseIdentifierName(CHECK_OK);
- result = new Property(result, new Literal(name), pos);
+ result = new(zone()) Property(result, new(zone()) Literal(name), pos);
if (fni_ != NULL) fni_->PushLiteralName(name);
break;
}
@@ -2807,7 +2836,7 @@ DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
Expect(Token::DEBUGGER, CHECK_OK);
ExpectSemicolon(CHECK_OK);
- return new DebuggerStatement();
+ return new(zone()) DebuggerStatement();
}
@@ -2830,7 +2859,10 @@ void Parser::ReportUnexpectedToken(Token::Value token) {
return ReportMessage("unexpected_token_identifier",
Vector<const char*>::empty());
case Token::FUTURE_RESERVED_WORD:
- return ReportMessage(temp_scope_->StrictMode() ?
+ return ReportMessage("unexpected_reserved",
+ Vector<const char*>::empty());
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ return ReportMessage(top_scope_->is_strict_mode() ?
"unexpected_strict_reserved" :
"unexpected_token_identifier",
Vector<const char*>::empty());
@@ -2876,31 +2908,34 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
case Token::NULL_LITERAL:
Consume(Token::NULL_LITERAL);
- result = new Literal(Factory::null_value());
+ result = new(zone()) Literal(isolate()->factory()->null_value());
break;
case Token::TRUE_LITERAL:
Consume(Token::TRUE_LITERAL);
- result = new Literal(Factory::true_value());
+ result = new(zone()) Literal(isolate()->factory()->true_value());
break;
case Token::FALSE_LITERAL:
Consume(Token::FALSE_LITERAL);
- result = new Literal(Factory::false_value());
+ result = new(zone()) Literal(isolate()->factory()->false_value());
break;
case Token::IDENTIFIER:
- case Token::FUTURE_RESERVED_WORD: {
+ case Token::FUTURE_STRICT_RESERVED_WORD: {
Handle<String> name = ParseIdentifier(CHECK_OK);
if (fni_ != NULL) fni_->PushVariableName(name);
- result = top_scope_->NewUnresolved(name, inside_with());
+ result = top_scope_->NewUnresolved(name,
+ inside_with(),
+ scanner().location().beg_pos);
break;
}
case Token::NUMBER: {
Consume(Token::NUMBER);
ASSERT(scanner().is_literal_ascii());
- double value = StringToDouble(scanner().literal_ascii_string(),
+ double value = StringToDouble(isolate()->unicode_cache(),
+ scanner().literal_ascii_string(),
ALLOW_HEX | ALLOW_OCTALS);
result = NewNumberLiteral(value);
break;
@@ -2909,7 +2944,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
case Token::STRING: {
Consume(Token::STRING);
Handle<String> symbol = GetSymbol(CHECK_OK);
- result = new Literal(symbol);
+ result = new(zone()) Literal(symbol);
if (fni_ != NULL) fni_->PushLiteralName(symbol);
break;
}
@@ -2990,7 +3025,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
- ZoneList<Expression*>* values = new ZoneList<Expression*>(4);
+ ZoneList<Expression*>* values = new(zone()) ZoneList<Expression*>(4);
Expect(Token::LBRACK, CHECK_OK);
while (peek() != Token::RBRACK) {
Expression* elem;
@@ -3007,11 +3042,11 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
Expect(Token::RBRACK, CHECK_OK);
// Update the scope information before the pre-parsing bailout.
- int literal_index = temp_scope_->NextMaterializedLiteralIndex();
+ int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
// Allocate a fixed array with all the literals.
Handle<FixedArray> literals =
- Factory::NewFixedArray(values->length(), TENURED);
+ isolate()->factory()->NewFixedArray(values->length(), TENURED);
// Fill in the literals.
bool is_simple = true;
@@ -3033,10 +3068,10 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
  // Simple and shallow arrays can be lazily copied; we transform the
// elements array to a copy-on-write array.
if (is_simple && depth == 1 && values->length() > 0) {
- literals->set_map(Heap::fixed_cow_array_map());
+ literals->set_map(isolate()->heap()->fixed_cow_array_map());
}
- return new ArrayLiteral(literals, values,
+ return new(zone()) ArrayLiteral(literals, values,
literal_index, is_simple, depth);
}
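
Pointing a simple, shallow boilerplate at fixed_cow_array_map lets every evaluation of the literal share one elements array until the first store. The mechanism, reduced to a few lines with std::shared_ptr standing in for the map check plus the copy:

    #include <cassert>
    #include <cstddef>
    #include <memory>
    #include <vector>

    // Copy-on-write elements: share the boilerplate until a store happens.
    class CowElements {
     public:
      explicit CowElements(std::vector<double> boilerplate)
          : data_(std::make_shared<std::vector<double> >(
                std::move(boilerplate))) {}

      double Get(std::size_t i) const { return (*data_)[i]; }

      void Set(std::size_t i, double value) {
        if (data_.use_count() > 1)  // still shared: copy before writing
          data_ = std::make_shared<std::vector<double> >(*data_);
        (*data_)[i] = value;
      }

      bool SharedWith(const CowElements& o) const { return data_ == o.data_; }

     private:
      std::shared_ptr<std::vector<double> > data_;
    };

    int main() {
      CowElements a(std::vector<double>{1, 2, 3});
      CowElements b = a;  // another evaluation of the literal: no copy yet
      assert(a.SharedWith(b));
      b.Set(0, 9);        // first store triggers the copy
      assert(!a.SharedWith(b) && a.Get(0) == 1 && b.Get(0) == 9);
      return 0;
    }
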
@@ -3068,7 +3103,7 @@ bool CompileTimeValue::ArrayLiteralElementNeedsInitialization(
Handle<FixedArray> CompileTimeValue::GetValue(Expression* expression) {
ASSERT(IsCompileTimeValue(expression));
- Handle<FixedArray> result = Factory::NewFixedArray(2, TENURED);
+ Handle<FixedArray> result = FACTORY->NewFixedArray(2, TENURED);
ObjectLiteral* object_literal = expression->AsObjectLiteral();
if (object_literal != NULL) {
ASSERT(object_literal->is_simple());
@@ -3106,7 +3141,7 @@ Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
if (CompileTimeValue::IsCompileTimeValue(expression)) {
return CompileTimeValue::GetValue(expression);
}
- return Factory::undefined_value();
+ return isolate()->factory()->undefined_value();
}
// Defined in ast.cc
@@ -3172,7 +3207,7 @@ void ObjectLiteralPropertyChecker::CheckProperty(
if (handle->IsSymbol()) {
Handle<String> name(String::cast(*handle));
if (name->AsArrayIndex(&hash)) {
- Handle<Object> key_handle = Factory::NewNumberFromUint(hash);
+ Handle<Object> key_handle = FACTORY->NewNumberFromUint(hash);
key = key_handle.location();
map = &elems;
} else {
@@ -3189,7 +3224,7 @@ void ObjectLiteralPropertyChecker::CheckProperty(
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
const char* str = DoubleToCString(num, buffer);
- Handle<String> name = Factory::NewStringFromAscii(CStrVector(str));
+ Handle<String> name = FACTORY->NewStringFromAscii(CStrVector(str));
key = name.location();
hash = name->Hash();
map = &props;
@@ -3298,10 +3333,11 @@ ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
bool is_keyword = Token::IsKeyword(next);
if (next == Token::IDENTIFIER || next == Token::NUMBER ||
next == Token::FUTURE_RESERVED_WORD ||
+ next == Token::FUTURE_STRICT_RESERVED_WORD ||
next == Token::STRING || is_keyword) {
Handle<String> name;
if (is_keyword) {
- name = Factory::LookupAsciiSymbol(Token::String(next));
+ name = isolate_->factory()->LookupAsciiSymbol(Token::String(next));
} else {
name = GetSymbol(CHECK_OK);
}
@@ -3314,7 +3350,7 @@ ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
    // Allow any number of parameters for compatibility with JSC.
// Specification only allows zero parameters for get and one for set.
ObjectLiteral::Property* property =
- new ObjectLiteral::Property(is_getter, value);
+ new(zone()) ObjectLiteral::Property(is_getter, value);
return property;
} else {
ReportUnexpectedToken(next);
@@ -3332,10 +3368,11 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
// )*[','] '}'
ZoneList<ObjectLiteral::Property*>* properties =
- new ZoneList<ObjectLiteral::Property*>(4);
+ new(zone()) ZoneList<ObjectLiteral::Property*>(4);
int number_of_boilerplate_properties = 0;
+ bool has_function = false;
- ObjectLiteralPropertyChecker checker(this, temp_scope_->StrictMode());
+ ObjectLiteralPropertyChecker checker(this, top_scope_->is_strict_mode());
Expect(Token::LBRACE, CHECK_OK);
Scanner::Location loc = scanner().location();
@@ -3351,11 +3388,12 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
switch (next) {
case Token::FUTURE_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
case Token::IDENTIFIER: {
bool is_getter = false;
bool is_setter = false;
Handle<String> id =
- ParseIdentifierOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
+ ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
if (fni_ != NULL) fni_->PushLiteralName(id);
if ((is_getter || is_setter) && peek() != Token::COLON) {
@@ -3379,7 +3417,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
}
// Failed to parse as get/set property, so it's just a property
// called "get" or "set".
- key = new Literal(id);
+ key = new(zone()) Literal(id);
break;
}
case Token::STRING: {
@@ -3391,13 +3429,14 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
key = NewNumberLiteral(index);
break;
}
- key = new Literal(string);
+ key = new(zone()) Literal(string);
break;
}
case Token::NUMBER: {
Consume(Token::NUMBER);
ASSERT(scanner().is_literal_ascii());
- double value = StringToDouble(scanner().literal_ascii_string(),
+ double value = StringToDouble(isolate()->unicode_cache(),
+ scanner().literal_ascii_string(),
ALLOW_HEX | ALLOW_OCTALS);
key = NewNumberLiteral(value);
break;
@@ -3406,7 +3445,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
if (Token::IsKeyword(next)) {
Consume(next);
Handle<String> string = GetSymbol(CHECK_OK);
- key = new Literal(string);
+ key = new(zone()) Literal(string);
} else {
// Unexpected token.
Token::Value next = Next();
@@ -3420,7 +3459,14 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Expression* value = ParseAssignmentExpression(true, CHECK_OK);
ObjectLiteral::Property* property =
- new ObjectLiteral::Property(key, value);
+ new(zone()) ObjectLiteral::Property(key, value);
+
+ // Mark object literals that contain function literals and pretenure the
+ // literal so it can be added as a constant function property.
+ if (value->AsFunctionLiteral() != NULL) {
+ has_function = true;
+ value->AsFunctionLiteral()->set_pretenure(true);
+ }
// Count CONSTANT or COMPUTED properties to maintain the enumeration order.
if (IsBoilerplateProperty(property)) number_of_boilerplate_properties++;
@@ -3439,10 +3485,10 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Expect(Token::RBRACE, CHECK_OK);
  // Computation of literal_index must happen before the pre-parse bailout.
- int literal_index = temp_scope_->NextMaterializedLiteralIndex();
+ int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
- Handle<FixedArray> constant_properties =
- Factory::NewFixedArray(number_of_boilerplate_properties * 2, TENURED);
+ Handle<FixedArray> constant_properties = isolate()->factory()->NewFixedArray(
+ number_of_boilerplate_properties * 2, TENURED);
bool is_simple = true;
bool fast_elements = true;
@@ -3452,12 +3498,13 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
&is_simple,
&fast_elements,
&depth);
- return new ObjectLiteral(constant_properties,
+ return new(zone()) ObjectLiteral(constant_properties,
properties,
literal_index,
is_simple,
fast_elements,
- depth);
+ depth,
+ has_function);
}
@@ -3469,14 +3516,14 @@ Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
return NULL;
}
- int literal_index = temp_scope_->NextMaterializedLiteralIndex();
+ int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
Handle<String> js_pattern = NextLiteralString(TENURED);
scanner().ScanRegExpFlags();
Handle<String> js_flags = NextLiteralString(TENURED);
Next();
- return new RegExpLiteral(js_pattern, js_flags, literal_index);
+ return new(zone()) RegExpLiteral(js_pattern, js_flags, literal_index);
}
@@ -3484,7 +3531,7 @@ ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
// Arguments ::
// '(' (AssignmentExpression)*[','] ')'
- ZoneList<Expression*>* result = new ZoneList<Expression*>(4);
+ ZoneList<Expression*>* result = new(zone()) ZoneList<Expression*>(4);
Expect(Token::LPAREN, CHECK_OK);
bool done = (peek() == Token::RPAREN);
while (!done) {
@@ -3505,7 +3552,7 @@ ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
- bool name_is_reserved,
+ bool name_is_strict_reserved,
int function_token_position,
FunctionLiteralType type,
bool* ok) {
@@ -3517,49 +3564,56 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
  // this is the actual function name; otherwise, this is the name of the
// variable declared and initialized with the function (expression). In
// that case, we don't have a function name (it's empty).
- Handle<String> name = is_named ? var_name : Factory::empty_symbol();
+ Handle<String> name =
+ is_named ? var_name : isolate()->factory()->empty_symbol();
// The function name, if any.
- Handle<String> function_name = Factory::empty_symbol();
+ Handle<String> function_name = isolate()->factory()->empty_symbol();
if (is_named && (type == EXPRESSION || type == NESTED)) {
function_name = name;
}
int num_parameters = 0;
+ Scope* scope = NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
+ ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(8);
+ int materialized_literal_count;
+ int expected_property_count;
+ int start_pos;
+ int end_pos;
+ bool only_simple_this_property_assignments;
+ Handle<FixedArray> this_property_assignments;
+ bool has_duplicate_parameters = false;
// Parse function body.
- { Scope* scope =
- NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
- LexicalScope lexical_scope(&this->top_scope_, &this->with_nesting_level_,
- scope);
- TemporaryScope temp_scope(&this->temp_scope_);
+ { LexicalScope lexical_scope(this, scope, isolate());
top_scope_->SetScopeName(name);
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
- int start_pos = scanner().location().beg_pos;
- Scanner::Location name_loc = Scanner::NoLocation();
- Scanner::Location dupe_loc = Scanner::NoLocation();
- Scanner::Location reserved_loc = Scanner::NoLocation();
+ start_pos = scanner().location().beg_pos;
+ Scanner::Location name_loc = Scanner::Location::invalid();
+ Scanner::Location dupe_loc = Scanner::Location::invalid();
+ Scanner::Location reserved_loc = Scanner::Location::invalid();
bool done = (peek() == Token::RPAREN);
while (!done) {
- bool is_reserved = false;
+ bool is_strict_reserved = false;
Handle<String> param_name =
- ParseIdentifierOrReservedWord(&is_reserved, CHECK_OK);
+ ParseIdentifierOrStrictReservedWord(&is_strict_reserved,
+ CHECK_OK);
// Store locations for possible future error reports.
if (!name_loc.IsValid() && IsEvalOrArguments(param_name)) {
name_loc = scanner().location();
}
if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
+ has_duplicate_parameters = true;
dupe_loc = scanner().location();
}
- if (!reserved_loc.IsValid() && is_reserved) {
+ if (!reserved_loc.IsValid() && is_strict_reserved) {
reserved_loc = scanner().location();
}
- Variable* parameter = top_scope_->DeclareLocal(param_name, Variable::VAR);
- top_scope_->AddParameter(parameter);
+ top_scope_->DeclareParameter(param_name);
num_parameters++;
if (num_parameters > kMaxNumFunctionParameters) {
ReportMessageAt(scanner().location(), "too_many_parameters",
@@ -3573,7 +3627,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
Expect(Token::RPAREN, CHECK_OK);
Expect(Token::LBRACE, CHECK_OK);
- ZoneList<Statement*>* body = new ZoneList<Statement*>(8);
// If we have a named function expression, we add a local variable
// declaration to the body of the function with the name of the
@@ -3586,9 +3639,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
VariableProxy* fproxy =
top_scope_->NewUnresolved(function_name, inside_with());
fproxy->BindTo(fvar);
- body->Add(new ExpressionStatement(
- new Assignment(Token::INIT_CONST, fproxy,
- new ThisFunction(),
+ body->Add(new(zone()) ExpressionStatement(
+ new(zone()) Assignment(Token::INIT_CONST, fproxy,
+ new(zone()) ThisFunction(),
RelocInfo::kNoPosition)));
}
@@ -3601,11 +3654,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
parenthesized_function_ = false; // The bit was set for this function only.
int function_block_pos = scanner().location().beg_pos;
- int materialized_literal_count;
- int expected_property_count;
- int end_pos;
- bool only_simple_this_property_assignments;
- Handle<FixedArray> this_property_assignments;
if (is_lazily_compiled && pre_data() != NULL) {
FunctionEntry entry = pre_data()->GetFunctionEntry(function_block_pos);
if (!entry.is_valid()) {
@@ -3616,29 +3664,31 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
// End position greater than end of stream is safe, and hard to check.
ReportInvalidPreparseData(name, CHECK_OK);
}
- Counters::total_preparse_skipped.Increment(end_pos - function_block_pos);
+ isolate()->counters()->total_preparse_skipped()->Increment(
+ end_pos - function_block_pos);
// Seek to position just before terminal '}'.
scanner().SeekForward(end_pos - 1);
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
+ if (entry.strict_mode()) top_scope_->EnableStrictMode();
only_simple_this_property_assignments = false;
- this_property_assignments = Factory::empty_fixed_array();
+ this_property_assignments = isolate()->factory()->empty_fixed_array();
Expect(Token::RBRACE, CHECK_OK);
} else {
ParseSourceElements(body, Token::RBRACE, CHECK_OK);
- materialized_literal_count = temp_scope.materialized_literal_count();
- expected_property_count = temp_scope.expected_property_count();
+ materialized_literal_count = lexical_scope.materialized_literal_count();
+ expected_property_count = lexical_scope.expected_property_count();
only_simple_this_property_assignments =
- temp_scope.only_simple_this_property_assignments();
- this_property_assignments = temp_scope.this_property_assignments();
+ lexical_scope.only_simple_this_property_assignments();
+ this_property_assignments = lexical_scope.this_property_assignments();
Expect(Token::RBRACE, CHECK_OK);
end_pos = scanner().location().end_pos;
}
// Validate strict mode.
- if (temp_scope_->StrictMode()) {
+ if (top_scope_->is_strict_mode()) {
if (IsEvalOrArguments(name)) {
int position = function_token_position != RelocInfo::kNoPosition
? function_token_position
@@ -3661,7 +3711,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
*ok = false;
return NULL;
}
- if (name_is_reserved) {
+ if (name_is_strict_reserved) {
int position = function_token_position != RelocInfo::kNoPosition
? function_token_position
: (start_pos > 0 ? start_pos - 1 : start_pos);
@@ -3679,26 +3729,25 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
}
CheckOctalLiteral(start_pos, end_pos, CHECK_OK);
}
+ }
- FunctionLiteral* function_literal =
- new FunctionLiteral(name,
- top_scope_,
- body,
- materialized_literal_count,
- expected_property_count,
- only_simple_this_property_assignments,
- this_property_assignments,
- num_parameters,
- start_pos,
- end_pos,
- function_name->length() > 0,
- temp_scope.ContainsLoops(),
- temp_scope.StrictMode());
- function_literal->set_function_token_position(function_token_position);
+ FunctionLiteral* function_literal =
+ new(zone()) FunctionLiteral(name,
+ scope,
+ body,
+ materialized_literal_count,
+ expected_property_count,
+ only_simple_this_property_assignments,
+ this_property_assignments,
+ num_parameters,
+ start_pos,
+ end_pos,
+ (function_name->length() > 0),
+ has_duplicate_parameters);
+ function_literal->set_function_token_position(function_token_position);
- if (fni_ != NULL && !is_named) fni_->AddFunction(function_literal);
- return function_literal;
- }
+ if (fni_ != NULL && !is_named) fni_->AddFunction(function_literal);
+ return function_literal;
}
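
The lazily-compiled path above works because the preparser recorded, for each function's '{' position, where the body ends and what it contains; the parser can then seek past the body and reuse the counts instead of parsing it. A sketch of that lookup-and-skip, with a std::map standing in for the preparse data:

    #include <map>

    // Hypothetical preparse record for one function body.
    struct FunctionEntry {
      int end_pos;         // source position just past the terminal '}'
      int literal_count;   // materialized literals in the body
      int property_count;  // expected 'this.x = ...' assignments
      bool strict_mode;    // the body is strict code
    };

    // On success, seek the scanner (here just an int) to just before '}'.
    bool TrySkipBody(const std::map<int, FunctionEntry>& pre_data,
                     int block_pos, int* scan_pos, FunctionEntry* entry) {
      std::map<int, FunctionEntry>::const_iterator it =
          pre_data.find(block_pos);
      if (it == pre_data.end()) return false;             // no entry: parse it
      if (it->second.end_pos <= block_pos) return false;  // invalid data
      *entry = it->second;
      *scan_pos = entry->end_pos - 1;  // SeekForward(end_pos - 1) above
      return true;
    }

    int main() {
      std::map<int, FunctionEntry> pre_data;
      FunctionEntry e = {42, 3, 1, false};
      pre_data[10] = e;
      int pos = 10;
      FunctionEntry found;
      return (TrySkipBody(pre_data, 10, &pos, &found) && pos == 41) ? 0 : 1;
    }
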
@@ -3716,7 +3765,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
top_scope_->ForceEagerCompilation();
}
- Runtime::Function* function = Runtime::FunctionForSymbol(name);
+ const Runtime::Function* function = Runtime::FunctionForSymbol(name);
// Check for built-in IS_VAR macro.
if (function != NULL &&
@@ -3744,14 +3793,15 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
}
// We have a valid intrinsics call or a call to a builtin.
- return new CallRuntime(name, function, args);
+ return new(zone()) CallRuntime(name, function, args);
}
bool Parser::peek_any_identifier() {
Token::Value next = peek();
return next == Token::IDENTIFIER ||
- next == Token::FUTURE_RESERVED_WORD;
+ next == Token::FUTURE_RESERVED_WORD ||
+ next == Token::FUTURE_STRICT_RESERVED_WORD;
}
@@ -3789,7 +3839,7 @@ void Parser::ExpectSemicolon(bool* ok) {
Next();
return;
}
- if (scanner().has_line_terminator_before_next() ||
+ if (scanner().HasAnyLineTerminatorBeforeNext() ||
tok == Token::RBRACE ||
tok == Token::EOS) {
return;
@@ -3799,12 +3849,12 @@ void Parser::ExpectSemicolon(bool* ok) {
Literal* Parser::GetLiteralUndefined() {
- return new Literal(Factory::undefined_value());
+ return new(zone()) Literal(isolate()->factory()->undefined_value());
}
Literal* Parser::GetLiteralTheHole() {
- return new Literal(Factory::the_hole_value());
+ return new(zone()) Literal(isolate()->factory()->the_hole_value());
}
@@ -3813,22 +3863,27 @@ Literal* Parser::GetLiteralNumber(double value) {
}
+// Parses an identifier that is valid for the current scope; in particular, it
+// fails on strict-mode future reserved words in a strict scope.
Handle<String> Parser::ParseIdentifier(bool* ok) {
- bool is_reserved;
- return ParseIdentifierOrReservedWord(&is_reserved, ok);
+ if (top_scope_->is_strict_mode()) {
+ Expect(Token::IDENTIFIER, ok);
+ } else if (!Check(Token::IDENTIFIER)) {
+ Expect(Token::FUTURE_STRICT_RESERVED_WORD, ok);
+ }
+ if (!*ok) return Handle<String>();
+ return GetSymbol(ok);
}
-Handle<String> Parser::ParseIdentifierOrReservedWord(bool* is_reserved,
- bool* ok) {
- *is_reserved = false;
- if (temp_scope_->StrictMode()) {
- Expect(Token::IDENTIFIER, ok);
- } else {
- if (!Check(Token::IDENTIFIER)) {
- Expect(Token::FUTURE_RESERVED_WORD, ok);
- *is_reserved = true;
- }
+// Parses an identifier or a strict-mode future reserved word, and indicates
+// whether it is a strict-mode future reserved word.
+Handle<String> Parser::ParseIdentifierOrStrictReservedWord(
+ bool* is_strict_reserved, bool* ok) {
+ *is_strict_reserved = false;
+ if (!Check(Token::IDENTIFIER)) {
+ Expect(Token::FUTURE_STRICT_RESERVED_WORD, ok);
+ *is_strict_reserved = true;
}
if (!*ok) return Handle<String>();
return GetSymbol(ok);
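
The rule the two parse functions above implement reduces to a small predicate. The following is an illustrative sketch only, with a stand-in token enum rather than V8's Token class:

// Sketch, not V8 code: identifier acceptance after this patch. A plain
// identifier is always accepted; strict-mode future reserved words
// ('implements', 'let', 'yield', ...) count as identifiers only outside
// strict mode.
enum Tok { IDENT, FUTURE_RESERVED, FUTURE_STRICT_RESERVED, OTHER };

bool AcceptAsIdentifier(Tok t, bool strict_mode) {
  if (t == IDENT) return true;
  return !strict_mode && t == FUTURE_STRICT_RESERVED;
}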
@@ -3838,8 +3893,9 @@ Handle<String> Parser::ParseIdentifierOrReservedWord(bool* is_reserved,
Handle<String> Parser::ParseIdentifierName(bool* ok) {
Token::Value next = Next();
if (next != Token::IDENTIFIER &&
- next != Token::FUTURE_RESERVED_WORD &&
- !Token::IsKeyword(next)) {
+ next != Token::FUTURE_RESERVED_WORD &&
+ next != Token::FUTURE_STRICT_RESERVED_WORD &&
+ !Token::IsKeyword(next)) {
ReportUnexpectedToken(next);
*ok = false;
return Handle<String>();
@@ -3853,7 +3909,7 @@ Handle<String> Parser::ParseIdentifierName(bool* ok) {
void Parser::CheckStrictModeLValue(Expression* expression,
const char* error,
bool* ok) {
- ASSERT(temp_scope_->StrictMode());
+ ASSERT(top_scope_->is_strict_mode());
VariableProxy* lhs = expression != NULL
? expression->AsVariableProxy()
: NULL;
@@ -3865,12 +3921,14 @@ void Parser::CheckStrictModeLValue(Expression* expression,
}
-// Checks whether octal literal last seen is between beg_pos and end_pos.
-// If so, reports an error.
+// Checks whether an octal literal was last seen between beg_pos and end_pos.
+// If so, reports an error. Only called for strict mode.
void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- int octal = scanner().octal_position();
- if (beg_pos <= octal && octal <= end_pos) {
- ReportMessageAt(Scanner::Location(octal, octal + 1), "strict_octal_literal",
+ Scanner::Location octal = scanner().octal_position();
+ if (octal.IsValid() &&
+ beg_pos <= octal.beg_pos &&
+ octal.end_pos <= end_pos) {
+ ReportMessageAt(octal, "strict_octal_literal",
Vector<const char*>::empty());
scanner().clear_octal_position();
*ok = false;
@@ -3878,12 +3936,12 @@ void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
}
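
The check above now treats the octal literal's position as a source range rather than a single point: the error fires only when the whole range lies inside [beg_pos, end_pos]. Reduced to a standalone sketch (Location mirrors Scanner::Location's two fields; its IsValid definition here is an assumption):

// Sketch: the strict-mode octal error triggers only when the literal's
// entire source range is contained in the checked span.
struct Location {
  int beg_pos;
  int end_pos;
  bool IsValid() const { return 0 <= beg_pos && beg_pos <= end_pos; }
};

bool OctalWithin(const Location& octal, int beg_pos, int end_pos) {
  return octal.IsValid() &&
         beg_pos <= octal.beg_pos &&
         octal.end_pos <= end_pos;
}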
-// This function reads an identifier and determines whether or not it
+// This function reads an identifier name and determines whether or not it
// is 'get' or 'set'.
-Handle<String> Parser::ParseIdentifierOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- Handle<String> result = ParseIdentifier(ok);
+Handle<String> Parser::ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok) {
+ Handle<String> result = ParseIdentifierName(ok);
if (!*ok) return Handle<String>();
if (scanner().is_literal_ascii() && scanner().literal_length() == 3) {
const char* token = scanner().literal_ascii_string().start();
@@ -3940,7 +3998,7 @@ IterationStatement* Parser::LookupContinueTarget(Handle<String> label,
}
-void Parser::RegisterTargetUse(BreakTarget* target, Target* stop) {
+void Parser::RegisterTargetUse(Label* target, Target* stop) {
// Register that a break target found at the given stop in the
// target stack has been used from the top of the target stack. Add
// the break target to any TargetCollectors passed on the stack.
@@ -3952,12 +4010,12 @@ void Parser::RegisterTargetUse(BreakTarget* target, Target* stop) {
Literal* Parser::NewNumberLiteral(double number) {
- return new Literal(Factory::NewNumber(number, TENURED));
+ return new(zone()) Literal(isolate()->factory()->NewNumber(number, TENURED));
}
Expression* Parser::NewThrowReferenceError(Handle<String> type) {
- return NewThrowError(Factory::MakeReferenceError_symbol(),
+ return NewThrowError(isolate()->factory()->MakeReferenceError_symbol(),
type, HandleVector<Object>(NULL, 0));
}
@@ -3966,7 +4024,8 @@ Expression* Parser::NewThrowSyntaxError(Handle<String> type,
Handle<Object> first) {
int argc = first.is_null() ? 0 : 1;
Vector< Handle<Object> > arguments = HandleVector<Object>(&first, argc);
- return NewThrowError(Factory::MakeSyntaxError_symbol(), type, arguments);
+ return NewThrowError(
+ isolate()->factory()->MakeSyntaxError_symbol(), type, arguments);
}
@@ -3977,7 +4036,8 @@ Expression* Parser::NewThrowTypeError(Handle<String> type,
Handle<Object> elements[] = { first, second };
Vector< Handle<Object> > arguments =
HandleVector<Object>(elements, ARRAY_SIZE(elements));
- return NewThrowError(Factory::MakeTypeError_symbol(), type, arguments);
+ return NewThrowError(
+ isolate()->factory()->MakeTypeError_symbol(), type, arguments);
}
@@ -3985,215 +4045,44 @@ Expression* Parser::NewThrowError(Handle<String> constructor,
Handle<String> type,
Vector< Handle<Object> > arguments) {
int argc = arguments.length();
- Handle<FixedArray> elements = Factory::NewFixedArray(argc, TENURED);
+ Handle<FixedArray> elements = isolate()->factory()->NewFixedArray(argc,
+ TENURED);
for (int i = 0; i < argc; i++) {
Handle<Object> element = arguments[i];
if (!element.is_null()) {
elements->set(i, *element);
}
}
- Handle<JSArray> array = Factory::NewJSArrayWithElements(elements, TENURED);
+ Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(elements,
+ TENURED);
- ZoneList<Expression*>* args = new ZoneList<Expression*>(2);
- args->Add(new Literal(type));
- args->Add(new Literal(array));
- return new Throw(new CallRuntime(constructor, NULL, args),
+ ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2);
+ args->Add(new(zone()) Literal(type));
+ args->Add(new(zone()) Literal(array));
+ return new(zone()) Throw(new(zone()) CallRuntime(constructor, NULL, args),
scanner().location().beg_pos);
}
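
The new(zone()) allocations used throughout this hunk (and the rest of the patch) are placement-new into the isolate's Zone arena: AST nodes are never freed individually, and the whole arena is dropped when compilation finishes. A minimal sketch of the pattern, assuming a fixed-capacity arena (V8's real Zone grows in segments):

#include <cstddef>
#include <new>

// Bump-pointer arena: 8-byte aligned allocation, no per-object free,
// no bounds check (a real zone would grow or fail gracefully).
class Zone {
 public:
  explicit Zone(size_t capacity) : buf_(new char[capacity]), top_(buf_) {}
  ~Zone() { delete[] buf_; }  // every zone-allocated object dies here at once
  void* New(size_t size) {
    char* result = top_;
    top_ += (size + 7) & ~static_cast<size_t>(7);
    return result;
  }
 private:
  char* buf_;
  char* top_;
};

// Enables the new(zone) Foo(...) spelling seen above; no matching delete,
// since zone objects are never destroyed one by one.
inline void* operator new(size_t size, Zone* zone) { return zone->New(size); }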
// ----------------------------------------------------------------------------
-// JSON
-
-Handle<Object> JsonParser::ParseJson(Handle<String> script,
- UC16CharacterStream* source) {
- scanner_.Initialize(source);
- stack_overflow_ = false;
- Handle<Object> result = ParseJsonValue();
- if (result.is_null() || scanner_.Next() != Token::EOS) {
- if (stack_overflow_) {
- // Scanner failed.
- Top::StackOverflow();
- } else {
- // Parse failed. Scanner's current token is the unexpected token.
- Token::Value token = scanner_.current_token();
-
- const char* message;
- const char* name_opt = NULL;
-
- switch (token) {
- case Token::EOS:
- message = "unexpected_eos";
- break;
- case Token::NUMBER:
- message = "unexpected_token_number";
- break;
- case Token::STRING:
- message = "unexpected_token_string";
- break;
- case Token::IDENTIFIER:
- case Token::FUTURE_RESERVED_WORD:
- message = "unexpected_token_identifier";
- break;
- default:
- message = "unexpected_token";
- name_opt = Token::String(token);
- ASSERT(name_opt != NULL);
- break;
- }
-
- Scanner::Location source_location = scanner_.location();
- MessageLocation location(Factory::NewScript(script),
- source_location.beg_pos,
- source_location.end_pos);
- int argc = (name_opt == NULL) ? 0 : 1;
- Handle<JSArray> array = Factory::NewJSArray(argc);
- if (name_opt != NULL) {
- SetElement(array,
- 0,
- Factory::NewStringFromUtf8(CStrVector(name_opt)));
- }
- Handle<Object> result = Factory::NewSyntaxError(message, array);
- Top::Throw(*result, &location);
- return Handle<Object>::null();
- }
- }
- return result;
-}
-
-
-Handle<String> JsonParser::GetString() {
- int literal_length = scanner_.literal_length();
- if (literal_length == 0) {
- return Factory::empty_string();
- }
- if (scanner_.is_literal_ascii()) {
- return Factory::NewStringFromAscii(scanner_.literal_ascii_string());
- } else {
- return Factory::NewStringFromTwoByte(scanner_.literal_uc16_string());
- }
-}
-
-
-// Parse any JSON value.
-Handle<Object> JsonParser::ParseJsonValue() {
- Token::Value token = scanner_.Next();
- switch (token) {
- case Token::STRING:
- return GetString();
- case Token::NUMBER:
- return Factory::NewNumber(scanner_.number());
- case Token::FALSE_LITERAL:
- return Factory::false_value();
- case Token::TRUE_LITERAL:
- return Factory::true_value();
- case Token::NULL_LITERAL:
- return Factory::null_value();
- case Token::LBRACE:
- return ParseJsonObject();
- case Token::LBRACK:
- return ParseJsonArray();
- default:
- return ReportUnexpectedToken();
- }
-}
-
-
-// Parse a JSON object. Scanner must be right after '{' token.
-Handle<Object> JsonParser::ParseJsonObject() {
- Handle<JSFunction> object_constructor(
- Top::global_context()->object_function());
- Handle<JSObject> json_object = Factory::NewJSObject(object_constructor);
- if (scanner_.peek() == Token::RBRACE) {
- scanner_.Next();
- } else {
- if (StackLimitCheck().HasOverflowed()) {
- stack_overflow_ = true;
- return Handle<Object>::null();
- }
- do {
- if (scanner_.Next() != Token::STRING) {
- return ReportUnexpectedToken();
- }
- Handle<String> key = GetString();
- if (scanner_.Next() != Token::COLON) {
- return ReportUnexpectedToken();
- }
- Handle<Object> value = ParseJsonValue();
- if (value.is_null()) return Handle<Object>::null();
- uint32_t index;
- if (key->AsArrayIndex(&index)) {
- SetOwnElement(json_object, index, value);
- } else if (key->Equals(Heap::Proto_symbol())) {
- // We can't remove the __proto__ accessor since it's hardcoded
- // in several places. Instead go along and add the value as
- // the prototype of the created object if possible.
- SetPrototype(json_object, value);
- } else {
- SetLocalPropertyIgnoreAttributes(json_object, key, value, NONE);
- }
- } while (scanner_.Next() == Token::COMMA);
- if (scanner_.current_token() != Token::RBRACE) {
- return ReportUnexpectedToken();
- }
- }
- return json_object;
-}
-
-
-// Parse a JSON array. Scanner must be right after '[' token.
-Handle<Object> JsonParser::ParseJsonArray() {
- ZoneScope zone_scope(DELETE_ON_EXIT);
- ZoneList<Handle<Object> > elements(4);
-
- Token::Value token = scanner_.peek();
- if (token == Token::RBRACK) {
- scanner_.Next();
- } else {
- if (StackLimitCheck().HasOverflowed()) {
- stack_overflow_ = true;
- return Handle<Object>::null();
- }
- do {
- Handle<Object> element = ParseJsonValue();
- if (element.is_null()) return Handle<Object>::null();
- elements.Add(element);
- token = scanner_.Next();
- } while (token == Token::COMMA);
- if (token != Token::RBRACK) {
- return ReportUnexpectedToken();
- }
- }
-
- // Allocate a fixed array with all the elements.
- Handle<FixedArray> fast_elements =
- Factory::NewFixedArray(elements.length());
-
- for (int i = 0, n = elements.length(); i < n; i++) {
- fast_elements->set(i, *elements[i]);
- }
-
- return Factory::NewJSArrayWithElements(fast_elements);
-}
-
-// ----------------------------------------------------------------------------
// Regular expressions
RegExpParser::RegExpParser(FlatStringReader* in,
Handle<String>* error,
bool multiline)
- : error_(error),
- captures_(NULL),
- in_(in),
- current_(kEndMarker),
- next_pos_(0),
- capture_count_(0),
- has_more_(true),
- multiline_(multiline),
- simple_(false),
- contains_anchor_(false),
- is_scanned_for_captures_(false),
- failed_(false) {
+ : isolate_(Isolate::Current()),
+ error_(error),
+ captures_(NULL),
+ in_(in),
+ current_(kEndMarker),
+ next_pos_(0),
+ capture_count_(0),
+ has_more_(true),
+ multiline_(multiline),
+ simple_(false),
+ contains_anchor_(false),
+ is_scanned_for_captures_(false),
+ failed_(false) {
Advance();
}
@@ -4209,10 +4098,10 @@ uc32 RegExpParser::Next() {
void RegExpParser::Advance() {
if (next_pos_ < in()->length()) {
- StackLimitCheck check;
+ StackLimitCheck check(isolate());
if (check.HasOverflowed()) {
- ReportError(CStrVector(Top::kStackOverflowMessage));
- } else if (Zone::excess_allocation()) {
+ ReportError(CStrVector(Isolate::kStackOverflowMessage));
+ } else if (isolate()->zone()->excess_allocation()) {
ReportError(CStrVector("Regular expression too large"));
} else {
current_ = in()->Get(next_pos_);
@@ -4243,7 +4132,7 @@ bool RegExpParser::simple() {
RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
failed_ = true;
- *error_ = Factory::NewStringFromAscii(message, NOT_TENURED);
+ *error_ = isolate()->factory()->NewStringFromAscii(message, NOT_TENURED);
// Zip to the end to make sure that no more input is read.
current_ = kEndMarker;
next_pos_ = in()->length();
@@ -4313,13 +4202,13 @@ RegExpTree* RegExpParser::ParseDisjunction() {
// Build result of subexpression.
if (type == CAPTURE) {
- RegExpCapture* capture = new RegExpCapture(body, capture_index);
+ RegExpCapture* capture = new(zone()) RegExpCapture(body, capture_index);
captures_->at(capture_index - 1) = capture;
body = capture;
} else if (type != GROUPING) {
ASSERT(type == POSITIVE_LOOKAHEAD || type == NEGATIVE_LOOKAHEAD);
bool is_positive = (type == POSITIVE_LOOKAHEAD);
- body = new RegExpLookahead(body,
+ body = new(zone()) RegExpLookahead(body,
is_positive,
end_capture_index - capture_index,
capture_index);
@@ -4342,10 +4231,10 @@ RegExpTree* RegExpParser::ParseDisjunction() {
Advance();
if (multiline_) {
builder->AddAssertion(
- new RegExpAssertion(RegExpAssertion::START_OF_LINE));
+ new(zone()) RegExpAssertion(RegExpAssertion::START_OF_LINE));
} else {
builder->AddAssertion(
- new RegExpAssertion(RegExpAssertion::START_OF_INPUT));
+ new(zone()) RegExpAssertion(RegExpAssertion::START_OF_INPUT));
set_contains_anchor();
}
continue;
@@ -4355,15 +4244,16 @@ RegExpTree* RegExpParser::ParseDisjunction() {
RegExpAssertion::Type type =
multiline_ ? RegExpAssertion::END_OF_LINE :
RegExpAssertion::END_OF_INPUT;
- builder->AddAssertion(new RegExpAssertion(type));
+ builder->AddAssertion(new(zone()) RegExpAssertion(type));
continue;
}
case '.': {
Advance();
// everything except \x0a, \x0d, \u2028 and \u2029
- ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
+ ZoneList<CharacterRange>* ranges =
+ new(zone()) ZoneList<CharacterRange>(2);
CharacterRange::AddClassEscape('.', ranges);
- RegExpTree* atom = new RegExpCharacterClass(ranges, false);
+ RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
builder->AddAtom(atom);
break;
}
@@ -4388,7 +4278,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
Advance(2);
} else {
if (captures_ == NULL) {
- captures_ = new ZoneList<RegExpCapture*>(2);
+ captures_ = new(zone()) ZoneList<RegExpCapture*>(2);
}
if (captures_started() >= kMaxCaptures) {
ReportError(CStrVector("Too many captures") CHECK_FAILED);
@@ -4396,7 +4286,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
captures_->Add(NULL);
}
// Store current state and begin new disjunction parsing.
- stored_state = new RegExpParserState(stored_state,
+ stored_state = new(zone()) RegExpParserState(stored_state,
type,
captures_started());
builder = stored_state->builder();
@@ -4416,12 +4306,12 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case 'b':
Advance(2);
builder->AddAssertion(
- new RegExpAssertion(RegExpAssertion::BOUNDARY));
+ new(zone()) RegExpAssertion(RegExpAssertion::BOUNDARY));
continue;
case 'B':
Advance(2);
builder->AddAssertion(
- new RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
+ new(zone()) RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
continue;
// AtomEscape ::
// CharacterClassEscape
@@ -4431,9 +4321,10 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case 'd': case 'D': case 's': case 'S': case 'w': case 'W': {
uc32 c = Next();
Advance(2);
- ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
+ ZoneList<CharacterRange>* ranges =
+ new(zone()) ZoneList<CharacterRange>(2);
CharacterRange::AddClassEscape(c, ranges);
- RegExpTree* atom = new RegExpCharacterClass(ranges, false);
+ RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
builder->AddAtom(atom);
break;
}
@@ -4449,7 +4340,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
builder->AddEmpty();
break;
}
- RegExpTree* atom = new RegExpBackReference(capture);
+ RegExpTree* atom = new(zone()) RegExpBackReference(capture);
builder->AddAtom(atom);
break;
}
@@ -4597,30 +4488,6 @@ RegExpTree* RegExpParser::ParseDisjunction() {
}
}
-class SourceCharacter {
- public:
- static bool Is(uc32 c) {
- switch (c) {
- // case ']': case '}':
- // In spidermonkey and jsc these are treated as source characters
- // so we do too.
- case '^': case '$': case '\\': case '.': case '*': case '+':
- case '?': case '(': case ')': case '[': case '{': case '|':
- case RegExpParser::kEndMarker:
- return false;
- default:
- return true;
- }
- }
-};
-
-
-static unibrow::Predicate<SourceCharacter> source_character;
-
-
-static inline bool IsSourceCharacter(uc32 c) {
- return source_character.get(c);
-}
#ifdef DEBUG
// Currently only used in an ASSERT.
@@ -4951,7 +4818,7 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
is_negated = true;
Advance();
}
- ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
+ ZoneList<CharacterRange>* ranges = new(zone()) ZoneList<CharacterRange>(2);
while (has_more() && current() != ']') {
uc16 char_class = kNoCharClass;
CharacterRange first = ParseClassAtom(&char_class CHECK_FAILED);
@@ -4991,7 +4858,7 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
ranges->Add(CharacterRange::Everything());
is_negated = !is_negated;
}
- return new RegExpCharacterClass(ranges, is_negated);
+ return new(zone()) RegExpCharacterClass(ranges, is_negated);
}
@@ -5073,14 +4940,15 @@ int ScriptDataImpl::ReadNumber(byte** source) {
static ScriptDataImpl* DoPreParse(UC16CharacterStream* source,
bool allow_lazy,
ParserRecorder* recorder) {
- V8JavaScriptScanner scanner;
+ Isolate* isolate = Isolate::Current();
+ JavaScriptScanner scanner(isolate->unicode_cache());
scanner.Initialize(source);
- intptr_t stack_limit = StackGuard::real_climit();
+ intptr_t stack_limit = isolate->stack_guard()->real_climit();
if (!preparser::PreParser::PreParseProgram(&scanner,
recorder,
allow_lazy,
stack_limit)) {
- Top::StackOverflow();
+ isolate->StackOverflow();
return NULL;
}
@@ -5145,8 +5013,9 @@ bool ParserApi::Parse(CompilationInfo* info) {
Parser parser(script, true, NULL, NULL);
result = parser.ParseLazy(info);
} else {
+ // Whether we allow %identifier(..) syntax.
bool allow_natives_syntax =
- FLAG_allow_natives_syntax || Bootstrapper::IsActive();
+ info->allows_natives_syntax() || FLAG_allow_natives_syntax;
ScriptDataImpl* pre_data = info->pre_parse_data();
Parser parser(script, allow_natives_syntax, info->extension(), pre_data);
if (pre_data != NULL && pre_data->has_error()) {
@@ -5159,7 +5028,7 @@ bool ParserApi::Parse(CompilationInfo* info) {
DeleteArray(args[i]);
}
DeleteArray(args.start());
- ASSERT(Top::has_pending_exception());
+ ASSERT(info->isolate()->has_pending_exception());
} else {
Handle<String> source = Handle<String>(String::cast(script->source()));
result = parser.ParseProgram(source,
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index bc7bc562e..ea2e0d529 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -32,6 +32,7 @@
#include "ast.h"
#include "scanner.h"
#include "scopes.h"
+#include "preparse-data-format.h"
#include "preparse-data.h"
namespace v8 {
@@ -42,7 +43,7 @@ class FuncNameInferrer;
class ParserLog;
class PositionStack;
class Target;
-class TemporaryScope;
+class LexicalScope;
template <typename T> class ZoneListWrapper;
@@ -71,22 +72,14 @@ class FunctionEntry BASE_EMBEDDED {
FunctionEntry() : backing_(Vector<unsigned>::empty()) { }
int start_pos() { return backing_[kStartPosOffset]; }
- void set_start_pos(int value) { backing_[kStartPosOffset] = value; }
-
int end_pos() { return backing_[kEndPosOffset]; }
- void set_end_pos(int value) { backing_[kEndPosOffset] = value; }
-
int literal_count() { return backing_[kLiteralCountOffset]; }
- void set_literal_count(int value) { backing_[kLiteralCountOffset] = value; }
-
int property_count() { return backing_[kPropertyCountOffset]; }
- void set_property_count(int value) {
- backing_[kPropertyCountOffset] = value;
- }
+ bool strict_mode() { return backing_[kStrictModeOffset] != 0; }
bool is_valid() { return backing_.length() > 0; }
- static const int kSize = 4;
+ static const int kSize = 5;
private:
Vector<unsigned> backing_;
@@ -94,6 +87,7 @@ class FunctionEntry BASE_EMBEDDED {
static const int kEndPosOffset = 1;
static const int kLiteralCountOffset = 2;
static const int kPropertyCountOffset = 3;
+ static const int kStrictModeOffset = 4;
};
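
With the added strict-mode slot, each preparsed function now occupies five consecutive unsigned values, laid out by the offsets listed above. A hypothetical decoder for one entry under that layout:

// Sketch: decodes one 5-slot preparse entry; 'data' is assumed to point
// at the entry's first slot.
struct DecodedEntry {
  int start_pos;       // slot 0
  int end_pos;         // slot 1
  int literal_count;   // slot 2
  int property_count;  // slot 3
  bool strict_mode;    // slot 4, nonzero means strict
};

DecodedEntry ReadEntry(const unsigned* data) {
  DecodedEntry e;
  e.start_pos = static_cast<int>(data[0]);
  e.end_pos = static_cast<int>(data[1]);
  e.literal_count = static_cast<int>(data[2]);
  e.property_count = static_cast<int>(data[3]);
  e.strict_mode = data[4] != 0;
  return e;
}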
@@ -280,6 +274,9 @@ class RegExpBuilder: public ZoneObject {
void FlushCharacters();
void FlushText();
void FlushTerms();
+ Zone* zone() { return zone_; }
+
+ Zone* zone_;
bool pending_empty_;
ZoneList<uc16>* characters_;
BufferedZoneList<RegExpTree, 2> terms_;
@@ -388,6 +385,9 @@ class RegExpParser {
int disjunction_capture_index_;
};
+ Isolate* isolate() { return isolate_; }
+ Zone* zone() { return isolate_->zone(); }
+
uc32 current() { return current_; }
bool has_more() { return has_more_; }
bool has_next() { return next_pos_ < in()->length(); }
@@ -395,6 +395,7 @@ class RegExpParser {
FlatStringReader* in() { return in_; }
void ScanForCaptures();
+ Isolate* isolate_;
Handle<String>* error_;
ZoneList<RegExpCapture*>* captures_;
FlatStringReader* in_;
@@ -441,6 +442,7 @@ class Parser {
// construct a hashable id, so if more than 2^17 are allowed, this
// should be checked.
static const int kMaxNumFunctionParameters = 32766;
+ static const int kMaxNumFunctionLocals = 32767;
FunctionLiteral* ParseLazy(CompilationInfo* info,
UC16CharacterStream* source,
ZoneScope* zone_scope);
@@ -449,6 +451,9 @@ class Parser {
PARSE_EAGERLY
};
+ Isolate* isolate() { return isolate_; }
+ Zone* zone() { return isolate_->zone(); }
+
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(Handle<String> source,
bool in_global_context,
@@ -461,10 +466,13 @@ class Parser {
void ReportMessage(const char* message, Vector<const char*> args);
bool inside_with() const { return with_nesting_level_ > 0; }
- V8JavaScriptScanner& scanner() { return scanner_; }
+ JavaScriptScanner& scanner() { return scanner_; }
Mode mode() const { return mode_; }
ScriptDataImpl* pre_data() const { return pre_data_; }
+ // Check if the given string is 'eval' or 'arguments'.
+ bool IsEvalOrArguments(Handle<String> string);
+
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
@@ -483,10 +491,7 @@ class Parser {
Statement* ParseContinueStatement(bool* ok);
Statement* ParseBreakStatement(ZoneStringList* labels, bool* ok);
Statement* ParseReturnStatement(bool* ok);
- Block* WithHelper(Expression* obj,
- ZoneStringList* labels,
- bool is_catch_block,
- bool* ok);
+ Block* WithHelper(Expression* obj, ZoneStringList* labels, bool* ok);
Statement* ParseWithStatement(ZoneStringList* labels, bool* ok);
CaseClause* ParseCaseClause(bool* default_seen_ptr, bool* ok);
SwitchStatement* ParseSwitchStatement(ZoneStringList* labels, bool* ok);
@@ -574,7 +579,7 @@ class Parser {
if (stack_overflow_) {
return Token::ILLEGAL;
}
- if (StackLimitCheck().HasOverflowed()) {
+ if (StackLimitCheck(isolate()).HasOverflowed()) {
// Any further calls to Next or peek will return the illegal token.
// The current call must return the next token, which might already
// have been peek'ed.
@@ -592,21 +597,21 @@ class Parser {
Handle<String> LiteralString(PretenureFlag tenured) {
if (scanner().is_literal_ascii()) {
- return Factory::NewStringFromAscii(scanner().literal_ascii_string(),
- tenured);
+ return isolate_->factory()->NewStringFromAscii(
+ scanner().literal_ascii_string(), tenured);
} else {
- return Factory::NewStringFromTwoByte(scanner().literal_uc16_string(),
- tenured);
+ return isolate_->factory()->NewStringFromTwoByte(
+ scanner().literal_uc16_string(), tenured);
}
}
Handle<String> NextLiteralString(PretenureFlag tenured) {
if (scanner().is_next_literal_ascii()) {
- return Factory::NewStringFromAscii(scanner().next_literal_ascii_string(),
- tenured);
+ return isolate_->factory()->NewStringFromAscii(
+ scanner().next_literal_ascii_string(), tenured);
} else {
- return Factory::NewStringFromTwoByte(scanner().next_literal_uc16_string(),
- tenured);
+ return isolate_->factory()->NewStringFromTwoByte(
+ scanner().next_literal_uc16_string(), tenured);
}
}
@@ -618,11 +623,12 @@ class Parser {
Literal* GetLiteralNumber(double value);
Handle<String> ParseIdentifier(bool* ok);
- Handle<String> ParseIdentifierOrReservedWord(bool* is_reserved, bool* ok);
+ Handle<String> ParseIdentifierOrStrictReservedWord(
+ bool* is_strict_reserved, bool* ok);
Handle<String> ParseIdentifierName(bool* ok);
- Handle<String> ParseIdentifierOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok);
+ Handle<String> ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok);
// Strict mode validation of LValue expressions
void CheckStrictModeLValue(Expression* expression,
@@ -642,7 +648,7 @@ class Parser {
BreakableStatement* LookupBreakTarget(Handle<String> label, bool* ok);
IterationStatement* LookupContinueTarget(Handle<String> label, bool* ok);
- void RegisterTargetUse(BreakTarget* target, Target* stop);
+ void RegisterTargetUse(Label* target, Target* stop);
// Factory methods.
@@ -686,15 +692,16 @@ class Parser {
Handle<String> type,
Vector< Handle<Object> > arguments);
+ Isolate* isolate_;
ZoneList<Handle<String> > symbol_cache_;
Handle<Script> script_;
- V8JavaScriptScanner scanner_;
+ JavaScriptScanner scanner_;
Scope* top_scope_;
int with_nesting_level_;
- TemporaryScope* temp_scope_;
+ LexicalScope* lexical_scope_;
Mode mode_;
Target* target_stack_; // for break, continue statements
@@ -709,6 +716,8 @@ class Parser {
// Heuristically that means that the function will be called immediately,
// so never lazily compile it.
bool parenthesized_function_;
+
+ friend class LexicalScope;
};
@@ -742,61 +751,6 @@ class CompileTimeValue: public AllStatic {
DISALLOW_IMPLICIT_CONSTRUCTORS(CompileTimeValue);
};
-
-// ----------------------------------------------------------------------------
-// JSON PARSING
-
-// JSON is a subset of JavaScript, as specified in, e.g., the ECMAScript 5
-// specification section 15.12.1 (and appendix A.8).
-// The grammar is given section 15.12.1.2 (and appendix A.8.2).
-class JsonParser BASE_EMBEDDED {
- public:
- // Parse JSON input as a single JSON value.
- // Returns null handle and sets exception if parsing failed.
- static Handle<Object> Parse(Handle<String> source) {
- if (source->IsExternalTwoByteString()) {
- ExternalTwoByteStringUC16CharacterStream stream(
- Handle<ExternalTwoByteString>::cast(source), 0, source->length());
- return JsonParser().ParseJson(source, &stream);
- } else {
- GenericStringUC16CharacterStream stream(source, 0, source->length());
- return JsonParser().ParseJson(source, &stream);
- }
- }
-
- private:
- JsonParser() { }
- ~JsonParser() { }
-
- // Parse a string containing a single JSON value.
- Handle<Object> ParseJson(Handle<String> script, UC16CharacterStream* source);
- // Parse a single JSON value from input (grammar production JSONValue).
- // A JSON value is either a (double-quoted) string literal, a number literal,
- // one of "true", "false", or "null", or an object or array literal.
- Handle<Object> ParseJsonValue();
- // Parse a JSON object literal (grammar production JSONObject).
- // An object literal is a squiggly-braced and comma separated sequence
- // (possibly empty) of key/value pairs, where the key is a JSON string
- // literal, the value is a JSON value, and the two are separated by a colon.
- // A JSON array dosn't allow numbers and identifiers as keys, like a
- // JavaScript array.
- Handle<Object> ParseJsonObject();
- // Parses a JSON array literal (grammar production JSONArray). An array
- // literal is a square-bracketed and comma separated sequence (possibly empty)
- // of JSON values.
- // A JSON array doesn't allow leaving out values from the sequence, nor does
- // it allow a terminal comma, like a JavaScript array does.
- Handle<Object> ParseJsonArray();
-
- // Mark that a parsing error has happened at the current token, and
- // return a null handle. Primarily for readability.
- Handle<Object> ReportUnexpectedToken() { return Handle<Object>::null(); }
- // Converts the currently parsed literal to a JavaScript String.
- Handle<String> GetString();
-
- JsonScanner scanner_;
- bool stack_overflow_;
-};
} } // namespace v8::internal
#endif // V8_PARSER_H_
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
index a7cc5256f..b99d73584 100644
--- a/deps/v8/src/platform-cygwin.cc
+++ b/deps/v8/src/platform-cygwin.cc
@@ -42,7 +42,6 @@
#include "v8.h"
#include "platform.h"
-#include "top.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#include "win32-headers.h"
@@ -59,6 +58,9 @@ double ceiling(double x) {
}
+static Mutex* limit_mutex = NULL;
+
+
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -67,6 +69,7 @@ void OS::Setup() {
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
}
@@ -119,6 +122,9 @@ static void* highest_ever_allocated = reinterpret_cast<void*>(0);
static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ ASSERT(limit_mutex != NULL);
+ ScopedLock lock(limit_mutex);
+
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
@@ -143,7 +149,7 @@ void* OS::Allocate(const size_t requested,
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(StringEvent("OS::Allocate", "mmap failed"));
+ LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
@@ -254,6 +260,7 @@ void OS::LogSharedLibraryAddresses() {
const int kLibNameLen = FILENAME_MAX + 1;
char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+ i::Isolate* isolate = ISOLATE;
// This loop will terminate once the scanning hits an EOF.
while (true) {
uintptr_t start, end;
@@ -287,7 +294,7 @@ void OS::LogSharedLibraryAddresses() {
snprintf(lib_name, kLibNameLen,
"%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
}
- LOG(SharedLibraryEvent(lib_name, start, end));
+ LOG(isolate, SharedLibraryEvent(lib_name, start, end));
} else {
// Entry not describing executable data. Skip to end of line to setup
// reading the next entry.
@@ -314,103 +321,72 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
}
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
+// The VirtualMemory implementation is taken from platform-win32.cc.
+// The mmap-based virtual memory implementation as it is used on most POSIX
+// platforms does not work well because Cygwin does not support MAP_FIXED.
+// This causes VirtualMemory::Commit to not always commit the memory region
+// specified.
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
VirtualMemory::VirtualMemory(size_t size) {
- address_ = mmap(NULL, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
+ address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
size_ = size;
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+ if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
}
}
-bool VirtualMemory::IsReserved() {
- return address_ != MAP_FAILED;
-}
-
-
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-
- if (mprotect(address, size, prot) != 0) {
+ int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+ if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
return false;
}
- UpdateAllocatedSpaceLimits(address, size);
+ UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
return true;
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset) != MAP_FAILED;
+ ASSERT(IsReserved());
+ return VirtualFree(address, size, MEM_DECOMMIT) != false;
}
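
Because Cygwin lacks MAP_FIXED, the port switches to the Win32 two-phase scheme: reserve address space first, then commit pages inside the reservation on demand. A standalone sketch of that pattern (the VirtualAlloc/VirtualFree calls are the real Win32 API; the wrapper itself is illustrative):

#include <cstddef>
#include <windows.h>

// Reserve a range with no backing storage, then commit its first page.
// 4096 is an assumed page size; real code would query GetSystemInfo().
void* ReserveThenCommit(size_t size) {
  void* base = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
  if (base == NULL) return NULL;
  if (VirtualAlloc(base, 4096, MEM_COMMIT, PAGE_READWRITE) == NULL) {
    VirtualFree(base, 0, MEM_RELEASE);  // drop the whole reservation
    return NULL;
  }
  return base;
}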
-class ThreadHandle::PlatformData : public Malloced {
+class Thread::PlatformData : public Malloced {
public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- Initialize(kind);
- }
-
- void Initialize(ThreadHandle::Kind kind) {
- switch (kind) {
- case ThreadHandle::SELF: thread_ = pthread_self(); break;
- case ThreadHandle::INVALID: thread_ = kNoThread; break;
- }
- }
-
+ PlatformData() : thread_(kNoThread) {}
pthread_t thread_; // Thread handle for pthread.
};
-ThreadHandle::ThreadHandle(Kind kind) {
- data_ = new PlatformData(kind);
-}
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- data_->Initialize(kind);
+Thread::Thread(const Options& options)
+ : data_(new PlatformData),
+ stack_size_(options.stack_size) {
+ set_name(options.name);
}
-ThreadHandle::~ThreadHandle() {
- delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
- return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
- return data_->thread_ != kNoThread;
-}
-
-
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
- set_name("v8:<unknown>");
-}
-
-
-Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(const char* name)
+ : data_(new PlatformData),
+ stack_size_(0) {
set_name(name);
}
Thread::~Thread() {
+ delete data_;
}
@@ -419,8 +395,8 @@ static void* ThreadEntry(void* arg) {
// This is also initialized by the first argument to pthread_create() but we
// don't know which thread will run first (the original thread or the new
// one) so we initialize it here too.
- thread->thread_handle_data()->thread_ = pthread_self();
- ASSERT(thread->IsValid());
+ thread->data()->thread_ = pthread_self();
+ ASSERT(thread->data()->thread_ != kNoThread);
thread->Run();
return NULL;
}
@@ -433,13 +409,20 @@ void Thread::set_name(const char* name) {
void Thread::Start() {
- pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
- ASSERT(IsValid());
+ pthread_attr_t* attr_ptr = NULL;
+ pthread_attr_t attr;
+ if (stack_size_ > 0) {
+ pthread_attr_init(&attr);
+ pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
+ attr_ptr = &attr;
+ }
+ pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+ ASSERT(data_->thread_ != kNoThread);
}
void Thread::Join() {
- pthread_join(thread_handle_data()->thread_, NULL);
+ pthread_join(data_->thread_, NULL);
}
@@ -617,128 +600,176 @@ Semaphore* OS::CreateSemaphore(int count) {
class Sampler::PlatformData : public Malloced {
public:
- explicit PlatformData(Sampler* sampler) {
- sampler_ = sampler;
- sampler_thread_ = INVALID_HANDLE_VALUE;
- profiled_thread_ = INVALID_HANDLE_VALUE;
+ // Get a handle to the calling thread. This is the thread that we are
+ // going to profile. We need to make a copy of the handle because we are
+ // going to use it in the sampler thread. Using GetThreadHandle() will
+ // not work in this case. We're using OpenThread because DuplicateHandle
+ // for some reason doesn't work in Chrome's sandbox.
+ PlatformData() : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
+ THREAD_SUSPEND_RESUME |
+ THREAD_QUERY_INFORMATION,
+ false,
+ GetCurrentThreadId())) {}
+
+ ~PlatformData() {
+ if (profiled_thread_ != NULL) {
+ CloseHandle(profiled_thread_);
+ profiled_thread_ = NULL;
+ }
}
- Sampler* sampler_;
- HANDLE sampler_thread_;
+ HANDLE profiled_thread() { return profiled_thread_; }
+
+ private:
HANDLE profiled_thread_;
- RuntimeProfilerRateLimiter rate_limiter_;
+};
+
+
+class SamplerThread : public Thread {
+ public:
+ explicit SamplerThread(int interval)
+ : Thread("SamplerThread"),
+ interval_(interval) {}
+
+ static void AddActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::AddActiveSampler(sampler);
+ if (instance_ == NULL) {
+ instance_ = new SamplerThread(sampler->interval());
+ instance_->Start();
+ } else {
+ ASSERT(instance_->interval_ == sampler->interval());
+ }
+ }
- // Sampler thread handler.
- void Runner() {
- while (sampler_->IsActive()) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- Sample();
- Sleep(sampler_->interval_);
+ static void RemoveActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::RemoveActiveSampler(sampler);
+ if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+ RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
+ instance_->Join();
+ delete instance_;
+ instance_ = NULL;
}
}
- void Sample() {
- if (sampler_->IsProfiling()) {
- // Context used for sampling the register state of the profiled thread.
- CONTEXT context;
- memset(&context, 0, sizeof(context));
+ // Implement Thread::Run().
+ virtual void Run() {
+ SamplerRegistry::State state;
+ while ((state = SamplerRegistry::GetState()) !=
+ SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ // When CPU profiling is enabled, both JavaScript and C++ code are
+ // profiled. We must not suspend.
+ if (!cpu_profiling_enabled) {
+ if (rate_limiter_.SuspendIfNecessary()) continue;
+ }
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
+ }
+ OS::Sleep(interval_);
+ }
+ }
+
+ static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ if (!sampler->IsProfiling()) return;
+ SamplerThread* sampler_thread =
+ reinterpret_cast<SamplerThread*>(raw_sampler_thread);
+ sampler_thread->SampleContext(sampler);
+ }
+
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
+ void SampleContext(Sampler* sampler) {
+ HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
+ if (profiled_thread == NULL) return;
+
+ // Context used for sampling the register state of the profiled thread.
+ CONTEXT context;
+ memset(&context, 0, sizeof(context));
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
+ if (sample == NULL) sample = &sample_obj;
- static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
- if (SuspendThread(profiled_thread_) == kSuspendFailed) return;
- sample->state = Top::current_vm_state();
+ static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+ if (SuspendThread(profiled_thread) == kSuspendFailed) return;
+ sample->state = sampler->isolate()->current_vm_state();
- context.ContextFlags = CONTEXT_FULL;
- if (GetThreadContext(profiled_thread_, &context) != 0) {
+ context.ContextFlags = CONTEXT_FULL;
+ if (GetThreadContext(profiled_thread, &context) != 0) {
#if V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(context.Rip);
- sample->sp = reinterpret_cast<Address>(context.Rsp);
- sample->fp = reinterpret_cast<Address>(context.Rbp);
+ sample->pc = reinterpret_cast<Address>(context.Rip);
+ sample->sp = reinterpret_cast<Address>(context.Rsp);
+ sample->fp = reinterpret_cast<Address>(context.Rbp);
#else
- sample->pc = reinterpret_cast<Address>(context.Eip);
- sample->sp = reinterpret_cast<Address>(context.Esp);
- sample->fp = reinterpret_cast<Address>(context.Ebp);
+ sample->pc = reinterpret_cast<Address>(context.Eip);
+ sample->sp = reinterpret_cast<Address>(context.Esp);
+ sample->fp = reinterpret_cast<Address>(context.Ebp);
#endif
- sampler_->SampleStack(sample);
- sampler_->Tick(sample);
- }
- ResumeThread(profiled_thread_);
+ sampler->SampleStack(sample);
+ sampler->Tick(sample);
}
- if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
+ ResumeThread(profiled_thread);
}
+
+ const int interval_;
+ RuntimeProfilerRateLimiter rate_limiter_;
+
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static SamplerThread* instance_;
+
+ DISALLOW_COPY_AND_ASSIGN(SamplerThread);
};
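
AddActiveSampler and RemoveActiveSampler above give every sampler in the process one shared worker thread, started on first registration and joined when the last sampler goes away. The lifecycle, reduced to a counter-based sketch (std::mutex and Worker stand in for V8's Mutex and the sampler thread; the real code tracks a registry, not a count):

#include <mutex>

struct Worker { /* would start, run, and join the sampling thread */ };

static std::mutex g_mutex;
static Worker* g_instance = 0;
static int g_count = 0;

void RegisterSampler() {
  std::lock_guard<std::mutex> lock(g_mutex);
  if (g_count++ == 0) g_instance = new Worker();  // first sampler starts it
}

void UnregisterSampler() {
  std::lock_guard<std::mutex> lock(g_mutex);
  if (--g_count == 0) {  // last sampler tears it down
    delete g_instance;
    g_instance = 0;
  }
}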
-// Entry point for sampler thread.
-static DWORD __stdcall SamplerEntry(void* arg) {
- Sampler::PlatformData* data =
- reinterpret_cast<Sampler::PlatformData*>(arg);
- data->Runner();
- return 0;
-}
+Mutex* SamplerThread::mutex_ = OS::CreateMutex();
+SamplerThread* SamplerThread::instance_ = NULL;
-// Initialize a profile sampler.
-Sampler::Sampler(int interval)
- : interval_(interval),
+Sampler::Sampler(Isolate* isolate, int interval)
+ : isolate_(isolate),
+ interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
- data_ = new PlatformData(this);
+ data_ = new PlatformData;
}
Sampler::~Sampler() {
+ ASSERT(!IsActive());
delete data_;
}
-// Start profiling.
void Sampler::Start() {
- // Do not start multiple threads for the same sampler.
ASSERT(!IsActive());
-
- // Get a handle to the calling thread. This is the thread that we are
- // going to profile. We need to make a copy of the handle because we are
- // going to use it in the sampler thread. Using GetThreadHandle() will
- // not work in this case. We're using OpenThread because DuplicateHandle
- // for some reason doesn't work in Chrome's sandbox.
- data_->profiled_thread_ = OpenThread(THREAD_GET_CONTEXT |
- THREAD_SUSPEND_RESUME |
- THREAD_QUERY_INFORMATION,
- false,
- GetCurrentThreadId());
- BOOL ok = data_->profiled_thread_ != NULL;
- if (!ok) return;
-
- // Start sampler thread.
- DWORD tid;
SetActive(true);
- data_->sampler_thread_ = CreateThread(NULL, 0, SamplerEntry, data_, 0, &tid);
- // Set thread to high priority to increase sampling accuracy.
- SetThreadPriority(data_->sampler_thread_, THREAD_PRIORITY_TIME_CRITICAL);
+ SamplerThread::AddActiveSampler(this);
}
-// Stop profiling.
void Sampler::Stop() {
- // Seting active to false triggers termination of the sampler
- // thread.
+ ASSERT(IsActive());
+ SamplerThread::RemoveActiveSampler(this);
SetActive(false);
-
- // Wait for sampler thread to terminate.
- Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
- WaitForSingleObject(data_->sampler_thread_, INFINITE);
-
- // Release the thread handles
- CloseHandle(data_->sampler_thread_);
- CloseHandle(data_->profiled_thread_);
}
-
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index 4f93093c2..1eefaa396 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -42,6 +42,7 @@
#include <sys/stat.h> // open
#include <sys/fcntl.h> // open
#include <unistd.h> // getpagesize
+// If you don't have execinfo.h then you need devel/libexecinfo from ports.
#include <execinfo.h> // backtrace, backtrace_symbols
#include <strings.h> // index
#include <errno.h>
@@ -51,6 +52,7 @@
#undef MAP_TYPE
#include "v8.h"
+#include "v8threads.h"
#include "platform.h"
#include "vm-state-inl.h"
@@ -74,6 +76,9 @@ double ceiling(double x) {
}
+static Mutex* limit_mutex = NULL;
+
+
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -82,6 +87,7 @@ void OS::Setup() {
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
}
@@ -130,6 +136,9 @@ static void* highest_ever_allocated = reinterpret_cast<void*>(0);
static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ ASSERT(limit_mutex != NULL);
+ ScopedLock lock(limit_mutex);
+
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
@@ -155,7 +164,7 @@ void* OS::Allocate(const size_t requested,
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(StringEvent("OS::Allocate", "mmap failed"));
+ LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
@@ -299,7 +308,7 @@ void OS::LogSharedLibraryAddresses() {
// There may be no filename in this line. Skip to next.
if (start_of_path == NULL) continue;
buffer[bytes_read] = 0;
- LOG(SharedLibraryEvent(start_of_path, start, end));
+ LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
}
close(fd);
#endif
@@ -383,58 +392,28 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
-class ThreadHandle::PlatformData : public Malloced {
+class Thread::PlatformData : public Malloced {
public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- Initialize(kind);
- }
-
- void Initialize(ThreadHandle::Kind kind) {
- switch (kind) {
- case ThreadHandle::SELF: thread_ = pthread_self(); break;
- case ThreadHandle::INVALID: thread_ = kNoThread; break;
- }
- }
pthread_t thread_; // Thread handle for pthread.
};
-ThreadHandle::ThreadHandle(Kind kind) {
- data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
- delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
- return pthread_equal(data_->thread_, pthread_self());
+Thread::Thread(const Options& options)
+ : data_(new PlatformData),
+ stack_size_(options.stack_size) {
+ set_name(options.name);
}
-bool ThreadHandle::IsValid() const {
- return data_->thread_ != kNoThread;
-}
-
-
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
- set_name("v8:<unknown>");
-}
-
-
-Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(const char* name)
+ : data_(new PlatformData),
+ stack_size_(0) {
set_name(name);
}
Thread::~Thread() {
+ delete data_;
}
@@ -443,8 +422,8 @@ static void* ThreadEntry(void* arg) {
// This is also initialized by the first argument to pthread_create() but we
// don't know which thread will run first (the original thread or the new
// one) so we initialize it here too.
- thread->thread_handle_data()->thread_ = pthread_self();
- ASSERT(thread->IsValid());
+ thread->data()->thread_ = pthread_self();
+ ASSERT(thread->data()->thread_ != kNoThread);
thread->Run();
return NULL;
}
@@ -457,13 +436,20 @@ void Thread::set_name(const char* name) {
void Thread::Start() {
- pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
- ASSERT(IsValid());
+ pthread_attr_t* attr_ptr = NULL;
+ pthread_attr_t attr;
+ if (stack_size_ > 0) {
+ pthread_attr_init(&attr);
+ pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
+ attr_ptr = &attr;
+ }
+ pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+ ASSERT(data_->thread_ != kNoThread);
}
void Thread::Join() {
- pthread_join(thread_handle_data()->thread_, NULL);
+ pthread_join(data_->thread_, NULL);
}
@@ -528,7 +514,12 @@ class FreeBSDMutex : public Mutex {
virtual bool TryLock() {
int result = pthread_mutex_trylock(&mutex_);
- return result == 0;
+ // Return false if the lock is busy and locking failed.
+ if (result == EBUSY) {
+ return false;
+ }
+ ASSERT(result == 0); // Verify no other errors.
+ return true;
}
private:
@@ -599,107 +590,232 @@ Semaphore* OS::CreateSemaphore(int count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
-static Sampler* active_sampler_ = NULL;
+static pthread_t GetThreadID() {
+ pthread_t thread_id = pthread_self();
+ return thread_id;
+}
+
+
+class Sampler::PlatformData : public Malloced {
+ public:
+ PlatformData() : vm_tid_(GetThreadID()) {}
+
+ pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+ pthread_t vm_tid_;
+};
+
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
- if (active_sampler_ == NULL) return;
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
+ // We require a fully initialized and entered isolate.
+ return;
+ }
+ if (v8::Locker::IsActive() &&
+ !isolate->thread_manager()->IsLockedByCurrentThread()) {
+ return;
+ }
- TickSample sample;
+ Sampler* sampler = isolate->logger()->sampler();
+ if (sampler == NULL || !sampler->IsActive()) return;
- // We always sample the VM state.
- sample.state = Top::current_vm_state();
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+ if (sample == NULL) sample = &sample_obj;
- // If profiling, we extract the current pc and sp.
- if (active_sampler_->IsProfiling()) {
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
+ // Extracting the sample from the context is extremely machine dependent.
+ ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+ mcontext_t& mcontext = ucontext->uc_mcontext;
+ sample->state = isolate->current_vm_state();
#if V8_HOST_ARCH_IA32
- sample.pc = reinterpret_cast<Address>(mcontext.mc_eip);
- sample.sp = reinterpret_cast<Address>(mcontext.mc_esp);
- sample.fp = reinterpret_cast<Address>(mcontext.mc_ebp);
+ sample->pc = reinterpret_cast<Address>(mcontext.mc_eip);
+ sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
+ sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
#elif V8_HOST_ARCH_X64
- sample.pc = reinterpret_cast<Address>(mcontext.mc_rip);
- sample.sp = reinterpret_cast<Address>(mcontext.mc_rsp);
- sample.fp = reinterpret_cast<Address>(mcontext.mc_rbp);
+ sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
+ sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
+ sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
#elif V8_HOST_ARCH_ARM
- sample.pc = reinterpret_cast<Address>(mcontext.mc_r15);
- sample.sp = reinterpret_cast<Address>(mcontext.mc_r13);
- sample.fp = reinterpret_cast<Address>(mcontext.mc_r11);
+ sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
+ sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
+ sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
#endif
- active_sampler_->SampleStack(&sample);
- }
-
- active_sampler_->Tick(&sample);
+ sampler->SampleStack(sample);
+ sampler->Tick(sample);
}
-class Sampler::PlatformData : public Malloced {
+class SignalSender : public Thread {
public:
- PlatformData() {
- signal_handler_installed_ = false;
+ enum SleepInterval {
+ HALF_INTERVAL,
+ FULL_INTERVAL
+ };
+
+ explicit SignalSender(int interval)
+ : Thread("SignalSender"),
+ interval_(interval) {}
+
+ static void AddActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::AddActiveSampler(sampler);
+ if (instance_ == NULL) {
+ // Install a signal handler.
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ signal_handler_installed_ =
+ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+
+ // Start a thread that sends SIGPROF signal to VM threads.
+ instance_ = new SignalSender(sampler->interval());
+ instance_->Start();
+ } else {
+ ASSERT(instance_->interval_ == sampler->interval());
+ }
+ }
+
+ static void RemoveActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::RemoveActiveSampler(sampler);
+ if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+ RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
+ instance_->Join();
+ delete instance_;
+ instance_ = NULL;
+
+ // Restore the old signal handler.
+ if (signal_handler_installed_) {
+ sigaction(SIGPROF, &old_signal_handler_, 0);
+ signal_handler_installed_ = false;
+ }
+ }
+ }
+
+ // Implement Thread::Run().
+ virtual void Run() {
+ SamplerRegistry::State state;
+ while ((state = SamplerRegistry::GetState()) !=
+ SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ // When CPU profiling is enabled, both JavaScript and C++ code are
+ // profiled. We must not suspend.
+ if (!cpu_profiling_enabled) {
+ if (rate_limiter_.SuspendIfNecessary()) continue;
+ }
+ if (cpu_profiling_enabled && runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
+ } else {
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+ this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
+ NULL)) {
+ return;
+ }
+ }
+ Sleep(FULL_INTERVAL);
+ }
+ }
+ }
+
+ static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
+ if (!sampler->IsProfiling()) return;
+ SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
+ sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
- bool signal_handler_installed_;
- struct sigaction old_signal_handler_;
- struct itimerval old_timer_value_;
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
+ void SendProfilingSignal(pthread_t tid) {
+ if (!signal_handler_installed_) return;
+ pthread_kill(tid, SIGPROF);
+ }
+
+ void Sleep(SleepInterval full_or_half) {
+ // Convert ms to us and subtract 100 us to compensate for delays
+ // occurring during signal delivery.
+ useconds_t interval = interval_ * 1000 - 100;
+ if (full_or_half == HALF_INTERVAL) interval /= 2;
+ int result = usleep(interval);
+#ifdef DEBUG
+ if (result != 0 && errno != EINTR) {
+ fprintf(stderr,
+ "SignalSender usleep error; interval = %u, errno = %d\n",
+ interval,
+ errno);
+ ASSERT(result == 0 || errno == EINTR);
+ }
+#endif
+ USE(result);
+ }
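
Sleep above deliberately undershoots the nominal interval by 100 us to absorb signal-delivery latency, and halves it when CPU and runtime profiling ticks are interleaved. The arithmetic in isolation (assumes interval_ms >= 1 so the subtraction cannot underflow):

// Sketch of the interval computation in SignalSender::Sleep.
unsigned SleepMicros(int interval_ms, bool half_interval) {
  unsigned us = static_cast<unsigned>(interval_ms) * 1000 - 100;
  return half_interval ? us / 2 : us;
}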
+
+ const int interval_;
+ RuntimeProfilerRateLimiter rate_limiter_;
+
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static SignalSender* instance_;
+ static bool signal_handler_installed_;
+ static struct sigaction old_signal_handler_;
+
+ DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
+Mutex* SignalSender::mutex_ = OS::CreateMutex();
+SignalSender* SignalSender::instance_ = NULL;
+struct sigaction SignalSender::old_signal_handler_;
+bool SignalSender::signal_handler_installed_ = false;
-Sampler::Sampler(int interval)
- : interval_(interval),
+
+Sampler::Sampler(Isolate* isolate, int interval)
+ : isolate_(isolate),
+ interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
- data_ = new PlatformData();
+ data_ = new PlatformData;
}
Sampler::~Sampler() {
+ ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
- // There can only be one active sampler at the time on POSIX
- // platforms.
- if (active_sampler_ != NULL) return;
-
- // Request profiling signals.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
- data_->signal_handler_installed_ = true;
-
- // Set the itimer to generate a tick for each interval.
- itimerval itimer;
- itimer.it_interval.tv_sec = interval_ / 1000;
- itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
- itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
- itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
- setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
-
- // Set this sampler as the active sampler.
- active_sampler_ = this;
- active_ = true;
+ ASSERT(!IsActive());
+ SetActive(true);
+ SignalSender::AddActiveSampler(this);
}
void Sampler::Stop() {
- // Restore old signal handler
- if (data_->signal_handler_installed_) {
- setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
- sigaction(SIGPROF, &data_->old_signal_handler_, 0);
- data_->signal_handler_installed_ = false;
- }
-
- // This sampler is no longer the active sampler.
- active_sampler_ = NULL;
- active_ = false;
+ ASSERT(IsActive());
+ SignalSender::RemoveActiveSampler(this);
+ SetActive(false);
}
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index 733956ace..228d6f4d1 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -58,7 +58,6 @@
#include "v8.h"
#include "platform.h"
-#include "top.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -76,6 +75,9 @@ double ceiling(double x) {
}
+static Mutex* limit_mutex = NULL;
+
+
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -84,16 +86,34 @@ void OS::Setup() {
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
+
+#ifdef __arm__
+ // When running on ARM hardware check that the EABI used by V8 and
+ // by the C code is the same.
+ bool hard_float = OS::ArmUsingHardFloat();
+ if (hard_float) {
+#if !USE_EABI_HARDFLOAT
+ PrintF("ERROR: Binary compiled with -mfloat-abi=hard but without "
+ "-DUSE_EABI_HARDFLOAT\n");
+ exit(1);
+#endif
+ } else {
+#if USE_EABI_HARDFLOAT
+ PrintF("ERROR: Binary not compiled with -mfloat-abi=hard but with "
+ "-DUSE_EABI_HARDFLOAT\n");
+ exit(1);
+#endif
+ }
+#endif
}
uint64_t OS::CpuFeaturesImpliedByPlatform() {
-#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
- // Here gcc is telling us that we are on an ARM and gcc is assuming that we
- // have VFP3 instructions. If gcc can assume it then so can we.
- return 1u << VFP3;
-#elif CAN_USE_ARMV7_INSTRUCTIONS
- return 1u << ARMv7;
+#if (defined(__mips_hard_float) && __mips_hard_float != 0)
+ // Here gcc is telling us that we are on a MIPS and gcc is assuming that we
+ // have FPU instructions. If gcc can assume it then so can we.
+ return 1u << FPU;
#else
return 0; // Linux runs on anything.
#endif
@@ -134,6 +154,7 @@ static bool CPUInfoContainsString(const char * search_string) {
return false;
}
+
bool OS::ArmCpuHasFeature(CpuFeature feature) {
const char* search_string = NULL;
// Simple detection of VFP at runtime for Linux.
@@ -169,9 +190,105 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
return false;
}
+
+
+// Simple helper function to detect whether the C code is compiled with
+// option -mfloat-abi=hard. The register d0 is loaded with 1.0 and the register
+// pair r0, r1 is loaded with 0.0. If -mfloat-abi=hard is passed to GCC then
+// calling this will return 1.0, otherwise 0.0.
+static void ArmUsingHardFloatHelper() {
+ asm("mov r0, #0");
+#if defined(__VFP_FP__) && !defined(__SOFTFP__)
+ // Load 0x3ff00000 into r1 using instructions available in both ARM
+ // and Thumb mode.
+ asm("mov r1, #3");
+ asm("mov r2, #255");
+ asm("lsl r1, r1, #8");
+ asm("orr r1, r1, r2");
+ asm("lsl r1, r1, #20");
+ // For vmov d0, r0, r1 use ARM mode.
+#ifdef __thumb__
+ asm volatile(
+ "@ Enter ARM Mode \n\t"
+ " adr r3, 1f \n\t"
+ " bx r3 \n\t"
+ " .ALIGN 4 \n\t"
+ " .ARM \n"
+ "1: vmov d0, r0, r1 \n\t"
+ "@ Enter THUMB Mode\n\t"
+ " adr r3, 2f+1 \n\t"
+ " bx r3 \n\t"
+ " .THUMB \n"
+ "2: \n\t");
+#else
+ asm("vmov d0, r0, r1");
+#endif // __thumb__
+#endif // defined(__VFP_FP__) && !defined(__SOFTFP__)
+ asm("mov r1, #0");
+}
+
+
+bool OS::ArmUsingHardFloat() {
+ // Cast helper function from returning void to returning double.
+ typedef double (*F)();
+ F f = FUNCTION_CAST<F>(FUNCTION_ADDR(ArmUsingHardFloatHelper));
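+ // Under the hard-float ABI the double result is read from d0, which the
+ // helper set to 1.0; under soft-float it comes from r0/r1, which hold 0.0.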
+ return f() == 1.0;
+}
#endif // def __arm__
+#ifdef __mips__
+bool OS::MipsCpuHasFeature(CpuFeature feature) {
+ const char* search_string = NULL;
+ const char* file_name = "/proc/cpuinfo";
+ // Simple detection of FPU at runtime for Linux.
+ // It is based on /proc/cpuinfo, which reveals hardware configuration
+ // to user-space applications. According to MIPS (early 2010), no similar
+ // facility is universally available on MIPS architectures, so it is up
+ // to individual OSes to provide one.
+ //
+ // This is written as a straight-shot, one-pass parser that avoids STL
+ // strings and ifstream because, on Linux, it reads from a
+ // (non-mmap-able) character special device.
+
+ switch (feature) {
+ case FPU:
+ search_string = "FPU";
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ FILE* f = NULL;
+ const char* what = search_string;
+
+ if (NULL == (f = fopen(file_name, "r")))
+ return false;
+
+ int k;
+ while (EOF != (k = fgetc(f))) {
+ if (k == *what) {
+ ++what;
+ while ((*what != '\0') && (*what == fgetc(f))) {
+ ++what;
+ }
+ if (*what == '\0') {
+ fclose(f);
+ return true;
+ } else {
+ what = search_string;
+ }
+ }
+ }
+ fclose(f);
+
+ // Did not find string in the proc file.
+ return false;
+}
+#endif // def __mips__
+
+
int OS::ActivationFrameAlignment() {
#ifdef V8_TARGET_ARCH_ARM
// On EABI ARM targets this is required for fp correctness in the
@@ -187,8 +304,9 @@ int OS::ActivationFrameAlignment() {
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-#if defined(V8_TARGET_ARCH_ARM) && defined(__arm__)
- // Only use on ARM hardware.
+#if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \
+ (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__))
+ // Only use on ARM or MIPS hardware.
MemoryBarrier();
#else
__asm__ __volatile__("" : : : "memory");
@@ -226,6 +344,9 @@ static void* highest_ever_allocated = reinterpret_cast<void*>(0);
static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ ASSERT(limit_mutex != NULL);
+ ScopedLock lock(limit_mutex);
+
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
@@ -251,7 +372,8 @@ void* OS::Allocate(const size_t requested,
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(StringEvent("OS::Allocate", "mmap failed"));
+ LOG(i::Isolate::Current(),
+ StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
@@ -372,6 +494,7 @@ void OS::LogSharedLibraryAddresses() {
const int kLibNameLen = FILENAME_MAX + 1;
char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+ i::Isolate* isolate = ISOLATE;
// This loop will terminate once the scanning hits an EOF.
while (true) {
uintptr_t start, end;
@@ -405,7 +528,7 @@ void OS::LogSharedLibraryAddresses() {
snprintf(lib_name, kLibNameLen,
"%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
}
- LOG(SharedLibraryEvent(lib_name, start, end));
+ LOG(isolate, SharedLibraryEvent(lib_name, start, end));
} else {
// Entry not describing executable data. Skip to end of line to setup
// reading the next entry.
@@ -523,59 +646,29 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
-class ThreadHandle::PlatformData : public Malloced {
+class Thread::PlatformData : public Malloced {
public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- Initialize(kind);
- }
-
- void Initialize(ThreadHandle::Kind kind) {
- switch (kind) {
- case ThreadHandle::SELF: thread_ = pthread_self(); break;
- case ThreadHandle::INVALID: thread_ = kNoThread; break;
- }
- }
+ PlatformData() : thread_(kNoThread) {}
pthread_t thread_; // Thread handle for pthread.
};
-
-ThreadHandle::ThreadHandle(Kind kind) {
- data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
- delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
- return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
- return data_->thread_ != kNoThread;
-}
-
-
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
- set_name("v8:<unknown>");
+Thread::Thread(const Options& options)
+ : data_(new PlatformData()),
+ stack_size_(options.stack_size) {
+ set_name(options.name);
}
-Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(const char* name)
+ : data_(new PlatformData()),
+ stack_size_(0) {
set_name(name);
}
Thread::~Thread() {
+ delete data_;
}
@@ -584,9 +677,11 @@ static void* ThreadEntry(void* arg) {
// This is also initialized by the first argument to pthread_create() but we
// don't know which thread will run first (the original thread or the new
// one) so we initialize it here too.
- prctl(PR_SET_NAME, thread->name(), 0, 0, 0);
- thread->thread_handle_data()->thread_ = pthread_self();
- ASSERT(thread->IsValid());
+ prctl(PR_SET_NAME,
+ reinterpret_cast<unsigned long>(thread->name()), // NOLINT
+ 0, 0, 0);
+ thread->data()->thread_ = pthread_self();
+ ASSERT(thread->data()->thread_ != kNoThread);
thread->Run();
return NULL;
}
@@ -599,13 +694,20 @@ void Thread::set_name(const char* name) {
void Thread::Start() {
- pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
- ASSERT(IsValid());
+ pthread_attr_t* attr_ptr = NULL;
+ pthread_attr_t attr;
+ if (stack_size_ > 0) {
+ pthread_attr_init(&attr);
+ pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
+ attr_ptr = &attr;
+ }
+ pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+ ASSERT(data_->thread_ != kNoThread);
}
void Thread::Join() {
- pthread_join(thread_handle_data()->thread_, NULL);
+ pthread_join(data_->thread_, NULL);
}
@@ -645,7 +747,6 @@ void Thread::YieldCPU() {
class LinuxMutex : public Mutex {
public:
-
LinuxMutex() {
pthread_mutexattr_t attrs;
int result = pthread_mutexattr_init(&attrs);
@@ -760,10 +861,6 @@ Semaphore* OS::CreateSemaphore(int count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
-static Sampler* active_sampler_ = NULL;
-static int vm_tid_ = 0;
-
-
#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
// Android runs a fairly new Linux kernel, so signal info is there,
// but the C library doesn't have the structs defined.
@@ -792,7 +889,11 @@ enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
static int GetThreadID() {
// Glibc doesn't provide a wrapper for gettid(2).
+#if defined(ANDROID)
+ return syscall(__NR_gettid);
+#else
return syscall(SYS_gettid);
+#endif
}
@@ -800,17 +901,27 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
#ifndef V8_HOST_ARCH_MIPS
USE(info);
if (signal != SIGPROF) return;
- if (active_sampler_ == NULL || !active_sampler_->IsActive()) return;
- if (vm_tid_ != GetThreadID()) return;
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
+ // We require a fully initialized and entered isolate.
+ return;
+ }
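+ // Under v8::Locker, only sample the thread that currently holds the lock.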
+ if (v8::Locker::IsActive() &&
+ !isolate->thread_manager()->IsLockedByCurrentThread()) {
+ return;
+ }
+
+ Sampler* sampler = isolate->logger()->sampler();
+ if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent();
+ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = Top::current_vm_state();
+ sample->state = isolate->current_vm_state();
#if V8_HOST_ARCH_IA32
sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
@@ -831,54 +942,150 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif
#elif V8_HOST_ARCH_MIPS
- // Implement this on MIPS.
- UNIMPLEMENTED();
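+ // On MIPS, general register 29 is the stack pointer and register 30 the
+ // frame pointer.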
+ sample->pc = reinterpret_cast<Address>(mcontext.pc);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#endif
- active_sampler_->SampleStack(sample);
- active_sampler_->Tick(sample);
+ sampler->SampleStack(sample);
+ sampler->Tick(sample);
#endif
}
class Sampler::PlatformData : public Malloced {
public:
+ PlatformData() : vm_tid_(GetThreadID()) {}
+
+ int vm_tid() const { return vm_tid_; }
+
+ private:
+ const int vm_tid_;
+};
+
+
+class SignalSender : public Thread {
+ public:
enum SleepInterval {
- FULL_INTERVAL,
- HALF_INTERVAL
+ HALF_INTERVAL,
+ FULL_INTERVAL
};
- explicit PlatformData(Sampler* sampler)
- : sampler_(sampler),
- signal_handler_installed_(false),
+ explicit SignalSender(int interval)
+ : Thread("SignalSender"),
vm_tgid_(getpid()),
- signal_sender_launched_(false) {
+ interval_(interval) {}
+
+ static void InstallSignalHandler() {
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ signal_handler_installed_ =
+ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+ }
+
+ static void RestoreSignalHandler() {
+ if (signal_handler_installed_) {
+ sigaction(SIGPROF, &old_signal_handler_, 0);
+ signal_handler_installed_ = false;
+ }
+ }
+
+ static void AddActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::AddActiveSampler(sampler);
+ if (instance_ == NULL) {
+ // Start a thread that will send SIGPROF signal to VM threads,
+ // when CPU profiling will be enabled.
+ instance_ = new SignalSender(sampler->interval());
+ instance_->Start();
+ } else {
+ ASSERT(instance_->interval_ == sampler->interval());
+ }
}
- void SignalSender() {
- while (sampler_->IsActive()) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- if (sampler_->IsProfiling() && RuntimeProfiler::IsEnabled()) {
- SendProfilingSignal();
+ static void RemoveActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::RemoveActiveSampler(sampler);
+ if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+ RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
+ instance_->Join();
+ delete instance_;
+ instance_ = NULL;
+ RestoreSignalHandler();
+ }
+ }
+
+ // Implement Thread::Run().
+ virtual void Run() {
+ SamplerRegistry::State state;
+ while ((state = SamplerRegistry::GetState()) !=
+ SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ if (cpu_profiling_enabled && !signal_handler_installed_) {
+ InstallSignalHandler();
+ } else if (!cpu_profiling_enabled && signal_handler_installed_) {
+ RestoreSignalHandler();
+ }
+ // When CPU profiling is enabled, both JavaScript and C++ code are
+ // profiled, so we must not suspend.
+ if (!cpu_profiling_enabled) {
+ if (rate_limiter_.SuspendIfNecessary()) continue;
+ }
+ if (cpu_profiling_enabled && runtime_profiler_enabled) {
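+ // Interleave profiling signals and runtime-profiler ticks half an
+ // interval apart.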
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
Sleep(HALF_INTERVAL);
- RuntimeProfiler::NotifyTick();
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
Sleep(HALF_INTERVAL);
} else {
- if (sampler_->IsProfiling()) SendProfilingSignal();
- if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+ this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
+ NULL)) {
+ return;
+ }
+ }
Sleep(FULL_INTERVAL);
}
}
}
- void SendProfilingSignal() {
+ static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
+ if (!sampler->IsProfiling()) return;
+ SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
+ sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
+ }
+
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
+ void SendProfilingSignal(int tid) {
+ if (!signal_handler_installed_) return;
// Glibc doesn't provide a wrapper for tgkill(2).
- syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
+#if defined(ANDROID)
+ syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
+#else
+ syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
+#endif
}
void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
- useconds_t interval = sampler_->interval_ * 1000 - 100;
+ useconds_t interval = interval_ * 1000 - 100;
if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
#ifdef DEBUG
@@ -893,89 +1100,55 @@ class Sampler::PlatformData : public Malloced {
USE(result);
}
- Sampler* sampler_;
- bool signal_handler_installed_;
- struct sigaction old_signal_handler_;
- int vm_tgid_;
- bool signal_sender_launched_;
- pthread_t signal_sender_thread_;
+ const int vm_tgid_;
+ const int interval_;
RuntimeProfilerRateLimiter rate_limiter_;
+
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static SignalSender* instance_;
+ static bool signal_handler_installed_;
+ static struct sigaction old_signal_handler_;
+
+ DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
-static void* SenderEntry(void* arg) {
- Sampler::PlatformData* data =
- reinterpret_cast<Sampler::PlatformData*>(arg);
- data->SignalSender();
- return 0;
-}
+Mutex* SignalSender::mutex_ = OS::CreateMutex();
+SignalSender* SignalSender::instance_ = NULL;
+struct sigaction SignalSender::old_signal_handler_;
+bool SignalSender::signal_handler_installed_ = false;
-Sampler::Sampler(int interval)
- : interval_(interval),
+Sampler::Sampler(Isolate* isolate, int interval)
+ : isolate_(isolate),
+ interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
- data_ = new PlatformData(this);
+ data_ = new PlatformData;
}
Sampler::~Sampler() {
- ASSERT(!data_->signal_sender_launched_);
+ ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
- // There can only be one active sampler at the time on POSIX
- // platforms.
ASSERT(!IsActive());
- vm_tid_ = GetThreadID();
-
- // Request profiling signals.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
- data_->signal_handler_installed_ = true;
-
- // Start a thread that sends SIGPROF signal to VM thread.
- // Sending the signal ourselves instead of relying on itimer provides
- // much better accuracy.
SetActive(true);
- if (pthread_create(
- &data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) {
- data_->signal_sender_launched_ = true;
- }
-
- // Set this sampler as the active sampler.
- active_sampler_ = this;
+ SignalSender::AddActiveSampler(this);
}
void Sampler::Stop() {
+ ASSERT(IsActive());
+ SignalSender::RemoveActiveSampler(this);
SetActive(false);
-
- // Wait for signal sender termination (it will exit after setting
- // active_ to false).
- if (data_->signal_sender_launched_) {
- Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
- pthread_join(data_->signal_sender_thread_, NULL);
- data_->signal_sender_launched_ = false;
- }
-
- // Restore old signal handler
- if (data_->signal_handler_installed_) {
- sigaction(SIGPROF, &data_->old_signal_handler_, 0);
- data_->signal_handler_installed_ = false;
- }
-
- // This sampler is no longer the active sampler.
- active_sampler_ = NULL;
}
-
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index 35724c352..cdbbb12d4 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -48,8 +48,10 @@
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
+#include <sys/sysctl.h>
#include <stdarg.h>
#include <stdlib.h>
+#include <string.h>
#include <errno.h>
#undef MAP_TYPE
@@ -88,6 +90,9 @@ double ceiling(double x) {
}
+static Mutex* limit_mutex = NULL;
+
+
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -96,6 +101,7 @@ void OS::Setup() {
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
}
@@ -109,6 +115,9 @@ static void* highest_ever_allocated = reinterpret_cast<void*>(0);
static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ ASSERT(limit_mutex != NULL);
+ ScopedLock lock(limit_mutex);
+
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
@@ -143,7 +152,7 @@ void* OS::Allocate(const size_t requested,
MAP_PRIVATE | MAP_ANON,
kMmapFd, kMmapFdOffset);
if (mbase == MAP_FAILED) {
- LOG(StringEvent("OS::Allocate", "mmap failed"));
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
@@ -258,7 +267,8 @@ void OS::LogSharedLibraryAddresses() {
if (code_ptr == NULL) continue;
const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
- LOG(SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
+ LOG(Isolate::Current(),
+ SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
}
#endif // ENABLE_LOGGING_AND_PROFILING
}
@@ -382,63 +392,31 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
-class ThreadHandle::PlatformData : public Malloced {
+class Thread::PlatformData : public Malloced {
public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- Initialize(kind);
- }
-
- void Initialize(ThreadHandle::Kind kind) {
- switch (kind) {
- case ThreadHandle::SELF: thread_ = pthread_self(); break;
- case ThreadHandle::INVALID: thread_ = kNoThread; break;
- }
- }
+ PlatformData() : thread_(kNoThread) {}
pthread_t thread_; // Thread handle for pthread.
};
-
-
-ThreadHandle::ThreadHandle(Kind kind) {
- data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
- delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
- return pthread_equal(data_->thread_, pthread_self());
+Thread::Thread(const Options& options)
+ : data_(new PlatformData),
+ stack_size_(options.stack_size) {
+ set_name(options.name);
}
-bool ThreadHandle::IsValid() const {
- return data_->thread_ != kNoThread;
-}
-
-
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
- set_name("v8:<unknown>");
-}
-
-
-Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(const char* name)
+ : data_(new PlatformData),
+ stack_size_(0) {
set_name(name);
}
Thread::~Thread() {
+ delete data_;
}
-
static void SetThreadName(const char* name) {
// pthread_setname_np is only available in 10.6 or later, so test
// for it at runtime.
@@ -461,9 +439,9 @@ static void* ThreadEntry(void* arg) {
// This is also initialized by the first argument to pthread_create() but we
// don't know which thread will run first (the original thread or the new
// one) so we initialize it here too.
- thread->thread_handle_data()->thread_ = pthread_self();
+ thread->data()->thread_ = pthread_self();
SetThreadName(thread->name());
- ASSERT(thread->IsValid());
+ ASSERT(thread->data()->thread_ != kNoThread);
thread->Run();
return NULL;
}
@@ -476,21 +454,96 @@ void Thread::set_name(const char* name) {
void Thread::Start() {
- pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
+ pthread_attr_t* attr_ptr = NULL;
+ pthread_attr_t attr;
+ if (stack_size_ > 0) {
+ pthread_attr_init(&attr);
+ pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
+ attr_ptr = &attr;
+ }
+ pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+ ASSERT(data_->thread_ != kNoThread);
}
void Thread::Join() {
- pthread_join(thread_handle_data()->thread_, NULL);
+ pthread_join(data_->thread_, NULL);
}
+#ifdef V8_FAST_TLS_SUPPORTED
+
+static Atomic32 tls_base_offset_initialized = 0;
+intptr_t kMacTlsBaseOffset = 0;
+
+// It's safe to do the initialization more than once, but it has to be
+// done at least once.
+static void InitializeTlsBaseOffset() {
+ const size_t kBufferSize = 128;
+ char buffer[kBufferSize];
+ size_t buffer_size = kBufferSize;
+ int ctl_name[] = { CTL_KERN, KERN_OSRELEASE };
+ if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
+ V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
+ }
+ // The buffer now contains a string of the form XX.YY.ZZ, where
+ // XX is the major kernel version component.
+ // Make sure the buffer is 0-terminated.
+ buffer[kBufferSize - 1] = '\0';
+ char* period_pos = strchr(buffer, '.');
+ *period_pos = '\0';
+ int kernel_version_major =
+ static_cast<int>(strtol(buffer, NULL, 10)); // NOLINT
+ // The constants below are taken from pthreads.s from the XNU kernel
+ // sources archive at www.opensource.apple.com.
+ if (kernel_version_major < 11) {
+ // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
+ // same offsets.
+#if defined(V8_HOST_ARCH_IA32)
+ kMacTlsBaseOffset = 0x48;
+#else
+ kMacTlsBaseOffset = 0x60;
+#endif
+ } else {
+ // 11.x.x (Lion) changed the offset.
+ kMacTlsBaseOffset = 0;
+ }
+
+ Release_Store(&tls_base_offset_initialized, 1);
+}
+
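+// Sanity check: a value stored through the portable TLS API must be readable
+// through the fast inline accessor; abort if the offsets are wrong for this
+// kernel.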
+static void CheckFastTls(Thread::LocalStorageKey key) {
+ void* expected = reinterpret_cast<void*>(0x1234CAFE);
+ Thread::SetThreadLocal(key, expected);
+ void* actual = Thread::GetExistingThreadLocal(key);
+ if (expected != actual) {
+ V8_Fatal(__FILE__, __LINE__,
+ "V8 failed to initialize fast TLS on current kernel");
+ }
+ Thread::SetThreadLocal(key, NULL);
+}
+
+#endif // V8_FAST_TLS_SUPPORTED
+
+
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+#ifdef V8_FAST_TLS_SUPPORTED
+ bool check_fast_tls = false;
+ if (tls_base_offset_initialized == 0) {
+ check_fast_tls = true;
+ InitializeTlsBaseOffset();
+ }
+#endif
pthread_key_t key;
int result = pthread_key_create(&key, NULL);
USE(result);
ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
+ LocalStorageKey typed_key = static_cast<LocalStorageKey>(key);
+#ifdef V8_FAST_TLS_SUPPORTED
+ // If we just initialized fast TLS support, make sure it works.
+ if (check_fast_tls) CheckFastTls(typed_key);
+#endif
+ return typed_key;
}
@@ -595,52 +648,111 @@ Semaphore* OS::CreateSemaphore(int count) {
class Sampler::PlatformData : public Malloced {
public:
- explicit PlatformData(Sampler* sampler)
- : sampler_(sampler),
- task_self_(mach_task_self()),
- profiled_thread_(0),
- sampler_thread_(0) {
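+ // mach_thread_self() allocates a new port right; the destructor releases it.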
+ PlatformData() : profiled_thread_(mach_thread_self()) {}
+
+ ~PlatformData() {
+ // Deallocate Mach port for thread.
+ mach_port_deallocate(mach_task_self(), profiled_thread_);
}
- Sampler* sampler_;
+ thread_act_t profiled_thread() { return profiled_thread_; }
+
+ private:
// Note: for profiled_thread_ Mach primitives are used instead of PThread's
// because the latter doesn't provide the required thread manipulation primitives.
// For details, consult "Mac OS X Internals" book, Section 7.3.
- mach_port_t task_self_;
thread_act_t profiled_thread_;
- pthread_t sampler_thread_;
- RuntimeProfilerRateLimiter rate_limiter_;
+};
- // Sampler thread handler.
- void Runner() {
- while (sampler_->IsActive()) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- Sample();
- OS::Sleep(sampler_->interval_);
+class SamplerThread : public Thread {
+ public:
+ explicit SamplerThread(int interval)
+ : Thread("SamplerThread"),
+ interval_(interval) {}
+
+ static void AddActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::AddActiveSampler(sampler);
+ if (instance_ == NULL) {
+ instance_ = new SamplerThread(sampler->interval());
+ instance_->Start();
+ } else {
+ ASSERT(instance_->interval_ == sampler->interval());
}
}
- void Sample() {
- if (sampler_->IsProfiling()) {
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
+ static void RemoveActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::RemoveActiveSampler(sampler);
+ if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+ RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
+ instance_->Join();
+ delete instance_;
+ instance_ = NULL;
+ }
+ }
- if (KERN_SUCCESS != thread_suspend(profiled_thread_)) return;
+ // Implement Thread::Run().
+ virtual void Run() {
+ SamplerRegistry::State state;
+ while ((state = SamplerRegistry::GetState()) !=
+ SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ // When CPU profiling is enabled, both JavaScript and C++ code are
+ // profiled, so we must not suspend.
+ if (!cpu_profiling_enabled) {
+ if (rate_limiter_.SuspendIfNecessary()) continue;
+ }
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
+ }
+ OS::Sleep(interval_);
+ }
+ }
+
+ static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ if (!sampler->IsProfiling()) return;
+ SamplerThread* sampler_thread =
+ reinterpret_cast<SamplerThread*>(raw_sampler_thread);
+ sampler_thread->SampleContext(sampler);
+ }
+
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
+ void SampleContext(Sampler* sampler) {
+ thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
+ if (sample == NULL) sample = &sample_obj;
+
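+ // Suspend the profiled thread while its register state is read; it is
+ // resumed once the sample has been recorded.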
+ if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
#if V8_HOST_ARCH_X64
- thread_state_flavor_t flavor = x86_THREAD_STATE64;
- x86_thread_state64_t state;
- mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
+ thread_state_flavor_t flavor = x86_THREAD_STATE64;
+ x86_thread_state64_t state;
+ mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
#if __DARWIN_UNIX03
#define REGISTER_FIELD(name) __r ## name
#else
#define REGISTER_FIELD(name) r ## name
#endif // __DARWIN_UNIX03
#elif V8_HOST_ARCH_IA32
- thread_state_flavor_t flavor = i386_THREAD_STATE;
- i386_thread_state_t state;
- mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
+ thread_state_flavor_t flavor = i386_THREAD_STATE;
+ i386_thread_state_t state;
+ mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
#if __DARWIN_UNIX03
#define REGISTER_FIELD(name) __e ## name
#else
@@ -650,81 +762,64 @@ class Sampler::PlatformData : public Malloced {
#error Unsupported Mac OS X host architecture.
#endif // V8_HOST_ARCH
- if (thread_get_state(profiled_thread_,
- flavor,
- reinterpret_cast<natural_t*>(&state),
- &count) == KERN_SUCCESS) {
- sample->state = Top::current_vm_state();
- sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
- sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
- sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
- sampler_->SampleStack(sample);
- sampler_->Tick(sample);
- }
- thread_resume(profiled_thread_);
+ if (thread_get_state(profiled_thread,
+ flavor,
+ reinterpret_cast<natural_t*>(&state),
+ &count) == KERN_SUCCESS) {
+ sample->state = sampler->isolate()->current_vm_state();
+ sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
+ sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
+ sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
+ sampler->SampleStack(sample);
+ sampler->Tick(sample);
}
- if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
+ thread_resume(profiled_thread);
}
+
+ const int interval_;
+ RuntimeProfilerRateLimiter rate_limiter_;
+
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static SamplerThread* instance_;
+
+ DISALLOW_COPY_AND_ASSIGN(SamplerThread);
};
#undef REGISTER_FIELD
-// Entry point for sampler thread.
-static void* SamplerEntry(void* arg) {
- Sampler::PlatformData* data =
- reinterpret_cast<Sampler::PlatformData*>(arg);
- data->Runner();
- return 0;
-}
+Mutex* SamplerThread::mutex_ = OS::CreateMutex();
+SamplerThread* SamplerThread::instance_ = NULL;
-Sampler::Sampler(int interval)
- : interval_(interval),
+Sampler::Sampler(Isolate* isolate, int interval)
+ : isolate_(isolate),
+ interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
- data_ = new PlatformData(this);
+ data_ = new PlatformData;
}
Sampler::~Sampler() {
+ ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
- // Do not start multiple threads for the same sampler.
ASSERT(!IsActive());
- data_->profiled_thread_ = mach_thread_self();
-
- // Create sampler thread with high priority.
- // According to POSIX spec, when SCHED_FIFO policy is used, a thread
- // runs until it exits or blocks.
- pthread_attr_t sched_attr;
- sched_param fifo_param;
- pthread_attr_init(&sched_attr);
- pthread_attr_setinheritsched(&sched_attr, PTHREAD_EXPLICIT_SCHED);
- pthread_attr_setschedpolicy(&sched_attr, SCHED_FIFO);
- fifo_param.sched_priority = sched_get_priority_max(SCHED_FIFO);
- pthread_attr_setschedparam(&sched_attr, &fifo_param);
-
SetActive(true);
- pthread_create(&data_->sampler_thread_, &sched_attr, SamplerEntry, data_);
+ SamplerThread::AddActiveSampler(this);
}
void Sampler::Stop() {
- // Seting active to false triggers termination of the sampler
- // thread.
+ ASSERT(IsActive());
+ SamplerThread::RemoveActiveSampler(this);
SetActive(false);
-
- // Wait for sampler thread to terminate.
- Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
- pthread_join(data_->sampler_thread_, NULL);
-
- // Deallocate Mach port for thread.
- mach_port_deallocate(data_->task_self_, data_->profiled_thread_);
}
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/deps/v8/src/platform-nullos.cc b/deps/v8/src/platform-nullos.cc
index 49d3dd988..d309806ec 100644
--- a/deps/v8/src/platform-nullos.cc
+++ b/deps/v8/src/platform-nullos.cc
@@ -186,6 +186,11 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
}
+bool OS::ArmUsingHardFloat() {
+ UNIMPLEMENTED();
+ return false;
+}
+
+
bool OS::IsOutsideAllocatedSpace(void* address) {
UNIMPLEMENTED();
return false;
@@ -299,9 +304,9 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
-class ThreadHandle::PlatformData : public Malloced {
+class Thread::PlatformData : public Malloced {
public:
- explicit PlatformData(ThreadHandle::Kind kind) {
+ PlatformData() {
UNIMPLEMENTED();
}
@@ -309,50 +314,24 @@ class ThreadHandle::PlatformData : public Malloced {
};
-ThreadHandle::ThreadHandle(Kind kind) {
- UNIMPLEMENTED();
- // Shared setup follows.
- data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- UNIMPLEMENTED();
-}
-
-
-ThreadHandle::~ThreadHandle() {
+Thread::Thread(const Options& options)
+ : data_(new PlatformData()),
+ stack_size_(options.stack_size) {
+ set_name(options.name);
UNIMPLEMENTED();
- // Shared tear down follows.
- delete data_;
}
-bool ThreadHandle::IsSelf() const {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool ThreadHandle::IsValid() const {
- UNIMPLEMENTED();
- return false;
-}
-
-
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
- set_name("v8:<unknown>");
- UNIMPLEMENTED();
-}
-
-
-Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(const char* name)
+ : data_(new PlatformData()),
+ stack_size_(0) {
set_name(name);
UNIMPLEMENTED();
}
Thread::~Thread() {
+ delete data_;
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index e2796294a..6034800f7 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2006-2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -50,6 +50,7 @@
#undef MAP_TYPE
#include "v8.h"
+#include "v8threads.h"
#include "platform.h"
#include "vm-state-inl.h"
@@ -73,6 +74,9 @@ double ceiling(double x) {
}
+static Mutex* limit_mutex = NULL;
+
+
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -81,6 +85,7 @@ void OS::Setup() {
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
}
@@ -129,6 +134,9 @@ static void* highest_ever_allocated = reinterpret_cast<void*>(0);
static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ ASSERT(limit_mutex != NULL);
+ ScopedLock lock(limit_mutex);
+
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
@@ -154,7 +162,7 @@ void* OS::Allocate(const size_t requested,
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(StringEvent("OS::Allocate", "mmap failed"));
+ LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
@@ -164,6 +172,7 @@ void* OS::Allocate(const size_t requested,
void OS::Free(void* buf, const size_t length) {
+ // TODO(1240712): munmap has a return value which is ignored here.
int result = munmap(buf, length);
USE(result);
ASSERT(result == 0);
@@ -297,7 +306,7 @@ void OS::LogSharedLibraryAddresses() {
// There may be no filename in this line. Skip to next.
if (start_of_path == NULL) continue;
buffer[bytes_read] = 0;
- LOG(SharedLibraryEvent(start_of_path, start, end));
+ LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
}
close(fd);
#endif
@@ -309,8 +318,30 @@ void OS::SignalCodeMovingGC() {
int OS::StackWalk(Vector<OS::StackFrame> frames) {
- UNIMPLEMENTED();
- return 1;
+ int frames_size = frames.length();
+ ScopedVector<void*> addresses(frames_size);
+
+ int frames_count = backtrace(addresses.start(), frames_size);
+
+ char** symbols = backtrace_symbols(addresses.start(), frames_count);
+ if (symbols == NULL) {
+ return kStackWalkError;
+ }
+
+ for (int i = 0; i < frames_count; i++) {
+ frames[i].address = addresses[i];
+ // Format a text representation of the frame based on the information
+ // available.
+ SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
+ "%s",
+ symbols[i]);
+ // Make sure line termination is in place.
+ frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
+ }
+
+ free(symbols);
+
+ return frames_count;
}
@@ -354,63 +385,33 @@ bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
bool VirtualMemory::Uncommit(void* address, size_t size) {
return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
-class ThreadHandle::PlatformData : public Malloced {
+class Thread::PlatformData : public Malloced {
public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- Initialize(kind);
- }
-
- void Initialize(ThreadHandle::Kind kind) {
- switch (kind) {
- case ThreadHandle::SELF: thread_ = pthread_self(); break;
- case ThreadHandle::INVALID: thread_ = kNoThread; break;
- }
- }
pthread_t thread_; // Thread handle for pthread.
};
-ThreadHandle::ThreadHandle(Kind kind) {
- data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- data_->Initialize(kind);
+Thread::Thread(const Options& options)
+ : data_(new PlatformData),
+ stack_size_(options.stack_size) {
+ set_name(options.name);
}
-ThreadHandle::~ThreadHandle() {
- delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
- return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
- return data_->thread_ != kNoThread;
-}
-
-
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
- set_name("v8:<unknown>");
-}
-
-
-Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(const char* name)
+ : data_(new PlatformData),
+ stack_size_(0) {
set_name(name);
}
Thread::~Thread() {
+ delete data_;
}
@@ -419,8 +420,8 @@ static void* ThreadEntry(void* arg) {
// This is also initialized by the first argument to pthread_create() but we
// don't know which thread will run first (the original thread or the new
// one) so we initialize it here too.
- thread->thread_handle_data()->thread_ = pthread_self();
- ASSERT(thread->IsValid());
+ thread->data()->thread_ = pthread_self();
+ ASSERT(thread->data()->thread_ != kNoThread);
thread->Run();
return NULL;
}
@@ -433,13 +434,20 @@ void Thread::set_name(const char* name) {
void Thread::Start() {
- pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
- ASSERT(IsValid());
+ pthread_attr_t* attr_ptr = NULL;
+ pthread_attr_t attr;
+ if (stack_size_ > 0) {
+ pthread_attr_init(&attr);
+ pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
+ attr_ptr = &attr;
+ }
+ pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+ ASSERT(data_->thread_ != kNoThread);
}
void Thread::Join() {
- pthread_join(thread_handle_data()->thread_, NULL);
+ pthread_join(data_->thread_, NULL);
}
@@ -479,7 +487,6 @@ void Thread::YieldCPU() {
class OpenBSDMutex : public Mutex {
public:
-
OpenBSDMutex() {
pthread_mutexattr_t attrs;
int result = pthread_mutexattr_init(&attrs);
@@ -502,6 +509,16 @@ class OpenBSDMutex : public Mutex {
return result;
}
+ virtual bool TryLock() {
+ int result = pthread_mutex_trylock(&mutex_);
+ // Return false if the lock is busy and locking failed.
+ if (result == EBUSY) {
+ return false;
+ }
+ ASSERT(result == 0); // Verify no other errors.
+ return true;
+ }
+
private:
pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
};
@@ -554,11 +571,16 @@ bool OpenBSDSemaphore::Wait(int timeout) {
struct timespec ts;
TIMEVAL_TO_TIMESPEC(&end_time, &ts);
+
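+ // Approximate a timed wait by polling sem_trywait, sleeping between
+ // attempts, and giving up once the whole-second counter runs out.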
+ int to = ts.tv_sec;
+
while (true) {
int result = sem_trywait(&sem_);
if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
+ if (!to) return false; // Timeout.
CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
+ usleep(ts.tv_nsec / 1000);
+ to--;
}
}
@@ -570,86 +592,231 @@ Semaphore* OS::CreateSemaphore(int count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
-static Sampler* active_sampler_ = NULL;
+static pthread_t GetThreadID() {
+ return pthread_self();
+}
+
+
+class Sampler::PlatformData : public Malloced {
+ public:
+ PlatformData() : vm_tid_(GetThreadID()) {}
+
+ pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+ pthread_t vm_tid_;
+};
+
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
- if (active_sampler_ == NULL) return;
-
- TickSample sample;
-
- // We always sample the VM state.
- sample.state = VMState::current_state();
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
+ // We require a fully initialized and entered isolate.
+ return;
+ }
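+ // Under v8::Locker, only sample the thread that currently holds the lock.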
+ if (v8::Locker::IsActive() &&
+ !isolate->thread_manager()->IsLockedByCurrentThread()) {
+ return;
+ }
- active_sampler_->Tick(&sample);
+ Sampler* sampler = isolate->logger()->sampler();
+ if (sampler == NULL || !sampler->IsActive()) return;
+
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+ if (sample == NULL) sample = &sample_obj;
+
+ // Extracting the sample from the context is extremely machine dependent.
+ ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+ sample->state = isolate->current_vm_state();
+#if V8_HOST_ARCH_IA32
+ sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
+ sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
+ sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp);
+#elif V8_HOST_ARCH_X64
+ sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
+ sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
+ sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
+#elif V8_HOST_ARCH_ARM
+ sample->pc = reinterpret_cast<Address>(ucontext->sc_r15);
+ sample->sp = reinterpret_cast<Address>(ucontext->sc_r13);
+ sample->fp = reinterpret_cast<Address>(ucontext->sc_r11);
+#endif
+ sampler->SampleStack(sample);
+ sampler->Tick(sample);
}
-class Sampler::PlatformData : public Malloced {
+class SignalSender : public Thread {
public:
- PlatformData() {
- signal_handler_installed_ = false;
+ enum SleepInterval {
+ HALF_INTERVAL,
+ FULL_INTERVAL
+ };
+
+ explicit SignalSender(int interval)
+ : Thread("SignalSender"),
+ interval_(interval) {}
+
+ static void AddActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::AddActiveSampler(sampler);
+ if (instance_ == NULL) {
+ // Install a signal handler.
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ signal_handler_installed_ =
+ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+
+ // Start a thread that sends SIGPROF signal to VM threads.
+ instance_ = new SignalSender(sampler->interval());
+ instance_->Start();
+ } else {
+ ASSERT(instance_->interval_ == sampler->interval());
+ }
+ }
+
+ static void RemoveActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::RemoveActiveSampler(sampler);
+ if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+ RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
+ instance_->Join();
+ delete instance_;
+ instance_ = NULL;
+
+ // Restore the old signal handler.
+ if (signal_handler_installed_) {
+ sigaction(SIGPROF, &old_signal_handler_, 0);
+ signal_handler_installed_ = false;
+ }
+ }
+ }
+
+ // Implement Thread::Run().
+ virtual void Run() {
+ SamplerRegistry::State state;
+ while ((state = SamplerRegistry::GetState()) !=
+ SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ // When CPU profiling is enabled, both JavaScript and C++ code are
+ // profiled, so we must not suspend.
+ if (!cpu_profiling_enabled) {
+ if (rate_limiter_.SuspendIfNecessary()) continue;
+ }
+ if (cpu_profiling_enabled && runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
+ } else {
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+ this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
+ NULL)) {
+ return;
+ }
+ }
+ Sleep(FULL_INTERVAL);
+ }
+ }
+ }
+
+ static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
+ if (!sampler->IsProfiling()) return;
+ SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
+ sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
+ }
+
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
+ void SendProfilingSignal(pthread_t tid) {
+ if (!signal_handler_installed_) return;
+ pthread_kill(tid, SIGPROF);
+ }
+
+ void Sleep(SleepInterval full_or_half) {
+ // Convert ms to us and subtract 100 us to compensate for delays
+ // occurring during signal delivery.
+ useconds_t interval = interval_ * 1000 - 100;
+ if (full_or_half == HALF_INTERVAL) interval /= 2;
+ int result = usleep(interval);
+#ifdef DEBUG
+ if (result != 0 && errno != EINTR) {
+ fprintf(stderr,
+ "SignalSender usleep error; interval = %u, errno = %d\n",
+ interval,
+ errno);
+ ASSERT(result == 0 || errno == EINTR);
+ }
+#endif
+ USE(result);
}
- bool signal_handler_installed_;
- struct sigaction old_signal_handler_;
- struct itimerval old_timer_value_;
+ const int interval_;
+ RuntimeProfilerRateLimiter rate_limiter_;
+
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static SignalSender* instance_;
+ static bool signal_handler_installed_;
+ static struct sigaction old_signal_handler_;
+
+ DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
+Mutex* SignalSender::mutex_ = OS::CreateMutex();
+SignalSender* SignalSender::instance_ = NULL;
+struct sigaction SignalSender::old_signal_handler_;
+bool SignalSender::signal_handler_installed_ = false;
+
-Sampler::Sampler(int interval)
- : interval_(interval),
+Sampler::Sampler(Isolate* isolate, int interval)
+ : isolate_(isolate),
+ interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
- data_ = new PlatformData();
+ data_ = new PlatformData;
}
Sampler::~Sampler() {
+ ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
- // There can only be one active sampler at the time on POSIX
- // platforms.
- if (active_sampler_ != NULL) return;
-
- // Request profiling signals.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
- data_->signal_handler_installed_ = true;
-
- // Set the itimer to generate a tick for each interval.
- itimerval itimer;
- itimer.it_interval.tv_sec = interval_ / 1000;
- itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
- itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
- itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
- setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
-
- // Set this sampler as the active sampler.
- active_sampler_ = this;
- active_ = true;
+ ASSERT(!IsActive());
+ SetActive(true);
+ SignalSender::AddActiveSampler(this);
}
void Sampler::Stop() {
- // Restore old signal handler
- if (data_->signal_handler_installed_) {
- setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
- sigaction(SIGPROF, &data_->old_signal_handler_, 0);
- data_->signal_handler_installed_ = false;
- }
-
- // This sampler is no longer the active sampler.
- active_sampler_ = NULL;
- active_ = false;
+ ASSERT(IsActive());
+ SignalSender::RemoveActiveSampler(this);
+ SetActive(false);
}
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index 256dc75f6..83f6c8112 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -54,6 +54,18 @@
namespace v8 {
namespace internal {
+
+// Maximum size of the virtual memory. 0 means there is no artificial
+// limit.
+
+intptr_t OS::MaxVirtualMemory() {
+ struct rlimit limit;
+ int result = getrlimit(RLIMIT_DATA, &limit);
+ if (result != 0) return 0;
+ return limit.rlim_cur;
+}
+
+
// ----------------------------------------------------------------------------
// Math functions
@@ -127,7 +139,7 @@ bool OS::Remove(const char* path) {
}
-const char* OS::LogFileOpenMode = "w";
+const char* const OS::LogFileOpenMode = "w";
void OS::Print(const char* format, ...) {
@@ -139,7 +151,7 @@ void OS::Print(const char* format, ...) {
void OS::VPrint(const char* format, va_list args) {
-#if defined(ANDROID)
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
#else
vprintf(format, args);
@@ -156,7 +168,7 @@ void OS::FPrint(FILE* out, const char* format, ...) {
void OS::VFPrint(FILE* out, const char* format, va_list args) {
-#if defined(ANDROID)
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
#else
vfprintf(out, format, args);
@@ -173,7 +185,7 @@ void OS::PrintError(const char* format, ...) {
void OS::VPrintError(const char* format, va_list args) {
-#if defined(ANDROID)
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
LOG_PRI_VA(ANDROID_LOG_ERROR, LOG_TAG, format, args);
#else
vfprintf(stderr, format, args);
@@ -205,6 +217,31 @@ int OS::VSNPrintF(Vector<char> str,
}
+#if defined(V8_TARGET_ARCH_IA32)
+static OS::MemCopyFunction memcopy_function = NULL;
+static Mutex* memcopy_function_mutex = OS::CreateMutex();
+// Defined in codegen-ia32.cc.
+OS::MemCopyFunction CreateMemCopyFunction();
+
+// Copy memory area to disjoint memory area.
+void OS::MemCopy(void* dest, const void* src, size_t size) {
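+ // Lazily create the specialized copy stub, using double-checked locking
+ // so the stub is built only once.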
+ if (memcopy_function == NULL) {
+ ScopedLock lock(memcopy_function_mutex);
+ if (memcopy_function == NULL) {
+ OS::MemCopyFunction temp = CreateMemCopyFunction();
+ MemoryBarrier();
+ memcopy_function = temp;
+ }
+ }
+ // Note: here we rely on dependent reads being ordered. This is true
+ // on all architectures we currently support.
+ (*memcopy_function)(dest, src, size);
+#ifdef DEBUG
+ CHECK_EQ(0, memcmp(dest, src, size));
+#endif
+}
+#endif // V8_TARGET_ARCH_IA32
+
// ----------------------------------------------------------------------------
// POSIX string support.
//
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index 0ee2e7cdb..dd4bd5d7c 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -170,7 +170,7 @@ void* OS::Allocate(const size_t requested,
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(StringEvent("OS::Allocate", "mmap failed"));
+ LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
@@ -374,59 +374,29 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
-class ThreadHandle::PlatformData : public Malloced {
+class Thread::PlatformData : public Malloced {
public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- Initialize(kind);
- }
-
- void Initialize(ThreadHandle::Kind kind) {
- switch (kind) {
- case ThreadHandle::SELF: thread_ = pthread_self(); break;
- case ThreadHandle::INVALID: thread_ = kNoThread; break;
- }
- }
+ PlatformData() : thread_(kNoThread) { }
pthread_t thread_; // Thread handle for pthread.
};
-
-ThreadHandle::ThreadHandle(Kind kind) {
- data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
- delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
- return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
- return data_->thread_ != kNoThread;
+Thread::Thread(const Options& options)
+ : data_(new PlatformData()),
+ stack_size_(options.stack_size) {
+ set_name(options.name);
}
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
- set_name("v8:<unknown>");
-}
-
-
-Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(const char* name)
+ : data_(new PlatformData()),
+ stack_size_(0) {
set_name(name);
}
Thread::~Thread() {
+ delete data_;
}
@@ -435,8 +405,9 @@ static void* ThreadEntry(void* arg) {
// This is also initialized by the first argument to pthread_create() but we
// don't know which thread will run first (the original thread or the new
// one) so we initialize it here too.
- thread->thread_handle_data()->thread_ = pthread_self();
- ASSERT(thread->IsValid());
+ thread->data()->thread_ = pthread_self();
+ ASSERT(thread->data()->thread_ != kNoThread);
+ Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
thread->Run();
return NULL;
}
@@ -449,13 +420,20 @@ void Thread::set_name(const char* name) {
void Thread::Start() {
- pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
- ASSERT(IsValid());
+ pthread_attr_t* attr_ptr = NULL;
+ pthread_attr_t attr;
+ if (stack_size_ > 0) {
+ pthread_attr_init(&attr);
+ pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
+ attr_ptr = &attr;
+ }
+ pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+ ASSERT(data_->thread_ != kNoThread);
}
void Thread::Join() {
- pthread_join(thread_handle_data()->thread_, NULL);
+ pthread_join(data_->thread_, NULL);
}
@@ -713,8 +691,9 @@ static void* SenderEntry(void* arg) {
}
-Sampler::Sampler(int interval)
- : interval_(interval),
+Sampler::Sampler(Isolate* isolate, int interval)
+ : isolate_(isolate),
+ interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
@@ -777,7 +756,6 @@ void Sampler::Stop() {
active_sampler_ = NULL;
}
-
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/codegen-mips-inl.h b/deps/v8/src/platform-tls-mac.h
index 3a511b80f..728524e80 100644
--- a/deps/v8/src/mips/codegen-mips-inl.h
+++ b/deps/v8/src/platform-tls-mac.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,46 +25,38 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef V8_PLATFORM_TLS_MAC_H_
+#define V8_PLATFORM_TLS_MAC_H_
-#ifndef V8_MIPS_CODEGEN_MIPS_INL_H_
-#define V8_MIPS_CODEGEN_MIPS_INL_H_
+#include "globals.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
+#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
-// Platform-specific inline functions.
+#define V8_FAST_TLS_SUPPORTED 1
-void DeferredCode::Jump() {
- __ b(&entry_label_);
- __ nop();
-}
-
-
-void Reference::GetValueAndSpill() {
- GetValue();
-}
-
-
-void CodeGenerator::VisitAndSpill(Statement* statement) {
- Visit(statement);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
- VisitStatements(statements);
-}
+extern intptr_t kMacTlsBaseOffset;
+INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
-void CodeGenerator::LoadAndSpill(Expression* expression) {
- Load(expression);
+inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
+ intptr_t result;
+#if defined(V8_HOST_ARCH_IA32)
+ asm("movl %%gs:(%1,%2,4), %0;"
+ :"=r"(result) // Output must be a writable register.
+ :"r"(kMacTlsBaseOffset), "r"(index));
+#else
+ asm("movq %%gs:(%1,%2,8), %0;"
+ :"=r"(result)
+ :"r"(kMacTlsBaseOffset), "r"(index));
+#endif
+ return result;
}
-
-#undef __
+#endif
} } // namespace v8::internal
-#endif // V8_MIPS_CODEGEN_MIPS_INL_H_
-
+#endif // V8_PLATFORM_TLS_MAC_H_
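// A sanity-check sketch for the fast path above: kMacTlsBaseOffset is the
// %gs-relative offset of the pthread TLS slot array, so the inline
// assembly must agree with pthread_getspecific(). This assumes the offset
// has been initialized during OS setup (its definition is expected in
// platform-macos.cc); the helper name is illustrative:
#include <pthread.h>

static void CheckFastTlsAgainstPthread() {
  pthread_key_t key;
  pthread_key_create(&key, NULL);
  pthread_setspecific(key, reinterpret_cast<void*>(0x1234));
  intptr_t fast =
      InternalGetExistingThreadLocal(static_cast<intptr_t>(key));
  ASSERT(fast == reinterpret_cast<intptr_t>(pthread_getspecific(key)));
  pthread_key_delete(key);
}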
diff --git a/deps/v8/src/codegen-inl.h b/deps/v8/src/platform-tls-win32.h
index 54677894c..4056e8cc6 100644
--- a/deps/v8/src/codegen-inl.h
+++ b/deps/v8/src/platform-tls-win32.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,40 +25,38 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef V8_PLATFORM_TLS_WIN32_H_
+#define V8_PLATFORM_TLS_WIN32_H_
-#ifndef V8_CODEGEN_INL_H_
-#define V8_CODEGEN_INL_H_
-
-#include "codegen.h"
-#include "compiler.h"
-#include "register-allocator-inl.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/codegen-ia32-inl.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/codegen-x64-inl.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/codegen-arm-inl.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/codegen-mips-inl.h"
-#else
-#error Unsupported target architecture.
-#endif
-
+#include "checks.h"
+#include "globals.h"
+#include "win32-headers.h"
namespace v8 {
namespace internal {
-Handle<Script> CodeGenerator::script() { return info_->script(); }
-
-bool CodeGenerator::is_eval() { return info_->is_eval(); }
-
-Scope* CodeGenerator::scope() { return info_->function()->scope(); }
-
-StrictModeFlag CodeGenerator::strict_mode_flag() {
- return info_->function()->strict_mode() ? kStrictMode : kNonStrictMode;
+#if defined(_WIN32) && !defined(_WIN64)
+
+#define V8_FAST_TLS_SUPPORTED 1
+
+inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
+ const intptr_t kTibInlineTlsOffset = 0xE10;
+ const intptr_t kTibExtraTlsOffset = 0xF94;
+ const intptr_t kMaxInlineSlots = 64;
+ const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
+ ASSERT(0 <= index && index < kMaxSlots);
+ if (index < kMaxInlineSlots) {
+ return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
+ kPointerSize * index));
+ }
+ intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
+ ASSERT(extra != 0);
+ return *reinterpret_cast<intptr_t*>(extra +
+ kPointerSize * (index - kMaxInlineSlots));
}
+#endif
+
} } // namespace v8::internal
-#endif // V8_CODEGEN_INL_H_
+#endif // V8_PLATFORM_TLS_WIN32_H_
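// The same kind of sanity check for the Win32 fast path, assuming a
// 32-bit build where the raw TIB read and TlsGetValue() must agree
// (indices >= 64 would exercise the expansion-slot branch):
static void CheckFastTlsAgainstTlsGetValue() {
  DWORD index = TlsAlloc();
  TlsSetValue(index, reinterpret_cast<void*>(0x1234));
  intptr_t fast =
      InternalGetExistingThreadLocal(static_cast<intptr_t>(index));
  ASSERT(fast == reinterpret_cast<intptr_t>(TlsGetValue(index)));
  TlsFree(index);
}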
diff --git a/deps/v8/src/virtual-frame-inl.h b/deps/v8/src/platform-tls.h
index c9f4aac18..564917540 100644
--- a/deps/v8/src/virtual-frame-inl.h
+++ b/deps/v8/src/platform-tls.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,15 +25,26 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_VIRTUAL_FRAME_INL_H_
-#define V8_VIRTUAL_FRAME_INL_H_
+// Platform and architecture specific thread local store functions.
-#include "virtual-frame.h"
+#ifndef V8_PLATFORM_TLS_H_
+#define V8_PLATFORM_TLS_H_
+
+#ifdef V8_FAST_TLS
+
+// When fast TLS is requested we include the appropriate
+// implementation header.
+//
+// The implementation header defines V8_FAST_TLS_SUPPORTED if it
+// provides fast TLS support for the current platform and architecture
+// combination.
+
+#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
+#include "platform-tls-win32.h"
+#elif defined(__APPLE__)
+#include "platform-tls-mac.h"
+#endif
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
-#include "virtual-frame-heavy-inl.h"
-#else
-#include "virtual-frame-light-inl.h"
#endif
-#endif // V8_VIRTUAL_FRAME_INL_H_
+#endif // V8_PLATFORM_TLS_H_
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index f24994b5b..b7eed47cb 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -44,6 +44,11 @@
namespace v8 {
namespace internal {
+intptr_t OS::MaxVirtualMemory() {
+ return 0;
+}
+
+
// Test for finite value - usually defined in math.h
int isfinite(double x) {
return _finite(x);
@@ -173,15 +178,53 @@ double ceiling(double x) {
return ceil(x);
}
+
+static Mutex* limit_mutex = NULL;
+
+#if defined(V8_TARGET_ARCH_IA32)
+static OS::MemCopyFunction memcopy_function = NULL;
+static Mutex* memcopy_function_mutex = OS::CreateMutex();
+// Defined in codegen-ia32.cc.
+OS::MemCopyFunction CreateMemCopyFunction();
+
+// Copy memory area to disjoint memory area.
+void OS::MemCopy(void* dest, const void* src, size_t size) {
+ if (memcopy_function == NULL) {
+ ScopedLock lock(memcopy_function_mutex);
+ if (memcopy_function == NULL) {
+ OS::MemCopyFunction temp = CreateMemCopyFunction();
+ MemoryBarrier();
+ memcopy_function = temp;
+ }
+ }
+ // Note: here we rely on dependent reads being ordered. This is true
+ // on all architectures we currently support.
+ (*memcopy_function)(dest, src, size);
+#ifdef DEBUG
+ CHECK_EQ(0, memcmp(dest, src, size));
+#endif
+}
+#endif // V8_TARGET_ARCH_IA32
+
#ifdef _WIN64
typedef double (*ModuloFunction)(double, double);
-
+static ModuloFunction modulo_function = NULL;
+static Mutex* modulo_function_mutex = OS::CreateMutex();
// Defined in codegen-x64.cc.
ModuloFunction CreateModuloFunction();
double modulo(double x, double y) {
- static ModuloFunction function = CreateModuloFunction();
- return function(x, y);
+ if (modulo_function == NULL) {
+ ScopedLock lock(modulo_function_mutex);
+ if (modulo_function == NULL) {
+ ModuloFunction temp = CreateModuloFunction();
+ MemoryBarrier();
+ modulo_function = temp;
+ }
+ }
+ // Note: here we rely on dependent reads being ordered. This is true
+ // on all architectures we currently support.
+ return (*modulo_function)(x, y);
}
#else // Win32
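// The generated-function caching above follows one pattern: double-checked
// locking with an explicit MemoryBarrier() so the generated code is fully
// written before its pointer becomes visible to other threads. A generic
// sketch of the same pattern (the names here are illustrative):
typedef void (*Fn)();
static Fn cached_fn = NULL;
static Mutex* fn_mutex = OS::CreateMutex();

static Fn GetFn(Fn (*create)()) {
  if (cached_fn == NULL) {       // First check, taken without the lock.
    ScopedLock lock(fn_mutex);
    if (cached_fn == NULL) {     // Second check, under the lock.
      Fn temp = create();        // Fully construct first...
      MemoryBarrier();           // ...then publish the pointer.
      cached_fn = temp;
    }
  }
  return cached_fn;              // Relies on dependent reads being ordered.
}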
@@ -369,13 +412,11 @@ void Time::TzSet() {
}
// Make standard and DST timezone names.
- OS::SNPrintF(Vector<char>(std_tz_name_, kTzNameSize),
- "%S",
- tzinfo_.StandardName);
+ WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
+ std_tz_name_, kTzNameSize, NULL, NULL);
std_tz_name_[kTzNameSize - 1] = '\0';
- OS::SNPrintF(Vector<char>(dst_tz_name_, kTzNameSize),
- "%S",
- tzinfo_.DaylightName);
+ WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
+ dst_tz_name_, kTzNameSize, NULL, NULL);
dst_tz_name_[kTzNameSize - 1] = '\0';
// If OS returned empty string or resource id (like "@tzres.dll,-211")
@@ -540,6 +581,7 @@ void OS::Setup() {
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srand(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
}
@@ -676,7 +718,7 @@ bool OS::Remove(const char* path) {
// Open log file in binary mode to avoid \n -> \r\n conversion.
-const char* OS::LogFileOpenMode = "wb";
+const char* const OS::LogFileOpenMode = "wb";
// Print (debug) message to console.
@@ -749,9 +791,13 @@ char* OS::StrChr(char* str, int c) {
void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
+ // Use _TRUNCATE; otherwise strncpy_s crashes (by design) if the buffer is too small.
+ size_t buffer_size = static_cast<size_t>(dest.length());
+ if (n + 1 > buffer_size) // Account for the trailing '\0'.
+ n = _TRUNCATE;
int result = strncpy_s(dest.start(), dest.length(), src, n);
USE(result);
- ASSERT(result == 0);
+ ASSERT(result == 0 || (n == _TRUNCATE && result == STRUNCATE));
}
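// What the _TRUNCATE handling above buys, in isolation: strncpy_s with a
// count larger than the buffer invokes the MSVC CRT invalid-parameter
// handler (crashing by design), while _TRUNCATE clips the copy and
// reports STRUNCATE instead:
static void TruncateExample() {
  char buf[4];
  int rc = strncpy_s(buf, sizeof(buf), "abcdef", _TRUNCATE);
  ASSERT(rc == STRUNCATE);  // buf now holds "abc" plus a trailing '\0'.
}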
@@ -765,6 +811,9 @@ static void* highest_ever_allocated = reinterpret_cast<void*>(0);
static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ ASSERT(limit_mutex != NULL);
+ ScopedLock lock(limit_mutex);
+
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
@@ -835,7 +884,7 @@ void* OS::Allocate(const size_t requested,
// For executable pages try to randomize the allocation address
if (prot == PAGE_EXECUTE_READWRITE &&
msize >= static_cast<size_t>(Page::kPageSize)) {
- address = (V8::RandomPrivate() << kPageSizeBits)
+ address = (V8::RandomPrivate(Isolate::Current()) << kPageSizeBits)
| kAllocationRandomAddressMin;
address &= kAllocationRandomAddressMax;
}
@@ -848,7 +897,7 @@ void* OS::Allocate(const size_t requested,
mbase = VirtualAlloc(NULL, msize, MEM_COMMIT | MEM_RESERVE, prot);
if (mbase == NULL) {
- LOG(StringEvent("OS::Allocate", "VirtualAlloc failed"));
+ LOG(ISOLATE, StringEvent("OS::Allocate", "VirtualAlloc failed"));
return NULL;
}
@@ -1191,7 +1240,8 @@ static bool LoadSymbols(HANDLE process_handle) {
if (err != ERROR_MOD_NOT_FOUND &&
err != ERROR_INVALID_HANDLE) return false;
}
- LOG(SharedLibraryEvent(
+ LOG(i::Isolate::Current(),
+ SharedLibraryEvent(
module_entry.szExePath,
reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
reinterpret_cast<unsigned int>(module_entry.modBaseAddr +
@@ -1421,24 +1471,6 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
// Definition of invalid thread handle and id.
static const HANDLE kNoThread = INVALID_HANDLE_VALUE;
-static const DWORD kNoThreadId = 0;
-
-
-class ThreadHandle::PlatformData : public Malloced {
- public:
- explicit PlatformData(ThreadHandle::Kind kind) {
- Initialize(kind);
- }
-
- void Initialize(ThreadHandle::Kind kind) {
- switch (kind) {
- case ThreadHandle::SELF: tid_ = GetCurrentThreadId(); break;
- case ThreadHandle::INVALID: tid_ = kNoThreadId; break;
- }
- }
- DWORD tid_; // Win32 thread identifier.
-};
-
// Entry point for threads. The supplied argument is a pointer to the thread
// object. The entry function dispatches to the run method in the thread
@@ -1446,43 +1478,11 @@ class ThreadHandle::PlatformData : public Malloced {
// convention.
static unsigned int __stdcall ThreadEntry(void* arg) {
Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the last parameter to _beginthreadex() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->thread_handle_data()->tid_ = GetCurrentThreadId();
thread->Run();
return 0;
}
-// Initialize thread handle to invalid handle.
-ThreadHandle::ThreadHandle(ThreadHandle::Kind kind) {
- data_ = new PlatformData(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
- delete data_;
-}
-
-
-// The thread is running if it has the same id as the current thread.
-bool ThreadHandle::IsSelf() const {
- return GetCurrentThreadId() == data_->tid_;
-}
-
-
-// Test for invalid thread handle.
-bool ThreadHandle::IsValid() const {
- return data_->tid_ != kNoThreadId;
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
- data_->Initialize(kind);
-}
-
-
class Thread::PlatformData : public Malloced {
public:
explicit PlatformData(HANDLE thread) : thread_(thread) {}
@@ -1493,13 +1493,15 @@ class Thread::PlatformData : public Malloced {
// Initialize a Win32 thread object. The thread has an invalid thread
// handle until it is started.
-Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(const Options& options)
+ : stack_size_(options.stack_size) {
data_ = new PlatformData(kNoThread);
- set_name("v8:<unknown>");
+ set_name(options.name);
}
-Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
+Thread::Thread(const char* name)
+ : stack_size_(0) {
data_ = new PlatformData(kNoThread);
set_name(name);
}
@@ -1524,13 +1526,11 @@ Thread::~Thread() {
void Thread::Start() {
data_->thread_ = reinterpret_cast<HANDLE>(
_beginthreadex(NULL,
- 0,
+ static_cast<unsigned>(stack_size_),
ThreadEntry,
this,
0,
- reinterpret_cast<unsigned int*>(
- &thread_handle_data()->tid_)));
- ASSERT(IsValid());
+ NULL));
}
@@ -1582,7 +1582,6 @@ void Thread::YieldCPU() {
class Win32Mutex : public Mutex {
public:
-
Win32Mutex() { InitializeCriticalSection(&cs_); }
virtual ~Win32Mutex() { DeleteCriticalSection(&cs_); }
@@ -1840,135 +1839,179 @@ Socket* OS::CreateSocket() {
// ----------------------------------------------------------------------------
// Win32 profiler support.
-//
-// On win32 we use a sampler thread with high priority to sample the program
-// counter for the profiled thread.
class Sampler::PlatformData : public Malloced {
public:
- explicit PlatformData(Sampler* sampler) {
- sampler_ = sampler;
- sampler_thread_ = INVALID_HANDLE_VALUE;
- profiled_thread_ = INVALID_HANDLE_VALUE;
+ // Get a handle to the calling thread. This is the thread that we are
+ // going to profile. We need to make a copy of the handle because we are
+ // going to use it in the sampler thread. Using GetThreadHandle() will
+ // not work in this case. We're using OpenThread because DuplicateHandle
+ // for some reason doesn't work in Chrome's sandbox.
+ PlatformData() : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
+ THREAD_SUSPEND_RESUME |
+ THREAD_QUERY_INFORMATION,
+ false,
+ GetCurrentThreadId())) {}
+
+ ~PlatformData() {
+ if (profiled_thread_ != NULL) {
+ CloseHandle(profiled_thread_);
+ profiled_thread_ = NULL;
+ }
}
- Sampler* sampler_;
- HANDLE sampler_thread_;
+ HANDLE profiled_thread() { return profiled_thread_; }
+
+ private:
HANDLE profiled_thread_;
- RuntimeProfilerRateLimiter rate_limiter_;
+};
+
- // Sampler thread handler.
- void Runner() {
- while (sampler_->IsActive()) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- Sample();
- Sleep(sampler_->interval_);
+class SamplerThread : public Thread {
+ public:
+ explicit SamplerThread(int interval)
+ : Thread("SamplerThread"),
+ interval_(interval) {}
+
+ static void AddActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::AddActiveSampler(sampler);
+ if (instance_ == NULL) {
+ instance_ = new SamplerThread(sampler->interval());
+ instance_->Start();
+ } else {
+ ASSERT(instance_->interval_ == sampler->interval());
+ }
+ }
+
+ static void RemoveActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::RemoveActiveSampler(sampler);
+ if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+ RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
+ instance_->Join();
+ delete instance_;
+ instance_ = NULL;
+ }
+ }
+
+ // Implement Thread::Run().
+ virtual void Run() {
+ SamplerRegistry::State state;
+ while ((state = SamplerRegistry::GetState()) !=
+ SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ // When CPU profiling is enabled, both JavaScript and C++ code are
+ // profiled, so we must not suspend.
+ if (!cpu_profiling_enabled) {
+ if (rate_limiter_.SuspendIfNecessary()) continue;
+ }
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
+ }
+ OS::Sleep(interval_);
}
}
- void Sample() {
- if (sampler_->IsProfiling()) {
- // Context used for sampling the register state of the profiled thread.
- CONTEXT context;
- memset(&context, 0, sizeof(context));
+ static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ if (!sampler->IsProfiling()) return;
+ SamplerThread* sampler_thread =
+ reinterpret_cast<SamplerThread*>(raw_sampler_thread);
+ sampler_thread->SampleContext(sampler);
+ }
+
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
+ void SampleContext(Sampler* sampler) {
+ HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
+ if (profiled_thread == NULL) return;
- static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
- if (SuspendThread(profiled_thread_) == kSuspendFailed) return;
- sample->state = Top::current_vm_state();
+ // Context used for sampling the register state of the profiled thread.
+ CONTEXT context;
+ memset(&context, 0, sizeof(context));
- context.ContextFlags = CONTEXT_FULL;
- if (GetThreadContext(profiled_thread_, &context) != 0) {
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
+ if (sample == NULL) sample = &sample_obj;
+
+ static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+ if (SuspendThread(profiled_thread) == kSuspendFailed) return;
+ sample->state = sampler->isolate()->current_vm_state();
+
+ context.ContextFlags = CONTEXT_FULL;
+ if (GetThreadContext(profiled_thread, &context) != 0) {
#if V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(context.Rip);
- sample->sp = reinterpret_cast<Address>(context.Rsp);
- sample->fp = reinterpret_cast<Address>(context.Rbp);
+ sample->pc = reinterpret_cast<Address>(context.Rip);
+ sample->sp = reinterpret_cast<Address>(context.Rsp);
+ sample->fp = reinterpret_cast<Address>(context.Rbp);
#else
- sample->pc = reinterpret_cast<Address>(context.Eip);
- sample->sp = reinterpret_cast<Address>(context.Esp);
- sample->fp = reinterpret_cast<Address>(context.Ebp);
+ sample->pc = reinterpret_cast<Address>(context.Eip);
+ sample->sp = reinterpret_cast<Address>(context.Esp);
+ sample->fp = reinterpret_cast<Address>(context.Ebp);
#endif
- sampler_->SampleStack(sample);
- sampler_->Tick(sample);
- }
- ResumeThread(profiled_thread_);
+ sampler->SampleStack(sample);
+ sampler->Tick(sample);
}
- if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
+ ResumeThread(profiled_thread);
}
+
+ const int interval_;
+ RuntimeProfilerRateLimiter rate_limiter_;
+
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static SamplerThread* instance_;
+
+ DISALLOW_COPY_AND_ASSIGN(SamplerThread);
};
-// Entry point for sampler thread.
-static unsigned int __stdcall SamplerEntry(void* arg) {
- Sampler::PlatformData* data =
- reinterpret_cast<Sampler::PlatformData*>(arg);
- data->Runner();
- return 0;
-}
+Mutex* SamplerThread::mutex_ = OS::CreateMutex();
+SamplerThread* SamplerThread::instance_ = NULL;
-// Initialize a profile sampler.
-Sampler::Sampler(int interval)
- : interval_(interval),
+Sampler::Sampler(Isolate* isolate, int interval)
+ : isolate_(isolate),
+ interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
- data_ = new PlatformData(this);
+ data_ = new PlatformData;
}
Sampler::~Sampler() {
+ ASSERT(!IsActive());
delete data_;
}
-// Start profiling.
void Sampler::Start() {
- // Do not start multiple threads for the same sampler.
ASSERT(!IsActive());
-
- // Get a handle to the calling thread. This is the thread that we are
- // going to profile. We need to make a copy of the handle because we are
- // going to use it in the sampler thread. Using GetThreadHandle() will
- // not work in this case. We're using OpenThread because DuplicateHandle
- // for some reason doesn't work in Chrome's sandbox.
- data_->profiled_thread_ = OpenThread(THREAD_GET_CONTEXT |
- THREAD_SUSPEND_RESUME |
- THREAD_QUERY_INFORMATION,
- false,
- GetCurrentThreadId());
- BOOL ok = data_->profiled_thread_ != NULL;
- if (!ok) return;
-
- // Start sampler thread.
- unsigned int tid;
SetActive(true);
- data_->sampler_thread_ = reinterpret_cast<HANDLE>(
- _beginthreadex(NULL, 0, SamplerEntry, data_, 0, &tid));
- // Set thread to high priority to increase sampling accuracy.
- SetThreadPriority(data_->sampler_thread_, THREAD_PRIORITY_TIME_CRITICAL);
+ SamplerThread::AddActiveSampler(this);
}
-// Stop profiling.
void Sampler::Stop() {
- // Seting active to false triggers termination of the sampler
- // thread.
+ ASSERT(IsActive());
+ SamplerThread::RemoveActiveSampler(this);
SetActive(false);
-
- // Wait for sampler thread to terminate.
- Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
- WaitForSingleObject(data_->sampler_thread_, INFINITE);
-
- // Release the thread handles
- CloseHandle(data_->sampler_thread_);
- CloseHandle(data_->profiled_thread_);
}
-
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
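// Lifecycle sketch for the shared SamplerThread above: the first sampler
// to start spawns one process-wide thread, later samplers just register
// with it, and the last one to stop joins and deletes it. MySampler is
// hypothetical; Sampler's pure virtuals are stubbed out for illustration
// (Tick() is assumed pure as well, though not shown in this hunk):
class MySampler : public Sampler {
 public:
  MySampler(Isolate* isolate, int interval) : Sampler(isolate, interval) {}
  virtual void Tick(TickSample* sample) {}
 protected:
  virtual void DoSampleStack(TickSample* sample) {}
};
// MySampler sampler(isolate, 1);  // interval in ms
// sampler.Start();                // registers; may spawn the SamplerThread
// sampler.Stop();                 // unregisters; may tear the thread down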
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index 88825e645..06d3ca467 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -114,6 +114,7 @@ int signbit(double x);
#endif // __GNUC__
#include "atomicops.h"
+#include "platform-tls.h"
#include "utils.h"
#include "v8globals.h"
@@ -177,7 +178,7 @@ class OS {
static bool Remove(const char* path);
// Log file open mode is platform-dependent due to line ends issues.
- static const char* LogFileOpenMode;
+ static const char* const LogFileOpenMode;
// Print output to console. This is mostly used for debugging output.
// On platforms that have standard terminal output, the output
@@ -287,18 +288,44 @@ class OS {
// positions indicated by the members of the CpuFeature enum from globals.h
static uint64_t CpuFeaturesImpliedByPlatform();
+ // Maximum size of the virtual memory. 0 means there is no artificial
+ // limit.
+ static intptr_t MaxVirtualMemory();
+
// Returns the double constant NAN
static double nan_value();
// Support runtime detection of VFP3 on ARM CPUs.
static bool ArmCpuHasFeature(CpuFeature feature);
+ // Support runtime detection of whether the hard float option of the
+ // EABI is used.
+ static bool ArmUsingHardFloat();
+
+ // Support runtime detection of FPU on MIPS CPUs.
+ static bool MipsCpuHasFeature(CpuFeature feature);
+
// Returns the activation frame alignment constraint or zero if
// the platform doesn't care. Guaranteed to be a power of two.
static int ActivationFrameAlignment();
static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
+#if defined(V8_TARGET_ARCH_IA32)
+ // Copy memory area to disjoint memory area.
+ static void MemCopy(void* dest, const void* src, size_t size);
+ // Limit below which the extra overhead of the MemCopy function is likely
+ // to outweigh the benefits of faster copying.
+ static const int kMinComplexMemCopy = 64;
+ typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
+
+#else // V8_TARGET_ARCH_IA32
+ static void MemCopy(void* dest, const void* src, size_t size) {
+ memcpy(dest, src, size);
+ }
+ static const int kMinComplexMemCopy = 256;
+#endif // V8_TARGET_ARCH_IA32
+
private:
static const int msPerSecond = 1000;
@@ -335,40 +362,6 @@ class VirtualMemory {
size_t size_; // Size of the virtual memory.
};
-
-// ----------------------------------------------------------------------------
-// ThreadHandle
-//
-// A ThreadHandle represents a thread identifier for a thread. The ThreadHandle
-// does not own the underlying os handle. Thread handles can be used for
-// refering to threads and testing equality.
-
-class ThreadHandle {
- public:
- enum Kind { SELF, INVALID };
- explicit ThreadHandle(Kind kind);
-
- // Destructor.
- ~ThreadHandle();
-
- // Test for thread running.
- bool IsSelf() const;
-
- // Test for valid thread handle.
- bool IsValid() const;
-
- // Get platform-specific data.
- class PlatformData;
- PlatformData* thread_handle_data() { return data_; }
-
- // Initialize the handle to kind
- void Initialize(Kind kind);
-
- private:
- PlatformData* data_; // Captures platform dependent data.
-};
-
-
// ----------------------------------------------------------------------------
// Thread
//
@@ -377,7 +370,7 @@ class ThreadHandle {
// thread. The Thread object should not be deallocated before the thread has
// terminated.
-class Thread: public ThreadHandle {
+class Thread {
public:
// Opaque data type for thread-local storage keys.
// LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified
@@ -388,8 +381,15 @@ class Thread: public ThreadHandle {
LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
};
+ struct Options {
+ Options() : name("v8:<unknown>"), stack_size(0) {}
+
+ const char* name;
+ int stack_size;
+ };
+
// Create new thread.
- Thread();
+ explicit Thread(const Options& options);
explicit Thread(const char* name);
virtual ~Thread();
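// Usage sketch for the new Options-based constructor; MyThread stands in
// for any concrete subclass (Thread::Run() is assumed pure virtual):
class MyThread : public Thread {
 public:
  explicit MyThread(const Options& options) : Thread(options) {}
  virtual void Run() { /* thread body */ }
};

static void SpawnWorker() {
  Thread::Options options;
  options.name = "v8:Worker";       // clipped to kMaxThreadNameLength
  options.stack_size = 512 * 1024;  // bytes; 0 keeps the platform default
  MyThread thread(options);
  thread.Start();
  thread.Join();
}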
@@ -421,19 +421,37 @@ class Thread: public ThreadHandle {
return GetThreadLocal(key) != NULL;
}
+#ifdef V8_FAST_TLS_SUPPORTED
+ static inline void* GetExistingThreadLocal(LocalStorageKey key) {
+ void* result = reinterpret_cast<void*>(
+ InternalGetExistingThreadLocal(static_cast<intptr_t>(key)));
+ ASSERT(result == GetThreadLocal(key));
+ return result;
+ }
+#else
+ static inline void* GetExistingThreadLocal(LocalStorageKey key) {
+ return GetThreadLocal(key);
+ }
+#endif
+
// A hint to the scheduler to let another thread run.
static void YieldCPU();
+
// The thread name length is limited to 16 based on Linux's implementation of
// prctl().
static const int kMaxThreadNameLength = 16;
+
+ class PlatformData;
+ PlatformData* data() { return data_; }
+
private:
void set_name(const char *name);
- class PlatformData;
PlatformData* data_;
char name_[kMaxThreadNameLength];
+ int stack_size_;
DISALLOW_COPY_AND_ASSIGN(Thread);
};
@@ -468,11 +486,12 @@ class Mutex {
// ----------------------------------------------------------------------------
// ScopedLock
//
-// Stack-allocated ScopedLocks provide block-scoped locking and unlocking
-// of a mutex.
+// Stack-allocated ScopedLocks provide block-scoped locking and
+// unlocking of a mutex.
class ScopedLock {
public:
explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
+ ASSERT(mutex_ != NULL);
mutex_->Lock();
}
~ScopedLock() {
@@ -568,24 +587,31 @@ class TickSample {
sp(NULL),
fp(NULL),
tos(NULL),
- frames_count(0) {}
+ frames_count(0),
+ has_external_callback(false) {}
StateTag state; // The state of the VM.
- Address pc; // Instruction pointer.
- Address sp; // Stack pointer.
- Address fp; // Frame pointer.
- Address tos; // Top stack value (*sp).
+ Address pc; // Instruction pointer.
+ Address sp; // Stack pointer.
+ Address fp; // Frame pointer.
+ union {
+ Address tos; // Top stack value (*sp).
+ Address external_callback;
+ };
static const int kMaxFramesCount = 64;
Address stack[kMaxFramesCount]; // Call stack.
- int frames_count; // Number of captured frames.
+ int frames_count : 8; // Number of captured frames.
+ bool has_external_callback : 1;
};
#ifdef ENABLE_LOGGING_AND_PROFILING
class Sampler {
public:
// Initialize sampler.
- explicit Sampler(int interval);
+ Sampler(Isolate* isolate, int interval);
virtual ~Sampler();
+ int interval() const { return interval_; }
+
// Performs stack sampling.
void SampleStack(TickSample* sample) {
DoSampleStack(sample);
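// The union and the bit-fields above keep TickSample compact: 'tos' and
// 'external_callback' overlay one word, discriminated by
// 'has_external_callback'. A usage sketch (the helper is illustrative):
static void RecordExternalCallback(TickSample* sample, Address entry) {
  sample->external_callback = entry;     // overwrites 'tos' via the union
  sample->has_external_callback = true;  // tells readers which member is live
}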
@@ -608,11 +634,16 @@ class Sampler {
// Whether the sampler is running (that is, consumes resources).
bool IsActive() const { return NoBarrier_Load(&active_); }
+ Isolate* isolate() { return isolate_; }
+
// Used in tests to make sure that stack sampling is performed.
int samples_taken() const { return samples_taken_; }
void ResetSamplesTaken() { samples_taken_ = 0; }
class PlatformData;
+ PlatformData* data() { return data_; }
+
+ PlatformData* platform_data() { return data_; }
protected:
virtual void DoSampleStack(TickSample* sample) = 0;
@@ -621,6 +652,7 @@ class Sampler {
void SetActive(bool value) { NoBarrier_Store(&active_, value); }
void IncSamplesTaken() { if (++samples_taken_ < 0) samples_taken_ = 0; }
+ Isolate* isolate_;
const int interval_;
Atomic32 profiling_;
Atomic32 active_;
@@ -629,6 +661,7 @@ class Sampler {
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
};
+
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/register-allocator-x64.h b/deps/v8/src/preparse-data-format.h
index a2884d912..e64326e57 100644
--- a/deps/v8/src/x64/register-allocator-x64.h
+++ b/deps/v8/src/preparse-data-format.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,19 +25,38 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_X64_REGISTER_ALLOCATOR_X64_H_
-#define V8_X64_REGISTER_ALLOCATOR_X64_H_
+#ifndef V8_PREPARSE_DATA_FORMAT_H_
+#define V8_PREPARSE_DATA_FORMAT_H_
namespace v8 {
namespace internal {
-class RegisterAllocatorConstants : public AllStatic {
+// Generic and general data used by preparse data recorders and readers.
+
+struct PreparseDataConstants {
public:
- static const int kNumRegisters = 10;
- static const int kInvalidRegister = -1;
+ // Layout and constants of the preparse data exchange format.
+ static const unsigned kMagicNumber = 0xBadDead;
+ static const unsigned kCurrentVersion = 7;
+
+ static const int kMagicOffset = 0;
+ static const int kVersionOffset = 1;
+ static const int kHasErrorOffset = 2;
+ static const int kFunctionsSizeOffset = 3;
+ static const int kSymbolCountOffset = 4;
+ static const int kSizeOffset = 5;
+ static const int kHeaderSize = 6;
+
+ // If encoding a message, the following positions are fixed.
+ static const int kMessageStartPos = 0;
+ static const int kMessageEndPos = 1;
+ static const int kMessageArgCountPos = 2;
+ static const int kMessageTextPos = 3;
+
+ static const unsigned char kNumberTerminator = 0x80u;
};
-} } // namespace v8::internal
+} } // namespace v8::internal.
-#endif // V8_X64_REGISTER_ALLOCATOR_X64_H_
+#endif // V8_PREPARSE_DATA_FORMAT_H_
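// A sketch of the first check a reader of this format would perform,
// built only from the constants above (the helper name is illustrative,
// not V8 API):
static bool HasValidPreparseHeader(const unsigned* data, int word_count) {
  return word_count >= PreparseDataConstants::kHeaderSize &&
         data[PreparseDataConstants::kMagicOffset] ==
             PreparseDataConstants::kMagicNumber &&
         data[PreparseDataConstants::kVersionOffset] ==
             PreparseDataConstants::kCurrentVersion;
}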
diff --git a/deps/v8/src/preparse-data.cc b/deps/v8/src/preparse-data.cc
index 7c9d8a610..98c343e79 100644
--- a/deps/v8/src/preparse-data.cc
+++ b/deps/v8/src/preparse-data.cc
@@ -26,13 +26,13 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../include/v8stdint.h"
-#include "globals.h"
+
+#include "preparse-data-format.h"
+#include "preparse-data.h"
+
#include "checks.h"
-#include "allocation.h"
-#include "utils.h"
-#include "list-inl.h"
+#include "globals.h"
#include "hashmap.h"
-#include "preparse-data.h"
namespace v8 {
namespace internal {
@@ -74,7 +74,7 @@ void FunctionLoggingParserRecorder::LogMessage(int start_pos,
function_store_.Add((arg_opt == NULL) ? 0 : 1);
STATIC_ASSERT(PreparseDataConstants::kMessageTextPos == 3);
WriteString(CStrVector(message));
- if (arg_opt) WriteString(CStrVector(arg_opt));
+ if (arg_opt != NULL) WriteString(CStrVector(arg_opt));
is_recording_ = false;
}
diff --git a/deps/v8/src/preparse-data.h b/deps/v8/src/preparse-data.h
index bb5707b61..c6503c4fc 100644
--- a/deps/v8/src/preparse-data.h
+++ b/deps/v8/src/preparse-data.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,40 +25,16 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_PREPARSER_DATA_H_
-#define V8_PREPARSER_DATA_H_
+#ifndef V8_PREPARSE_DATA_H_
+#define V8_PREPARSE_DATA_H_
+#include "allocation.h"
#include "hashmap.h"
+#include "utils-inl.h"
namespace v8 {
namespace internal {
-// Generic and general data used by preparse data recorders and readers.
-
-class PreparseDataConstants : public AllStatic {
- public:
- // Layout and constants of the preparse data exchange format.
- static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 6;
-
- static const int kMagicOffset = 0;
- static const int kVersionOffset = 1;
- static const int kHasErrorOffset = 2;
- static const int kFunctionsSizeOffset = 3;
- static const int kSymbolCountOffset = 4;
- static const int kSizeOffset = 5;
- static const int kHeaderSize = 6;
-
- // If encoding a message, the following positions are fixed.
- static const int kMessageStartPos = 0;
- static const int kMessageEndPos = 1;
- static const int kMessageArgCountPos = 2;
- static const int kMessageTextPos = 3;
-
- static const byte kNumberTerminator = 0x80u;
-};
-
-
// ----------------------------------------------------------------------------
// ParserRecorder - Logging of preparser data.
@@ -72,7 +48,8 @@ class ParserRecorder {
virtual void LogFunction(int start,
int end,
int literals,
- int properties) = 0;
+ int properties,
+ int strict_mode) = 0;
// Logs a symbol creation of a literal or identifier.
virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
@@ -108,11 +85,16 @@ class FunctionLoggingParserRecorder : public ParserRecorder {
FunctionLoggingParserRecorder();
virtual ~FunctionLoggingParserRecorder() {}
- virtual void LogFunction(int start, int end, int literals, int properties) {
+ virtual void LogFunction(int start,
+ int end,
+ int literals,
+ int properties,
+ int strict_mode) {
function_store_.Add(start);
function_store_.Add(end);
function_store_.Add(literals);
function_store_.Add(properties);
+ function_store_.Add(strict_mode);
}
// Logs an error message and marks the log as containing an error.
@@ -246,4 +228,4 @@ class CompleteParserRecorder: public FunctionLoggingParserRecorder {
} } // namespace v8::internal.
-#endif // V8_PREPARSER_DATA_H_
+#endif // V8_PREPARSE_DATA_H_
diff --git a/deps/v8/src/preparser-api.cc b/deps/v8/src/preparser-api.cc
index 3817935f8..e0ab5001f 100644
--- a/deps/v8/src/preparser-api.cc
+++ b/deps/v8/src/preparser-api.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -26,12 +26,14 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../include/v8-preparser.h"
+
#include "globals.h"
#include "checks.h"
#include "allocation.h"
#include "utils.h"
#include "list.h"
#include "scanner-base.h"
+#include "preparse-data-format.h"
#include "preparse-data.h"
#include "preparser.h"
@@ -156,21 +158,8 @@ class InputStreamUTF16Buffer : public UC16CharacterStream {
};
-class StandAloneJavaScriptScanner : public JavaScriptScanner {
- public:
- void Initialize(UC16CharacterStream* source) {
- source_ = source;
- Init();
- // Skip initial whitespace allowing HTML comment ends just like
- // after a newline and scan first token.
- has_line_terminator_before_next_ = true;
- SkipWhiteSpace();
- Scan();
- }
-};
-
-
-// Functions declared by allocation.h
+// Functions declared by allocation.h and implemented either in api.cc
+// (for v8) or here (for a stand-alone preparser).
void FatalProcessOutOfMemory(const char* reason) {
V8_Fatal(__FILE__, __LINE__, reason);
@@ -187,7 +176,8 @@ UnicodeInputStream::~UnicodeInputStream() { }
PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) {
internal::InputStreamUTF16Buffer buffer(input);
uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
- internal::StandAloneJavaScriptScanner scanner;
+ internal::UnicodeCache unicode_cache;
+ internal::JavaScriptScanner scanner(&unicode_cache);
scanner.Initialize(&buffer);
internal::CompleteParserRecorder recorder;
preparser::PreParser::PreParseResult result =
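// Usage sketch for the stand-alone entry point above; only Preparse()'s
// signature is taken from this file, and 'stream' stands for any concrete
// UnicodeInputStream (its interface lives in include/v8-preparser.h and
// is assumed here, not shown):
//
//   v8::PreParserData data = v8::Preparse(stream, 64 * 1024 /* stack */);
//   // 'data' wraps the function positions and symbols recorded by
//   // CompleteParserRecorder, or flags a stack overflow.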
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index 252e88f46..da83f96ae 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -1,5 +1,4 @@
-
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -33,7 +32,9 @@
#include "allocation.h"
#include "utils.h"
#include "list.h"
+
#include "scanner-base.h"
+#include "preparse-data-format.h"
#include "preparse-data.h"
#include "preparser.h"
@@ -55,13 +56,6 @@ namespace preparser {
namespace i = ::v8::internal;
-#define CHECK_OK ok); \
- if (!*ok) return -1; \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
-
void PreParser::ReportUnexpectedToken(i::Token::Value token) {
// We don't report stack overflows here, to avoid increasing the
// stack depth even further. Instead we report it after parsing is
@@ -83,9 +77,14 @@ void PreParser::ReportUnexpectedToken(i::Token::Value token) {
return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
"unexpected_token_string", NULL);
case i::Token::IDENTIFIER:
- case i::Token::FUTURE_RESERVED_WORD:
return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
"unexpected_token_identifier", NULL);
+ case i::Token::FUTURE_RESERVED_WORD:
+ return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
+ "unexpected_reserved", NULL);
+ case i::Token::FUTURE_STRICT_RESERVED_WORD:
+ return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
+ "unexpected_strict_reserved", NULL);
default:
const char* name = i::Token::String(token);
ReportMessageAt(source_location.beg_pos, source_location.end_pos,
@@ -94,18 +93,53 @@ void PreParser::ReportUnexpectedToken(i::Token::Value token) {
}
+// Checks whether the last octal literal seen lies between beg_pos and
+// end_pos and, if so, reports an error.
+void PreParser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+ i::Scanner::Location octal = scanner_->octal_position();
+ if (beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) {
+ ReportMessageAt(octal.beg_pos, octal.end_pos, "strict_octal_literal", NULL);
+ scanner_->clear_octal_position();
+ *ok = false;
+ }
+}
+
+
+#define CHECK_OK ok); \
+ if (!*ok) return kUnknownSourceElements; \
+ ((void)0
+#define DUMMY ) // to make indentation work
+#undef DUMMY
+
+
PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
bool* ok) {
// SourceElements ::
// (Statement)* <end_token>
+ bool allow_directive_prologue = true;
while (peek() != end_token) {
- ParseStatement(CHECK_OK);
+ Statement statement = ParseStatement(CHECK_OK);
+ if (allow_directive_prologue) {
+ if (statement.IsUseStrictLiteral()) {
+ set_strict_mode();
+ } else if (!statement.IsStringLiteral()) {
+ allow_directive_prologue = false;
+ }
+ }
}
return kUnknownSourceElements;
}
+#undef CHECK_OK
+#define CHECK_OK ok); \
+ if (!*ok) return Statement::Default(); \
+ ((void)0
+#define DUMMY ) // to make indentation work
+#undef DUMMY
+
+
PreParser::Statement PreParser::ParseStatement(bool* ok) {
// Statement ::
// Block
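// How the redefined CHECK_OK macro above expands (the DUMMY define/undef
// pair exists only to keep auto-indenters happy):
//
//   ParseStatement(CHECK_OK);
// becomes
//   ParseStatement(ok);
//   if (!*ok) return Statement::Default();
//   ((void)0);
//
// so every parse call both threads 'ok' through and early-returns the
// section's default value on failure.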
@@ -142,10 +176,10 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
case i::Token::SEMICOLON:
Next();
- return kUnknownStatement;
+ return Statement::Default();
case i::Token::IF:
- return ParseIfStatement(ok);
+ return ParseIfStatement(ok);
case i::Token::DO:
return ParseDoWhileStatement(ok);
@@ -180,9 +214,6 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
case i::Token::FUNCTION:
return ParseFunctionDeclaration(ok);
- case i::Token::NATIVE:
- return ParseNativeDeclaration(ok);
-
case i::Token::DEBUGGER:
return ParseDebuggerStatement(ok);
@@ -196,32 +227,24 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
// FunctionDeclaration ::
// 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
Expect(i::Token::FUNCTION, CHECK_OK);
- ParseIdentifier(CHECK_OK);
- ParseFunctionLiteral(CHECK_OK);
- return kUnknownStatement;
-}
+ Identifier identifier = ParseIdentifier(CHECK_OK);
+ i::Scanner::Location location = scanner_->location();
-// Language extension which is only enabled for source files loaded
-// through the API's extension mechanism. A native function
-// declaration is resolved by looking up the function through a
-// callback provided by the extension.
-PreParser::Statement PreParser::ParseNativeDeclaration(bool* ok) {
- Expect(i::Token::NATIVE, CHECK_OK);
- Expect(i::Token::FUNCTION, CHECK_OK);
- ParseIdentifier(CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
- bool done = (peek() == i::Token::RPAREN);
- while (!done) {
- ParseIdentifier(CHECK_OK);
- done = (peek() == i::Token::RPAREN);
- if (!done) {
- Expect(i::Token::COMMA, CHECK_OK);
+ Expression function_value = ParseFunctionLiteral(CHECK_OK);
+
+ if (function_value.IsStrictFunction() &&
+ !identifier.IsValidStrictVariable()) {
+ // Strict mode violation, using either reserved word or eval/arguments
+ // as name of strict function.
+ const char* type = "strict_function_name";
+ if (identifier.IsFutureStrictReserved()) {
+ type = "strict_reserved_word";
}
+ ReportMessageAt(location.beg_pos, location.end_pos, type, NULL);
+ *ok = false;
}
- Expect(i::Token::RPAREN, CHECK_OK);
- Expect(i::Token::SEMICOLON, CHECK_OK);
- return kUnknownStatement;
+ return Statement::FunctionDeclaration();
}
@@ -234,10 +257,18 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) {
//
Expect(i::Token::LBRACE, CHECK_OK);
while (peek() != i::Token::RBRACE) {
- ParseStatement(CHECK_OK);
+ i::Scanner::Location start_location = scanner_->peek_location();
+ Statement statement = ParseStatement(CHECK_OK);
+ i::Scanner::Location end_location = scanner_->location();
+ if (strict_mode() && statement.IsFunctionDeclaration()) {
+ ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+ "strict_function", NULL);
+ *ok = false;
+ return Statement::Default();
+ }
}
- Expect(i::Token::RBRACE, CHECK_OK);
- return kUnknownStatement;
+ Expect(i::Token::RBRACE, ok);
+ return Statement::Default();
}
@@ -265,10 +296,17 @@ PreParser::Statement PreParser::ParseVariableDeclarations(bool accept_IN,
if (peek() == i::Token::VAR) {
Consume(i::Token::VAR);
} else if (peek() == i::Token::CONST) {
+ if (strict_mode()) {
+ i::Scanner::Location location = scanner_->peek_location();
+ ReportMessageAt(location.beg_pos, location.end_pos,
+ "strict_const", NULL);
+ *ok = false;
+ return Statement::Default();
+ }
Consume(i::Token::CONST);
} else {
*ok = false;
- return 0;
+ return Statement::Default();
}
// The scope of a variable/const declared anywhere inside a function
@@ -277,7 +315,14 @@ PreParser::Statement PreParser::ParseVariableDeclarations(bool accept_IN,
do {
// Parse variable name.
if (nvars > 0) Consume(i::Token::COMMA);
- ParseIdentifier(CHECK_OK);
+ Identifier identifier = ParseIdentifier(CHECK_OK);
+ if (strict_mode() && !identifier.IsValidStrictVariable()) {
+ StrictModeIdentifierViolation(scanner_->location(),
+ "strict_var_name",
+ identifier,
+ ok);
+ return Statement::Default();
+ }
nvars++;
if (peek() == i::Token::ASSIGN) {
Expect(i::Token::ASSIGN, CHECK_OK);
@@ -286,24 +331,37 @@ PreParser::Statement PreParser::ParseVariableDeclarations(bool accept_IN,
} while (peek() == i::Token::COMMA);
if (num_decl != NULL) *num_decl = nvars;
- return kUnknownStatement;
+ return Statement::Default();
}
-PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(
- bool* ok) {
+PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
// ExpressionStatement | LabelledStatement ::
// Expression ';'
// Identifier ':' Statement
Expression expr = ParseExpression(true, CHECK_OK);
- if (peek() == i::Token::COLON && expr == kIdentifierExpression) {
- Consume(i::Token::COLON);
- return ParseStatement(ok);
+ if (expr.IsRawIdentifier()) {
+ if (peek() == i::Token::COLON &&
+ (!strict_mode() || !expr.AsIdentifier().IsFutureReserved())) {
+ Consume(i::Token::COLON);
+ i::Scanner::Location start_location = scanner_->peek_location();
+ Statement statement = ParseStatement(CHECK_OK);
+ if (strict_mode() && statement.IsFunctionDeclaration()) {
+ i::Scanner::Location end_location = scanner_->location();
+ ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+ "strict_function", NULL);
+ *ok = false;
+ }
+ return Statement::Default();
+ }
+ // Preparsing is disabled for extensions (because the extension details
+ // aren't passed to lazily compiled functions), so we don't
+ // accept "native function" in the preparser.
}
// Parsed expression statement.
ExpectSemicolon(CHECK_OK);
- return kUnknownStatement;
+ return Statement::ExpressionStatement(expr);
}
@@ -320,7 +378,7 @@ PreParser::Statement PreParser::ParseIfStatement(bool* ok) {
Next();
ParseStatement(CHECK_OK);
}
- return kUnknownStatement;
+ return Statement::Default();
}
@@ -330,14 +388,14 @@ PreParser::Statement PreParser::ParseContinueStatement(bool* ok) {
Expect(i::Token::CONTINUE, CHECK_OK);
i::Token::Value tok = peek();
- if (!scanner_->has_line_terminator_before_next() &&
+ if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
tok != i::Token::SEMICOLON &&
tok != i::Token::RBRACE &&
tok != i::Token::EOS) {
ParseIdentifier(CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
- return kUnknownStatement;
+ return Statement::Default();
}
@@ -347,14 +405,14 @@ PreParser::Statement PreParser::ParseBreakStatement(bool* ok) {
Expect(i::Token::BREAK, CHECK_OK);
i::Token::Value tok = peek();
- if (!scanner_->has_line_terminator_before_next() &&
+ if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
tok != i::Token::SEMICOLON &&
tok != i::Token::RBRACE &&
tok != i::Token::EOS) {
ParseIdentifier(CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
- return kUnknownStatement;
+ return Statement::Default();
}
@@ -373,14 +431,14 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
// This is not handled during preparsing.
i::Token::Value tok = peek();
- if (!scanner_->has_line_terminator_before_next() &&
+ if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
tok != i::Token::SEMICOLON &&
tok != i::Token::RBRACE &&
tok != i::Token::EOS) {
ParseExpression(true, CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
- return kUnknownStatement;
+ return Statement::Default();
}
@@ -388,6 +446,13 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
// WithStatement ::
// 'with' '(' Expression ')' Statement
Expect(i::Token::WITH, CHECK_OK);
+ if (strict_mode()) {
+ i::Scanner::Location location = scanner_->location();
+ ReportMessageAt(location.beg_pos, location.end_pos,
+ "strict_mode_with", NULL);
+ *ok = false;
+ return Statement::Default();
+ }
Expect(i::Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
Expect(i::Token::RPAREN, CHECK_OK);
@@ -395,7 +460,7 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
scope_->EnterWith();
ParseStatement(CHECK_OK);
scope_->LeaveWith();
- return kUnknownStatement;
+ return Statement::Default();
}
@@ -419,13 +484,20 @@ PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
Expect(i::Token::DEFAULT, CHECK_OK);
Expect(i::Token::COLON, CHECK_OK);
} else {
- ParseStatement(CHECK_OK);
+ i::Scanner::Location start_location = scanner_->peek_location();
+ Statement statement = ParseStatement(CHECK_OK);
+ if (strict_mode() && statement.IsFunctionDeclaration()) {
+ i::Scanner::Location end_location = scanner_->location();
+ ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+ "strict_function", NULL);
+ *ok = false;
+ return Statement::Default();
+ }
}
token = peek();
}
- Expect(i::Token::RBRACE, CHECK_OK);
-
- return kUnknownStatement;
+ Expect(i::Token::RBRACE, ok);
+ return Statement::Default();
}
@@ -438,8 +510,8 @@ PreParser::Statement PreParser::ParseDoWhileStatement(bool* ok) {
Expect(i::Token::WHILE, CHECK_OK);
Expect(i::Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
- return kUnknownStatement;
+ Expect(i::Token::RPAREN, ok);
+ return Statement::Default();
}
@@ -451,8 +523,8 @@ PreParser::Statement PreParser::ParseWhileStatement(bool* ok) {
Expect(i::Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
Expect(i::Token::RPAREN, CHECK_OK);
- ParseStatement(CHECK_OK);
- return kUnknownStatement;
+ ParseStatement(ok);
+ return Statement::Default();
}
@@ -472,7 +544,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
Expect(i::Token::RPAREN, CHECK_OK);
ParseStatement(CHECK_OK);
- return kUnknownStatement;
+ return Statement::Default();
}
} else {
ParseExpression(false, CHECK_OK);
@@ -482,7 +554,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
Expect(i::Token::RPAREN, CHECK_OK);
ParseStatement(CHECK_OK);
- return kUnknownStatement;
+ return Statement::Default();
}
}
}
@@ -500,8 +572,8 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
}
Expect(i::Token::RPAREN, CHECK_OK);
- ParseStatement(CHECK_OK);
- return kUnknownStatement;
+ ParseStatement(ok);
+ return Statement::Default();
}
@@ -510,17 +582,16 @@ PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
// 'throw' [no line terminator] Expression ';'
Expect(i::Token::THROW, CHECK_OK);
- if (scanner_->has_line_terminator_before_next()) {
+ if (scanner_->HasAnyLineTerminatorBeforeNext()) {
i::JavaScriptScanner::Location pos = scanner_->location();
ReportMessageAt(pos.beg_pos, pos.end_pos,
"newline_after_throw", NULL);
*ok = false;
- return kUnknownStatement;
+ return Statement::Default();
}
ParseExpression(true, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
-
- return kUnknownStatement;
+ ExpectSemicolon(ok);
+ return Statement::Default();
}
@@ -547,12 +618,19 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
if (peek() == i::Token::CATCH) {
Consume(i::Token::CATCH);
Expect(i::Token::LPAREN, CHECK_OK);
- ParseIdentifier(CHECK_OK);
+ Identifier id = ParseIdentifier(CHECK_OK);
+ if (strict_mode() && !id.IsValidStrictVariable()) {
+ StrictModeIdentifierViolation(scanner_->location(),
+ "strict_catch_variable",
+ id,
+ ok);
+ return Statement::Default();
+ }
Expect(i::Token::RPAREN, CHECK_OK);
scope_->EnterWith();
ParseBlock(ok);
scope_->LeaveWith();
- if (!*ok) return kUnknownStatement;
+ if (!*ok) return Statement::Default();
catch_or_finally_seen = true;
}
if (peek() == i::Token::FINALLY) {
@@ -563,7 +641,7 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
if (!catch_or_finally_seen) {
*ok = false;
}
- return kUnknownStatement;
+ return Statement::Default();
}
@@ -575,11 +653,19 @@ PreParser::Statement PreParser::ParseDebuggerStatement(bool* ok) {
// 'debugger' ';'
Expect(i::Token::DEBUGGER, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- return kUnknownStatement;
+ ExpectSemicolon(ok);
+ return Statement::Default();
}
+#undef CHECK_OK
+#define CHECK_OK ok); \
+ if (!*ok) return Expression::Default(); \
+ ((void)0
+#define DUMMY ) // to make indentation work
+#undef DUMMY
+
+
// Precedence = 1
PreParser::Expression PreParser::ParseExpression(bool accept_IN, bool* ok) {
// Expression ::
@@ -590,7 +676,7 @@ PreParser::Expression PreParser::ParseExpression(bool accept_IN, bool* ok) {
while (peek() == i::Token::COMMA) {
Expect(i::Token::COMMA, CHECK_OK);
ParseAssignmentExpression(accept_IN, CHECK_OK);
- result = kUnknownExpression;
+ result = Expression::Default();
}
return result;
}
@@ -603,6 +689,7 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
// ConditionalExpression
// LeftHandSideExpression AssignmentOperator AssignmentExpression
+ i::Scanner::Location before = scanner_->peek_location();
Expression expression = ParseConditionalExpression(accept_IN, CHECK_OK);
if (!i::Token::IsAssignmentOp(peek())) {
@@ -610,14 +697,23 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
return expression;
}
+ if (strict_mode() && expression.IsIdentifier() &&
+ expression.AsIdentifier().IsEvalOrArguments()) {
+ i::Scanner::Location after = scanner_->location();
+ ReportMessageAt(before.beg_pos, after.end_pos,
+ "strict_lhs_assignment", NULL);
+ *ok = false;
+ return Expression::Default();
+ }
+
i::Token::Value op = Next(); // Get assignment operator.
ParseAssignmentExpression(accept_IN, CHECK_OK);
- if ((op == i::Token::ASSIGN) && (expression == kThisPropertyExpression)) {
+ if ((op == i::Token::ASSIGN) && expression.IsThisProperty()) {
scope_->AddProperty();
}
- return kUnknownExpression;
+ return Expression::Default();
}
@@ -638,7 +734,7 @@ PreParser::Expression PreParser::ParseConditionalExpression(bool accept_IN,
ParseAssignmentExpression(true, CHECK_OK);
Expect(i::Token::COLON, CHECK_OK);
ParseAssignmentExpression(accept_IN, CHECK_OK);
- return kUnknownExpression;
+ return Expression::Default();
}
@@ -660,7 +756,7 @@ PreParser::Expression PreParser::ParseBinaryExpression(int prec,
while (Precedence(peek(), accept_IN) == prec1) {
Next();
ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
- result = kUnknownExpression;
+ result = Expression::Default();
}
}
return result;
@@ -681,10 +777,22 @@ PreParser::Expression PreParser::ParseUnaryExpression(bool* ok) {
// '!' UnaryExpression
i::Token::Value op = peek();
- if (i::Token::IsUnaryOp(op) || i::Token::IsCountOp(op)) {
+ if (i::Token::IsUnaryOp(op)) {
op = Next();
ParseUnaryExpression(ok);
- return kUnknownExpression;
+ return Expression::Default();
+ } else if (i::Token::IsCountOp(op)) {
+ op = Next();
+ i::Scanner::Location before = scanner_->peek_location();
+ Expression expression = ParseUnaryExpression(CHECK_OK);
+ if (strict_mode() && expression.IsIdentifier() &&
+ expression.AsIdentifier().IsEvalOrArguments()) {
+ i::Scanner::Location after = scanner_->location();
+ ReportMessageAt(before.beg_pos, after.end_pos,
+ "strict_lhs_prefix", NULL);
+ *ok = false;
+ }
+ return Expression::Default();
} else {
return ParsePostfixExpression(ok);
}
@@ -695,11 +803,20 @@ PreParser::Expression PreParser::ParsePostfixExpression(bool* ok) {
// PostfixExpression ::
// LeftHandSideExpression ('++' | '--')?
+ i::Scanner::Location before = scanner_->peek_location();
Expression expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner_->has_line_terminator_before_next() &&
+ if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
i::Token::IsCountOp(peek())) {
+ if (strict_mode() && expression.IsIdentifier() &&
+ expression.AsIdentifier().IsEvalOrArguments()) {
+ i::Scanner::Location after = scanner_->location();
+ ReportMessageAt(before.beg_pos, after.end_pos,
+ "strict_lhs_postfix", NULL);
+ *ok = false;
+ return Expression::Default();
+ }
Next();
- return kUnknownExpression;
+ return Expression::Default();
}
return expression;
}
@@ -709,7 +826,7 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
// LeftHandSideExpression ::
// (NewExpression | MemberExpression) ...
- Expression result;
+ Expression result = Expression::Default();
if (peek() == i::Token::NEW) {
result = ParseNewExpression(CHECK_OK);
} else {
@@ -722,27 +839,27 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
Consume(i::Token::LBRACK);
ParseExpression(true, CHECK_OK);
Expect(i::Token::RBRACK, CHECK_OK);
- if (result == kThisExpression) {
- result = kThisPropertyExpression;
+ if (result.IsThis()) {
+ result = Expression::ThisProperty();
} else {
- result = kUnknownExpression;
+ result = Expression::Default();
}
break;
}
case i::Token::LPAREN: {
ParseArguments(CHECK_OK);
- result = kUnknownExpression;
+ result = Expression::Default();
break;
}
case i::Token::PERIOD: {
Consume(i::Token::PERIOD);
ParseIdentifierName(CHECK_OK);
- if (result == kThisExpression) {
- result = kThisPropertyExpression;
+ if (result.IsThis()) {
+ result = Expression::ThisProperty();
} else {
- result = kUnknownExpression;
+ result = Expression::Default();
}
break;
}
@@ -788,13 +905,21 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
// ('[' Expression ']' | '.' Identifier | Arguments)*
// Parse the initial primary or function expression.
- Expression result = kUnknownExpression;
+ Expression result = Expression::Default();
if (peek() == i::Token::FUNCTION) {
Consume(i::Token::FUNCTION);
+ Identifier identifier = Identifier::Default();
if (peek_any_identifier()) {
- ParseIdentifier(CHECK_OK);
+ identifier = ParseIdentifier(CHECK_OK);
}
result = ParseFunctionLiteral(CHECK_OK);
+ if (result.IsStrictFunction() && !identifier.IsValidStrictVariable()) {
+ StrictModeIdentifierViolation(scanner_->location(),
+ "strict_function_name",
+ identifier,
+ ok);
+ return Expression::Default();
+ }
} else {
result = ParsePrimaryExpression(CHECK_OK);
}
@@ -805,20 +930,20 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
Consume(i::Token::LBRACK);
ParseExpression(true, CHECK_OK);
Expect(i::Token::RBRACK, CHECK_OK);
- if (result == kThisExpression) {
- result = kThisPropertyExpression;
+ if (result.IsThis()) {
+ result = Expression::ThisProperty();
} else {
- result = kUnknownExpression;
+ result = Expression::Default();
}
break;
}
case i::Token::PERIOD: {
Consume(i::Token::PERIOD);
ParseIdentifierName(CHECK_OK);
- if (result == kThisExpression) {
- result = kThisPropertyExpression;
+ if (result.IsThis()) {
+ result = Expression::ThisProperty();
} else {
- result = kUnknownExpression;
+ result = Expression::Default();
}
break;
}
@@ -827,7 +952,7 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
// Consume one of the new prefixes (already parsed).
ParseArguments(CHECK_OK);
new_count--;
- result = kUnknownExpression;
+ result = Expression::Default();
break;
}
default:
@@ -851,18 +976,36 @@ PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
// RegExpLiteral
// '(' Expression ')'
- Expression result = kUnknownExpression;
+ Expression result = Expression::Default();
switch (peek()) {
case i::Token::THIS: {
Next();
- result = kThisExpression;
+ result = Expression::This();
break;
}
- case i::Token::IDENTIFIER:
case i::Token::FUTURE_RESERVED_WORD: {
- ParseIdentifier(CHECK_OK);
- result = kIdentifierExpression;
+ Next();
+ i::Scanner::Location location = scanner_->location();
+ ReportMessageAt(location.beg_pos, location.end_pos,
+ "reserved_word", NULL);
+ *ok = false;
+ return Expression::Default();
+ }
+
+ case i::Token::FUTURE_STRICT_RESERVED_WORD:
+ if (strict_mode()) {
+ Next();
+ i::Scanner::Location location = scanner_->location();
+ ReportMessageAt(location.beg_pos, location.end_pos,
+ "strict_reserved_word", NULL);
+ *ok = false;
+ return Expression::Default();
+ }
+ // FALLTHROUGH
+ case i::Token::IDENTIFIER: {
+ Identifier id = ParseIdentifier(CHECK_OK);
+ result = Expression::FromIdentifier(id);
break;
}
@@ -900,7 +1043,7 @@ PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
parenthesized_function_ = (peek() == i::Token::FUNCTION);
result = ParseExpression(true, CHECK_OK);
Expect(i::Token::RPAREN, CHECK_OK);
- if (result == kIdentifierExpression) result = kUnknownExpression;
+ result = result.Parenthesize();
break;
case i::Token::MOD:
@@ -910,7 +1053,7 @@ PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
default: {
Next();
*ok = false;
- return kUnknownExpression;
+ return Expression::Default();
}
}
@@ -933,7 +1076,7 @@ PreParser::Expression PreParser::ParseArrayLiteral(bool* ok) {
Expect(i::Token::RBRACK, CHECK_OK);
scope_->NextMaterializedLiteralIndex();
- return kUnknownExpression;
+ return Expression::Default();
}
@@ -949,20 +1092,22 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
i::Token::Value next = peek();
switch (next) {
case i::Token::IDENTIFIER:
- case i::Token::FUTURE_RESERVED_WORD: {
+ case i::Token::FUTURE_RESERVED_WORD:
+ case i::Token::FUTURE_STRICT_RESERVED_WORD: {
bool is_getter = false;
bool is_setter = false;
- ParseIdentifierOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
+ ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
if ((is_getter || is_setter) && peek() != i::Token::COLON) {
i::Token::Value name = Next();
bool is_keyword = i::Token::IsKeyword(name);
if (name != i::Token::IDENTIFIER &&
name != i::Token::FUTURE_RESERVED_WORD &&
+ name != i::Token::FUTURE_STRICT_RESERVED_WORD &&
name != i::Token::NUMBER &&
name != i::Token::STRING &&
!is_keyword) {
*ok = false;
- return kUnknownExpression;
+ return Expression::Default();
}
if (!is_keyword) {
LogSymbol();
@@ -988,7 +1133,7 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
} else {
// Unexpected token.
*ok = false;
- return kUnknownExpression;
+ return Expression::Default();
}
}
@@ -1001,7 +1146,7 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
Expect(i::Token::RBRACE, CHECK_OK);
scope_->NextMaterializedLiteralIndex();
- return kUnknownExpression;
+ return Expression::Default();
}
@@ -1013,7 +1158,7 @@ PreParser::Expression PreParser::ParseRegExpLiteral(bool seen_equal,
ReportMessageAt(location.beg_pos, location.end_pos,
"unterminated_regexp", NULL);
*ok = false;
- return kUnknownExpression;
+ return Expression::Default();
}
scope_->NextMaterializedLiteralIndex();
@@ -1024,10 +1169,10 @@ PreParser::Expression PreParser::ParseRegExpLiteral(bool seen_equal,
ReportMessageAt(location.beg_pos, location.end_pos,
"invalid_regexp_flags", NULL);
*ok = false;
- return kUnknownExpression;
+ return Expression::Default();
}
Next();
- return kUnknownExpression;
+ return Expression::Default();
}
@@ -1035,16 +1180,21 @@ PreParser::Arguments PreParser::ParseArguments(bool* ok) {
// Arguments ::
// '(' (AssignmentExpression)*[','] ')'
- Expect(i::Token::LPAREN, CHECK_OK);
+ Expect(i::Token::LPAREN, ok);
+ if (!*ok) return -1;
bool done = (peek() == i::Token::RPAREN);
int argc = 0;
while (!done) {
- ParseAssignmentExpression(true, CHECK_OK);
+ ParseAssignmentExpression(true, ok);
+ if (!*ok) return -1;
argc++;
done = (peek() == i::Token::RPAREN);
- if (!done) Expect(i::Token::COMMA, CHECK_OK);
+ if (!done) {
+ Expect(i::Token::COMMA, ok);
+ if (!*ok) return -1;
+ }
}
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(i::Token::RPAREN, ok);
return argc;
}
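
The explicit `ok` checks here replace the CHECK_OK macro because ParseArguments returns an argument count rather than an Expression, so the macro's fixed early-return value would have the wrong type; -1 serves as the failure sentinel instead. Below is a self-contained sketch of the CHECK_OK idiom; the shape is the conventional V8 pattern, but the exact definition in preparser.cc may differ.

#include <cstdio>

// Assumed shape of the CHECK_OK idiom (illustrative; the real macro's
// early-return value varies by section of the file).
#define CHECK_OK ok);                  \
  if (!*ok) return -1;                 \
  ((void)0

// A parse step that always fails, for demonstration.
int ParseThing(bool* ok) { *ok = false; return 0; }

int ParseArgumentsDemo(bool* ok) {
  // Expands to: ParseThing(ok); if (!*ok) return -1; ((void)0);
  ParseThing(CHECK_OK);
  return 42;  // unreachable in this demo
}

int main() {
  bool ok = true;
  std::printf("%d\n", ParseArgumentsDemo(&ok));  // prints -1
}

The trailing `);` at the call site closes both the function call opened in the source and the `((void)0` left open by the macro, which is what keeps the expansion syntactically balanced.
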
@@ -1057,13 +1207,19 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool* ok) {
ScopeType outer_scope_type = scope_->type();
bool inside_with = scope_->IsInsideWith();
Scope function_scope(&scope_, kFunctionScope);
-
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(i::Token::LPAREN, CHECK_OK);
+ int start_position = scanner_->location().beg_pos;
bool done = (peek() == i::Token::RPAREN);
while (!done) {
- ParseIdentifier(CHECK_OK);
+ Identifier id = ParseIdentifier(CHECK_OK);
+ if (!id.IsValidStrictVariable()) {
+ StrictModeIdentifierViolation(scanner_->location(),
+ "strict_param_name",
+ id,
+ CHECK_OK);
+ }
done = (peek() == i::Token::RPAREN);
if (!done) {
Expect(i::Token::COMMA, CHECK_OK);
@@ -1086,7 +1242,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool* ok) {
log_->PauseRecording();
ParseSourceElements(i::Token::RBRACE, ok);
log_->ResumeRecording();
- if (!*ok) return kUnknownExpression;
+ if (!*ok) return Expression::Default();
PLACEHOLDER_TO_REMOVE
Expect(i::Token::RBRACE, CHECK_OK);
@@ -1094,12 +1250,21 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool* ok) {
int end_pos = scanner_->location().end_pos;
log_->LogFunction(function_block_pos, end_pos,
function_scope.materialized_literal_count(),
- function_scope.expected_properties());
+ function_scope.expected_properties(),
+ strict_mode() ? 1 : 0);
} else {
ParseSourceElements(i::Token::RBRACE, CHECK_OK);
Expect(i::Token::RBRACE, CHECK_OK);
}
- return kUnknownExpression;
+
+ if (strict_mode()) {
+ int end_position = scanner_->location().end_pos;
+ CheckOctalLiteral(start_position, end_position, CHECK_OK);
+ CheckDelayedStrictModeViolation(start_position, end_position, CHECK_OK);
+ return Expression::StrictFunction();
+ }
+
+ return Expression::Default();
}
@@ -1109,11 +1274,13 @@ PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
Expect(i::Token::MOD, CHECK_OK);
ParseIdentifier(CHECK_OK);
- ParseArguments(CHECK_OK);
+ ParseArguments(ok);
- return kUnknownExpression;
+ return Expression::Default();
}
+#undef CHECK_OK
+
void PreParser::ExpectSemicolon(bool* ok) {
// Check for automatic semicolon insertion according to
@@ -1123,7 +1290,7 @@ void PreParser::ExpectSemicolon(bool* ok) {
Next();
return;
}
- if (scanner_->has_line_terminator_before_next() ||
+ if (scanner_->HasAnyLineTerminatorBeforeNext() ||
tok == i::Token::RBRACE ||
tok == i::Token::EOS) {
return;
@@ -1142,24 +1309,111 @@ void PreParser::LogSymbol() {
}
-PreParser::Identifier PreParser::GetIdentifierSymbol() {
+PreParser::Expression PreParser::GetStringSymbol() {
+ const int kUseStrictLength = 10;
+ const char* kUseStrictChars = "use strict";
LogSymbol();
- return kUnknownIdentifier;
+ if (scanner_->is_literal_ascii() &&
+ scanner_->literal_length() == kUseStrictLength &&
+ !scanner_->literal_contains_escapes() &&
+ !strncmp(scanner_->literal_ascii_string().start(), kUseStrictChars,
+ kUseStrictLength)) {
+ return Expression::UseStrictStringLiteral();
+ }
+ return Expression::StringLiteral();
}
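
GetStringSymbol recognizes the strict-mode directive only when the literal is ASCII, exactly ten characters, free of escapes, and byte-identical to "use strict"; per ES5, a string such as "use\u0020strict" evaluates to the same value but is not a Use Strict Directive. A minimal mirror of the test (the literal, its length, and the escape flag are plain parameters here, whereas the real code pulls them from the scanner):

#include <cstdio>
#include <cstring>

bool IsUseStrictDirective(const char* literal, int length,
                          bool contains_escapes) {
  return length == 10 &&
         !contains_escapes &&
         std::strncmp(literal, "use strict", 10) == 0;
}

int main() {
  std::printf("%d\n", IsUseStrictDirective("use strict", 10, false));  // 1
  // Same characters produced via an escape sequence: not a directive.
  std::printf("%d\n", IsUseStrictDirective("use strict", 10, true));   // 0
}
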
-PreParser::Expression PreParser::GetStringSymbol() {
+PreParser::Identifier PreParser::GetIdentifierSymbol() {
LogSymbol();
- return kUnknownExpression;
+ if (scanner_->current_token() == i::Token::FUTURE_RESERVED_WORD) {
+ return Identifier::FutureReserved();
+ } else if (scanner_->current_token() ==
+ i::Token::FUTURE_STRICT_RESERVED_WORD) {
+ return Identifier::FutureStrictReserved();
+ }
+ if (scanner_->is_literal_ascii()) {
+ // Detect strict-mode poison words.
+ if (scanner_->literal_length() == 4 &&
+ !strncmp(scanner_->literal_ascii_string().start(), "eval", 4)) {
+ return Identifier::Eval();
+ }
+ if (scanner_->literal_length() == 9 &&
+ !strncmp(scanner_->literal_ascii_string().start(), "arguments", 9)) {
+ return Identifier::Arguments();
+ }
+ }
+ return Identifier::Default();
}
PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
- if (!Check(i::Token::FUTURE_RESERVED_WORD)) {
- Expect(i::Token::IDENTIFIER, ok);
+ i::Token::Value next = Next();
+ switch (next) {
+ case i::Token::FUTURE_RESERVED_WORD: {
+ i::Scanner::Location location = scanner_->location();
+ ReportMessageAt(location.beg_pos, location.end_pos,
+ "reserved_word", NULL);
+ *ok = false;
+ }
+ // FALLTHROUGH
+ case i::Token::FUTURE_STRICT_RESERVED_WORD:
+ case i::Token::IDENTIFIER:
+ return GetIdentifierSymbol();
+ default:
+ *ok = false;
+ return Identifier::Default();
+ }
+}
+
+
+void PreParser::SetStrictModeViolation(i::Scanner::Location location,
+ const char* type,
+ bool* ok) {
+ if (strict_mode()) {
+ ReportMessageAt(location.beg_pos, location.end_pos, type, NULL);
+ *ok = false;
+ return;
}
- if (!*ok) return kUnknownIdentifier;
- return GetIdentifierSymbol();
+ // Delay the report in case this later turns out to be strict code
+ // (i.e., for function names and parameters prior to a "use strict"
+ // directive).
+ strict_mode_violation_location_ = location;
+ strict_mode_violation_type_ = type;
+}
+
+
+void PreParser::CheckDelayedStrictModeViolation(int beg_pos,
+ int end_pos,
+ bool* ok) {
+ i::Scanner::Location location = strict_mode_violation_location_;
+ if (location.IsValid() &&
+ location.beg_pos > beg_pos && location.end_pos < end_pos) {
+ ReportMessageAt(location.beg_pos, location.end_pos,
+ strict_mode_violation_type_, NULL);
+ *ok = false;
+ }
+ strict_mode_violation_location_ = i::Scanner::Location::invalid();
+}
+
+
+void PreParser::StrictModeIdentifierViolation(i::Scanner::Location location,
+ const char* eval_args_type,
+ Identifier identifier,
+ bool* ok) {
+ const char* type = eval_args_type;
+ if (identifier.IsFutureReserved()) {
+ type = "reserved_word";
+ } else if (identifier.IsFutureStrictReserved()) {
+ type = "strict_reserved_word";
+ }
+ if (strict_mode()) {
+ ReportMessageAt(location.beg_pos, location.end_pos, type, NULL);
+ *ok = false;
+ return;
+ }
+ strict_mode_violation_location_ = location;
+ strict_mode_violation_type_ = type;
}
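
Together these three functions implement record-and-replay error reporting: an eval/arguments name or reserved word seen before the body is parsed is merely remembered, and CheckDelayedStrictModeViolation replays it if a "use strict" directive later makes the function strict. A toy model of the mechanism (a sketch only; the Location shape and all names are simplified stand-ins, not V8's classes):

#include <cstdio>

struct Location {
  int beg_pos, end_pos;
  bool IsValid() const { return beg_pos >= 0; }
};

struct DelayedViolation {
  Location loc{-1, -1};
  const char* type = nullptr;

  // SetStrictModeViolation's non-strict branch: remember, don't report.
  void Record(Location l, const char* t) { loc = l; type = t; }

  // CheckDelayedStrictModeViolation: replay once the body proved strict.
  void Check(int beg, int end, bool* ok) {
    if (loc.IsValid() && loc.beg_pos > beg && loc.end_pos < end) {
      std::printf("error %s at [%d,%d)\n", type, loc.beg_pos, loc.end_pos);
      *ok = false;
    }
    loc = Location{-1, -1};  // mirrors Location::invalid()
  }
};

int main() {
  DelayedViolation v;
  bool ok = true;
  v.Record(Location{5, 9}, "strict_param_name");  // e.g. `eval` as a parameter
  v.Check(0, 100, &ok);  // body contained "use strict", so report it now
  std::printf("ok = %d\n", ok);  // ok = 0
}
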
@@ -1170,24 +1424,29 @@ PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
const char* keyword = i::Token::String(next);
log_->LogAsciiSymbol(pos, i::Vector<const char>(keyword,
i::StrLength(keyword)));
- return kUnknownExpression;
+ return Identifier::Default();
}
if (next == i::Token::IDENTIFIER ||
- next == i::Token::FUTURE_RESERVED_WORD) {
+ next == i::Token::FUTURE_RESERVED_WORD ||
+ next == i::Token::FUTURE_STRICT_RESERVED_WORD) {
return GetIdentifierSymbol();
}
*ok = false;
- return kUnknownIdentifier;
+ return Identifier::Default();
}
+#undef CHECK_OK
+
// This function reads an identifier and determines whether or not it
// is 'get' or 'set'.
-PreParser::Identifier PreParser::ParseIdentifierOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- PreParser::Identifier result = ParseIdentifier(CHECK_OK);
- if (scanner_->is_literal_ascii() && scanner_->literal_length() == 3) {
+PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok) {
+ Identifier result = ParseIdentifierName(ok);
+ if (!*ok) return Identifier::Default();
+ if (scanner_->is_literal_ascii() &&
+ scanner_->literal_length() == 3) {
const char* token = scanner_->literal_ascii_string().start();
*is_get = strncmp(token, "get", 3) == 0;
*is_set = !*is_get && strncmp(token, "set", 3) == 0;
@@ -1198,8 +1457,7 @@ PreParser::Identifier PreParser::ParseIdentifierOrGetOrSet(bool* is_get,
bool PreParser::peek_any_identifier() {
i::Token::Value next = peek();
return next == i::Token::IDENTIFIER ||
- next == i::Token::FUTURE_RESERVED_WORD;
+ next == i::Token::FUTURE_RESERVED_WORD ||
+ next == i::Token::FUTURE_STRICT_RESERVED_WORD;
}
-
-#undef CHECK_OK
} } // v8::preparser
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index b7fa6c73b..3d72c97e2 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -33,7 +33,7 @@ namespace preparser {
// Preparsing checks a JavaScript program and emits preparse-data that helps
// a later parsing to be faster.
-// See preparse-data.h for the data.
+// See preparse-data-format.h for the data format.
// The PreParser checks that the syntax follows the grammar for JavaScript,
// and collects some information about the program along the way.
@@ -67,41 +67,234 @@ class PreParser {
}
private:
+ // These types form an algebra over syntactic categories that is just
+ // rich enough to let us recognize and propagate the constructs that
+ // are either counted in the preparser data or matter for throwing
+ // the correct syntax error exceptions.
+
enum ScopeType {
kTopLevelScope,
kFunctionScope
};
- // Types that allow us to recognize simple this-property assignments.
- // A simple this-property assignment is a statement on the form
- // "this.propertyName = {primitive constant or function parameter name);"
- // where propertyName isn't "__proto__".
- // The result is only relevant if the function body contains only
- // simple this-property assignments.
+ class Expression;
+
+ class Identifier {
+ public:
+ static Identifier Default() {
+ return Identifier(kUnknownIdentifier);
+ }
+ static Identifier Eval() {
+ return Identifier(kEvalIdentifier);
+ }
+ static Identifier Arguments() {
+ return Identifier(kArgumentsIdentifier);
+ }
+ static Identifier FutureReserved() {
+ return Identifier(kFutureReservedIdentifier);
+ }
+ static Identifier FutureStrictReserved() {
+ return Identifier(kFutureStrictReservedIdentifier);
+ }
+ bool IsEval() { return type_ == kEvalIdentifier; }
+ bool IsArguments() { return type_ == kArgumentsIdentifier; }
+ bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
+ bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
+ bool IsFutureStrictReserved() {
+ return type_ == kFutureStrictReservedIdentifier;
+ }
+ bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; }
- enum StatementType {
- kUnknownStatement
+ private:
+ enum Type {
+ kUnknownIdentifier,
+ kFutureReservedIdentifier,
+ kFutureStrictReservedIdentifier,
+ kEvalIdentifier,
+ kArgumentsIdentifier
+ };
+ explicit Identifier(Type type) : type_(type) { }
+ Type type_;
+
+ friend class Expression;
};
- enum ExpressionType {
- kUnknownExpression,
- kIdentifierExpression, // Used to detect labels.
- kThisExpression,
- kThisPropertyExpression
+ // Bits 0 and 1 are used to identify the type of expression:
+ // If bit 0 is set, it's an identifier.
+ // If bit 1 is set, it's a string literal.
+ // If neither is set, it's no particular type; both bits set isn't
+ // used yet.
+ // Bit 2 is used to mark the expression as being parenthesized,
+ // so "(foo)" isn't recognized as a pure identifier (and possible label).
+ class Expression {
+ public:
+ static Expression Default() {
+ return Expression(kUnknownExpression);
+ }
+
+ static Expression FromIdentifier(Identifier id) {
+ return Expression(kIdentifierFlag | (id.type_ << kIdentifierShift));
+ }
+
+ static Expression StringLiteral() {
+ return Expression(kUnknownStringLiteral);
+ }
+
+ static Expression UseStrictStringLiteral() {
+ return Expression(kUseStrictString);
+ }
+
+ static Expression This() {
+ return Expression(kThisExpression);
+ }
+
+ static Expression ThisProperty() {
+ return Expression(kThisPropertyExpression);
+ }
+
+ static Expression StrictFunction() {
+ return Expression(kStrictFunctionExpression);
+ }
+
+ bool IsIdentifier() {
+ return (code_ & kIdentifierFlag) != 0;
+ }
+
+ // Only works correctly if it is actually an identifier expression.
+ PreParser::Identifier AsIdentifier() {
+ return PreParser::Identifier(
+ static_cast<PreParser::Identifier::Type>(code_ >> kIdentifierShift));
+ }
+
+ bool IsParenthesized() {
+ // If bit 0 or 1 is set, we interpret bit 2 as meaning parenthesized.
+ return (code_ & 7) > 4;
+ }
+
+ bool IsRawIdentifier() {
+ return !IsParenthesized() && IsIdentifier();
+ }
+
+ bool IsStringLiteral() { return (code_ & kStringLiteralFlag) != 0; }
+
+ bool IsRawStringLiteral() {
+ return !IsParenthesized() && IsStringLiteral();
+ }
+
+ bool IsUseStrictLiteral() {
+ return (code_ & kStringLiteralMask) == kUseStrictString;
+ }
+
+ bool IsThis() {
+ return code_ == kThisExpression;
+ }
+
+ bool IsThisProperty() {
+ return code_ == kThisPropertyExpression;
+ }
+
+ bool IsStrictFunction() {
+ return code_ == kStrictFunctionExpression;
+ }
+
+ Expression Parenthesize() {
+ int type = code_ & 3;
+ if (type != 0) {
+ // Identifiers and string literals can be parenthesized.
+ // They no longer work as labels or directive prologues,
+ // but are still recognized in other contexts.
+ return Expression(code_ | kParenthesizedExpressionFlag);
+ }
+ // For other types of expressions, it's not important to remember
+ // the parentheses.
+ return *this;
+ }
+
+ private:
+ // The first two or three bits are used as flags.
+ // Bits 0 and 1 represent identifiers or string literals, and are
+ // mutually exclusive, but can both be absent.
+ // If bit 0 or 1 is set, bit 2 marks that the expression has
+ // been wrapped in parentheses (a string literal can no longer
+ // be a directive prologue, and an identifier can no longer be
+ // a label).
+ enum {
+ kUnknownExpression = 0,
+ // Identifiers
+ kIdentifierFlag = 1, // Used to detect labels.
+ kIdentifierShift = 3,
+
+ kStringLiteralFlag = 2, // Used to detect directive prologue.
+ kUnknownStringLiteral = kStringLiteralFlag,
+ kUseStrictString = kStringLiteralFlag | 8,
+ kStringLiteralMask = kUseStrictString,
+
+ kParenthesizedExpressionFlag = 4, // Only if identifier or string literal.
+
+ // Below here applies if neither identifier nor string literal.
+ kThisExpression = 4,
+ kThisPropertyExpression = 8,
+ kStrictFunctionExpression = 12
+ };
+
+ explicit Expression(int expression_code) : code_(expression_code) { }
+
+ int code_;
};
- enum IdentifierType {
- kUnknownIdentifier
+ class Statement {
+ public:
+ static Statement Default() {
+ return Statement(kUnknownStatement);
+ }
+
+ static Statement FunctionDeclaration() {
+ return Statement(kFunctionDeclaration);
+ }
+
+ // Creates an expression statement from an expression, preserving
+ // whether it is an unparenthesized string literal and, in particular,
+ // whether it is the "use strict" directive.
+ static Statement ExpressionStatement(Expression expression) {
+ if (!expression.IsParenthesized()) {
+ if (expression.IsUseStrictLiteral()) {
+ return Statement(kUseStrictExpressionStatement);
+ }
+ if (expression.IsStringLiteral()) {
+ return Statement(kStringLiteralExpressionStatement);
+ }
+ }
+ return Default();
+ }
+
+ bool IsStringLiteral() {
+ return code_ != kUnknownStatement;
+ }
+
+ bool IsUseStrictLiteral() {
+ return code_ == kUseStrictExpressionStatement;
+ }
+
+ bool IsFunctionDeclaration() {
+ return code_ == kFunctionDeclaration;
+ }
+
+ private:
+ enum Type {
+ kUnknownStatement,
+ kStringLiteralExpressionStatement,
+ kUseStrictExpressionStatement,
+ kFunctionDeclaration
+ };
+
+ explicit Statement(Type code) : code_(code) {}
+ Type code_;
};
- enum SourceElementTypes {
+ enum SourceElements {
kUnknownSourceElements
};
- typedef int SourceElements;
- typedef int Expression;
- typedef int Statement;
- typedef int Identifier;
typedef int Arguments;
class Scope {
@@ -112,7 +305,8 @@ class PreParser {
type_(type),
materialized_literal_count_(0),
expected_properties_(0),
- with_nesting_count_(0) {
+ with_nesting_count_(0),
+ strict_((prev_ != NULL) && prev_->is_strict()) {
*variable = this;
}
~Scope() { *variable_ = prev_; }
@@ -122,6 +316,8 @@ class PreParser {
int expected_properties() { return expected_properties_; }
int materialized_literal_count() { return materialized_literal_count_; }
bool IsInsideWith() { return with_nesting_count_ != 0; }
+ bool is_strict() { return strict_; }
+ void set_strict() { strict_ = true; }
void EnterWith() { with_nesting_count_++; }
void LeaveWith() { with_nesting_count_--; }
@@ -132,6 +328,7 @@ class PreParser {
int materialized_literal_count_;
int expected_properties_;
int with_nesting_count_;
+ bool strict_;
};
// Private constructor only used in PreParseProgram.
@@ -143,6 +340,8 @@ class PreParser {
log_(log),
scope_(NULL),
stack_limit_(stack_limit),
+ strict_mode_violation_location_(i::Scanner::Location::invalid()),
+ strict_mode_violation_type_(NULL),
stack_overflow_(false),
allow_lazy_(true),
parenthesized_function_(false) { }
@@ -152,10 +351,13 @@ class PreParser {
PreParseResult PreParse() {
Scope top_scope(&scope_, kTopLevelScope);
bool ok = true;
+ int start_position = scanner_->peek_location().beg_pos;
ParseSourceElements(i::Token::EOS, &ok);
if (stack_overflow_) return kPreParseStackOverflow;
if (!ok) {
ReportUnexpectedToken(scanner_->current_token());
+ } else if (scope_->is_strict()) {
+ CheckOctalLiteral(start_position, scanner_->location().end_pos, &ok);
}
return kPreParseSuccess;
}
@@ -169,6 +371,8 @@ class PreParser {
log_->LogMessage(start_pos, end_pos, type, name_opt);
}
+ void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
+
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
@@ -176,7 +380,6 @@ class PreParser {
SourceElements ParseSourceElements(int end_token, bool* ok);
Statement ParseStatement(bool* ok);
Statement ParseFunctionDeclaration(bool* ok);
- Statement ParseNativeDeclaration(bool* ok);
Statement ParseBlock(bool* ok);
Statement ParseVariableStatement(bool* ok);
Statement ParseVariableDeclarations(bool accept_IN, int* num_decl, bool* ok);
@@ -215,7 +418,9 @@ class PreParser {
Identifier ParseIdentifier(bool* ok);
Identifier ParseIdentifierName(bool* ok);
- Identifier ParseIdentifierOrGetOrSet(bool* is_get, bool* is_set, bool* ok);
+ Identifier ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok);
// Logs the currently parsed literal as a symbol in the preparser data.
void LogSymbol();
@@ -245,6 +450,12 @@ class PreParser {
bool peek_any_identifier();
+ void set_strict_mode() {
+ scope_->set_strict();
+ }
+
+ bool strict_mode() { return scope_->is_strict(); }
+
void Consume(i::Token::Value token) { Next(); }
void Expect(i::Token::Value token, bool* ok) {
@@ -265,10 +476,23 @@ class PreParser {
static int Precedence(i::Token::Value tok, bool accept_IN);
+ void SetStrictModeViolation(i::Scanner::Location,
+ const char* type,
+ bool *ok);
+
+ void CheckDelayedStrictModeViolation(int beg_pos, int end_pos, bool* ok);
+
+ void StrictModeIdentifierViolation(i::Scanner::Location,
+ const char* eval_args_type,
+ Identifier identifier,
+ bool* ok);
+
i::JavaScriptScanner* scanner_;
i::ParserRecorder* log_;
Scope* scope_;
uintptr_t stack_limit_;
+ i::Scanner::Location strict_mode_violation_location_;
+ const char* strict_mode_violation_type_;
bool stack_overflow_;
bool allow_lazy_;
bool parenthesized_function_;
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index dda7abbb3..cd38d1334 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -123,15 +123,16 @@ void PrettyPrinter::VisitReturnStatement(ReturnStatement* node) {
}
-void PrettyPrinter::VisitWithEnterStatement(WithEnterStatement* node) {
- Print("<enter with> (");
+void PrettyPrinter::VisitEnterWithContextStatement(
+ EnterWithContextStatement* node) {
+ Print("<enter with context> (");
Visit(node->expression());
Print(") ");
}
-void PrettyPrinter::VisitWithExitStatement(WithExitStatement* node) {
- Print("<exit with>");
+void PrettyPrinter::VisitExitContextStatement(ExitContextStatement* node) {
+ Print("<exit context>");
}
@@ -201,7 +202,8 @@ void PrettyPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
Print("try ");
Visit(node->try_block());
Print(" catch (");
- Visit(node->catch_var());
+ const bool quote = false;
+ PrintLiteral(node->name(), quote);
Print(") ");
Visit(node->catch_block());
}
@@ -282,15 +284,6 @@ void PrettyPrinter::VisitArrayLiteral(ArrayLiteral* node) {
}
-void PrettyPrinter::VisitCatchExtensionObject(CatchExtensionObject* node) {
- Print("{ ");
- Visit(node->key());
- Print(": ");
- Visit(node->value());
- Print(" }");
-}
-
-
void PrettyPrinter::VisitSlot(Slot* node) {
switch (node->type()) {
case Slot::PARAMETER:
@@ -370,17 +363,15 @@ void PrettyPrinter::VisitCallRuntime(CallRuntime* node) {
void PrettyPrinter::VisitUnaryOperation(UnaryOperation* node) {
- Print("(%s", Token::String(node->op()));
+ Token::Value op = node->op();
+ bool needsSpace =
+ op == Token::DELETE || op == Token::TYPEOF || op == Token::VOID;
+ Print("(%s%s", Token::String(op), needsSpace ? " " : "");
Visit(node->expression());
Print(")");
}
-void PrettyPrinter::VisitIncrementOperation(IncrementOperation* node) {
- UNREACHABLE();
-}
-
-
void PrettyPrinter::VisitCountOperation(CountOperation* node) {
Print("(");
if (node->is_prefix()) Print("%s", Token::String(node->op()));
@@ -393,7 +384,7 @@ void PrettyPrinter::VisitCountOperation(CountOperation* node) {
void PrettyPrinter::VisitBinaryOperation(BinaryOperation* node) {
Print("(");
Visit(node->left());
- Print("%s", Token::String(node->op()));
+ Print(" %s ", Token::String(node->op()));
Visit(node->right());
Print(")");
}
@@ -402,7 +393,7 @@ void PrettyPrinter::VisitBinaryOperation(BinaryOperation* node) {
void PrettyPrinter::VisitCompareOperation(CompareOperation* node) {
Print("(");
Visit(node->left());
- Print("%s", Token::String(node->op()));
+ Print(" %s ", Token::String(node->op()));
Visit(node->right());
Print(")");
}
@@ -526,13 +517,13 @@ void PrettyPrinter::PrintLiteral(Handle<Object> value, bool quote) {
Print("%c", string->Get(i));
}
if (quote) Print("\"");
- } else if (object == Heap::null_value()) {
+ } else if (object->IsNull()) {
Print("null");
- } else if (object == Heap::true_value()) {
+ } else if (object->IsTrue()) {
Print("true");
- } else if (object == Heap::false_value()) {
+ } else if (object->IsFalse()) {
Print("false");
- } else if (object == Heap::undefined_value()) {
+ } else if (object->IsUndefined()) {
Print("undefined");
} else if (object->IsNumber()) {
Print("%g", object->Number());
@@ -602,22 +593,13 @@ void PrettyPrinter::PrintCaseClause(CaseClause* clause) {
class IndentedScope BASE_EMBEDDED {
public:
- IndentedScope() {
+ explicit IndentedScope(AstPrinter* printer) : ast_printer_(printer) {
ast_printer_->inc_indent();
}
- explicit IndentedScope(const char* txt, AstNode* node = NULL) {
+ IndentedScope(AstPrinter* printer, const char* txt, AstNode* node = NULL)
+ : ast_printer_(printer) {
ast_printer_->PrintIndented(txt);
- if (node != NULL && node->AsExpression() != NULL) {
- Expression* expr = node->AsExpression();
- bool printed_first = false;
- if ((expr->type() != NULL) && (expr->type()->IsKnown())) {
- ast_printer_->Print(" (type = ");
- ast_printer_->Print(StaticType::Type2String(expr->type()));
- printed_first = true;
- }
- if (printed_first) ast_printer_->Print(")");
- }
ast_printer_->Print("\n");
ast_printer_->inc_indent();
}
@@ -626,30 +608,20 @@ class IndentedScope BASE_EMBEDDED {
ast_printer_->dec_indent();
}
- static void SetAstPrinter(AstPrinter* a) { ast_printer_ = a; }
-
private:
- static AstPrinter* ast_printer_;
+ AstPrinter* ast_printer_;
};
-AstPrinter* IndentedScope::ast_printer_ = NULL;
-
-
//-----------------------------------------------------------------------------
-int AstPrinter::indent_ = 0;
-
-AstPrinter::AstPrinter() {
- ASSERT(indent_ == 0);
- IndentedScope::SetAstPrinter(this);
+AstPrinter::AstPrinter() : indent_(0) {
}
AstPrinter::~AstPrinter() {
ASSERT(indent_ == 0);
- IndentedScope::SetAstPrinter(NULL);
}
@@ -673,18 +645,13 @@ void AstPrinter::PrintLiteralIndented(const char* info,
void AstPrinter::PrintLiteralWithModeIndented(const char* info,
Variable* var,
- Handle<Object> value,
- StaticType* type) {
+ Handle<Object> value) {
if (var == NULL) {
PrintLiteralIndented(info, value, true);
} else {
EmbeddedVector<char, 256> buf;
int pos = OS::SNPrintF(buf, "%s (mode = %s", info,
Variable::Mode2String(var->mode()));
- if (type->IsKnown()) {
- pos += OS::SNPrintF(buf + pos, ", type = %s",
- StaticType::Type2String(type));
- }
OS::SNPrintF(buf + pos, ")");
PrintLiteralIndented(buf.start(), value, true);
}
@@ -708,14 +675,14 @@ void AstPrinter::PrintLabelsIndented(const char* info, ZoneStringList* labels) {
void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) {
- IndentedScope indent(s, node);
+ IndentedScope indent(this, s, node);
Visit(node);
}
const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
Init();
- { IndentedScope indent("FUNC");
+ { IndentedScope indent(this, "FUNC");
PrintLiteralIndented("NAME", program->name(), true);
PrintLiteralIndented("INFERRED NAME", program->inferred_name(), true);
PrintParameters(program->scope());
@@ -728,7 +695,7 @@ const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
void AstPrinter::PrintDeclarations(ZoneList<Declaration*>* declarations) {
if (declarations->length() > 0) {
- IndentedScope indent("DECLS");
+ IndentedScope indent(this, "DECLS");
for (int i = 0; i < declarations->length(); i++) {
Visit(declarations->at(i));
}
@@ -738,11 +705,10 @@ void AstPrinter::PrintDeclarations(ZoneList<Declaration*>* declarations) {
void AstPrinter::PrintParameters(Scope* scope) {
if (scope->num_parameters() > 0) {
- IndentedScope indent("PARAMS");
+ IndentedScope indent(this, "PARAMS");
for (int i = 0; i < scope->num_parameters(); i++) {
PrintLiteralWithModeIndented("VAR", scope->parameter(i),
- scope->parameter(i)->name(),
- scope->parameter(i)->type());
+ scope->parameter(i)->name());
}
}
}
@@ -764,10 +730,10 @@ void AstPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
void AstPrinter::PrintCaseClause(CaseClause* clause) {
if (clause->is_default()) {
- IndentedScope indent("DEFAULT");
+ IndentedScope indent(this, "DEFAULT");
PrintStatements(clause->statements());
} else {
- IndentedScope indent("CASE");
+ IndentedScope indent(this, "CASE");
Visit(clause->label());
PrintStatements(clause->statements());
}
@@ -776,7 +742,7 @@ void AstPrinter::PrintCaseClause(CaseClause* clause) {
void AstPrinter::VisitBlock(Block* node) {
const char* block_txt = node->is_initializer_block() ? "BLOCK INIT" : "BLOCK";
- IndentedScope indent(block_txt);
+ IndentedScope indent(this, block_txt);
PrintStatements(node->statements());
}
@@ -786,8 +752,7 @@ void AstPrinter::VisitDeclaration(Declaration* node) {
// var or const declarations
PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
node->proxy()->AsVariable(),
- node->proxy()->name(),
- node->proxy()->AsVariable()->type());
+ node->proxy()->name());
} else {
// function declarations
PrintIndented("FUNCTION ");
@@ -833,18 +798,19 @@ void AstPrinter::VisitReturnStatement(ReturnStatement* node) {
}
-void AstPrinter::VisitWithEnterStatement(WithEnterStatement* node) {
- PrintIndentedVisit("WITH ENTER", node->expression());
+void AstPrinter::VisitEnterWithContextStatement(
+ EnterWithContextStatement* node) {
+ PrintIndentedVisit("ENTER WITH CONTEXT", node->expression());
}
-void AstPrinter::VisitWithExitStatement(WithExitStatement* node) {
- PrintIndented("WITH EXIT\n");
+void AstPrinter::VisitExitContextStatement(ExitContextStatement* node) {
+ PrintIndented("EXIT CONTEXT\n");
}
void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
- IndentedScope indent("SWITCH");
+ IndentedScope indent(this, "SWITCH");
PrintLabelsIndented(NULL, node->labels());
PrintIndentedVisit("TAG", node->tag());
for (int i = 0; i < node->cases()->length(); i++) {
@@ -854,7 +820,7 @@ void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
- IndentedScope indent("DO");
+ IndentedScope indent(this, "DO");
PrintLabelsIndented(NULL, node->labels());
PrintIndentedVisit("BODY", node->body());
PrintIndentedVisit("COND", node->cond());
@@ -862,7 +828,7 @@ void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
void AstPrinter::VisitWhileStatement(WhileStatement* node) {
- IndentedScope indent("WHILE");
+ IndentedScope indent(this, "WHILE");
PrintLabelsIndented(NULL, node->labels());
PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
@@ -870,7 +836,7 @@ void AstPrinter::VisitWhileStatement(WhileStatement* node) {
void AstPrinter::VisitForStatement(ForStatement* node) {
- IndentedScope indent("FOR");
+ IndentedScope indent(this, "FOR");
PrintLabelsIndented(NULL, node->labels());
if (node->init()) PrintIndentedVisit("INIT", node->init());
if (node->cond()) PrintIndentedVisit("COND", node->cond());
@@ -880,7 +846,7 @@ void AstPrinter::VisitForStatement(ForStatement* node) {
void AstPrinter::VisitForInStatement(ForInStatement* node) {
- IndentedScope indent("FOR IN");
+ IndentedScope indent(this, "FOR IN");
PrintIndentedVisit("FOR", node->each());
PrintIndentedVisit("IN", node->enumerable());
PrintIndentedVisit("BODY", node->body());
@@ -888,27 +854,28 @@ void AstPrinter::VisitForInStatement(ForInStatement* node) {
void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
- IndentedScope indent("TRY CATCH");
+ IndentedScope indent(this, "TRY CATCH");
PrintIndentedVisit("TRY", node->try_block());
- PrintIndentedVisit("CATCHVAR", node->catch_var());
+ const bool quote = false;
+ PrintLiteralIndented("CATCHVAR", node->name(), quote);
PrintIndentedVisit("CATCH", node->catch_block());
}
void AstPrinter::VisitTryFinallyStatement(TryFinallyStatement* node) {
- IndentedScope indent("TRY FINALLY");
+ IndentedScope indent(this, "TRY FINALLY");
PrintIndentedVisit("TRY", node->try_block());
PrintIndentedVisit("FINALLY", node->finally_block());
}
void AstPrinter::VisitDebuggerStatement(DebuggerStatement* node) {
- IndentedScope indent("DEBUGGER");
+ IndentedScope indent(this, "DEBUGGER");
}
void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
- IndentedScope indent("FUNC LITERAL");
+ IndentedScope indent(this, "FUNC LITERAL");
PrintLiteralIndented("NAME", node->name(), false);
PrintLiteralIndented("INFERRED NAME", node->inferred_name(), false);
PrintParameters(node->scope());
@@ -921,13 +888,13 @@ void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
void AstPrinter::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* node) {
- IndentedScope indent("FUNC LITERAL");
+ IndentedScope indent(this, "FUNC LITERAL");
PrintLiteralIndented("SHARED INFO", node->shared_function_info(), true);
}
void AstPrinter::VisitConditional(Conditional* node) {
- IndentedScope indent("CONDITIONAL");
+ IndentedScope indent(this, "CONDITIONAL");
PrintIndentedVisit("?", node->condition());
PrintIndentedVisit("THEN", node->then_expression());
PrintIndentedVisit("ELSE", node->else_expression());
@@ -940,14 +907,14 @@ void AstPrinter::VisitLiteral(Literal* node) {
void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
- IndentedScope indent("REGEXP LITERAL");
+ IndentedScope indent(this, "REGEXP LITERAL");
PrintLiteralIndented("PATTERN", node->pattern(), false);
PrintLiteralIndented("FLAGS", node->flags(), false);
}
void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
- IndentedScope indent("OBJ LITERAL");
+ IndentedScope indent(this, "OBJ LITERAL");
for (int i = 0; i < node->properties()->length(); i++) {
const char* prop_kind = NULL;
switch (node->properties()->at(i)->kind()) {
@@ -972,7 +939,7 @@ void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
default:
UNREACHABLE();
}
- IndentedScope prop(prop_kind);
+ IndentedScope prop(this, prop_kind);
PrintIndentedVisit("KEY", node->properties()->at(i)->key());
PrintIndentedVisit("VALUE", node->properties()->at(i)->value());
}
@@ -980,9 +947,9 @@ void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
- IndentedScope indent("ARRAY LITERAL");
+ IndentedScope indent(this, "ARRAY LITERAL");
if (node->values()->length() > 0) {
- IndentedScope indent("VALUES");
+ IndentedScope indent(this, "VALUES");
for (int i = 0; i < node->values()->length(); i++) {
Visit(node->values()->at(i));
}
@@ -990,13 +957,6 @@ void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
}
-void AstPrinter::VisitCatchExtensionObject(CatchExtensionObject* node) {
- IndentedScope indent("CatchExtensionObject");
- PrintIndentedVisit("KEY", node->key());
- PrintIndentedVisit("VALUE", node->value());
-}
-
-
void AstPrinter::VisitSlot(Slot* node) {
PrintIndented("SLOT ");
PrettyPrinter::VisitSlot(node);
@@ -1005,18 +965,17 @@ void AstPrinter::VisitSlot(Slot* node) {
void AstPrinter::VisitVariableProxy(VariableProxy* node) {
- PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name(),
- node->type());
+ PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name());
Variable* var = node->var();
if (var != NULL && var->rewrite() != NULL) {
- IndentedScope indent;
+ IndentedScope indent(this);
Visit(var->rewrite());
}
}
void AstPrinter::VisitAssignment(Assignment* node) {
- IndentedScope indent(Token::Name(node->op()), node);
+ IndentedScope indent(this, Token::Name(node->op()), node);
Visit(node->target());
Visit(node->value());
}
@@ -1028,7 +987,7 @@ void AstPrinter::VisitThrow(Throw* node) {
void AstPrinter::VisitProperty(Property* node) {
- IndentedScope indent("PROPERTY", node);
+ IndentedScope indent(this, "PROPERTY", node);
Visit(node->obj());
Literal* literal = node->key()->AsLiteral();
if (literal != NULL && literal->handle()->IsSymbol()) {
@@ -1040,14 +999,14 @@ void AstPrinter::VisitProperty(Property* node) {
void AstPrinter::VisitCall(Call* node) {
- IndentedScope indent("CALL");
+ IndentedScope indent(this, "CALL");
Visit(node->expression());
PrintArguments(node->arguments());
}
void AstPrinter::VisitCallNew(CallNew* node) {
- IndentedScope indent("CALL NEW");
+ IndentedScope indent(this, "CALL NEW");
Visit(node->expression());
PrintArguments(node->arguments());
}
@@ -1055,7 +1014,7 @@ void AstPrinter::VisitCallNew(CallNew* node) {
void AstPrinter::VisitCallRuntime(CallRuntime* node) {
PrintLiteralIndented("CALL RUNTIME ", node->name(), false);
- IndentedScope indent;
+ IndentedScope indent(this);
PrintArguments(node->arguments());
}
@@ -1065,35 +1024,23 @@ void AstPrinter::VisitUnaryOperation(UnaryOperation* node) {
}
-void AstPrinter::VisitIncrementOperation(IncrementOperation* node) {
- UNREACHABLE();
-}
-
-
void AstPrinter::VisitCountOperation(CountOperation* node) {
EmbeddedVector<char, 128> buf;
- if (node->type()->IsKnown()) {
- OS::SNPrintF(buf, "%s %s (type = %s)",
- (node->is_prefix() ? "PRE" : "POST"),
- Token::Name(node->op()),
- StaticType::Type2String(node->type()));
- } else {
- OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
- Token::Name(node->op()));
- }
+ OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
+ Token::Name(node->op()));
PrintIndentedVisit(buf.start(), node->expression());
}
void AstPrinter::VisitBinaryOperation(BinaryOperation* node) {
- IndentedScope indent(Token::Name(node->op()), node);
+ IndentedScope indent(this, Token::Name(node->op()), node);
Visit(node->left());
Visit(node->right());
}
void AstPrinter::VisitCompareOperation(CompareOperation* node) {
- IndentedScope indent(Token::Name(node->op()), node);
+ IndentedScope indent(this, Token::Name(node->op()), node);
Visit(node->left());
Visit(node->right());
}
@@ -1103,13 +1050,13 @@ void AstPrinter::VisitCompareToNull(CompareToNull* node) {
const char* name = node->is_strict()
? "COMPARE-TO-NULL-STRICT"
: "COMPARE-TO-NULL";
- IndentedScope indent(name, node);
+ IndentedScope indent(this, name, node);
Visit(node->expression());
}
void AstPrinter::VisitThisFunction(ThisFunction* node) {
- IndentedScope indent("THIS-FUNCTION");
+ IndentedScope indent(this, "THIS-FUNCTION");
}
@@ -1246,14 +1193,15 @@ void JsonAstBuilder::VisitReturnStatement(ReturnStatement* stmt) {
}
-void JsonAstBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
- TagScope tag(this, "WithEnterStatement");
+void JsonAstBuilder::VisitEnterWithContextStatement(
+ EnterWithContextStatement* stmt) {
+ TagScope tag(this, "EnterWithContextStatement");
Visit(stmt->expression());
}
-void JsonAstBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
- TagScope tag(this, "WithExitStatement");
+void JsonAstBuilder::VisitExitContextStatement(ExitContextStatement* stmt) {
+ TagScope tag(this, "ExitContextStatement");
}
@@ -1295,8 +1243,10 @@ void JsonAstBuilder::VisitForInStatement(ForInStatement* stmt) {
void JsonAstBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
TagScope tag(this, "TryCatchStatement");
+ { AttributesScope attributes(this);
+ AddAttribute("variable", stmt->name());
+ }
Visit(stmt->try_block());
- Visit(stmt->catch_var());
Visit(stmt->catch_block());
}
@@ -1401,13 +1351,6 @@ void JsonAstBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
}
-void JsonAstBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- TagScope tag(this, "CatchExtensionObject");
- Visit(expr->key());
- Visit(expr->value());
-}
-
-
void JsonAstBuilder::VisitAssignment(Assignment* expr) {
TagScope tag(this, "Assignment");
{
@@ -1427,10 +1370,6 @@ void JsonAstBuilder::VisitThrow(Throw* expr) {
void JsonAstBuilder::VisitProperty(Property* expr) {
TagScope tag(this, "Property");
- {
- AttributesScope attributes(this);
- AddAttribute("type", expr->is_synthetic() ? "SYNTHETIC" : "NORMAL");
- }
Visit(expr->obj());
Visit(expr->key());
}
@@ -1470,11 +1409,6 @@ void JsonAstBuilder::VisitUnaryOperation(UnaryOperation* expr) {
}
-void JsonAstBuilder::VisitIncrementOperation(IncrementOperation* expr) {
- UNREACHABLE();
-}
-
-
void JsonAstBuilder::VisitCountOperation(CountOperation* expr) {
TagScope tag(this, "CountOperation");
{
diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/prettyprinter.h
index c83de3451..080081dd3 100644
--- a/deps/v8/src/prettyprinter.h
+++ b/deps/v8/src/prettyprinter.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,7 @@
#ifndef V8_PRETTYPRINTER_H_
#define V8_PRETTYPRINTER_H_
+#include "allocation.h"
#include "ast.h"
namespace v8 {
@@ -104,14 +105,13 @@ class AstPrinter: public PrettyPrinter {
void PrintLiteralIndented(const char* info, Handle<Object> value, bool quote);
void PrintLiteralWithModeIndented(const char* info,
Variable* var,
- Handle<Object> value,
- StaticType* type);
+ Handle<Object> value);
void PrintLabelsIndented(const char* info, ZoneStringList* labels);
void inc_indent() { indent_++; }
void dec_indent() { indent_--; }
- static int indent_;
+ int indent_;
};
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index 7612eab99..b2c9de852 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,14 +28,15 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "v8.h"
+
+#include "profile-generator-inl.h"
+
#include "global-handles.h"
+#include "heap-profiler.h"
#include "scopeinfo.h"
-#include "top.h"
#include "unicode.h"
#include "zone-inl.h"
-#include "profile-generator-inl.h"
-
namespace v8 {
namespace internal {
@@ -47,24 +48,27 @@ TokenEnumerator::TokenEnumerator()
TokenEnumerator::~TokenEnumerator() {
+ Isolate* isolate = Isolate::Current();
for (int i = 0; i < token_locations_.length(); ++i) {
if (!token_removed_[i]) {
- GlobalHandles::ClearWeakness(token_locations_[i]);
- GlobalHandles::Destroy(token_locations_[i]);
+ isolate->global_handles()->ClearWeakness(token_locations_[i]);
+ isolate->global_handles()->Destroy(token_locations_[i]);
}
}
}
int TokenEnumerator::GetTokenId(Object* token) {
+ Isolate* isolate = Isolate::Current();
if (token == NULL) return TokenEnumerator::kNoSecurityToken;
for (int i = 0; i < token_locations_.length(); ++i) {
if (*token_locations_[i] == token && !token_removed_[i]) return i;
}
- Handle<Object> handle = GlobalHandles::Create(token);
+ Handle<Object> handle = isolate->global_handles()->Create(token);
// handle.location() points to a memory cell holding a pointer
// to a token object in the V8's heap.
- GlobalHandles::MakeWeak(handle.location(), this, TokenRemovedCallback);
+ isolate->global_handles()->MakeWeak(handle.location(), this,
+ TokenRemovedCallback);
token_locations_.Add(handle.location());
token_removed_.Add(false);
return token_locations_.length() - 1;
@@ -94,55 +98,74 @@ StringsStorage::StringsStorage()
}
-static void DeleteIndexName(char** name_ptr) {
- DeleteArray(*name_ptr);
-}
-
-
StringsStorage::~StringsStorage() {
for (HashMap::Entry* p = names_.Start();
p != NULL;
p = names_.Next(p)) {
DeleteArray(reinterpret_cast<const char*>(p->value));
}
- index_names_.Iterate(DeleteIndexName);
+}
+
+
+const char* StringsStorage::GetCopy(const char* src) {
+ int len = static_cast<int>(strlen(src));
+ Vector<char> dst = Vector<char>::New(len + 1);
+ OS::StrNCpy(dst, src, len);
+ dst[len] = '\0';
+ uint32_t hash = HashSequentialString(dst.start(), len);
+ return AddOrDisposeString(dst.start(), hash);
+}
+
+
+const char* StringsStorage::GetFormatted(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ const char* result = GetVFormatted(format, args);
+ va_end(args);
+ return result;
+}
+
+
+const char* StringsStorage::AddOrDisposeString(char* str, uint32_t hash) {
+ HashMap::Entry* cache_entry = names_.Lookup(str, hash, true);
+ if (cache_entry->value == NULL) {
+ // New entry added.
+ cache_entry->value = str;
+ } else {
+ DeleteArray(str);
+ }
+ return reinterpret_cast<const char*>(cache_entry->value);
+}
+
+
+const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
+ Vector<char> str = Vector<char>::New(1024);
+ int len = OS::VSNPrintF(str, format, args);
+ if (len == -1) {
+ DeleteArray(str.start());
+ return format;
+ }
+ uint32_t hash = HashSequentialString(str.start(), len);
+ return AddOrDisposeString(str.start(), hash);
}
const char* StringsStorage::GetName(String* name) {
if (name->IsString()) {
- char* c_name =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).Detach();
- HashMap::Entry* cache_entry = names_.Lookup(c_name, name->Hash(), true);
- if (cache_entry->value == NULL) {
- // New entry added.
- cache_entry->value = c_name;
- } else {
- DeleteArray(c_name);
- }
- return reinterpret_cast<const char*>(cache_entry->value);
+ return AddOrDisposeString(
+ name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).Detach(),
+ name->Hash());
}
return "";
}
const char* StringsStorage::GetName(int index) {
- ASSERT(index >= 0);
- if (index_names_.length() <= index) {
- index_names_.AddBlock(
- NULL, index - index_names_.length() + 1);
- }
- if (index_names_[index] == NULL) {
- const int kMaximumNameLength = 32;
- char* name = NewArray<char>(kMaximumNameLength);
- OS::SNPrintF(Vector<char>(name, kMaximumNameLength), "%d", index);
- index_names_[index] = name;
- }
- return index_names_[index];
+ return GetFormatted("%d", index);
}
-const char* CodeEntry::kEmptyNamePrefix = "";
+const char* const CodeEntry::kEmptyNamePrefix = "";
void CodeEntry::CopyData(const CodeEntry& source) {
@@ -298,7 +321,7 @@ struct NodesPair {
class FilteredCloneCallback {
public:
- explicit FilteredCloneCallback(ProfileNode* dst_root, int security_token_id)
+ FilteredCloneCallback(ProfileNode* dst_root, int security_token_id)
: stack_(10),
security_token_id_(security_token_id) {
stack_.Add(NodesPair(NULL, dst_root));
@@ -465,7 +488,7 @@ void CpuProfile::Print() {
}
-CodeEntry* const CodeMap::kSfiCodeEntry = NULL;
+CodeEntry* const CodeMap::kSharedFunctionCodeEntry = NULL;
const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
const CodeMap::CodeTreeConfig::Value CodeMap::CodeTreeConfig::kNoValue =
CodeMap::CodeEntryInfo(NULL, 0);
@@ -483,18 +506,18 @@ CodeEntry* CodeMap::FindEntry(Address addr) {
}
-int CodeMap::GetSFITag(Address addr) {
+int CodeMap::GetSharedId(Address addr) {
CodeTree::Locator locator;
- // For SFI entries, 'size' field is used to store their IDs.
+ // For shared function entries, 'size' field is used to store their IDs.
if (tree_.Find(addr, &locator)) {
const CodeEntryInfo& entry = locator.value();
- ASSERT(entry.entry == kSfiCodeEntry);
+ ASSERT(entry.entry == kSharedFunctionCodeEntry);
return entry.size;
} else {
tree_.Insert(addr, &locator);
- int tag = next_sfi_tag_++;
- locator.set_value(CodeEntryInfo(kSfiCodeEntry, tag));
- return tag;
+ int id = next_shared_id_++;
+ locator.set_value(CodeEntryInfo(kSharedFunctionCodeEntry, id));
+ return id;
}
}
@@ -528,13 +551,16 @@ static void DeleteCpuProfile(CpuProfile** profile_ptr) {
}
static void DeleteProfilesList(List<CpuProfile*>** list_ptr) {
- (*list_ptr)->Iterate(DeleteCpuProfile);
- delete *list_ptr;
+ if (*list_ptr != NULL) {
+ (*list_ptr)->Iterate(DeleteCpuProfile);
+ delete *list_ptr;
+ }
}
CpuProfilesCollection::~CpuProfilesCollection() {
delete current_profiles_semaphore_;
current_profiles_.Iterate(DeleteCpuProfile);
+ detached_profiles_.Iterate(DeleteCpuProfile);
profiles_by_token_.Iterate(DeleteProfilesList);
code_entries_.Iterate(DeleteCodeEntry);
}
@@ -599,15 +625,8 @@ CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id,
CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id,
unsigned uid) {
- HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid),
- false);
- int index;
- if (entry != NULL) {
- index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- } else {
- return NULL;
- }
+ int index = GetProfileIndex(uid);
+ if (index < 0) return NULL;
List<CpuProfile*>* unabridged_list =
profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
if (security_token_id == TokenEnumerator::kNoSecurityToken) {
@@ -622,6 +641,15 @@ CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id,
}
+int CpuProfilesCollection::GetProfileIndex(unsigned uid) {
+ HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid),
+ static_cast<uint32_t>(uid),
+ false);
+ return entry != NULL ?
+ static_cast<int>(reinterpret_cast<intptr_t>(entry->value)) : -1;
+}
+
+
bool CpuProfilesCollection::IsLastProfile(const char* title) {
// Called from VM thread, and only it can mutate the list,
// so no locking is needed here.
@@ -631,6 +659,39 @@ bool CpuProfilesCollection::IsLastProfile(const char* title) {
}
+void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
+ // Called from VM thread for a completed profile.
+ unsigned uid = profile->uid();
+ int index = GetProfileIndex(uid);
+ if (index < 0) {
+ detached_profiles_.RemoveElement(profile);
+ return;
+ }
+ profiles_uids_.Remove(reinterpret_cast<void*>(uid),
+ static_cast<uint32_t>(uid));
+ // Decrement all indexes above the deleted one.
+ for (HashMap::Entry* p = profiles_uids_.Start();
+ p != NULL;
+ p = profiles_uids_.Next(p)) {
+ intptr_t p_index = reinterpret_cast<intptr_t>(p->value);
+ if (p_index > index) {
+ p->value = reinterpret_cast<void*>(p_index - 1);
+ }
+ }
+ for (int i = 0; i < profiles_by_token_.length(); ++i) {
+ List<CpuProfile*>* list = profiles_by_token_[i];
+ if (list != NULL && index < list->length()) {
+ // Move all filtered clones into detached_profiles_,
+ // so we can know that they are still in use.
+ CpuProfile* cloned_profile = list->Remove(index);
+ if (cloned_profile != NULL && cloned_profile != profile) {
+ detached_profiles_.Add(cloned_profile);
+ }
+ }
+ }
+}
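
Because the profile lists are compacted on removal, every uid-to-index mapping above the deleted slot becomes stale and must be decremented, which is what the loop over profiles_uids_ does above. A toy model of that fix-up, with standard containers standing in for V8's HashMap and List:

#include <cassert>
#include <unordered_map>
#include <vector>

int main() {
  std::vector<unsigned> profiles = {101, 102, 103};  // uids by index
  std::unordered_map<unsigned, int> uid_to_index =
      {{101, 0}, {102, 1}, {103, 2}};

  const unsigned removed_uid = 102;
  int removed_index = uid_to_index[removed_uid];
  uid_to_index.erase(removed_uid);
  profiles.erase(profiles.begin() + removed_index);

  // Every index above the removed slot shifts down by one.
  for (auto& entry : uid_to_index) {
    if (entry.second > removed_index) --entry.second;
  }

  assert(uid_to_index[103] == 1);
  assert(profiles[uid_to_index[103]] == 103);
}
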
+
+
int CpuProfilesCollection::TokenToIndex(int security_token_id) {
ASSERT(TokenEnumerator::kNoSecurityToken == -1);
return security_token_id + 1; // kNoSecurityToken -> 0, 0 -> 1, ...
@@ -763,10 +824,12 @@ void SampleRateCalculator::UpdateMeasurements(double current_time) {
}
-const char* ProfileGenerator::kAnonymousFunctionName = "(anonymous function)";
-const char* ProfileGenerator::kProgramEntryName = "(program)";
-const char* ProfileGenerator::kGarbageCollectorEntryName =
- "(garbage collector)";
+const char* const ProfileGenerator::kAnonymousFunctionName =
+ "(anonymous function)";
+const char* const ProfileGenerator::kProgramEntryName =
+ "(program)";
+const char* const ProfileGenerator::kGarbageCollectorEntryName =
+ "(garbage collector)";
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
@@ -789,7 +852,15 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
if (sample.pc != NULL) {
*entry++ = code_map_.FindEntry(sample.pc);
- if (sample.tos != NULL) {
+ if (sample.has_external_callback) {
+ // Don't use PC when in external callback code, as it can point
+ // inside callback's code, and we will erroneously report
+ // that a callback calls itself.
+ *(entries.start()) = NULL;
+ *entry++ = code_map_.FindEntry(sample.external_callback);
+ } else if (sample.tos != NULL) {
+ // Find out whether the top of the stack points inside a JS function,
+ // meaning that we have encountered a frameless invocation.
*entry = code_map_.FindEntry(sample.tos);
if (*entry != NULL && !(*entry)->is_js_function()) {
*entry = NULL;
@@ -914,11 +985,6 @@ int HeapEntry::RetainedSize(bool exact) {
}
-List<HeapGraphPath*>* HeapEntry::GetRetainingPaths() {
- return snapshot_->GetRetainingPaths(this);
-}
-
-
template<class Visitor>
void HeapEntry::ApplyAndPaintAllReachable(Visitor* visitor) {
List<HeapEntry*> list(10);
@@ -1009,6 +1075,7 @@ const char* HeapEntry::TypeAsString() {
case kArray: return "/array/";
case kRegExp: return "/regexp/";
case kHeapNumber: return "/number/";
+ case kNative: return "/native/";
default: return "???";
}
}
@@ -1029,7 +1096,7 @@ class RetainedSizeCalculator {
: retained_size_(0) {
}
- int reained_size() const { return retained_size_; }
+ int retained_size() const { return retained_size_; }
void Apply(HeapEntry** entry_ptr) {
if ((*entry_ptr)->painted_reachable()) {
@@ -1070,113 +1137,12 @@ void HeapEntry::CalculateExactRetainedSize() {
RetainedSizeCalculator ret_size_calc;
snapshot()->IterateEntries(&ret_size_calc);
- retained_size_ = ret_size_calc.reained_size();
+ retained_size_ = ret_size_calc.retained_size();
ASSERT((retained_size_ & kExactRetainedSizeTag) == 0);
retained_size_ |= kExactRetainedSizeTag;
}
-class CachedHeapGraphPath {
- public:
- CachedHeapGraphPath()
- : nodes_(NodesMatch) { }
- CachedHeapGraphPath(const CachedHeapGraphPath& src)
- : nodes_(NodesMatch, &HashMap::DefaultAllocator, src.nodes_.capacity()),
- path_(src.path_.length() + 1) {
- for (HashMap::Entry* p = src.nodes_.Start();
- p != NULL;
- p = src.nodes_.Next(p)) {
- nodes_.Lookup(p->key, p->hash, true);
- }
- path_.AddAll(src.path_);
- }
- void Add(HeapGraphEdge* edge) {
- nodes_.Lookup(edge->to(), Hash(edge->to()), true);
- path_.Add(edge);
- }
- bool ContainsNode(HeapEntry* node) {
- return nodes_.Lookup(node, Hash(node), false) != NULL;
- }
- const List<HeapGraphEdge*>* path() const { return &path_; }
-
- private:
- static uint32_t Hash(HeapEntry* entry) {
- return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry));
- }
- static bool NodesMatch(void* key1, void* key2) { return key1 == key2; }
-
- HashMap nodes_;
- List<HeapGraphEdge*> path_;
-};
-
-
-List<HeapGraphPath*>* HeapEntry::CalculateRetainingPaths() {
- List<HeapGraphPath*>* retaining_paths = new List<HeapGraphPath*>(4);
- CachedHeapGraphPath path;
- FindRetainingPaths(&path, retaining_paths);
- return retaining_paths;
-}
-
-
-void HeapEntry::FindRetainingPaths(CachedHeapGraphPath* prev_path,
- List<HeapGraphPath*>* retaining_paths) {
- Vector<HeapGraphEdge*> rets = retainers();
- for (int i = 0; i < rets.length(); ++i) {
- HeapGraphEdge* ret_edge = rets[i];
- if (prev_path->ContainsNode(ret_edge->From())) continue;
- if (ret_edge->From() != snapshot()->root()) {
- CachedHeapGraphPath path(*prev_path);
- path.Add(ret_edge);
- ret_edge->From()->FindRetainingPaths(&path, retaining_paths);
- } else {
- HeapGraphPath* ret_path = new HeapGraphPath(*prev_path->path());
- ret_path->Set(0, ret_edge);
- retaining_paths->Add(ret_path);
- }
- }
-}
-
-
-HeapGraphPath::HeapGraphPath(const List<HeapGraphEdge*>& path)
- : path_(path.length() + 1) {
- Add(NULL);
- for (int i = path.length() - 1; i >= 0; --i) {
- Add(path[i]);
- }
-}
-
-
-void HeapGraphPath::Print() {
- path_[0]->From()->Print(1, 0);
- for (int i = 0; i < path_.length(); ++i) {
- OS::Print(" -> ");
- HeapGraphEdge* edge = path_[i];
- switch (edge->type()) {
- case HeapGraphEdge::kContextVariable:
- OS::Print("[#%s] ", edge->name());
- break;
- case HeapGraphEdge::kElement:
- case HeapGraphEdge::kHidden:
- OS::Print("[%d] ", edge->index());
- break;
- case HeapGraphEdge::kInternal:
- OS::Print("[$%s] ", edge->name());
- break;
- case HeapGraphEdge::kProperty:
- OS::Print("[%s] ", edge->name());
- break;
- case HeapGraphEdge::kShortcut:
- OS::Print("[^%s] ", edge->name());
- break;
- default:
- OS::Print("!!! unknown edge type: %d ", edge->type());
- }
- edge->to()->Print(1, 0);
- }
- OS::Print("\n");
-}
-
-
// It is very important to keep objects that form a heap snapshot
// as small as possible.
namespace { // Avoid littering the global namespace.
@@ -1205,9 +1171,9 @@ HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
uid_(uid),
root_entry_(NULL),
gc_roots_entry_(NULL),
+ natives_root_entry_(NULL),
raw_entries_(NULL),
- entries_sorted_(false),
- retaining_paths_(HeapEntry::Match) {
+ entries_sorted_(false) {
STATIC_ASSERT(
sizeof(HeapGraphEdge) ==
SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapGraphEdgeSize); // NOLINT
@@ -1216,21 +1182,14 @@ HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapEntrySize); // NOLINT
}
-
-static void DeleteHeapGraphPath(HeapGraphPath** path_ptr) {
- delete *path_ptr;
-}
-
HeapSnapshot::~HeapSnapshot() {
DeleteArray(raw_entries_);
- for (HashMap::Entry* p = retaining_paths_.Start();
- p != NULL;
- p = retaining_paths_.Next(p)) {
- List<HeapGraphPath*>* list =
- reinterpret_cast<List<HeapGraphPath*>*>(p->value);
- list->Iterate(DeleteHeapGraphPath);
- delete list;
- }
+}
+
+
+void HeapSnapshot::Delete() {
+ collection_->RemoveSnapshot(this);
+ delete this;
}
@@ -1279,6 +1238,19 @@ HeapEntry* HeapSnapshot::AddGcRootsEntry(int children_count,
}
+HeapEntry* HeapSnapshot::AddNativesRootEntry(int children_count,
+ int retainers_count) {
+ ASSERT(natives_root_entry_ == NULL);
+ return (natives_root_entry_ = AddEntry(
+ HeapEntry::kObject,
+ "(Native objects)",
+ HeapObjectsMap::kNativesRootObjectId,
+ 0,
+ children_count,
+ retainers_count));
+}
+
+
HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
const char* name,
uint64_t id,
@@ -1313,14 +1285,7 @@ HeapEntry* HeapSnapshot::GetNextEntryToInit() {
}
-HeapSnapshotsDiff* HeapSnapshot::CompareWith(HeapSnapshot* snapshot) {
- return collection_->CompareSnapshots(this, snapshot);
-}
-
-
HeapEntry* HeapSnapshot::GetEntryById(uint64_t id) {
- // GetSortedEntriesList is used in diff algorithm and sorts
- // entries by their id.
List<HeapEntry*>* entries_by_id = GetSortedEntriesList();
// Perform a binary search by id.
@@ -1341,16 +1306,6 @@ HeapEntry* HeapSnapshot::GetEntryById(uint64_t id) {
}
-List<HeapGraphPath*>* HeapSnapshot::GetRetainingPaths(HeapEntry* entry) {
- HashMap::Entry* p =
- retaining_paths_.Lookup(entry, HeapEntry::Hash(entry), true);
- if (p->value == NULL) {
- p->value = entry->CalculateRetainingPaths();
- }
- return reinterpret_cast<List<HeapGraphPath*>*>(p->value);
-}
-
-
template<class T>
static int SortByIds(const T* entry1_ptr,
const T* entry2_ptr) {
@@ -1372,10 +1327,13 @@ void HeapSnapshot::Print(int max_depth) {
}
-const uint64_t HeapObjectsMap::kInternalRootObjectId = 0;
-const uint64_t HeapObjectsMap::kGcRootsObjectId = 1;
+// We split IDs: embedder objects get even IDs (see
+// HeapObjectsMap::GenerateId), while V8 heap objects get odd ones.
+const uint64_t HeapObjectsMap::kInternalRootObjectId = 1;
+const uint64_t HeapObjectsMap::kGcRootsObjectId = 3;
+const uint64_t HeapObjectsMap::kNativesRootObjectId = 5;
// Increase kFirstAvailableObjectId if new 'special' objects appear.
-const uint64_t HeapObjectsMap::kFirstAvailableObjectId = 2;
+const uint64_t HeapObjectsMap::kFirstAvailableObjectId = 7;
HeapObjectsMap::HeapObjectsMap()
: initial_fill_mode_(true),
@@ -1400,7 +1358,8 @@ uint64_t HeapObjectsMap::FindObject(Address addr) {
uint64_t existing = FindEntry(addr);
if (existing != 0) return existing;
}
- uint64_t id = next_id_++;
+ uint64_t id = next_id_;
+ next_id_ += 2;
AddEntry(addr, id);
return id;
}
@@ -1468,6 +1427,17 @@ void HeapObjectsMap::RemoveDeadEntries() {
}
+uint64_t HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
+ uint64_t id = static_cast<uint64_t>(info->GetHash());
+ const char* label = info->GetLabel();
+ id ^= HashSequentialString(label, static_cast<int>(strlen(label)));
+ intptr_t element_count = info->GetElementCount();
+ if (element_count != -1)
+ id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count));
+ return id << 1;
+}
+
+
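
The parity trick above can be shown in isolation. In this sketch (hashing simplified to std::hash, constants as in the patch), heap objects draw odd IDs from a counter that starts odd and steps by two, while embedder-supplied objects get a hash shifted left by one, which is always even, so the two ID spaces can never collide:

    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <string>

    uint64_t next_heap_id = 7;  // cf. kFirstAvailableObjectId

    uint64_t NewHeapObjectId() {
      uint64_t id = next_heap_id;
      next_heap_id += 2;  // stays odd forever
      return id;
    }

    uint64_t GenerateEmbedderId(uint64_t hash, const std::string& label) {
      uint64_t id = hash ^ std::hash<std::string>()(label);
      return id << 1;  // even by construction
    }

    int main() {
      std::printf("heap: %llu embedder: %llu\n",
                  static_cast<unsigned long long>(NewHeapObjectId()),
                  static_cast<unsigned long long>(
                      GenerateEmbedderId(0x1234, "Document")));
    }
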
HeapSnapshotsCollection::HeapSnapshotsCollection()
: is_tracking_objects_(false),
snapshots_uids_(HeapSnapshotsMatch),
@@ -1517,10 +1487,11 @@ HeapSnapshot* HeapSnapshotsCollection::GetSnapshot(unsigned uid) {
}
-HeapSnapshotsDiff* HeapSnapshotsCollection::CompareSnapshots(
- HeapSnapshot* snapshot1,
- HeapSnapshot* snapshot2) {
- return comparator_.Compare(snapshot1, snapshot2);
+void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
+ snapshots_.RemoveElement(snapshot);
+ unsigned uid = snapshot->uid();
+ snapshots_uids_.Remove(reinterpret_cast<void*>(uid),
+ static_cast<uint32_t>(uid));
}
@@ -1551,6 +1522,8 @@ void HeapEntriesMap::AllocateEntries() {
p->key,
entry_info->children_count,
entry_info->retainers_count);
+ ASSERT(entry_info->entry != NULL);
+ ASSERT(entry_info->entry != kHeapEntryPlaceholder);
entry_info->children_count = 0;
entry_info->retainers_count = 0;
}
@@ -1629,10 +1602,34 @@ void HeapObjectsSet::Insert(Object* obj) {
}
+const char* HeapObjectsSet::GetTag(Object* obj) {
+ HeapObject* object = HeapObject::cast(obj);
+ HashMap::Entry* cache_entry =
+ entries_.Lookup(object, HeapEntriesMap::Hash(object), false);
+  if (cache_entry != NULL &&
+      cache_entry->value != HeapEntriesMap::kHeapEntryPlaceholder) {
+ return reinterpret_cast<const char*>(cache_entry->value);
+ } else {
+ return NULL;
+ }
+}
+
+
+void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
+ if (!obj->IsHeapObject()) return;
+ HeapObject* object = HeapObject::cast(obj);
+ HashMap::Entry* cache_entry =
+ entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
+ cache_entry->value = const_cast<char*>(tag);
+}
+
+
HeapObject *const V8HeapExplorer::kInternalRootObject =
- reinterpret_cast<HeapObject*>(1);
+ reinterpret_cast<HeapObject*>(
+ static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId));
HeapObject *const V8HeapExplorer::kGcRootsObject =
- reinterpret_cast<HeapObject*>(2);
+ reinterpret_cast<HeapObject*>(
+ static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
V8HeapExplorer::V8HeapExplorer(
@@ -1664,32 +1661,45 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
return snapshot_->AddRootEntry(children_count);
} else if (object == kGcRootsObject) {
return snapshot_->AddGcRootsEntry(children_count, retainers_count);
+ } else if (object->IsJSGlobalObject()) {
+ const char* tag = objects_tags_.GetTag(object);
+ const char* name = collection_->names()->GetName(
+ GetConstructorNameForHeapProfile(JSObject::cast(object)));
+ if (tag != NULL) {
+ name = collection_->names()->GetFormatted("%s / %s", name, tag);
+ }
+ return AddEntry(object,
+ HeapEntry::kObject,
+ name,
+ children_count,
+ retainers_count);
} else if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(object);
SharedFunctionInfo* shared = func->shared();
return AddEntry(object,
HeapEntry::kClosure,
- collection_->GetName(String::cast(shared->name())),
+ collection_->names()->GetName(String::cast(shared->name())),
children_count,
retainers_count);
} else if (object->IsJSRegExp()) {
JSRegExp* re = JSRegExp::cast(object);
return AddEntry(object,
HeapEntry::kRegExp,
- collection_->GetName(re->Pattern()),
+ collection_->names()->GetName(re->Pattern()),
children_count,
retainers_count);
} else if (object->IsJSObject()) {
return AddEntry(object,
HeapEntry::kObject,
- collection_->GetName(GetConstructorNameForHeapProfile(
- JSObject::cast(object))),
+ collection_->names()->GetName(
+ GetConstructorNameForHeapProfile(
+ JSObject::cast(object))),
children_count,
retainers_count);
} else if (object->IsString()) {
return AddEntry(object,
HeapEntry::kString,
- collection_->GetName(String::cast(object)),
+ collection_->names()->GetName(String::cast(object)),
children_count,
retainers_count);
} else if (object->IsCode()) {
@@ -1702,7 +1712,7 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
return AddEntry(object,
HeapEntry::kCode,
- collection_->GetName(String::cast(shared->name())),
+ collection_->names()->GetName(String::cast(shared->name())),
children_count,
retainers_count);
} else if (object->IsScript()) {
@@ -1710,10 +1720,12 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
return AddEntry(object,
HeapEntry::kCode,
script->name()->IsString() ?
- collection_->GetName(String::cast(script->name())) : "",
+ collection_->names()->GetName(
+ String::cast(script->name()))
+ : "",
children_count,
retainers_count);
- } else if (object->IsFixedArray()) {
+ } else if (object->IsFixedArray() || object->IsByteArray()) {
return AddEntry(object,
HeapEntry::kArray,
"",
@@ -1728,7 +1740,7 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
}
return AddEntry(object,
HeapEntry::kHidden,
- "system",
+ GetSystemEntryName(object),
children_count,
retainers_count);
}
@@ -1749,8 +1761,23 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
- filler->AddEntry(kInternalRootObject);
- filler->AddEntry(kGcRootsObject);
+ filler->AddEntry(kInternalRootObject, this);
+ filler->AddEntry(kGcRootsObject, this);
+}
+
+
+const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
+ switch (object->map()->instance_type()) {
+ case MAP_TYPE: return "system / Map";
+ case JS_GLOBAL_PROPERTY_CELL_TYPE: return "system / JSGlobalPropertyCell";
+ case FOREIGN_TYPE: return "system / Foreign";
+ case ODDBALL_TYPE: return "system / Oddball";
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+ case NAME##_TYPE: return "system / "#Name;
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ default: return "system";
+ }
}
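
The STRUCT_LIST expansion in GetSystemEntryName is the classic X-macro pattern: one list macro is expanded twice with different per-entry macros. A self-contained sketch with a hypothetical two-entry list:

    #include <cstdio>

    // Hypothetical (ENUM, ClassName) list; V8's STRUCT_LIST enumerates real
    // struct types the same way.
    #define MY_STRUCT_LIST(V)        \
      V(ACCESSOR_INFO, AccessorInfo) \
      V(SCRIPT, Script)

    enum InstanceType {
    #define DEFINE_TYPE(NAME, Name) NAME##_TYPE,
      MY_STRUCT_LIST(DEFINE_TYPE)
    #undef DEFINE_TYPE
      OTHER_TYPE
    };

    const char* SystemEntryName(InstanceType type) {
      switch (type) {
    #define MAKE_STRUCT_CASE(NAME, Name) \
        case NAME##_TYPE: return "system / " #Name;
        MY_STRUCT_LIST(MAKE_STRUCT_CASE)
    #undef MAKE_STRUCT_CASE
        default: return "system";
      }
    }

    int main() {
      std::printf("%s\n", SystemEntryName(SCRIPT_TYPE));  // system / Script
    }
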
@@ -1768,26 +1795,39 @@ class IndexedReferencesExtractor : public ObjectVisitor {
public:
IndexedReferencesExtractor(V8HeapExplorer* generator,
HeapObject* parent_obj,
- HeapEntry* parent_entry,
- HeapObjectsSet* known_references = NULL)
+ HeapEntry* parent_entry)
: generator_(generator),
parent_obj_(parent_obj),
parent_(parent_entry),
- known_references_(known_references),
next_index_(1) {
}
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
- if (!known_references_ || !known_references_->Contains(*p)) {
- generator_->SetHiddenReference(parent_obj_, parent_, next_index_++, *p);
- }
+ if (CheckVisitedAndUnmark(p)) continue;
+ generator_->SetHiddenReference(parent_obj_, parent_, next_index_++, *p);
}
}
+ static void MarkVisitedField(HeapObject* obj, int offset) {
+ if (offset < 0) return;
+ Address field = obj->address() + offset;
+ ASSERT(!Memory::Object_at(field)->IsFailure());
+ ASSERT(Memory::Object_at(field)->IsHeapObject());
+ *field |= kFailureTag;
+ }
+
private:
+ bool CheckVisitedAndUnmark(Object** field) {
+ if ((*field)->IsFailure()) {
+ intptr_t untagged = reinterpret_cast<intptr_t>(*field) & ~kFailureTagMask;
+ *field = reinterpret_cast<Object*>(untagged | kHeapObjectTag);
+ ASSERT((*field)->IsHeapObject());
+ return true;
+ }
+ return false;
+ }
V8HeapExplorer* generator_;
HeapObject* parent_obj_;
HeapEntry* parent_;
- HeapObjectsSet* known_references_;
int next_index_;
};
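
MarkVisitedField and CheckVisitedAndUnmark rely on pointer tagging: aligned pointers have zero low bits, so a low bit can temporarily flag a field as already reported and be stripped on the next visit. A simplified sketch (V8 actually reuses its failure tag bits rather than a plain low bit):

    #include <cassert>
    #include <cstdint>

    const uintptr_t kVisitedBit = 1;  // illustrative; V8 uses kFailureTag

    void MarkVisited(void** field) {
      *field = reinterpret_cast<void*>(
          reinterpret_cast<uintptr_t>(*field) | kVisitedBit);
    }

    bool CheckVisitedAndUnmark(void** field) {
      uintptr_t raw = reinterpret_cast<uintptr_t>(*field);
      if (raw & kVisitedBit) {
        *field = reinterpret_cast<void*>(raw & ~kVisitedBit);  // restore pointer
        return true;
      }
      return false;
    }

    int main() {
      int object = 42;
      void* slot = &object;
      MarkVisited(&slot);
      assert(CheckVisitedAndUnmark(&slot) && slot == &object);
      assert(!CheckVisitedAndUnmark(&slot));  // second visit sees a clean pointer
    }
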
@@ -1796,13 +1836,13 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
HeapEntry* entry = GetEntry(obj);
if (entry == NULL) return; // No interest in this object.
- known_references_.Clear();
if (obj->IsJSGlobalProxy()) {
// We need to reference JS global objects from snapshot's root.
// We use JSGlobalProxy because this is what embedder (e.g. browser)
// uses for the global object.
JSGlobalProxy* proxy = JSGlobalProxy::cast(obj);
SetRootShortcutReference(proxy->map()->prototype());
+ SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
IndexedReferencesExtractor refs_extractor(this, obj, entry);
obj->Iterate(&refs_extractor);
} else if (obj->IsJSObject()) {
@@ -1812,16 +1852,40 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
ExtractElementReferences(js_obj, entry);
ExtractInternalReferences(js_obj, entry);
SetPropertyReference(
- obj, entry, Heap::Proto_symbol(), js_obj->GetPrototype());
+ obj, entry, HEAP->Proto_symbol(), js_obj->GetPrototype());
if (obj->IsJSFunction()) {
- JSFunction* js_fun = JSFunction::cast(obj);
- if (js_fun->has_prototype()) {
- SetPropertyReference(
- obj, entry, Heap::prototype_symbol(), js_fun->prototype());
+ JSFunction* js_fun = JSFunction::cast(js_obj);
+ Object* proto_or_map = js_fun->prototype_or_initial_map();
+ if (!proto_or_map->IsTheHole()) {
+ if (!proto_or_map->IsMap()) {
+ SetPropertyReference(
+ obj, entry,
+ HEAP->prototype_symbol(), proto_or_map,
+ JSFunction::kPrototypeOrInitialMapOffset);
+ } else {
+ SetPropertyReference(
+ obj, entry,
+ HEAP->prototype_symbol(), js_fun->prototype());
+ }
}
+ SetInternalReference(js_fun, entry,
+ "shared", js_fun->shared(),
+ JSFunction::kSharedFunctionInfoOffset);
+ SetInternalReference(js_fun, entry,
+ "context", js_fun->unchecked_context(),
+ JSFunction::kContextOffset);
+ SetInternalReference(js_fun, entry,
+ "literals", js_fun->literals(),
+ JSFunction::kLiteralsOffset);
}
- IndexedReferencesExtractor refs_extractor(
- this, obj, entry, &known_references_);
+ SetInternalReference(obj, entry,
+ "properties", js_obj->properties(),
+ JSObject::kPropertiesOffset);
+ SetInternalReference(obj, entry,
+ "elements", js_obj->elements(),
+ JSObject::kElementsOffset);
+ SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
+ IndexedReferencesExtractor refs_extractor(this, obj, entry);
obj->Iterate(&refs_extractor);
} else if (obj->IsString()) {
if (obj->IsConsString()) {
@@ -1829,7 +1893,43 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
SetInternalReference(obj, entry, 1, cs->first());
SetInternalReference(obj, entry, 2, cs->second());
}
+ } else if (obj->IsMap()) {
+ Map* map = Map::cast(obj);
+ SetInternalReference(obj, entry,
+ "prototype", map->prototype(), Map::kPrototypeOffset);
+ SetInternalReference(obj, entry,
+ "constructor", map->constructor(),
+ Map::kConstructorOffset);
+ if (!map->instance_descriptors()->IsEmpty()) {
+ SetInternalReference(obj, entry,
+ "descriptors", map->instance_descriptors(),
+ Map::kInstanceDescriptorsOrBitField3Offset);
+ }
+ SetInternalReference(obj, entry,
+ "code_cache", map->code_cache(),
+ Map::kCodeCacheOffset);
+ SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
+ IndexedReferencesExtractor refs_extractor(this, obj, entry);
+ obj->Iterate(&refs_extractor);
+ } else if (obj->IsSharedFunctionInfo()) {
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+ SetInternalReference(obj, entry,
+ "name", shared->name(),
+ SharedFunctionInfo::kNameOffset);
+ SetInternalReference(obj, entry,
+ "code", shared->unchecked_code(),
+ SharedFunctionInfo::kCodeOffset);
+ SetInternalReference(obj, entry,
+ "instance_class_name", shared->instance_class_name(),
+ SharedFunctionInfo::kInstanceClassNameOffset);
+ SetInternalReference(obj, entry,
+ "script", shared->script(),
+ SharedFunctionInfo::kScriptOffset);
+ SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
+ IndexedReferencesExtractor refs_extractor(this, obj, entry);
+ obj->Iterate(&refs_extractor);
} else {
+ SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
IndexedReferencesExtractor refs_extractor(this, obj, entry);
obj->Iterate(&refs_extractor);
}
@@ -1842,7 +1942,7 @@ void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
HandleScope hs;
JSFunction* func = JSFunction::cast(js_obj);
Context* context = func->context();
- ZoneScope zscope(DELETE_ON_EXIT);
+ ZoneScope zscope(Isolate::Current(), DELETE_ON_EXIT);
SerializedScopeInfo* serialized_scope_info =
context->closure()->shared()->scope_info();
ScopeInfo<ZoneListAllocationPolicy> zone_scope_info(serialized_scope_info);
@@ -1854,7 +1954,6 @@ void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
SetClosureReference(js_obj, entry, local_name, context->get(idx));
}
}
- SetInternalReference(js_obj, entry, "code", func->shared());
}
}
@@ -1867,13 +1966,22 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj,
switch (descs->GetType(i)) {
case FIELD: {
int index = descs->GetFieldIndex(i);
- SetPropertyReference(
- js_obj, entry, descs->GetKey(i), js_obj->FastPropertyAt(index));
+ if (index < js_obj->map()->inobject_properties()) {
+ SetPropertyReference(
+ js_obj, entry,
+ descs->GetKey(i), js_obj->InObjectPropertyAt(index),
+ js_obj->GetInObjectPropertyOffset(index));
+ } else {
+ SetPropertyReference(
+ js_obj, entry,
+ descs->GetKey(i), js_obj->FastPropertyAt(index));
+ }
break;
}
case CONSTANT_FUNCTION:
SetPropertyReference(
- js_obj, entry, descs->GetKey(i), descs->GetConstantFunction(i));
+ js_obj, entry,
+ descs->GetKey(i), descs->GetConstantFunction(i));
break;
default: ;
}
@@ -1933,14 +2041,15 @@ void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj,
int length = js_obj->GetInternalFieldCount();
for (int i = 0; i < length; ++i) {
Object* o = js_obj->GetInternalField(i);
- SetInternalReference(js_obj, entry, i, o);
+ SetInternalReference(
+ js_obj, entry, i, o, js_obj->GetInternalFieldOffset(i));
}
}
HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
if (!obj->IsHeapObject()) return NULL;
- return filler_->FindOrAddEntry(obj);
+ return filler_->FindOrAddEntry(obj, this);
}
@@ -1977,7 +2086,7 @@ bool V8HeapExplorer::IterateAndExtractReferences(
}
SetRootGcRootsReference();
RootsReferencesExtractor extractor(this);
- Heap::IterateRoots(&extractor, VISIT_ALL);
+ HEAP->IterateRoots(&extractor, VISIT_ALL);
filler_ = NULL;
return progress_->ProgressReport(false);
}
@@ -1992,10 +2101,9 @@ void V8HeapExplorer::SetClosureReference(HeapObject* parent_obj,
filler_->SetNamedReference(HeapGraphEdge::kContextVariable,
parent_obj,
parent_entry,
- collection_->GetName(reference_name),
+ collection_->names()->GetName(reference_name),
child_obj,
child_entry);
- known_references_.Insert(child_obj);
}
}
@@ -2012,7 +2120,6 @@ void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
index,
child_obj,
child_entry);
- known_references_.Insert(child_obj);
}
}
@@ -2020,7 +2127,8 @@ void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
HeapEntry* parent_entry,
const char* reference_name,
- Object* child_obj) {
+ Object* child_obj,
+ int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
filler_->SetNamedReference(HeapGraphEdge::kInternal,
@@ -2029,7 +2137,7 @@ void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
reference_name,
child_obj,
child_entry);
- known_references_.Insert(child_obj);
+ IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
}
}
@@ -2037,16 +2145,17 @@ void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
HeapEntry* parent_entry,
int index,
- Object* child_obj) {
+ Object* child_obj,
+ int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
filler_->SetNamedReference(HeapGraphEdge::kInternal,
parent_obj,
parent_entry,
- collection_->GetName(index),
+ collection_->names()->GetName(index),
child_obj,
child_entry);
- known_references_.Insert(child_obj);
+ IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
}
}
@@ -2070,7 +2179,8 @@ void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
HeapEntry* parent_entry,
String* reference_name,
- Object* child_obj) {
+ Object* child_obj,
+ int field_offset) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
HeapGraphEdge::Type type = reference_name->length() > 0 ?
@@ -2078,25 +2188,24 @@ void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
filler_->SetNamedReference(type,
parent_obj,
parent_entry,
- collection_->GetName(reference_name),
+ collection_->names()->GetName(reference_name),
child_obj,
child_entry);
- known_references_.Insert(child_obj);
+ IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
}
}
-void V8HeapExplorer::SetPropertyShortcutReference(
- HeapObject* parent_obj,
- HeapEntry* parent_entry,
- String* reference_name,
- Object* child_obj) {
+void V8HeapExplorer::SetPropertyShortcutReference(HeapObject* parent_obj,
+ HeapEntry* parent_entry,
+ String* reference_name,
+ Object* child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
filler_->SetNamedReference(HeapGraphEdge::kShortcut,
parent_obj,
parent_entry,
- collection_->GetName(reference_name),
+ collection_->names()->GetName(reference_name),
child_obj,
child_entry);
}
@@ -2132,25 +2241,275 @@ void V8HeapExplorer::SetGcRootsReference(Object* child_obj) {
}
+class GlobalObjectsEnumerator : public ObjectVisitor {
+ public:
+ virtual void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) {
+ if ((*p)->IsGlobalContext()) {
+ Context* context = Context::cast(*p);
+ JSObject* proxy = context->global_proxy();
+ if (proxy->IsJSGlobalProxy()) {
+ Object* global = proxy->map()->prototype();
+ if (global->IsJSGlobalObject()) {
+ objects_.Add(Handle<JSGlobalObject>(JSGlobalObject::cast(global)));
+ }
+ }
+ }
+ }
+ }
+ int count() { return objects_.length(); }
+ Handle<JSGlobalObject>& at(int i) { return objects_[i]; }
+
+ private:
+ List<Handle<JSGlobalObject> > objects_;
+};
+
+
+// Modifies heap. Must not be run during heap traversal.
+void V8HeapExplorer::TagGlobalObjects() {
+ Isolate* isolate = Isolate::Current();
+ GlobalObjectsEnumerator enumerator;
+ isolate->global_handles()->IterateAllRoots(&enumerator);
+ Handle<String> document_string =
+ isolate->factory()->NewStringFromAscii(CStrVector("document"));
+ Handle<String> url_string =
+ isolate->factory()->NewStringFromAscii(CStrVector("URL"));
+ const char** urls = NewArray<const char*>(enumerator.count());
+ for (int i = 0, l = enumerator.count(); i < l; ++i) {
+ urls[i] = NULL;
+ Handle<JSGlobalObject> global_obj = enumerator.at(i);
+ Object* obj_document;
+ if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) &&
+ obj_document->IsJSObject()) {
+ JSObject* document = JSObject::cast(obj_document);
+ Object* obj_url;
+ if (document->GetProperty(*url_string)->ToObject(&obj_url) &&
+ obj_url->IsString()) {
+ urls[i] = collection_->names()->GetName(String::cast(obj_url));
+ }
+ }
+ }
+
+ AssertNoAllocation no_allocation;
+ for (int i = 0, l = enumerator.count(); i < l; ++i) {
+ objects_tags_.SetTag(*enumerator.at(i), urls[i]);
+ }
+
+ DeleteArray(urls);
+}
+
+
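
TagGlobalObjects is deliberately two-phase: reading document.URL can call back into JS and allocate, which is forbidden once heap traversal begins, so all labels are gathered first and only then applied under AssertNoAllocation. The same shape in a sketch with illustrative types:

    #include <string>
    #include <unordered_map>
    #include <vector>

    struct GlobalObject { int id; };

    // Phase 1: may run arbitrary code, like GetProperty("document") above.
    std::vector<std::string> GatherLabels(
        const std::vector<GlobalObject>& globals) {
      std::vector<std::string> labels;
      for (const GlobalObject& g : globals)
        labels.push_back("http://example/" + std::to_string(g.id));
      return labels;
    }

    // Phase 2: only copies precomputed labels; no callbacks into user code
    // (V8 additionally forbids GC allocation here via AssertNoAllocation).
    void ApplyTags(const std::vector<GlobalObject>& globals,
                   const std::vector<std::string>& labels,
                   std::unordered_map<int, std::string>* tags) {
      for (size_t i = 0; i < globals.size(); ++i)
        (*tags)[globals[i].id] = labels[i];
    }

    int main() {
      std::vector<GlobalObject> globals = {{1}, {2}};
      std::unordered_map<int, std::string> tags;
      ApplyTags(globals, GatherLabels(globals), &tags);
    }
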
+class GlobalHandlesExtractor : public ObjectVisitor {
+ public:
+ explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer)
+ : explorer_(explorer) {}
+ virtual ~GlobalHandlesExtractor() {}
+ virtual void VisitPointers(Object** start, Object** end) {
+ UNREACHABLE();
+ }
+ virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {
+ explorer_->VisitSubtreeWrapper(p, class_id);
+ }
+ private:
+ NativeObjectsExplorer* explorer_;
+};
+
+HeapThing const NativeObjectsExplorer::kNativesRootObject =
+ reinterpret_cast<HeapThing>(
+ static_cast<intptr_t>(HeapObjectsMap::kNativesRootObjectId));
+
+
+NativeObjectsExplorer::NativeObjectsExplorer(
+ HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress)
+ : snapshot_(snapshot),
+ collection_(snapshot_->collection()),
+ progress_(progress),
+ embedder_queried_(false),
+ objects_by_info_(RetainedInfosMatch),
+ filler_(NULL) {
+}
+
+
+NativeObjectsExplorer::~NativeObjectsExplorer() {
+ for (HashMap::Entry* p = objects_by_info_.Start();
+ p != NULL;
+ p = objects_by_info_.Next(p)) {
+ v8::RetainedObjectInfo* info =
+ reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
+ info->Dispose();
+ List<HeapObject*>* objects =
+ reinterpret_cast<List<HeapObject*>* >(p->value);
+ delete objects;
+ }
+}
+
+
+HeapEntry* NativeObjectsExplorer::AllocateEntry(
+ HeapThing ptr, int children_count, int retainers_count) {
+ if (ptr == kNativesRootObject) {
+ return snapshot_->AddNativesRootEntry(children_count, retainers_count);
+ } else {
+ v8::RetainedObjectInfo* info =
+ reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
+ intptr_t elements = info->GetElementCount();
+ intptr_t size = info->GetSizeInBytes();
+ return snapshot_->AddEntry(
+ HeapEntry::kNative,
+ elements != -1 ?
+ collection_->names()->GetFormatted(
+ "%s / %" V8_PTR_PREFIX "d entries",
+ info->GetLabel(),
+ info->GetElementCount()) :
+ collection_->names()->GetCopy(info->GetLabel()),
+ HeapObjectsMap::GenerateId(info),
+ size != -1 ? static_cast<int>(size) : 0,
+ children_count,
+ retainers_count);
+ }
+}
+
+
+void NativeObjectsExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
+ if (EstimateObjectsCount() <= 0) return;
+ filler->AddEntry(kNativesRootObject, this);
+}
+
+
+int NativeObjectsExplorer::EstimateObjectsCount() {
+ FillRetainedObjects();
+ return objects_by_info_.occupancy();
+}
+
+
+void NativeObjectsExplorer::FillRetainedObjects() {
+ if (embedder_queried_) return;
+ Isolate* isolate = Isolate::Current();
+ // Record objects that are joined into ObjectGroups.
+ isolate->heap()->CallGlobalGCPrologueCallback();
+ List<ObjectGroup*>* groups = isolate->global_handles()->object_groups();
+ for (int i = 0; i < groups->length(); ++i) {
+ ObjectGroup* group = groups->at(i);
+ if (group->info_ == NULL) continue;
+ List<HeapObject*>* list = GetListMaybeDisposeInfo(group->info_);
+ for (size_t j = 0; j < group->length_; ++j) {
+ HeapObject* obj = HeapObject::cast(*group->objects_[j]);
+ list->Add(obj);
+ in_groups_.Insert(obj);
+ }
+ group->info_ = NULL; // Acquire info object ownership.
+ }
+ isolate->global_handles()->RemoveObjectGroups();
+ isolate->heap()->CallGlobalGCEpilogueCallback();
+  // Record objects that are not in ObjectGroups, but have a class ID.
+ GlobalHandlesExtractor extractor(this);
+ isolate->global_handles()->IterateAllRootsWithClassIds(&extractor);
+ embedder_queried_ = true;
+}
+
+
+List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
+ v8::RetainedObjectInfo* info) {
+ HashMap::Entry* entry =
+ objects_by_info_.Lookup(info, InfoHash(info), true);
+ if (entry->value != NULL) {
+ info->Dispose();
+ } else {
+ entry->value = new List<HeapObject*>(4);
+ }
+ return reinterpret_cast<List<HeapObject*>* >(entry->value);
+}
+
+
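
GetListMaybeDisposeInfo encodes an ownership rule: the map keeps exactly one live RetainedObjectInfo per equivalence class, so a duplicate handed in by the embedder is disposed immediately and the existing list is reused. A sketch with illustrative types:

    #include <unordered_map>
    #include <vector>

    struct Info {
      int key;
      void Dispose() { /* release embedder resources */ }
    };

    using ObjectList = std::vector<const void*>;

    ObjectList* GetListMaybeDispose(std::unordered_map<int, ObjectList*>* map,
                                    Info* info) {
      auto result = map->insert({info->key, nullptr});
      if (!result.second) {
        info->Dispose();  // an equivalent Info is already owned; drop this one
      } else {
        result.first->second = new ObjectList();  // map now owns the list
      }
      return result.first->second;
    }

    int main() {
      std::unordered_map<int, ObjectList*> map;
      Info a = {7}, b = {7};
      ObjectList* first = GetListMaybeDispose(&map, &a);
      ObjectList* again = GetListMaybeDispose(&map, &b);  // b gets disposed
      return first == again ? 0 : 1;
    }
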
+bool NativeObjectsExplorer::IterateAndExtractReferences(
+ SnapshotFillerInterface* filler) {
+ if (EstimateObjectsCount() <= 0) return true;
+ filler_ = filler;
+ FillRetainedObjects();
+ for (HashMap::Entry* p = objects_by_info_.Start();
+ p != NULL;
+ p = objects_by_info_.Next(p)) {
+ v8::RetainedObjectInfo* info =
+ reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
+ SetNativeRootReference(info);
+ List<HeapObject*>* objects =
+ reinterpret_cast<List<HeapObject*>* >(p->value);
+ for (int i = 0; i < objects->length(); ++i) {
+ SetWrapperNativeReferences(objects->at(i), info);
+ }
+ }
+ SetRootNativesRootReference();
+ filler_ = NULL;
+ return true;
+}
+
+
+void NativeObjectsExplorer::SetNativeRootReference(
+ v8::RetainedObjectInfo* info) {
+ HeapEntry* child_entry = filler_->FindOrAddEntry(info, this);
+ ASSERT(child_entry != NULL);
+ filler_->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement,
+ kNativesRootObject, snapshot_->natives_root(),
+ info, child_entry);
+}
+
+
+void NativeObjectsExplorer::SetWrapperNativeReferences(
+ HeapObject* wrapper, v8::RetainedObjectInfo* info) {
+ HeapEntry* wrapper_entry = filler_->FindEntry(wrapper);
+ ASSERT(wrapper_entry != NULL);
+ HeapEntry* info_entry = filler_->FindOrAddEntry(info, this);
+ ASSERT(info_entry != NULL);
+ filler_->SetNamedReference(HeapGraphEdge::kInternal,
+ wrapper, wrapper_entry,
+ "native",
+ info, info_entry);
+ filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
+ info, info_entry,
+ wrapper, wrapper_entry);
+}
+
+
+void NativeObjectsExplorer::SetRootNativesRootReference() {
+ filler_->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement,
+ V8HeapExplorer::kInternalRootObject, snapshot_->root(),
+ kNativesRootObject, snapshot_->natives_root());
+}
+
+
+void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
+ if (in_groups_.Contains(*p)) return;
+ Isolate* isolate = Isolate::Current();
+ v8::RetainedObjectInfo* info =
+ isolate->heap_profiler()->ExecuteWrapperClassCallback(class_id, p);
+ if (info == NULL) return;
+ GetListMaybeDisposeInfo(info)->Add(HeapObject::cast(*p));
+}
+
+
HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot,
v8::ActivityControl* control)
: snapshot_(snapshot),
control_(control),
- v8_heap_explorer_(snapshot_, this) {
+ v8_heap_explorer_(snapshot_, this),
+ dom_explorer_(snapshot_, this) {
}
class SnapshotCounter : public SnapshotFillerInterface {
public:
- SnapshotCounter(HeapEntriesAllocator* allocator, HeapEntriesMap* entries)
- : allocator_(allocator), entries_(entries) { }
- HeapEntry* AddEntry(HeapThing ptr) {
- entries_->Pair(ptr, allocator_, HeapEntriesMap::kHeapEntryPlaceholder);
+ explicit SnapshotCounter(HeapEntriesMap* entries) : entries_(entries) { }
+ HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ entries_->Pair(ptr, allocator, HeapEntriesMap::kHeapEntryPlaceholder);
return HeapEntriesMap::kHeapEntryPlaceholder;
}
- HeapEntry* FindOrAddEntry(HeapThing ptr) {
- HeapEntry* entry = entries_->Map(ptr);
- return entry != NULL ? entry : AddEntry(ptr);
+ HeapEntry* FindEntry(HeapThing ptr) {
+ return entries_->Map(ptr);
+ }
+ HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ HeapEntry* entry = FindEntry(ptr);
+ return entry != NULL ? entry : AddEntry(ptr, allocator);
}
void SetIndexedReference(HeapGraphEdge::Type,
HeapThing parent_ptr,
@@ -2182,8 +2541,8 @@ class SnapshotCounter : public SnapshotFillerInterface {
HeapEntry*) {
entries_->CountReference(parent_ptr, child_ptr);
}
+
private:
- HeapEntriesAllocator* allocator_;
HeapEntriesMap* entries_;
};
@@ -2194,13 +2553,16 @@ class SnapshotFiller : public SnapshotFillerInterface {
: snapshot_(snapshot),
collection_(snapshot->collection()),
entries_(entries) { }
- HeapEntry* AddEntry(HeapThing ptr) {
+ HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
UNREACHABLE();
return NULL;
}
- HeapEntry* FindOrAddEntry(HeapThing ptr) {
- HeapEntry* entry = entries_->Map(ptr);
- return entry != NULL ? entry : AddEntry(ptr);
+ HeapEntry* FindEntry(HeapThing ptr) {
+ return entries_->Map(ptr);
+ }
+ HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ HeapEntry* entry = FindEntry(ptr);
+ return entry != NULL ? entry : AddEntry(ptr, allocator);
}
void SetIndexedReference(HeapGraphEdge::Type type,
HeapThing parent_ptr,
@@ -2247,10 +2609,11 @@ class SnapshotFiller : public SnapshotFillerInterface {
parent_ptr, child_ptr, &child_index, &retainer_index);
parent_entry->SetNamedReference(type,
child_index,
- collection_->GetName(child_index + 1),
+ collection_->names()->GetName(child_index + 1),
child_entry,
retainer_index);
}
+
private:
HeapSnapshot* snapshot_;
HeapSnapshotsCollection* collection_;
@@ -2259,6 +2622,8 @@ class SnapshotFiller : public SnapshotFillerInterface {
bool HeapSnapshotGenerator::GenerateSnapshot() {
+ v8_heap_explorer_.TagGlobalObjects();
+
AssertNoAllocation no_alloc;
SetProgressTotal(4); // 2 passes + dominators + sizes.
@@ -2303,21 +2668,28 @@ bool HeapSnapshotGenerator::ProgressReport(bool force) {
void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
if (control_ == NULL) return;
- progress_total_ = v8_heap_explorer_.EstimateObjectsCount() * iterations_count;
+ progress_total_ = (
+ v8_heap_explorer_.EstimateObjectsCount() +
+ dom_explorer_.EstimateObjectsCount()) * iterations_count;
progress_counter_ = 0;
}
bool HeapSnapshotGenerator::CountEntriesAndReferences() {
- SnapshotCounter counter(&v8_heap_explorer_, &entries_);
+ SnapshotCounter counter(&entries_);
v8_heap_explorer_.AddRootEntries(&counter);
- return v8_heap_explorer_.IterateAndExtractReferences(&counter);
+ dom_explorer_.AddRootEntries(&counter);
+ return
+ v8_heap_explorer_.IterateAndExtractReferences(&counter) &&
+ dom_explorer_.IterateAndExtractReferences(&counter);
}
bool HeapSnapshotGenerator::FillReferences() {
SnapshotFiller filler(snapshot_, &entries_);
- return v8_heap_explorer_.IterateAndExtractReferences(&filler);
+ return
+ v8_heap_explorer_.IterateAndExtractReferences(&filler) &&
+ dom_explorer_.IterateAndExtractReferences(&filler);
}
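
The generator runs the same traversal twice: SnapshotCounter sizes every node's edge arrays, then SnapshotFiller writes edges into storage allocated from those exact counts. A minimal sketch of the count-then-fill shape, with a trivial graph standing in for the heap:

    #include <cstdio>
    #include <vector>

    struct Node {
      int edge_count = 0;        // filled by the counting pass
      std::vector<int> edges;    // sized from the count, filled by the second pass
    };

    template <typename ReportEdge>
    void Traverse(std::vector<Node>* graph, ReportEdge report) {
      // Stand-in for IterateAndExtractReferences: visits every edge once.
      for (size_t i = 0; i + 1 < graph->size(); ++i)
        report(static_cast<int>(i), static_cast<int>(i + 1));
    }

    int main() {
      std::vector<Node> graph(3);
      // Pass 1: count only (cf. SnapshotCounter).
      Traverse(&graph, [&](int from, int) { graph[from].edge_count++; });
      // Allocate exactly, then pass 2: fill (cf. SnapshotFiller).
      for (Node& n : graph) n.edges.reserve(n.edge_count);
      Traverse(&graph, [&](int from, int to) { graph[from].edges.push_back(to); });
      std::printf("node 0 has %zu edge(s)\n", graph[0].edges.size());
    }
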
@@ -2447,83 +2819,6 @@ bool HeapSnapshotGenerator::ApproximateRetainedSizes() {
}
-void HeapSnapshotsDiff::CreateRoots(int additions_count, int deletions_count) {
- raw_additions_root_ =
- NewArray<char>(HeapEntry::EntriesSize(1, additions_count, 0));
- additions_root()->Init(
- snapshot2_, HeapEntry::kHidden, "", 0, 0, additions_count, 0);
- raw_deletions_root_ =
- NewArray<char>(HeapEntry::EntriesSize(1, deletions_count, 0));
- deletions_root()->Init(
- snapshot1_, HeapEntry::kHidden, "", 0, 0, deletions_count, 0);
-}
-
-
-static void DeleteHeapSnapshotsDiff(HeapSnapshotsDiff** diff_ptr) {
- delete *diff_ptr;
-}
-
-HeapSnapshotsComparator::~HeapSnapshotsComparator() {
- diffs_.Iterate(DeleteHeapSnapshotsDiff);
-}
-
-
-HeapSnapshotsDiff* HeapSnapshotsComparator::Compare(HeapSnapshot* snapshot1,
- HeapSnapshot* snapshot2) {
- snapshot1->ClearPaint();
- snapshot1->root()->PaintAllReachable();
- snapshot2->ClearPaint();
- snapshot2->root()->PaintAllReachable();
-
- List<HeapEntry*>* entries1 = snapshot1->GetSortedEntriesList();
- List<HeapEntry*>* entries2 = snapshot2->GetSortedEntriesList();
- int i = 0, j = 0;
- List<HeapEntry*> added_entries, deleted_entries;
- while (i < entries1->length() && j < entries2->length()) {
- uint64_t id1 = entries1->at(i)->id();
- uint64_t id2 = entries2->at(j)->id();
- if (id1 == id2) {
- HeapEntry* entry1 = entries1->at(i++);
- HeapEntry* entry2 = entries2->at(j++);
- if (entry1->painted_reachable() != entry2->painted_reachable()) {
- if (entry1->painted_reachable())
- deleted_entries.Add(entry1);
- else
- added_entries.Add(entry2);
- }
- } else if (id1 < id2) {
- HeapEntry* entry = entries1->at(i++);
- deleted_entries.Add(entry);
- } else {
- HeapEntry* entry = entries2->at(j++);
- added_entries.Add(entry);
- }
- }
- while (i < entries1->length()) {
- HeapEntry* entry = entries1->at(i++);
- deleted_entries.Add(entry);
- }
- while (j < entries2->length()) {
- HeapEntry* entry = entries2->at(j++);
- added_entries.Add(entry);
- }
-
- HeapSnapshotsDiff* diff = new HeapSnapshotsDiff(snapshot1, snapshot2);
- diffs_.Add(diff);
- diff->CreateRoots(added_entries.length(), deleted_entries.length());
-
- for (int i = 0; i < deleted_entries.length(); ++i) {
- HeapEntry* entry = deleted_entries[i];
- diff->AddDeletedEntry(i, i + 1, entry);
- }
- for (int i = 0; i < added_entries.length(); ++i) {
- HeapEntry* entry = added_entries[i];
- diff->AddAddedEntry(i, i + 1, entry);
- }
- return diff;
-}
-
-
class OutputStreamWriter {
public:
explicit OutputStreamWriter(v8::OutputStream* stream)
@@ -2735,7 +3030,8 @@ void HeapSnapshotJSONSerializer::SerializeNodes() {
"," JSON_S("code")
"," JSON_S("closure")
"," JSON_S("regexp")
- "," JSON_S("number"))
+ "," JSON_S("number")
+ "," JSON_S("native"))
"," JSON_S("string")
"," JSON_S("number")
"," JSON_S("number")
@@ -2890,7 +3186,7 @@ void HeapSnapshotJSONSerializer::SortHashMap(
String* GetConstructorNameForHeapProfile(JSObject* object) {
- if (object->IsJSFunction()) return Heap::closure_symbol();
+ if (object->IsJSFunction()) return HEAP->closure_symbol();
return object->constructor_name();
}
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index 4762eb634..6343d057c 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -30,6 +30,7 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
+#include "allocation.h"
#include "hashmap.h"
#include "../include/v8-profiler.h"
@@ -66,6 +67,9 @@ class StringsStorage {
StringsStorage();
~StringsStorage();
+ const char* GetCopy(const char* src);
+ const char* GetFormatted(const char* format, ...);
+ const char* GetVFormatted(const char* format, va_list args);
const char* GetName(String* name);
const char* GetName(int index);
inline const char* GetFunctionName(String* name);
@@ -76,11 +80,10 @@ class StringsStorage {
return strcmp(reinterpret_cast<char*>(key1),
reinterpret_cast<char*>(key2)) == 0;
}
+ const char* AddOrDisposeString(char* str, uint32_t hash);
// Mapping of strings by String::Hash to const char* strings.
HashMap names_;
- // Mapping from ints to char* strings.
- List<char*> index_names_;
DISALLOW_COPY_AND_ASSIGN(StringsStorage);
};
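
GetCopy and GetFormatted both funnel into an intern table (AddOrDisposeString): a freshly built string that already has a stored twin is dropped and the canonical copy is returned, so repeated names cost one allocation. A sketch with std::unordered_set standing in for the hash-keyed HashMap:

    #include <cstdio>
    #include <string>
    #include <unordered_set>

    class Strings {
     public:
      const char* GetCopy(const char* src) {
        // Inserting a duplicate is a no-op; either way the stored copy wins.
        return interned_.insert(src).first->c_str();
      }
      const char* GetFormatted(const char* format, int value) {
        char buf[64];
        std::snprintf(buf, sizeof(buf), format, value);
        return GetCopy(buf);  // duplicates collapse to one stored copy
      }
     private:
      std::unordered_set<std::string> interned_;  // node-based: pointers stay valid
    };

    int main() {
      Strings names;
      const char* a = names.GetFormatted("%d entries", 3);
      const char* b = names.GetFormatted("%d entries", 3);
      std::printf("%s %s\n", a, a == b ? "(shared)" : "(distinct)");  // shared
    }
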
@@ -112,7 +115,7 @@ class CodeEntry {
uint32_t GetCallUid() const;
bool IsSameAs(CodeEntry* entry) const;
- static const char* kEmptyNamePrefix;
+ static const char* const kEmptyNamePrefix;
private:
Logger::LogEventsAndTags tag_;
@@ -236,12 +239,12 @@ class CpuProfile {
class CodeMap {
public:
- CodeMap() : next_sfi_tag_(1) { }
+ CodeMap() : next_shared_id_(1) { }
INLINE(void AddCode(Address addr, CodeEntry* entry, unsigned size));
INLINE(void MoveCode(Address from, Address to));
INLINE(void DeleteCode(Address addr));
CodeEntry* FindEntry(Address addr);
- int GetSFITag(Address addr);
+ int GetSharedId(Address addr);
void Print();
@@ -269,11 +272,11 @@ class CodeMap {
void Call(const Address& key, const CodeEntryInfo& value);
};
- // Fake CodeEntry pointer to distinguish SFI entries.
- static CodeEntry* const kSfiCodeEntry;
+ // Fake CodeEntry pointer to distinguish shared function entries.
+ static CodeEntry* const kSharedFunctionCodeEntry;
CodeTree tree_;
- int next_sfi_tag_;
+ int next_shared_id_;
DISALLOW_COPY_AND_ASSIGN(CodeMap);
};
@@ -298,6 +301,8 @@ class CpuProfilesCollection {
}
CpuProfile* GetProfile(int security_token_id, unsigned uid);
bool IsLastProfile(const char* title);
+ void RemoveProfile(CpuProfile* profile);
+ bool HasDetachedProfiles() { return detached_profiles_.length() > 0; }
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
String* name, String* resource_name, int line_number);
@@ -320,6 +325,7 @@ class CpuProfilesCollection {
const char* GetFunctionName(const char* name) {
return function_and_resource_names_.GetFunctionName(name);
}
+ int GetProfileIndex(unsigned uid);
List<CpuProfile*>* GetProfilesList(int security_token_id);
int TokenToIndex(int security_token_id);
@@ -333,6 +339,7 @@ class CpuProfilesCollection {
// Mapping from profiles' uids to indexes in the second nested list
// of profiles_by_token_.
HashMap profiles_uids_;
+ List<CpuProfile*> detached_profiles_;
// Accessed by VM thread and profile generator thread.
List<CpuProfile*> current_profiles_;
@@ -420,9 +427,9 @@ class ProfileGenerator {
return sample_rate_calc_.ticks_per_ms();
}
- static const char* kAnonymousFunctionName;
- static const char* kProgramEntryName;
- static const char* kGarbageCollectorEntryName;
+ static const char* const kAnonymousFunctionName;
+ static const char* const kProgramEntryName;
+ static const char* const kGarbageCollectorEntryName;
private:
INLINE(CodeEntry* EntryForVMState(StateTag tag));
@@ -484,8 +491,6 @@ class HeapGraphEdge BASE_EMBEDDED {
};
-class CachedHeapGraphPath;
-class HeapGraphPath;
class HeapSnapshot;
// HeapEntry instances represent an entity from the heap (or a special
@@ -517,7 +522,8 @@ class HeapEntry BASE_EMBEDDED {
kCode = v8::HeapGraphNode::kCode,
kClosure = v8::HeapGraphNode::kClosure,
kRegExp = v8::HeapGraphNode::kRegExp,
- kHeapNumber = v8::HeapGraphNode::kHeapNumber
+ kHeapNumber = v8::HeapGraphNode::kHeapNumber,
+ kNative = v8::HeapGraphNode::kNative
};
HeapEntry() { }
@@ -544,7 +550,6 @@ class HeapEntry BASE_EMBEDDED {
return Vector<HeapGraphEdge>(children_arr(), children_count_); }
Vector<HeapGraphEdge*> retainers() {
return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_); }
- List<HeapGraphPath*>* GetRetainingPaths();
HeapEntry* dominator() { return dominator_; }
void set_dominator(HeapEntry* entry) { dominator_ = entry; }
@@ -578,18 +583,12 @@ class HeapEntry BASE_EMBEDDED {
int EntrySize() { return EntriesSize(1, children_count_, retainers_count_); }
int RetainedSize(bool exact);
- List<HeapGraphPath*>* CalculateRetainingPaths();
void Print(int max_depth, int indent);
static int EntriesSize(int entries_count,
int children_count,
int retainers_count);
- static uint32_t Hash(HeapEntry* entry) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(entry)));
- }
- static bool Match(void* entry1, void* entry2) { return entry1 == entry2; }
private:
HeapGraphEdge* children_arr() {
@@ -599,13 +598,11 @@ class HeapEntry BASE_EMBEDDED {
return reinterpret_cast<HeapGraphEdge**>(children_arr() + children_count_);
}
void CalculateExactRetainedSize();
- void FindRetainingPaths(CachedHeapGraphPath* prev_path,
- List<HeapGraphPath*>* retaining_paths);
const char* TypeAsString();
unsigned painted_: 2;
- unsigned type_: 3;
- int children_count_: 27;
+ unsigned type_: 4;
+ int children_count_: 26;
int retainers_count_;
int self_size_;
union {
@@ -631,27 +628,7 @@ class HeapEntry BASE_EMBEDDED {
};
-class HeapGraphPath {
- public:
- HeapGraphPath()
- : path_(8) { }
- explicit HeapGraphPath(const List<HeapGraphEdge*>& path);
-
- void Add(HeapGraphEdge* edge) { path_.Add(edge); }
- void Set(int index, HeapGraphEdge* edge) { path_[index] = edge; }
- const List<HeapGraphEdge*>* path() { return &path_; }
-
- void Print();
-
- private:
- List<HeapGraphEdge*> path_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapGraphPath);
-};
-
-
class HeapSnapshotsCollection;
-class HeapSnapshotsDiff;
// HeapSnapshot represents a single heap snapshot. It is stored in
// HeapSnapshotsCollection, which is also a factory for
@@ -661,8 +638,7 @@ class HeapSnapshotsDiff;
class HeapSnapshot {
public:
enum Type {
- kFull = v8::HeapSnapshot::kFull,
- kAggregated = v8::HeapSnapshot::kAggregated
+ kFull = v8::HeapSnapshot::kFull
};
HeapSnapshot(HeapSnapshotsCollection* collection,
@@ -670,6 +646,7 @@ class HeapSnapshot {
const char* title,
unsigned uid);
~HeapSnapshot();
+ void Delete();
HeapSnapshotsCollection* collection() { return collection_; }
Type type() { return type_; }
@@ -677,6 +654,7 @@ class HeapSnapshot {
unsigned uid() { return uid_; }
HeapEntry* root() { return root_entry_; }
HeapEntry* gc_roots() { return gc_roots_entry_; }
+ HeapEntry* natives_root() { return natives_root_entry_; }
List<HeapEntry*>* entries() { return &entries_; }
void AllocateEntries(
@@ -689,10 +667,9 @@ class HeapSnapshot {
int retainers_count);
HeapEntry* AddRootEntry(int children_count);
HeapEntry* AddGcRootsEntry(int children_count, int retainers_count);
+ HeapEntry* AddNativesRootEntry(int children_count, int retainers_count);
void ClearPaint();
- HeapSnapshotsDiff* CompareWith(HeapSnapshot* snapshot);
HeapEntry* GetEntryById(uint64_t id);
- List<HeapGraphPath*>* GetRetainingPaths(HeapEntry* entry);
List<HeapEntry*>* GetSortedEntriesList();
template<class Visitor>
void IterateEntries(Visitor* visitor) { entries_.Iterate(visitor); }
@@ -710,10 +687,10 @@ class HeapSnapshot {
unsigned uid_;
HeapEntry* root_entry_;
HeapEntry* gc_roots_entry_;
+ HeapEntry* natives_root_entry_;
char* raw_entries_;
List<HeapEntry*> entries_;
bool entries_sorted_;
- HashMap retaining_paths_;
#ifdef DEBUG
int raw_entries_size_;
#endif
@@ -733,8 +710,11 @@ class HeapObjectsMap {
uint64_t FindObject(Address addr);
void MoveObject(Address from, Address to);
+ static uint64_t GenerateId(v8::RetainedObjectInfo* info);
+
static const uint64_t kInternalRootObjectId;
static const uint64_t kGcRootsObjectId;
+ static const uint64_t kNativesRootObjectId;
static const uint64_t kFirstAvailableObjectId;
private:
@@ -767,58 +747,6 @@ class HeapObjectsMap {
};
-class HeapSnapshotsDiff {
- public:
- HeapSnapshotsDiff(HeapSnapshot* snapshot1, HeapSnapshot* snapshot2)
- : snapshot1_(snapshot1),
- snapshot2_(snapshot2),
- raw_additions_root_(NULL),
- raw_deletions_root_(NULL) { }
-
- ~HeapSnapshotsDiff() {
- DeleteArray(raw_deletions_root_);
- DeleteArray(raw_additions_root_);
- }
-
- void AddAddedEntry(int child_index, int index, HeapEntry* entry) {
- additions_root()->SetUnidirElementReference(child_index, index, entry);
- }
-
- void AddDeletedEntry(int child_index, int index, HeapEntry* entry) {
- deletions_root()->SetUnidirElementReference(child_index, index, entry);
- }
-
- void CreateRoots(int additions_count, int deletions_count);
-
- HeapEntry* additions_root() {
- return reinterpret_cast<HeapEntry*>(raw_additions_root_);
- }
- HeapEntry* deletions_root() {
- return reinterpret_cast<HeapEntry*>(raw_deletions_root_);
- }
-
- private:
- HeapSnapshot* snapshot1_;
- HeapSnapshot* snapshot2_;
- char* raw_additions_root_;
- char* raw_deletions_root_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsDiff);
-};
-
-
-class HeapSnapshotsComparator {
- public:
- HeapSnapshotsComparator() { }
- ~HeapSnapshotsComparator();
- HeapSnapshotsDiff* Compare(HeapSnapshot* snapshot1, HeapSnapshot* snapshot2);
- private:
- List<HeapSnapshotsDiff*> diffs_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsComparator);
-};
-
-
class HeapSnapshotsCollection {
public:
HeapSnapshotsCollection();
@@ -831,21 +759,14 @@ class HeapSnapshotsCollection {
void SnapshotGenerationFinished(HeapSnapshot* snapshot);
List<HeapSnapshot*>* snapshots() { return &snapshots_; }
HeapSnapshot* GetSnapshot(unsigned uid);
+ void RemoveSnapshot(HeapSnapshot* snapshot);
- const char* GetName(String* name) { return names_.GetName(name); }
- const char* GetName(int index) { return names_.GetName(index); }
- const char* GetFunctionName(String* name) {
- return names_.GetFunctionName(name);
- }
-
+ StringsStorage* names() { return &names_; }
TokenEnumerator* token_enumerator() { return token_enumerator_; }
uint64_t GetObjectId(Address addr) { return ids_.FindObject(addr); }
void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
- HeapSnapshotsDiff* CompareSnapshots(HeapSnapshot* snapshot1,
- HeapSnapshot* snapshot2);
-
private:
INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
return key1 == key2;
@@ -859,7 +780,6 @@ class HeapSnapshotsCollection {
TokenEnumerator* token_enumerator_;
// Mapping from HeapObject addresses to objects' uids.
HeapObjectsMap ids_;
- HeapSnapshotsComparator comparator_;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
};
@@ -938,6 +858,8 @@ class HeapObjectsSet {
void Clear();
bool Contains(Object* object);
void Insert(Object* obj);
+ const char* GetTag(Object* obj);
+ void SetTag(Object* obj, const char* tag);
private:
HashMap entries_;
@@ -950,8 +872,11 @@ class HeapObjectsSet {
class SnapshotFillerInterface {
public:
virtual ~SnapshotFillerInterface() { }
- virtual HeapEntry* AddEntry(HeapThing ptr) = 0;
- virtual HeapEntry* FindOrAddEntry(HeapThing ptr) = 0;
+ virtual HeapEntry* AddEntry(HeapThing ptr,
+ HeapEntriesAllocator* allocator) = 0;
+ virtual HeapEntry* FindEntry(HeapThing ptr) = 0;
+ virtual HeapEntry* FindOrAddEntry(HeapThing ptr,
+ HeapEntriesAllocator* allocator) = 0;
virtual void SetIndexedReference(HeapGraphEdge::Type type,
HeapThing parent_ptr,
HeapEntry* parent_entry,
@@ -990,12 +915,15 @@ class V8HeapExplorer : public HeapEntriesAllocator {
public:
V8HeapExplorer(HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress);
- ~V8HeapExplorer();
+ virtual ~V8HeapExplorer();
virtual HeapEntry* AllocateEntry(
HeapThing ptr, int children_count, int retainers_count);
void AddRootEntries(SnapshotFillerInterface* filler);
int EstimateObjectsCount();
bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
+ void TagGlobalObjects();
+
+ static HeapObject* const kInternalRootObject;
private:
HeapEntry* AddEntry(
@@ -1005,6 +933,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
const char* name,
int children_count,
int retainers_count);
+ const char* GetSystemEntryName(HeapObject* object);
void ExtractReferences(HeapObject* obj);
void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
@@ -1021,11 +950,13 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void SetInternalReference(HeapObject* parent_obj,
HeapEntry* parent,
const char* reference_name,
- Object* child);
+ Object* child,
+ int field_offset = -1);
void SetInternalReference(HeapObject* parent_obj,
HeapEntry* parent,
int index,
- Object* child);
+ Object* child,
+ int field_offset = -1);
void SetHiddenReference(HeapObject* parent_obj,
HeapEntry* parent,
int index,
@@ -1033,7 +964,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void SetPropertyReference(HeapObject* parent_obj,
HeapEntry* parent,
String* reference_name,
- Object* child);
+ Object* child,
+ int field_offset = -1);
void SetPropertyShortcutReference(HeapObject* parent_obj,
HeapEntry* parent,
String* reference_name,
@@ -1047,12 +979,9 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapSnapshot* snapshot_;
HeapSnapshotsCollection* collection_;
SnapshottingProgressReportingInterface* progress_;
- // Used during references extraction to mark heap objects that
- // are references via non-hidden properties.
- HeapObjectsSet known_references_;
SnapshotFillerInterface* filler_;
+ HeapObjectsSet objects_tags_;
- static HeapObject* const kInternalRootObject;
static HeapObject* const kGcRootsObject;
friend class IndexedReferencesExtractor;
@@ -1062,6 +991,54 @@ class V8HeapExplorer : public HeapEntriesAllocator {
};
+// An implementation of a retained native objects extractor.
+class NativeObjectsExplorer : public HeapEntriesAllocator {
+ public:
+ NativeObjectsExplorer(HeapSnapshot* snapshot,
+ SnapshottingProgressReportingInterface* progress);
+ virtual ~NativeObjectsExplorer();
+ virtual HeapEntry* AllocateEntry(
+ HeapThing ptr, int children_count, int retainers_count);
+ void AddRootEntries(SnapshotFillerInterface* filler);
+ int EstimateObjectsCount();
+ bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
+
+ private:
+ void FillRetainedObjects();
+ List<HeapObject*>* GetListMaybeDisposeInfo(v8::RetainedObjectInfo* info);
+ void SetNativeRootReference(v8::RetainedObjectInfo* info);
+ void SetRootNativesRootReference();
+ void SetWrapperNativeReferences(HeapObject* wrapper,
+ v8::RetainedObjectInfo* info);
+ void VisitSubtreeWrapper(Object** p, uint16_t class_id);
+
+ static uint32_t InfoHash(v8::RetainedObjectInfo* info) {
+ return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()));
+ }
+ static bool RetainedInfosMatch(void* key1, void* key2) {
+ return key1 == key2 ||
+ (reinterpret_cast<v8::RetainedObjectInfo*>(key1))->IsEquivalent(
+ reinterpret_cast<v8::RetainedObjectInfo*>(key2));
+ }
+
+ HeapSnapshot* snapshot_;
+ HeapSnapshotsCollection* collection_;
+ SnapshottingProgressReportingInterface* progress_;
+ bool embedder_queried_;
+ HeapObjectsSet in_groups_;
+ // RetainedObjectInfo* -> List<HeapObject*>*
+ HashMap objects_by_info_;
+ // Used during references extraction.
+ SnapshotFillerInterface* filler_;
+
+ static HeapThing const kNativesRootObject;
+
+ friend class GlobalHandlesExtractor;
+
+ DISALLOW_COPY_AND_ASSIGN(NativeObjectsExplorer);
+};
+
+
class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
public:
HeapSnapshotGenerator(HeapSnapshot* snapshot,
@@ -1083,6 +1060,7 @@ class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
HeapSnapshot* snapshot_;
v8::ActivityControl* control_;
V8HeapExplorer v8_heap_explorer_;
+ NativeObjectsExplorer dom_explorer_;
// Mapping from HeapThing pointers to HeapEntry* pointers.
HeapEntriesMap entries_;
// Used during snapshot generation.
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index 96774333e..dd232093b 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -52,6 +52,12 @@ void LookupResult::Print(FILE* out) {
GetTransitionMap()->Print(out);
PrintF(out, "\n");
break;
+ case EXTERNAL_ARRAY_TRANSITION:
+ PrintF(out, " -type = external array transition\n");
+ PrintF(out, " -map:\n");
+ GetTransitionMap()->Print(out);
+ PrintF(out, "\n");
+ break;
case CONSTANT_FUNCTION:
PrintF(out, " -type = constant function\n");
PrintF(out, " -function:\n");
@@ -68,6 +74,9 @@ void LookupResult::Print(FILE* out) {
PrintF(out, " -callback object:\n");
GetCallbackObject()->Print(out);
break;
+ case HANDLER:
+ PrintF(out, " -type = lookup proxy\n");
+ break;
case INTERCEPTOR:
PrintF(out, " -type = lookup interceptor\n");
break;
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index c39fe41e7..87f9ea3d5 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -28,6 +28,8 @@
#ifndef V8_PROPERTY_H_
#define V8_PROPERTY_H_
+#include "allocation.h"
+
namespace v8 {
namespace internal {
@@ -48,7 +50,7 @@ class Descriptor BASE_EMBEDDED {
MUST_USE_RESULT MaybeObject* KeyToSymbol() {
if (!StringShape(key_).IsSymbol()) {
Object* result;
- { MaybeObject* maybe_result = Heap::LookupSymbol(key_);
+ { MaybeObject* maybe_result = HEAP->LookupSymbol(key_);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
key_ = String::cast(result);
@@ -110,6 +112,16 @@ class MapTransitionDescriptor: public Descriptor {
: Descriptor(key, map, attributes, MAP_TRANSITION) { }
};
+class ExternalArrayTransitionDescriptor: public Descriptor {
+ public:
+ ExternalArrayTransitionDescriptor(String* key,
+ Map* map,
+ ExternalArrayType array_type)
+ : Descriptor(key, map, PropertyDetails(NONE,
+ EXTERNAL_ARRAY_TRANSITION,
+ array_type)) { }
+};
+
// Marks a field name in a map so that adding the field is guaranteed
// to create a FIELD descriptor in the new map. Used after adding
// a constant function the first time, creating a CONSTANT_FUNCTION
@@ -145,24 +157,15 @@ class ConstantFunctionDescriptor: public Descriptor {
class CallbacksDescriptor: public Descriptor {
public:
CallbacksDescriptor(String* key,
- Object* proxy,
+ Object* foreign,
PropertyAttributes attributes,
int index = 0)
- : Descriptor(key, proxy, attributes, CALLBACKS, index) {}
+ : Descriptor(key, foreign, attributes, CALLBACKS, index) {}
};
class LookupResult BASE_EMBEDDED {
public:
- // Where did we find the result;
- enum {
- NOT_FOUND,
- DESCRIPTOR_TYPE,
- DICTIONARY_TYPE,
- INTERCEPTOR_TYPE,
- CONSTANT_TYPE
- } lookup_type_;
-
LookupResult()
: lookup_type_(NOT_FOUND),
cacheable_(true),
@@ -175,6 +178,13 @@ class LookupResult BASE_EMBEDDED {
number_ = number;
}
+ void DescriptorResult(JSObject* holder, Smi* details, int number) {
+ lookup_type_ = DESCRIPTOR_TYPE;
+ holder_ = holder;
+ details_ = PropertyDetails(details);
+ number_ = number;
+ }
+
void ConstantResult(JSObject* holder) {
lookup_type_ = CONSTANT_TYPE;
holder_ = holder;
@@ -192,6 +202,12 @@ class LookupResult BASE_EMBEDDED {
number_ = entry;
}
+ void HandlerResult() {
+ lookup_type_ = HANDLER_TYPE;
+ holder_ = NULL;
+ details_ = PropertyDetails(NONE, HANDLER);
+ }
+
void InterceptorResult(JSObject* holder) {
lookup_type_ = INTERCEPTOR_TYPE;
holder_ = holder;
@@ -226,6 +242,7 @@ class LookupResult BASE_EMBEDDED {
bool IsDontEnum() { return details_.IsDontEnum(); }
bool IsDeleted() { return details_.IsDeleted(); }
bool IsFound() { return lookup_type_ != NOT_FOUND; }
+ bool IsHandler() { return lookup_type_ == HANDLER_TYPE; }
// Is the result a property, excluding transitions and the null
// descriptor?
@@ -262,7 +279,8 @@ class LookupResult BASE_EMBEDDED {
Map* GetTransitionMap() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == MAP_TRANSITION || type() == CONSTANT_TRANSITION);
+ ASSERT(type() == MAP_TRANSITION || type() == CONSTANT_TRANSITION ||
+ type() == EXTERNAL_ARRAY_TRANSITION);
return Map::cast(GetValue());
}
@@ -305,7 +323,7 @@ class LookupResult BASE_EMBEDDED {
Object* GetCallbackObject() {
if (lookup_type_ == CONSTANT_TYPE) {
// For now we only have the __proto__ as constant type.
- return Heap::prototype_accessors();
+ return HEAP->prototype_accessors();
}
return GetValue();
}
@@ -325,6 +343,16 @@ class LookupResult BASE_EMBEDDED {
}
private:
+  // Where did we find the result?
+ enum {
+ NOT_FOUND,
+ DESCRIPTOR_TYPE,
+ DICTIONARY_TYPE,
+ HANDLER_TYPE,
+ INTERCEPTOR_TYPE,
+ CONSTANT_TYPE
+ } lookup_type_;
+
JSObject* holder_;
int number_;
bool cacheable_;
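
The hunk above moves the lookup_type_ enum out of LookupResult's public interface into the private section and adds HANDLER_TYPE for proxy lookups, so callers go through predicates such as IsHandler(). A reduced sketch of that shape, with illustrative names rather than V8's:

// Sketch only: a tagged lookup result with a private discriminant and
// public predicates, as in the LookupResult change above.
#include <cassert>
#include <iostream>

class Lookup {
 public:
  Lookup() : where_(NOT_FOUND) {}
  void DescriptorResult() { where_ = DESCRIPTOR_TYPE; }
  void HandlerResult()    { where_ = HANDLER_TYPE; }  // proxy handler hit

  bool IsFound()   const { return where_ != NOT_FOUND; }
  bool IsHandler() const { return where_ == HANDLER_TYPE; }

 private:
  // Where did we find the result? Private, so callers must use the
  // predicates above instead of matching on the tag directly.
  enum { NOT_FOUND, DESCRIPTOR_TYPE, HANDLER_TYPE } where_;
};

int main() {
  Lookup r;
  assert(!r.IsFound());
  r.HandlerResult();
  assert(r.IsFound() && r.IsHandler());
  std::cout << "handler lookup\n";
  return 0;
}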
diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js
new file mode 100644
index 000000000..cb9c020e3
--- /dev/null
+++ b/deps/v8/src/proxy.js
@@ -0,0 +1,137 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+global.Proxy = new $Object();
+
+var $Proxy = global.Proxy
+
+var fundamentalTraps = [
+ "getOwnPropertyDescriptor",
+ "getPropertyDescriptor",
+ "getOwnPropertyNames",
+ "getPropertyNames",
+ "defineProperty",
+ "delete",
+ "fix",
+]
+
+var derivedTraps = [
+ "has",
+ "hasOwn",
+ "get",
+ "set",
+ "enumerate",
+ "keys",
+]
+
+var functionTraps = [
+ "callTrap",
+ "constructTrap",
+]
+
+$Proxy.createFunction = function(handler, callTrap, constructTrap) {
+ handler.callTrap = callTrap
+ handler.constructTrap = constructTrap
+  return $Proxy.create(handler)
+}
+
+$Proxy.create = function(handler, proto) {
+ if (!IS_SPEC_OBJECT(handler))
+ throw MakeTypeError("handler_non_object", ["create"])
+ if (!IS_SPEC_OBJECT(proto)) proto = null // Mozilla does this...
+ return %CreateJSProxy(handler, proto)
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Builtins
+////////////////////////////////////////////////////////////////////////////////
+
+function DerivedGetTrap(receiver, name) {
+ var desc = this.getPropertyDescriptor(name)
+ if (IS_UNDEFINED(desc)) { return desc }
+ if ('value' in desc) {
+ return desc.value
+ } else {
+ if (IS_UNDEFINED(desc.get)) { return desc.get }
+ // The proposal says: desc.get.call(receiver)
+ return %_CallFunction(receiver, desc.get)
+ }
+}
+
+function DerivedSetTrap(receiver, name, val) {
+ var desc = this.getOwnPropertyDescriptor(name)
+ if (desc) {
+ if ('writable' in desc) {
+ if (desc.writable) {
+ desc.value = val
+ this.defineProperty(name, desc)
+ return true
+ } else {
+ return false
+ }
+ } else { // accessor
+ if (desc.set) {
+ // The proposal says: desc.set.call(receiver, val)
+ %_CallFunction(receiver, val, desc.set)
+ return true
+ } else {
+ return false
+ }
+ }
+ }
+ desc = this.getPropertyDescriptor(name)
+ if (desc) {
+ if ('writable' in desc) {
+ if (desc.writable) {
+ // fall through
+ } else {
+ return false
+ }
+ } else { // accessor
+ if (desc.set) {
+ // The proposal says: desc.set.call(receiver, val)
+ %_CallFunction(receiver, val, desc.set)
+ return true
+ } else {
+ return false
+ }
+ }
+ }
+ this.defineProperty(name, {
+ value: val,
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ return true;
+}
+
+function DerivedHasTrap(name) {
+ return !!this.getPropertyDescriptor(name)
+}
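
DerivedSetTrap above implements [[Put]] in terms of the fundamental descriptor traps: a writable own data property is updated in place, an own accessor dispatches to its setter, an inherited descriptor either rejects the write or lets it fall through, and otherwise a fresh own property is defined. A sketch of the same decision tree in plain C++ over a toy descriptor type; GetOwn/GetInherited stand in for the handler's traps, and none of this is V8 API:

// Sketch only: the DerivedSetTrap decision tree, restated in C++.
#include <functional>
#include <optional>
#include <string>
#include <map>
#include <iostream>

struct Desc {
  bool is_data = true;           // data vs. accessor descriptor
  bool writable = false;
  int value = 0;
  std::function<void(int)> set;  // empty when the accessor has no setter
};

static std::map<std::string, Desc> own, proto;

std::optional<Desc> GetOwn(const std::string& n) {
  auto it = own.find(n);
  return it == own.end() ? std::nullopt : std::optional<Desc>(it->second);
}
std::optional<Desc> GetInherited(const std::string& n) {
  auto it = proto.find(n);
  return it == proto.end() ? std::nullopt : std::optional<Desc>(it->second);
}

bool DerivedSet(const std::string& name, int val) {
  if (auto d = GetOwn(name)) {
    if (d->is_data) {
      if (!d->writable) return false;  // rejected: read-only own data
      d->value = val;
      own[name] = *d;                  // "defineProperty" with the new value
      return true;
    }
    if (!d->set) return false;         // accessor without a setter
    d->set(val);
    return true;
  }
  if (auto d = GetInherited(name)) {
    if (d->is_data) {
      if (!d->writable) return false;  // shadowing a read-only slot fails
      // writable inherited data property: fall through and shadow it
    } else {
      if (!d->set) return false;
      d->set(val);
      return true;
    }
  }
  own[name] = Desc{true, true, val, {}};  // create a fresh own property
  return true;
}

int main() {
  proto["x"] = Desc{true, true, 1, {}};
  std::cout << DerivedSet("x", 42) << " " << own["x"].value << "\n";  // 1 42
  return 0;
}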
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp-macro-assembler-irregexp.cc
index 6fbb14add..322efa136 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.cc
@@ -435,10 +435,11 @@ void RegExpMacroAssemblerIrregexp::IfRegisterEqPos(int register_index,
}
-Handle<Object> RegExpMacroAssemblerIrregexp::GetCode(Handle<String> source) {
+Handle<HeapObject> RegExpMacroAssemblerIrregexp::GetCode(
+ Handle<String> source) {
Bind(&backtrack_);
Emit(BC_POP_BT, 0);
- Handle<ByteArray> array = Factory::NewByteArray(length());
+ Handle<ByteArray> array = FACTORY->NewByteArray(length());
Copy(array->GetDataStartAddress());
return array;
}
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp-macro-assembler-irregexp.h
index 9deea86f4..75cf8bf97 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.h
@@ -106,7 +106,7 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
virtual void IfRegisterEqPos(int register_index, Label* if_eq);
virtual IrregexpImplementation Implementation();
- virtual Handle<Object> GetCode(Handle<String> source);
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
private:
void Expand();
// Code and bitmap emission.
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc
index fa2c65790..b32d71dba 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp-macro-assembler-tracer.cc
@@ -365,7 +365,7 @@ RegExpMacroAssembler::IrregexpImplementation
}
-Handle<Object> RegExpMacroAssemblerTracer::GetCode(Handle<String> source) {
+Handle<HeapObject> RegExpMacroAssemblerTracer::GetCode(Handle<String> source) {
PrintF(" GetCode(%s);\n", *(source->ToCString()));
return assembler_->GetCode(source);
}
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp-macro-assembler-tracer.h
index 1fb6d5442..8c6cf3ab6 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.h
+++ b/deps/v8/src/regexp-macro-assembler-tracer.h
@@ -71,7 +71,7 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
virtual bool CheckSpecialCharacterClass(uc16 type,
Label* on_no_match);
virtual void Fail();
- virtual Handle<Object> GetCode(Handle<String> source);
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc
index 51f4015f6..55782431b 100644
--- a/deps/v8/src/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp-macro-assembler.cc
@@ -35,7 +35,7 @@
namespace v8 {
namespace internal {
-RegExpMacroAssembler::RegExpMacroAssembler() {
+RegExpMacroAssembler::RegExpMacroAssembler() : slow_safe_compiler_(false) {
}
@@ -54,7 +54,8 @@ bool RegExpMacroAssembler::CanReadUnaligned() {
#ifndef V8_INTERPRETED_REGEXP // Avoid unused code, e.g., on ARM.
-NativeRegExpMacroAssembler::NativeRegExpMacroAssembler() {
+NativeRegExpMacroAssembler::NativeRegExpMacroAssembler()
+ : RegExpMacroAssembler() {
}
@@ -64,7 +65,7 @@ NativeRegExpMacroAssembler::~NativeRegExpMacroAssembler() {
bool NativeRegExpMacroAssembler::CanReadUnaligned() {
#ifdef V8_TARGET_CAN_READ_UNALIGNED
- return true;
+ return !slow_safe();
#else
return false;
#endif
@@ -105,7 +106,8 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
Handle<String> subject,
int* offsets_vector,
int offsets_vector_length,
- int previous_index) {
+ int previous_index,
+ Isolate* isolate) {
ASSERT(subject->IsFlat());
ASSERT(previous_index >= 0);
@@ -142,7 +144,8 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
start_offset,
input_start,
input_end,
- offsets_vector);
+ offsets_vector,
+ isolate);
return res;
}
@@ -153,10 +156,12 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
int start_offset,
const byte* input_start,
const byte* input_end,
- int* output) {
+ int* output,
+ Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
// Ensure that the minimum stack has been allocated.
- RegExpStack stack;
- Address stack_base = RegExpStack::stack_base();
+ RegExpStackScope stack_scope(isolate);
+ Address stack_base = stack_scope.stack()->stack_base();
int direct_call = 0;
int result = CALL_GENERATED_REGEXP_CODE(code->entry(),
@@ -166,23 +171,21 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
input_end,
output,
stack_base,
- direct_call);
+ direct_call,
+ isolate);
ASSERT(result <= SUCCESS);
ASSERT(result >= RETRY);
- if (result == EXCEPTION && !Top::has_pending_exception()) {
+ if (result == EXCEPTION && !isolate->has_pending_exception()) {
// We detected a stack overflow (on the backtrack stack) in RegExp code,
// but haven't created the exception yet.
- Top::StackOverflow();
+ isolate->StackOverflow();
}
return static_cast<Result>(result);
}
-static unibrow::Mapping<unibrow::Ecma262Canonicalize> canonicalize;
-
-
-byte NativeRegExpMacroAssembler::word_character_map[] = {
+const byte NativeRegExpMacroAssembler::word_character_map[] = {
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
@@ -208,7 +211,11 @@ byte NativeRegExpMacroAssembler::word_character_map[] = {
int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
Address byte_offset1,
Address byte_offset2,
- size_t byte_length) {
+ size_t byte_length,
+ Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
+ isolate->regexp_macro_assembler_canonicalize();
// This function is not allowed to cause a garbage collection.
// A GC might move the calling generated code and invalidate the
// return address on the stack.
@@ -222,10 +229,10 @@ int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
unibrow::uchar c2 = substring2[i];
if (c1 != c2) {
unibrow::uchar s1[1] = { c1 };
- canonicalize.get(c1, '\0', s1);
+ canonicalize->get(c1, '\0', s1);
if (s1[0] != c2) {
unibrow::uchar s2[1] = { c2 };
- canonicalize.get(c2, '\0', s2);
+ canonicalize->get(c2, '\0', s2);
if (s1[0] != s2[0]) {
return 0;
}
@@ -237,13 +244,16 @@ int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
- Address* stack_base) {
- size_t size = RegExpStack::stack_capacity();
- Address old_stack_base = RegExpStack::stack_base();
+ Address* stack_base,
+ Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ RegExpStack* regexp_stack = isolate->regexp_stack();
+ size_t size = regexp_stack->stack_capacity();
+ Address old_stack_base = regexp_stack->stack_base();
ASSERT(old_stack_base == *stack_base);
ASSERT(stack_pointer <= old_stack_base);
ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
- Address new_stack_base = RegExpStack::EnsureCapacity(size * 2);
+ Address new_stack_base = regexp_stack->EnsureCapacity(size * 2);
if (new_stack_base == NULL) {
return NULL;
}
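
CaseInsensitiveCompareUC16 above only canonicalizes characters once a raw mismatch is seen: first the left character against the right, then both canonicalized forms against each other. A standalone sketch of that shape, with a toy ASCII canonicalizer standing in for the isolate's unibrow::Ecma262Canonicalize mapping:

// Sketch only: the CaseInsensitiveCompareUC16 comparison loop above,
// with 'canon' as a toy stand-in for the real canonicalization mapping.
#include <cstdint>
#include <cstddef>
#include <iostream>

using uc16 = uint16_t;

static uc16 canon(uc16 c) {
  return (c >= 'A' && c <= 'Z') ? static_cast<uc16>(c + ('a' - 'A')) : c;
}

// Returns 1 when the buffers match case-insensitively, 0 otherwise,
// mirroring the int result the generated code expects.
int CaseInsensitiveCompare(const uc16* s1, const uc16* s2, size_t length) {
  for (size_t i = 0; i < length; i++) {
    uc16 c1 = s1[i];
    uc16 c2 = s2[i];
    if (c1 != c2) {
      // Canonicalize only on mismatch, like the original: first canon(c1)
      // against c2, then canon(c1) against canon(c2).
      if (canon(c1) != c2 && canon(c1) != canon(c2)) return 0;
    }
  }
  return 1;
}

int main() {
  const uc16 a[] = {'R', 'e', 'g', 'E', 'x', 'p'};
  const uc16 b[] = {'r', 'E', 'G', 'e', 'X', 'P'};
  std::cout << CaseInsensitiveCompare(a, b, 6) << "\n";  // 1
  return 0;
}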
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h
index ef85d27e5..0314c707c 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp-macro-assembler.h
@@ -48,6 +48,7 @@ class RegExpMacroAssembler {
enum IrregexpImplementation {
kIA32Implementation,
kARMImplementation,
+ kMIPSImplementation,
kX64Implementation,
kBytecodeImplementation
};
@@ -129,7 +130,7 @@ class RegExpMacroAssembler {
return false;
}
virtual void Fail() = 0;
- virtual Handle<Object> GetCode(Handle<String> source) = 0;
+ virtual Handle<HeapObject> GetCode(Handle<String> source) = 0;
virtual void GoTo(Label* label) = 0;
// Check whether a register is >= a given constant and go to a label if it
// is. Backtracks instead if the label is NULL.
@@ -161,6 +162,13 @@ class RegExpMacroAssembler {
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset) = 0;
virtual void ClearRegisters(int reg_from, int reg_to) = 0;
virtual void WriteStackPointerToRegister(int reg) = 0;
+
+ // Controls the generation of large inlined constants in the code.
+ void set_slow_safe(bool ssc) { slow_safe_compiler_ = ssc; }
+ bool slow_safe() { return slow_safe_compiler_; }
+
+ private:
+ bool slow_safe_compiler_;
};
@@ -190,30 +198,33 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
Handle<String> subject,
int* offsets_vector,
int offsets_vector_length,
- int previous_index);
+ int previous_index,
+ Isolate* isolate);
// Compares two-byte strings case insensitively.
// Called from generated RegExp code.
static int CaseInsensitiveCompareUC16(Address byte_offset1,
Address byte_offset2,
- size_t byte_length);
+ size_t byte_length,
+ Isolate* isolate);
// Called from RegExp if the backtrack stack limit is hit.
// Tries to expand the stack. Returns the new stack-pointer if
// successful, and updates the stack_top address, or returns 0 if unable
// to grow the stack.
// This function must not trigger a garbage collection.
- static Address GrowStack(Address stack_pointer, Address* stack_top);
+ static Address GrowStack(Address stack_pointer, Address* stack_top,
+ Isolate* isolate);
static const byte* StringCharacterPosition(String* subject, int start_index);
// Byte map of ASCII characters with a 0xff if the character is a word
// character (digit, letter or underscore) and 0x00 otherwise.
// Used by generated RegExp code.
- static byte word_character_map[128];
+ static const byte word_character_map[128];
static Address word_character_map_address() {
- return &word_character_map[0];
+ return const_cast<Address>(&word_character_map[0]);
}
static Result Execute(Code* code,
@@ -221,7 +232,8 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
int start_offset,
const byte* input_start,
const byte* input_end,
- int* output);
+ int* output,
+ Isolate* isolate);
};
#endif // V8_INTERPRETED_REGEXP
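
word_character_map above is a 128-entry byte table with 0xff for word characters (digits, letters, underscore) and 0x00 otherwise, so generated RegExp code can test \w membership with a single indexed load. A sketch that builds the same table at startup; MakeWordCharacterMap is illustrative, since V8 hand-writes the const array:

// Sketch only: building and querying a word-character byte map like the
// one declared above.
#include <array>
#include <cstdint>
#include <iostream>

using byte = uint8_t;

static std::array<byte, 128> MakeWordCharacterMap() {
  std::array<byte, 128> map{};  // zero-initialized: 0x00 = not a word char
  for (int c = '0'; c <= '9'; c++) map[c] = 0xFF;
  for (int c = 'A'; c <= 'Z'; c++) map[c] = 0xFF;
  for (int c = 'a'; c <= 'z'; c++) map[c] = 0xFF;
  map['_'] = 0xFF;
  return map;
}

static const std::array<byte, 128> kWordCharacterMap = MakeWordCharacterMap();

int main() {
  // Generated code indexes the table directly; \w becomes one load.
  std::cout << (kWordCharacterMap['_'] != 0) << " "
            << (kWordCharacterMap['-'] != 0) << "\n";  // 1 0
  return 0;
}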
diff --git a/deps/v8/src/regexp-stack.cc b/deps/v8/src/regexp-stack.cc
index 7696279a1..ff9547f3a 100644
--- a/deps/v8/src/regexp-stack.cc
+++ b/deps/v8/src/regexp-stack.cc
@@ -26,21 +26,31 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
-#include "top.h"
#include "regexp-stack.h"
namespace v8 {
namespace internal {
-RegExpStack::RegExpStack() {
+RegExpStackScope::RegExpStackScope(Isolate* isolate)
+ : regexp_stack_(isolate->regexp_stack()) {
// Initialize, if not already initialized.
- RegExpStack::EnsureCapacity(0);
+ regexp_stack_->EnsureCapacity(0);
}
-RegExpStack::~RegExpStack() {
+RegExpStackScope::~RegExpStackScope() {
+ ASSERT(Isolate::Current() == regexp_stack_->isolate_);
// Reset the buffer if it has grown.
- RegExpStack::Reset();
+ regexp_stack_->Reset();
+}
+
+
+RegExpStack::RegExpStack()
+ : isolate_(NULL) {
+}
+
+
+RegExpStack::~RegExpStack() {
}
@@ -70,9 +80,9 @@ void RegExpStack::Reset() {
void RegExpStack::ThreadLocal::Free() {
- if (thread_local_.memory_size_ > 0) {
- DeleteArray(thread_local_.memory_);
- thread_local_ = ThreadLocal();
+ if (memory_size_ > 0) {
+ DeleteArray(memory_);
+ Clear();
}
}
@@ -98,6 +108,4 @@ Address RegExpStack::EnsureCapacity(size_t size) {
}
-RegExpStack::ThreadLocal RegExpStack::thread_local_;
-
}} // namespace v8::internal
diff --git a/deps/v8/src/regexp-stack.h b/deps/v8/src/regexp-stack.h
index b4fa2e920..59432067e 100644
--- a/deps/v8/src/regexp-stack.h
+++ b/deps/v8/src/regexp-stack.h
@@ -31,11 +31,30 @@
namespace v8 {
namespace internal {
+class RegExpStack;
+
// Maintains a per-v8thread stack area that can be used by irregexp
// implementation for its backtracking stack.
// Since there is only one stack area, the Irregexp implementation is not
// re-entrant. I.e., no regular expressions may be executed in the same thread
// during a preempted Irregexp execution.
+class RegExpStackScope {
+ public:
+ // Create and delete an instance to control the life-time of a growing stack.
+
+ // Initializes the stack memory area if necessary.
+ explicit RegExpStackScope(Isolate* isolate);
+ ~RegExpStackScope(); // Releases the stack if it has grown.
+
+ RegExpStack* stack() const { return regexp_stack_; }
+
+ private:
+ RegExpStack* regexp_stack_;
+
+ DISALLOW_COPY_AND_ASSIGN(RegExpStackScope);
+};
+
+
class RegExpStack {
public:
// Number of allocated locations on the stack below the limit.
@@ -43,39 +62,37 @@ class RegExpStack {
// check.
static const int kStackLimitSlack = 32;
- // Create and delete an instance to control the life-time of a growing stack.
- RegExpStack(); // Initializes the stack memory area if necessary.
- ~RegExpStack(); // Releases the stack if it has grown.
-
// Gives the top of the memory used as stack.
- static Address stack_base() {
+ Address stack_base() {
ASSERT(thread_local_.memory_size_ != 0);
return thread_local_.memory_ + thread_local_.memory_size_;
}
// The total size of the memory allocated for the stack.
- static size_t stack_capacity() { return thread_local_.memory_size_; }
+ size_t stack_capacity() { return thread_local_.memory_size_; }
// If the stack pointer gets below the limit, we should react and
// either grow the stack or report an out-of-stack exception.
// There is only a limited number of locations below the stack limit,
// so users of the stack should check the stack limit during any
// sequence of pushes longer than this.
- static Address* limit_address() { return &(thread_local_.limit_); }
+ Address* limit_address() { return &(thread_local_.limit_); }
// Ensures that there is a memory area with at least the specified size.
// If passing zero, the default/minimum size buffer is allocated.
- static Address EnsureCapacity(size_t size);
+ Address EnsureCapacity(size_t size);
// Thread local archiving.
static int ArchiveSpacePerThread() {
- return static_cast<int>(sizeof(thread_local_));
+ return static_cast<int>(sizeof(ThreadLocal));
}
- static char* ArchiveStack(char* to);
- static char* RestoreStack(char* from);
- static void FreeThreadResources() { thread_local_.Free(); }
-
+ char* ArchiveStack(char* to);
+ char* RestoreStack(char* from);
+ void FreeThreadResources() { thread_local_.Free(); }
private:
+ RegExpStack();
+ ~RegExpStack();
+
// Artificial limit used when no memory has been allocated.
static const uintptr_t kMemoryTop = static_cast<uintptr_t>(-1);
@@ -87,35 +104,42 @@ class RegExpStack {
// Structure holding the allocated memory, size and limit.
struct ThreadLocal {
- ThreadLocal()
- : memory_(NULL),
- memory_size_(0),
- limit_(reinterpret_cast<Address>(kMemoryTop)) {}
+ ThreadLocal() { Clear(); }
// If memory_size_ > 0 then memory_ must be non-NULL.
Address memory_;
size_t memory_size_;
Address limit_;
+ void Clear() {
+ memory_ = NULL;
+ memory_size_ = 0;
+ limit_ = reinterpret_cast<Address>(kMemoryTop);
+ }
void Free();
};
// Address of allocated memory.
- static Address memory_address() {
+ Address memory_address() {
return reinterpret_cast<Address>(&thread_local_.memory_);
}
// Address of size of allocated memory.
- static Address memory_size_address() {
+ Address memory_size_address() {
return reinterpret_cast<Address>(&thread_local_.memory_size_);
}
// Resets the buffer if it has grown beyond the default/minimum size.
// After this, the buffer is either the default size, or it is empty, so
// you have to call EnsureCapacity before using it again.
- static void Reset();
+ void Reset();
- static ThreadLocal thread_local_;
+ ThreadLocal thread_local_;
+ Isolate* isolate_;
friend class ExternalReference;
+ friend class Isolate;
+ friend class RegExpStackScope;
+
+ DISALLOW_COPY_AND_ASSIGN(RegExpStack);
};
}} // namespace v8::internal
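
RegExpStackScope above is an RAII wrapper: constructing it ensures the per-isolate backtracking stack has at least its minimum capacity, and destroying it resets a buffer that grew during matching. A sketch of the same pattern over a toy stack; ToyStack and StackScope are illustrative, not V8 types, and the toy EnsureCapacity omits copying live stack contents, which the real one preserves:

// Sketch only: the RegExpStackScope RAII pattern above.
#include <cstddef>
#include <cstdlib>
#include <iostream>

class ToyStack {
 public:
  static const size_t kMinSize = 256;
  ~ToyStack() { free(memory_); }
  void EnsureCapacity(size_t size) {
    if (size < kMinSize) size = kMinSize;
    if (size > size_) {  // the real one also copies the old contents over
      free(memory_);
      memory_ = static_cast<char*>(malloc(size));
      size_ = size;
    }
  }
  void Reset() {  // shrink a grown buffer back to the default size
    if (size_ > kMinSize) {
      free(memory_);
      memory_ = nullptr;
      size_ = 0;
      EnsureCapacity(0);
    }
  }
  size_t capacity() const { return size_; }

 private:
  char* memory_ = nullptr;
  size_t size_ = 0;
};

class StackScope {  // plays the RegExpStackScope role
 public:
  explicit StackScope(ToyStack* stack) : stack_(stack) {
    stack_->EnsureCapacity(0);  // initialize if not already initialized
  }
  ~StackScope() { stack_->Reset(); }  // release growth on scope exit
  ToyStack* stack() const { return stack_; }

 private:
  ToyStack* stack_;
};

int main() {
  ToyStack per_isolate_stack;
  {
    StackScope scope(&per_isolate_stack);
    scope.stack()->EnsureCapacity(4096);  // grown during matching
    std::cout << scope.stack()->capacity() << "\n";  // 4096
  }
  std::cout << per_isolate_stack.capacity() << "\n";  // 256: reset on exit
  return 0;
}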
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index 5b7e3a9d2..f68dee613 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -384,13 +384,13 @@ function RegExpMakeCaptureGetter(n) {
// pairs for the match and all the captured substrings), the invariant is
// that there are at least two capture indices. The array also contains
// the subject string for the last successful match.
-var lastMatchInfo = [
+var lastMatchInfo = new InternalArray(
2, // REGEXP_NUMBER_OF_CAPTURES
"", // Last subject.
void 0, // Last input - settable with RegExpSetInput.
0, // REGEXP_FIRST_CAPTURE + 0
- 0, // REGEXP_FIRST_CAPTURE + 1
-];
+ 0 // REGEXP_FIRST_CAPTURE + 1
+);
// Override last match info with an array of actual substrings.
// Used internally by replace regexp with function.
diff --git a/deps/v8/src/register-allocator-inl.h b/deps/v8/src/register-allocator-inl.h
deleted file mode 100644
index e0ea9e189..000000000
--- a/deps/v8/src/register-allocator-inl.h
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REGISTER_ALLOCATOR_INL_H_
-#define V8_REGISTER_ALLOCATOR_INL_H_
-
-#include "codegen.h"
-#include "register-allocator.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/register-allocator-ia32-inl.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/register-allocator-x64-inl.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/register-allocator-arm-inl.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/register-allocator-mips-inl.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-
-namespace v8 {
-namespace internal {
-
-Result::Result(const Result& other) {
- other.CopyTo(this);
-}
-
-
-Result& Result::operator=(const Result& other) {
- if (this != &other) {
- Unuse();
- other.CopyTo(this);
- }
- return *this;
-}
-
-
-Result::~Result() {
- if (is_register()) {
- CodeGeneratorScope::Current()->allocator()->Unuse(reg());
- }
-}
-
-
-void Result::Unuse() {
- if (is_register()) {
- CodeGeneratorScope::Current()->allocator()->Unuse(reg());
- }
- invalidate();
-}
-
-
-void Result::CopyTo(Result* destination) const {
- destination->value_ = value_;
- if (is_register()) {
- CodeGeneratorScope::Current()->allocator()->Use(reg());
- }
-}
-
-
-bool RegisterAllocator::is_used(Register reg) {
- return registers_.is_used(ToNumber(reg));
-}
-
-
-int RegisterAllocator::count(Register reg) {
- return registers_.count(ToNumber(reg));
-}
-
-
-void RegisterAllocator::Use(Register reg) {
- registers_.Use(ToNumber(reg));
-}
-
-
-void RegisterAllocator::Unuse(Register reg) {
- registers_.Unuse(ToNumber(reg));
-}
-
-
-TypeInfo Result::type_info() const {
- ASSERT(is_valid());
- return TypeInfo::FromInt(TypeInfoField::decode(value_));
-}
-
-
-void Result::set_type_info(TypeInfo info) {
- ASSERT(is_valid());
- value_ &= ~TypeInfoField::mask();
- value_ |= TypeInfoField::encode(info.ToInt());
-}
-
-
-bool Result::is_number() const {
- return type_info().IsNumber();
-}
-
-
-bool Result::is_smi() const {
- return type_info().IsSmi();
-}
-
-
-bool Result::is_integer32() const {
- return type_info().IsInteger32();
-}
-
-
-bool Result::is_double() const {
- return type_info().IsDouble();
-}
-
-} } // namespace v8::internal
-
-#endif // V8_REGISTER_ALLOCATOR_INL_H_
diff --git a/deps/v8/src/register-allocator.cc b/deps/v8/src/register-allocator.cc
deleted file mode 100644
index 31d0a49fa..000000000
--- a/deps/v8/src/register-allocator.cc
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-
-Result::Result(Register reg, TypeInfo info) {
- ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
- CodeGeneratorScope::Current()->allocator()->Use(reg);
- value_ = TypeField::encode(REGISTER)
- | TypeInfoField::encode(info.ToInt())
- | DataField::encode(reg.code_);
-}
-
-
-Result::ZoneObjectList* Result::ConstantList() {
- static ZoneObjectList list(10);
- return &list;
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-
-Result RegisterAllocator::AllocateWithoutSpilling() {
- // Return the first free register, if any.
- int num = registers_.ScanForFreeRegister();
- if (num == RegisterAllocator::kInvalidRegister) {
- return Result();
- }
- return Result(RegisterAllocator::ToRegister(num));
-}
-
-
-Result RegisterAllocator::Allocate() {
- Result result = AllocateWithoutSpilling();
- if (!result.is_valid()) {
- // Ask the current frame to spill a register.
- ASSERT(cgen_->has_valid_frame());
- Register free_reg = cgen_->frame()->SpillAnyRegister();
- if (free_reg.is_valid()) {
- ASSERT(!is_used(free_reg));
- return Result(free_reg);
- }
- }
- return result;
-}
-
-
-Result RegisterAllocator::Allocate(Register target) {
- // If the target is not referenced, it can simply be allocated.
- if (!is_used(RegisterAllocator::ToNumber(target))) {
- return Result(target);
- }
- // If the target is only referenced in the frame, it can be spilled and
- // then allocated.
- ASSERT(cgen_->has_valid_frame());
- if (cgen_->frame()->is_used(RegisterAllocator::ToNumber(target)) &&
- count(target) == 1) {
- cgen_->frame()->Spill(target);
- ASSERT(!is_used(RegisterAllocator::ToNumber(target)));
- return Result(target);
- }
- // Otherwise (if it's referenced outside the frame) we cannot allocate it.
- return Result();
-}
-
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/register-allocator.h b/deps/v8/src/register-allocator.h
deleted file mode 100644
index a03a9d2fb..000000000
--- a/deps/v8/src/register-allocator.h
+++ /dev/null
@@ -1,320 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REGISTER_ALLOCATOR_H_
-#define V8_REGISTER_ALLOCATOR_H_
-
-#include "macro-assembler.h"
-#include "type-info.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/register-allocator-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/register-allocator-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/register-allocator-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/register-allocator-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-
-// -------------------------------------------------------------------------
-// Results
-//
-// Results encapsulate the compile-time values manipulated by the code
-// generator. They can represent registers or constants.
-
-class Result BASE_EMBEDDED {
- public:
- enum Type {
- INVALID,
- REGISTER,
- CONSTANT
- };
-
- // Construct an invalid result.
- Result() { invalidate(); }
-
- // Construct a register Result.
- explicit Result(Register reg, TypeInfo info = TypeInfo::Unknown());
-
- // Construct a Result whose value is a compile-time constant.
- explicit Result(Handle<Object> value) {
- TypeInfo info = TypeInfo::TypeFromValue(value);
- value_ = TypeField::encode(CONSTANT)
- | TypeInfoField::encode(info.ToInt())
- | IsUntaggedInt32Field::encode(false)
- | DataField::encode(ConstantList()->length());
- ConstantList()->Add(value);
- }
-
- // The copy constructor and assignment operators could each create a new
- // register reference.
- inline Result(const Result& other);
-
- inline Result& operator=(const Result& other);
-
- inline ~Result();
-
- // Static indirection table for handles to constants. If a Result
- // represents a constant, the data contains an index into this table
- // of handles to the actual constants.
- typedef ZoneList<Handle<Object> > ZoneObjectList;
-
- static ZoneObjectList* ConstantList();
-
- // Clear the constants indirection table.
- static void ClearConstantList() {
- ConstantList()->Clear();
- }
-
- inline void Unuse();
-
- Type type() const { return TypeField::decode(value_); }
-
- void invalidate() { value_ = TypeField::encode(INVALID); }
-
- inline TypeInfo type_info() const;
- inline void set_type_info(TypeInfo info);
- inline bool is_number() const;
- inline bool is_smi() const;
- inline bool is_integer32() const;
- inline bool is_double() const;
-
- bool is_valid() const { return type() != INVALID; }
- bool is_register() const { return type() == REGISTER; }
- bool is_constant() const { return type() == CONSTANT; }
-
- // An untagged int32 Result contains a signed int32 in a register
- // or as a constant. These are only allowed in a side-effect-free
- // int32 calculation, and if a non-int32 input shows up or an overflow
- // occurs, we bail out and drop all the int32 values. Constants are
- // not converted to int32 until they are loaded into a register.
- bool is_untagged_int32() const {
- return IsUntaggedInt32Field::decode(value_);
- }
- void set_untagged_int32(bool value) {
- value_ &= ~IsUntaggedInt32Field::mask();
- value_ |= IsUntaggedInt32Field::encode(value);
- }
-
- Register reg() const {
- ASSERT(is_register());
- uint32_t reg = DataField::decode(value_);
- Register result;
- result.code_ = reg;
- return result;
- }
-
- Handle<Object> handle() const {
- ASSERT(type() == CONSTANT);
- return ConstantList()->at(DataField::decode(value_));
- }
-
- // Move this result to an arbitrary register. The register is not
- // necessarily spilled from the frame or even singly-referenced outside
- // it.
- void ToRegister();
-
- // Move this result to a specified register. The register is spilled from
- // the frame, and the register is singly-referenced (by this result)
- // outside the frame.
- void ToRegister(Register reg);
-
- private:
- uint32_t value_;
-
- // Declare BitFields with template parameters <type, start, size>.
- class TypeField: public BitField<Type, 0, 2> {};
- class TypeInfoField : public BitField<int, 2, 6> {};
- class IsUntaggedInt32Field : public BitField<bool, 8, 1> {};
- class DataField: public BitField<uint32_t, 9, 32 - 9> {};
-
- inline void CopyTo(Result* destination) const;
-
- friend class CodeGeneratorScope;
-};
-
-
-// -------------------------------------------------------------------------
-// Register file
-//
-// The register file tracks reference counts for the processor registers.
-// It is used by both the register allocator and the virtual frame.
-
-class RegisterFile BASE_EMBEDDED {
- public:
- RegisterFile() { Reset(); }
-
- void Reset() {
- for (int i = 0; i < kNumRegisters; i++) {
- ref_counts_[i] = 0;
- }
- }
-
- // Predicates and accessors for the reference counts.
- bool is_used(int num) {
- ASSERT(0 <= num && num < kNumRegisters);
- return ref_counts_[num] > 0;
- }
-
- int count(int num) {
- ASSERT(0 <= num && num < kNumRegisters);
- return ref_counts_[num];
- }
-
- // Record a use of a register by incrementing its reference count.
- void Use(int num) {
- ASSERT(0 <= num && num < kNumRegisters);
- ref_counts_[num]++;
- }
-
- // Record that a register will no longer be used by decrementing its
- // reference count.
- void Unuse(int num) {
- ASSERT(is_used(num));
- ref_counts_[num]--;
- }
-
- // Copy the reference counts from this register file to the other.
- void CopyTo(RegisterFile* other) {
- for (int i = 0; i < kNumRegisters; i++) {
- other->ref_counts_[i] = ref_counts_[i];
- }
- }
-
- private:
- // C++ doesn't like zero length arrays, so we make the array length 1 even if
- // we don't need it.
- static const int kNumRegisters =
- (RegisterAllocatorConstants::kNumRegisters == 0) ?
- 1 : RegisterAllocatorConstants::kNumRegisters;
-
- int ref_counts_[kNumRegisters];
-
- // Very fast inlined loop to find a free register. Used in
- // RegisterAllocator::AllocateWithoutSpilling. Returns
- // kInvalidRegister if no free register found.
- int ScanForFreeRegister() {
- for (int i = 0; i < RegisterAllocatorConstants::kNumRegisters; i++) {
- if (!is_used(i)) return i;
- }
- return RegisterAllocatorConstants::kInvalidRegister;
- }
-
- friend class RegisterAllocator;
-};
-
-
-// -------------------------------------------------------------------------
-// Register allocator
-//
-
-class RegisterAllocator BASE_EMBEDDED {
- public:
- static const int kNumRegisters =
- RegisterAllocatorConstants::kNumRegisters;
- static const int kInvalidRegister =
- RegisterAllocatorConstants::kInvalidRegister;
-
- explicit RegisterAllocator(CodeGenerator* cgen) : cgen_(cgen) {}
-
- // True if the register is reserved by the code generator, false if it
- // can be freely used by the allocator Defined in the
- // platform-specific XXX-inl.h files..
- static inline bool IsReserved(Register reg);
-
- // Convert between (unreserved) assembler registers and allocator
- // numbers. Defined in the platform-specific XXX-inl.h files.
- static inline int ToNumber(Register reg);
- static inline Register ToRegister(int num);
-
- // Predicates and accessors for the registers' reference counts.
- bool is_used(int num) { return registers_.is_used(num); }
- inline bool is_used(Register reg);
-
- int count(int num) { return registers_.count(num); }
- inline int count(Register reg);
-
- // Explicitly record a reference to a register.
- void Use(int num) { registers_.Use(num); }
- inline void Use(Register reg);
-
- // Explicitly record that a register will no longer be used.
- void Unuse(int num) { registers_.Unuse(num); }
- inline void Unuse(Register reg);
-
- // Reset the register reference counts to free all non-reserved registers.
- void Reset() { registers_.Reset(); }
-
- // Initialize the register allocator for entry to a JS function. On
- // entry, the (non-reserved) registers used by the JS calling
- // convention are referenced and the other (non-reserved) registers
- // are free.
- inline void Initialize();
-
- // Allocate a free register and return a register result if possible or
- // fail and return an invalid result.
- Result Allocate();
-
- // Allocate a specific register if possible, spilling it from the
- // current frame if necessary, or else fail and return an invalid
- // result.
- Result Allocate(Register target);
-
- // Allocate a free register without spilling any from the current
- // frame or fail and return an invalid result.
- Result AllocateWithoutSpilling();
-
- // Allocate a free byte register without spilling any from the current
- // frame or fail and return an invalid result.
- Result AllocateByteRegisterWithoutSpilling();
-
- // Copy the internal state to a register file, to be restored later by
- // RestoreFrom.
- void SaveTo(RegisterFile* register_file) {
- registers_.CopyTo(register_file);
- }
-
- // Restore the internal state.
- void RestoreFrom(RegisterFile* register_file) {
- register_file->CopyTo(&registers_);
- }
-
- private:
- CodeGenerator* cgen_;
- RegisterFile registers_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_REGISTER_ALLOCATOR_H_
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index fd40cdc3f..aa274d484 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -36,649 +36,6 @@
namespace v8 {
namespace internal {
-class AstOptimizer: public AstVisitor {
- public:
- explicit AstOptimizer() : has_function_literal_(false) {}
-
- void Optimize(ZoneList<Statement*>* statements);
-
- private:
- // Used for loop condition analysis. Cleared before visiting a loop
- // condition, set when a function literal is visited.
- bool has_function_literal_;
-
- // Helpers
- void OptimizeArguments(ZoneList<Expression*>* arguments);
-
- // Node visitors.
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- DISALLOW_COPY_AND_ASSIGN(AstOptimizer);
-};
-
-
-void AstOptimizer::Optimize(ZoneList<Statement*>* statements) {
- int len = statements->length();
- for (int i = 0; i < len; i++) {
- Visit(statements->at(i));
- }
-}
-
-
-void AstOptimizer::OptimizeArguments(ZoneList<Expression*>* arguments) {
- for (int i = 0; i < arguments->length(); i++) {
- Visit(arguments->at(i));
- }
-}
-
-
-void AstOptimizer::VisitBlock(Block* node) {
- Optimize(node->statements());
-}
-
-
-void AstOptimizer::VisitExpressionStatement(ExpressionStatement* node) {
- node->expression()->set_no_negative_zero(true);
- Visit(node->expression());
-}
-
-
-void AstOptimizer::VisitIfStatement(IfStatement* node) {
- node->condition()->set_no_negative_zero(true);
- Visit(node->condition());
- Visit(node->then_statement());
- if (node->HasElseStatement()) {
- Visit(node->else_statement());
- }
-}
-
-
-void AstOptimizer::VisitDoWhileStatement(DoWhileStatement* node) {
- node->cond()->set_no_negative_zero(true);
- Visit(node->cond());
- Visit(node->body());
-}
-
-
-void AstOptimizer::VisitWhileStatement(WhileStatement* node) {
- has_function_literal_ = false;
- node->cond()->set_no_negative_zero(true);
- Visit(node->cond());
- node->set_may_have_function_literal(has_function_literal_);
- Visit(node->body());
-}
-
-
-void AstOptimizer::VisitForStatement(ForStatement* node) {
- if (node->init() != NULL) {
- Visit(node->init());
- }
- if (node->cond() != NULL) {
- has_function_literal_ = false;
- node->cond()->set_no_negative_zero(true);
- Visit(node->cond());
- node->set_may_have_function_literal(has_function_literal_);
- }
- Visit(node->body());
- if (node->next() != NULL) {
- Visit(node->next());
- }
-}
-
-
-void AstOptimizer::VisitForInStatement(ForInStatement* node) {
- Visit(node->each());
- Visit(node->enumerable());
- Visit(node->body());
-}
-
-
-void AstOptimizer::VisitTryCatchStatement(TryCatchStatement* node) {
- Visit(node->try_block());
- Visit(node->catch_var());
- Visit(node->catch_block());
-}
-
-
-void AstOptimizer::VisitTryFinallyStatement(TryFinallyStatement* node) {
- Visit(node->try_block());
- Visit(node->finally_block());
-}
-
-
-void AstOptimizer::VisitSwitchStatement(SwitchStatement* node) {
- node->tag()->set_no_negative_zero(true);
- Visit(node->tag());
- for (int i = 0; i < node->cases()->length(); i++) {
- CaseClause* clause = node->cases()->at(i);
- if (!clause->is_default()) {
- Visit(clause->label());
- }
- Optimize(clause->statements());
- }
-}
-
-
-void AstOptimizer::VisitContinueStatement(ContinueStatement* node) {
- USE(node);
-}
-
-
-void AstOptimizer::VisitBreakStatement(BreakStatement* node) {
- USE(node);
-}
-
-
-void AstOptimizer::VisitDeclaration(Declaration* node) {
- // Will not be reached by the current optimizations.
- USE(node);
-}
-
-
-void AstOptimizer::VisitEmptyStatement(EmptyStatement* node) {
- USE(node);
-}
-
-
-void AstOptimizer::VisitReturnStatement(ReturnStatement* node) {
- Visit(node->expression());
-}
-
-
-void AstOptimizer::VisitWithEnterStatement(WithEnterStatement* node) {
- Visit(node->expression());
-}
-
-
-void AstOptimizer::VisitWithExitStatement(WithExitStatement* node) {
- USE(node);
-}
-
-
-void AstOptimizer::VisitDebuggerStatement(DebuggerStatement* node) {
- USE(node);
-}
-
-
-void AstOptimizer::VisitFunctionLiteral(FunctionLiteral* node) {
- has_function_literal_ = true;
-}
-
-
-void AstOptimizer::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- USE(node);
-}
-
-
-void AstOptimizer::VisitConditional(Conditional* node) {
- node->condition()->set_no_negative_zero(true);
- Visit(node->condition());
- Visit(node->then_expression());
- Visit(node->else_expression());
-}
-
-
-void AstOptimizer::VisitVariableProxy(VariableProxy* node) {
- Variable* var = node->AsVariable();
- if (var != NULL) {
- if (var->type()->IsKnown()) {
- node->type()->CopyFrom(var->type());
- } else if (node->type()->IsLikelySmi()) {
- var->type()->SetAsLikelySmi();
- }
-
- if (FLAG_safe_int32_compiler) {
- if (var->IsStackAllocated() &&
- !var->is_arguments() &&
- var->mode() != Variable::CONST) {
- node->set_side_effect_free(true);
- }
- }
- }
-}
-
-
-void AstOptimizer::VisitLiteral(Literal* node) {
- Handle<Object> literal = node->handle();
- if (literal->IsSmi()) {
- node->type()->SetAsLikelySmi();
- node->set_side_effect_free(true);
- } else if (literal->IsHeapNumber()) {
- if (node->to_int32()) {
- // Any HeapNumber has an int32 value if it is the input to a bit op.
- node->set_side_effect_free(true);
- } else {
- double double_value = HeapNumber::cast(*literal)->value();
- int32_t int32_value = DoubleToInt32(double_value);
- node->set_side_effect_free(double_value == int32_value);
- }
- }
-}
-
-
-void AstOptimizer::VisitRegExpLiteral(RegExpLiteral* node) {
- USE(node);
-}
-
-
-void AstOptimizer::VisitArrayLiteral(ArrayLiteral* node) {
- for (int i = 0; i < node->values()->length(); i++) {
- Visit(node->values()->at(i));
- }
-}
-
-void AstOptimizer::VisitObjectLiteral(ObjectLiteral* node) {
- for (int i = 0; i < node->properties()->length(); i++) {
- Visit(node->properties()->at(i)->key());
- Visit(node->properties()->at(i)->value());
- }
-}
-
-
-void AstOptimizer::VisitCatchExtensionObject(CatchExtensionObject* node) {
- Visit(node->key());
- Visit(node->value());
-}
-
-
-void AstOptimizer::VisitAssignment(Assignment* node) {
- switch (node->op()) {
- case Token::INIT_VAR:
- case Token::INIT_CONST:
- case Token::ASSIGN:
- // No type can be infered from the general assignment.
- break;
- case Token::ASSIGN_BIT_OR:
- case Token::ASSIGN_BIT_XOR:
- case Token::ASSIGN_BIT_AND:
- case Token::ASSIGN_SHL:
- case Token::ASSIGN_SAR:
- case Token::ASSIGN_SHR:
- node->type()->SetAsLikelySmiIfUnknown();
- node->target()->type()->SetAsLikelySmiIfUnknown();
- node->value()->type()->SetAsLikelySmiIfUnknown();
- node->value()->set_to_int32(true);
- node->value()->set_no_negative_zero(true);
- break;
- case Token::ASSIGN_ADD:
- case Token::ASSIGN_SUB:
- case Token::ASSIGN_MUL:
- case Token::ASSIGN_DIV:
- case Token::ASSIGN_MOD:
- if (node->type()->IsLikelySmi()) {
- node->target()->type()->SetAsLikelySmiIfUnknown();
- node->value()->type()->SetAsLikelySmiIfUnknown();
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- Visit(node->target());
- Visit(node->value());
-
- switch (node->op()) {
- case Token::INIT_VAR:
- case Token::INIT_CONST:
- case Token::ASSIGN:
- // Pure assignment copies the type from the value.
- node->type()->CopyFrom(node->value()->type());
- break;
- case Token::ASSIGN_BIT_OR:
- case Token::ASSIGN_BIT_XOR:
- case Token::ASSIGN_BIT_AND:
- case Token::ASSIGN_SHL:
- case Token::ASSIGN_SAR:
- case Token::ASSIGN_SHR:
- // Should have been setup above already.
- break;
- case Token::ASSIGN_ADD:
- case Token::ASSIGN_SUB:
- case Token::ASSIGN_MUL:
- case Token::ASSIGN_DIV:
- case Token::ASSIGN_MOD:
- if (node->type()->IsUnknown()) {
- if (node->target()->type()->IsLikelySmi() ||
- node->value()->type()->IsLikelySmi()) {
- node->type()->SetAsLikelySmi();
- }
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // Since this is an assignment. We have to propagate this node's type to the
- // variable.
- VariableProxy* proxy = node->target()->AsVariableProxy();
- if (proxy != NULL) {
- Variable* var = proxy->AsVariable();
- if (var != NULL) {
- StaticType* var_type = var->type();
- if (var_type->IsUnknown()) {
- var_type->CopyFrom(node->type());
- } else if (var_type->IsLikelySmi()) {
- // We do not reset likely types to Unknown.
- }
- }
- }
-}
-
-
-void AstOptimizer::VisitThrow(Throw* node) {
- Visit(node->exception());
-}
-
-
-void AstOptimizer::VisitProperty(Property* node) {
- node->key()->set_no_negative_zero(true);
- Visit(node->obj());
- Visit(node->key());
-}
-
-
-void AstOptimizer::VisitCall(Call* node) {
- Visit(node->expression());
- OptimizeArguments(node->arguments());
-}
-
-
-void AstOptimizer::VisitCallNew(CallNew* node) {
- Visit(node->expression());
- OptimizeArguments(node->arguments());
-}
-
-
-void AstOptimizer::VisitCallRuntime(CallRuntime* node) {
- OptimizeArguments(node->arguments());
-}
-
-
-void AstOptimizer::VisitUnaryOperation(UnaryOperation* node) {
- if (node->op() == Token::ADD || node->op() == Token::SUB) {
- node->expression()->set_no_negative_zero(node->no_negative_zero());
- } else {
- node->expression()->set_no_negative_zero(true);
- }
- Visit(node->expression());
- if (FLAG_safe_int32_compiler) {
- switch (node->op()) {
- case Token::BIT_NOT:
- node->expression()->set_no_negative_zero(true);
- node->expression()->set_to_int32(true);
- // Fall through.
- case Token::ADD:
- case Token::SUB:
- node->set_side_effect_free(node->expression()->side_effect_free());
- break;
- case Token::NOT:
- case Token::DELETE:
- case Token::TYPEOF:
- case Token::VOID:
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else if (node->op() == Token::BIT_NOT) {
- node->expression()->set_to_int32(true);
- }
-}
-
-
-void AstOptimizer::VisitIncrementOperation(IncrementOperation* node) {
- UNREACHABLE();
-}
-
-
-void AstOptimizer::VisitCountOperation(CountOperation* node) {
- // Count operations assume that they work on Smis.
- node->expression()->set_no_negative_zero(node->is_prefix() ?
- true :
- node->no_negative_zero());
- node->type()->SetAsLikelySmiIfUnknown();
- node->expression()->type()->SetAsLikelySmiIfUnknown();
- Visit(node->expression());
-}
-
-
-static bool CouldBeNegativeZero(AstNode* node) {
- Literal* literal = node->AsLiteral();
- if (literal != NULL) {
- Handle<Object> handle = literal->handle();
- if (handle->IsString() || handle->IsSmi()) {
- return false;
- } else if (handle->IsHeapNumber()) {
- double double_value = HeapNumber::cast(*handle)->value();
- if (double_value != 0) {
- return false;
- }
- }
- }
- BinaryOperation* binary = node->AsBinaryOperation();
- if (binary != NULL && Token::IsBitOp(binary->op())) {
- return false;
- }
- return true;
-}
-
-
-static bool CouldBePositiveZero(AstNode* node) {
- Literal* literal = node->AsLiteral();
- if (literal != NULL) {
- Handle<Object> handle = literal->handle();
- if (handle->IsSmi()) {
- if (Smi::cast(*handle) != Smi::FromInt(0)) {
- return false;
- }
- } else if (handle->IsHeapNumber()) {
- // Heap number literal can't be +0, because that's a Smi.
- return false;
- }
- }
- return true;
-}
-
-
-void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
- // Depending on the operation we can propagate this node's type down the
- // AST nodes.
- Token::Value op = node->op();
- switch (op) {
- case Token::COMMA:
- case Token::OR:
- node->left()->set_no_negative_zero(true);
- node->right()->set_no_negative_zero(node->no_negative_zero());
- break;
- case Token::AND:
- node->left()->set_no_negative_zero(node->no_negative_zero());
- node->right()->set_no_negative_zero(node->no_negative_zero());
- break;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- node->type()->SetAsLikelySmiIfUnknown();
- node->left()->type()->SetAsLikelySmiIfUnknown();
- node->right()->type()->SetAsLikelySmiIfUnknown();
- node->left()->set_to_int32(true);
- node->right()->set_to_int32(true);
- node->left()->set_no_negative_zero(true);
- node->right()->set_no_negative_zero(true);
- break;
- case Token::MUL: {
- VariableProxy* lvar_proxy = node->left()->AsVariableProxy();
- VariableProxy* rvar_proxy = node->right()->AsVariableProxy();
- if (lvar_proxy != NULL && rvar_proxy != NULL) {
- Variable* lvar = lvar_proxy->AsVariable();
- Variable* rvar = rvar_proxy->AsVariable();
- if (lvar != NULL && rvar != NULL) {
- if (lvar->mode() == Variable::VAR && rvar->mode() == Variable::VAR) {
- Slot* lslot = lvar->AsSlot();
- Slot* rslot = rvar->AsSlot();
- if (lslot->type() == rslot->type() &&
- (lslot->type() == Slot::PARAMETER ||
- lslot->type() == Slot::LOCAL) &&
- lslot->index() == rslot->index()) {
- // A number squared doesn't give negative zero.
- node->set_no_negative_zero(true);
- }
- }
- }
- }
- }
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD: {
- if (node->type()->IsLikelySmi()) {
- node->left()->type()->SetAsLikelySmiIfUnknown();
- node->right()->type()->SetAsLikelySmiIfUnknown();
- }
- if (op == Token::ADD && (!CouldBeNegativeZero(node->left()) ||
- !CouldBeNegativeZero(node->right()))) {
- node->left()->set_no_negative_zero(true);
- node->right()->set_no_negative_zero(true);
- } else if (op == Token::SUB && (!CouldBeNegativeZero(node->left()) ||
- !CouldBePositiveZero(node->right()))) {
- node->left()->set_no_negative_zero(true);
- node->right()->set_no_negative_zero(true);
- } else {
- node->left()->set_no_negative_zero(node->no_negative_zero());
- node->right()->set_no_negative_zero(node->no_negative_zero());
- }
- if (node->op() == Token::DIV) {
- node->right()->set_no_negative_zero(false);
- } else if (node->op() == Token::MOD) {
- node->right()->set_no_negative_zero(true);
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- Visit(node->left());
- Visit(node->right());
-
- // After visiting the operand nodes we have to check if this node's type
- // can be updated. If it does, then we can push that information down
- // towards the leaves again if the new information is an upgrade over the
- // previous type of the operand nodes.
- if (node->type()->IsUnknown()) {
- if (node->left()->type()->IsLikelySmi() ||
- node->right()->type()->IsLikelySmi()) {
- node->type()->SetAsLikelySmi();
- }
- if (node->type()->IsLikelySmi()) {
- // The type of this node changed to LIKELY_SMI. Propagate this knowledge
- // down through the nodes.
- if (node->left()->type()->IsUnknown()) {
- node->left()->type()->SetAsLikelySmi();
- Visit(node->left());
- }
- if (node->right()->type()->IsUnknown()) {
- node->right()->type()->SetAsLikelySmi();
- Visit(node->right());
- }
- }
- }
-
- if (FLAG_safe_int32_compiler) {
- switch (node->op()) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- break;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Add one to the number of bit operations in this expression.
- node->set_num_bit_ops(1);
- // Fall through.
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- node->set_side_effect_free(node->left()->side_effect_free() &&
- node->right()->side_effect_free());
- node->set_num_bit_ops(node->num_bit_ops() +
- node->left()->num_bit_ops() +
- node->right()->num_bit_ops());
- if (!node->no_negative_zero() && node->op() == Token::MUL) {
- node->set_side_effect_free(false);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
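The negative-zero rules encoded above follow directly from IEEE-754 double arithmetic: x + y is -0 only when both operands are -0, x - y is -0 only for (-0) - (+0), a value squared is never -0, a division's sign always depends on the divisor, and a modulus takes its sign from the left operand alone. A minimal standalone C++ check of these identities (illustrative, not part of the diff):

#include <cassert>
#include <cmath>   // std::signbit, std::fmod

int main() {
  double pz = 0.0, nz = -0.0;
  assert(std::signbit(nz + nz));             // ADD: -0 needs both operands -0
  assert(!std::signbit(pz + nz));
  assert(std::signbit(nz - pz));             // SUB: -0 only for (-0) - (+0)
  assert(!std::signbit(pz - pz));
  assert(!std::signbit(nz * nz));            // MUL: a number squared is never -0
  assert(std::signbit(pz / -3.0));           // DIV: the divisor's sign matters
  assert(std::signbit(std::fmod(nz, 3.0)));  // MOD: sign comes from the left operand
  return 0;
}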
-
-
-void AstOptimizer::VisitCompareOperation(CompareOperation* node) {
- if (node->type()->IsKnown()) {
- // Propagate useful information down towards the leaves.
- node->left()->type()->SetAsLikelySmiIfUnknown();
- node->right()->type()->SetAsLikelySmiIfUnknown();
- }
-
- node->left()->set_no_negative_zero(true);
- // Only [[HasInstance]] has the right argument passed unchanged to it.
- node->right()->set_no_negative_zero(true);
-
- Visit(node->left());
- Visit(node->right());
-
- // After visiting the operand nodes we have to check if this node's type
-  // can be updated. If it can, then we can push that information down
- // towards the leaves again if the new information is an upgrade over the
- // previous type of the operand nodes.
- if (node->type()->IsUnknown()) {
- if (node->left()->type()->IsLikelySmi() ||
- node->right()->type()->IsLikelySmi()) {
- node->type()->SetAsLikelySmi();
- }
- if (node->type()->IsLikelySmi()) {
- // The type of this node changed to LIKELY_SMI. Propagate this knowledge
- // down through the nodes.
- if (node->left()->type()->IsUnknown()) {
- node->left()->type()->SetAsLikelySmi();
- Visit(node->left());
- }
- if (node->right()->type()->IsUnknown()) {
- node->right()->type()->SetAsLikelySmi();
- Visit(node->right());
- }
- }
- }
-}
-
-
-void AstOptimizer::VisitCompareToNull(CompareToNull* node) {
- Visit(node->expression());
-}
-
-
-void AstOptimizer::VisitThisFunction(ThisFunction* node) {
- USE(node);
-}
-
-
class Processor: public AstVisitor {
public:
explicit Processor(Variable* result)
@@ -840,142 +197,18 @@ void Processor::VisitBreakStatement(BreakStatement* node) {
void Processor::VisitDeclaration(Declaration* node) {}
void Processor::VisitEmptyStatement(EmptyStatement* node) {}
void Processor::VisitReturnStatement(ReturnStatement* node) {}
-void Processor::VisitWithEnterStatement(WithEnterStatement* node) {}
-void Processor::VisitWithExitStatement(WithExitStatement* node) {}
+void Processor::VisitEnterWithContextStatement(
+ EnterWithContextStatement* node) {
+}
+void Processor::VisitExitContextStatement(ExitContextStatement* node) {}
void Processor::VisitDebuggerStatement(DebuggerStatement* node) {}
// Expressions are currently never visited.
-void Processor::VisitFunctionLiteral(FunctionLiteral* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitConditional(Conditional* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitVariableProxy(VariableProxy* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitLiteral(Literal* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitRegExpLiteral(RegExpLiteral* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitArrayLiteral(ArrayLiteral* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitObjectLiteral(ObjectLiteral* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCatchExtensionObject(CatchExtensionObject* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitAssignment(Assignment* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitThrow(Throw* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitProperty(Property* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCall(Call* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCallNew(CallNew* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCallRuntime(CallRuntime* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitUnaryOperation(UnaryOperation* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitIncrementOperation(IncrementOperation* node) {
- UNREACHABLE();
-}
-
-
-void Processor::VisitCountOperation(CountOperation* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitBinaryOperation(BinaryOperation* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCompareOperation(CompareOperation* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCompareToNull(CompareToNull* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitThisFunction(ThisFunction* node) {
- USE(node);
- UNREACHABLE();
-}
+#define DEF_VISIT(type) \
+ void Processor::Visit##type(type* expr) { UNREACHABLE(); }
+EXPRESSION_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
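The two macro lines that replace the removed stubs are the X-macro idiom: EXPRESSION_NODE_LIST applies the caller-supplied DEF_VISIT macro to every expression node type, generating one UNREACHABLE() stub per type. A minimal sketch of the pattern, with a hypothetical two-entry list standing in for V8's real one:

#include <cstdio>

// Hypothetical node list; V8's real EXPRESSION_NODE_LIST has one entry
// per expression AST class.
#define EXPR_LIST(V) \
  V(Literal)         \
  V(Assignment)

// Expand one stub visitor per listed type, as the diff does above.
#define DEF_VISIT(type) \
  void Visit##type() { std::printf("visiting %s\n", #type); }
EXPR_LIST(DEF_VISIT)
#undef DEF_VISIT

int main() {
  VisitLiteral();     // both functions were generated by the expansion
  VisitAssignment();
  return 0;
}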
// Assumes code has been parsed and scopes have been analyzed. Mutates the
@@ -989,7 +222,8 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
ZoneList<Statement*>* body = function->body();
if (!body->is_empty()) {
- Variable* result = scope->NewTemporary(Factory::result_symbol());
+ Variable* result = scope->NewTemporary(
+ info->isolate()->factory()->result_symbol());
Processor processor(result);
processor.Process(body);
if (processor.HasStackOverflow()) return false;
@@ -1004,20 +238,4 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
}
-// Assumes code has been parsed and scopes have been analyzed. Mutates the
-// AST, so the AST should not continue to be used in the case of failure.
-bool Rewriter::Analyze(CompilationInfo* info) {
- FunctionLiteral* function = info->function();
- ASSERT(function != NULL && function->scope() != NULL);
-
- ZoneList<Statement*>* body = function->body();
- if (FLAG_optimize_ast && !body->is_empty()) {
- AstOptimizer optimizer;
- optimizer.Optimize(body);
- if (optimizer.HasStackOverflow()) return false;
- }
- return true;
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/rewriter.h b/deps/v8/src/rewriter.h
index 62e1b7f72..59914d97f 100644
--- a/deps/v8/src/rewriter.h
+++ b/deps/v8/src/rewriter.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -42,15 +42,6 @@ class Rewriter {
// Assumes code has been parsed and scopes have been analyzed. Mutates the
// AST, so the AST should not continue to be used in the case of failure.
static bool Rewrite(CompilationInfo* info);
-
- // Perform a suite of simple non-iterative analyses of the AST. Mark
- // expressions that are likely smis, expressions without side effects,
- // expressions whose value will be converted to Int32, and expressions in a
- // context where +0 and -0 are treated the same.
- //
- // Assumes code has been parsed and scopes have been analyzed. Mutates the
- // AST, so the AST should not continue to be used in the case of failure.
- static bool Analyze(CompilationInfo* info);
};
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index df6471e9d..816569a02 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -36,8 +36,8 @@
#include "execution.h"
#include "global-handles.h"
#include "mark-compact.h"
+#include "platform.h"
#include "scopeinfo.h"
-#include "top.h"
namespace v8 {
namespace internal {
@@ -69,16 +69,9 @@ class PendingListNode : public Malloced {
};
-enum SamplerState {
- IN_NON_JS_STATE = 0,
- IN_JS_STATE = 1
-};
-
-
// Optimization sampler constants.
static const int kSamplerFrameCount = 2;
static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
-static const int kSamplerWindowSize = 16;
static const int kSamplerTicksBetweenThresholdAdjustment = 32;
@@ -92,34 +85,19 @@ static const int kSamplerThresholdSizeFactorDelta = 1;
static const int kSizeLimit = 1500;
-static int sampler_threshold = kSamplerThresholdInit;
-static int sampler_threshold_size_factor = kSamplerThresholdSizeFactorInit;
-
-static int sampler_ticks_until_threshold_adjustment =
- kSamplerTicksBetweenThresholdAdjustment;
-
-// The ratio of ticks spent in JS code in percent.
-static Atomic32 js_ratio;
-
-static Object* sampler_window[kSamplerWindowSize] = { NULL, };
-static int sampler_window_position = 0;
-static int sampler_window_weight[kSamplerWindowSize] = { 0, };
-
-
-// Support for pending 'optimize soon' requests.
-static PendingListNode* optimize_soon_list = NULL;
-
PendingListNode::PendingListNode(JSFunction* function) : next_(NULL) {
- function_ = GlobalHandles::Create(function);
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ function_ = global_handles->Create(function);
start_ = OS::Ticks();
- GlobalHandles::MakeWeak(function_.location(), this, &WeakCallback);
+ global_handles->MakeWeak(function_.location(), this, &WeakCallback);
}
void PendingListNode::Destroy() {
if (!IsValid()) return;
- GlobalHandles::Destroy(function_.location());
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ global_handles->Destroy(function_.location());
  function_ = Handle<Object>::null();
}
@@ -129,17 +107,53 @@ void PendingListNode::WeakCallback(v8::Persistent<v8::Value>, void* data) {
}
-static bool IsOptimizable(JSFunction* function) {
- Code* code = function->code();
- return code->kind() == Code::FUNCTION && code->optimizable();
+Atomic32 RuntimeProfiler::state_ = 0;
+// TODO(isolates): Create the semaphore lazily and clean it up when no
+// longer required.
+#ifdef ENABLE_LOGGING_AND_PROFILING
+Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0);
+#endif
+
+#ifdef DEBUG
+bool RuntimeProfiler::has_been_globally_setup_ = false;
+#endif
+bool RuntimeProfiler::enabled_ = false;
+
+
+RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
+ : isolate_(isolate),
+ sampler_threshold_(kSamplerThresholdInit),
+ sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
+ sampler_ticks_until_threshold_adjustment_(
+ kSamplerTicksBetweenThresholdAdjustment),
+ js_ratio_(0),
+ sampler_window_position_(0),
+ optimize_soon_list_(NULL),
+ state_window_position_(0),
+ state_window_ticks_(0) {
+ state_counts_[IN_NON_JS_STATE] = kStateWindowSize;
+ state_counts_[IN_JS_STATE] = 0;
+ STATIC_ASSERT(IN_NON_JS_STATE == 0);
+ memset(state_window_, 0, sizeof(state_window_));
+ ClearSampleBuffer();
+}
+
+
+void RuntimeProfiler::GlobalSetup() {
+ ASSERT(!has_been_globally_setup_);
+ enabled_ = V8::UseCrankshaft() && FLAG_opt;
+#ifdef DEBUG
+ has_been_globally_setup_ = true;
+#endif
}
-static void Optimize(JSFunction* function, bool eager, int delay) {
- ASSERT(IsOptimizable(function));
+void RuntimeProfiler::Optimize(JSFunction* function, bool eager, int delay) {
+ ASSERT(function->IsOptimizable());
if (FLAG_trace_opt) {
PrintF("[marking (%s) ", eager ? "eagerly" : "lazily");
function->PrintName();
+ PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
PrintF(" for recompilation");
if (delay > 0) {
PrintF(" (delayed %0.3f ms)", static_cast<double>(delay) / 1000);
@@ -152,11 +166,13 @@ static void Optimize(JSFunction* function, bool eager, int delay) {
}
-static void AttemptOnStackReplacement(JSFunction* function) {
+void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// See AlwaysFullCompiler (in compiler.cc) comment on why we need
// Debug::has_break_points().
ASSERT(function->IsMarkedForLazyRecompilation());
- if (!FLAG_use_osr || Debug::has_break_points() || function->IsBuiltin()) {
+ if (!FLAG_use_osr ||
+ isolate_->DebuggerHasBreakPoints() ||
+ function->IsBuiltin()) {
return;
}
@@ -169,7 +185,7 @@ static void AttemptOnStackReplacement(JSFunction* function) {
// We are not prepared to do OSR for a function that already has an
// allocated arguments object. The optimized code would bypass it for
// arguments accesses, which is unsound. Don't try OSR.
- if (shared->scope_info()->HasArgumentsShadow()) return;
+ if (shared->uses_arguments()) return;
// We're using on-stack replacement: patch the unoptimized code so that
// any back edge in any unoptimized frame will trigger on-stack
@@ -186,7 +202,8 @@ static void AttemptOnStackReplacement(JSFunction* function) {
Object* check_code;
MaybeObject* maybe_check_code = check_stub.TryGetCode();
if (maybe_check_code->ToObject(&check_code)) {
- Code* replacement_code = Builtins::builtin(Builtins::OnStackReplacement);
+ Code* replacement_code =
+ isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
Code* unoptimized_code = shared->code();
Deoptimizer::PatchStackCheckCode(unoptimized_code,
Code::cast(check_code),
@@ -195,21 +212,19 @@ static void AttemptOnStackReplacement(JSFunction* function) {
}
-static void ClearSampleBuffer() {
- for (int i = 0; i < kSamplerWindowSize; i++) {
- sampler_window[i] = NULL;
- sampler_window_weight[i] = 0;
- }
+void RuntimeProfiler::ClearSampleBuffer() {
+ memset(sampler_window_, 0, sizeof(sampler_window_));
+ memset(sampler_window_weight_, 0, sizeof(sampler_window_weight_));
}
-static int LookupSample(JSFunction* function) {
+int RuntimeProfiler::LookupSample(JSFunction* function) {
int weight = 0;
for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* sample = sampler_window[i];
+ Object* sample = sampler_window_[i];
if (sample != NULL) {
if (function == sample) {
- weight += sampler_window_weight[i];
+ weight += sampler_window_weight_[i];
}
}
}
@@ -217,31 +232,31 @@ static int LookupSample(JSFunction* function) {
}
-static void AddSample(JSFunction* function, int weight) {
+void RuntimeProfiler::AddSample(JSFunction* function, int weight) {
ASSERT(IsPowerOf2(kSamplerWindowSize));
- sampler_window[sampler_window_position] = function;
- sampler_window_weight[sampler_window_position] = weight;
- sampler_window_position = (sampler_window_position + 1) &
+ sampler_window_[sampler_window_position_] = function;
+ sampler_window_weight_[sampler_window_position_] = weight;
+ sampler_window_position_ = (sampler_window_position_ + 1) &
(kSamplerWindowSize - 1);
}
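AddSample depends on kSamplerWindowSize being a power of two (hence the ASSERT): masking with size - 1 wraps the position exactly like a modulo but costs a single AND instead of a division. A standalone sketch of the idiom with illustrative names:

#include <cassert>

static const int kWindowSize = 16;  // must be a power of two
static int window_[kWindowSize];
static int position_ = 0;

void Add(int sample) {
  window_[position_] = sample;
  // For power-of-two sizes, (p + 1) & (size - 1) equals (p + 1) % size
  // but compiles to a single AND instead of an integer division.
  position_ = (position_ + 1) & (kWindowSize - 1);
}

int main() {
  for (int i = 0; i < 40; i++) Add(i);
  assert(position_ == 40 % kWindowSize);
  return 0;
}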
void RuntimeProfiler::OptimizeNow() {
- HandleScope scope;
- PendingListNode* current = optimize_soon_list;
+ HandleScope scope(isolate_);
+ PendingListNode* current = optimize_soon_list_;
while (current != NULL) {
PendingListNode* next = current->next();
if (current->IsValid()) {
Handle<JSFunction> function = current->function();
int delay = current->Delay();
- if (IsOptimizable(*function)) {
+ if (function->IsOptimizable()) {
Optimize(*function, true, delay);
}
}
delete current;
current = next;
}
- optimize_soon_list = NULL;
+ optimize_soon_list_ = NULL;
// Run through the JavaScript frames and collect them. If we already
// have a sample of the function, we mark it for optimizations
@@ -249,7 +264,7 @@ void RuntimeProfiler::OptimizeNow() {
JSFunction* samples[kSamplerFrameCount];
int sample_count = 0;
int frame_count = 0;
- for (JavaScriptFrameIterator it;
+ for (JavaScriptFrameIterator it(isolate_);
frame_count++ < kSamplerFrameCount && !it.done();
it.Advance()) {
JavaScriptFrame* frame = it.frame();
@@ -257,14 +272,14 @@ void RuntimeProfiler::OptimizeNow() {
// Adjust threshold each time we have processed
// a certain number of ticks.
- if (sampler_ticks_until_threshold_adjustment > 0) {
- sampler_ticks_until_threshold_adjustment--;
- if (sampler_ticks_until_threshold_adjustment <= 0) {
+ if (sampler_ticks_until_threshold_adjustment_ > 0) {
+ sampler_ticks_until_threshold_adjustment_--;
+ if (sampler_ticks_until_threshold_adjustment_ <= 0) {
// If the threshold is not already at the minimum
// modify and reset the ticks until next adjustment.
- if (sampler_threshold > kSamplerThresholdMin) {
- sampler_threshold -= kSamplerThresholdDelta;
- sampler_ticks_until_threshold_adjustment =
+ if (sampler_threshold_ > kSamplerThresholdMin) {
+ sampler_threshold_ -= kSamplerThresholdDelta;
+ sampler_ticks_until_threshold_adjustment_ =
kSamplerTicksBetweenThresholdAdjustment;
}
}
@@ -279,16 +294,16 @@ void RuntimeProfiler::OptimizeNow() {
}
// Do not record non-optimizable functions.
- if (!IsOptimizable(function)) continue;
+ if (!function->IsOptimizable()) continue;
samples[sample_count++] = function;
int function_size = function->shared()->SourceSize();
int threshold_size_factor = (function_size > kSizeLimit)
- ? sampler_threshold_size_factor
+ ? sampler_threshold_size_factor_
: 1;
- int threshold = sampler_threshold * threshold_size_factor;
- int current_js_ratio = NoBarrier_Load(&js_ratio);
+ int threshold = sampler_threshold_ * threshold_size_factor;
+ int current_js_ratio = NoBarrier_Load(&js_ratio_);
// Adjust threshold depending on the ratio of time spent
// in JS code.
@@ -304,7 +319,8 @@ void RuntimeProfiler::OptimizeNow() {
if (LookupSample(function) >= threshold) {
Optimize(function, false, 0);
- CompilationCache::MarkForEagerOptimizing(Handle<JSFunction>(function));
+ isolate_->compilation_cache()->MarkForEagerOptimizing(
+ Handle<JSFunction>(function));
}
}
@@ -318,29 +334,28 @@ void RuntimeProfiler::OptimizeNow() {
void RuntimeProfiler::OptimizeSoon(JSFunction* function) {
- if (!IsOptimizable(function)) return;
+ if (!function->IsOptimizable()) return;
PendingListNode* node = new PendingListNode(function);
- node->set_next(optimize_soon_list);
- optimize_soon_list = node;
+ node->set_next(optimize_soon_list_);
+ optimize_soon_list_ = node;
}
#ifdef ENABLE_LOGGING_AND_PROFILING
-static void UpdateStateRatio(SamplerState current_state) {
- static const int kStateWindowSize = 128;
- static SamplerState state_window[kStateWindowSize];
- static int state_window_position = 0;
- static int state_counts[2] = { kStateWindowSize, 0 };
-
- SamplerState old_state = state_window[state_window_position];
- state_counts[old_state]--;
- state_window[state_window_position] = current_state;
- state_counts[current_state]++;
+void RuntimeProfiler::UpdateStateRatio(SamplerState current_state) {
+ SamplerState old_state = state_window_[state_window_position_];
+ state_counts_[old_state]--;
+ state_window_[state_window_position_] = current_state;
+ state_counts_[current_state]++;
ASSERT(IsPowerOf2(kStateWindowSize));
- state_window_position = (state_window_position + 1) &
+ state_window_position_ = (state_window_position_ + 1) &
(kStateWindowSize - 1);
- NoBarrier_Store(&js_ratio, state_counts[IN_JS_STATE] * 100 /
- kStateWindowSize);
+  // Note: to calculate the correct ratio we have to track how many
+  // valid ticks are actually in the state window, because on profiler
+  // startup this number can be less than the window size.
+ state_window_ticks_ = Min(kStateWindowSize, state_window_ticks_ + 1);
+ NoBarrier_Store(&js_ratio_, state_counts_[IN_JS_STATE] * 100 /
+ state_window_ticks_);
}
#endif
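Dividing by state_window_ticks_ rather than kStateWindowSize is what keeps the ratio honest right after startup, while the circular window is still filling. A simplified sketch of that correction (illustrative names; eviction of entries leaving the window is omitted):

#include <algorithm>
#include <cassert>

static const int kStateWindowSize = 128;
static int state_window_ticks_ = 0;
static int in_js_count_ = 0;

// Record one tick and return the JS ratio in percent. Dividing by the
// number of valid ticks instead of the window size keeps the ratio
// correct while the window is still filling up after startup.
int RecordTick(bool in_js) {
  if (in_js) in_js_count_++;
  state_window_ticks_ = std::min(kStateWindowSize, state_window_ticks_ + 1);
  return in_js_count_ * 100 / state_window_ticks_;
}

int main() {
  assert(RecordTick(true) == 100);  // 1 of 1 ticks in JS
  assert(RecordTick(false) == 50);  // 1 of 2, not 1 of 128
  return 0;
}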
@@ -348,28 +363,29 @@ static void UpdateStateRatio(SamplerState current_state) {
void RuntimeProfiler::NotifyTick() {
#ifdef ENABLE_LOGGING_AND_PROFILING
// Record state sample.
- SamplerState state = Top::IsInJSState()
+ SamplerState state = IsSomeIsolateInJS()
? IN_JS_STATE
: IN_NON_JS_STATE;
UpdateStateRatio(state);
- StackGuard::RequestRuntimeProfilerTick();
+ isolate_->stack_guard()->RequestRuntimeProfilerTick();
#endif
}
void RuntimeProfiler::Setup() {
+ ASSERT(has_been_globally_setup_);
ClearSampleBuffer();
// If the ticker hasn't already started, make sure to do so to get
// the ticks for the runtime profiler.
- if (IsEnabled()) Logger::EnsureTickerStarted();
+ if (IsEnabled()) isolate_->logger()->EnsureTickerStarted();
}
void RuntimeProfiler::Reset() {
- sampler_threshold = kSamplerThresholdInit;
- sampler_ticks_until_threshold_adjustment =
+ sampler_threshold_ = kSamplerThresholdInit;
+ sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
+ sampler_ticks_until_threshold_adjustment_ =
kSamplerTicksBetweenThresholdAdjustment;
- sampler_threshold_size_factor = kSamplerThresholdSizeFactorInit;
}
@@ -386,24 +402,61 @@ int RuntimeProfiler::SamplerWindowSize() {
// Update the pointers in the sampler window after a GC.
void RuntimeProfiler::UpdateSamplesAfterScavenge() {
for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* function = sampler_window[i];
- if (function != NULL && Heap::InNewSpace(function)) {
+ Object* function = sampler_window_[i];
+ if (function != NULL && isolate_->heap()->InNewSpace(function)) {
MapWord map_word = HeapObject::cast(function)->map_word();
if (map_word.IsForwardingAddress()) {
- sampler_window[i] = map_word.ToForwardingAddress();
+ sampler_window_[i] = map_word.ToForwardingAddress();
} else {
- sampler_window[i] = NULL;
+ sampler_window_[i] = NULL;
}
}
}
}
+void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // The profiler thread must still be waiting.
+ ASSERT(NoBarrier_Load(&state_) >= 0);
+ // In IsolateEnteredJS we have already incremented the counter and
+ // undid the decrement done by the profiler thread. Increment again
+ // to get the right count of active isolates.
+ NoBarrier_AtomicIncrement(&state_, 1);
+ semaphore_->Signal();
+ isolate->ResetEagerOptimizingData();
+#endif
+}
+
+
+bool RuntimeProfiler::IsSomeIsolateInJS() {
+ return NoBarrier_Load(&state_) > 0;
+}
+
+
+bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
+ ASSERT(old_state >= -1);
+ if (old_state != 0) return false;
+ semaphore_->Wait();
+#endif
+ return true;
+}
+
+
+void RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ semaphore_->Signal();
+#endif
+}
+
+
void RuntimeProfiler::RemoveDeadSamples() {
for (int i = 0; i < kSamplerWindowSize; i++) {
- Object* function = sampler_window[i];
+ Object* function = sampler_window_[i];
if (function != NULL && !HeapObject::cast(function)->IsMarked()) {
- sampler_window[i] = NULL;
+ sampler_window_[i] = NULL;
}
}
}
@@ -411,7 +464,7 @@ void RuntimeProfiler::RemoveDeadSamples() {
void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
for (int i = 0; i < kSamplerWindowSize; i++) {
- visitor->VisitPointer(&sampler_window[i]);
+ visitor->VisitPointer(&sampler_window_[i]);
}
}
@@ -419,20 +472,13 @@ void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
#ifdef ENABLE_LOGGING_AND_PROFILING
static const int kNonJSTicksThreshold = 100;
- // We suspend the runtime profiler thread when not running
- // JavaScript. If the CPU profiler is active we must not do this
- // because it samples both JavaScript and C++ code.
- if (RuntimeProfiler::IsEnabled() &&
- !CpuProfiler::is_profiling() &&
- !(FLAG_prof && FLAG_prof_auto)) {
- if (Top::IsInJSState()) {
- non_js_ticks_ = 0;
+ if (RuntimeProfiler::IsSomeIsolateInJS()) {
+ non_js_ticks_ = 0;
+ } else {
+ if (non_js_ticks_ < kNonJSTicksThreshold) {
+ ++non_js_ticks_;
} else {
- if (non_js_ticks_ < kNonJSTicksThreshold) {
- ++non_js_ticks_;
- } else {
- if (Top::WaitForJSState()) return true;
- }
+ return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
}
}
#endif
diff --git a/deps/v8/src/runtime-profiler.h b/deps/v8/src/runtime-profiler.h
index 02defc9b2..692b4ffac 100644
--- a/deps/v8/src/runtime-profiler.h
+++ b/deps/v8/src/runtime-profiler.h
@@ -28,29 +28,131 @@
#ifndef V8_RUNTIME_PROFILER_H_
#define V8_RUNTIME_PROFILER_H_
-#include "v8.h"
#include "allocation.h"
+#include "atomicops.h"
namespace v8 {
namespace internal {
-class RuntimeProfiler : public AllStatic {
+class Isolate;
+class JSFunction;
+class Object;
+class PendingListNode;
+class Semaphore;
+
+class RuntimeProfiler {
public:
- static bool IsEnabled() { return V8::UseCrankshaft() && FLAG_opt; }
+ explicit RuntimeProfiler(Isolate* isolate);
+
+ static void GlobalSetup();
+
+ static inline bool IsEnabled() {
+ ASSERT(has_been_globally_setup_);
+ return enabled_;
+ }
+
+ void OptimizeNow();
+ void OptimizeSoon(JSFunction* function);
+
+ void NotifyTick();
+
+ void Setup();
+ void Reset();
+ void TearDown();
+
+ Object** SamplerWindowAddress();
+ int SamplerWindowSize();
+
+ // Rate limiting support.
+
+ // VM thread interface.
+ //
+ // Called by isolates when their states change.
+ static inline void IsolateEnteredJS(Isolate* isolate);
+ static inline void IsolateExitedJS(Isolate* isolate);
+
+ // Profiler thread interface.
+ //
+ // IsSomeIsolateInJS():
+ // The profiler thread can query whether some isolate is currently
+ // running JavaScript code.
+ //
+ // WaitForSomeIsolateToEnterJS():
+ // When no isolates are running JavaScript code for some time the
+ // profiler thread suspends itself by calling the wait function. The
+ // wait function returns true after it waited or false immediately.
+ // While the function was waiting the profiler may have been
+ // disabled so it *must check* whether it is allowed to continue.
+ static bool IsSomeIsolateInJS();
+ static bool WaitForSomeIsolateToEnterJS();
+
+ // When shutting down we join the profiler thread. Doing so while
+ // it's waiting on a semaphore will cause a deadlock, so we have to
+ // wake it up first.
+ static void WakeUpRuntimeProfilerThreadBeforeShutdown();
+
+ void UpdateSamplesAfterScavenge();
+ void RemoveDeadSamples();
+ void UpdateSamplesAfterCompact(ObjectVisitor* visitor);
+
+ private:
+ static const int kSamplerWindowSize = 16;
+ static const int kStateWindowSize = 128;
+
+ enum SamplerState {
+ IN_NON_JS_STATE = 0,
+ IN_JS_STATE = 1
+ };
- static void OptimizeNow();
- static void OptimizeSoon(JSFunction* function);
+ static void HandleWakeUp(Isolate* isolate);
- static void NotifyTick();
+ void Optimize(JSFunction* function, bool eager, int delay);
- static void Setup();
- static void Reset();
- static void TearDown();
+ void AttemptOnStackReplacement(JSFunction* function);
- static int SamplerWindowSize();
- static void UpdateSamplesAfterScavenge();
- static void RemoveDeadSamples();
- static void UpdateSamplesAfterCompact(ObjectVisitor* visitor);
+ void ClearSampleBuffer();
+
+ void ClearSampleBufferNewSpaceEntries();
+
+ int LookupSample(JSFunction* function);
+
+ void AddSample(JSFunction* function, int weight);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ void UpdateStateRatio(SamplerState current_state);
+#endif
+
+ Isolate* isolate_;
+
+ int sampler_threshold_;
+ int sampler_threshold_size_factor_;
+ int sampler_ticks_until_threshold_adjustment_;
+
+ // The ratio of ticks spent in JS code in percent.
+ Atomic32 js_ratio_;
+
+ Object* sampler_window_[kSamplerWindowSize];
+ int sampler_window_position_;
+ int sampler_window_weight_[kSamplerWindowSize];
+
+ // Support for pending 'optimize soon' requests.
+ PendingListNode* optimize_soon_list_;
+
+ SamplerState state_window_[kStateWindowSize];
+ int state_window_position_;
+ int state_window_ticks_;
+ int state_counts_[2];
+
+ // Possible state values:
+ // -1 => the profiler thread is waiting on the semaphore
+ // 0 or positive => the number of isolates running JavaScript code.
+ static Atomic32 state_;
+ static Semaphore* semaphore_;
+
+#ifdef DEBUG
+ static bool has_been_globally_setup_;
+#endif
+ static bool enabled_;
};
@@ -59,9 +161,10 @@ class RuntimeProfilerRateLimiter BASE_EMBEDDED {
public:
RuntimeProfilerRateLimiter() : non_js_ticks_(0) { }
- // Suspends the current thread when not executing JavaScript to
- // minimize CPU usage. Returns whether this thread was suspended
-  // (and so might have to check whether profiling is still active).
+ // Suspends the current thread (which must be the profiler thread)
+ // when not executing JavaScript to minimize CPU usage. Returns
+ // whether the thread was suspended (and so must check whether
+  // profiling is still active).
//
// Does nothing when runtime profiling is not enabled.
bool SuspendIfNecessary();
@@ -72,6 +175,27 @@ class RuntimeProfilerRateLimiter BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(RuntimeProfilerRateLimiter);
};
+
+// Implementation of RuntimeProfiler inline functions.
+
+void RuntimeProfiler::IsolateEnteredJS(Isolate* isolate) {
+ Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
+ if (new_state == 0) {
+ // Just incremented from -1 to 0. -1 can only be set by the
+ // profiler thread before it suspends itself and starts waiting on
+ // the semaphore.
+ HandleWakeUp(isolate);
+ }
+ ASSERT(new_state >= 0);
+}
+
+
+void RuntimeProfiler::IsolateExitedJS(Isolate* isolate) {
+ Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, -1);
+ ASSERT(new_state >= 0);
+ USE(new_state);
+}
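Taken together, state_ and the semaphore form a small handshake: -1 parks the profiler thread, zero or more counts the isolates in JS, and an increment that lands on 0 must have started from -1, so the profiler is woken and the counter incremented once more. A compact sketch of the same protocol using std::atomic (the real code uses V8's atomicops and an OS semaphore):

#include <atomic>
#include <cassert>

// state: -1   => the profiler thread is parked on the semaphore,
//        >= 0 => the number of isolates currently running JavaScript.
static std::atomic<int> state(0);

// Profiler thread: park only if no isolate is in JS (CAS 0 -> -1);
// the caller would block on a semaphore after a successful swap.
bool ProfilerTryPark() {
  int expected = 0;
  return state.compare_exchange_strong(expected, -1);
}

// Isolate entering JS: an increment that lands on 0 started from -1,
// so wake the parked profiler and increment once more to make the
// isolate count right (mirrors IsolateEnteredJS plus HandleWakeUp).
void IsolateEnteredJS() {
  if (state.fetch_add(1) + 1 == 0) {
    state.fetch_add(1);
    // semaphore.Signal() would go here.
  }
}

int main() {
  assert(ProfilerTryPark());  // 0 -> -1
  IsolateEnteredJS();         // -1 -> 0 -> 1, would signal the semaphore
  assert(state.load() == 1);
  return 0;
}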
+
} } // namespace v8::internal
#endif // V8_RUNTIME_PROFILER_H_
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index dd49d727e..df99fdc67 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -42,24 +42,27 @@
#include "execution.h"
#include "global-handles.h"
#include "jsregexp.h"
+#include "json-parser.h"
#include "liveedit.h"
#include "liveobjectlist-inl.h"
+#include "misc-intrinsics.h"
#include "parser.h"
#include "platform.h"
-#include "runtime.h"
#include "runtime-profiler.h"
+#include "runtime.h"
#include "scopeinfo.h"
#include "smart-pointer.h"
+#include "string-search.h"
#include "stub-cache.h"
#include "v8threads.h"
-#include "string-search.h"
+#include "vm-state-inl.h"
namespace v8 {
namespace internal {
#define RUNTIME_ASSERT(value) \
- if (!(value)) return Top::ThrowIllegalOperation();
+ if (!(value)) return isolate->ThrowIllegalOperation();
// Cast the given object to a value of the specified type and store
// it in a variable with the given name. If the object is not of the
@@ -79,19 +82,19 @@ namespace internal {
RUNTIME_ASSERT(obj->IsBoolean()); \
bool name = (obj)->IsTrue();
-// Cast the given object to a Smi and store its value in an int variable
-// with the given name. If the object is not a Smi call IllegalOperation
+// Cast the given argument to a Smi and store its value in an int variable
+// with the given name. If the argument is not a Smi, call IllegalOperation
// and return.
-#define CONVERT_SMI_CHECKED(name, obj) \
- RUNTIME_ASSERT(obj->IsSmi()); \
- int name = Smi::cast(obj)->value();
+#define CONVERT_SMI_ARG_CHECKED(name, index) \
+ RUNTIME_ASSERT(args[index]->IsSmi()); \
+ int name = args.smi_at(index);
-// Cast the given object to a double and store it in a variable with
-// the given name. If the object is not a number (as opposed to
+// Cast the given argument to a double and store it in a variable with
+// the given name. If the argument is not a number (as opposed to
+// the number not-a-number), call IllegalOperation and return.
-#define CONVERT_DOUBLE_CHECKED(name, obj) \
- RUNTIME_ASSERT(obj->IsNumber()); \
- double name = (obj)->Number();
+#define CONVERT_DOUBLE_ARG_CHECKED(name, index) \
+ RUNTIME_ASSERT(args[index]->IsNumber()); \
+ double name = args.number_at(index);
// Call the specified converter on the object and store the result in
// a variable of the specified type with the given name. If the
@@ -100,16 +103,15 @@ namespace internal {
RUNTIME_ASSERT(obj->IsNumber()); \
type name = NumberTo##Type(obj);
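All of these CONVERT_* macros share one pattern: validate the argument's type, declare a correctly typed local in the enclosing runtime function, and return early through RUNTIME_ASSERT on mismatch. A toy version of the pattern with illustrative stand-in types:

#include <cstdio>

// Illustrative stand-ins for V8's Arguments/Smi machinery.
struct Arg { bool is_int; int value; };

// Validate, then declare a typed local in the enclosing function,
// returning early on mismatch, the same shape as CONVERT_*_ARG_CHECKED.
#define CONVERT_INT_ARG_CHECKED(name, arg) \
  if (!(arg).is_int) return false;         \
  int name = (arg).value;

bool Halve(Arg a, int* out) {
  CONVERT_INT_ARG_CHECKED(n, a);  // declares 'n' or returns false
  *out = n / 2;
  return true;
}

int main() {
  int out = 0;
  Arg good = {true, 8};
  Arg bad = {false, 0};
  std::printf("%d %d\n", Halve(good, &out), Halve(bad, &out));  // 1 0
  return 0;
}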
-// Non-reentrant string buffer for efficient general use in this file.
-static StaticResource<StringInputBuffer> runtime_string_input_buffer;
+MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
+ JSObject* boilerplate) {
+ StackLimitCheck check(isolate);
+ if (check.HasOverflowed()) return isolate->StackOverflow();
-MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(JSObject* boilerplate) {
- StackLimitCheck check;
- if (check.HasOverflowed()) return Top::StackOverflow();
-
+ Heap* heap = isolate->heap();
Object* result;
- { MaybeObject* maybe_result = Heap::CopyJSObject(boilerplate);
+ { MaybeObject* maybe_result = heap->CopyJSObject(boilerplate);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
JSObject* copy = JSObject::cast(result);
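DeepCopyBoilerplate recurses over arbitrarily nested literals, so its first action is a StackLimitCheck that turns a looming C++ stack overflow into a clean StackOverflow failure rather than a crash. A sketch of the same shape, with an explicit depth cap standing in for V8's stack-limit probe (types and the limit are illustrative):

#include <memory>
#include <vector>

struct Node {
  int value;
  std::vector<std::unique_ptr<Node> > children;
};

// Recursive deep copy that bails out instead of overflowing the C++
// stack. V8 consults the isolate's StackLimitCheck; a fixed depth cap
// stands in for that probe here.
std::unique_ptr<Node> DeepCopy(const Node& n, int depth) {
  if (depth > 10000) return std::unique_ptr<Node>();  // "stack overflow"
  std::unique_ptr<Node> copy(new Node());
  copy->value = n.value;
  for (size_t i = 0; i < n.children.size(); i++) {
    std::unique_ptr<Node> c = DeepCopy(*n.children[i], depth + 1);
    if (!c) return std::unique_ptr<Node>();  // propagate the failure
    copy->children.push_back(std::move(c));
  }
  return copy;
}

int main() {
  Node root;
  root.value = 1;
  root.children.push_back(std::unique_ptr<Node>(new Node()));
  root.children[0]->value = 2;
  std::unique_ptr<Node> copy = DeepCopy(root, 0);
  return copy ? 0 : 1;
}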
@@ -121,7 +123,7 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(JSObject* boilerplate) {
Object* value = properties->get(i);
if (value->IsJSObject()) {
JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(js_object);
+ { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, js_object);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
properties->set(i, result);
@@ -132,7 +134,7 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(JSObject* boilerplate) {
Object* value = copy->InObjectPropertyAt(i);
if (value->IsJSObject()) {
JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(js_object);
+ { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, js_object);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
copy->InObjectPropertyAtPut(i, result);
@@ -140,7 +142,7 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(JSObject* boilerplate) {
}
} else {
{ MaybeObject* maybe_result =
- Heap::AllocateFixedArray(copy->NumberOfLocalProperties(NONE));
+ heap->AllocateFixedArray(copy->NumberOfLocalProperties(NONE));
if (!maybe_result->ToObject(&result)) return maybe_result;
}
FixedArray* names = FixedArray::cast(result);
@@ -158,7 +160,7 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(JSObject* boilerplate) {
copy->GetProperty(key_string, &attributes)->ToObjectUnchecked();
if (value->IsJSObject()) {
JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(js_object);
+ { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, js_object);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
{ MaybeObject* maybe_result =
@@ -172,12 +174,12 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(JSObject* boilerplate) {
// Deep copy local elements.
// Pixel elements cannot be created using an object literal.
- ASSERT(!copy->HasPixelElements() && !copy->HasExternalArrayElements());
+ ASSERT(!copy->HasExternalArrayElements());
switch (copy->GetElementsKind()) {
case JSObject::FAST_ELEMENTS: {
FixedArray* elements = FixedArray::cast(copy->elements());
- if (elements->map() == Heap::fixed_cow_array_map()) {
- Counters::cow_arrays_created_runtime.Increment();
+ if (elements->map() == heap->fixed_cow_array_map()) {
+ isolate->counters()->cow_arrays_created_runtime()->Increment();
#ifdef DEBUG
for (int i = 0; i < elements->length(); i++) {
ASSERT(!elements->get(i)->IsJSObject());
@@ -188,7 +190,8 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(JSObject* boilerplate) {
Object* value = elements->get(i);
if (value->IsJSObject()) {
JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(js_object);
+ { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
+ js_object);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
elements->set(i, result);
@@ -206,7 +209,8 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(JSObject* boilerplate) {
Object* value = element_dictionary->ValueAt(i);
if (value->IsJSObject()) {
JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = DeepCopyBoilerplate(js_object);
+ { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
+ js_object);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
element_dictionary->ValueAtPut(i, result);
@@ -223,15 +227,15 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(JSObject* boilerplate) {
}
-static MaybeObject* Runtime_CloneLiteralBoilerplate(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneLiteralBoilerplate) {
CONVERT_CHECKED(JSObject, boilerplate, args[0]);
- return DeepCopyBoilerplate(boilerplate);
+ return DeepCopyBoilerplate(isolate, boilerplate);
}
-static MaybeObject* Runtime_CloneShallowLiteralBoilerplate(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneShallowLiteralBoilerplate) {
CONVERT_CHECKED(JSObject, boilerplate, args[0]);
- return Heap::CopyJSObject(boilerplate);
+ return isolate->heap()->CopyJSObject(boilerplate);
}
@@ -239,6 +243,7 @@ static Handle<Map> ComputeObjectLiteralMap(
Handle<Context> context,
Handle<FixedArray> constant_properties,
bool* is_result_from_cache) {
+ Isolate* isolate = context->GetIsolate();
int properties_length = constant_properties->length();
int number_of_properties = properties_length / 2;
if (FLAG_canonicalize_object_literal_maps) {
@@ -265,7 +270,8 @@ static Handle<Map> ComputeObjectLiteralMap(
if ((number_of_symbol_keys == number_of_properties) &&
(number_of_symbol_keys < kMaxKeys)) {
// Create the fixed array with the key.
- Handle<FixedArray> keys = Factory::NewFixedArray(number_of_symbol_keys);
+ Handle<FixedArray> keys =
+ isolate->factory()->NewFixedArray(number_of_symbol_keys);
if (number_of_symbol_keys > 0) {
int index = 0;
for (int p = 0; p < properties_length; p += 2) {
@@ -277,25 +283,28 @@ static Handle<Map> ComputeObjectLiteralMap(
ASSERT(index == number_of_symbol_keys);
}
*is_result_from_cache = true;
- return Factory::ObjectLiteralMapFromCache(context, keys);
+ return isolate->factory()->ObjectLiteralMapFromCache(context, keys);
}
}
*is_result_from_cache = false;
- return Factory::CopyMap(
+ return isolate->factory()->CopyMap(
Handle<Map>(context->object_function()->initial_map()),
number_of_properties);
}
static Handle<Object> CreateLiteralBoilerplate(
+ Isolate* isolate,
Handle<FixedArray> literals,
Handle<FixedArray> constant_properties);
static Handle<Object> CreateObjectLiteralBoilerplate(
+ Isolate* isolate,
Handle<FixedArray> literals,
Handle<FixedArray> constant_properties,
- bool should_have_fast_elements) {
+ bool should_have_fast_elements,
+ bool has_function_literal) {
// Get the global context from the literals array. This is the
// context in which the function was created and we use the object
// function from this context to create the object literal. We do
@@ -305,63 +314,90 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
Handle<Context> context =
Handle<Context>(JSFunction::GlobalContextFromLiterals(*literals));
- bool is_result_from_cache;
- Handle<Map> map = ComputeObjectLiteralMap(context,
- constant_properties,
- &is_result_from_cache);
+ // In case we have function literals, we want the object to be in
+  // slow properties mode for now. We don't use the map cache because
+ // maps with constant functions can't be shared if the functions are
+ // not the same (which is the common case).
+ bool is_result_from_cache = false;
+ Handle<Map> map = has_function_literal
+ ? Handle<Map>(context->object_function()->initial_map())
+ : ComputeObjectLiteralMap(context,
+ constant_properties,
+ &is_result_from_cache);
- Handle<JSObject> boilerplate = Factory::NewJSObjectFromMap(map);
+ Handle<JSObject> boilerplate = isolate->factory()->NewJSObjectFromMap(map);
// Normalize the elements of the boilerplate to save space if needed.
if (!should_have_fast_elements) NormalizeElements(boilerplate);
- { // Add the constant properties to the boilerplate.
- int length = constant_properties->length();
- OptimizedObjectForAddingMultipleProperties opt(boilerplate,
- length / 2,
- !is_result_from_cache);
- for (int index = 0; index < length; index +=2) {
- Handle<Object> key(constant_properties->get(index+0));
- Handle<Object> value(constant_properties->get(index+1));
- if (value->IsFixedArray()) {
- // The value contains the constant_properties of a
- // simple object literal.
- Handle<FixedArray> array = Handle<FixedArray>::cast(value);
- value = CreateLiteralBoilerplate(literals, array);
- if (value.is_null()) return value;
- }
- Handle<Object> result;
- uint32_t element_index = 0;
- if (key->IsSymbol()) {
- if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
- // Array index as string (uint32).
- result = SetOwnElement(boilerplate, element_index, value);
- } else {
- Handle<String> name(String::cast(*key));
- ASSERT(!name->AsArrayIndex(&element_index));
- result = SetLocalPropertyIgnoreAttributes(boilerplate, name,
- value, NONE);
- }
- } else if (key->ToArrayIndex(&element_index)) {
- // Array index (uint32).
- result = SetOwnElement(boilerplate, element_index, value);
+ // Add the constant properties to the boilerplate.
+ int length = constant_properties->length();
+ bool should_transform =
+ !is_result_from_cache && boilerplate->HasFastProperties();
+ if (should_transform || has_function_literal) {
+    // Normalize the properties of the object to avoid n^2 behavior
+    // when extending it with multiple properties. Indicate the number
+    // of properties to be added.
+ NormalizeProperties(boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
+ }
+
+  for (int index = 0; index < length; index += 2) {
+ Handle<Object> key(constant_properties->get(index+0), isolate);
+ Handle<Object> value(constant_properties->get(index+1), isolate);
+ if (value->IsFixedArray()) {
+ // The value contains the constant_properties of a
+ // simple object or array literal.
+ Handle<FixedArray> array = Handle<FixedArray>::cast(value);
+ value = CreateLiteralBoilerplate(isolate, literals, array);
+ if (value.is_null()) return value;
+ }
+ Handle<Object> result;
+ uint32_t element_index = 0;
+ if (key->IsSymbol()) {
+ if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
+ // Array index as string (uint32).
+ result = SetOwnElement(boilerplate,
+ element_index,
+ value,
+ kNonStrictMode);
} else {
- // Non-uint32 number.
- ASSERT(key->IsNumber());
- double num = key->Number();
- char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
- const char* str = DoubleToCString(num, buffer);
- Handle<String> name = Factory::NewStringFromAscii(CStrVector(str));
+ Handle<String> name(String::cast(*key));
+ ASSERT(!name->AsArrayIndex(&element_index));
result = SetLocalPropertyIgnoreAttributes(boilerplate, name,
value, NONE);
}
- // If setting the property on the boilerplate throws an
- // exception, the exception is converted to an empty handle in
- // the handle based operations. In that case, we need to
- // convert back to an exception.
- if (result.is_null()) return result;
- }
+ } else if (key->ToArrayIndex(&element_index)) {
+ // Array index (uint32).
+ result = SetOwnElement(boilerplate,
+ element_index,
+ value,
+ kNonStrictMode);
+ } else {
+ // Non-uint32 number.
+ ASSERT(key->IsNumber());
+ double num = key->Number();
+ char arr[100];
+ Vector<char> buffer(arr, ARRAY_SIZE(arr));
+ const char* str = DoubleToCString(num, buffer);
+ Handle<String> name =
+ isolate->factory()->NewStringFromAscii(CStrVector(str));
+ result = SetLocalPropertyIgnoreAttributes(boilerplate, name,
+ value, NONE);
+ }
+ // If setting the property on the boilerplate throws an
+ // exception, the exception is converted to an empty handle in
+ // the handle based operations. In that case, we need to
+ // convert back to an exception.
+ if (result.is_null()) return result;
+ }
+
+  // Transform to fast properties if necessary. For object literals
+  // containing function literals we defer this operation until after all
+ // computed properties have been assigned so that we can generate
+ // constant function properties.
+ if (should_transform && !has_function_literal) {
+ TransformToFastProperties(boilerplate,
+ boilerplate->map()->unused_property_fields());
}
return boilerplate;
@@ -369,16 +405,18 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
static Handle<Object> CreateArrayLiteralBoilerplate(
+ Isolate* isolate,
Handle<FixedArray> literals,
Handle<FixedArray> elements) {
// Create the JSArray.
Handle<JSFunction> constructor(
JSFunction::GlobalContextFromLiterals(*literals)->array_function());
- Handle<Object> object = Factory::NewJSObject(constructor);
+ Handle<Object> object = isolate->factory()->NewJSObject(constructor);
- const bool is_cow = (elements->map() == Heap::fixed_cow_array_map());
+ const bool is_cow =
+ (elements->map() == isolate->heap()->fixed_cow_array_map());
Handle<FixedArray> copied_elements =
- is_cow ? elements : Factory::CopyFixedArray(elements);
+ is_cow ? elements : isolate->factory()->CopyFixedArray(elements);
Handle<FixedArray> content = Handle<FixedArray>::cast(copied_elements);
if (is_cow) {
@@ -392,10 +430,10 @@ static Handle<Object> CreateArrayLiteralBoilerplate(
for (int i = 0; i < content->length(); i++) {
if (content->get(i)->IsFixedArray()) {
// The value contains the constant_properties of a
- // simple object literal.
+ // simple object or array literal.
Handle<FixedArray> fa(FixedArray::cast(content->get(i)));
Handle<Object> result =
- CreateLiteralBoilerplate(literals, fa);
+ CreateLiteralBoilerplate(isolate, literals, fa);
if (result.is_null()) return result;
content->set(i, *result);
}
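The fixed_cow_array_map check is what makes shallow array-literal clones cheap: arrays built from the same literal share a single copy-on-write backing store, duplicated only when an element is written. A tiny illustration of the copy-on-write idea (not V8's implementation):

#include <cassert>
#include <memory>
#include <vector>

// Tiny copy-on-write array: clones share one backing store until a write.
class CowArray {
 public:
  explicit CowArray(std::vector<int> v)
      : data_(std::make_shared<std::vector<int> >(v)) {}
  int get(size_t i) const { return (*data_)[i]; }
  void set(size_t i, int v) {
    if (data_.use_count() > 1)  // shared: duplicate before writing
      data_ = std::make_shared<std::vector<int> >(*data_);
    (*data_)[i] = v;
  }
 private:
  std::shared_ptr<std::vector<int> > data_;
};

int main() {
  CowArray a(std::vector<int>{1, 2, 3});
  CowArray b = a;  // shallow clone: shares the backing store
  b.set(0, 99);    // first write triggers the actual copy
  assert(a.get(0) == 1 && b.get(0) == 99);
  return 0;
}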
@@ -409,16 +447,26 @@ static Handle<Object> CreateArrayLiteralBoilerplate(
static Handle<Object> CreateLiteralBoilerplate(
+ Isolate* isolate,
Handle<FixedArray> literals,
Handle<FixedArray> array) {
Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
+ const bool kHasNoFunctionLiteral = false;
switch (CompileTimeValue::GetType(array)) {
case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS:
- return CreateObjectLiteralBoilerplate(literals, elements, true);
+ return CreateObjectLiteralBoilerplate(isolate,
+ literals,
+ elements,
+ true,
+ kHasNoFunctionLiteral);
case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS:
- return CreateObjectLiteralBoilerplate(literals, elements, false);
+ return CreateObjectLiteralBoilerplate(isolate,
+ literals,
+ elements,
+ false,
+ kHasNoFunctionLiteral);
case CompileTimeValue::ARRAY_LITERAL:
- return CreateArrayLiteralBoilerplate(literals, elements);
+ return CreateArrayLiteralBoilerplate(isolate, literals, elements);
default:
UNREACHABLE();
return Handle<Object>::null();
@@ -426,19 +474,20 @@ static Handle<Object> CreateLiteralBoilerplate(
}
-static MaybeObject* Runtime_CreateArrayLiteralBoilerplate(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralBoilerplate) {
// Takes a FixedArray of elements containing the literal elements of
  // the array literal and produces a JSArray with those elements.
// Additionally takes the literals array of the surrounding function
// which contains the context from which to get the Array function
// to use for creating the array literal.
- HandleScope scope;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
+ CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_CHECKED(FixedArray, elements, 2);
- Handle<Object> object = CreateArrayLiteralBoilerplate(literals, elements);
+ Handle<Object> object =
+ CreateArrayLiteralBoilerplate(isolate, literals, elements);
if (object.is_null()) return Failure::Exception();
  // Update the function's literals and return the boilerplate.
@@ -447,127 +496,148 @@ static MaybeObject* Runtime_CreateArrayLiteralBoilerplate(Arguments args) {
}
-static MaybeObject* Runtime_CreateObjectLiteral(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
+ CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
- CONVERT_SMI_CHECKED(fast_elements, args[3]);
- bool should_have_fast_elements = fast_elements == 1;
+ CONVERT_SMI_ARG_CHECKED(flags, 3);
+ bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
+ bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
// Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index));
- if (*boilerplate == Heap::undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(literals,
+ Handle<Object> boilerplate(literals->get(literals_index), isolate);
+ if (*boilerplate == isolate->heap()->undefined_value()) {
+ boilerplate = CreateObjectLiteralBoilerplate(isolate,
+ literals,
constant_properties,
- should_have_fast_elements);
+ should_have_fast_elements,
+ has_function_literal);
if (boilerplate.is_null()) return Failure::Exception();
    // Update the function's literals and return the boilerplate.
literals->set(literals_index, *boilerplate);
}
- return DeepCopyBoilerplate(JSObject::cast(*boilerplate));
+ return DeepCopyBoilerplate(isolate, JSObject::cast(*boilerplate));
}
-static MaybeObject* Runtime_CreateObjectLiteralShallow(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteralShallow) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
+ CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
- CONVERT_SMI_CHECKED(fast_elements, args[3]);
- bool should_have_fast_elements = fast_elements == 1;
+ CONVERT_SMI_ARG_CHECKED(flags, 3);
+ bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
+ bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
// Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index));
- if (*boilerplate == Heap::undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(literals,
+ Handle<Object> boilerplate(literals->get(literals_index), isolate);
+ if (*boilerplate == isolate->heap()->undefined_value()) {
+ boilerplate = CreateObjectLiteralBoilerplate(isolate,
+ literals,
constant_properties,
- should_have_fast_elements);
+ should_have_fast_elements,
+ has_function_literal);
if (boilerplate.is_null()) return Failure::Exception();
    // Update the function's literals and return the boilerplate.
literals->set(literals_index, *boilerplate);
}
- return Heap::CopyJSObject(JSObject::cast(*boilerplate));
+ return isolate->heap()->CopyJSObject(JSObject::cast(*boilerplate));
}
-static MaybeObject* Runtime_CreateArrayLiteral(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
+ CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_CHECKED(FixedArray, elements, 2);
// Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index));
- if (*boilerplate == Heap::undefined_value()) {
- boilerplate = CreateArrayLiteralBoilerplate(literals, elements);
+ Handle<Object> boilerplate(literals->get(literals_index), isolate);
+ if (*boilerplate == isolate->heap()->undefined_value()) {
+ boilerplate = CreateArrayLiteralBoilerplate(isolate, literals, elements);
if (boilerplate.is_null()) return Failure::Exception();
    // Update the function's literals and return the boilerplate.
literals->set(literals_index, *boilerplate);
}
- return DeepCopyBoilerplate(JSObject::cast(*boilerplate));
+ return DeepCopyBoilerplate(isolate, JSObject::cast(*boilerplate));
}
-static MaybeObject* Runtime_CreateArrayLiteralShallow(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
+ CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_CHECKED(FixedArray, elements, 2);
// Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index));
- if (*boilerplate == Heap::undefined_value()) {
- boilerplate = CreateArrayLiteralBoilerplate(literals, elements);
+ Handle<Object> boilerplate(literals->get(literals_index), isolate);
+ if (*boilerplate == isolate->heap()->undefined_value()) {
+ boilerplate = CreateArrayLiteralBoilerplate(isolate, literals, elements);
if (boilerplate.is_null()) return Failure::Exception();
    // Update the function's literals and return the boilerplate.
literals->set(literals_index, *boilerplate);
}
if (JSObject::cast(*boilerplate)->elements()->map() ==
- Heap::fixed_cow_array_map()) {
- Counters::cow_arrays_created_runtime.Increment();
+ isolate->heap()->fixed_cow_array_map()) {
+ isolate->counters()->cow_arrays_created_runtime()->Increment();
}
- return Heap::CopyJSObject(JSObject::cast(*boilerplate));
+ return isolate->heap()->CopyJSObject(JSObject::cast(*boilerplate));
}
-static MaybeObject* Runtime_CreateCatchExtensionObject(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
ASSERT(args.length() == 2);
- CONVERT_CHECKED(String, key, args[0]);
- Object* value = args[1];
- // Create a catch context extension object.
- JSFunction* constructor =
- Top::context()->global_context()->context_extension_function();
- Object* object;
- { MaybeObject* maybe_object = Heap::AllocateJSObject(constructor);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- // Assign the exception value to the catch variable and make sure
- // that the catch variable is DontDelete.
- { MaybeObject* maybe_value =
- // Passing non-strict per ECMA-262 5th Ed. 12.14. Catch, bullet #4.
- JSObject::cast(object)->SetProperty(
- key, value, DONT_DELETE, kNonStrictMode);
- if (!maybe_value->ToObject(&value)) return maybe_value;
- }
- return object;
+ Object* handler = args[0];
+ Object* prototype = args[1];
+ Object* used_prototype =
+ prototype->IsJSReceiver() ? prototype : isolate->heap()->null_value();
+ return isolate->heap()->AllocateJSProxy(handler, used_prototype);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSProxy) {
+ ASSERT(args.length() == 1);
+ Object* obj = args[0];
+ return obj->IsJSProxy()
+ ? isolate->heap()->true_value() : isolate->heap()->false_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) {
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(JSProxy, proxy, args[0]);
+ return proxy->handler();
}
-static MaybeObject* Runtime_ClassOf(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
Object* obj = args[0];
- if (!obj->IsJSObject()) return Heap::null_value();
+ if (!obj->IsJSObject()) return isolate->heap()->null_value();
return JSObject::cast(obj)->class_name();
}
-static MaybeObject* Runtime_IsInPrototypeChain(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+ Object* obj = args[0];
+ obj = obj->GetPrototype();
+ while (obj->IsJSObject() &&
+ JSObject::cast(obj)->map()->is_hidden_prototype()) {
+ obj = obj->GetPrototype();
+ }
+ return obj;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
// See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8).
@@ -575,15 +645,15 @@ static MaybeObject* Runtime_IsInPrototypeChain(Arguments args) {
Object* V = args[1];
while (true) {
Object* prototype = V->GetPrototype();
- if (prototype->IsNull()) return Heap::false_value();
- if (O == prototype) return Heap::true_value();
+ if (prototype->IsNull()) return isolate->heap()->false_value();
+ if (O == prototype) return isolate->heap()->true_value();
V = prototype;
}
}
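Runtime_IsInPrototypeChain is the textbook chain walk from ECMA-262 section 15.3.5.3: follow prototype links from V until O appears or the chain ends in null. The same loop over a plain linked structure, with illustrative types:

#include <cassert>
#include <cstddef>

struct Obj { const Obj* prototype; };

// Follow prototype links from V until O is found (true) or the chain
// ends in null (false); this mirrors the loop in Runtime_IsInPrototypeChain.
bool IsInPrototypeChain(const Obj* O, const Obj* V) {
  while (true) {
    const Obj* prototype = V->prototype;
    if (prototype == NULL) return false;
    if (prototype == O) return true;
    V = prototype;
  }
}

int main() {
  Obj root = { NULL };
  Obj mid = { &root };
  Obj leaf = { &mid };
  assert(IsInPrototypeChain(&root, &leaf));
  assert(!IsInPrototypeChain(&leaf, &root));
  return 0;
}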
// Inserts an object as the hidden prototype of another object.
-static MaybeObject* Runtime_SetHiddenPrototype(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHiddenPrototype) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSObject, jsobject, args[0]);
@@ -621,15 +691,15 @@ static MaybeObject* Runtime_SetHiddenPrototype(Arguments args) {
new_map->set_prototype(proto);
jsobject->set_map(new_map);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_IsConstructCall(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConstructCall) {
NoHandleAllocation ha;
ASSERT(args.length() == 0);
- JavaScriptFrameIterator it;
- return Heap::ToBoolean(it.frame()->IsConstructor());
+ JavaScriptFrameIterator it(isolate);
+ return isolate->heap()->ToBoolean(it.frame()->IsConstructor());
}
@@ -676,9 +746,10 @@ static bool CheckAccess(JSObject* obj,
JSObject* holder = result->holder();
JSObject* current = obj;
+ Isolate* isolate = obj->GetIsolate();
while (true) {
if (current->IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(current, name, access_type)) {
+ !isolate->MayNamedAccess(current, name, access_type)) {
// Access check callback denied the access, but some properties
// can have special permissions which override the callback's decision
// (see v8::AccessControl).
@@ -715,7 +786,7 @@ static bool CheckAccess(JSObject* obj,
break;
}
- Top::ReportFailedAccessCheck(current, access_type);
+ isolate->ReportFailedAccessCheck(current, access_type);
return false;
}
@@ -725,7 +796,7 @@ static bool CheckElementAccess(JSObject* obj,
uint32_t index,
v8::AccessType access_type) {
if (obj->IsAccessCheckNeeded() &&
- !Top::MayIndexedAccess(obj, index, access_type)) {
+ !obj->GetIsolate()->MayIndexedAccess(obj, index, access_type)) {
return false;
}
@@ -752,11 +823,12 @@ enum PropertyDescriptorIndices {
// [false, value, Writable, Enumerable, Configurable]
// if args[1] is an accessor on args[0]
// [true, GetFunction, SetFunction, Enumerable, Configurable]
-static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
ASSERT(args.length() == 2);
- HandleScope scope;
- Handle<FixedArray> elms = Factory::NewFixedArray(DESCRIPTOR_SIZE);
- Handle<JSArray> desc = Factory::NewJSArrayWithElements(elms);
+ Heap* heap = isolate->heap();
+ HandleScope scope(isolate);
+ Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
+ Handle<JSArray> desc = isolate->factory()->NewJSArrayWithElements(elms);
LookupResult result;
CONVERT_ARG_CHECKED(JSObject, obj, 0);
CONVERT_ARG_CHECKED(String, name, 1);
@@ -766,7 +838,7 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
if (name->AsArrayIndex(&index)) {
switch (obj->HasLocalElement(index)) {
case JSObject::UNDEFINED_ELEMENT:
- return Heap::undefined_value();
+ return heap->undefined_value();
case JSObject::STRING_CHARACTER_ELEMENT: {
// Special handling of string objects according to ECMAScript 5
@@ -777,22 +849,23 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
Handle<String> str(String::cast(js_value->value()));
Handle<String> substr = SubString(str, index, index + 1, NOT_TENURED);
- elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(IS_ACCESSOR_INDEX, heap->false_value());
elms->set(VALUE_INDEX, *substr);
- elms->set(WRITABLE_INDEX, Heap::false_value());
- elms->set(ENUMERABLE_INDEX, Heap::false_value());
- elms->set(CONFIGURABLE_INDEX, Heap::false_value());
+ elms->set(WRITABLE_INDEX, heap->false_value());
+ elms->set(ENUMERABLE_INDEX, heap->false_value());
+ elms->set(CONFIGURABLE_INDEX, heap->false_value());
return *desc;
}
case JSObject::INTERCEPTED_ELEMENT:
case JSObject::FAST_ELEMENT: {
- elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(IS_ACCESSOR_INDEX, heap->false_value());
Handle<Object> value = GetElement(obj, index);
+ RETURN_IF_EMPTY_HANDLE(isolate, value);
elms->set(VALUE_INDEX, *value);
- elms->set(WRITABLE_INDEX, Heap::true_value());
- elms->set(ENUMERABLE_INDEX, Heap::true_value());
- elms->set(CONFIGURABLE_INDEX, Heap::true_value());
+ elms->set(WRITABLE_INDEX, heap->true_value());
+ elms->set(ENUMERABLE_INDEX, heap->true_value());
+ elms->set(CONFIGURABLE_INDEX, heap->true_value());
return *desc;
}
@@ -800,11 +873,17 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
Handle<JSObject> holder = obj;
if (obj->IsJSGlobalProxy()) {
Object* proto = obj->GetPrototype();
- if (proto->IsNull()) return Heap::undefined_value();
+ if (proto->IsNull()) return heap->undefined_value();
ASSERT(proto->IsJSGlobalObject());
holder = Handle<JSObject>(JSObject::cast(proto));
}
- NumberDictionary* dictionary = holder->element_dictionary();
+ FixedArray* elements = FixedArray::cast(holder->elements());
+ NumberDictionary* dictionary = NULL;
+ if (elements->map() == heap->non_strict_arguments_elements_map()) {
+ dictionary = NumberDictionary::cast(elements->get(1));
+ } else {
+ dictionary = NumberDictionary::cast(elements);
+ }
int entry = dictionary->FindEntry(index);
ASSERT(entry != NumberDictionary::kNotFound);
PropertyDetails details = dictionary->DetailsAt(entry);
@@ -813,7 +892,7 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
// This is an accessor property with getter and/or setter.
FixedArray* callbacks =
FixedArray::cast(dictionary->ValueAt(entry));
- elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
+ elms->set(IS_ACCESSOR_INDEX, heap->true_value());
if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
elms->set(GETTER_INDEX, callbacks->get(0));
}
@@ -824,18 +903,19 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
}
case NORMAL: {
// This is a data property.
- elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(IS_ACCESSOR_INDEX, heap->false_value());
Handle<Object> value = GetElement(obj, index);
+ ASSERT(!value.is_null());
elms->set(VALUE_INDEX, *value);
- elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
+ elms->set(WRITABLE_INDEX, heap->ToBoolean(!details.IsReadOnly()));
break;
}
default:
UNREACHABLE();
break;
}
- elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!details.IsDontEnum()));
- elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!details.IsDontDelete()));
+ elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!details.IsDontEnum()));
+ elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!details.IsDontDelete()));
return *desc;
}
}
@@ -845,22 +925,22 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
GetOwnPropertyImplementation(*obj, *name, &result);
if (!result.IsProperty()) {
- return Heap::undefined_value();
+ return heap->undefined_value();
}
if (!CheckAccess(*obj, *name, &result, v8::ACCESS_HAS)) {
- return Heap::false_value();
+ return heap->false_value();
}
- elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!result.IsDontEnum()));
- elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!result.IsDontDelete()));
+ elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!result.IsDontEnum()));
+ elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!result.IsDontDelete()));
bool is_js_accessor = (result.type() == CALLBACKS) &&
(result.GetCallbackObject()->IsFixedArray());
if (is_js_accessor) {
// __defineGetter__/__defineSetter__ callback.
- elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
+ elms->set(IS_ACCESSOR_INDEX, heap->true_value());
FixedArray* structure = FixedArray::cast(result.GetCallbackObject());
if (CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
@@ -870,8 +950,8 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
elms->set(SETTER_INDEX, structure->get(1));
}
} else {
- elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
- elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
+ elms->set(IS_ACCESSOR_INDEX, heap->false_value());
+ elms->set(WRITABLE_INDEX, heap->ToBoolean(!result.IsReadOnly()));
PropertyAttributes attrs;
Object* value;
@@ -886,29 +966,29 @@ static MaybeObject* Runtime_GetOwnProperty(Arguments args) {
}
-static MaybeObject* Runtime_PreventExtensions(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSObject, obj, args[0]);
return obj->PreventExtensions();
}
-static MaybeObject* Runtime_IsExtensible(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsExtensible) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSObject, obj, args[0]);
if (obj->IsJSGlobalProxy()) {
Object* proto = obj->GetPrototype();
- if (proto->IsNull()) return Heap::false_value();
+ if (proto->IsNull()) return isolate->heap()->false_value();
ASSERT(proto->IsJSGlobalObject());
obj = JSObject::cast(proto);
}
- return obj->map()->is_extensible() ? Heap::true_value()
- : Heap::false_value();
+ return obj->map()->is_extensible() ? isolate->heap()->true_value()
+ : isolate->heap()->false_value();
}
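// Runtime_IsExtensible above and Runtime_GetOwnProperty earlier share an
// idiom: unwrap the global proxy before inspecting the object, since the
// script-visible global is a JSGlobalProxy whose prototype is the real
// JSGlobalObject. A minimal sketch of that idiom as a hypothetical helper
// (not part of the source):
//
//   static JSObject* UnwrapGlobalProxy(JSObject* obj) {
//     if (!obj->IsJSGlobalProxy()) return obj;
//     Object* proto = obj->GetPrototype();
//     if (proto->IsNull()) return NULL;  // detached global proxy
//     ASSERT(proto->IsJSGlobalObject());
//     return JSObject::cast(proto);
//   }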
-static MaybeObject* Runtime_RegExpCompile(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSRegExp, re, 0);
CONVERT_ARG_CHECKED(String, pattern, 1);
@@ -919,23 +999,23 @@ static MaybeObject* Runtime_RegExpCompile(Arguments args) {
}
-static MaybeObject* Runtime_CreateApiFunction(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateApiFunction) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(FunctionTemplateInfo, data, 0);
- return *Factory::CreateApiFunction(data);
+ return *isolate->factory()->CreateApiFunction(data);
}
-static MaybeObject* Runtime_IsTemplate(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsTemplate) {
ASSERT(args.length() == 1);
Object* arg = args[0];
bool result = arg->IsObjectTemplateInfo() || arg->IsFunctionTemplateInfo();
- return Heap::ToBoolean(result);
+ return isolate->heap()->ToBoolean(result);
}
-static MaybeObject* Runtime_GetTemplateField(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(HeapObject, templ, args[0]);
CONVERT_CHECKED(Smi, field, args[1]);
@@ -954,7 +1034,7 @@ static MaybeObject* Runtime_GetTemplateField(Arguments args) {
}
-static MaybeObject* Runtime_DisableAccessChecks(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(HeapObject, object, args[0]);
Map* old_map = object->map();
@@ -969,11 +1049,12 @@ static MaybeObject* Runtime_DisableAccessChecks(Arguments args) {
Map::cast(new_map)->set_is_access_check_needed(false);
object->set_map(Map::cast(new_map));
}
- return needs_access_checks ? Heap::true_value() : Heap::false_value();
+ return needs_access_checks ? isolate->heap()->true_value()
+ : isolate->heap()->false_value();
}
-static MaybeObject* Runtime_EnableAccessChecks(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(HeapObject, object, args[0]);
Map* old_map = object->map();
@@ -987,30 +1068,33 @@ static MaybeObject* Runtime_EnableAccessChecks(Arguments args) {
Map::cast(new_map)->set_is_access_check_needed(true);
object->set_map(Map::cast(new_map));
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static Failure* ThrowRedeclarationError(const char* type, Handle<String> name) {
- HandleScope scope;
- Handle<Object> type_handle = Factory::NewStringFromAscii(CStrVector(type));
+static Failure* ThrowRedeclarationError(Isolate* isolate,
+ const char* type,
+ Handle<String> name) {
+ HandleScope scope(isolate);
+ Handle<Object> type_handle =
+ isolate->factory()->NewStringFromAscii(CStrVector(type));
Handle<Object> args[2] = { type_handle, name };
Handle<Object> error =
- Factory::NewTypeError("redeclaration", HandleVector(args, 2));
- return Top::Throw(*error);
+ isolate->factory()->NewTypeError("redeclaration", HandleVector(args, 2));
+ return isolate->Throw(*error);
}
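// Runtime functions report exceptions through return values, not C++
// exceptions: isolate->Throw() records the pending exception and yields a
// Failure*, and fallible allocations come back as MaybeObject*. The unwrap
// convention used throughout this file:
//
//   Object* obj;
//   { MaybeObject* maybe = isolate->heap()->AllocateJSObject(constructor);
//     if (!maybe->ToObject(&obj)) return maybe;  // propagate the Failure
//   }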
-static MaybeObject* Runtime_DeclareGlobals(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
ASSERT(args.length() == 4);
- HandleScope scope;
- Handle<GlobalObject> global = Handle<GlobalObject>(Top::context()->global());
+ HandleScope scope(isolate);
+ Handle<GlobalObject> global = Handle<GlobalObject>(
+ isolate->context()->global());
Handle<Context> context = args.at<Context>(0);
CONVERT_ARG_CHECKED(FixedArray, pairs, 1);
- bool is_eval = Smi::cast(args[2])->value() == 1;
- StrictModeFlag strict_mode =
- static_cast<StrictModeFlag>(Smi::cast(args[3])->value());
+ bool is_eval = args.smi_at(2) == 1;
+ StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
// Compute the property attributes. According to ECMA-262, section
@@ -1022,9 +1106,9 @@ static MaybeObject* Runtime_DeclareGlobals(Arguments args) {
// Traverse the name/value pairs and set the properties.
int length = pairs->length();
for (int i = 0; i < length; i += 2) {
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<String> name(String::cast(pairs->get(i)));
- Handle<Object> value(pairs->get(i + 1));
+ Handle<Object> value(pairs->get(i + 1), isolate);
// We have to declare a global const property. To capture we only
// assign to it when evaluating the assignment for "const x =
@@ -1054,7 +1138,7 @@ static MaybeObject* Runtime_DeclareGlobals(Arguments args) {
// Check if the existing property conflicts with regards to const.
if (is_local && (is_read_only || is_const_property)) {
const char* type = (is_read_only) ? "const" : "var";
- return ThrowRedeclarationError(type, name);
+ return ThrowRedeclarationError(isolate, type, name);
};
// The property already exists without conflicting: Go to
// the next declaration.
@@ -1066,12 +1150,12 @@ static MaybeObject* Runtime_DeclareGlobals(Arguments args) {
// For const properties, we treat a callback with this name
// even in the prototype as a conflicting declaration.
if (is_const_property && (lookup.type() == CALLBACKS)) {
- return ThrowRedeclarationError("const", name);
+ return ThrowRedeclarationError(isolate, "const", name);
}
// Otherwise, we check for locally conflicting declarations.
if (is_local && (is_read_only || is_const_property)) {
const char* type = (is_read_only) ? "const" : "var";
- return ThrowRedeclarationError(type, name);
+ return ThrowRedeclarationError(isolate, type, name);
}
// The property already exists without conflicting: Go to
// the next declaration.
@@ -1083,7 +1167,9 @@ static MaybeObject* Runtime_DeclareGlobals(Arguments args) {
Handle<SharedFunctionInfo> shared =
Handle<SharedFunctionInfo>::cast(value);
Handle<JSFunction> function =
- Factory::NewFunctionFromSharedFunctionInfo(shared, context, TENURED);
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
+ context,
+ TENURED);
value = function;
}
@@ -1105,7 +1191,7 @@ static MaybeObject* Runtime_DeclareGlobals(Arguments args) {
(lookup.type() != INTERCEPTOR) &&
(lookup.IsReadOnly() || is_const_property)) {
const char* type = (lookup.IsReadOnly()) ? "const" : "var";
- return ThrowRedeclarationError(type, name);
+ return ThrowRedeclarationError(isolate, type, name);
}
// Safari does not allow the invocation of callback setters for
@@ -1121,12 +1207,14 @@ static MaybeObject* Runtime_DeclareGlobals(Arguments args) {
attributes = static_cast<PropertyAttributes>(
attributes | (lookup.GetAttributes() & DONT_DELETE));
}
- RETURN_IF_EMPTY_HANDLE(SetLocalPropertyIgnoreAttributes(global,
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ SetLocalPropertyIgnoreAttributes(global,
name,
value,
attributes));
} else {
- RETURN_IF_EMPTY_HANDLE(SetProperty(global,
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ SetProperty(global,
name,
value,
attributes,
@@ -1134,24 +1222,23 @@ static MaybeObject* Runtime_DeclareGlobals(Arguments args) {
}
}
- ASSERT(!Top::has_pending_exception());
- return Heap::undefined_value();
+ ASSERT(!isolate->has_pending_exception());
+ return isolate->heap()->undefined_value();
}
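// The `pairs` fixed array consumed above interleaves names and values, as
// the `i += 2` loop assumes: pairs->get(i) is a declared name and
// pairs->get(i + 1) its value - a SharedFunctionInfo for function
// declarations, or a placeholder for plain variable declarations (an
// assumption based on the is_const/function handling in this function).
// Sketch for `var x; function f() {}`:
//
//   pairs == [ "x", <placeholder>, "f", <SharedFunctionInfo for f> ]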
-static MaybeObject* Runtime_DeclareContextSlot(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(Context, context, 0);
Handle<String> name(String::cast(args[1]));
- PropertyAttributes mode =
- static_cast<PropertyAttributes>(Smi::cast(args[2])->value());
+ PropertyAttributes mode = static_cast<PropertyAttributes>(args.smi_at(2));
RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
- Handle<Object> initial_value(args[3]);
+ Handle<Object> initial_value(args[3], isolate);
- // Declarations are always done in the function context.
- context = Handle<Context>(context->fcontext());
+ // Declarations are always done in a function or global context.
+ context = Handle<Context>(context->declaration_context());
int index;
PropertyAttributes attributes;
@@ -1167,7 +1254,7 @@ static MaybeObject* Runtime_DeclareContextSlot(Arguments args) {
// Functions are not read-only.
ASSERT(mode != READ_ONLY || initial_value->IsTheHole());
const char* type = ((attributes & READ_ONLY) != 0) ? "const" : "var";
- return ThrowRedeclarationError(type, name);
+ return ThrowRedeclarationError(isolate, type, name);
}
// Initialize it if necessary.
@@ -1184,13 +1271,15 @@ static MaybeObject* Runtime_DeclareContextSlot(Arguments args) {
} else {
// The holder is an arguments object.
Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
- Handle<Object> result = SetElement(arguments, index, initial_value);
+ Handle<Object> result = SetElement(arguments, index, initial_value,
+ kNonStrictMode);
if (result.is_null()) return Failure::Exception();
}
} else {
// Slow case: The property is not in the FixedArray part of the context.
Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
RETURN_IF_EMPTY_HANDLE(
+ isolate,
SetProperty(context_ext, name, initial_value,
mode, kNonStrictMode));
}
@@ -1203,11 +1292,12 @@ static MaybeObject* Runtime_DeclareContextSlot(Arguments args) {
Handle<JSObject> context_ext;
if (context->has_extension()) {
// The function context's extension context exists - use it.
- context_ext = Handle<JSObject>(context->extension());
+ context_ext = Handle<JSObject>(JSObject::cast(context->extension()));
} else {
// The function context's extension context does not exist - allocate
// it.
- context_ext = Factory::NewJSObject(Top::context_extension_function());
+ context_ext = isolate->factory()->NewJSObject(
+ isolate->context_extension_function());
// And store it in the extension slot.
context->set_extension(*context_ext);
}
@@ -1217,7 +1307,7 @@ static MaybeObject* Runtime_DeclareContextSlot(Arguments args) {
// or undefined, and use the correct mode (e.g. READ_ONLY attribute for
// constant declarations).
ASSERT(!context_ext->HasLocalProperty(*name));
- Handle<Object> value(Heap::undefined_value());
+ Handle<Object> value(isolate->heap()->undefined_value(), isolate);
if (*initial_value != NULL) value = initial_value;
// Declaring a const context slot is a conflicting declaration if
// there is a callback with that name in a prototype. It is
@@ -1230,18 +1320,19 @@ static MaybeObject* Runtime_DeclareContextSlot(Arguments args) {
LookupResult lookup;
context_ext->Lookup(*name, &lookup);
if (lookup.IsProperty() && (lookup.type() == CALLBACKS)) {
- return ThrowRedeclarationError("const", name);
+ return ThrowRedeclarationError(isolate, "const", name);
}
}
- RETURN_IF_EMPTY_HANDLE(SetProperty(context_ext, name, value, mode,
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ SetProperty(context_ext, name, value, mode,
kNonStrictMode));
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
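// declaration_context(), which this upgrade substitutes for fcontext(),
// walks out of intermediate scopes to the closest function or global
// context, where declarations must actually be recorded. A hedged sketch
// of the resolution, with assumed predicate names:
//
//   Context* ctx = this;
//   while (!ctx->IsFunctionContext() && !ctx->IsGlobalContext()) {
//     ctx = ctx->previous();
//   }
//   return ctx;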
-static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
NoHandleAllocation nha;
// args[0] == name
// args[1] == strict_mode
@@ -1253,10 +1344,9 @@ static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) {
bool assign = args.length() == 3;
CONVERT_ARG_CHECKED(String, name, 0);
- GlobalObject* global = Top::context()->global();
+ GlobalObject* global = isolate->context()->global();
RUNTIME_ASSERT(args[1]->IsSmi());
- StrictModeFlag strict_mode =
- static_cast<StrictModeFlag>(Smi::cast(args[1])->value());
+ StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(1));
ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
// According to ECMA-262, section 12.2, page 62, the property must
@@ -1279,8 +1369,8 @@ static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) {
if (lookup.IsReadOnly()) {
// If we found readonly property on one of hidden prototypes,
// just shadow it.
- if (real_holder != Top::context()->global()) break;
- return ThrowRedeclarationError("const", name);
+ if (real_holder != isolate->context()->global()) break;
+ return ThrowRedeclarationError(isolate, "const", name);
}
// Determine if this is a redeclaration of an intercepted read-only
@@ -1288,7 +1378,7 @@ static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) {
bool found = true;
PropertyType type = lookup.type();
if (type == INTERCEPTOR) {
- HandleScope handle_scope;
+ HandleScope handle_scope(isolate);
Handle<JSObject> holder(real_holder);
PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
real_holder = *holder;
@@ -1301,19 +1391,19 @@ static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) {
// overwrite it with a variable declaration we must throw a
// re-declaration error. However if we found readonly property
// on one of hidden prototypes, just shadow it.
- if (real_holder != Top::context()->global()) break;
- return ThrowRedeclarationError("const", name);
+ if (real_holder != isolate->context()->global()) break;
+ return ThrowRedeclarationError(isolate, "const", name);
}
}
if (found && !assign) {
// The global property is there and we're not assigning any value
// to it. Just return.
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Assign the value (or undefined) to the property.
- Object* value = (assign) ? args[2] : Heap::undefined_value();
+ Object* value = (assign) ? args[2] : isolate->heap()->undefined_value();
return real_holder->SetProperty(
&lookup, *name, value, attributes, strict_mode);
}
@@ -1328,15 +1418,15 @@ static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) {
real_holder = JSObject::cast(proto);
}
- global = Top::context()->global();
+ global = isolate->context()->global();
if (assign) {
return global->SetProperty(*name, args[2], attributes, strict_mode);
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_InitializeConstGlobal(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// All constants are declared with an initial value. The name
// of the constant is the first argument and the initial value
// is the second.
@@ -1345,7 +1435,7 @@ static MaybeObject* Runtime_InitializeConstGlobal(Arguments args) {
Handle<Object> value = args.at<Object>(1);
// Get the current global object from top.
- GlobalObject* global = Top::context()->global();
+ GlobalObject* global = isolate->context()->global();
// According to ECMA-262, section 12.2, page 62, the property must
// not be deletable. Since it's a const, it must be READ_ONLY too.
@@ -1370,7 +1460,7 @@ static MaybeObject* Runtime_InitializeConstGlobal(Arguments args) {
// need to ask it for the property attributes.
if (!lookup.IsReadOnly()) {
if (lookup.type() != INTERCEPTOR) {
- return ThrowRedeclarationError("var", name);
+ return ThrowRedeclarationError(isolate, "var", name);
}
PropertyAttributes intercepted = global->GetPropertyAttribute(*name);
@@ -1378,20 +1468,21 @@ static MaybeObject* Runtime_InitializeConstGlobal(Arguments args) {
// Throw re-declaration error if the intercepted property is present
// but not read-only.
if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
- return ThrowRedeclarationError("var", name);
+ return ThrowRedeclarationError(isolate, "var", name);
}
// Restore global object from context (in case of GC) and continue
// with setting the value because the property is either absent or
// read-only. We also have to redo the lookup.
- HandleScope handle_scope;
- Handle<GlobalObject> global(Top::context()->global());
+ HandleScope handle_scope(isolate);
+ Handle<GlobalObject> global(isolate->context()->global());
// BUG 1213575: Handle the case where we have to set a read-only
// property through an interceptor and only do it if it's
// uninitialized, e.g. the hole. Nirk...
// Passing non-strict mode because the property is writable.
- RETURN_IF_EMPTY_HANDLE(SetProperty(global,
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ SetProperty(global,
name,
value,
attributes,
@@ -1425,17 +1516,17 @@ static MaybeObject* Runtime_InitializeConstGlobal(Arguments args) {
}
-static MaybeObject* Runtime_InitializeConstContextSlot(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- Handle<Object> value(args[0]);
+ Handle<Object> value(args[0], isolate);
ASSERT(!value->IsTheHole());
CONVERT_ARG_CHECKED(Context, context, 1);
Handle<String> name(String::cast(args[2]));
- // Initializations are always done in the function context.
- context = Handle<Context>(context->fcontext());
+ // Initializations are always done in a function or global context.
+ context = Handle<Context>(context->declaration_context());
int index;
PropertyAttributes attributes;
@@ -1456,20 +1547,20 @@ static MaybeObject* Runtime_InitializeConstContextSlot(Arguments args) {
// In that case, the initialization behaves like a normal assignment
// to property 'x'.
if (index >= 0) {
- // Property was found in a context.
if (holder->IsContext()) {
- // The holder cannot be the function context. If it is, there
- // should have been a const redeclaration error when declaring
- // the const property.
- ASSERT(!holder.is_identical_to(context));
- if ((attributes & READ_ONLY) == 0) {
- Handle<Context>::cast(holder)->set(index, *value);
+ // Property was found in a context. Perform the assignment if we
+ // found some non-constant or an uninitialized constant.
+ Handle<Context> context = Handle<Context>::cast(holder);
+ if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
+ context->set(index, *value);
}
} else {
// The holder is an arguments object.
ASSERT((attributes & READ_ONLY) == 0);
Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
- SetElement(arguments, index, value);
+ RETURN_IF_EMPTY_HANDLE(
+ isolate,
+ SetElement(arguments, index, value, kNonStrictMode));
}
return *value;
}
@@ -1477,9 +1568,11 @@ static MaybeObject* Runtime_InitializeConstContextSlot(Arguments args) {
// The property could not be found, so we introduce it in the global
// context.
if (attributes == ABSENT) {
- Handle<JSObject> global = Handle<JSObject>(Top::context()->global());
+ Handle<JSObject> global = Handle<JSObject>(
+ isolate->context()->global());
// Strict mode not needed (const disallowed in strict mode).
RETURN_IF_EMPTY_HANDLE(
+ isolate,
SetProperty(global, name, value, NONE, kNonStrictMode));
return *value;
}
@@ -1519,6 +1612,7 @@ static MaybeObject* Runtime_InitializeConstContextSlot(Arguments args) {
if ((attributes & READ_ONLY) == 0) {
// Strict mode not needed (const disallowed in strict mode).
RETURN_IF_EMPTY_HANDLE(
+ isolate,
SetProperty(context_ext, name, value, attributes, kNonStrictMode));
}
}
@@ -1527,12 +1621,12 @@ static MaybeObject* Runtime_InitializeConstContextSlot(Arguments args) {
}
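// Const context slots are declared as the hole; the hole check added above
// is what distinguishes the first initialization of `const x` (performed
// even though the slot is READ_ONLY) from a later reassignment (which
// leaves the slot unchanged). Sketch of the state transition:
//
//   declare:     context->set(index, the_hole_value)
//   initialize:  if (!READ_ONLY || context->get(index)->IsTheHole())
//                  context->set(index, *value)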
-static MaybeObject* Runtime_OptimizeObjectForAddingMultipleProperties(
- Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*,
+ Runtime_OptimizeObjectForAddingMultipleProperties) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, object, 0);
- CONVERT_SMI_CHECKED(properties, args[1]);
+ CONVERT_SMI_ARG_CHECKED(properties, 1);
if (object->HasFastProperties()) {
NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
}
@@ -1540,19 +1634,19 @@ static MaybeObject* Runtime_OptimizeObjectForAddingMultipleProperties(
}
-static MaybeObject* Runtime_RegExpExec(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_CHECKED(String, subject, 1);
// Due to the way the JS calls are constructed this must be less than the
// length of a string, i.e. it is always a Smi. We check anyway for security.
- CONVERT_SMI_CHECKED(index, args[2]);
+ CONVERT_SMI_ARG_CHECKED(index, 2);
CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
RUNTIME_ASSERT(last_match_info->HasFastElements());
RUNTIME_ASSERT(index >= 0);
RUNTIME_ASSERT(index <= subject->length());
- Counters::regexp_entry_runtime.Increment();
+ isolate->counters()->regexp_entry_runtime()->Increment();
Handle<Object> result = RegExpImpl::Exec(regexp,
subject,
index,
@@ -1562,31 +1656,30 @@ static MaybeObject* Runtime_RegExpExec(Arguments args) {
}
-static MaybeObject* Runtime_RegExpConstructResult(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
ASSERT(args.length() == 3);
- CONVERT_SMI_CHECKED(elements_count, args[0]);
+ CONVERT_SMI_ARG_CHECKED(elements_count, 0);
if (elements_count > JSArray::kMaxFastElementsLength) {
- return Top::ThrowIllegalOperation();
+ return isolate->ThrowIllegalOperation();
}
Object* new_object;
{ MaybeObject* maybe_new_object =
- Heap::AllocateFixedArrayWithHoles(elements_count);
+ isolate->heap()->AllocateFixedArrayWithHoles(elements_count);
if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
}
FixedArray* elements = FixedArray::cast(new_object);
- { MaybeObject* maybe_new_object = Heap::AllocateRaw(JSRegExpResult::kSize,
- NEW_SPACE,
- OLD_POINTER_SPACE);
+ { MaybeObject* maybe_new_object = isolate->heap()->AllocateRaw(
+ JSRegExpResult::kSize, NEW_SPACE, OLD_POINTER_SPACE);
if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
}
{
AssertNoAllocation no_gc;
- HandleScope scope;
+ HandleScope scope(isolate);
reinterpret_cast<HeapObject*>(new_object)->
- set_map(Top::global_context()->regexp_result_map());
+ set_map(isolate->global_context()->regexp_result_map());
}
JSArray* array = JSArray::cast(new_object);
- array->set_properties(Heap::empty_fixed_array());
+ array->set_properties(isolate->heap()->empty_fixed_array());
array->set_elements(elements);
array->set_length(Smi::FromInt(elements_count));
// Write in-object properties after the length of the array.
@@ -1596,20 +1689,20 @@ static MaybeObject* Runtime_RegExpConstructResult(Arguments args) {
}
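// Building the result manually (AllocateRaw plus set_map) creates a JSArray
// without calling back into JS. The extra space that makes JSRegExpResult
// larger than a plain JSArray holds its `index` and `input` in-object
// properties - the ones written "after the length of the array" above.
// Assumed layout:
//
//   [ map | properties | elements | length | index | input ]
//          JSArray fields                    JSRegExpResult extras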
-static MaybeObject* Runtime_RegExpInitializeObject(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
AssertNoAllocation no_alloc;
ASSERT(args.length() == 5);
CONVERT_CHECKED(JSRegExp, regexp, args[0]);
CONVERT_CHECKED(String, source, args[1]);
Object* global = args[2];
- if (!global->IsTrue()) global = Heap::false_value();
+ if (!global->IsTrue()) global = isolate->heap()->false_value();
Object* ignoreCase = args[3];
- if (!ignoreCase->IsTrue()) ignoreCase = Heap::false_value();
+ if (!ignoreCase->IsTrue()) ignoreCase = isolate->heap()->false_value();
Object* multiline = args[4];
- if (!multiline->IsTrue()) multiline = Heap::false_value();
+ if (!multiline->IsTrue()) multiline = isolate->heap()->false_value();
Map* map = regexp->map();
Object* constructor = map->constructor();
@@ -1628,33 +1721,32 @@ static MaybeObject* Runtime_RegExpInitializeObject(Arguments args) {
return regexp;
}
- // Map has changed, so use generic, but slower, method. Since these
- // properties were all added as DONT_DELETE they must be present and
- // normal so no failures can be expected.
+ // Map has changed, so use the generic, but slower, method.
PropertyAttributes final =
static_cast<PropertyAttributes>(READ_ONLY | DONT_ENUM | DONT_DELETE);
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ Heap* heap = isolate->heap();
MaybeObject* result;
- result = regexp->SetLocalPropertyIgnoreAttributes(Heap::source_symbol(),
+ result = regexp->SetLocalPropertyIgnoreAttributes(heap->source_symbol(),
source,
final);
ASSERT(!result->IsFailure());
- result = regexp->SetLocalPropertyIgnoreAttributes(Heap::global_symbol(),
+ result = regexp->SetLocalPropertyIgnoreAttributes(heap->global_symbol(),
global,
final);
ASSERT(!result->IsFailure());
result =
- regexp->SetLocalPropertyIgnoreAttributes(Heap::ignore_case_symbol(),
+ regexp->SetLocalPropertyIgnoreAttributes(heap->ignore_case_symbol(),
ignoreCase,
final);
ASSERT(!result->IsFailure());
- result = regexp->SetLocalPropertyIgnoreAttributes(Heap::multiline_symbol(),
+ result = regexp->SetLocalPropertyIgnoreAttributes(heap->multiline_symbol(),
multiline,
final);
ASSERT(!result->IsFailure());
result =
- regexp->SetLocalPropertyIgnoreAttributes(Heap::last_index_symbol(),
+ regexp->SetLocalPropertyIgnoreAttributes(heap->last_index_symbol(),
Smi::FromInt(0),
writable);
ASSERT(!result->IsFailure());
@@ -1663,62 +1755,65 @@ static MaybeObject* Runtime_RegExpInitializeObject(Arguments args) {
}
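// The `return regexp;` above is the end of the fast path: while the instance
// still has the constructor's initial map, the five fields are written
// straight into in-object slots (code elided by this hunk). A hedged sketch
// of that branch, assuming the JSRegExp field indices of this version:
//
//   if (map == expected_initial_map) {
//     regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, source);
//     regexp->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, global);
//     // ... ignoreCase, multiline, lastIndex ...
//     return regexp;
//   }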
-static MaybeObject* Runtime_FinishArrayPrototypeSetup(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FinishArrayPrototypeSetup) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSArray, prototype, 0);
// This is necessary to enable fast checks for absence of elements
// on Array.prototype and below.
- prototype->set_elements(Heap::empty_fixed_array());
+ prototype->set_elements(isolate->heap()->empty_fixed_array());
return Smi::FromInt(0);
}
-static Handle<JSFunction> InstallBuiltin(Handle<JSObject> holder,
+static Handle<JSFunction> InstallBuiltin(Isolate* isolate,
+ Handle<JSObject> holder,
const char* name,
Builtins::Name builtin_name) {
- Handle<String> key = Factory::LookupAsciiSymbol(name);
- Handle<Code> code(Builtins::builtin(builtin_name));
- Handle<JSFunction> optimized = Factory::NewFunction(key,
- JS_OBJECT_TYPE,
- JSObject::kHeaderSize,
- code,
- false);
+ Handle<String> key = isolate->factory()->LookupAsciiSymbol(name);
+ Handle<Code> code(isolate->builtins()->builtin(builtin_name));
+ Handle<JSFunction> optimized =
+ isolate->factory()->NewFunction(key,
+ JS_OBJECT_TYPE,
+ JSObject::kHeaderSize,
+ code,
+ false);
optimized->shared()->DontAdaptArguments();
SetProperty(holder, key, optimized, NONE, kStrictMode);
return optimized;
}
-static MaybeObject* Runtime_SpecialArrayFunctions(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, holder, 0);
- InstallBuiltin(holder, "pop", Builtins::ArrayPop);
- InstallBuiltin(holder, "push", Builtins::ArrayPush);
- InstallBuiltin(holder, "shift", Builtins::ArrayShift);
- InstallBuiltin(holder, "unshift", Builtins::ArrayUnshift);
- InstallBuiltin(holder, "slice", Builtins::ArraySlice);
- InstallBuiltin(holder, "splice", Builtins::ArraySplice);
- InstallBuiltin(holder, "concat", Builtins::ArrayConcat);
+ InstallBuiltin(isolate, holder, "pop", Builtins::kArrayPop);
+ InstallBuiltin(isolate, holder, "push", Builtins::kArrayPush);
+ InstallBuiltin(isolate, holder, "shift", Builtins::kArrayShift);
+ InstallBuiltin(isolate, holder, "unshift", Builtins::kArrayUnshift);
+ InstallBuiltin(isolate, holder, "slice", Builtins::kArraySlice);
+ InstallBuiltin(isolate, holder, "splice", Builtins::kArraySplice);
+ InstallBuiltin(isolate, holder, "concat", Builtins::kArrayConcat);
return *holder;
}
-static MaybeObject* Runtime_GetGlobalReceiver(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetGlobalReceiver) {
// Returns the real global receiver, not the builtins object.
- Context* global_context = Top::context()->global()->global_context();
+ Context* global_context =
+ isolate->context()->global()->global_context();
return global_context->global()->global_receiver();
}
-static MaybeObject* Runtime_MaterializeRegExpLiteral(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- int index = Smi::cast(args[1])->value();
+ int index = args.smi_at(1);
Handle<String> pattern = args.at<String>(2);
Handle<String> flags = args.at<String>(3);
@@ -1736,7 +1831,7 @@ static MaybeObject* Runtime_MaterializeRegExpLiteral(Arguments args) {
RegExpImpl::CreateRegExpLiteral(constructor, pattern, flags,
&has_pending_exception);
if (has_pending_exception) {
- ASSERT(Top::has_pending_exception());
+ ASSERT(isolate->has_pending_exception());
return Failure::Exception();
}
literals->set(index, *regexp);
@@ -1744,7 +1839,7 @@ static MaybeObject* Runtime_MaterializeRegExpLiteral(Arguments args) {
}
-static MaybeObject* Runtime_FunctionGetName(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -1753,44 +1848,51 @@ static MaybeObject* Runtime_FunctionGetName(Arguments args) {
}
-static MaybeObject* Runtime_FunctionSetName(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSFunction, f, args[0]);
CONVERT_CHECKED(String, name, args[1]);
f->shared()->set_name(name);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_FunctionRemovePrototype(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetBound) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+
+ CONVERT_CHECKED(JSFunction, fun, args[0]);
+ fun->shared()->set_bound(true);
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, f, args[0]);
- Object* obj;
- { MaybeObject* maybe_obj = f->RemovePrototype();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ Object* obj = f->RemovePrototype();
+ if (obj->IsFailure()) return obj;
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_FunctionGetScript(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScript) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, fun, args[0]);
- Handle<Object> script = Handle<Object>(fun->shared()->script());
- if (!script->IsScript()) return Heap::undefined_value();
+ Handle<Object> script = Handle<Object>(fun->shared()->script(), isolate);
+ if (!script->IsScript()) return isolate->heap()->undefined_value();
return *GetScriptWrapper(Handle<Script>::cast(script));
}
-static MaybeObject* Runtime_FunctionGetSourceCode(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetSourceCode) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -1799,7 +1901,7 @@ static MaybeObject* Runtime_FunctionGetSourceCode(Arguments args) {
}
-static MaybeObject* Runtime_FunctionGetScriptSourcePosition(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -1809,7 +1911,7 @@ static MaybeObject* Runtime_FunctionGetScriptSourcePosition(Arguments args) {
}
-static MaybeObject* Runtime_FunctionGetPositionForOffset(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(Code, code, args[0]);
@@ -1822,19 +1924,18 @@ static MaybeObject* Runtime_FunctionGetPositionForOffset(Arguments args) {
}
-
-static MaybeObject* Runtime_FunctionSetInstanceClassName(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSFunction, fun, args[0]);
CONVERT_CHECKED(String, name, args[1]);
fun->SetInstanceClassName(name);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_FunctionSetLength(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -1845,7 +1946,7 @@ static MaybeObject* Runtime_FunctionSetLength(Arguments args) {
}
-static MaybeObject* Runtime_FunctionSetPrototype(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -1860,26 +1961,28 @@ static MaybeObject* Runtime_FunctionSetPrototype(Arguments args) {
}
-static MaybeObject* Runtime_FunctionIsAPIFunction(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, f, args[0]);
- return f->shared()->IsApiFunction() ? Heap::true_value()
- : Heap::false_value();
+ return f->shared()->IsApiFunction() ? isolate->heap()->true_value()
+ : isolate->heap()->false_value();
}
-static MaybeObject* Runtime_FunctionIsBuiltin(Arguments args) {
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsBuiltin) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, f, args[0]);
- return f->IsBuiltin() ? Heap::true_value() : Heap::false_value();
+ return f->IsBuiltin() ? isolate->heap()->true_value() :
+ isolate->heap()->false_value();
}
-static MaybeObject* Runtime_SetCode(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, target, 0);
@@ -1911,7 +2014,7 @@ static MaybeObject* Runtime_SetCode(Arguments args) {
// SetCode is only used for built-in constructors like String,
// Array, and Object, and some web code
// doesn't like seeing source code for constructors.
- target->shared()->set_script(Heap::undefined_value());
+ target->shared()->set_script(isolate->heap()->undefined_value());
target->shared()->code()->set_optimizable(false);
// Clear the optimization hints related to the compiled code as these are no
// longer valid when the code is overwritten.
@@ -1922,7 +2025,7 @@ static MaybeObject* Runtime_SetCode(Arguments args) {
// cross context contamination.
int number_of_literals = fun->NumberOfLiterals();
Handle<FixedArray> literals =
- Factory::NewFixedArray(number_of_literals, TENURED);
+ isolate->factory()->NewFixedArray(number_of_literals, TENURED);
if (number_of_literals > 0) {
// Insert the object, regexp and array functions in the literals
// array prefix. These are the functions that will be used when
@@ -1933,7 +2036,7 @@ static MaybeObject* Runtime_SetCode(Arguments args) {
// It's okay to skip the write barrier here because the literals
// are guaranteed to be in old space.
target->set_literals(*literals, SKIP_WRITE_BARRIER);
- target->set_next_function_link(Heap::undefined_value());
+ target->set_next_function_link(isolate->heap()->undefined_value());
}
target->set_context(*context);
@@ -1941,29 +2044,30 @@ static MaybeObject* Runtime_SetCode(Arguments args) {
}
-static MaybeObject* Runtime_SetExpectedNumberOfProperties(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
- CONVERT_SMI_CHECKED(num, args[1]);
+ CONVERT_SMI_ARG_CHECKED(num, 1);
RUNTIME_ASSERT(num >= 0);
SetExpectedNofProperties(function, num);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-MUST_USE_RESULT static MaybeObject* CharFromCode(Object* char_code) {
+MUST_USE_RESULT static MaybeObject* CharFromCode(Isolate* isolate,
+ Object* char_code) {
uint32_t code;
if (char_code->ToArrayIndex(&code)) {
if (code <= 0xffff) {
- return Heap::LookupSingleCharacterStringFromCode(code);
+ return isolate->heap()->LookupSingleCharacterStringFromCode(code);
}
}
- return Heap::empty_string();
+ return isolate->heap()->empty_string();
}
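// CharFromCode maps a numeric char code to a one-character string, falling
// back to the empty string for anything outside the BMP or not a valid
// array index. Examples of the mapping implemented above:
//
//   CharFromCode(isolate, Smi::FromInt(0x41))     // -> "A"
//   CharFromCode(isolate, Smi::FromInt(0x10000))  // -> "" (above 0xffff)
//   CharFromCode(isolate, Smi::FromInt(-1))       // -> "" (not an index)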
-static MaybeObject* Runtime_StringCharCodeAt(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -1974,7 +2078,7 @@ static MaybeObject* Runtime_StringCharCodeAt(Arguments args) {
uint32_t i = 0;
if (index->IsSmi()) {
int value = Smi::cast(index)->value();
- if (value < 0) return Heap::nan_value();
+ if (value < 0) return isolate->heap()->nan_value();
i = value;
} else {
ASSERT(index->IsHeapNumber());
@@ -1992,24 +2096,24 @@ static MaybeObject* Runtime_StringCharCodeAt(Arguments args) {
subject = String::cast(flat);
if (i >= static_cast<uint32_t>(subject->length())) {
- return Heap::nan_value();
+ return isolate->heap()->nan_value();
}
return Smi::FromInt(subject->Get(i));
}
-static MaybeObject* Runtime_CharFromCode(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CharFromCode) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- return CharFromCode(args[0]);
+ return CharFromCode(isolate, args[0]);
}
class FixedArrayBuilder {
public:
- explicit FixedArrayBuilder(int initial_capacity)
- : array_(Factory::NewFixedArrayWithHoles(initial_capacity)),
+ explicit FixedArrayBuilder(Isolate* isolate, int initial_capacity)
+ : array_(isolate->factory()->NewFixedArrayWithHoles(initial_capacity)),
length_(0) {
// Require a non-zero initial size. Ensures that doubling the size to
// extend the array will work.
@@ -2039,7 +2143,7 @@ class FixedArrayBuilder {
new_length *= 2;
} while (new_length < required_length);
Handle<FixedArray> extended_array =
- Factory::NewFixedArrayWithHoles(new_length);
+ array_->GetIsolate()->factory()->NewFixedArrayWithHoles(new_length);
array_->CopyTo(0, *extended_array, 0, length_);
array_ = extended_array;
}
@@ -2070,7 +2174,7 @@ class FixedArrayBuilder {
}
Handle<JSArray> ToJSArray() {
- Handle<JSArray> result_array = Factory::NewJSArrayWithElements(array_);
+ Handle<JSArray> result_array = FACTORY->NewJSArrayWithElements(array_);
result_array->set_length(Smi::FromInt(length_));
return result_array;
}
@@ -2107,8 +2211,11 @@ typedef BitField<int,
class ReplacementStringBuilder {
public:
- ReplacementStringBuilder(Handle<String> subject, int estimated_part_count)
- : array_builder_(estimated_part_count),
+ ReplacementStringBuilder(Heap* heap,
+ Handle<String> subject,
+ int estimated_part_count)
+ : heap_(heap),
+ array_builder_(heap->isolate(), estimated_part_count),
subject_(subject),
character_count_(0),
is_ascii_(subject->IsAsciiRepresentation()) {
@@ -2160,29 +2267,29 @@ class ReplacementStringBuilder {
Handle<String> ToString() {
if (array_builder_.length() == 0) {
- return Factory::empty_string();
+ return heap_->isolate()->factory()->empty_string();
}
Handle<String> joined_string;
if (is_ascii_) {
- joined_string = NewRawAsciiString(character_count_);
+ Handle<SeqAsciiString> seq = NewRawAsciiString(character_count_);
AssertNoAllocation no_alloc;
- SeqAsciiString* seq = SeqAsciiString::cast(*joined_string);
char* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
char_buffer,
*array_builder_.array(),
array_builder_.length());
+ joined_string = Handle<String>::cast(seq);
} else {
// Non-ASCII.
- joined_string = NewRawTwoByteString(character_count_);
+ Handle<SeqTwoByteString> seq = NewRawTwoByteString(character_count_);
AssertNoAllocation no_alloc;
- SeqTwoByteString* seq = SeqTwoByteString::cast(*joined_string);
uc16* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
char_buffer,
*array_builder_.array(),
array_builder_.length());
+ joined_string = Handle<String>::cast(seq);
}
return joined_string;
}
@@ -2200,13 +2307,13 @@ class ReplacementStringBuilder {
}
private:
- Handle<String> NewRawAsciiString(int size) {
- CALL_HEAP_FUNCTION(Heap::AllocateRawAsciiString(size), String);
+ Handle<SeqAsciiString> NewRawAsciiString(int length) {
+ return heap_->isolate()->factory()->NewRawAsciiString(length);
}
- Handle<String> NewRawTwoByteString(int size) {
- CALL_HEAP_FUNCTION(Heap::AllocateRawTwoByteString(size), String);
+ Handle<SeqTwoByteString> NewRawTwoByteString(int length) {
+ return heap_->isolate()->factory()->NewRawTwoByteString(length);
}
@@ -2216,6 +2323,7 @@ class ReplacementStringBuilder {
array_builder_.Add(element);
}
+ Heap* heap_;
FixedArrayBuilder array_builder_;
Handle<String> subject_;
int character_count_;
@@ -2241,6 +2349,7 @@ class CompiledReplacement {
int parts() {
return parts_.length();
}
+
private:
enum PartType {
SUBJECT_PREFIX = 1,
@@ -2426,6 +2535,7 @@ void CompiledReplacement::Compile(Handle<String> replacement,
capture_count,
subject_length);
}
+ Isolate* isolate = replacement->GetIsolate();
// Find substrings of replacement string and create them as String objects.
int substring_index = 0;
for (int i = 0, n = parts_.length(); i < n; i++) {
@@ -2433,7 +2543,8 @@ void CompiledReplacement::Compile(Handle<String> replacement,
if (tag <= 0) { // A replacement string slice.
int from = -tag;
int to = parts_[i].data;
- replacement_substrings_.Add(Factory::NewSubString(replacement, from, to));
+ replacement_substrings_.Add(
+ isolate->factory()->NewSubString(replacement, from, to));
parts_[i].tag = REPLACEMENT_SUBSTRING;
parts_[i].data = substring_index;
substring_index++;
@@ -2486,6 +2597,7 @@ void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
+ Isolate* isolate,
String* subject,
JSRegExp* regexp,
String* replacement,
@@ -2493,7 +2605,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
ASSERT(subject->IsFlat());
ASSERT(replacement->IsFlat());
- HandleScope handles;
+ HandleScope handles(isolate);
int length = subject->length();
Handle<String> subject_handle(subject);
@@ -2514,7 +2626,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
int capture_count = regexp_handle->CaptureCount();
// CompiledReplacement uses zone allocation.
- CompilationZoneScope zone(DELETE_ON_EXIT);
+ ZoneScope zone(isolate, DELETE_ON_EXIT);
CompiledReplacement compiled_replacement;
compiled_replacement.Compile(replacement_handle,
capture_count,
@@ -2527,7 +2639,9 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
// conservatively.
int expected_parts =
(compiled_replacement.parts() + 1) * (is_global ? 4 : 1) + 1;
- ReplacementStringBuilder builder(subject_handle, expected_parts);
+ ReplacementStringBuilder builder(isolate->heap(),
+ subject_handle,
+ expected_parts);
// Index of end of last match.
int prev = 0;
@@ -2543,7 +2657,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
// so its internal buffer can safely allocate a new handle if it grows.
builder.EnsureCapacity(parts_added_per_loop);
- HandleScope loop_scope;
+ HandleScope loop_scope(isolate);
int start, end;
{
AssertNoAllocation match_info_array_is_not_in_a_handle;
@@ -2595,12 +2709,13 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
template <typename ResultSeqString>
MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
+ Isolate* isolate,
String* subject,
JSRegExp* regexp,
JSArray* last_match_info) {
ASSERT(subject->IsFlat());
- HandleScope handles;
+ HandleScope handles(isolate);
Handle<String> subject_handle(subject);
Handle<JSRegExp> regexp_handle(regexp);
@@ -2614,7 +2729,6 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
ASSERT(last_match_info_handle->HasFastElements());
- HandleScope loop_scope;
int start, end;
{
AssertNoAllocation match_info_array_is_not_in_a_handle;
@@ -2628,15 +2742,15 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
int length = subject_handle->length();
int new_length = length - (end - start);
if (new_length == 0) {
- return Heap::empty_string();
+ return isolate->heap()->empty_string();
}
Handle<ResultSeqString> answer;
if (ResultSeqString::kHasAsciiEncoding) {
- answer =
- Handle<ResultSeqString>::cast(Factory::NewRawAsciiString(new_length));
+ answer = Handle<ResultSeqString>::cast(
+ isolate->factory()->NewRawAsciiString(new_length));
} else {
- answer =
- Handle<ResultSeqString>::cast(Factory::NewRawTwoByteString(new_length));
+ answer = Handle<ResultSeqString>::cast(
+ isolate->factory()->NewRawTwoByteString(new_length));
}
// If the regexp isn't global, only match once.
@@ -2684,7 +2798,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
if (match->IsNull()) break;
ASSERT(last_match_info_handle->HasFastElements());
- HandleScope loop_scope;
+ HandleScope loop_scope(isolate);
{
AssertNoAllocation match_info_array_is_not_in_a_handle;
FixedArray* match_info_array =
@@ -2704,7 +2818,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
}
if (position == 0) {
- return Heap::empty_string();
+ return isolate->heap()->empty_string();
}
// Shorten string and fill
@@ -2716,13 +2830,13 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
if (delta == 0) return *answer;
Address end_of_string = answer->address() + string_size;
- Heap::CreateFillerObjectAt(end_of_string, delta);
+ isolate->heap()->CreateFillerObjectAt(end_of_string, delta);
return *answer;
}
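// The answer string is allocated at its worst-case length and then shrunk
// in place: set_length() (elided above) trims the string header, and
// CreateFillerObjectAt() overwrites the unused tail with a filler object so
// heap iteration stays well-formed. Sketch of the arithmetic, assuming the
// elided locals:
//
//   int string_size = ResultSeqString::SizeFor(position);   // bytes kept
//   int delta = allocated_string_size - string_size;        // bytes freed
//   isolate->heap()->CreateFillerObjectAt(
//       answer->address() + string_size, delta);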
-static MaybeObject* Runtime_StringReplaceRegExpWithString(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
ASSERT(args.length() == 4);
CONVERT_CHECKED(String, subject, args[0]);
@@ -2755,14 +2869,15 @@ static MaybeObject* Runtime_StringReplaceRegExpWithString(Arguments args) {
if (replacement->length() == 0) {
if (subject->HasOnlyAsciiChars()) {
return StringReplaceRegExpWithEmptyString<SeqAsciiString>(
- subject, regexp, last_match_info);
+ isolate, subject, regexp, last_match_info);
} else {
return StringReplaceRegExpWithEmptyString<SeqTwoByteString>(
- subject, regexp, last_match_info);
+ isolate, subject, regexp, last_match_info);
}
}
- return StringReplaceRegExpWithString(subject,
+ return StringReplaceRegExpWithString(isolate,
+ subject,
regexp,
replacement,
last_match_info);
@@ -2772,7 +2887,8 @@ static MaybeObject* Runtime_StringReplaceRegExpWithString(Arguments args) {
// Perform string match of pattern on subject, starting at start index.
// Caller must ensure that 0 <= start_index <= sub->length(),
// and should check that pat->length() + start_index <= sub->length().
-int Runtime::StringMatch(Handle<String> sub,
+int Runtime::StringMatch(Isolate* isolate,
+ Handle<String> sub,
Handle<String> pat,
int start_index) {
ASSERT(0 <= start_index);
@@ -2798,20 +2914,32 @@ int Runtime::StringMatch(Handle<String> sub,
if (seq_pat->IsAsciiRepresentation()) {
Vector<const char> pat_vector = seq_pat->ToAsciiVector();
if (seq_sub->IsAsciiRepresentation()) {
- return SearchString(seq_sub->ToAsciiVector(), pat_vector, start_index);
+ return SearchString(isolate,
+ seq_sub->ToAsciiVector(),
+ pat_vector,
+ start_index);
}
- return SearchString(seq_sub->ToUC16Vector(), pat_vector, start_index);
+ return SearchString(isolate,
+ seq_sub->ToUC16Vector(),
+ pat_vector,
+ start_index);
}
Vector<const uc16> pat_vector = seq_pat->ToUC16Vector();
if (seq_sub->IsAsciiRepresentation()) {
- return SearchString(seq_sub->ToAsciiVector(), pat_vector, start_index);
+ return SearchString(isolate,
+ seq_sub->ToAsciiVector(),
+ pat_vector,
+ start_index);
}
- return SearchString(seq_sub->ToUC16Vector(), pat_vector, start_index);
+ return SearchString(isolate,
+ seq_sub->ToUC16Vector(),
+ pat_vector,
+ start_index);
}
-static MaybeObject* Runtime_StringIndexOf(Arguments args) {
- HandleScope scope; // create a new handle scope
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringIndexOf) {
+ HandleScope scope(isolate); // create a new handle scope
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(String, sub, 0);
@@ -2822,7 +2950,8 @@ static MaybeObject* Runtime_StringIndexOf(Arguments args) {
if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length()));
- int position = Runtime::StringMatch(sub, pat, start_index);
+ int position =
+ Runtime::StringMatch(isolate, sub, pat, start_index);
return Smi::FromInt(position);
}
@@ -2861,8 +2990,8 @@ static int StringMatchBackwards(Vector<const schar> subject,
return -1;
}
-static MaybeObject* Runtime_StringLastIndexOf(Arguments args) {
- HandleScope scope; // create a new handle scope
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
+ HandleScope scope(isolate); // create a new handle scope
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(String, sub, 0);
@@ -2918,7 +3047,7 @@ static MaybeObject* Runtime_StringLastIndexOf(Arguments args) {
}
-static MaybeObject* Runtime_StringLocaleCompare(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -2948,8 +3077,10 @@ static MaybeObject* Runtime_StringLocaleCompare(Arguments args) {
str1->TryFlatten();
str2->TryFlatten();
- static StringInputBuffer buf1;
- static StringInputBuffer buf2;
+ StringInputBuffer& buf1 =
+ *isolate->runtime_state()->string_locale_compare_buf1();
+ StringInputBuffer& buf2 =
+ *isolate->runtime_state()->string_locale_compare_buf2();
buf1.Reset(str1);
buf2.Reset(str2);
@@ -2964,34 +3095,34 @@ static MaybeObject* Runtime_StringLocaleCompare(Arguments args) {
}
-static MaybeObject* Runtime_SubString(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
CONVERT_CHECKED(String, value, args[0]);
- Object* from = args[1];
- Object* to = args[2];
int start, end;
// We have a fast integer-only case here to avoid a conversion to double in
// the common case where from and to are Smis.
- if (from->IsSmi() && to->IsSmi()) {
- start = Smi::cast(from)->value();
- end = Smi::cast(to)->value();
+ if (args[1]->IsSmi() && args[2]->IsSmi()) {
+ CONVERT_SMI_ARG_CHECKED(from_number, 1);
+ CONVERT_SMI_ARG_CHECKED(to_number, 2);
+ start = from_number;
+ end = to_number;
} else {
- CONVERT_DOUBLE_CHECKED(from_number, from);
- CONVERT_DOUBLE_CHECKED(to_number, to);
+ CONVERT_DOUBLE_ARG_CHECKED(from_number, 1);
+ CONVERT_DOUBLE_ARG_CHECKED(to_number, 2);
start = FastD2I(from_number);
end = FastD2I(to_number);
}
RUNTIME_ASSERT(end >= start);
RUNTIME_ASSERT(start >= 0);
RUNTIME_ASSERT(end <= value->length());
- Counters::sub_string_runtime.Increment();
+ isolate->counters()->sub_string_runtime()->Increment();
return value->SubString(start, end);
}
-static MaybeObject* Runtime_StringMatch(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
ASSERT_EQ(3, args.length());
CONVERT_ARG_CHECKED(String, subject, 0);
@@ -3005,15 +3136,15 @@ static MaybeObject* Runtime_StringMatch(Arguments args) {
return Failure::Exception();
}
if (match->IsNull()) {
- return Heap::null_value();
+ return isolate->heap()->null_value();
}
int length = subject->length();
- CompilationZoneScope zone_space(DELETE_ON_EXIT);
+ ZoneScope zone_space(isolate, DELETE_ON_EXIT);
ZoneList<int> offsets(8);
+ int start;
+ int end;
do {
- int start;
- int end;
{
AssertNoAllocation no_alloc;
FixedArray* elements = FixedArray::cast(regexp_info->elements());
@@ -3022,22 +3153,25 @@ static MaybeObject* Runtime_StringMatch(Arguments args) {
}
offsets.Add(start);
offsets.Add(end);
- int index = start < end ? end : end + 1;
- if (index > length) break;
- match = RegExpImpl::Exec(regexp, subject, index, regexp_info);
+ if (start == end && ++end > length) break;
+ match = RegExpImpl::Exec(regexp, subject, end, regexp_info);
if (match.is_null()) {
return Failure::Exception();
}
} while (!match->IsNull());
int matches = offsets.length() / 2;
- Handle<FixedArray> elements = Factory::NewFixedArray(matches);
- for (int i = 0; i < matches ; i++) {
+ Handle<FixedArray> elements = isolate->factory()->NewFixedArray(matches);
+ Handle<String> substring = isolate->factory()->
+ NewSubString(subject, offsets.at(0), offsets.at(1));
+ elements->set(0, *substring);
+ for (int i = 1; i < matches ; i++) {
int from = offsets.at(i * 2);
int to = offsets.at(i * 2 + 1);
- Handle<String> match = Factory::NewSubString(subject, from, to);
- elements->set(i, *match);
+ Handle<String> substring = isolate->factory()->
+ NewProperSubString(subject, from, to);
+ elements->set(i, *substring);
}
- Handle<JSArray> result = Factory::NewJSArrayWithElements(elements);
+ Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(elements);
result->set_length(Smi::FromInt(matches));
return *result;
}
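// A standalone sketch of the empty-match rule introduced above: when a regexp
// matches the empty string (start == end) the search must advance one
// position or it would loop forever, and stepping past the subject length
// terminates the loop. The values below are made up for illustration.
#include <cstdio>
#include <vector>

int main() {
  const int length = 3;                 // pretend subject length
  std::vector<int> offsets;
  int end = 0;
  for (;;) {
    int start = end;                    // simulate an empty match at 'end'
    offsets.push_back(start);
    offsets.push_back(end);
    if (start == end && ++end > length) break;  // the rule from the diff
  }
  std::printf("%d empty matches\n", static_cast<int>(offsets.size() / 2));
  return 0;
}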
@@ -3064,7 +3198,8 @@ static void SetLastMatchInfoNoCaptures(Handle<String> subject,
template <typename SubjectChar, typename PatternChar>
-static bool SearchStringMultiple(Vector<const SubjectChar> subject,
+static bool SearchStringMultiple(Isolate* isolate,
+ Vector<const SubjectChar> subject,
Vector<const PatternChar> pattern,
String* pattern_string,
FixedArrayBuilder* builder,
@@ -3073,7 +3208,7 @@ static bool SearchStringMultiple(Vector<const SubjectChar> subject,
int subject_length = subject.length();
int pattern_length = pattern.length();
int max_search_start = subject_length - pattern_length;
- StringSearch<PatternChar, SubjectChar> search(pattern);
+ StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
while (pos <= max_search_start) {
if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
*match_pos = pos;
@@ -3106,7 +3241,8 @@ static bool SearchStringMultiple(Vector<const SubjectChar> subject,
}
-static bool SearchStringMultiple(Handle<String> subject,
+static bool SearchStringMultiple(Isolate* isolate,
+ Handle<String> subject,
Handle<String> pattern,
Handle<JSArray> last_match_info,
FixedArrayBuilder* builder) {
@@ -3122,13 +3258,15 @@ static bool SearchStringMultiple(Handle<String> subject,
if (subject->IsAsciiRepresentation()) {
Vector<const char> subject_vector = subject->ToAsciiVector();
if (pattern->IsAsciiRepresentation()) {
- if (SearchStringMultiple(subject_vector,
+ if (SearchStringMultiple(isolate,
+ subject_vector,
pattern->ToAsciiVector(),
*pattern,
builder,
&match_pos)) break;
} else {
- if (SearchStringMultiple(subject_vector,
+ if (SearchStringMultiple(isolate,
+ subject_vector,
pattern->ToUC16Vector(),
*pattern,
builder,
@@ -3137,13 +3275,15 @@ static bool SearchStringMultiple(Handle<String> subject,
} else {
Vector<const uc16> subject_vector = subject->ToUC16Vector();
if (pattern->IsAsciiRepresentation()) {
- if (SearchStringMultiple(subject_vector,
+ if (SearchStringMultiple(isolate,
+ subject_vector,
pattern->ToAsciiVector(),
*pattern,
builder,
&match_pos)) break;
} else {
- if (SearchStringMultiple(subject_vector,
+ if (SearchStringMultiple(isolate,
+ subject_vector,
pattern->ToUC16Vector(),
*pattern,
builder,
@@ -3164,6 +3304,7 @@ static bool SearchStringMultiple(Handle<String> subject,
static RegExpImpl::IrregexpResult SearchRegExpNoCaptureMultiple(
+ Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> regexp,
Handle<JSArray> last_match_array,
@@ -3178,6 +3319,7 @@ static RegExpImpl::IrregexpResult SearchRegExpNoCaptureMultiple(
OffsetsVector registers(required_registers);
Vector<int32_t> register_vector(registers.vector(), registers.length());
int subject_length = subject->length();
+ bool first = true;
for (;;) { // Break on failure, return on exception.
RegExpImpl::IrregexpResult result =
@@ -3194,8 +3336,16 @@ static RegExpImpl::IrregexpResult SearchRegExpNoCaptureMultiple(
match_start);
}
match_end = register_vector[1];
- HandleScope loop_scope;
- builder->Add(*Factory::NewSubString(subject, match_start, match_end));
+ HandleScope loop_scope(isolate);
+ if (!first) {
+ builder->Add(*isolate->factory()->NewProperSubString(subject,
+ match_start,
+ match_end));
+ } else {
+ builder->Add(*isolate->factory()->NewSubString(subject,
+ match_start,
+ match_end));
+ }
if (match_start != match_end) {
pos = match_end;
} else {
@@ -3208,6 +3358,7 @@ static RegExpImpl::IrregexpResult SearchRegExpNoCaptureMultiple(
ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION);
return result;
}
+ first = false;
}
if (match_start >= 0) {
@@ -3228,6 +3379,7 @@ static RegExpImpl::IrregexpResult SearchRegExpNoCaptureMultiple(
static RegExpImpl::IrregexpResult SearchRegExpMultiple(
+ Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> regexp,
Handle<JSArray> last_match_array,
@@ -3258,7 +3410,7 @@ static RegExpImpl::IrregexpResult SearchRegExpMultiple(
// at the end, so we have two vectors that we swap between.
OffsetsVector registers2(required_registers);
Vector<int> prev_register_vector(registers2.vector(), registers2.length());
-
+ bool first = true;
do {
int match_start = register_vector[0];
builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
@@ -3271,31 +3423,44 @@ static RegExpImpl::IrregexpResult SearchRegExpMultiple(
{
// Avoid accumulating new handles inside the loop.
- HandleScope temp_scope;
+ HandleScope temp_scope(isolate);
// Arguments array to replace function is match, captures, index and
// subject, i.e., 3 + capture count in total.
- Handle<FixedArray> elements = Factory::NewFixedArray(3 + capture_count);
- Handle<String> match = Factory::NewSubString(subject,
- match_start,
- match_end);
+ Handle<FixedArray> elements =
+ isolate->factory()->NewFixedArray(3 + capture_count);
+ Handle<String> match;
+ if (!first) {
+ match = isolate->factory()->NewProperSubString(subject,
+ match_start,
+ match_end);
+ } else {
+ match = isolate->factory()->NewSubString(subject,
+ match_start,
+ match_end);
+ }
elements->set(0, *match);
for (int i = 1; i <= capture_count; i++) {
int start = register_vector[i * 2];
if (start >= 0) {
int end = register_vector[i * 2 + 1];
ASSERT(start <= end);
- Handle<String> substring = Factory::NewSubString(subject,
- start,
- end);
+ Handle<String> substring;
+ if (!first) {
+ substring = isolate->factory()->NewProperSubString(subject,
+ start,
+ end);
+ } else {
+ substring = isolate->factory()->NewSubString(subject, start, end);
+ }
elements->set(i, *substring);
} else {
ASSERT(register_vector[i * 2 + 1] < 0);
- elements->set(i, Heap::undefined_value());
+ elements->set(i, isolate->heap()->undefined_value());
}
}
elements->set(capture_count + 1, Smi::FromInt(match_start));
elements->set(capture_count + 2, *subject);
- builder->Add(*Factory::NewJSArrayWithElements(elements));
+ builder->Add(*isolate->factory()->NewJSArrayWithElements(elements));
}
// Swap register vectors, so the last successful match is in
// prev_register_vector.
@@ -3316,6 +3481,7 @@ static RegExpImpl::IrregexpResult SearchRegExpMultiple(
subject,
pos,
register_vector);
+ first = false;
} while (result == RegExpImpl::RE_SUCCESS);
if (result != RegExpImpl::RE_EXCEPTION) {
@@ -3346,9 +3512,9 @@ static RegExpImpl::IrregexpResult SearchRegExpMultiple(
}
-static MaybeObject* Runtime_RegExpExecMultiple(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
ASSERT(args.length() == 4);
- HandleScope handles;
+ HandleScope handles(isolate);
CONVERT_ARG_CHECKED(String, subject, 1);
if (!subject->IsFlat()) { FlattenString(subject); }
@@ -3362,8 +3528,9 @@ static MaybeObject* Runtime_RegExpExecMultiple(Arguments args) {
if (result_array->HasFastElements()) {
result_elements =
Handle<FixedArray>(FixedArray::cast(result_array->elements()));
- } else {
- result_elements = Factory::NewFixedArrayWithHoles(16);
+ }
+ if (result_elements.is_null() || result_elements->length() < 16) {
+ result_elements = isolate->factory()->NewFixedArrayWithHoles(16);
}
FixedArrayBuilder builder(result_elements);
@@ -3371,136 +3538,144 @@ static MaybeObject* Runtime_RegExpExecMultiple(Arguments args) {
Handle<String> pattern(
String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex)));
ASSERT(pattern->IsFlat());
- if (SearchStringMultiple(subject, pattern, last_match_info, &builder)) {
+ if (SearchStringMultiple(isolate, subject, pattern,
+ last_match_info, &builder)) {
return *builder.ToJSArray(result_array);
}
- return Heap::null_value();
+ return isolate->heap()->null_value();
}
ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
RegExpImpl::IrregexpResult result;
if (regexp->CaptureCount() == 0) {
- result = SearchRegExpNoCaptureMultiple(subject,
+ result = SearchRegExpNoCaptureMultiple(isolate,
+ subject,
regexp,
last_match_info,
&builder);
} else {
- result = SearchRegExpMultiple(subject, regexp, last_match_info, &builder);
+ result = SearchRegExpMultiple(isolate,
+ subject,
+ regexp,
+ last_match_info,
+ &builder);
}
if (result == RegExpImpl::RE_SUCCESS) return *builder.ToJSArray(result_array);
- if (result == RegExpImpl::RE_FAILURE) return Heap::null_value();
+ if (result == RegExpImpl::RE_FAILURE) return isolate->heap()->null_value();
ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION);
return Failure::Exception();
}
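// A hedged sketch of why the loops above track 'first'. The assumption (not
// visible in this hunk) is that NewProperSubString requires a strict
// substring, while the very first match may span the whole subject, so only
// later matches can use the "proper" variant. std::string stands in for V8
// string handles here.
#include <cassert>
#include <cstdio>
#include <string>

static std::string NewSubString(const std::string& s, size_t from, size_t to) {
  return s.substr(from, to - from);     // may equal the whole string
}

static std::string NewProperSubString(const std::string& s,
                                      size_t from, size_t to) {
  assert(to - from < s.size());         // must be strictly shorter
  return s.substr(from, to - from);
}

int main() {
  std::string subject = "aaa";
  bool first = true;
  for (size_t pos = 0; pos < subject.size(); ++pos) {
    std::string match = first ? NewSubString(subject, 0, subject.size())
                              : NewProperSubString(subject, pos, pos + 1);
    std::printf("%s\n", match.c_str());
    first = false;
  }
  return 0;
}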
-static MaybeObject* Runtime_NumberToRadixString(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
+ CONVERT_SMI_ARG_CHECKED(radix, 1);
+ RUNTIME_ASSERT(2 <= radix && radix <= 36);
// Fast case where the result is a one-character string.
- if (args[0]->IsSmi() && args[1]->IsSmi()) {
- int value = Smi::cast(args[0])->value();
- int radix = Smi::cast(args[1])->value();
+ if (args[0]->IsSmi()) {
+ int value = args.smi_at(0);
if (value >= 0 && value < radix) {
- RUNTIME_ASSERT(radix <= 36);
// Character array used for conversion.
static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
- return Heap::LookupSingleCharacterStringFromCode(kCharTable[value]);
+ return isolate->heap()->
+ LookupSingleCharacterStringFromCode(kCharTable[value]);
}
}
// Slow case.
- CONVERT_DOUBLE_CHECKED(value, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
- return Heap::AllocateStringFromAscii(CStrVector("NaN"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
}
if (isinf(value)) {
if (value < 0) {
- return Heap::AllocateStringFromAscii(CStrVector("-Infinity"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
}
- return Heap::AllocateStringFromAscii(CStrVector("Infinity"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
}
- CONVERT_DOUBLE_CHECKED(radix_number, args[1]);
- int radix = FastD2I(radix_number);
- RUNTIME_ASSERT(2 <= radix && radix <= 36);
char* str = DoubleToRadixCString(value, radix);
- MaybeObject* result = Heap::AllocateStringFromAscii(CStrVector(str));
+ MaybeObject* result =
+ isolate->heap()->AllocateStringFromAscii(CStrVector(str));
DeleteArray(str);
return result;
}
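// A standalone sketch of the fast path above: for a smi value v with
// 0 <= v < radix the result is a single character from the table, and
// anything else falls through to DoubleToRadixCString (not modeled here).
#include <cstdio>

static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";

static bool FastRadixDigit(int value, int radix, char* out) {
  if (radix < 2 || radix > 36) return false;    // mirrors RUNTIME_ASSERT
  if (value < 0 || value >= radix) return false;
  *out = kCharTable[value];
  return true;
}

int main() {
  char c;
  if (FastRadixDigit(11, 16, &c)) std::printf("11 base 16 -> %c\n", c);  // b
  return 0;
}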
-static MaybeObject* Runtime_NumberToFixed(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(value, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
- return Heap::AllocateStringFromAscii(CStrVector("NaN"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
}
if (isinf(value)) {
if (value < 0) {
- return Heap::AllocateStringFromAscii(CStrVector("-Infinity"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
}
- return Heap::AllocateStringFromAscii(CStrVector("Infinity"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
}
- CONVERT_DOUBLE_CHECKED(f_number, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2I(f_number);
RUNTIME_ASSERT(f >= 0);
char* str = DoubleToFixedCString(value, f);
- MaybeObject* result = Heap::AllocateStringFromAscii(CStrVector(str));
+ MaybeObject* res =
+ isolate->heap()->AllocateStringFromAscii(CStrVector(str));
DeleteArray(str);
- return result;
+ return res;
}
-static MaybeObject* Runtime_NumberToExponential(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(value, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
- return Heap::AllocateStringFromAscii(CStrVector("NaN"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
}
if (isinf(value)) {
if (value < 0) {
- return Heap::AllocateStringFromAscii(CStrVector("-Infinity"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
}
- return Heap::AllocateStringFromAscii(CStrVector("Infinity"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
}
- CONVERT_DOUBLE_CHECKED(f_number, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2I(f_number);
RUNTIME_ASSERT(f >= -1 && f <= 20);
char* str = DoubleToExponentialCString(value, f);
- MaybeObject* result = Heap::AllocateStringFromAscii(CStrVector(str));
+ MaybeObject* res =
+ isolate->heap()->AllocateStringFromAscii(CStrVector(str));
DeleteArray(str);
- return result;
+ return res;
}
-static MaybeObject* Runtime_NumberToPrecision(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(value, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
- return Heap::AllocateStringFromAscii(CStrVector("NaN"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
}
if (isinf(value)) {
if (value < 0) {
- return Heap::AllocateStringFromAscii(CStrVector("-Infinity"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
}
- return Heap::AllocateStringFromAscii(CStrVector("Infinity"));
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
}
- CONVERT_DOUBLE_CHECKED(f_number, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2I(f_number);
RUNTIME_ASSERT(f >= 1 && f <= 21);
char* str = DoubleToPrecisionCString(value, f);
- MaybeObject* result = Heap::AllocateStringFromAscii(CStrVector(str));
+ MaybeObject* res =
+ isolate->heap()->AllocateStringFromAscii(CStrVector(str));
DeleteArray(str);
- return result;
+ return res;
}
@@ -3516,7 +3691,8 @@ static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
}
-MaybeObject* Runtime::GetElementOrCharAt(Handle<Object> object,
+MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
+ Handle<Object> object,
uint32_t index) {
// Handle [] indexing on Strings
if (object->IsString()) {
@@ -3546,22 +3722,23 @@ MaybeObject* Runtime::GetElement(Handle<Object> object, uint32_t index) {
}
-MaybeObject* Runtime::GetObjectProperty(Handle<Object> object,
+MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
+ Handle<Object> object,
Handle<Object> key) {
- HandleScope scope;
+ HandleScope scope(isolate);
if (object->IsUndefined() || object->IsNull()) {
Handle<Object> args[2] = { key, object };
Handle<Object> error =
- Factory::NewTypeError("non_object_property_load",
- HandleVector(args, 2));
- return Top::Throw(*error);
+ isolate->factory()->NewTypeError("non_object_property_load",
+ HandleVector(args, 2));
+ return isolate->Throw(*error);
}
// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
- return GetElementOrCharAt(object, index);
+ return GetElementOrCharAt(isolate, object, index);
}
// Convert the key to a string - possibly by calling back into JavaScript.
@@ -3579,27 +3756,26 @@ MaybeObject* Runtime::GetObjectProperty(Handle<Object> object,
// Check if the name is trivially convertible to an index and get
// the element if so.
if (name->AsArrayIndex(&index)) {
- return GetElementOrCharAt(object, index);
+ return GetElementOrCharAt(isolate, object, index);
} else {
- PropertyAttributes attr;
- return object->GetProperty(*name, &attr);
+ return object->GetProperty(*name);
}
}
-static MaybeObject* Runtime_GetProperty(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
- return Runtime::GetObjectProperty(object, key);
+ return Runtime::GetObjectProperty(isolate, object, key);
}
// KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric.
-static MaybeObject* Runtime_KeyedGetProperty(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -3623,17 +3799,18 @@ static MaybeObject* Runtime_KeyedGetProperty(Arguments args) {
if (receiver->HasFastProperties()) {
// Attempt to use lookup cache.
Map* receiver_map = receiver->map();
- int offset = KeyedLookupCache::Lookup(receiver_map, key);
+ KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
+ int offset = keyed_lookup_cache->Lookup(receiver_map, key);
if (offset != -1) {
Object* value = receiver->FastPropertyAt(offset);
- return value->IsTheHole() ? Heap::undefined_value() : value;
+ return value->IsTheHole() ? isolate->heap()->undefined_value() : value;
}
// Lookup cache miss. Perform lookup and update the cache if appropriate.
LookupResult result;
receiver->LocalLookup(key, &result);
if (result.IsProperty() && result.type() == FIELD) {
int offset = result.GetFieldIndex();
- KeyedLookupCache::Update(receiver_map, key, offset);
+ keyed_lookup_cache->Update(receiver_map, key, offset);
return receiver->FastPropertyAt(offset);
}
} else {
@@ -3651,9 +3828,9 @@ static MaybeObject* Runtime_KeyedGetProperty(Arguments args) {
}
} else if (args[0]->IsString() && args[1]->IsSmi()) {
// Fast case for string indexing using [] with a smi index.
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<String> str = args.at<String>(0);
- int index = Smi::cast(args[1])->value();
+ int index = args.smi_at(1);
if (index >= 0 && index < str->length()) {
Handle<Object> result = GetCharAt(str, index);
return *result;
@@ -3661,7 +3838,8 @@ static MaybeObject* Runtime_KeyedGetProperty(Arguments args) {
}
// Fall back to GetObjectProperty.
- return Runtime::GetObjectProperty(args.at<Object>(0),
+ return Runtime::GetObjectProperty(isolate,
+ args.at<Object>(0),
args.at<Object>(1));
}
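// A hedged sketch of the keyed lookup cache consulted above, assuming its
// shape is roughly a hash from (map, name) to a fast-property field offset
// with -1 meaning "miss"; the real table's hashing and eviction differ.
#include <cstdint>
#include <cstdio>
#include <unordered_map>

struct KeyedLookupCache {
  std::unordered_map<uint64_t, int> table;
  static uint64_t Hash(uint32_t map_id, uint32_t key_id) {
    return (static_cast<uint64_t>(map_id) << 32) | key_id;
  }
  int Lookup(uint32_t map_id, uint32_t key_id) {
    auto it = table.find(Hash(map_id, key_id));
    return it == table.end() ? -1 : it->second;
  }
  void Update(uint32_t map_id, uint32_t key_id, int offset) {
    table[Hash(map_id, key_id)] = offset;
  }
};

int main() {
  KeyedLookupCache cache;
  if (cache.Lookup(1, 42) == -1) cache.Update(1, 42, 7);  // miss, then fill
  std::printf("offset: %d\n", cache.Lookup(1, 42));       // hit: 7
  return 0;
}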
@@ -3671,9 +3849,9 @@ static MaybeObject* Runtime_KeyedGetProperty(Arguments args) {
// Steps 9c & 12 - replace an existing data property with an accessor property.
// Step 12 - update an existing accessor property with an accessor or generic
// descriptor.
-static MaybeObject* Runtime_DefineOrRedefineAccessorProperty(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
ASSERT(args.length() == 5);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
CONVERT_CHECKED(String, name, args[1]);
CONVERT_CHECKED(Smi, flag_setter, args[2]);
@@ -3708,9 +3886,9 @@ static MaybeObject* Runtime_DefineOrRedefineAccessorProperty(Arguments args) {
// Steps 9b & 12 - replace an existing accessor property with a data property.
// Step 12 - update an existing data property with a data or generic
// descriptor.
-static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
ASSERT(args.length() == 4);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSObject, js_object, 0);
CONVERT_ARG_CHECKED(String, name, 1);
Handle<Object> obj_value = args.at<Object>(2);
@@ -3745,12 +3923,24 @@ static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) {
// Make sure that we never go back to the fast case.
dictionary->set_requires_slow_elements();
PropertyDetails details = PropertyDetails(attr, NORMAL);
- NumberDictionarySet(dictionary, index, obj_value, details);
+ Handle<NumberDictionary> extended_dictionary =
+ NumberDictionarySet(dictionary, index, obj_value, details);
+ if (*extended_dictionary != *dictionary) {
+ js_object->set_elements(*extended_dictionary);
+ }
return *obj_value;
}
LookupResult result;
- js_object->LookupRealNamedProperty(*name, &result);
+ js_object->LocalLookupRealNamedProperty(*name, &result);
+
+ // To be compatible with Safari we do not change the value on API objects
+ // in defineProperty; Firefox disagrees here and actually changes the value.
+ if (result.IsProperty() &&
+ (result.type() == CALLBACKS) &&
+ result.GetCallbackObject()->IsAccessorInfo()) {
+ return isolate->heap()->undefined_value();
+ }
// Take special care when attributes are different and there is already
// a property. For simplicity we normalize the property which enables us
@@ -3774,23 +3964,51 @@ static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) {
attr);
}
- return Runtime::ForceSetObjectProperty(js_object, name, obj_value, attr);
+ return Runtime::ForceSetObjectProperty(isolate,
+ js_object,
+ name,
+ obj_value,
+ attr);
+}
+
+
+// Special case for elements if any of the flags are true.
+// If elements are in fast case we always implicitly assume that:
+// DONT_DELETE: false, DONT_ENUM: false, READ_ONLY: false.
+static MaybeObject* NormalizeObjectSetElement(Isolate* isolate,
+ Handle<JSObject> js_object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attr) {
+ // Normalize the elements to enable attributes on the property.
+ NormalizeElements(js_object);
+ Handle<NumberDictionary> dictionary(js_object->element_dictionary());
+ // Make sure that we never go back to the fast case.
+ dictionary->set_requires_slow_elements();
+ PropertyDetails details = PropertyDetails(attr, NORMAL);
+ Handle<NumberDictionary> extended_dictionary =
+ NumberDictionarySet(dictionary, index, value, details);
+ if (*extended_dictionary != *dictionary) {
+ js_object->set_elements(*extended_dictionary);
+ }
+ return *value;
}
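// A standalone sketch of the pattern above, under the assumption that
// NumberDictionarySet may reallocate and return a different table when it
// grows, in which case the caller must re-point the object at the new
// backing store. The toy Dict below is not the real NumberDictionary.
#include <cstdio>
#include <map>

struct Dict { std::map<unsigned, int> slots; };
struct Obj { Dict* elements; };

static Dict* NumberDictionarySet(Dict* d, unsigned index, int value) {
  if (d->slots.size() >= 4) {           // pretend the table outgrew itself
    Dict* bigger = new Dict(*d);
    bigger->slots[index] = value;
    return bigger;                      // caller must adopt this one
  }
  d->slots[index] = value;
  return d;
}

int main() {
  Obj o = { new Dict };
  for (unsigned i = 0; i < 6; ++i) {
    Dict* extended = NumberDictionarySet(o.elements, i, 1);
    if (extended != o.elements) o.elements = extended;  // same check as above
  }
  std::printf("%d slots\n", static_cast<int>(o.elements->slots.size()));
  return 0;
}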
-MaybeObject* Runtime::SetObjectProperty(Handle<Object> object,
+MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
+ Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attr,
- StrictModeFlag strict) {
- HandleScope scope;
+ StrictModeFlag strict_mode) {
+ HandleScope scope(isolate);
if (object->IsUndefined() || object->IsNull()) {
Handle<Object> args[2] = { key, object };
Handle<Object> error =
- Factory::NewTypeError("non_object_property_store",
- HandleVector(args, 2));
- return Top::Throw(*error);
+ isolate->factory()->NewTypeError("non_object_property_store",
+ HandleVector(args, 2));
+ return isolate->Throw(*error);
}
// If the object isn't a JavaScript object, we ignore the store.
@@ -3812,8 +4030,11 @@ MaybeObject* Runtime::SetObjectProperty(Handle<Object> object,
return *value;
}
- // TODO(1220): Implement SetElement strict mode.
- Handle<Object> result = SetElement(js_object, index, value);
+ if ((attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) {
+ return NormalizeObjectSetElement(isolate, js_object, index, value, attr);
+ }
+
+ Handle<Object> result = SetElement(js_object, index, value, strict_mode);
if (result.is_null()) return Failure::Exception();
return *value;
}
@@ -3821,11 +4042,18 @@ MaybeObject* Runtime::SetObjectProperty(Handle<Object> object,
if (key->IsString()) {
Handle<Object> result;
if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
- result = SetElement(js_object, index, value);
+ if ((attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) {
+ return NormalizeObjectSetElement(isolate,
+ js_object,
+ index,
+ value,
+ attr);
+ }
+ result = SetElement(js_object, index, value, strict_mode);
} else {
Handle<String> key_string = Handle<String>::cast(key);
key_string->TryFlatten();
- result = SetProperty(js_object, key_string, value, attr, strict);
+ result = SetProperty(js_object, key_string, value, attr, strict_mode);
}
if (result.is_null()) return Failure::Exception();
return *value;
@@ -3838,19 +4066,19 @@ MaybeObject* Runtime::SetObjectProperty(Handle<Object> object,
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
- // TODO(1220): Implement SetElement strict mode.
- return js_object->SetElement(index, *value);
+ return js_object->SetElement(index, *value, strict_mode, true);
} else {
- return js_object->SetProperty(*name, *value, attr, strict);
+ return js_object->SetProperty(*name, *value, attr, strict_mode);
}
}
-MaybeObject* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object,
+MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
+ Handle<JSObject> js_object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attr) {
- HandleScope scope;
+ HandleScope scope(isolate);
// Check if the given key is an array index.
uint32_t index;
@@ -3866,12 +4094,12 @@ MaybeObject* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object,
return *value;
}
- return js_object->SetElement(index, *value);
+ return js_object->SetElement(index, *value, kNonStrictMode, true);
}
if (key->IsString()) {
if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
- return js_object->SetElement(index, *value);
+ return js_object->SetElement(index, *value, kNonStrictMode, true);
} else {
Handle<String> key_string = Handle<String>::cast(key);
key_string->TryFlatten();
@@ -3888,16 +4116,17 @@ MaybeObject* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object,
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
- return js_object->SetElement(index, *value);
+ return js_object->SetElement(index, *value, kNonStrictMode, true);
} else {
return js_object->SetLocalPropertyIgnoreAttributes(*name, *value, attr);
}
}
-MaybeObject* Runtime::ForceDeleteObjectProperty(Handle<JSObject> js_object,
+MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate,
+ Handle<JSObject> js_object,
Handle<Object> key) {
- HandleScope scope;
+ HandleScope scope(isolate);
// Check if the given key is an array index.
uint32_t index;
@@ -3909,7 +4138,7 @@ MaybeObject* Runtime::ForceDeleteObjectProperty(Handle<JSObject> js_object,
// underlying string does nothing with the deletion, we can ignore
// such deletions.
if (js_object->IsStringObjectWithCharacterAt(index)) {
- return Heap::true_value();
+ return isolate->heap()->true_value();
}
return js_object->DeleteElement(index, JSObject::FORCE_DELETION);
@@ -3931,35 +4160,57 @@ MaybeObject* Runtime::ForceDeleteObjectProperty(Handle<JSObject> js_object,
}
-static MaybeObject* Runtime_SetProperty(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
NoHandleAllocation ha;
RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- CONVERT_SMI_CHECKED(unchecked_attributes, args[3]);
+ CONVERT_SMI_ARG_CHECKED(unchecked_attributes, 3);
RUNTIME_ASSERT(
(unchecked_attributes & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
// Compute attributes.
PropertyAttributes attributes =
static_cast<PropertyAttributes>(unchecked_attributes);
- StrictModeFlag strict = kNonStrictMode;
+ StrictModeFlag strict_mode = kNonStrictMode;
if (args.length() == 5) {
- CONVERT_SMI_CHECKED(strict_unchecked, args[4]);
+ CONVERT_SMI_ARG_CHECKED(strict_unchecked, 4);
RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
strict_unchecked == kNonStrictMode);
- strict = static_cast<StrictModeFlag>(strict_unchecked);
+ strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
}
- return Runtime::SetObjectProperty(object, key, value, attributes, strict);
+ return Runtime::SetObjectProperty(isolate,
+ object,
+ key,
+ value,
+ attributes,
+ strict_mode);
+}
+
+
+// Set the native flag on the function.
+// This is used to decide if we should transform null and undefined
+// into the global object when doing call and apply.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) {
+ NoHandleAllocation ha;
+ RUNTIME_ASSERT(args.length() == 1);
+
+ Handle<Object> object = args.at<Object>(0);
+
+ if (object->IsJSFunction()) {
+ JSFunction* func = JSFunction::cast(*object);
+ func->shared()->set_native(true);
+ }
+ return isolate->heap()->undefined_value();
}
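// An illustrative sketch of what the native flag gates. The direction of the
// check is one reading of the comment above (native functions keep a
// null/undefined receiver, while non-native classic-mode functions get the
// global object instead); the real logic lives in the call/apply builtins,
// not here.
#include <cstdio>

struct Value { bool is_null_or_undefined; };

static Value CoerceReceiver(Value receiver, bool callee_is_native,
                            Value global_object) {
  if (!callee_is_native && receiver.is_null_or_undefined) return global_object;
  return receiver;
}

int main() {
  Value undef = { true };
  Value global = { false };
  std::printf("native keeps undefined: %d\n",
              CoerceReceiver(undef, true, global).is_null_or_undefined);
  return 0;
}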
// Set a local property, even if it is READ_ONLY. If the property does not
// exist, it will be added with attributes NONE.
-static MaybeObject* Runtime_IgnoreAttributesAndSetProperty(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
NoHandleAllocation ha;
RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
CONVERT_CHECKED(JSObject, object, args[0]);
@@ -3980,64 +4231,76 @@ static MaybeObject* Runtime_IgnoreAttributesAndSetProperty(Arguments args) {
}
-static MaybeObject* Runtime_DeleteProperty(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
CONVERT_CHECKED(JSObject, object, args[0]);
CONVERT_CHECKED(String, key, args[1]);
- CONVERT_SMI_CHECKED(strict, args[2]);
+ CONVERT_SMI_ARG_CHECKED(strict, 2);
return object->DeleteProperty(key, (strict == kStrictMode)
? JSObject::STRICT_DELETION
: JSObject::NORMAL_DELETION);
}
-static Object* HasLocalPropertyImplementation(Handle<JSObject> object,
+static Object* HasLocalPropertyImplementation(Isolate* isolate,
+ Handle<JSObject> object,
Handle<String> key) {
- if (object->HasLocalProperty(*key)) return Heap::true_value();
+ if (object->HasLocalProperty(*key)) return isolate->heap()->true_value();
// Handle hidden prototypes. If there's a hidden prototype above this object
// then we have to check it for properties, because they are supposed to
// look like they are on this object.
Handle<Object> proto(object->GetPrototype());
if (proto->IsJSObject() &&
Handle<JSObject>::cast(proto)->map()->is_hidden_prototype()) {
- return HasLocalPropertyImplementation(Handle<JSObject>::cast(proto), key);
+ return HasLocalPropertyImplementation(isolate,
+ Handle<JSObject>::cast(proto),
+ key);
}
- return Heap::false_value();
+ return isolate->heap()->false_value();
}
-static MaybeObject* Runtime_HasLocalProperty(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, key, args[1]);
+ uint32_t index;
+ const bool key_is_array_index = key->AsArrayIndex(&index);
+
Object* obj = args[0];
// Only JS objects can have properties.
if (obj->IsJSObject()) {
JSObject* object = JSObject::cast(obj);
- // Fast case - no interceptors.
- if (object->HasRealNamedProperty(key)) return Heap::true_value();
- // Slow case. Either it's not there or we have an interceptor. We should
- // have handles for this kind of deal.
- HandleScope scope;
- return HasLocalPropertyImplementation(Handle<JSObject>(object),
+ // Fast case: either the key is a real named property or it is not
+ // an array index and there are no interceptors or hidden
+ // prototypes.
+ if (object->HasRealNamedProperty(key)) return isolate->heap()->true_value();
+ Map* map = object->map();
+ if (!key_is_array_index &&
+ !map->has_named_interceptor() &&
+ !HeapObject::cast(map->prototype())->map()->is_hidden_prototype()) {
+ return isolate->heap()->false_value();
+ }
+ // Slow case.
+ HandleScope scope(isolate);
+ return HasLocalPropertyImplementation(isolate,
+ Handle<JSObject>(object),
Handle<String>(key));
- } else if (obj->IsString()) {
+ } else if (obj->IsString() && key_is_array_index) {
// Well, there is one exception: Handle [] on strings.
- uint32_t index;
- if (key->AsArrayIndex(&index)) {
- String* string = String::cast(obj);
- if (index < static_cast<uint32_t>(string->length()))
- return Heap::true_value();
+ String* string = String::cast(obj);
+ if (index < static_cast<uint32_t>(string->length())) {
+ return isolate->heap()->true_value();
}
}
- return Heap::false_value();
+ return isolate->heap()->false_value();
}
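// A minimal sketch of the recursion above, with hidden prototypes modeled as
// a flag on the link: a property on a hidden prototype must report as if it
// were an own property of the receiver. The Obj type is a stand-in.
#include <cstdio>
#include <set>
#include <string>

struct Obj {
  std::set<std::string> own;
  Obj* proto;
  bool proto_is_hidden;
};

static bool HasLocalProperty(const Obj* o, const std::string& key) {
  if (o->own.count(key)) return true;
  if (o->proto != NULL && o->proto_is_hidden) {
    return HasLocalProperty(o->proto, key);   // hidden protos look local
  }
  return false;
}

int main() {
  Obj hidden = { std::set<std::string>(), NULL, false };
  hidden.own.insert("x");
  Obj receiver = { std::set<std::string>(), &hidden, true };
  std::printf("%d\n", HasLocalProperty(&receiver, "x") ? 1 : 0);  // 1
  return 0;
}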
-static MaybeObject* Runtime_HasProperty(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
@@ -4045,13 +4308,13 @@ static MaybeObject* Runtime_HasProperty(Arguments args) {
if (args[0]->IsJSObject()) {
JSObject* object = JSObject::cast(args[0]);
CONVERT_CHECKED(String, key, args[1]);
- if (object->HasProperty(key)) return Heap::true_value();
+ if (object->HasProperty(key)) return isolate->heap()->true_value();
}
- return Heap::false_value();
+ return isolate->heap()->false_value();
}
-static MaybeObject* Runtime_HasElement(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
@@ -4060,13 +4323,13 @@ static MaybeObject* Runtime_HasElement(Arguments args) {
JSObject* object = JSObject::cast(args[0]);
CONVERT_CHECKED(Smi, index_obj, args[1]);
uint32_t index = index_obj->value();
- if (object->HasElement(index)) return Heap::true_value();
+ if (object->HasElement(index)) return isolate->heap()->true_value();
}
- return Heap::false_value();
+ return isolate->heap()->false_value();
}
-static MaybeObject* Runtime_IsPropertyEnumerable(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -4075,16 +4338,16 @@ static MaybeObject* Runtime_IsPropertyEnumerable(Arguments args) {
uint32_t index;
if (key->AsArrayIndex(&index)) {
- return Heap::ToBoolean(object->HasElement(index));
+ return isolate->heap()->ToBoolean(object->HasElement(index));
}
PropertyAttributes att = object->GetLocalPropertyAttribute(key);
- return Heap::ToBoolean(att != ABSENT && (att & DONT_ENUM) == 0);
+ return isolate->heap()->ToBoolean(att != ABSENT && (att & DONT_ENUM) == 0);
}
-static MaybeObject* Runtime_GetPropertyNames(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, object, 0);
return *GetKeysFor(object);
@@ -4096,14 +4359,14 @@ static MaybeObject* Runtime_GetPropertyNames(Arguments args) {
// all enumerable properties of the object and its prototypes
// have none, the map of the object. This is used to speed up
// the check for deletions during a for-in.
-static MaybeObject* Runtime_GetPropertyNamesFast(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSObject, raw_object, args[0]);
if (raw_object->IsSimpleEnum()) return raw_object->map();
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<JSObject> object(raw_object);
Handle<FixedArray> content = GetKeysInFixedArrayFor(object,
INCLUDE_PROTOS);
@@ -4132,11 +4395,11 @@ static int LocalPrototypeChainLength(JSObject* obj) {
// Return the names of the local named properties.
// args[0]: object
-static MaybeObject* Runtime_GetLocalPropertyNames(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
if (!args[0]->IsJSObject()) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
CONVERT_ARG_CHECKED(JSObject, obj, 0);
@@ -4145,9 +4408,11 @@ static MaybeObject* Runtime_GetLocalPropertyNames(Arguments args) {
if (obj->IsJSGlobalProxy()) {
// Only collect names if access is permitted.
if (obj->IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(*obj, Heap::undefined_value(), v8::ACCESS_KEYS)) {
- Top::ReportFailedAccessCheck(*obj, v8::ACCESS_KEYS);
- return *Factory::NewJSArray(0);
+ !isolate->MayNamedAccess(*obj,
+ isolate->heap()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheck(*obj, v8::ACCESS_KEYS);
+ return *isolate->factory()->NewJSArray(0);
}
obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
}
@@ -4162,11 +4427,11 @@ static MaybeObject* Runtime_GetLocalPropertyNames(Arguments args) {
for (int i = 0; i < length; i++) {
// Only collect names if access is permitted.
if (jsproto->IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(*jsproto,
- Heap::undefined_value(),
- v8::ACCESS_KEYS)) {
- Top::ReportFailedAccessCheck(*jsproto, v8::ACCESS_KEYS);
- return *Factory::NewJSArray(0);
+ !isolate->MayNamedAccess(*jsproto,
+ isolate->heap()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheck(*jsproto, v8::ACCESS_KEYS);
+ return *isolate->factory()->NewJSArray(0);
}
int n;
n = jsproto->NumberOfLocalProperties(static_cast<PropertyAttributes>(NONE));
@@ -4178,7 +4443,8 @@ static MaybeObject* Runtime_GetLocalPropertyNames(Arguments args) {
}
// Allocate an array with storage for all the property names.
- Handle<FixedArray> names = Factory::NewFixedArray(total_property_count);
+ Handle<FixedArray> names =
+ isolate->factory()->NewFixedArray(total_property_count);
// Get the property names.
jsproto = obj;
@@ -4197,43 +4463,43 @@ static MaybeObject* Runtime_GetLocalPropertyNames(Arguments args) {
// Filter out the name of the hidden properties object.
if (proto_with_hidden_properties > 0) {
Handle<FixedArray> old_names = names;
- names = Factory::NewFixedArray(
+ names = isolate->factory()->NewFixedArray(
names->length() - proto_with_hidden_properties);
int dest_pos = 0;
for (int i = 0; i < total_property_count; i++) {
Object* name = old_names->get(i);
- if (name == Heap::hidden_symbol()) {
+ if (name == isolate->heap()->hidden_symbol()) {
continue;
}
names->set(dest_pos++, name);
}
}
- return *Factory::NewJSArrayWithElements(names);
+ return *isolate->factory()->NewJSArrayWithElements(names);
}
// Return the names of the local indexed properties.
// args[0]: object
-static MaybeObject* Runtime_GetLocalElementNames(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalElementNames) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
if (!args[0]->IsJSObject()) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
CONVERT_ARG_CHECKED(JSObject, obj, 0);
int n = obj->NumberOfLocalElements(static_cast<PropertyAttributes>(NONE));
- Handle<FixedArray> names = Factory::NewFixedArray(n);
+ Handle<FixedArray> names = isolate->factory()->NewFixedArray(n);
obj->GetLocalElementKeys(*names, static_cast<PropertyAttributes>(NONE));
- return *Factory::NewJSArrayWithElements(names);
+ return *isolate->factory()->NewJSArrayWithElements(names);
}
// Return information on whether an object has a named or indexed interceptor.
// args[0]: object
-static MaybeObject* Runtime_GetInterceptorInfo(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetInterceptorInfo) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
if (!args[0]->IsJSObject()) {
return Smi::FromInt(0);
@@ -4250,8 +4516,8 @@ static MaybeObject* Runtime_GetInterceptorInfo(Arguments args) {
// Return property names from named interceptor.
// args[0]: object
-static MaybeObject* Runtime_GetNamedInterceptorPropertyNames(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetNamedInterceptorPropertyNames) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
@@ -4259,14 +4525,14 @@ static MaybeObject* Runtime_GetNamedInterceptorPropertyNames(Arguments args) {
v8::Handle<v8::Array> result = GetKeysForNamedInterceptor(obj, obj);
if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Return element names from indexed interceptor.
// args[0]: object
-static MaybeObject* Runtime_GetIndexedInterceptorElementNames(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetIndexedInterceptorElementNames) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
@@ -4274,28 +4540,28 @@ static MaybeObject* Runtime_GetIndexedInterceptorElementNames(Arguments args) {
v8::Handle<v8::Array> result = GetKeysForIndexedInterceptor(obj, obj);
if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_LocalKeys(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
ASSERT_EQ(args.length(), 1);
CONVERT_CHECKED(JSObject, raw_object, args[0]);
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<JSObject> object(raw_object);
if (object->IsJSGlobalProxy()) {
// Do access checks before going to the global object.
if (object->IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(*object, Heap::undefined_value(),
+ !isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
- Top::ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
- return *Factory::NewJSArray(0);
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ return *isolate->factory()->NewJSArray(0);
}
Handle<Object> proto(object->GetPrototype());
// If proxy is detached we simply return an empty array.
- if (proto->IsNull()) return *Factory::NewJSArray(0);
+ if (proto->IsNull()) return *isolate->factory()->NewJSArray(0);
object = Handle<JSObject>::cast(proto);
}
@@ -4305,29 +4571,30 @@ static MaybeObject* Runtime_LocalKeys(Arguments args) {
// property array and since the result is mutable we have to create
// a fresh clone on each invocation.
int length = contents->length();
- Handle<FixedArray> copy = Factory::NewFixedArray(length);
+ Handle<FixedArray> copy = isolate->factory()->NewFixedArray(length);
for (int i = 0; i < length; i++) {
Object* entry = contents->get(i);
if (entry->IsString()) {
copy->set(i, entry);
} else {
ASSERT(entry->IsNumber());
- HandleScope scope;
- Handle<Object> entry_handle(entry);
- Handle<Object> entry_str = Factory::NumberToString(entry_handle);
+ HandleScope scope(isolate);
+ Handle<Object> entry_handle(entry, isolate);
+ Handle<Object> entry_str =
+ isolate->factory()->NumberToString(entry_handle);
copy->set(i, *entry_str);
}
}
- return *Factory::NewJSArrayWithElements(copy);
+ return *isolate->factory()->NewJSArrayWithElements(copy);
}
-static MaybeObject* Runtime_GetArgumentsProperty(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
// Compute the frame holding the arguments.
- JavaScriptFrameIterator it;
+ JavaScriptFrameIterator it(isolate);
it.AdvanceToArgumentsFrame();
JavaScriptFrame* frame = it.frame();
@@ -4342,7 +4609,7 @@ static MaybeObject* Runtime_GetArgumentsProperty(Arguments args) {
}
// Convert the key to a string.
- HandleScope scope;
+ HandleScope scope(isolate);
bool exception = false;
Handle<Object> converted =
Execution::ToString(args.at<Object>(0), &exception);
@@ -4354,21 +4621,29 @@ static MaybeObject* Runtime_GetArgumentsProperty(Arguments args) {
if (index < n) {
return frame->GetParameter(index);
} else {
- return Top::initial_object_prototype()->GetElement(index);
+ return isolate->initial_object_prototype()->GetElement(index);
}
}
// Handle special arguments properties.
- if (key->Equals(Heap::length_symbol())) return Smi::FromInt(n);
- if (key->Equals(Heap::callee_symbol())) return frame->function();
+ if (key->Equals(isolate->heap()->length_symbol())) return Smi::FromInt(n);
+ if (key->Equals(isolate->heap()->callee_symbol())) {
+ Object* function = frame->function();
+ if (function->IsJSFunction() &&
+ JSFunction::cast(function)->shared()->strict_mode()) {
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "strict_arguments_callee", HandleVector<Object>(NULL, 0)));
+ }
+ return function;
+ }
// Lookup in the initial Object.prototype object.
- return Top::initial_object_prototype()->GetProperty(*key);
+ return isolate->initial_object_prototype()->GetProperty(*key);
}
-static MaybeObject* Runtime_ToFastProperties(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
@@ -4383,8 +4658,8 @@ static MaybeObject* Runtime_ToFastProperties(Arguments args) {
}
-static MaybeObject* Runtime_ToSlowProperties(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ToSlowProperties) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
@@ -4396,7 +4671,7 @@ static MaybeObject* Runtime_ToSlowProperties(Arguments args) {
}
-static MaybeObject* Runtime_ToBool(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ToBool) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -4406,37 +4681,39 @@ static MaybeObject* Runtime_ToBool(Arguments args) {
// Returns the type string of a value; see ECMA-262, 11.4.3 (p 47).
// Possible optimizations: put the type string into the oddballs.
-static MaybeObject* Runtime_Typeof(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Typeof) {
NoHandleAllocation ha;
Object* obj = args[0];
- if (obj->IsNumber()) return Heap::number_symbol();
+ if (obj->IsNumber()) return isolate->heap()->number_symbol();
HeapObject* heap_obj = HeapObject::cast(obj);
// typeof an undetectable object is 'undefined'
- if (heap_obj->map()->is_undetectable()) return Heap::undefined_symbol();
+ if (heap_obj->map()->is_undetectable()) {
+ return isolate->heap()->undefined_symbol();
+ }
InstanceType instance_type = heap_obj->map()->instance_type();
if (instance_type < FIRST_NONSTRING_TYPE) {
- return Heap::string_symbol();
+ return isolate->heap()->string_symbol();
}
switch (instance_type) {
case ODDBALL_TYPE:
if (heap_obj->IsTrue() || heap_obj->IsFalse()) {
- return Heap::boolean_symbol();
+ return isolate->heap()->boolean_symbol();
}
if (heap_obj->IsNull()) {
- return Heap::object_symbol();
+ return isolate->heap()->object_symbol();
}
ASSERT(heap_obj->IsUndefined());
- return Heap::undefined_symbol();
- case JS_FUNCTION_TYPE: case JS_REGEXP_TYPE:
- return Heap::function_symbol();
+ return isolate->heap()->undefined_symbol();
+ case JS_FUNCTION_TYPE:
+ return isolate->heap()->function_symbol();
default:
// For any kind of object not handled above, the spec rule for
// host objects allows us to return "object".
- return Heap::object_symbol();
+ return isolate->heap()->object_symbol();
}
}
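// A tiny sketch of the observable change in this hunk: JS_REGEXP_TYPE used to
// be grouped with JS_FUNCTION_TYPE and report typeof "function"; it now falls
// through to the default case and reports "object". The enum below is a
// stand-in for the real instance-type list.
#include <cstdio>

enum InstanceType { ODDBALL_TYPE, JS_FUNCTION_TYPE, JS_REGEXP_TYPE };

static const char* TypeofSymbol(InstanceType t) {
  switch (t) {
    case JS_FUNCTION_TYPE:
      return "function";
    default:
      // Regexps (and any unhandled host object) now report "object".
      return "object";
  }
}

int main() {
  std::printf("regexp -> %s\n", TypeofSymbol(JS_REGEXP_TYPE));
  return 0;
}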
@@ -4463,7 +4740,7 @@ static int ParseDecimalInteger(const char*s, int from, int to) {
}
-static MaybeObject* Runtime_StringToNumber(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_CHECKED(String, subject, args[0]);
@@ -4479,21 +4756,21 @@ static MaybeObject* Runtime_StringToNumber(Arguments args) {
int start_pos = (minus ? 1 : 0);
if (start_pos == len) {
- return Heap::nan_value();
+ return isolate->heap()->nan_value();
} else if (data[start_pos] > '9') {
// Fast check for a junk value. A valid string may start with
// whitespace, a sign ('+' or '-'), the decimal point, a decimal digit,
// or the 'I' character ('Infinity'). All of those have character codes
// not greater than '9' except 'I'.
if (data[start_pos] != 'I') {
- return Heap::nan_value();
+ return isolate->heap()->nan_value();
}
} else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
// The maximal/minimal smi has 10 digits. If the string has less digits we
// know it will fit into the smi-data type.
int d = ParseDecimalInteger(data, start_pos, len);
if (minus) {
- if (d == 0) return Heap::minus_zero_value();
+ if (d == 0) return isolate->heap()->minus_zero_value();
d = -d;
} else if (!subject->HasHashCode() &&
len <= String::kMaxArrayIndexSize &&
@@ -4513,11 +4790,12 @@ static MaybeObject* Runtime_StringToNumber(Arguments args) {
}
// Slower case.
- return Heap::NumberFromDouble(StringToDouble(subject, ALLOW_HEX));
+ return isolate->heap()->NumberFromDouble(
+ StringToDouble(isolate->unicode_cache(), subject, ALLOW_HEX));
}
-static MaybeObject* Runtime_StringFromCharCodeArray(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringFromCharCodeArray) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -4540,9 +4818,9 @@ static MaybeObject* Runtime_StringFromCharCodeArray(Arguments args) {
MaybeObject* maybe_object = NULL;
if (i == length) { // The string is ASCII.
- maybe_object = Heap::AllocateRawAsciiString(length);
+ maybe_object = isolate->heap()->AllocateRawAsciiString(length);
} else { // The string is not ASCII.
- maybe_object = Heap::AllocateRawTwoByteString(length);
+ maybe_object = isolate->heap()->AllocateRawTwoByteString(length);
}
Object* object = NULL;
@@ -4597,7 +4875,7 @@ static bool IsNotEscaped(uint16_t character) {
}
-static MaybeObject* Runtime_URIEscape(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_URIEscape) {
const char hex_chars[] = "0123456789ABCDEF";
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -4608,7 +4886,8 @@ static MaybeObject* Runtime_URIEscape(Arguments args) {
int escaped_length = 0;
int length = source->length();
{
- Access<StringInputBuffer> buffer(&runtime_string_input_buffer);
+ Access<StringInputBuffer> buffer(
+ isolate->runtime_state()->string_input_buffer());
buffer->Reset(source);
while (buffer->has_more()) {
uint16_t character = buffer->GetNext();
@@ -4622,7 +4901,7 @@ static MaybeObject* Runtime_URIEscape(Arguments args) {
// We don't allow strings that are longer than the maximal length.
ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
if (escaped_length > String::kMaxLength) {
- Top::context()->mark_out_of_memory();
+ isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
}
@@ -4632,13 +4911,15 @@ static MaybeObject* Runtime_URIEscape(Arguments args) {
return source;
}
Object* o;
- { MaybeObject* maybe_o = Heap::AllocateRawAsciiString(escaped_length);
+ { MaybeObject* maybe_o =
+ isolate->heap()->AllocateRawAsciiString(escaped_length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
String* destination = String::cast(o);
int dest_position = 0;
- Access<StringInputBuffer> buffer(&runtime_string_input_buffer);
+ Access<StringInputBuffer> buffer(
+ isolate->runtime_state()->string_input_buffer());
buffer->Rewind();
while (buffer->has_more()) {
uint16_t chr = buffer->GetNext();
@@ -4713,7 +4994,7 @@ static inline int Unescape(String* source,
}
-static MaybeObject* Runtime_URIUnescape(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_URIUnescape) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_CHECKED(String, source, args[0]);
@@ -4737,9 +5018,10 @@ static MaybeObject* Runtime_URIUnescape(Arguments args) {
return source;
Object* o;
- { MaybeObject* maybe_o = ascii ?
- Heap::AllocateRawAsciiString(unescaped_length) :
- Heap::AllocateRawTwoByteString(unescaped_length);
+ { MaybeObject* maybe_o =
+ ascii ?
+ isolate->heap()->AllocateRawAsciiString(unescaped_length) :
+ isolate->heap()->AllocateRawTwoByteString(unescaped_length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
String* destination = String::cast(o);
@@ -4800,6 +5082,8 @@ static const int kMaxGuaranteedNewSpaceString = 32 * 1024;
// Doing JSON quoting cannot make the string more than this many times larger.
static const int kJsonQuoteWorstCaseBlowup = 6;
+static const int kSpaceForQuotesAndComma = 3;
+static const int kSpaceForBrackets = 2;
// Covers the entire ASCII range (all other characters are unchanged by JSON
// quoting).
@@ -4824,23 +5108,24 @@ static const byte JsonQuoteLengths[kQuoteTableLength] = {
template <typename StringType>
-MaybeObject* AllocateRawString(int length);
+MaybeObject* AllocateRawString(Isolate* isolate, int length);
template <>
-MaybeObject* AllocateRawString<SeqTwoByteString>(int length) {
- return Heap::AllocateRawTwoByteString(length);
+MaybeObject* AllocateRawString<SeqTwoByteString>(Isolate* isolate, int length) {
+ return isolate->heap()->AllocateRawTwoByteString(length);
}
template <>
-MaybeObject* AllocateRawString<SeqAsciiString>(int length) {
- return Heap::AllocateRawAsciiString(length);
+MaybeObject* AllocateRawString<SeqAsciiString>(Isolate* isolate, int length) {
+ return isolate->heap()->AllocateRawAsciiString(length);
}
template <typename Char, typename StringType, bool comma>
-static MaybeObject* SlowQuoteJsonString(Vector<const Char> characters) {
+static MaybeObject* SlowQuoteJsonString(Isolate* isolate,
+ Vector<const Char> characters) {
int length = characters.length();
const Char* read_cursor = characters.start();
const Char* end = read_cursor + length;
@@ -4854,7 +5139,8 @@ static MaybeObject* SlowQuoteJsonString(Vector<const Char> characters) {
quoted_length += JsonQuoteLengths[static_cast<unsigned>(c)];
}
}
- MaybeObject* new_alloc = AllocateRawString<StringType>(quoted_length);
+ MaybeObject* new_alloc = AllocateRawString<StringType>(isolate,
+ quoted_length);
Object* new_object;
if (!new_alloc->ToObject(&new_object)) {
return new_alloc;
@@ -4885,43 +5171,21 @@ static MaybeObject* SlowQuoteJsonString(Vector<const Char> characters) {
}
-template <typename Char, typename StringType, bool comma>
-static MaybeObject* QuoteJsonString(Vector<const Char> characters) {
- int length = characters.length();
- Counters::quote_json_char_count.Increment(length);
- const int kSpaceForQuotes = 2 + (comma ? 1 :0);
- int worst_case_length = length * kJsonQuoteWorstCaseBlowup + kSpaceForQuotes;
- if (worst_case_length > kMaxGuaranteedNewSpaceString) {
- return SlowQuoteJsonString<Char, StringType, comma>(characters);
- }
-
- MaybeObject* new_alloc = AllocateRawString<StringType>(worst_case_length);
- Object* new_object;
- if (!new_alloc->ToObject(&new_object)) {
- return new_alloc;
- }
- if (!Heap::new_space()->Contains(new_object)) {
- // Even if our string is small enough to fit in new space we still have to
- // handle it being allocated in old space as may happen in the third
- // attempt. See CALL_AND_RETRY in heap-inl.h and similar code in
- // CEntryStub::GenerateCore.
- return SlowQuoteJsonString<Char, StringType, comma>(characters);
- }
- StringType* new_string = StringType::cast(new_object);
- ASSERT(Heap::new_space()->Contains(new_string));
-
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
- Char* write_cursor = reinterpret_cast<Char*>(
- new_string->address() + SeqAsciiString::kHeaderSize);
- if (comma) *(write_cursor++) = ',';
+template <typename SinkChar, typename SourceChar>
+static inline SinkChar* WriteQuoteJsonString(
+ Isolate* isolate,
+ SinkChar* write_cursor,
+ Vector<const SourceChar> characters) {
+ // SinkChar is only char if SourceChar is guaranteed to be char.
+ ASSERT(sizeof(SinkChar) >= sizeof(SourceChar));
+ const SourceChar* read_cursor = characters.start();
+ const SourceChar* end = read_cursor + characters.length();
*(write_cursor++) = '"';
-
- const Char* read_cursor = characters.start();
- const Char* end = read_cursor + length;
while (read_cursor < end) {
- Char c = *(read_cursor++);
- if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
- *(write_cursor++) = c;
+ SourceChar c = *(read_cursor++);
+ if (sizeof(SourceChar) > 1u &&
+ static_cast<unsigned>(c) >= kQuoteTableLength) {
+ *(write_cursor++) = static_cast<SinkChar>(c);
} else {
int len = JsonQuoteLengths[static_cast<unsigned>(c)];
const char* replacement = JsonQuotes +
@@ -4941,17 +5205,55 @@ static MaybeObject* QuoteJsonString(Vector<const Char> characters) {
}
}
*(write_cursor++) = '"';
+ return write_cursor;
+}
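
The JsonQuoteLengths/JsonQuotes tables driving the loop above pack, for every byte below the table length, a replacement string and its length. A branch-based sketch of the same escaping over std::string (collapsing the short escapes such as \n into the generic \u00XX form for brevity; illustrative, not the V8 tables):

#include <cstdio>
#include <string>

std::string QuoteJson(const std::string& in) {
  std::string out = "\"";
  for (unsigned char c : in) {
    if (c == '"' || c == '\\') {    // two-character escapes
      out += '\\';
      out += static_cast<char>(c);
    } else if (c < 0x20) {          // control characters: \u00XX
      char buf[8];
      snprintf(buf, sizeof(buf), "\\u%04x", static_cast<unsigned>(c));
      out += buf;
    } else {
      out += static_cast<char>(c);  // passes through unchanged
    }
  }
  out += '"';
  return out;
}

int main() {
  printf("%s\n", QuoteJson("a\"b\x01").c_str());  // prints "a\"b\u0001"
  return 0;
}
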
+
+template <typename Char, typename StringType, bool comma>
+static MaybeObject* QuoteJsonString(Isolate* isolate,
+ Vector<const Char> characters) {
+ int length = characters.length();
+ isolate->counters()->quote_json_char_count()->Increment(length);
+ int worst_case_length =
+ length * kJsonQuoteWorstCaseBlowup + kSpaceForQuotesAndComma;
+ if (worst_case_length > kMaxGuaranteedNewSpaceString) {
+ return SlowQuoteJsonString<Char, StringType, comma>(isolate, characters);
+ }
+
+ MaybeObject* new_alloc = AllocateRawString<StringType>(isolate,
+ worst_case_length);
+ Object* new_object;
+ if (!new_alloc->ToObject(&new_object)) {
+ return new_alloc;
+ }
+ if (!isolate->heap()->new_space()->Contains(new_object)) {
+ // Even if our string is small enough to fit in new space we still have to
+ // handle it being allocated in old space as may happen in the third
+ // attempt. See CALL_AND_RETRY in heap-inl.h and similar code in
+ // CEntryStub::GenerateCore.
+ return SlowQuoteJsonString<Char, StringType, comma>(isolate, characters);
+ }
+ StringType* new_string = StringType::cast(new_object);
+ ASSERT(isolate->heap()->new_space()->Contains(new_string));
+
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ Char* write_cursor = reinterpret_cast<Char*>(
+ new_string->address() + SeqAsciiString::kHeaderSize);
+ if (comma) *(write_cursor++) = ',';
+ write_cursor = WriteQuoteJsonString<Char, Char>(isolate,
+ write_cursor,
+ characters);
int final_length = static_cast<int>(
write_cursor - reinterpret_cast<Char*>(
new_string->address() + SeqAsciiString::kHeaderSize));
- Heap::new_space()->ShrinkStringAtAllocationBoundary<StringType>(new_string,
- final_length);
+ isolate->heap()->new_space()->
+ template ShrinkStringAtAllocationBoundary<StringType>(
+ new_string, final_length);
return new_string;
}
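
The fast path above is pure arithmetic: reserve the worst case up front, write once, shrink at the end. A standalone sketch of the sizing step, assuming kJsonQuoteWorstCaseBlowup is 6 (the length of a "\u00XX" escape) and kSpaceForQuotesAndComma is 3 (two quotes plus an optional leading comma); both constant values are inferred here, not quoted from the header:

#include <cstdio>

static const int kJsonQuoteWorstCaseBlowup = 6;  // assumed: "\u00XX"
static const int kSpaceForQuotesAndComma = 3;    // assumed: two quotes + ','

int WorstCaseQuotedLength(int length) {
  return length * kJsonQuoteWorstCaseBlowup + kSpaceForQuotesAndComma;
}

int main() {
  // A 10-character input can quote to at most 63 characters, so a
  // 63-character buffer is reserved and later shrunk to the real size.
  printf("%d\n", WorstCaseQuotedLength(10));  // prints 63
  return 0;
}
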
-static MaybeObject* Runtime_QuoteJSONString(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) {
NoHandleAllocation ha;
CONVERT_CHECKED(String, str, args[0]);
if (!str->IsFlat()) {
@@ -4964,14 +5266,16 @@ static MaybeObject* Runtime_QuoteJSONString(Arguments args) {
ASSERT(str->IsFlat());
}
if (str->IsTwoByteRepresentation()) {
- return QuoteJsonString<uc16, SeqTwoByteString, false>(str->ToUC16Vector());
+ return QuoteJsonString<uc16, SeqTwoByteString, false>(isolate,
+ str->ToUC16Vector());
} else {
- return QuoteJsonString<char, SeqAsciiString, false>(str->ToAsciiVector());
+ return QuoteJsonString<char, SeqAsciiString, false>(isolate,
+ str->ToAsciiVector());
}
}
-static MaybeObject* Runtime_QuoteJSONStringComma(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringComma) {
NoHandleAllocation ha;
CONVERT_CHECKED(String, str, args[0]);
if (!str->IsFlat()) {
@@ -4984,45 +5288,139 @@ static MaybeObject* Runtime_QuoteJSONStringComma(Arguments args) {
ASSERT(str->IsFlat());
}
if (str->IsTwoByteRepresentation()) {
- return QuoteJsonString<uc16, SeqTwoByteString, true>(str->ToUC16Vector());
+ return QuoteJsonString<uc16, SeqTwoByteString, true>(isolate,
+ str->ToUC16Vector());
+ } else {
+ return QuoteJsonString<char, SeqAsciiString, true>(isolate,
+ str->ToAsciiVector());
+ }
+}
+
+
+template <typename Char, typename StringType>
+static MaybeObject* QuoteJsonStringArray(Isolate* isolate,
+ FixedArray* array,
+ int worst_case_length) {
+ int length = array->length();
+
+ MaybeObject* new_alloc = AllocateRawString<StringType>(isolate,
+ worst_case_length);
+ Object* new_object;
+ if (!new_alloc->ToObject(&new_object)) {
+ return new_alloc;
+ }
+ if (!isolate->heap()->new_space()->Contains(new_object)) {
+ // Even if our string is small enough to fit in new space we still have to
+ // handle it being allocated in old space as may happen in the third
+ // attempt. See CALL_AND_RETRY in heap-inl.h and similar code in
+ // CEntryStub::GenerateCore.
+ return isolate->heap()->undefined_value();
+ }
+ AssertNoAllocation no_gc;
+ StringType* new_string = StringType::cast(new_object);
+ ASSERT(isolate->heap()->new_space()->Contains(new_string));
+
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ Char* write_cursor = reinterpret_cast<Char*>(
+ new_string->address() + SeqAsciiString::kHeaderSize);
+ *(write_cursor++) = '[';
+ for (int i = 0; i < length; i++) {
+ if (i != 0) *(write_cursor++) = ',';
+ String* str = String::cast(array->get(i));
+ if (str->IsTwoByteRepresentation()) {
+ write_cursor = WriteQuoteJsonString<Char, uc16>(isolate,
+ write_cursor,
+ str->ToUC16Vector());
+ } else {
+ write_cursor = WriteQuoteJsonString<Char, char>(isolate,
+ write_cursor,
+ str->ToAsciiVector());
+ }
+ }
+ *(write_cursor++) = ']';
+
+ int final_length = static_cast<int>(
+ write_cursor - reinterpret_cast<Char*>(
+ new_string->address() + SeqAsciiString::kHeaderSize));
+ isolate->heap()->new_space()->
+ template ShrinkStringAtAllocationBoundary<StringType>(
+ new_string, final_length);
+ return new_string;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(JSArray, array, args[0]);
+
+ if (!array->HasFastElements()) return isolate->heap()->undefined_value();
+ FixedArray* elements = FixedArray::cast(array->elements());
+ int n = elements->length();
+ bool ascii = true;
+ int total_length = 0;
+
+ for (int i = 0; i < n; i++) {
+ Object* elt = elements->get(i);
+ if (!elt->IsString()) return isolate->heap()->undefined_value();
+ String* element = String::cast(elt);
+ if (!element->IsFlat()) return isolate->heap()->undefined_value();
+ total_length += element->length();
+ if (ascii && element->IsTwoByteRepresentation()) {
+ ascii = false;
+ }
+ }
+
+ int worst_case_length =
+ kSpaceForBrackets + n * kSpaceForQuotesAndComma
+ + total_length * kJsonQuoteWorstCaseBlowup;
+
+ if (worst_case_length > kMaxGuaranteedNewSpaceString) {
+ return isolate->heap()->undefined_value();
+ }
+
+ if (ascii) {
+ return QuoteJsonStringArray<char, SeqAsciiString>(isolate,
+ elements,
+ worst_case_length);
} else {
- return QuoteJsonString<char, SeqAsciiString, true>(str->ToAsciiVector());
+ return QuoteJsonStringArray<uc16, SeqTwoByteString>(isolate,
+ elements,
+ worst_case_length);
}
}
-static MaybeObject* Runtime_StringParseInt(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
NoHandleAllocation ha;
CONVERT_CHECKED(String, s, args[0]);
- CONVERT_SMI_CHECKED(radix, args[1]);
+ CONVERT_SMI_ARG_CHECKED(radix, 1);
s->TryFlatten();
RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
- double value = StringToInt(s, radix);
- return Heap::NumberFromDouble(value);
+ double value = StringToInt(isolate->unicode_cache(), s, radix);
+ return isolate->heap()->NumberFromDouble(value);
}
-static MaybeObject* Runtime_StringParseFloat(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) {
NoHandleAllocation ha;
CONVERT_CHECKED(String, str, args[0]);
// ECMA-262 section 15.1.2.3, empty string is NaN
- double value = StringToDouble(str, ALLOW_TRAILING_JUNK, OS::nan_value());
+ double value = StringToDouble(isolate->unicode_cache(),
+ str, ALLOW_TRAILING_JUNK, OS::nan_value());
// Create a number object from the value.
- return Heap::NumberFromDouble(value);
+ return isolate->heap()->NumberFromDouble(value);
}
-static unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping;
-static unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping;
-
-
template <class Converter>
MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
+ Isolate* isolate,
String* s,
int length,
int input_string_length,
@@ -5040,8 +5438,8 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
// dependent upper/lower conversions.
Object* o;
{ MaybeObject* maybe_o = s->IsAsciiRepresentation()
- ? Heap::AllocateRawAsciiString(length)
- : Heap::AllocateRawTwoByteString(length);
+ ? isolate->heap()->AllocateRawAsciiString(length)
+ : isolate->heap()->AllocateRawTwoByteString(length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
String* result = String::cast(o);
@@ -5049,7 +5447,8 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
// Convert all characters to upper case, assuming that they will fit
// in the buffer
- Access<StringInputBuffer> buffer(&runtime_string_input_buffer);
+ Access<StringInputBuffer> buffer(
+ isolate->runtime_state()->string_input_buffer());
buffer->Reset(s);
unibrow::uchar chars[Converter::kMaxWidth];
// We can assume that the string is not empty
@@ -5096,7 +5495,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
if (char_length == 0) char_length = 1;
current_length += char_length;
if (current_length > Smi::kMaxValue) {
- Top::context()->mark_out_of_memory();
+ isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
}
@@ -5254,6 +5653,7 @@ struct ToUpperTraits {
template <typename ConvertTraits>
MUST_USE_RESULT static MaybeObject* ConvertCase(
Arguments args,
+ Isolate* isolate,
unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
NoHandleAllocation ha;
CONVERT_CHECKED(String, s, args[0]);
@@ -5271,7 +5671,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
// dependent upper/lower conversions.
if (s->IsSeqAsciiString()) {
Object* o;
- { MaybeObject* maybe_o = Heap::AllocateRawAsciiString(length);
+ { MaybeObject* maybe_o = isolate->heap()->AllocateRawAsciiString(length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
SeqAsciiString* result = SeqAsciiString::cast(o);
@@ -5281,13 +5681,15 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
}
Object* answer;
- { MaybeObject* maybe_answer = ConvertCaseHelper(s, length, length, mapping);
+ { MaybeObject* maybe_answer =
+ ConvertCaseHelper(isolate, s, length, length, mapping);
if (!maybe_answer->ToObject(&answer)) return maybe_answer;
}
if (answer->IsSmi()) {
// Retry with correct length.
{ MaybeObject* maybe_answer =
- ConvertCaseHelper(s, Smi::cast(answer)->value(), length, mapping);
+ ConvertCaseHelper(isolate,
+ s, Smi::cast(answer)->value(), length, mapping);
if (!maybe_answer->ToObject(&answer)) return maybe_answer;
}
}
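
ConvertCaseHelper is called optimistically with the input length; Unicode case mapping can change a string's length (German sharp s uppercases to "SS"), so when the helper runs out of room it reports the true length (encoded as a Smi) and the caller retries once. A toy version of that try-then-retry contract, with invented names:

#include <cctype>
#include <cstdio>
#include <string>

// Toy stand-in for ConvertCaseHelper: succeeds if the output fits in
// `capacity`, otherwise reports the length actually required. Only 's'
// grows (uppercased to "SS" to mimic the sharp s), which is enough to
// exercise the retry path.
bool TryToUpper(const std::string& in, size_t capacity,
                std::string* out, size_t* needed) {
  size_t required = 0;
  for (char c : in) required += (c == 's') ? 2 : 1;
  if (required > capacity) { *needed = required; return false; }
  out->clear();
  for (char c : in) {
    if (c == 's') *out += "SS";
    else *out += static_cast<char>(std::toupper(c));
  }
  return true;
}

int main() {
  std::string out;
  size_t needed = 0;
  if (!TryToUpper("ms", 2, &out, &needed)) {  // optimistic: input length
    TryToUpper("ms", needed, &out, &needed);  // retry with the true length
  }
  printf("%s\n", out.c_str());  // prints MSS ('s' stands in for the sharp s)
  return 0;
}
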
@@ -5295,13 +5697,15 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
}
-static MaybeObject* Runtime_StringToLowerCase(Arguments args) {
- return ConvertCase<ToLowerTraits>(args, &to_lower_mapping);
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToLowerCase) {
+ return ConvertCase<ToLowerTraits>(
+ args, isolate, isolate->runtime_state()->to_lower_mapping());
}
-static MaybeObject* Runtime_StringToUpperCase(Arguments args) {
- return ConvertCase<ToUpperTraits>(args, &to_upper_mapping);
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToUpperCase) {
+ return ConvertCase<ToUpperTraits>(
+ args, isolate, isolate->runtime_state()->to_upper_mapping());
}
@@ -5310,7 +5714,7 @@ static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
}
-static MaybeObject* Runtime_StringTrim(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
@@ -5339,14 +5743,15 @@ static MaybeObject* Runtime_StringTrim(Arguments args) {
template <typename SubjectChar, typename PatternChar>
-void FindStringIndices(Vector<const SubjectChar> subject,
+void FindStringIndices(Isolate* isolate,
+ Vector<const SubjectChar> subject,
Vector<const PatternChar> pattern,
ZoneList<int>* indices,
unsigned int limit) {
ASSERT(limit > 0);
// Collect indices of pattern in subject, and the end-of-string index.
// Stop after finding at most limit values.
- StringSearch<PatternChar, SubjectChar> search(pattern);
+ StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
int pattern_length = pattern.length();
int index = 0;
while (limit > 0) {
@@ -5359,9 +5764,9 @@ void FindStringIndices(Vector<const SubjectChar> subject,
}
-static MaybeObject* Runtime_StringSplit(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
ASSERT(args.length() == 3);
- HandleScope handle_scope;
+ HandleScope handle_scope(isolate);
CONVERT_ARG_CHECKED(String, subject, 0);
CONVERT_ARG_CHECKED(String, pattern, 1);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
@@ -5378,7 +5783,7 @@ static MaybeObject* Runtime_StringSplit(Arguments args) {
static const int kMaxInitialListCapacity = 16;
- ZoneScope scope(DELETE_ON_EXIT);
+ ZoneScope scope(isolate, DELETE_ON_EXIT);
// Find (up to limit) indices of separator and end-of-string in subject
int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
@@ -5391,12 +5796,14 @@ static MaybeObject* Runtime_StringSplit(Arguments args) {
if (subject->IsAsciiRepresentation()) {
Vector<const char> subject_vector = subject->ToAsciiVector();
if (pattern->IsAsciiRepresentation()) {
- FindStringIndices(subject_vector,
+ FindStringIndices(isolate,
+ subject_vector,
pattern->ToAsciiVector(),
&indices,
limit);
} else {
- FindStringIndices(subject_vector,
+ FindStringIndices(isolate,
+ subject_vector,
pattern->ToUC16Vector(),
&indices,
limit);
@@ -5404,12 +5811,14 @@ static MaybeObject* Runtime_StringSplit(Arguments args) {
} else {
Vector<const uc16> subject_vector = subject->ToUC16Vector();
if (pattern->IsAsciiRepresentation()) {
- FindStringIndices(subject_vector,
+ FindStringIndices(isolate,
+ subject_vector,
pattern->ToAsciiVector(),
&indices,
limit);
} else {
- FindStringIndices(subject_vector,
+ FindStringIndices(isolate,
+ subject_vector,
pattern->ToUC16Vector(),
&indices,
limit);
@@ -5426,7 +5835,7 @@ static MaybeObject* Runtime_StringSplit(Arguments args) {
// Create JSArray of substrings separated by separator.
int part_count = indices.length();
- Handle<JSArray> result = Factory::NewJSArray(part_count);
+ Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
result->set_length(Smi::FromInt(part_count));
ASSERT(result->HasFastElements());
@@ -5442,7 +5851,7 @@ static MaybeObject* Runtime_StringSplit(Arguments args) {
HandleScope local_loop_handle;
int part_end = indices.at(i);
Handle<String> substring =
- Factory::NewSubString(subject, part_start, part_end);
+ isolate->factory()->NewProperSubString(subject, part_start, part_end);
elements->set(i, *substring);
part_start = part_end + pattern_length;
}
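
StringSplit above runs in two phases: FindStringIndices collects the match positions (with the end-of-string position appended as a final cut point), then the loop slices between consecutive cut points, skipping the pattern after each part. The same two phases over std::string, assuming a non-empty pattern; illustrative only:

#include <cstdio>
#include <string>
#include <vector>

std::vector<std::string> Split(const std::string& subject,
                               const std::string& pattern) {
  // Phase 1: collect match indices, then the end-of-string index.
  std::vector<size_t> indices;
  for (size_t i = subject.find(pattern); i != std::string::npos;
       i = subject.find(pattern, i + pattern.size())) {
    indices.push_back(i);
  }
  indices.push_back(subject.size());
  // Phase 2: cut substrings between consecutive cut points.
  std::vector<std::string> parts;
  size_t part_start = 0;
  for (size_t part_end : indices) {
    parts.push_back(subject.substr(part_start, part_end - part_start));
    part_start = part_end + pattern.size();
  }
  return parts;
}

int main() {
  for (const std::string& part : Split("a,b,,c", ",")) {
    printf("[%s]", part.c_str());  // prints [a][b][][c]
  }
  printf("\n");
  return 0;
}
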
@@ -5455,17 +5864,18 @@ static MaybeObject* Runtime_StringSplit(Arguments args) {
// one-char strings in the cache. Gives up on the first char that is
// not in the cache and fills the remainder with smi zeros. Returns
// the length of the successfully copied prefix.
-static int CopyCachedAsciiCharsToArray(const char* chars,
+static int CopyCachedAsciiCharsToArray(Heap* heap,
+ const char* chars,
FixedArray* elements,
int length) {
AssertNoAllocation nogc;
- FixedArray* ascii_cache = Heap::single_character_string_cache();
- Object* undefined = Heap::undefined_value();
+ FixedArray* ascii_cache = heap->single_character_string_cache();
+ Object* undefined = heap->undefined_value();
int i;
for (i = 0; i < length; ++i) {
Object* value = ascii_cache->get(chars[i]);
if (value == undefined) break;
- ASSERT(!Heap::InNewSpace(value));
+ ASSERT(!heap->InNewSpace(value));
elements->set(i, value, SKIP_WRITE_BARRIER);
}
if (i < length) {
@@ -5485,8 +5895,8 @@ static int CopyCachedAsciiCharsToArray(const char* chars,
// Converts a String to JSArray.
// For example, "foo" => ["f", "o", "o"].
-static MaybeObject* Runtime_StringToArray(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, s, 0);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
@@ -5497,15 +5907,17 @@ static MaybeObject* Runtime_StringToArray(Arguments args) {
Handle<FixedArray> elements;
if (s->IsFlat() && s->IsAsciiRepresentation()) {
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateUninitializedFixedArray(length);
+ { MaybeObject* maybe_obj =
+ isolate->heap()->AllocateUninitializedFixedArray(length);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- elements = Handle<FixedArray>(FixedArray::cast(obj));
+ elements = Handle<FixedArray>(FixedArray::cast(obj), isolate);
Vector<const char> chars = s->ToAsciiVector();
// Note, this will initialize all elements (not only the prefix)
// to prevent GC from seeing partially initialized array.
- int num_copied_from_cache = CopyCachedAsciiCharsToArray(chars.start(),
+ int num_copied_from_cache = CopyCachedAsciiCharsToArray(isolate->heap(),
+ chars.start(),
*elements,
length);
@@ -5514,7 +5926,7 @@ static MaybeObject* Runtime_StringToArray(Arguments args) {
elements->set(i, *str);
}
} else {
- elements = Factory::NewFixedArray(length);
+ elements = isolate->factory()->NewFixedArray(length);
for (int i = 0; i < length; ++i) {
Handle<Object> str = LookupSingleCharacterStringFromCode(s->Get(i));
elements->set(i, *str);
@@ -5527,11 +5939,11 @@ static MaybeObject* Runtime_StringToArray(Arguments args) {
}
#endif
- return *Factory::NewJSArrayWithElements(elements);
+ return *isolate->factory()->NewJSArrayWithElements(elements);
}
-static MaybeObject* Runtime_NewStringWrapper(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_CHECKED(String, value, args[0]);
@@ -5539,54 +5951,54 @@ static MaybeObject* Runtime_NewStringWrapper(Arguments args) {
}
-bool Runtime::IsUpperCaseChar(uint16_t ch) {
+bool Runtime::IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch) {
unibrow::uchar chars[unibrow::ToUppercase::kMaxWidth];
- int char_length = to_upper_mapping.get(ch, 0, chars);
+ int char_length = runtime_state->to_upper_mapping()->get(ch, 0, chars);
return char_length == 0;
}
-static MaybeObject* Runtime_NumberToString(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
Object* number = args[0];
RUNTIME_ASSERT(number->IsNumber());
- return Heap::NumberToString(number);
+ return isolate->heap()->NumberToString(number);
}
-static MaybeObject* Runtime_NumberToStringSkipCache(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
Object* number = args[0];
RUNTIME_ASSERT(number->IsNumber());
- return Heap::NumberToString(number, false);
+ return isolate->heap()->NumberToString(number, false);
}
-static MaybeObject* Runtime_NumberToInteger(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- CONVERT_DOUBLE_CHECKED(number, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(number, 0);
// We do not include 0 so that we don't have to treat +0 / -0 cases.
if (number > 0 && number <= Smi::kMaxValue) {
return Smi::FromInt(static_cast<int>(number));
}
- return Heap::NumberFromDouble(DoubleToInteger(number));
+ return isolate->heap()->NumberFromDouble(DoubleToInteger(number));
}
-static MaybeObject* Runtime_NumberToIntegerMapMinusZero(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- CONVERT_DOUBLE_CHECKED(number, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(number, 0);
// We do not include 0 so that we don't have to treat +0 / -0 cases.
if (number > 0 && number <= Smi::kMaxValue) {
@@ -5597,36 +6009,36 @@ static MaybeObject* Runtime_NumberToIntegerMapMinusZero(Arguments args) {
// Map both -0 and +0 to +0.
if (double_value == 0) double_value = 0;
- return Heap::NumberFromDouble(double_value);
+ return isolate->heap()->NumberFromDouble(double_value);
}
-static MaybeObject* Runtime_NumberToJSUint32(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSUint32) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]);
- return Heap::NumberFromUint32(number);
+ return isolate->heap()->NumberFromUint32(number);
}
-static MaybeObject* Runtime_NumberToJSInt32(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- CONVERT_DOUBLE_CHECKED(number, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(number, 0);
// We do not include 0 so that we don't have to treat +0 / -0 cases.
if (number > 0 && number <= Smi::kMaxValue) {
return Smi::FromInt(static_cast<int>(number));
}
- return Heap::NumberFromInt32(DoubleToInt32(number));
+ return isolate->heap()->NumberFromInt32(DoubleToInt32(number));
}
// Converts a Number to a Smi, if possible. Returns NaN if the number is not
// a small integer.
-static MaybeObject* Runtime_NumberToSmi(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -5641,94 +6053,94 @@ static MaybeObject* Runtime_NumberToSmi(Arguments args) {
return Smi::FromInt(int_value);
}
}
- return Heap::nan_value();
+ return isolate->heap()->nan_value();
}
-static MaybeObject* Runtime_AllocateHeapNumber(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateHeapNumber) {
NoHandleAllocation ha;
ASSERT(args.length() == 0);
- return Heap::AllocateHeapNumber(0);
+ return isolate->heap()->AllocateHeapNumber(0);
}
-static MaybeObject* Runtime_NumberAdd(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAdd) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
- return Heap::NumberFromDouble(x + y);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
+ return isolate->heap()->NumberFromDouble(x + y);
}
-static MaybeObject* Runtime_NumberSub(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSub) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
- return Heap::NumberFromDouble(x - y);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
+ return isolate->heap()->NumberFromDouble(x - y);
}
-static MaybeObject* Runtime_NumberMul(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMul) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
- return Heap::NumberFromDouble(x * y);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
+ return isolate->heap()->NumberFromDouble(x * y);
}
-static MaybeObject* Runtime_NumberUnaryMinus(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberUnaryMinus) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::NumberFromDouble(-x);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ return isolate->heap()->NumberFromDouble(-x);
}
-static MaybeObject* Runtime_NumberAlloc(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAlloc) {
NoHandleAllocation ha;
ASSERT(args.length() == 0);
- return Heap::NumberFromDouble(9876543210.0);
+ return isolate->heap()->NumberFromDouble(9876543210.0);
}
-static MaybeObject* Runtime_NumberDiv(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberDiv) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
- return Heap::NumberFromDouble(x / y);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
+ return isolate->heap()->NumberFromDouble(x / y);
}
-static MaybeObject* Runtime_NumberMod(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
x = modulo(x, y);
// NumberFromDouble may return a Smi instead of a Number object
- return Heap::NumberFromDouble(x);
+ return isolate->heap()->NumberFromDouble(x);
}
-static MaybeObject* Runtime_StringAdd(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, str1, args[0]);
CONVERT_CHECKED(String, str2, args[1]);
- Counters::string_add_runtime.Increment();
- return Heap::AllocateConsString(str1, str2);
+ isolate->counters()->string_add_runtime()->Increment();
+ return isolate->heap()->AllocateConsString(str1, str2);
}
@@ -5771,15 +6183,15 @@ static inline void StringBuilderConcatHelper(String* special,
}
-static MaybeObject* Runtime_StringBuilderConcat(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
CONVERT_CHECKED(JSArray, array, args[0]);
if (!args[1]->IsSmi()) {
- Top::context()->mark_out_of_memory();
+ isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
- int array_length = Smi::cast(args[1])->value();
+ int array_length = args.smi_at(1);
CONVERT_CHECKED(String, special, args[2]);
// This assumption is used by the slice encoding in one or two smis.
@@ -5787,7 +6199,7 @@ static MaybeObject* Runtime_StringBuilderConcat(Arguments args) {
int special_length = special->length();
if (!array->HasFastElements()) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
FixedArray* fixed_array = FixedArray::cast(array->elements());
if (fixed_array->length() < array_length) {
@@ -5795,7 +6207,7 @@ static MaybeObject* Runtime_StringBuilderConcat(Arguments args) {
}
if (array_length == 0) {
- return Heap::empty_string();
+ return isolate->heap()->empty_string();
} else if (array_length == 1) {
Object* first = fixed_array->get(0);
if (first->IsString()) return first;
@@ -5821,21 +6233,21 @@ static MaybeObject* Runtime_StringBuilderConcat(Arguments args) {
// Get the position and check that it is a positive smi.
i++;
if (i >= array_length) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
Object* next_smi = fixed_array->get(i);
if (!next_smi->IsSmi()) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
pos = Smi::cast(next_smi)->value();
if (pos < 0) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
}
ASSERT(pos >= 0);
ASSERT(len >= 0);
if (pos > special_length || len > special_length - pos) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
increment = len;
} else if (elt->IsString()) {
@@ -5846,10 +6258,10 @@ static MaybeObject* Runtime_StringBuilderConcat(Arguments args) {
ascii = false;
}
} else {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
if (increment > String::kMaxLength - position) {
- Top::context()->mark_out_of_memory();
+ isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
position += increment;
@@ -5859,7 +6271,8 @@ static MaybeObject* Runtime_StringBuilderConcat(Arguments args) {
Object* object;
if (ascii) {
- { MaybeObject* maybe_object = Heap::AllocateRawAsciiString(length);
+ { MaybeObject* maybe_object =
+ isolate->heap()->AllocateRawAsciiString(length);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
SeqAsciiString* answer = SeqAsciiString::cast(object);
@@ -5869,7 +6282,8 @@ static MaybeObject* Runtime_StringBuilderConcat(Arguments args) {
array_length);
return answer;
} else {
- { MaybeObject* maybe_object = Heap::AllocateRawTwoByteString(length);
+ { MaybeObject* maybe_object =
+ isolate->heap()->AllocateRawTwoByteString(length);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
SeqTwoByteString* answer = SeqTwoByteString::cast(object);
@@ -5882,19 +6296,19 @@ static MaybeObject* Runtime_StringBuilderConcat(Arguments args) {
}
-static MaybeObject* Runtime_StringBuilderJoin(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
CONVERT_CHECKED(JSArray, array, args[0]);
if (!args[1]->IsSmi()) {
- Top::context()->mark_out_of_memory();
+ isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
- int array_length = Smi::cast(args[1])->value();
+ int array_length = args.smi_at(1);
CONVERT_CHECKED(String, separator, args[2]);
if (!array->HasFastElements()) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
FixedArray* fixed_array = FixedArray::cast(array->elements());
if (fixed_array->length() < array_length) {
@@ -5902,7 +6316,7 @@ static MaybeObject* Runtime_StringBuilderJoin(Arguments args) {
}
if (array_length == 0) {
- return Heap::empty_string();
+ return isolate->heap()->empty_string();
} else if (array_length == 1) {
Object* first = fixed_array->get(0);
if (first->IsString()) return first;
@@ -5912,7 +6326,7 @@ static MaybeObject* Runtime_StringBuilderJoin(Arguments args) {
int max_nof_separators =
(String::kMaxLength + separator_length - 1) / separator_length;
if (max_nof_separators < (array_length - 1)) {
- Top::context()->mark_out_of_memory();
+ isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
int length = (array_length - 1) * separator_length;
@@ -5920,19 +6334,20 @@ static MaybeObject* Runtime_StringBuilderJoin(Arguments args) {
Object* element_obj = fixed_array->get(i);
if (!element_obj->IsString()) {
// TODO(1161): handle this case.
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
String* element = String::cast(element_obj);
int increment = element->length();
if (increment > String::kMaxLength - length) {
- Top::context()->mark_out_of_memory();
+ isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
length += increment;
}
Object* object;
- { MaybeObject* maybe_object = Heap::AllocateRawTwoByteString(length);
+ { MaybeObject* maybe_object =
+ isolate->heap()->AllocateRawTwoByteString(length);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
SeqTwoByteString* answer = SeqTwoByteString::cast(object);
@@ -5964,82 +6379,211 @@ static MaybeObject* Runtime_StringBuilderJoin(Arguments args) {
return answer;
}
+template <typename Char>
+static void JoinSparseArrayWithSeparator(FixedArray* elements,
+ int elements_length,
+ uint32_t array_length,
+ String* separator,
+ Vector<Char> buffer) {
+ int previous_separator_position = 0;
+ int separator_length = separator->length();
+ int cursor = 0;
+ for (int i = 0; i < elements_length; i += 2) {
+ int position = NumberToInt32(elements->get(i));
+ String* string = String::cast(elements->get(i + 1));
+ int string_length = string->length();
+ if (string->length() > 0) {
+ while (previous_separator_position < position) {
+ String::WriteToFlat<Char>(separator, &buffer[cursor],
+ 0, separator_length);
+ cursor += separator_length;
+ previous_separator_position++;
+ }
+ String::WriteToFlat<Char>(string, &buffer[cursor],
+ 0, string_length);
+ cursor += string->length();
+ }
+ }
+ if (separator_length > 0) {
+ // Array length must be representable as a signed 32-bit number,
+ // otherwise the total string length would have been too large.
+    ASSERT(array_length <= 0x7fffffff);  // Fits in int32_t.
+ int last_array_index = static_cast<int>(array_length - 1);
+ while (previous_separator_position < last_array_index) {
+ String::WriteToFlat<Char>(separator, &buffer[cursor],
+ 0, separator_length);
+ cursor += separator_length;
+ previous_separator_position++;
+ }
+ }
+ ASSERT(cursor <= buffer.length());
+}
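
The elements array consumed above alternates ascending positions with their strings; every array index skipped between entries, and after the last entry up to array_length - 1, contributes one separator, matching what a dense join would emit for the holes. The same cursor logic over std::string (positions assumed ascending, as the runtime requires):

#include <cstdio>
#include <string>
#include <vector>

struct Entry { int position; std::string value; };

std::string SparseJoin(const std::vector<Entry>& entries,
                       int array_length,
                       const std::string& separator) {
  std::string out;
  int previous_separator_position = 0;
  for (const Entry& e : entries) {
    if (e.value.empty()) continue;  // empty strings add nothing themselves
    while (previous_separator_position < e.position) {
      out += separator;             // one separator per skipped index
      ++previous_separator_position;
    }
    out += e.value;
  }
  while (previous_separator_position < array_length - 1) {
    out += separator;               // trailing holes still get separators
    ++previous_separator_position;
  }
  return out;
}

int main() {
  // ["a", hole, hole, "b", hole] joined with "," gives "a,,,b,".
  std::vector<Entry> entries = {{0, "a"}, {3, "b"}};
  printf("%s\n", SparseJoin(entries, 5, ",").c_str());
  return 0;
}
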
-static MaybeObject* Runtime_NumberOr(Arguments args) {
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 3);
+ CONVERT_CHECKED(JSArray, elements_array, args[0]);
+ RUNTIME_ASSERT(elements_array->HasFastElements());
+ CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
+ CONVERT_CHECKED(String, separator, args[2]);
+  // elements_array is a fast-mode JSArray of alternating positions
+  // (in increasing order) and strings.
+  // array_length is the length of the original array (used to add
+  // separators); separator is the string to put between elements,
+  // assumed to be non-empty.
+
+ // Find total length of join result.
+ int string_length = 0;
+ bool is_ascii = true;
+ int max_string_length = SeqAsciiString::kMaxLength;
+ bool overflow = false;
+ CONVERT_NUMBER_CHECKED(int, elements_length,
+ Int32, elements_array->length());
+ RUNTIME_ASSERT((elements_length & 1) == 0); // Even length.
+ FixedArray* elements = FixedArray::cast(elements_array->elements());
+ for (int i = 0; i < elements_length; i += 2) {
+ RUNTIME_ASSERT(elements->get(i)->IsNumber());
+ CONVERT_CHECKED(String, string, elements->get(i + 1));
+ int length = string->length();
+ if (is_ascii && !string->IsAsciiRepresentation()) {
+ is_ascii = false;
+ max_string_length = SeqTwoByteString::kMaxLength;
+ }
+ if (length > max_string_length ||
+ max_string_length - length < string_length) {
+ overflow = true;
+ break;
+ }
+ string_length += length;
+ }
+ int separator_length = separator->length();
+ if (!overflow && separator_length > 0) {
+ if (array_length <= 0x7fffffffu) {
+ int separator_count = static_cast<int>(array_length) - 1;
+ int remaining_length = max_string_length - string_length;
+ if ((remaining_length / separator_length) >= separator_count) {
+ string_length += separator_length * (array_length - 1);
+ } else {
+        // No room for the separators within the maximal string length.
+ overflow = true;
+ }
+ } else {
+      // A non-empty separator repeated at least 2^31-1 times means the
+      // resulting string is too large to create.
+ STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
+ overflow = true;
+ }
+ }
+ if (overflow) {
+    // The join result is too large to create; treat this as a fatal
+    // process out-of-memory condition.
+ V8::FatalProcessOutOfMemory("Array join result too large.");
+ }
+
+ if (is_ascii) {
+ MaybeObject* result_allocation =
+ isolate->heap()->AllocateRawAsciiString(string_length);
+ if (result_allocation->IsFailure()) return result_allocation;
+ SeqAsciiString* result_string =
+ SeqAsciiString::cast(result_allocation->ToObjectUnchecked());
+ JoinSparseArrayWithSeparator<char>(elements,
+ elements_length,
+ array_length,
+ separator,
+ Vector<char>(result_string->GetChars(),
+ string_length));
+ return result_string;
+ } else {
+ MaybeObject* result_allocation =
+ isolate->heap()->AllocateRawTwoByteString(string_length);
+ if (result_allocation->IsFailure()) return result_allocation;
+ SeqTwoByteString* result_string =
+ SeqTwoByteString::cast(result_allocation->ToObjectUnchecked());
+ JoinSparseArrayWithSeparator<uc16>(elements,
+ elements_length,
+ array_length,
+ separator,
+ Vector<uc16>(result_string->GetChars(),
+ string_length));
+ return result_string;
+ }
+}
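
The separator accounting above deliberately avoids computing separator_count * separator_length, since that product can itself overflow int; dividing the remaining headroom by the separator length keeps every intermediate value in range. The guard in isolation, with invented names and a non-empty separator assumed:

#include <cstdio>

bool FitsWithSeparators(int string_length, int max_string_length,
                        int separator_length, int separator_count) {
  int remaining_length = max_string_length - string_length;
  // The division cannot overflow; the equivalent multiplication could.
  return (remaining_length / separator_length) >= separator_count;
}

int main() {
  // A billion two-character separators do not fit in a 2^30 budget.
  printf("%d\n", FitsWithSeparators(0, 1 << 30, 2, 1000000000));  // prints 0
  return 0;
}
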
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return Heap::NumberFromInt32(x | y);
+ return isolate->heap()->NumberFromInt32(x | y);
}
-static MaybeObject* Runtime_NumberAnd(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAnd) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return Heap::NumberFromInt32(x & y);
+ return isolate->heap()->NumberFromInt32(x & y);
}
-static MaybeObject* Runtime_NumberXor(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return Heap::NumberFromInt32(x ^ y);
+ return isolate->heap()->NumberFromInt32(x ^ y);
}
-static MaybeObject* Runtime_NumberNot(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberNot) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- return Heap::NumberFromInt32(~x);
+ return isolate->heap()->NumberFromInt32(~x);
}
-static MaybeObject* Runtime_NumberShl(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return Heap::NumberFromInt32(x << (y & 0x1f));
+ return isolate->heap()->NumberFromInt32(x << (y & 0x1f));
}
-static MaybeObject* Runtime_NumberShr(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShr) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return Heap::NumberFromUint32(x >> (y & 0x1f));
+ return isolate->heap()->NumberFromUint32(x >> (y & 0x1f));
}
-static MaybeObject* Runtime_NumberSar(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSar) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return Heap::NumberFromInt32(ArithmeticShiftRight(x, y & 0x1f));
+ return isolate->heap()->NumberFromInt32(ArithmeticShiftRight(x, y & 0x1f));
}
-static MaybeObject* Runtime_NumberEquals(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
if (isnan(x)) return Smi::FromInt(NOT_EQUAL);
if (isnan(y)) return Smi::FromInt(NOT_EQUAL);
if (x == y) return Smi::FromInt(EQUAL);
@@ -6053,7 +6597,7 @@ static MaybeObject* Runtime_NumberEquals(Arguments args) {
}
-static MaybeObject* Runtime_StringEquals(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -6071,12 +6615,12 @@ static MaybeObject* Runtime_StringEquals(Arguments args) {
}
-static MaybeObject* Runtime_NumberCompare(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
if (isnan(x) || isnan(y)) return args[2];
if (x == y) return Smi::FromInt(EQUAL);
if (isless(x, y)) return Smi::FromInt(LESS);
@@ -6086,15 +6630,10 @@ static MaybeObject* Runtime_NumberCompare(Arguments args) {
// Compare two Smis as if they were converted to strings and then
// compared lexicographically.
-static MaybeObject* Runtime_SmiLexicographicCompare(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- // Arrays for the individual characters of the two Smis. Smis are
- // 31 bit integers and 10 decimal digits are therefore enough.
- static int x_elms[10];
- static int y_elms[10];
-
// Extract the integer values from the Smis.
CONVERT_CHECKED(Smi, x, args[0]);
CONVERT_CHECKED(Smi, y, args[1]);
@@ -6104,49 +6643,77 @@ static MaybeObject* Runtime_SmiLexicographicCompare(Arguments args) {
// If the integers are equal so are the string representations.
if (x_value == y_value) return Smi::FromInt(EQUAL);
- // If one of the integers are zero the normal integer order is the
+  // If one of the integers is zero, the normal integer order is the
// same as the lexicographic order of the string representations.
- if (x_value == 0 || y_value == 0) return Smi::FromInt(x_value - y_value);
+  if (x_value == 0 || y_value == 0) {
+    return Smi::FromInt(x_value < y_value ? LESS : GREATER);
+  }
// If only one of the integers is negative the negative number is
// smallest because the char code of '-' is less than the char code
// of any digit. Otherwise, we make both values positive.
+
+  // Use unsigned values, otherwise the logic is incorrect for -MIN_INT on
+ // architectures using 32-bit Smis.
+ uint32_t x_scaled = x_value;
+ uint32_t y_scaled = y_value;
if (x_value < 0 || y_value < 0) {
if (y_value >= 0) return Smi::FromInt(LESS);
if (x_value >= 0) return Smi::FromInt(GREATER);
- x_value = -x_value;
- y_value = -y_value;
- }
-
- // Convert the integers to arrays of their decimal digits.
- int x_index = 0;
- int y_index = 0;
- while (x_value > 0) {
- x_elms[x_index++] = x_value % 10;
- x_value /= 10;
- }
- while (y_value > 0) {
- y_elms[y_index++] = y_value % 10;
- y_value /= 10;
- }
-
- // Loop through the arrays of decimal digits finding the first place
- // where they differ.
- while (--x_index >= 0 && --y_index >= 0) {
- int diff = x_elms[x_index] - y_elms[y_index];
- if (diff != 0) return Smi::FromInt(diff);
+ x_scaled = -x_value;
+ y_scaled = -y_value;
}
- // If one array is a suffix of the other array, the longest array is
- // the representation of the largest of the Smis in the
- // lexicographic ordering.
- return Smi::FromInt(x_index - y_index);
-}
-
+ static const uint32_t kPowersOf10[] = {
+ 1, 10, 100, 1000, 10*1000, 100*1000,
+ 1000*1000, 10*1000*1000, 100*1000*1000,
+ 1000*1000*1000
+ };
-static Object* StringInputBufferCompare(String* x, String* y) {
- static StringInputBuffer bufx;
- static StringInputBuffer bufy;
+ // If the integers have the same number of decimal digits they can be
+ // compared directly as the numeric order is the same as the
+ // lexicographic order. If one integer has fewer digits, it is scaled
+ // by some power of 10 to have the same number of digits as the longer
+ // integer. If the scaled integers are equal it means the shorter
+ // integer comes first in the lexicographic order.
+
+ // From http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
+ int x_log2 = IntegerLog2(x_scaled);
+ int x_log10 = ((x_log2 + 1) * 1233) >> 12;
+ x_log10 -= x_scaled < kPowersOf10[x_log10];
+
+ int y_log2 = IntegerLog2(y_scaled);
+ int y_log10 = ((y_log2 + 1) * 1233) >> 12;
+ y_log10 -= y_scaled < kPowersOf10[y_log10];
+
+ int tie = EQUAL;
+
+ if (x_log10 < y_log10) {
+ // X has fewer digits. We would like to simply scale up X but that
+    // might overflow, e.g. when comparing 9 with 1_000_000_000, 9 would
+ // be scaled up to 9_000_000_000. So we scale up by the next
+ // smallest power and scale down Y to drop one digit. It is OK to
+ // drop one digit from the longer integer since the final digit is
+ // past the length of the shorter integer.
+ x_scaled *= kPowersOf10[y_log10 - x_log10 - 1];
+ y_scaled /= 10;
+ tie = LESS;
+ } else if (y_log10 < x_log10) {
+ y_scaled *= kPowersOf10[x_log10 - y_log10 - 1];
+ x_scaled /= 10;
+ tie = GREATER;
+ }
+
+ if (x_scaled < y_scaled) return Smi::FromInt(LESS);
+ if (x_scaled > y_scaled) return Smi::FromInt(GREATER);
+ return Smi::FromInt(tie);
+}
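
The digit-scaling comparison above, end to end: floor(log10) is derived from floor(log2) via the ((log2 + 1) * 1233) >> 12 approximation (1233/4096 is close to log10(2)), the number with fewer digits is scaled up by one digit less than the difference while the longer one drops a digit (avoiding 32-bit overflow), and a tie means the shorter number is a prefix of the longer one and therefore sorts first. A self-contained sketch, with a portable loop standing in for V8's IntegerLog2:

#include <cstdint>
#include <cstdio>

static const uint32_t kPowersOf10[] = {
  1, 10, 100, 1000, 10000, 100000,
  1000000, 10000000, 100000000, 1000000000
};

int IntegerLog2(uint32_t value) {  // portable stand-in
  int result = -1;
  while (value != 0) { value >>= 1; ++result; }
  return result;
}

int Log10(uint32_t value) {        // value must be positive
  int log10 = ((IntegerLog2(value) + 1) * 1233) >> 12;
  return log10 - (value < kPowersOf10[log10]);
}

int CompareAsStrings(uint32_t x, uint32_t y) {  // both positive
  int x_digits = Log10(x), y_digits = Log10(y), tie = 0;
  if (x_digits < y_digits) {
    x *= kPowersOf10[y_digits - x_digits - 1];  // scale up, one digit short
    y /= 10;                                    // drop the final digit
    tie = -1;                                   // shorter sorts first on tie
  } else if (y_digits < x_digits) {
    y *= kPowersOf10[x_digits - y_digits - 1];
    x /= 10;
    tie = 1;
  }
  if (x < y) return -1;
  if (x > y) return 1;
  return tie;
}

int main() {
  // "9" > "100" lexicographically, and "10" < "100" as a prefix.
  printf("%d %d\n", CompareAsStrings(9, 100), CompareAsStrings(10, 100));
  return 0;  // prints 1 -1
}
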
+
+
+static Object* StringInputBufferCompare(RuntimeState* state,
+ String* x,
+ String* y) {
+ StringInputBuffer& bufx = *state->string_input_buffer_compare_bufx();
+ StringInputBuffer& bufy = *state->string_input_buffer_compare_bufy();
bufx.Reset(x);
bufy.Reset(y);
while (bufx.has_more() && bufy.has_more()) {
@@ -6199,19 +6766,20 @@ static Object* FlatStringCompare(String* x, String* y) {
} else {
result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER);
}
- ASSERT(result == StringInputBufferCompare(x, y));
+ ASSERT(result ==
+ StringInputBufferCompare(Isolate::Current()->runtime_state(), x, y));
return result;
}
-static MaybeObject* Runtime_StringCompare(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, x, args[0]);
CONVERT_CHECKED(String, y, args[1]);
- Counters::string_compare_runtime.Increment();
+ isolate->counters()->string_compare_runtime()->Increment();
// A few fast case tests before we flatten.
if (x == y) return Smi::FromInt(EQUAL);
@@ -6227,161 +6795,163 @@ static MaybeObject* Runtime_StringCompare(Arguments args) {
else if (d > 0) return Smi::FromInt(GREATER);
Object* obj;
- { MaybeObject* maybe_obj = Heap::PrepareForCompare(x);
+ { MaybeObject* maybe_obj = isolate->heap()->PrepareForCompare(x);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- { MaybeObject* maybe_obj = Heap::PrepareForCompare(y);
+ { MaybeObject* maybe_obj = isolate->heap()->PrepareForCompare(y);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
return (x->IsFlat() && y->IsFlat()) ? FlatStringCompare(x, y)
- : StringInputBufferCompare(x, y);
+ : StringInputBufferCompare(isolate->runtime_state(), x, y);
}
-static MaybeObject* Runtime_Math_acos(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_acos.Increment();
+ isolate->counters()->math_acos()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return TranscendentalCache::Get(TranscendentalCache::ACOS, x);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ return isolate->transcendental_cache()->Get(TranscendentalCache::ACOS, x);
}
-static MaybeObject* Runtime_Math_asin(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_asin.Increment();
+ isolate->counters()->math_asin()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return TranscendentalCache::Get(TranscendentalCache::ASIN, x);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ return isolate->transcendental_cache()->Get(TranscendentalCache::ASIN, x);
}
-static MaybeObject* Runtime_Math_atan(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_atan.Increment();
+ isolate->counters()->math_atan()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return TranscendentalCache::Get(TranscendentalCache::ATAN, x);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ return isolate->transcendental_cache()->Get(TranscendentalCache::ATAN, x);
}
-static MaybeObject* Runtime_Math_atan2(Arguments args) {
+static const double kPiDividedBy4 = 0.78539816339744830962;
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- Counters::math_atan2.Increment();
+ isolate->counters()->math_atan2()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
double result;
if (isinf(x) && isinf(y)) {
// Make sure that the result in case of two infinite arguments
// is a multiple of Pi / 4. The sign of the result is determined
// by the first argument (x) and the sign of the second argument
// determines the multiplier: one or three.
- static double kPiDividedBy4 = 0.78539816339744830962;
int multiplier = (x < 0) ? -1 : 1;
if (y < 0) multiplier *= 3;
result = multiplier * kPiDividedBy4;
} else {
result = atan2(x, y);
}
- return Heap::AllocateHeapNumber(result);
+ return isolate->heap()->AllocateHeapNumber(result);
}
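
The two-infinity branch above in isolation: the sign of the first argument fixes the sign of the result, and a negative second argument moves the magnitude from pi/4 to 3*pi/4, the diagonals atan2 converges to for finite arguments. A direct transcription for illustration:

#include <cmath>
#include <cstdio>

double Atan2Infinities(double x, double y) {
  static const double kPiDividedBy4 = 0.78539816339744830962;
  int multiplier = (x < 0) ? -1 : 1;
  if (y < 0) multiplier *= 3;
  return multiplier * kPiDividedBy4;
}

int main() {
  double inf = INFINITY;
  printf("%f\n", Atan2Infinities(inf, inf));    // pi/4
  printf("%f\n", Atan2Infinities(inf, -inf));   // 3*pi/4
  printf("%f\n", Atan2Infinities(-inf, -inf));  // -3*pi/4
  return 0;
}
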
-static MaybeObject* Runtime_Math_ceil(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_ceil) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_ceil.Increment();
+ isolate->counters()->math_ceil()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::NumberFromDouble(ceiling(x));
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ return isolate->heap()->NumberFromDouble(ceiling(x));
}
-static MaybeObject* Runtime_Math_cos(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_cos.Increment();
+ isolate->counters()->math_cos()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return TranscendentalCache::Get(TranscendentalCache::COS, x);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ return isolate->transcendental_cache()->Get(TranscendentalCache::COS, x);
}
-static MaybeObject* Runtime_Math_exp(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_exp) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_exp.Increment();
+ isolate->counters()->math_exp()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return TranscendentalCache::Get(TranscendentalCache::EXP, x);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ return isolate->transcendental_cache()->Get(TranscendentalCache::EXP, x);
}
-static MaybeObject* Runtime_Math_floor(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_floor.Increment();
+ isolate->counters()->math_floor()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::NumberFromDouble(floor(x));
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ return isolate->heap()->NumberFromDouble(floor(x));
}
-static MaybeObject* Runtime_Math_log(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_log.Increment();
+ isolate->counters()->math_log()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return TranscendentalCache::Get(TranscendentalCache::LOG, x);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
}
-static MaybeObject* Runtime_Math_pow(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- Counters::math_pow.Increment();
+ isolate->counters()->math_pow()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
// If the second argument is a smi, it is much faster to call the
// custom powi() function than the generic pow().
if (args[1]->IsSmi()) {
- int y = Smi::cast(args[1])->value();
- return Heap::NumberFromDouble(power_double_int(x, y));
+ int y = args.smi_at(1);
+ return isolate->heap()->NumberFromDouble(power_double_int(x, y));
}
- CONVERT_DOUBLE_CHECKED(y, args[1]);
- return Heap::AllocateHeapNumber(power_double_double(x, y));
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
+ return isolate->heap()->AllocateHeapNumber(power_double_double(x, y));
}
// Fast version of Math.pow if we know that y is not an integer and
// y is not -0.5 or 0.5. Used as slowcase from codegen.
-static MaybeObject* Runtime_Math_pow_cfunction(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
if (y == 0) {
return Smi::FromInt(1);
} else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
- return Heap::nan_value();
+ return isolate->heap()->nan_value();
} else {
- return Heap::AllocateHeapNumber(pow(x, y));
+ return isolate->heap()->AllocateHeapNumber(pow(x, y));
}
}
-static MaybeObject* Runtime_RoundNumber(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_round.Increment();
+ isolate->counters()->math_round()->Increment();
if (!args[0]->IsHeapNumber()) {
// Must be smi. Return the argument unchanged for all the other types
@@ -6395,9 +6965,16 @@ static MaybeObject* Runtime_RoundNumber(Arguments args) {
int exponent = number->get_exponent();
int sign = number->get_sign();
- // We compare with kSmiValueSize - 3 because (2^30 - 0.1) has exponent 29 and
- // should be rounded to 2^30, which is not smi.
- if (!sign && exponent <= kSmiValueSize - 3) {
+ if (exponent < -1) {
+ // Number in range ]-0.5..0.5[. These always round to +/-zero.
+ if (sign) return isolate->heap()->minus_zero_value();
+ return Smi::FromInt(0);
+ }
+
+ // We compare with kSmiValueSize - 2 because (2^30 - 0.1) has exponent 29 and
+  // should be rounded to 2^30, which is not a smi (for 31-bit smis; a
+  // similar argument holds for 32-bit smis).
+ if (!sign && exponent < kSmiValueSize - 2) {
return Smi::FromInt(static_cast<int>(value + 0.5));
}
@@ -6407,40 +6984,40 @@ static MaybeObject* Runtime_RoundNumber(Arguments args) {
return number;
}
- if (sign && value >= -0.5) return Heap::minus_zero_value();
+ if (sign && value >= -0.5) return isolate->heap()->minus_zero_value();
// Do not call NumberFromDouble() to avoid extra checks.
- return Heap::AllocateHeapNumber(floor(value + 0.5));
+ return isolate->heap()->AllocateHeapNumber(floor(value + 0.5));
}
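
The fast path above reads the raw IEEE-754 exponent: with 31-bit smis (kSmiValueSize == 31, the 32-bit configuration assumed below), exponent < 29 bounds the value below 2^29, so value + 0.5 can never reach 2^30, the first integer outside the smi range. The boundary case from the comment, checked directly:

#include <cstdint>
#include <cstdio>
#include <cstring>

int UnbiasedExponent(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));  // standard IEEE-754 bit layout
  return static_cast<int>((bits >> 52) & 0x7ff) - 1023;
}

int main() {
  const int kSmiValueSize = 31;  // assumed: 32-bit V8 configuration
  double v = (1 << 30) - 0.1;    // exponent 29: rounds to 2^30, not a smi
  printf("exponent=%d fast_path=%d\n",
         UnbiasedExponent(v), UnbiasedExponent(v) < kSmiValueSize - 2);
  return 0;                      // prints exponent=29 fast_path=0
}
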
-static MaybeObject* Runtime_Math_sin(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_sin.Increment();
+ isolate->counters()->math_sin()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return TranscendentalCache::Get(TranscendentalCache::SIN, x);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ return isolate->transcendental_cache()->Get(TranscendentalCache::SIN, x);
}
-static MaybeObject* Runtime_Math_sqrt(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_sqrt.Increment();
+ isolate->counters()->math_sqrt()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::AllocateHeapNumber(sqrt(x));
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ return isolate->heap()->AllocateHeapNumber(sqrt(x));
}
-static MaybeObject* Runtime_Math_tan(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- Counters::math_tan.Increment();
+ isolate->counters()->math_tan()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return TranscendentalCache::Get(TranscendentalCache::TAN, x);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ return isolate->transcendental_cache()->Get(TranscendentalCache::TAN, x);
}
@@ -6489,13 +7066,13 @@ static int MakeDay(int year, int month, int day) {
}
-static MaybeObject* Runtime_DateMakeDay(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
- CONVERT_SMI_CHECKED(year, args[0]);
- CONVERT_SMI_CHECKED(month, args[1]);
- CONVERT_SMI_CHECKED(date, args[2]);
+ CONVERT_SMI_ARG_CHECKED(year, 0);
+ CONVERT_SMI_ARG_CHECKED(month, 1);
+ CONVERT_SMI_ARG_CHECKED(date, 2);
return Smi::FromInt(MakeDay(year, month, date));
}
@@ -6788,17 +7365,18 @@ static inline void DateYMDFromTime(int date,
}
-static MaybeObject* Runtime_DateYMDFromTime(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateYMDFromTime) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(t, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(t, 0);
CONVERT_CHECKED(JSArray, res_array, args[1]);
int year, month, day;
DateYMDFromTime(static_cast<int>(floor(t / 86400000)), year, month, day);
- RUNTIME_ASSERT(res_array->elements()->map() == Heap::fixed_array_map());
+ RUNTIME_ASSERT(res_array->elements()->map() ==
+ isolate->heap()->fixed_array_map());
FixedArray* elms = FixedArray::cast(res_array->elements());
RUNTIME_ASSERT(elms->length() == 3);
@@ -6806,33 +7384,131 @@ static MaybeObject* Runtime_DateYMDFromTime(Arguments args) {
elms->set(1, Smi::FromInt(month));
elms->set(2, Smi::FromInt(day));
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+
+ Handle<JSFunction> callee = args.at<JSFunction>(0);
+ Object** parameters = reinterpret_cast<Object**>(args[1]);
+ const int argument_count = Smi::cast(args[2])->value();
+
+ Handle<JSObject> result =
+ isolate->factory()->NewArgumentsObject(callee, argument_count);
+ // Allocate the elements if needed.
+ int parameter_count = callee->shared()->formal_parameter_count();
+ if (argument_count > 0) {
+ if (parameter_count > 0) {
+ int mapped_count = Min(argument_count, parameter_count);
+ Handle<FixedArray> parameter_map =
+ isolate->factory()->NewFixedArray(mapped_count + 2, NOT_TENURED);
+ parameter_map->set_map(
+ isolate->heap()->non_strict_arguments_elements_map());
+
+ Handle<Map> old_map(result->map());
+ Handle<Map> new_map =
+ isolate->factory()->CopyMapDropTransitions(old_map);
+ new_map->set_elements_kind(JSObject::NON_STRICT_ARGUMENTS_ELEMENTS);
+
+ result->set_map(*new_map);
+ result->set_elements(*parameter_map);
+
+ // Store the context and the arguments array at the beginning of the
+ // parameter map.
+ Handle<Context> context(isolate->context());
+ Handle<FixedArray> arguments =
+ isolate->factory()->NewFixedArray(argument_count, NOT_TENURED);
+ parameter_map->set(0, *context);
+ parameter_map->set(1, *arguments);
+
+ // Loop over the actual parameters backwards.
+ int index = argument_count - 1;
+ while (index >= mapped_count) {
+ // These go directly in the arguments array and have no
+ // corresponding slot in the parameter map.
+ arguments->set(index, *(parameters - index - 1));
+ --index;
+ }
+
+ ScopeInfo<> scope_info(callee->shared()->scope_info());
+ while (index >= 0) {
+ // Detect duplicate names to the right in the parameter list.
+ Handle<String> name = scope_info.parameter_name(index);
+ int context_slot_count = scope_info.number_of_context_slots();
+ bool duplicate = false;
+ for (int j = index + 1; j < parameter_count; ++j) {
+ if (scope_info.parameter_name(j).is_identical_to(name)) {
+ duplicate = true;
+ break;
+ }
+ }
+
+ if (duplicate) {
+ // This goes directly in the arguments array with a hole in the
+ // parameter map.
+ arguments->set(index, *(parameters - index - 1));
+ parameter_map->set_the_hole(index + 2);
+ } else {
+ // The context index goes in the parameter map with a hole in the
+ // arguments array.
+ int context_index = -1;
+ for (int j = Context::MIN_CONTEXT_SLOTS;
+ j < context_slot_count;
+ ++j) {
+ if (scope_info.context_slot_name(j).is_identical_to(name)) {
+ context_index = j;
+ break;
+ }
+ }
+ ASSERT(context_index >= 0);
+ arguments->set_the_hole(index);
+ parameter_map->set(index + 2, Smi::FromInt(context_index));
+ }
+
+ --index;
+ }
+ } else {
+ // If there is no aliasing, the arguments object elements are not
+ // special in any way.
+ Handle<FixedArray> elements =
+ isolate->factory()->NewFixedArray(argument_count, NOT_TENURED);
+ result->set_elements(*elements);
+ for (int i = 0; i < argument_count; ++i) {
+ elements->set(i, *(parameters - i - 1));
+ }
+ }
+ }
+ return *result;
}
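// Editor's illustrative model (plain C++, not part of this patch) of the
// aliasing rule implemented above: an argument slot is context-mapped only
// when its parameter name has no duplicate to its right; shadowed
// parameters are stored as plain copies in the arguments array instead.
#include <algorithm>
#include <string>
#include <vector>
static std::vector<bool> MappedSlots(const std::vector<std::string>& params,
                                     int argument_count) {
  int mapped = std::min(argument_count, static_cast<int>(params.size()));
  std::vector<bool> slots(argument_count, false);
  for (int i = 0; i < mapped; ++i) {
    bool duplicate = false;
    for (size_t j = static_cast<size_t>(i) + 1; j < params.size(); ++j) {
      if (params[j] == params[i]) { duplicate = true; break; }
    }
    slots[i] = !duplicate;  // mapped iff the name is not shadowed
  }
  return slots;
}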
-static MaybeObject* Runtime_NewArgumentsFast(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
JSFunction* callee = JSFunction::cast(args[0]);
Object** parameters = reinterpret_cast<Object**>(args[1]);
- const int length = Smi::cast(args[2])->value();
+ const int length = args.smi_at(2);
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateArgumentsObject(callee, length);
+ { MaybeObject* maybe_result =
+ isolate->heap()->AllocateArgumentsObject(callee, length);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Allocate the elements if needed.
if (length > 0) {
// Allocate the fixed array.
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateRawFixedArray(length);
+ { MaybeObject* maybe_obj = isolate->heap()->AllocateRawFixedArray(length);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
AssertNoAllocation no_gc;
FixedArray* array = reinterpret_cast<FixedArray*>(obj);
- array->set_map(Heap::fixed_array_map());
+ array->set_map(isolate->heap()->fixed_array_map());
array->set_length(length);
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
@@ -6845,61 +7521,86 @@ static MaybeObject* Runtime_NewArgumentsFast(Arguments args) {
}
-static MaybeObject* Runtime_NewClosure(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(Context, context, 0);
CONVERT_ARG_CHECKED(SharedFunctionInfo, shared, 1);
CONVERT_BOOLEAN_CHECKED(pretenure, args[2]);
- // Allocate global closures in old space and allocate local closures
- // in new space. Additionally pretenure closures that are assigned
+ // The caller ensures that we pretenure closures that are assigned
// directly to properties.
- pretenure = pretenure || (context->global_context() == *context);
PretenureFlag pretenure_flag = pretenure ? TENURED : NOT_TENURED;
Handle<JSFunction> result =
- Factory::NewFunctionFromSharedFunctionInfo(shared,
- context,
- pretenure_flag);
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
+ context,
+ pretenure_flag);
return *result;
}
-static MaybeObject* Runtime_NewObjectFromBound(Arguments args) {
- HandleScope scope;
+static SmartPointer<Object**> GetNonBoundArguments(int bound_argc,
+ int* total_argc) {
+  // Find the frame containing the arguments passed to the caller.
+ JavaScriptFrameIterator it;
+ JavaScriptFrame* frame = it.frame();
+ List<JSFunction*> functions(2);
+ frame->GetFunctions(&functions);
+ if (functions.length() > 1) {
+ int inlined_frame_index = functions.length() - 1;
+ JSFunction* inlined_function = functions[inlined_frame_index];
+ int args_count = inlined_function->shared()->formal_parameter_count();
+ ScopedVector<SlotRef> args_slots(args_count);
+ SlotRef::ComputeSlotMappingForArguments(frame,
+ inlined_frame_index,
+ &args_slots);
+
+ *total_argc = bound_argc + args_count;
+ SmartPointer<Object**> param_data(NewArray<Object**>(*total_argc));
+ for (int i = 0; i < args_count; i++) {
+ Handle<Object> val = args_slots[i].GetValue();
+ param_data[bound_argc + i] = val.location();
+ }
+ return param_data;
+ } else {
+ it.AdvanceToArgumentsFrame();
+ frame = it.frame();
+ int args_count = frame->ComputeParametersCount();
+
+ *total_argc = bound_argc + args_count;
+ SmartPointer<Object**> param_data(NewArray<Object**>(*total_argc));
+ for (int i = 0; i < args_count; i++) {
+ Handle<Object> val = Handle<Object>(frame->GetParameter(i));
+ param_data[bound_argc + i] = val.location();
+ }
+ return param_data;
+ }
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
// First argument is a function to use as a constructor.
CONVERT_ARG_CHECKED(JSFunction, function, 0);
// Second argument is either null or an array of bound arguments.
- FixedArray* bound_args = NULL;
+ Handle<FixedArray> bound_args;
int bound_argc = 0;
if (!args[1]->IsNull()) {
CONVERT_ARG_CHECKED(JSArray, params, 1);
RUNTIME_ASSERT(params->HasFastElements());
- bound_args = FixedArray::cast(params->elements());
+ bound_args = Handle<FixedArray>(FixedArray::cast(params->elements()));
bound_argc = Smi::cast(params->length())->value();
}
- // Find frame containing arguments passed to the caller.
- JavaScriptFrameIterator it;
- JavaScriptFrame* frame = it.frame();
- ASSERT(!frame->is_optimized());
- it.AdvanceToArgumentsFrame();
- frame = it.frame();
- int argc = frame->ComputeParametersCount();
-
- // Prepend bound arguments to caller's arguments.
- int total_argc = bound_argc + argc;
- SmartPointer<Object**> param_data(NewArray<Object**>(total_argc));
+ int total_argc = 0;
+ SmartPointer<Object**> param_data =
+ GetNonBoundArguments(bound_argc, &total_argc);
for (int i = 0; i < bound_argc; i++) {
Handle<Object> val = Handle<Object>(bound_args->get(i));
param_data[i] = val.location();
}
- for (int i = 0; i < argc; i++) {
- Handle<Object> val = Handle<Object>(frame->GetParameter(i));
- param_data[bound_argc + i] = val.location();
- }
bool exception = false;
Handle<Object> result =
@@ -6913,10 +7614,11 @@ static MaybeObject* Runtime_NewObjectFromBound(Arguments args) {
}
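// Behavioral note for the loop above: bound arguments occupy slots
// 0..bound_argc-1 and the caller's actual arguments follow, so in JS terms
// new (f.bind(null, 1, 2))(3) constructs f with (1, 2, 3).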
-static void TrySettingInlineConstructStub(Handle<JSFunction> function) {
- Handle<Object> prototype = Factory::null_value();
+static void TrySettingInlineConstructStub(Isolate* isolate,
+ Handle<JSFunction> function) {
+ Handle<Object> prototype = isolate->factory()->null_value();
if (function->has_instance_prototype()) {
- prototype = Handle<Object>(function->instance_prototype());
+ prototype = Handle<Object>(function->instance_prototype(), isolate);
}
if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
ConstructStubCompiler compiler;
@@ -6929,8 +7631,8 @@ static void TrySettingInlineConstructStub(Handle<JSFunction> function) {
}
-static MaybeObject* Runtime_NewObject(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
Handle<Object> constructor = args.at<Object>(0);
@@ -6939,25 +7641,26 @@ static MaybeObject* Runtime_NewObject(Arguments args) {
if (!constructor->IsJSFunction()) {
Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
Handle<Object> type_error =
- Factory::NewTypeError("not_constructor", arguments);
- return Top::Throw(*type_error);
+ isolate->factory()->NewTypeError("not_constructor", arguments);
+ return isolate->Throw(*type_error);
}
Handle<JSFunction> function = Handle<JSFunction>::cast(constructor);
  // If the function should not have a prototype, construction is not
  // allowed. In this case the generated code bails out here, since the
  // function has no initial_map.
- if (!function->should_have_prototype()) {
+ if (!function->should_have_prototype() && !function->shared()->bound()) {
Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
Handle<Object> type_error =
- Factory::NewTypeError("not_constructor", arguments);
- return Top::Throw(*type_error);
+ isolate->factory()->NewTypeError("not_constructor", arguments);
+ return isolate->Throw(*type_error);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debug* debug = isolate->debug();
// Handle stepping into constructors if step into is active.
- if (Debug::StepInActive()) {
- Debug::HandleStepIn(function, Handle<Object>::null(), 0, true);
+ if (debug->StepInActive()) {
+ debug->HandleStepIn(function, Handle<Object>::null(), 0, true);
}
#endif
@@ -6967,14 +7670,14 @@ static MaybeObject* Runtime_NewObject(Arguments args) {
// called using 'new' and creates a new JSFunction object that
// is returned. The receiver object is only used for error
// reporting if an error occurs when constructing the new
- // JSFunction. Factory::NewJSObject() should not be used to
+  // JSFunction. isolate->factory()->NewJSObject() should not be used to
// allocate JSFunctions since it does not properly initialize
// the shared part of the function. Since the receiver is
// ignored anyway, we use the global object as the receiver
// instead of a new JSFunction object. This way, errors are
// reported the same way whether or not 'Function' is called
// using 'new'.
- return Top::context()->global();
+ return isolate->context()->global();
}
}
@@ -6982,7 +7685,7 @@ static MaybeObject* Runtime_NewObject(Arguments args) {
// available. We cannot use EnsureCompiled because that forces a
// compilation through the shared function info which makes it
// impossible for us to optimize.
- Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
if (!function->is_compiled()) CompileLazy(function, CLEAR_EXCEPTION);
if (!function->has_initial_map() &&
@@ -6994,34 +7697,34 @@ static MaybeObject* Runtime_NewObject(Arguments args) {
}
bool first_allocation = !shared->live_objects_may_exist();
- Handle<JSObject> result = Factory::NewJSObject(function);
- RETURN_IF_EMPTY_HANDLE(result);
+ Handle<JSObject> result = isolate->factory()->NewJSObject(function);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
// Delay setting the stub if inobject slack tracking is in progress.
if (first_allocation && !shared->IsInobjectSlackTrackingInProgress()) {
- TrySettingInlineConstructStub(function);
+ TrySettingInlineConstructStub(isolate, function);
}
- Counters::constructed_objects.Increment();
- Counters::constructed_objects_runtime.Increment();
+ isolate->counters()->constructed_objects()->Increment();
+ isolate->counters()->constructed_objects_runtime()->Increment();
return *result;
}
-static MaybeObject* Runtime_FinalizeInstanceSize(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FinalizeInstanceSize) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
function->shared()->CompleteInobjectSlackTracking();
- TrySettingInlineConstructStub(function);
+ TrySettingInlineConstructStub(isolate, function);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_LazyCompile(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyCompile) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
Handle<JSFunction> function = args.at<JSFunction>(0);
@@ -7051,20 +7754,20 @@ static MaybeObject* Runtime_LazyCompile(Arguments args) {
}
-static MaybeObject* Runtime_LazyRecompile(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
Handle<JSFunction> function = args.at<JSFunction>(0);
  // If the function is not optimizable or the debugger is active, continue
  // using the code from the full compiler.
if (!function->shared()->code()->optimizable() ||
- Debug::has_break_points()) {
+ isolate->DebuggerHasBreakPoints()) {
if (FLAG_trace_opt) {
PrintF("[failed to optimize ");
function->PrintName();
PrintF(": is code optimizable: %s, is debugger enabled: %s]\n",
function->shared()->code()->optimizable() ? "T" : "F",
- Debug::has_break_points() ? "T" : "F");
+ isolate->DebuggerHasBreakPoints() ? "T" : "F");
}
function->ReplaceCode(function->shared()->code());
return function->code();
@@ -7082,43 +7785,43 @@ static MaybeObject* Runtime_LazyRecompile(Arguments args) {
}
-static MaybeObject* Runtime_NotifyDeoptimized(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
RUNTIME_ASSERT(args[0]->IsSmi());
Deoptimizer::BailoutType type =
- static_cast<Deoptimizer::BailoutType>(Smi::cast(args[0])->value());
- Deoptimizer* deoptimizer = Deoptimizer::Grab();
- ASSERT(Heap::IsAllocationAllowed());
+ static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
+ Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
+ ASSERT(isolate->heap()->IsAllocationAllowed());
int frames = deoptimizer->output_count();
deoptimizer->MaterializeHeapNumbers();
delete deoptimizer;
- JavaScriptFrameIterator it;
+ JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = NULL;
for (int i = 0; i < frames - 1; i++) it.Advance();
frame = it.frame();
RUNTIME_ASSERT(frame->function()->IsJSFunction());
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
Handle<Object> arguments;
for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
- if (frame->GetExpression(i) == Heap::arguments_marker()) {
+ if (frame->GetExpression(i) == isolate->heap()->arguments_marker()) {
if (arguments.is_null()) {
// FunctionGetArguments can't throw an exception, so cast away the
// doubt with an assert.
arguments = Handle<Object>(
Accessors::FunctionGetArguments(*function,
NULL)->ToObjectUnchecked());
- ASSERT(*arguments != Heap::null_value());
- ASSERT(*arguments != Heap::undefined_value());
+ ASSERT(*arguments != isolate->heap()->null_value());
+ ASSERT(*arguments != isolate->heap()->undefined_value());
}
frame->SetExpression(i, *arguments);
}
}
- CompilationCache::MarkForLazyOptimizing(function);
+ isolate->compilation_cache()->MarkForLazyOptimizing(function);
if (type == Deoptimizer::EAGER) {
RUNTIME_ASSERT(function->IsOptimized());
} else {
@@ -7128,7 +7831,7 @@ static MaybeObject* Runtime_NotifyDeoptimized(Arguments args) {
// Avoid doing too much work when running with --always-opt and keep
// the optimized code around.
if (FLAG_always_opt || type == Deoptimizer::LAZY) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Count the number of optimized activations of the function.
@@ -7151,40 +7854,82 @@ static MaybeObject* Runtime_NotifyDeoptimized(Arguments args) {
}
function->ReplaceCode(function->shared()->code());
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_NotifyOSR(Arguments args) {
- Deoptimizer* deoptimizer = Deoptimizer::Grab();
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyOSR) {
+ Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
delete deoptimizer;
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_DeoptimizeFunction(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
- if (!function->IsOptimized()) return Heap::undefined_value();
+ if (!function->IsOptimized()) return isolate->heap()->undefined_value();
Deoptimizer::DeoptimizeFunction(*function);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
+#if defined(USE_SIMULATOR)
+ return isolate->heap()->true_value();
+#else
+ return isolate->heap()->false_value();
+#endif
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ if (!function->IsOptimizable()) return isolate->heap()->undefined_value();
+ function->MarkForLazyRecompilation();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ if (!V8::UseCrankshaft()) {
+ return Smi::FromInt(4); // 4 == "never".
+ }
+ if (FLAG_always_opt) {
+ return Smi::FromInt(3); // 3 == "always".
+ }
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ return function->IsOptimized() ? Smi::FromInt(1) // 1 == "yes".
+ : Smi::FromInt(2); // 2 == "no".
+}
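// Editor's sketch: the raw Smi codes returned above, named for
// readability (this enum is illustrative and not part of the patch).
enum OptimizationStatus {
  kYes = 1,     // the function is currently optimized
  kNo = 2,      // compiled, but not currently optimized
  kAlways = 3,  // --always-opt is in effect
  kNever = 4    // Crankshaft is unavailable
};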
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ return Smi::FromInt(function->shared()->opt_count());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
  // We're not prepared to handle a function with an arguments object.
- ASSERT(!function->shared()->scope_info()->HasArgumentsShadow());
+ ASSERT(!function->shared()->uses_arguments());
// We have hit a back edge in an unoptimized frame for a function that was
// selected for on-stack replacement. Find the unoptimized code object.
- Handle<Code> unoptimized(function->shared()->code());
+ Handle<Code> unoptimized(function->shared()->code(), isolate);
// Keep track of whether we've succeeded in optimizing.
bool succeeded = unoptimized->optimizable();
if (succeeded) {
@@ -7193,7 +7938,7 @@ static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) {
// indirectly recursive and (b) an optimized invocation has been
// deoptimized so that we are currently in an unoptimized activation.
// Check for optimized activations of this function.
- JavaScriptFrameIterator it;
+ JavaScriptFrameIterator it(isolate);
while (succeeded && !it.done()) {
JavaScriptFrame* frame = it.frame();
succeeded = !frame->is_optimized() || frame->function() != *function;
@@ -7205,10 +7950,10 @@ static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) {
if (succeeded) {
// The top JS function is this one, the PC is somewhere in the
// unoptimized code.
- JavaScriptFrameIterator it;
+ JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
ASSERT(frame->function() == *function);
- ASSERT(frame->code() == *unoptimized);
+ ASSERT(frame->LookupCode() == *unoptimized);
ASSERT(unoptimized->contains(frame->pc()));
// Use linear search of the unoptimized code's stack check table to find
@@ -7265,8 +8010,7 @@ static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) {
}
StackCheckStub check_stub;
Handle<Code> check_code = check_stub.GetCode();
- Handle<Code> replacement_code(
- Builtins::builtin(Builtins::OnStackReplacement));
+ Handle<Code> replacement_code = isolate->builtins()->OnStackReplacement();
Deoptimizer::RevertStackCheckCode(*unoptimized,
*check_code,
*replacement_code);
@@ -7289,88 +8033,112 @@ static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) {
}
-static MaybeObject* Runtime_GetFunctionDelegate(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionDelegate) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
RUNTIME_ASSERT(!args[0]->IsJSFunction());
return *Execution::GetFunctionDelegate(args.at<Object>(0));
}
-static MaybeObject* Runtime_GetConstructorDelegate(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
RUNTIME_ASSERT(!args[0]->IsJSFunction());
return *Execution::GetConstructorDelegate(args.at<Object>(0));
}
-static MaybeObject* Runtime_NewContext(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, function, args[0]);
int length = function->shared()->scope_info()->NumberOfContextSlots();
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateFunctionContext(length, function);
+ { MaybeObject* maybe_result =
+ isolate->heap()->AllocateFunctionContext(length, function);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Top::set_context(Context::cast(result));
+ isolate->set_context(Context::cast(result));
return result; // non-failure
}
-MUST_USE_RESULT static MaybeObject* PushContextHelper(Object* object,
- bool is_catch_context) {
- // Convert the object to a proper JavaScript object.
- Object* js_object = object;
- if (!js_object->IsJSObject()) {
- MaybeObject* maybe_js_object = js_object->ToObject();
- if (!maybe_js_object->ToObject(&js_object)) {
- if (!Failure::cast(maybe_js_object)->IsInternalError()) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+ JSObject* extension_object;
+ if (args[0]->IsJSObject()) {
+ extension_object = JSObject::cast(args[0]);
+ } else {
+ // Convert the object to a proper JavaScript object.
+ MaybeObject* maybe_js_object = args[0]->ToObject();
+ if (!maybe_js_object->To(&extension_object)) {
+ if (Failure::cast(maybe_js_object)->IsInternalError()) {
+ HandleScope scope(isolate);
+ Handle<Object> handle = args.at<Object>(0);
+ Handle<Object> result =
+ isolate->factory()->NewTypeError("with_expression",
+ HandleVector(&handle, 1));
+ return isolate->Throw(*result);
+ } else {
return maybe_js_object;
}
- HandleScope scope;
- Handle<Object> handle(object);
- Handle<Object> result =
- Factory::NewTypeError("with_expression", HandleVector(&handle, 1));
- return Top::Throw(*result);
}
}
- Object* result;
- { MaybeObject* maybe_result =
- Heap::AllocateWithContext(Top::context(),
- JSObject::cast(js_object),
- is_catch_context);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ JSFunction* function;
+ if (args[1]->IsSmi()) {
+ // A smi sentinel indicates a context nested inside global code rather
+ // than some function. There is a canonical empty function that can be
+    // retrieved from the global context.
+ function = isolate->context()->global_context()->closure();
+ } else {
+ function = JSFunction::cast(args[1]);
}
- Context* context = Context::cast(result);
- Top::set_context(context);
-
- return result;
+ Context* context;
+ MaybeObject* maybe_context =
+ isolate->heap()->AllocateWithContext(function,
+ isolate->context(),
+ extension_object);
+ if (!maybe_context->To(&context)) return maybe_context;
+ isolate->set_context(context);
+ return context;
}
-static MaybeObject* Runtime_PushContext(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- return PushContextHelper(args[0], false);
-}
-
-
-static MaybeObject* Runtime_PushCatchContext(Arguments args) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- return PushContextHelper(args[0], true);
+ ASSERT(args.length() == 3);
+ String* name = String::cast(args[0]);
+ Object* thrown_object = args[1];
+ JSFunction* function;
+ if (args[2]->IsSmi()) {
+ // A smi sentinel indicates a context nested inside global code rather
+ // than some function. There is a canonical empty function that can be
+    // retrieved from the global context.
+ function = isolate->context()->global_context()->closure();
+ } else {
+ function = JSFunction::cast(args[2]);
+ }
+ Context* context;
+ MaybeObject* maybe_context =
+ isolate->heap()->AllocateCatchContext(function,
+ isolate->context(),
+ name,
+ thrown_object);
+ if (!maybe_context->To(&context)) return maybe_context;
+ isolate->set_context(context);
+ return context;
}
-static MaybeObject* Runtime_DeleteContextSlot(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(Context, context, 0);
@@ -7383,12 +8151,12 @@ static MaybeObject* Runtime_DeleteContextSlot(Arguments args) {
// If the slot was not found the result is true.
if (holder.is_null()) {
- return Heap::true_value();
+ return isolate->heap()->true_value();
}
// If the slot was found in a context, it should be DONT_DELETE.
if (holder->IsContext()) {
- return Heap::false_value();
+ return isolate->heap()->false_value();
}
// The slot was found in a JSObject, either a context extension object,
@@ -7436,17 +8204,19 @@ static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) {
#endif
-static inline MaybeObject* Unhole(MaybeObject* x,
+static inline MaybeObject* Unhole(Heap* heap,
+ MaybeObject* x,
PropertyAttributes attributes) {
ASSERT(!x->IsTheHole() || (attributes & READ_ONLY) != 0);
USE(attributes);
- return x->IsTheHole() ? Heap::undefined_value() : x;
+ return x->IsTheHole() ? heap->undefined_value() : x;
}
-static JSObject* ComputeReceiverForNonGlobal(JSObject* holder) {
+static Object* ComputeReceiverForNonGlobal(Isolate* isolate,
+ JSObject* holder) {
ASSERT(!holder->IsGlobalObject());
- Context* top = Top::context();
+ Context* top = isolate->context();
// Get the context extension function.
JSFunction* context_extension_function =
top->global_context()->context_extension_function();
@@ -7456,19 +8226,22 @@ static JSObject* ComputeReceiverForNonGlobal(JSObject* holder) {
// explicitly via a with-statement.
Object* constructor = holder->map()->constructor();
if (constructor != context_extension_function) return holder;
- // Fall back to using the global object as the receiver if the
- // property turns out to be a local variable allocated in a context
- // extension object - introduced via eval.
- return top->global()->global_receiver();
+ // Fall back to using the global object as the implicit receiver if
+ // the property turns out to be a local variable allocated in a
+ // context extension object - introduced via eval. Implicit global
+ // receivers are indicated with the hole value.
+ return isolate->heap()->the_hole_value();
}
-static ObjectPair LoadContextSlotHelper(Arguments args, bool throw_error) {
- HandleScope scope;
+static ObjectPair LoadContextSlotHelper(Arguments args,
+ Isolate* isolate,
+ bool throw_error) {
+ HandleScope scope(isolate);
ASSERT_EQ(2, args.length());
if (!args[0]->IsContext() || !args[1]->IsString()) {
- return MakePair(Top::ThrowIllegalOperation(), NULL);
+ return MakePair(isolate->ThrowIllegalOperation(), NULL);
}
Handle<Context> context = args.at<Context>(0);
Handle<String> name = args.at<String>(1);
@@ -7485,65 +8258,75 @@ static ObjectPair LoadContextSlotHelper(Arguments args, bool throw_error) {
// If the "property" we were looking for is a local variable or an
// argument in a context, the receiver is the global object; see
  // ECMA-262, 3rd ed., 10.1.6 and 10.2.3.
- JSObject* receiver = Top::context()->global()->global_receiver();
+ //
+ // Use the hole as the receiver to signal that the receiver is
+ // implicit and that the global receiver should be used.
+ Handle<Object> receiver = isolate->factory()->the_hole_value();
MaybeObject* value = (holder->IsContext())
? Context::cast(*holder)->get(index)
: JSObject::cast(*holder)->GetElement(index);
- return MakePair(Unhole(value, attributes), receiver);
+ return MakePair(Unhole(isolate->heap(), value, attributes), *receiver);
}
// If the holder is found, we read the property from it.
if (!holder.is_null() && holder->IsJSObject()) {
ASSERT(Handle<JSObject>::cast(holder)->HasProperty(*name));
JSObject* object = JSObject::cast(*holder);
- JSObject* receiver;
+ Object* receiver;
if (object->IsGlobalObject()) {
receiver = GlobalObject::cast(object)->global_receiver();
} else if (context->is_exception_holder(*holder)) {
- receiver = Top::context()->global()->global_receiver();
+ // Use the hole as the receiver to signal that the receiver is
+ // implicit and that the global receiver should be used.
+ receiver = isolate->heap()->the_hole_value();
} else {
- receiver = ComputeReceiverForNonGlobal(object);
+ receiver = ComputeReceiverForNonGlobal(isolate, object);
}
+
+ // GetProperty below can cause GC.
+ Handle<Object> receiver_handle(receiver);
+
// No need to unhole the value here. This is taken care of by the
// GetProperty function.
MaybeObject* value = object->GetProperty(*name);
- return MakePair(value, receiver);
+ return MakePair(value, *receiver_handle);
}
if (throw_error) {
// The property doesn't exist - throw exception.
Handle<Object> reference_error =
- Factory::NewReferenceError("not_defined", HandleVector(&name, 1));
- return MakePair(Top::Throw(*reference_error), NULL);
+ isolate->factory()->NewReferenceError("not_defined",
+ HandleVector(&name, 1));
+ return MakePair(isolate->Throw(*reference_error), NULL);
} else {
- // The property doesn't exist - return undefined
- return MakePair(Heap::undefined_value(), Heap::undefined_value());
+ // The property doesn't exist - return undefined.
+ return MakePair(isolate->heap()->undefined_value(),
+ isolate->heap()->undefined_value());
}
}
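// Editor's sketch of the consumer side of the hole-receiver protocol used
// above (the helper name is hypothetical): a caller that receives the hole
// substitutes the global receiver before invoking the callee.
static Object* ResolveImplicitReceiver(Isolate* isolate, Object* receiver) {
  if (receiver->IsTheHole()) {
    return isolate->context()->global()->global_receiver();
  }
  return receiver;
}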
-static ObjectPair Runtime_LoadContextSlot(Arguments args) {
- return LoadContextSlotHelper(args, true);
+RUNTIME_FUNCTION(ObjectPair, Runtime_LoadContextSlot) {
+ return LoadContextSlotHelper(args, isolate, true);
}
-static ObjectPair Runtime_LoadContextSlotNoReferenceError(Arguments args) {
- return LoadContextSlotHelper(args, false);
+RUNTIME_FUNCTION(ObjectPair, Runtime_LoadContextSlotNoReferenceError) {
+ return LoadContextSlotHelper(args, isolate, false);
}
-static MaybeObject* Runtime_StoreContextSlot(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
- Handle<Object> value(args[0]);
+ Handle<Object> value(args[0], isolate);
CONVERT_ARG_CHECKED(Context, context, 1);
CONVERT_ARG_CHECKED(String, name, 2);
- CONVERT_SMI_CHECKED(strict_unchecked, args[3]);
+ CONVERT_SMI_ARG_CHECKED(strict_unchecked, 3);
RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
strict_unchecked == kNonStrictMode);
- StrictModeFlag strict = static_cast<StrictModeFlag>(strict_unchecked);
-
+ StrictModeFlag strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
int index;
PropertyAttributes attributes;
@@ -7556,13 +8339,19 @@ static MaybeObject* Runtime_StoreContextSlot(Arguments args) {
if ((attributes & READ_ONLY) == 0) {
// Context is a fixed array and set cannot fail.
Context::cast(*holder)->set(index, *value);
+ } else if (strict_mode == kStrictMode) {
+ // Setting read only property in strict mode.
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("strict_cannot_assign",
+ HandleVector(&name, 1));
+ return isolate->Throw(*error);
}
} else {
ASSERT((attributes & READ_ONLY) == 0);
Handle<Object> result =
- SetElement(Handle<JSObject>::cast(holder), index, value);
+ SetElement(Handle<JSObject>::cast(holder), index, value, strict_mode);
if (result.is_null()) {
- ASSERT(Top::has_pending_exception());
+ ASSERT(isolate->has_pending_exception());
return Failure::Exception();
}
}
@@ -7577,72 +8366,80 @@ static MaybeObject* Runtime_StoreContextSlot(Arguments args) {
// The property exists in the extension context.
context_ext = Handle<JSObject>::cast(holder);
} else {
- // The property was not found. It needs to be stored in the global context.
+ // The property was not found.
ASSERT(attributes == ABSENT);
+
+ if (strict_mode == kStrictMode) {
+ // Throw in strict mode (assignment to undefined variable).
+ Handle<Object> error =
+ isolate->factory()->NewReferenceError(
+ "not_defined", HandleVector(&name, 1));
+ return isolate->Throw(*error);
+ }
+ // In non-strict mode, the property is stored in the global context.
attributes = NONE;
- context_ext = Handle<JSObject>(Top::context()->global());
+ context_ext = Handle<JSObject>(isolate->context()->global());
}
// Set the property, but ignore if read_only variable on the context
// extension object itself.
if ((attributes & READ_ONLY) == 0 ||
(context_ext->GetLocalPropertyAttribute(*name) == ABSENT)) {
- RETURN_IF_EMPTY_HANDLE(SetProperty(context_ext, name, value, NONE, strict));
- } else if (strict == kStrictMode && (attributes & READ_ONLY) != 0) {
+ RETURN_IF_EMPTY_HANDLE(
+ isolate,
+ SetProperty(context_ext, name, value, NONE, strict_mode));
+ } else if (strict_mode == kStrictMode && (attributes & READ_ONLY) != 0) {
// Setting read only property in strict mode.
Handle<Object> error =
- Factory::NewTypeError("strict_cannot_assign", HandleVector(&name, 1));
- return Top::Throw(*error);
+ isolate->factory()->NewTypeError(
+ "strict_cannot_assign", HandleVector(&name, 1));
+ return isolate->Throw(*error);
}
return *value;
}
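// Behavioral summary of the strict-mode paths above, in JS terms:
// "use strict"; someUndeclaredVar = 1; throws a ReferenceError
// ("not_defined"), and assigning to a READ_ONLY binding throws a TypeError
// ("strict_cannot_assign"). In non-strict mode the first assignment
// creates a property on the global context and the second is silently
// ignored.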
-static MaybeObject* Runtime_Throw(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Throw) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- return Top::Throw(args[0]);
+ return isolate->Throw(args[0]);
}
-static MaybeObject* Runtime_ReThrow(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ReThrow) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- return Top::ReThrow(args[0]);
+ return isolate->ReThrow(args[0]);
}
-static MaybeObject* Runtime_PromoteScheduledException(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PromoteScheduledException) {
ASSERT_EQ(0, args.length());
- return Top::PromoteScheduledException();
+ return isolate->PromoteScheduledException();
}
-static MaybeObject* Runtime_ThrowReferenceError(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowReferenceError) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- Handle<Object> name(args[0]);
+ Handle<Object> name(args[0], isolate);
Handle<Object> reference_error =
- Factory::NewReferenceError("not_defined", HandleVector(&name, 1));
- return Top::Throw(*reference_error);
-}
-
-
-static MaybeObject* Runtime_StackOverflow(Arguments args) {
- NoHandleAllocation na;
- return Top::StackOverflow();
+ isolate->factory()->NewReferenceError("not_defined",
+ HandleVector(&name, 1));
+ return isolate->Throw(*reference_error);
}
-static MaybeObject* Runtime_StackGuard(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
ASSERT(args.length() == 0);
// First check if this is a real stack overflow.
- if (StackGuard::IsStackOverflow()) {
- return Runtime_StackOverflow(args);
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ NoHandleAllocation na;
+ return isolate->StackOverflow();
}
return Execution::HandleStackGuardInterrupt();
@@ -7735,22 +8532,22 @@ static void PrintTransition(Object* result) {
}
-static MaybeObject* Runtime_TraceEnter(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) {
ASSERT(args.length() == 0);
NoHandleAllocation ha;
PrintTransition(NULL);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_TraceExit(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceExit) {
NoHandleAllocation ha;
PrintTransition(args[0]);
return args[0]; // return TOS
}
-static MaybeObject* Runtime_DebugPrint(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -7758,7 +8555,7 @@ static MaybeObject* Runtime_DebugPrint(Arguments args) {
if (args[0]->IsString()) {
// If we have a string, assume it's a code "marker"
    // and print some interesting CPU debugging info.
- JavaScriptFrameIterator it;
+ JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
PrintF("fp = %p, sp = %p, caller_sp = %p: ",
frame->fp(), frame->sp(), frame->caller_sp());
@@ -7781,15 +8578,15 @@ static MaybeObject* Runtime_DebugPrint(Arguments args) {
}
-static MaybeObject* Runtime_DebugTrace(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugTrace) {
ASSERT(args.length() == 0);
NoHandleAllocation ha;
- Top::PrintStack();
- return Heap::undefined_value();
+ isolate->PrintStack();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_DateCurrentTime(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateCurrentTime) {
NoHandleAllocation ha;
ASSERT(args.length() == 0);
@@ -7798,12 +8595,12 @@ static MaybeObject* Runtime_DateCurrentTime(Arguments args) {
// time is milliseconds. Therefore, we floor the result of getting
// the OS time.
double millis = floor(OS::TimeCurrentMillis());
- return Heap::NumberFromDouble(millis);
+ return isolate->heap()->NumberFromDouble(millis);
}
-static MaybeObject* Runtime_DateParseString(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, str, 0);
@@ -7818,118 +8615,170 @@ static MaybeObject* Runtime_DateParseString(Arguments args) {
RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE);
bool result;
if (str->IsAsciiRepresentation()) {
- result = DateParser::Parse(str->ToAsciiVector(), output_array);
+ result = DateParser::Parse(str->ToAsciiVector(),
+ output_array,
+ isolate->unicode_cache());
} else {
ASSERT(str->IsTwoByteRepresentation());
- result = DateParser::Parse(str->ToUC16Vector(), output_array);
+ result = DateParser::Parse(str->ToUC16Vector(),
+ output_array,
+ isolate->unicode_cache());
}
if (result) {
return *output;
} else {
- return Heap::null_value();
+ return isolate->heap()->null_value();
}
}
-static MaybeObject* Runtime_DateLocalTimezone(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
const char* zone = OS::LocalTimezone(x);
- return Heap::AllocateStringFromUtf8(CStrVector(zone));
+ return isolate->heap()->AllocateStringFromUtf8(CStrVector(zone));
}
-static MaybeObject* Runtime_DateLocalTimeOffset(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimeOffset) {
NoHandleAllocation ha;
ASSERT(args.length() == 0);
- return Heap::NumberFromDouble(OS::LocalTimeOffset());
+ return isolate->heap()->NumberFromDouble(OS::LocalTimeOffset());
}
-static MaybeObject* Runtime_DateDaylightSavingsOffset(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateDaylightSavingsOffset) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::NumberFromDouble(OS::DaylightSavingsOffset(x));
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ return isolate->heap()->NumberFromDouble(OS::DaylightSavingsOffset(x));
}
-static MaybeObject* Runtime_GlobalReceiver(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalReceiver) {
ASSERT(args.length() == 1);
Object* global = args[0];
- if (!global->IsJSGlobalObject()) return Heap::null_value();
+ if (!global->IsJSGlobalObject()) return isolate->heap()->null_value();
return JSGlobalObject::cast(global)->global_receiver();
}
-static MaybeObject* Runtime_ParseJson(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
+ HandleScope scope(isolate);
ASSERT_EQ(1, args.length());
CONVERT_ARG_CHECKED(String, source, 0);
- Handle<Object> result = JsonParser::Parse(source);
+ source = Handle<String>(source->TryFlattenGetString());
+  // Optimized fast case where we only have ASCII characters.
+ Handle<Object> result;
+ if (source->IsSeqAsciiString()) {
+ result = JsonParser<true>::Parse(source);
+ } else {
+ result = JsonParser<false>::Parse(source);
+ }
if (result.is_null()) {
// Syntax error or stack overflow in scanner.
- ASSERT(Top::has_pending_exception());
+ ASSERT(isolate->has_pending_exception());
return Failure::Exception();
}
return *result;
}
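// Design note on the fast path above: a sequential ASCII string lets the
// templated JsonParser scan one byte per character with no per-character
// representation checks, and the TryFlattenGetString call first eliminates
// cons strings so the parser sees a contiguous buffer.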
-static MaybeObject* Runtime_CompileString(Arguments args) {
- HandleScope scope;
+bool CodeGenerationFromStringsAllowed(Isolate* isolate,
+ Handle<Context> context) {
+ if (context->allow_code_gen_from_strings()->IsFalse()) {
+ // Check with callback if set.
+ AllowCodeGenerationFromStringsCallback callback =
+ isolate->allow_code_gen_callback();
+ if (callback == NULL) {
+ // No callback set and code generation disallowed.
+ return false;
+ } else {
+ // Callback set. Let it decide if code generation is allowed.
+ VMState state(isolate, EXTERNAL);
+ return callback(v8::Utils::ToLocal(context));
+ }
+ }
+ return true;
+}
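// Editor's sketch of an embedder-side policy hook matching the check
// above; the registration API is assumed from this era's v8.h:
static bool AllowCodeGenFromStrings(v8::Local<v8::Context> context) {
  // Return true to permit eval() and new Function() in this context.
  return false;
}
// Presumably installed with:
//   v8::V8::SetAllowCodeGenerationFromStringsCallback(
//       AllowCodeGenFromStrings);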
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
+ HandleScope scope(isolate);
ASSERT_EQ(1, args.length());
CONVERT_ARG_CHECKED(String, source, 0);
+ // Extract global context.
+ Handle<Context> context(isolate->context()->global_context());
+
+ // Check if global context allows code generation from
+ // strings. Throw an exception if it doesn't.
+ if (!CodeGenerationFromStringsAllowed(isolate, context)) {
+ return isolate->Throw(*isolate->factory()->NewError(
+ "code_gen_from_strings", HandleVector<Object>(NULL, 0)));
+ }
+
// Compile source string in the global context.
- Handle<Context> context(Top::context()->global_context());
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source,
context,
true,
kNonStrictMode);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> fun =
- Factory::NewFunctionFromSharedFunctionInfo(shared, context, NOT_TENURED);
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
+ context,
+ NOT_TENURED);
return *fun;
}
-static ObjectPair CompileGlobalEval(Handle<String> source,
+static ObjectPair CompileGlobalEval(Isolate* isolate,
+ Handle<String> source,
Handle<Object> receiver,
- StrictModeFlag mode) {
+ StrictModeFlag strict_mode) {
+ Handle<Context> context = Handle<Context>(isolate->context());
+ Handle<Context> global_context = Handle<Context>(context->global_context());
+
+ // Check if global context allows code generation from
+ // strings. Throw an exception if it doesn't.
+ if (!CodeGenerationFromStringsAllowed(isolate, global_context)) {
+ isolate->Throw(*isolate->factory()->NewError(
+ "code_gen_from_strings", HandleVector<Object>(NULL, 0)));
+ return MakePair(Failure::Exception(), NULL);
+ }
+
// Deal with a normal eval call with a string argument. Compile it
// and return the compiled function bound in the local context.
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
source,
- Handle<Context>(Top::context()),
- Top::context()->IsGlobalContext(),
- mode);
+ Handle<Context>(isolate->context()),
+ context->IsGlobalContext(),
+ strict_mode);
if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
- Handle<JSFunction> compiled = Factory::NewFunctionFromSharedFunctionInfo(
- shared,
- Handle<Context>(Top::context()),
- NOT_TENURED);
+ Handle<JSFunction> compiled =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared, context, NOT_TENURED);
return MakePair(*compiled, *receiver);
}
-static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
+RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
ASSERT(args.length() == 4);
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<Object> callee = args.at<Object>(0);
Handle<Object> receiver; // Will be overwritten.
// Compute the calling context.
- Handle<Context> context = Handle<Context>(Top::context());
+ Handle<Context> context = Handle<Context>(isolate->context(), isolate);
#ifdef DEBUG
- // Make sure Top::context() agrees with the old code that traversed
+ // Make sure Isolate::context() agrees with the old code that traversed
// the stack frames to compute the context.
StackFrameLocator locator;
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
@@ -7941,93 +8790,91 @@ static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
int index = -1;
PropertyAttributes attributes = ABSENT;
while (true) {
- receiver = context->Lookup(Factory::eval_symbol(), FOLLOW_PROTOTYPE_CHAIN,
+ receiver = context->Lookup(isolate->factory()->eval_symbol(),
+ FOLLOW_PROTOTYPE_CHAIN,
&index, &attributes);
// Stop search when eval is found or when the global context is
// reached.
if (attributes != ABSENT || context->IsGlobalContext()) break;
- if (context->is_function_context()) {
- context = Handle<Context>(Context::cast(context->closure()->context()));
- } else {
- context = Handle<Context>(context->previous());
- }
+ context = Handle<Context>(context->previous(), isolate);
}
// If eval could not be resolved, it has been deleted and we need to
// throw a reference error.
if (attributes == ABSENT) {
- Handle<Object> name = Factory::eval_symbol();
+ Handle<Object> name = isolate->factory()->eval_symbol();
Handle<Object> reference_error =
- Factory::NewReferenceError("not_defined", HandleVector(&name, 1));
- return MakePair(Top::Throw(*reference_error), NULL);
+ isolate->factory()->NewReferenceError("not_defined",
+ HandleVector(&name, 1));
+ return MakePair(isolate->Throw(*reference_error), NULL);
}
if (!context->IsGlobalContext()) {
// 'eval' is not bound in the global context. Just call the function
// with the given arguments. This is not necessarily the global eval.
- if (receiver->IsContext()) {
- context = Handle<Context>::cast(receiver);
- receiver = Handle<Object>(context->get(index));
- } else if (receiver->IsJSContextExtensionObject()) {
- receiver = Handle<JSObject>(Top::context()->global()->global_receiver());
+ if (receiver->IsContext() || receiver->IsJSContextExtensionObject()) {
+ receiver = isolate->factory()->the_hole_value();
}
return MakePair(*callee, *receiver);
}
// 'eval' is bound in the global context, but it may have been overwritten.
// Compare it to the builtin 'GlobalEval' function to make sure.
- if (*callee != Top::global_context()->global_eval_fun() ||
+ if (*callee != isolate->global_context()->global_eval_fun() ||
!args[1]->IsString()) {
- return MakePair(*callee, Top::context()->global()->global_receiver());
+ return MakePair(*callee, isolate->heap()->the_hole_value());
}
ASSERT(args[3]->IsSmi());
- return CompileGlobalEval(args.at<String>(1),
+ return CompileGlobalEval(isolate,
+ args.at<String>(1),
args.at<Object>(2),
- static_cast<StrictModeFlag>(
- Smi::cast(args[3])->value()));
+ static_cast<StrictModeFlag>(args.smi_at(3)));
}
-static ObjectPair Runtime_ResolvePossiblyDirectEvalNoLookup(Arguments args) {
+RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEvalNoLookup) {
ASSERT(args.length() == 4);
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<Object> callee = args.at<Object>(0);
// 'eval' is bound in the global context, but it may have been overwritten.
// Compare it to the builtin 'GlobalEval' function to make sure.
- if (*callee != Top::global_context()->global_eval_fun() ||
+ if (*callee != isolate->global_context()->global_eval_fun() ||
!args[1]->IsString()) {
- return MakePair(*callee, Top::context()->global()->global_receiver());
+ return MakePair(*callee, isolate->heap()->the_hole_value());
}
ASSERT(args[3]->IsSmi());
- return CompileGlobalEval(args.at<String>(1),
+ return CompileGlobalEval(isolate,
+ args.at<String>(1),
args.at<Object>(2),
- static_cast<StrictModeFlag>(
- Smi::cast(args[3])->value()));
+ static_cast<StrictModeFlag>(args.smi_at(3)));
}
-static MaybeObject* Runtime_SetNewFunctionAttributes(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNewFunctionAttributes) {
  // This utility adjusts the property attributes for newly created Function
  // objects ("new Function(...)") by changing the map.
  // All it does is change the prototype property to enumerable
  // as specified in ECMA-262, 15.3.5.2.
- HandleScope scope;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, func, 0);
- ASSERT(func->map()->instance_type() ==
- Top::function_instance_map()->instance_type());
- ASSERT(func->map()->instance_size() ==
- Top::function_instance_map()->instance_size());
- func->set_map(*Top::function_instance_map());
+
+ Handle<Map> map = func->shared()->strict_mode()
+ ? isolate->strict_mode_function_instance_map()
+ : isolate->function_instance_map();
+
+ ASSERT(func->map()->instance_type() == map->instance_type());
+ ASSERT(func->map()->instance_size() == map->instance_size());
+ func->set_map(*map);
return *func;
}
-static MaybeObject* Runtime_AllocateInNewSpace(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
// Allocate a block of memory in NewSpace (filled with a filler).
  // Used as a fallback for allocation in generated code when NewSpace
  // is full.
@@ -8036,13 +8883,13 @@ static MaybeObject* Runtime_AllocateInNewSpace(Arguments args) {
int size = size_smi->value();
RUNTIME_ASSERT(IsAligned(size, kPointerSize));
RUNTIME_ASSERT(size > 0);
- static const int kMinFreeNewSpaceAfterGC =
- Heap::InitialSemiSpaceSize() * 3/4;
+ Heap* heap = isolate->heap();
+ const int kMinFreeNewSpaceAfterGC = heap->InitialSemiSpaceSize() * 3/4;
RUNTIME_ASSERT(size <= kMinFreeNewSpaceAfterGC);
Object* allocation;
- { MaybeObject* maybe_allocation = Heap::new_space()->AllocateRaw(size);
+ { MaybeObject* maybe_allocation = heap->new_space()->AllocateRaw(size);
if (maybe_allocation->ToObject(&allocation)) {
- Heap::CreateFillerObjectAt(HeapObject::cast(allocation)->address(), size);
+ heap->CreateFillerObjectAt(HeapObject::cast(allocation)->address(), size);
}
return maybe_allocation;
}
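// Worked example for the bound above: with, say, a 512 KB initial
// semispace (the actual value is platform-configured), requests larger
// than 384 KB, i.e. 3/4 of it, fail the RUNTIME_ASSERT rather than admit
// an allocation that even a fresh GC could not satisfy.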
@@ -8052,7 +8899,7 @@ static MaybeObject* Runtime_AllocateInNewSpace(Arguments args) {
// Pushes an object onto an array of objects if it is not already in the
// array. Returns true if the element was pushed onto the array and
// false otherwise.
-static MaybeObject* Runtime_PushIfAbsent(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSArray, array, args[0]);
CONVERT_CHECKED(JSObject, element, args[1]);
@@ -8060,13 +8907,15 @@ static MaybeObject* Runtime_PushIfAbsent(Arguments args) {
int length = Smi::cast(array->length())->value();
FixedArray* elements = FixedArray::cast(array->elements());
for (int i = 0; i < length; i++) {
- if (elements->get(i) == element) return Heap::false_value();
+ if (elements->get(i) == element) return isolate->heap()->false_value();
}
Object* obj;
- { MaybeObject* maybe_obj = array->SetFastElement(length, element);
+  // Strict mode is not needed; this call is only used for cycle detection
+  // in the Array.prototype.join implementation.
+ { MaybeObject* maybe_obj =
+ array->SetFastElement(length, element, kNonStrictMode, true);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- return Heap::true_value();
+ return isolate->heap()->true_value();
}
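// Editor's illustrative model (plain C++, not part of this patch) of the
// visited-set semantics that PushIfAbsent gives the Array join cycle
// detection:
#include <vector>
static bool PushIfAbsentModel(std::vector<const void*>* visited,
                              const void* element) {
  for (size_t i = 0; i < visited->size(); ++i) {
    if ((*visited)[i] == element) return false;  // already seen: a cycle
  }
  visited->push_back(element);
  return true;
}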
@@ -8083,9 +8932,12 @@ static MaybeObject* Runtime_PushIfAbsent(Arguments args) {
*/
class ArrayConcatVisitor {
public:
- ArrayConcatVisitor(Handle<FixedArray> storage,
+ ArrayConcatVisitor(Isolate* isolate,
+ Handle<FixedArray> storage,
bool fast_elements) :
- storage_(Handle<FixedArray>::cast(GlobalHandles::Create(*storage))),
+ isolate_(isolate),
+ storage_(Handle<FixedArray>::cast(
+ isolate->global_handles()->Create(*storage))),
index_offset_(0u),
fast_elements_(fast_elements) { }
@@ -8112,7 +8964,7 @@ class ArrayConcatVisitor {
ASSERT(!fast_elements_);
Handle<NumberDictionary> dict(NumberDictionary::cast(*storage_));
Handle<NumberDictionary> result =
- Factory::DictionaryAtNumberPut(dict, index, elm);
+ isolate_->factory()->DictionaryAtNumberPut(dict, index, elm);
if (!result.is_identical_to(dict)) {
// Dictionary needed to grow.
clear_storage();
@@ -8129,14 +8981,14 @@ class ArrayConcatVisitor {
}
Handle<JSArray> ToArray() {
- Handle<JSArray> array = Factory::NewJSArray(0);
+ Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
Handle<Object> length =
- Factory::NewNumber(static_cast<double>(index_offset_));
+ isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
Handle<Map> map;
if (fast_elements_) {
- map = Factory::GetFastElementsMap(Handle<Map>(array->map()));
+ map = isolate_->factory()->GetFastElementsMap(Handle<Map>(array->map()));
} else {
- map = Factory::GetSlowElementsMap(Handle<Map>(array->map()));
+ map = isolate_->factory()->GetSlowElementsMap(Handle<Map>(array->map()));
}
array->set_map(*map);
array->set_length(*length);
@@ -8150,14 +9002,14 @@ class ArrayConcatVisitor {
ASSERT(fast_elements_);
Handle<FixedArray> current_storage(*storage_);
Handle<NumberDictionary> slow_storage(
- Factory::NewNumberDictionary(current_storage->length()));
+ isolate_->factory()->NewNumberDictionary(current_storage->length()));
uint32_t current_length = static_cast<uint32_t>(current_storage->length());
for (uint32_t i = 0; i < current_length; i++) {
HandleScope loop_scope;
Handle<Object> element(current_storage->get(i));
if (!element->IsTheHole()) {
Handle<NumberDictionary> new_storage =
- Factory::DictionaryAtNumberPut(slow_storage, i, element);
+ isolate_->factory()->DictionaryAtNumberPut(slow_storage, i, element);
if (!new_storage.is_identical_to(slow_storage)) {
slow_storage = loop_scope.CloseAndEscape(new_storage);
}
@@ -8169,13 +9021,16 @@ class ArrayConcatVisitor {
}
inline void clear_storage() {
- GlobalHandles::Destroy(Handle<Object>::cast(storage_).location());
+ isolate_->global_handles()->Destroy(
+ Handle<Object>::cast(storage_).location());
}
inline void set_storage(FixedArray* storage) {
- storage_ = Handle<FixedArray>::cast(GlobalHandles::Create(storage));
+ storage_ = Handle<FixedArray>::cast(
+ isolate_->global_handles()->Create(storage));
}
+ Isolate* isolate_;
Handle<FixedArray> storage_; // Always a global handle.
// Index after last seen index. Always less than or equal to
// JSObject::kMaxElementCount.
@@ -8223,7 +9078,8 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
template<class ExternalArrayClass, class ElementType>
-static void IterateExternalArrayElements(Handle<JSObject> receiver,
+static void IterateExternalArrayElements(Isolate* isolate,
+ Handle<JSObject> receiver,
bool elements_are_ints,
bool elements_are_guaranteed_smis,
ArrayConcatVisitor* visitor) {
@@ -8248,15 +9104,15 @@ static void IterateExternalArrayElements(Handle<JSObject> receiver,
visitor->visit(j, e);
} else {
Handle<Object> e =
- Factory::NewNumber(static_cast<ElementType>(val));
+ isolate->factory()->NewNumber(static_cast<ElementType>(val));
visitor->visit(j, e);
}
}
}
} else {
for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope;
- Handle<Object> e = Factory::NewNumber(array->get(j));
+ HandleScope loop_scope(isolate);
+ Handle<Object> e = isolate->factory()->NewNumber(array->get(j));
visitor->visit(j, e);
}
}
@@ -8306,44 +9162,49 @@ static void CollectElementIndices(Handle<JSObject> object,
default: {
int dense_elements_length;
switch (kind) {
- case JSObject::PIXEL_ELEMENTS: {
- dense_elements_length =
- PixelArray::cast(object->elements())->length();
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS: {
+ dense_elements_length =
+ ExternalPixelArray::cast(object->elements())->length();
break;
}
case JSObject::EXTERNAL_BYTE_ELEMENTS: {
- dense_elements_length =
- ExternalByteArray::cast(object->elements())->length();
+ dense_elements_length =
+ ExternalByteArray::cast(object->elements())->length();
break;
}
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- dense_elements_length =
- ExternalUnsignedByteArray::cast(object->elements())->length();
+ dense_elements_length =
+ ExternalUnsignedByteArray::cast(object->elements())->length();
break;
}
case JSObject::EXTERNAL_SHORT_ELEMENTS: {
- dense_elements_length =
- ExternalShortArray::cast(object->elements())->length();
+ dense_elements_length =
+ ExternalShortArray::cast(object->elements())->length();
break;
}
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- dense_elements_length =
- ExternalUnsignedShortArray::cast(object->elements())->length();
+ dense_elements_length =
+ ExternalUnsignedShortArray::cast(object->elements())->length();
break;
}
case JSObject::EXTERNAL_INT_ELEMENTS: {
- dense_elements_length =
- ExternalIntArray::cast(object->elements())->length();
+ dense_elements_length =
+ ExternalIntArray::cast(object->elements())->length();
break;
}
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- dense_elements_length =
- ExternalUnsignedIntArray::cast(object->elements())->length();
+ dense_elements_length =
+ ExternalUnsignedIntArray::cast(object->elements())->length();
break;
}
case JSObject::EXTERNAL_FLOAT_ELEMENTS: {
- dense_elements_length =
- ExternalFloatArray::cast(object->elements())->length();
+ dense_elements_length =
+ ExternalFloatArray::cast(object->elements())->length();
+ break;
+ }
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS: {
+ dense_elements_length =
+ ExternalDoubleArray::cast(object->elements())->length();
break;
}
default:
@@ -8383,8 +9244,10 @@ static void CollectElementIndices(Handle<JSObject> object,
* with the element index and the element's value.
* Afterwards it increments the base-index of the visitor by the array
* length.
+ * Returns false if any access threw an exception, otherwise true.
*/
-static void IterateElements(Handle<JSArray> receiver,
+static bool IterateElements(Isolate* isolate,
+ Handle<JSArray> receiver,
ArrayConcatVisitor* visitor) {
uint32_t length = static_cast<uint32_t>(receiver->length()->Number());
switch (receiver->GetElementsKind()) {
@@ -8395,14 +9258,15 @@ static void IterateElements(Handle<JSArray> receiver,
int fast_length = static_cast<int>(length);
ASSERT(fast_length <= elements->length());
for (int j = 0; j < fast_length; j++) {
- HandleScope loop_scope;
- Handle<Object> element_value(elements->get(j));
+ HandleScope loop_scope(isolate);
+ Handle<Object> element_value(elements->get(j), isolate);
if (!element_value->IsTheHole()) {
visitor->visit(j, element_value);
} else if (receiver->HasElement(j)) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
element_value = GetElement(receiver, j);
+ if (element_value.is_null()) return false;
visitor->visit(j, element_value);
}
}
@@ -8421,6 +9285,7 @@ static void IterateElements(Handle<JSArray> receiver,
HandleScope loop_scope;
uint32_t index = indices[j];
Handle<Object> element = GetElement(receiver, index);
+ if (element.is_null()) return false;
visitor->visit(index, element);
// Skip to next different index (i.e., omit duplicates).
do {
@@ -8429,8 +9294,9 @@ static void IterateElements(Handle<JSArray> receiver,
}
break;
}
- case JSObject::PIXEL_ELEMENTS: {
- Handle<PixelArray> pixels(PixelArray::cast(receiver->elements()));
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS: {
+ Handle<ExternalPixelArray> pixels(ExternalPixelArray::cast(
+ receiver->elements()));
for (uint32_t j = 0; j < length; j++) {
Handle<Smi> e(Smi::FromInt(pixels->get(j)));
visitor->visit(j, e);
@@ -8439,37 +9305,42 @@ static void IterateElements(Handle<JSArray> receiver,
}
case JSObject::EXTERNAL_BYTE_ELEMENTS: {
IterateExternalArrayElements<ExternalByteArray, int8_t>(
- receiver, true, true, visitor);
+ isolate, receiver, true, true, visitor);
break;
}
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
IterateExternalArrayElements<ExternalUnsignedByteArray, uint8_t>(
- receiver, true, true, visitor);
+ isolate, receiver, true, true, visitor);
break;
}
case JSObject::EXTERNAL_SHORT_ELEMENTS: {
IterateExternalArrayElements<ExternalShortArray, int16_t>(
- receiver, true, true, visitor);
+ isolate, receiver, true, true, visitor);
break;
}
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
IterateExternalArrayElements<ExternalUnsignedShortArray, uint16_t>(
- receiver, true, true, visitor);
+ isolate, receiver, true, true, visitor);
break;
}
case JSObject::EXTERNAL_INT_ELEMENTS: {
IterateExternalArrayElements<ExternalIntArray, int32_t>(
- receiver, true, false, visitor);
+ isolate, receiver, true, false, visitor);
break;
}
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: {
IterateExternalArrayElements<ExternalUnsignedIntArray, uint32_t>(
- receiver, true, false, visitor);
+ isolate, receiver, true, false, visitor);
break;
}
case JSObject::EXTERNAL_FLOAT_ELEMENTS: {
IterateExternalArrayElements<ExternalFloatArray, float>(
- receiver, false, false, visitor);
+ isolate, receiver, false, false, visitor);
+ break;
+ }
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS: {
+ IterateExternalArrayElements<ExternalDoubleArray, double>(
+ isolate, receiver, false, false, visitor);
break;
}
default:
@@ -8477,6 +9348,7 @@ static void IterateElements(Handle<JSArray> receiver,
break;
}
visitor->increase_index_offset(length);
+ return true;
}
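
The signature change from void to bool is what lets a throwing getter abort the whole concatenation: GetElement comes back as an empty handle, IterateElements returns false, and Runtime_ArrayConcat (below) converts that into Failure::Exception(). A standalone model of the same propagation, with std::optional standing in for a possibly-empty Handle<Object>:

// iterate_elements_sketch.cc -- simplified model, not V8 API.
#include <cstdio>
#include <optional>
#include <vector>

using Element = std::optional<int>;  // nullopt models an empty handle

Element GetElement(const std::vector<Element>& receiver, size_t index) {
  return receiver[index];  // a throwing getter would yield nullopt
}

bool IterateElements(const std::vector<Element>& receiver,
                     std::vector<int>* visitor) {
  for (size_t i = 0; i < receiver.size(); ++i) {
    Element e = GetElement(receiver, i);
    if (!e.has_value()) return false;  // pending exception: unwind to caller
    visitor->push_back(*e);
  }
  return true;
}

int main() {
  std::vector<Element> ok = {1, 2, 3};
  std::vector<Element> throws = {1, std::nullopt, 3};
  std::vector<int> out;
  bool first = IterateElements(ok, &out);
  bool second = IterateElements(throws, &out);
  std::printf("%d %d\n", first, second);  // prints "1 0"
  return 0;
}
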
@@ -8486,9 +9358,9 @@ static void IterateElements(Handle<JSArray> receiver,
* TODO(581): Fix non-compliance for very large concatenations and update to
* following the ECMAScript 5 specification.
*/
-static MaybeObject* Runtime_ArrayConcat(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
ASSERT(args.length() == 1);
- HandleScope handle_scope;
+ HandleScope handle_scope(isolate);
CONVERT_ARG_CHECKED(JSArray, arguments, 0);
int argument_count = static_cast<int>(arguments->length()->Number());
@@ -8543,22 +9415,25 @@ static MaybeObject* Runtime_ArrayConcat(Arguments args) {
if (fast_case) {
// The backing storage array must have non-existing elements to
// preserve holes across concat operations.
- storage = Factory::NewFixedArrayWithHoles(estimate_result_length);
+ storage = isolate->factory()->NewFixedArrayWithHoles(
+ estimate_result_length);
} else {
// TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
uint32_t at_least_space_for = estimate_nof_elements +
(estimate_nof_elements >> 2);
storage = Handle<FixedArray>::cast(
- Factory::NewNumberDictionary(at_least_space_for));
+ isolate->factory()->NewNumberDictionary(at_least_space_for));
}
- ArrayConcatVisitor visitor(storage, fast_case);
+ ArrayConcatVisitor visitor(isolate, storage, fast_case);
for (int i = 0; i < argument_count; i++) {
Handle<Object> obj(elements->get(i));
if (obj->IsJSArray()) {
Handle<JSArray> array = Handle<JSArray>::cast(obj);
- IterateElements(array, &visitor);
+ if (!IterateElements(isolate, array, &visitor)) {
+ return Failure::Exception();
+ }
} else {
visitor.visit(0, obj);
visitor.increase_index_offset(1);
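
A worked example of the slow-path pre-sizing chosen above: the dictionary is reserved at the element estimate plus 25% headroom, computed with a shift rather than a division.

// dictionary_presize_sketch.cc
#include <cstdint>
#include <cstdio>

// n + n/4, matching estimate_nof_elements + (estimate_nof_elements >> 2).
uint32_t AtLeastSpaceFor(uint32_t estimate_nof_elements) {
  return estimate_nof_elements + (estimate_nof_elements >> 2);
}

int main() {
  std::printf("%u\n", AtLeastSpaceFor(100));  // 125 slots reserved
  return 0;
}
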
@@ -8571,7 +9446,7 @@ static MaybeObject* Runtime_ArrayConcat(Arguments args) {
// This will not allocate (flatten the string), but it may run
// very slowly for very deeply nested ConsStrings. For debugging use only.
-static MaybeObject* Runtime_GlobalPrint(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -8589,7 +9464,7 @@ static MaybeObject* Runtime_GlobalPrint(Arguments args) {
// and are followed by non-existing element. Does not change the length
// property.
// Returns the number of non-undefined elements collected.
-static MaybeObject* Runtime_RemoveArrayHoles(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSObject, object, args[0]);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
@@ -8598,14 +9473,14 @@ static MaybeObject* Runtime_RemoveArrayHoles(Arguments args) {
// Move contents of argument 0 (an array) to argument 1 (an array)
-static MaybeObject* Runtime_MoveArrayContents(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSArray, from, args[0]);
CONVERT_CHECKED(JSArray, to, args[1]);
HeapObject* new_elements = from->elements();
MaybeObject* maybe_new_map;
- if (new_elements->map() == Heap::fixed_array_map() ||
- new_elements->map() == Heap::fixed_cow_array_map()) {
+ if (new_elements->map() == isolate->heap()->fixed_array_map() ||
+ new_elements->map() == isolate->heap()->fixed_cow_array_map()) {
maybe_new_map = to->map()->GetFastElementsMap();
} else {
maybe_new_map = to->map()->GetSlowElementsMap();
@@ -8625,7 +9500,7 @@ static MaybeObject* Runtime_MoveArrayContents(Arguments args) {
// How many elements does this object/array have?
-static MaybeObject* Runtime_EstimateNumberOfElements(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSObject, object, args[0]);
HeapObject* elements = object->elements();
@@ -8639,8 +9514,8 @@ static MaybeObject* Runtime_EstimateNumberOfElements(Arguments args) {
}
-static MaybeObject* Runtime_SwapElements(Arguments args) {
- HandleScope handle_scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SwapElements) {
+ HandleScope handle_scope(isolate);
ASSERT_EQ(3, args.length());
@@ -8651,17 +9526,21 @@ static MaybeObject* Runtime_SwapElements(Arguments args) {
uint32_t index1, index2;
if (!key1->ToArrayIndex(&index1)
|| !key2->ToArrayIndex(&index2)) {
- return Top::ThrowIllegalOperation();
+ return isolate->ThrowIllegalOperation();
}
Handle<JSObject> jsobject = Handle<JSObject>::cast(object);
Handle<Object> tmp1 = GetElement(jsobject, index1);
+ RETURN_IF_EMPTY_HANDLE(isolate, tmp1);
Handle<Object> tmp2 = GetElement(jsobject, index2);
+ RETURN_IF_EMPTY_HANDLE(isolate, tmp2);
- SetElement(jsobject, index1, tmp2);
- SetElement(jsobject, index2, tmp1);
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ SetElement(jsobject, index1, tmp2, kStrictMode));
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ SetElement(jsobject, index2, tmp1, kStrictMode));
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
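
RETURN_IF_EMPTY_HANDLE turns the empty handles produced by a failed GetElement or SetElement into an early return while an exception is pending. The real macro lives in V8's handle machinery and takes the isolate; this standalone model is an assumed simplification that only shows the shape:

// return_if_empty_sketch.cc -- simplified macro, not V8's definition.
#include <cstdio>
#include <optional>

#define RETURN_IF_EMPTY_HANDLE(handle)                                     \
  do {                                                                     \
    if (!(handle).has_value()) return -1; /* models Failure::Exception() */ \
  } while (false)

std::optional<int> GetElement(bool ok) {
  return ok ? std::optional<int>(42) : std::nullopt;
}

int SwapLikeOperation(bool first_ok, bool second_ok) {
  std::optional<int> tmp1 = GetElement(first_ok);
  RETURN_IF_EMPTY_HANDLE(tmp1);  // unwind on pending exception
  std::optional<int> tmp2 = GetElement(second_ok);
  RETURN_IF_EMPTY_HANDLE(tmp2);
  return *tmp1 + *tmp2;
}

int main() {
  std::printf("%d %d\n", SwapLikeOperation(true, true),
              SwapLikeOperation(true, false));  // 84 -1
  return 0;
}
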
@@ -8670,9 +9549,9 @@ static MaybeObject* Runtime_SwapElements(Arguments args) {
// intervals (pair of a negative integer (-start-1) followed by a
// positive (length)) or undefined values.
// Intervals can span over some keys that are not in the object.
-static MaybeObject* Runtime_GetArrayKeys(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
ASSERT(args.length() == 2);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSObject, array, 0);
CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
if (array->elements()->IsDictionary()) {
@@ -8688,19 +9567,19 @@ static MaybeObject* Runtime_GetArrayKeys(Arguments args) {
keys->set_undefined(i);
}
}
- return *Factory::NewJSArrayWithElements(keys);
+ return *isolate->factory()->NewJSArrayWithElements(keys);
} else {
ASSERT(array->HasFastElements());
- Handle<FixedArray> single_interval = Factory::NewFixedArray(2);
+ Handle<FixedArray> single_interval = isolate->factory()->NewFixedArray(2);
// -1 means start of array.
single_interval->set(0, Smi::FromInt(-1));
uint32_t actual_length =
static_cast<uint32_t>(FixedArray::cast(array->elements())->length());
uint32_t min_length = actual_length < length ? actual_length : length;
Handle<Object> length_object =
- Factory::NewNumber(static_cast<double>(min_length));
+ isolate->factory()->NewNumber(static_cast<double>(min_length));
single_interval->set(1, *length_object);
- return *Factory::NewJSArrayWithElements(single_interval);
+ return *isolate->factory()->NewJSArrayWithElements(single_interval);
}
}
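
The fast-elements branch above encodes the whole key range as one interval: a pair of -start-1 followed by length, so the negative first entry cannot be confused with a real array index. A worked round trip of that encoding:

// interval_encoding_sketch.cc
#include <cstdio>

struct Interval { int start; int length; };

Interval Decode(int first, int second) {
  return Interval{-first - 1, second};  // -1 therefore encodes start == 0
}

int main() {
  Interval i = Decode(-1, 10);  // the single_interval built above
  std::printf("start=%d length=%d\n", i.start, i.length);  // start=0 length=10
  return 0;
}
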
@@ -8710,7 +9589,7 @@ static MaybeObject* Runtime_GetArrayKeys(Arguments args) {
// to the way accessors are implemented, it is set for both the getter
// and setter on the first call to DefineAccessor and ignored on
// subsequent calls.
-static MaybeObject* Runtime_DefineAccessor(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineAccessor) {
RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
// Compute attributes.
PropertyAttributes attributes = NONE;
@@ -8730,7 +9609,7 @@ static MaybeObject* Runtime_DefineAccessor(Arguments args) {
}
-static MaybeObject* Runtime_LookupAccessor(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
ASSERT(args.length() == 3);
CONVERT_CHECKED(JSObject, obj, args[0]);
CONVERT_CHECKED(String, name, args[1]);
@@ -8740,7 +9619,7 @@ static MaybeObject* Runtime_LookupAccessor(Arguments args) {
#ifdef ENABLE_DEBUGGER_SUPPORT
-static MaybeObject* Runtime_DebugBreak(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugBreak) {
ASSERT(args.length() == 0);
return Execution::DebugBreakHelper();
}
@@ -8762,27 +9641,29 @@ static StackFrame::Id UnwrapFrameId(Smi* wrapped) {
// args[0]: debug event listener function to set, or null or undefined to
// clear the event listener function
// args[1]: object supplied during callback
-static MaybeObject* Runtime_SetDebugEventListener(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDebugEventListener) {
ASSERT(args.length() == 2);
RUNTIME_ASSERT(args[0]->IsJSFunction() ||
args[0]->IsUndefined() ||
args[0]->IsNull());
Handle<Object> callback = args.at<Object>(0);
Handle<Object> data = args.at<Object>(1);
- Debugger::SetEventListener(callback, data);
+ isolate->debugger()->SetEventListener(callback, data);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_Break(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Break) {
ASSERT(args.length() == 0);
- StackGuard::DebugBreak();
- return Heap::undefined_value();
+ isolate->stack_guard()->DebugBreak();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* DebugLookupResultValue(Object* receiver, String* name,
+static MaybeObject* DebugLookupResultValue(Heap* heap,
+ Object* receiver,
+ String* name,
LookupResult* result,
bool* caught_exception) {
Object* value;
@@ -8790,7 +9671,7 @@ static MaybeObject* DebugLookupResultValue(Object* receiver, String* name,
case NORMAL:
value = result->holder()->GetNormalizedProperty(result);
if (value->IsTheHole()) {
- return Heap::undefined_value();
+ return heap->undefined_value();
}
return value;
case FIELD:
@@ -8798,21 +9679,21 @@ static MaybeObject* DebugLookupResultValue(Object* receiver, String* name,
JSObject::cast(
result->holder())->FastPropertyAt(result->GetFieldIndex());
if (value->IsTheHole()) {
- return Heap::undefined_value();
+ return heap->undefined_value();
}
return value;
case CONSTANT_FUNCTION:
return result->GetConstantFunction();
case CALLBACKS: {
Object* structure = result->GetCallbackObject();
- if (structure->IsProxy() || structure->IsAccessorInfo()) {
+ if (structure->IsForeign() || structure->IsAccessorInfo()) {
MaybeObject* maybe_value = receiver->GetPropertyWithCallback(
receiver, structure, name, result->holder());
if (!maybe_value->ToObject(&value)) {
if (maybe_value->IsRetryAfterGC()) return maybe_value;
ASSERT(maybe_value->IsException());
- maybe_value = Top::pending_exception();
- Top::clear_pending_exception();
+ maybe_value = heap->isolate()->pending_exception();
+ heap->isolate()->clear_pending_exception();
if (caught_exception != NULL) {
*caught_exception = true;
}
@@ -8820,19 +9701,20 @@ static MaybeObject* DebugLookupResultValue(Object* receiver, String* name,
}
return value;
} else {
- return Heap::undefined_value();
+ return heap->undefined_value();
}
}
case INTERCEPTOR:
case MAP_TRANSITION:
+ case EXTERNAL_ARRAY_TRANSITION:
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
- return Heap::undefined_value();
+ return heap->undefined_value();
default:
UNREACHABLE();
}
UNREACHABLE();
- return Heap::undefined_value();
+ return heap->undefined_value();
}
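
When the CALLBACKS case runs a getter that throws, DebugLookupResultValue captures the pending exception as the looked-up value and sets the caught_exception flag, so the debugger itself never unwinds. A simplified model of that conversion, using plain structs rather than V8's MaybeObject:

// debug_lookup_sketch.cc -- simplified model, not V8's MaybeObject protocol.
#include <cstdio>

struct CallResult {
  bool ok;
  int value_or_exception;  // exception payload when !ok
};

struct LookupOutcome {
  int value;
  bool caught_exception;
};

LookupOutcome DebugLookupResultValue(CallResult getter_result) {
  if (!getter_result.ok) {
    // Models: maybe_value = isolate->pending_exception(); clear it; flag it.
    return LookupOutcome{getter_result.value_or_exception, true};
  }
  return LookupOutcome{getter_result.value_or_exception, false};
}

int main() {
  LookupOutcome a = DebugLookupResultValue(CallResult{true, 7});
  LookupOutcome b = DebugLookupResultValue(CallResult{false, -13});
  std::printf("%d/%d %d/%d\n", a.value, a.caught_exception,
              b.value, b.caught_exception);  // 7/0 -13/1
  return 0;
}
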
@@ -8848,8 +9730,8 @@ static MaybeObject* DebugLookupResultValue(Object* receiver, String* name,
// 4: Setter function if defined
// Items 2-4 are only filled if the property has either a getter or a setter
// defined through __defineGetter__ and/or __defineSetter__.
-static MaybeObject* Runtime_DebugGetPropertyDetails(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -8862,9 +9744,9 @@ static MaybeObject* Runtime_DebugGetPropertyDetails(Arguments args) {
// into the embedding application can occur, and the embedding application
// could have the assumption that its own global context is the current
// context and not some internal debugger context.
- SaveContext save;
- if (Debug::InDebugger()) {
- Top::set_context(*Debug::debugger_entry()->GetContext());
+ SaveContext save(isolate);
+ if (isolate->debug()->InDebugger()) {
+ isolate->set_context(*isolate->debug()->debugger_entry()->GetContext());
}
// Skip the global proxy as it has no properties and always delegates to the
@@ -8878,17 +9760,17 @@ static MaybeObject* Runtime_DebugGetPropertyDetails(Arguments args) {
// if so.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- Handle<FixedArray> details = Factory::NewFixedArray(2);
+ Handle<FixedArray> details = isolate->factory()->NewFixedArray(2);
Object* element_or_char;
{ MaybeObject* maybe_element_or_char =
- Runtime::GetElementOrCharAt(obj, index);
+ Runtime::GetElementOrCharAt(isolate, obj, index);
if (!maybe_element_or_char->ToObject(&element_or_char)) {
return maybe_element_or_char;
}
}
details->set(0, element_or_char);
details->set(1, PropertyDetails(NONE, NORMAL).AsSmi());
- return *Factory::NewJSArrayWithElements(details);
+ return *isolate->factory()->NewJSArrayWithElements(details);
}
// Find the number of objects making up this.
@@ -8906,7 +9788,8 @@ static MaybeObject* Runtime_DebugGetPropertyDetails(Arguments args) {
PropertyType result_type = result.type();
Handle<Object> result_callback_obj;
if (result_type == CALLBACKS) {
- result_callback_obj = Handle<Object>(result.GetCallbackObject());
+ result_callback_obj = Handle<Object>(result.GetCallbackObject(),
+ isolate);
}
Smi* property_details = result.GetPropertyDetails().AsSmi();
// DebugLookupResultValue can cause GC so details from LookupResult needs
@@ -8914,40 +9797,41 @@ static MaybeObject* Runtime_DebugGetPropertyDetails(Arguments args) {
bool caught_exception = false;
Object* raw_value;
{ MaybeObject* maybe_raw_value =
- DebugLookupResultValue(*obj, *name, &result, &caught_exception);
+ DebugLookupResultValue(isolate->heap(), *obj, *name,
+ &result, &caught_exception);
if (!maybe_raw_value->ToObject(&raw_value)) return maybe_raw_value;
}
- Handle<Object> value(raw_value);
+ Handle<Object> value(raw_value, isolate);
// If the callback object is a fixed array then it contains JavaScript
// getter and/or setter.
bool hasJavaScriptAccessors = result_type == CALLBACKS &&
result_callback_obj->IsFixedArray();
Handle<FixedArray> details =
- Factory::NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
+ isolate->factory()->NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
details->set(0, *value);
details->set(1, property_details);
if (hasJavaScriptAccessors) {
details->set(2,
- caught_exception ? Heap::true_value()
- : Heap::false_value());
+ caught_exception ? isolate->heap()->true_value()
+ : isolate->heap()->false_value());
details->set(3, FixedArray::cast(*result_callback_obj)->get(0));
details->set(4, FixedArray::cast(*result_callback_obj)->get(1));
}
- return *Factory::NewJSArrayWithElements(details);
+ return *isolate->factory()->NewJSArrayWithElements(details);
}
if (i < length - 1) {
jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
}
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_DebugGetProperty(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -8957,15 +9841,15 @@ static MaybeObject* Runtime_DebugGetProperty(Arguments args) {
LookupResult result;
obj->Lookup(*name, &result);
if (result.IsProperty()) {
- return DebugLookupResultValue(*obj, *name, &result, NULL);
+ return DebugLookupResultValue(isolate->heap(), *obj, *name, &result, NULL);
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Return the property type calculated from the property details.
// args[0]: smi with property details.
-static MaybeObject* Runtime_DebugPropertyTypeFromDetails(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(Smi, details, args[0]);
PropertyType type = PropertyDetails(details).type();
@@ -8975,7 +9859,7 @@ static MaybeObject* Runtime_DebugPropertyTypeFromDetails(Arguments args) {
// Return the property attribute calculated from the property details.
// args[0]: smi with property details.
-static MaybeObject* Runtime_DebugPropertyAttributesFromDetails(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(Smi, details, args[0]);
PropertyAttributes attributes = PropertyDetails(details).attributes();
@@ -8985,7 +9869,7 @@ static MaybeObject* Runtime_DebugPropertyAttributesFromDetails(Arguments args) {
// Return the property insertion index calculated from the property details.
// args[0]: smi with property details.
-static MaybeObject* Runtime_DebugPropertyIndexFromDetails(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(Smi, details, args[0]);
int index = PropertyDetails(details).index();
@@ -8996,8 +9880,8 @@ static MaybeObject* Runtime_DebugPropertyIndexFromDetails(Arguments args) {
// Return property value from named interceptor.
// args[0]: object
// args[1]: property name
-static MaybeObject* Runtime_DebugNamedInterceptorPropertyValue(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
RUNTIME_ASSERT(obj->HasNamedInterceptor());
@@ -9011,9 +9895,8 @@ static MaybeObject* Runtime_DebugNamedInterceptorPropertyValue(Arguments args) {
// Return element value from indexed interceptor.
// args[0]: object
// args[1]: index
-static MaybeObject* Runtime_DebugIndexedInterceptorElementValue(
- Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugIndexedInterceptorElementValue) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
RUNTIME_ASSERT(obj->HasIndexedInterceptor());
@@ -9023,36 +9906,39 @@ static MaybeObject* Runtime_DebugIndexedInterceptorElementValue(
}
-static MaybeObject* Runtime_CheckExecutionState(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckExecutionState) {
ASSERT(args.length() >= 1);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
// Check that the break id is valid.
- if (Debug::break_id() == 0 || break_id != Debug::break_id()) {
- return Top::Throw(Heap::illegal_execution_state_symbol());
+ if (isolate->debug()->break_id() == 0 ||
+ break_id != isolate->debug()->break_id()) {
+ return isolate->Throw(
+ isolate->heap()->illegal_execution_state_symbol());
}
- return Heap::true_value();
+ return isolate->heap()->true_value();
}
-static MaybeObject* Runtime_GetFrameCount(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameCount) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
// Check arguments.
Object* result;
- { MaybeObject* maybe_result = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_result = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Count all frames which are relevant to the debugging stack trace.
int n = 0;
- StackFrame::Id id = Debug::break_frame_id();
+ StackFrame::Id id = isolate->debug()->break_frame_id();
if (id == StackFrame::NO_ID) {
// If there is no JavaScript stack frame, the count is 0.
return Smi::FromInt(0);
}
- for (JavaScriptFrameIterator it(id); !it.done(); it.Advance()) n++;
+ for (JavaScriptFrameIterator it(isolate, id); !it.done(); it.Advance()) n++;
return Smi::FromInt(n);
}
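
Every `static MaybeObject* Runtime_Foo(Arguments)` in this file becomes `RUNTIME_FUNCTION(MaybeObject*, Runtime_Foo)`, and direct calls between runtime functions now go through RUNTIME_ARGUMENTS, as Runtime_GetFrameCount does above. A sketch of what such a macro pair could look like; the exact shape and parameter order of V8's real macros are assumptions here, for illustration only:

// runtime_macro_sketch.cc -- assumed macro shape, not V8's definitions.
#include <cstdio>

struct Isolate { int id; };
struct Arguments { int length; };

// Injects the hidden isolate parameter into every runtime function...
#define RUNTIME_FUNCTION(Type, Name) \
  static Type Name(Isolate* isolate, Arguments args)
// ...and forwards it when one runtime function calls another directly.
#define RUNTIME_ARGUMENTS(isolate, args) isolate, args

RUNTIME_FUNCTION(int, Runtime_CheckExecutionState) {
  return isolate->id + args.length;  // both reach the callee intact
}

RUNTIME_FUNCTION(int, Runtime_GetFrameCount) {
  return Runtime_CheckExecutionState(RUNTIME_ARGUMENTS(isolate, args));
}

int main() {
  Isolate isolate{7};
  std::printf("%d\n", Runtime_GetFrameCount(&isolate, Arguments{1}));  // 8
  return 0;
}
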
@@ -9085,47 +9971,50 @@ static const int kFrameDetailsFirstDynamicIndex = 9;
// Arguments name, value
// Locals name, value
// Return value if any
-static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
// Check arguments.
Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
if (!maybe_check->ToObject(&check)) return maybe_check;
}
CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
+ Heap* heap = isolate->heap();
// Find the relevant frame with the requested index.
- StackFrame::Id id = Debug::break_frame_id();
+ StackFrame::Id id = isolate->debug()->break_frame_id();
if (id == StackFrame::NO_ID) {
// If there are no JavaScript stack frames return undefined.
- return Heap::undefined_value();
+ return heap->undefined_value();
}
int count = 0;
- JavaScriptFrameIterator it(id);
+ JavaScriptFrameIterator it(isolate, id);
for (; !it.done(); it.Advance()) {
if (count == index) break;
count++;
}
- if (it.done()) return Heap::undefined_value();
+ if (it.done()) return heap->undefined_value();
bool is_optimized_frame =
- it.frame()->code()->kind() == Code::OPTIMIZED_FUNCTION;
+ it.frame()->LookupCode()->kind() == Code::OPTIMIZED_FUNCTION;
// Traverse the saved contexts chain to find the active context for the
// selected frame.
- SaveContext* save = Top::save_context();
+ SaveContext* save = isolate->save_context();
while (save != NULL && !save->below(it.frame())) {
save = save->prev();
}
ASSERT(save != NULL);
// Get the frame id.
- Handle<Object> frame_id(WrapFrameId(it.frame()->id()));
+ Handle<Object> frame_id(WrapFrameId(it.frame()->id()), isolate);
// Find source position.
- int position = it.frame()->code()->SourcePosition(it.frame()->pc());
+ int position =
+ it.frame()->LookupCode()->SourcePosition(it.frame()->pc());
// Check for constructor frame.
bool constructor = it.frame()->IsConstructor();
@@ -9135,41 +10024,38 @@ static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
ScopeInfo<> info(*scope_info);
- // Get the context.
- Handle<Context> context(Context::cast(it.frame()->context()));
-
// Get the locals names and values into a temporary array.
//
// TODO(1240907): Hide compiler-introduced stack variables
// (e.g. .result)? For users of the debugger, they will probably be
// confusing.
- Handle<FixedArray> locals = Factory::NewFixedArray(info.NumberOfLocals() * 2);
-
- // Fill in the names of the locals.
- for (int i = 0; i < info.NumberOfLocals(); i++) {
- locals->set(i * 2, *info.LocalName(i));
- }
+ Handle<FixedArray> locals =
+ isolate->factory()->NewFixedArray(info.NumberOfLocals() * 2);
// Fill in the values of the locals.
- for (int i = 0; i < info.NumberOfLocals(); i++) {
- if (is_optimized_frame) {
- // If we are inspecting an optimized frame use undefined as the
- // value for all locals.
- //
- // TODO(1140): We should be able to get the correct values
- // for locals in optimized frames.
- locals->set(i * 2 + 1, Heap::undefined_value());
- } else if (i < info.number_of_stack_slots()) {
- // Get the value from the stack.
+ if (is_optimized_frame) {
+ // If we are inspecting an optimized frame use undefined as the
+ // value for all locals.
+ //
+ // TODO(1140): We should be able to get the correct values
+ // for locals in optimized frames.
+ for (int i = 0; i < info.NumberOfLocals(); i++) {
+ locals->set(i * 2, *info.LocalName(i));
+ locals->set(i * 2 + 1, isolate->heap()->undefined_value());
+ }
+ } else {
+ int i = 0;
+ for (; i < info.number_of_stack_slots(); ++i) {
+ // Use the value from the stack.
+ locals->set(i * 2, *info.LocalName(i));
locals->set(i * 2 + 1, it.frame()->GetExpression(i));
- } else {
- // Traverse the context chain to the function context as all local
- // variables stored in the context will be on the function context.
+ }
+ // Get the context containing declarations.
+ Handle<Context> context(
+ Context::cast(it.frame()->context())->declaration_context());
+ for (; i < info.NumberOfLocals(); ++i) {
Handle<String> name = info.LocalName(i);
- while (!context->is_function_context()) {
- context = Handle<Context>(context->previous());
- }
- ASSERT(context->is_function_context());
+ locals->set(i * 2, *name);
locals->set(i * 2 + 1,
context->get(scope_info->ContextSlotIndex(*name, NULL)));
}
@@ -9179,14 +10065,14 @@ static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
// frame or if the frame is optimized it cannot be at a return.
bool at_return = false;
if (!is_optimized_frame && index == 0) {
- at_return = Debug::IsBreakAtReturn(it.frame());
+ at_return = isolate->debug()->IsBreakAtReturn(it.frame());
}
// If positioned just before return find the value to be returned and add it
// to the frame information.
- Handle<Object> return_value = Factory::undefined_value();
+ Handle<Object> return_value = isolate->factory()->undefined_value();
if (at_return) {
- StackFrameIterator it2;
+ StackFrameIterator it2(isolate);
Address internal_frame_sp = NULL;
while (!it2.done()) {
if (it2.frame()->is_internal()) {
@@ -9200,7 +10086,8 @@ static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
// entering the debug break exit frame.
if (internal_frame_sp != NULL) {
return_value =
- Handle<Object>(Memory::Object_at(internal_frame_sp));
+ Handle<Object>(Memory::Object_at(internal_frame_sp),
+ isolate);
break;
}
}
@@ -9230,7 +10117,7 @@ static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
int details_size = kFrameDetailsFirstDynamicIndex +
2 * (argument_count + info.NumberOfLocals()) +
(at_return ? 1 : 0);
- Handle<FixedArray> details = Factory::NewFixedArray(details_size);
+ Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
// Add the frame id.
details->set(kFrameDetailsFrameIdIndex, *frame_id);
@@ -9249,18 +10136,19 @@ static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
if (position != RelocInfo::kNoPosition) {
details->set(kFrameDetailsSourcePositionIndex, Smi::FromInt(position));
} else {
- details->set(kFrameDetailsSourcePositionIndex, Heap::undefined_value());
+ details->set(kFrameDetailsSourcePositionIndex, heap->undefined_value());
}
// Add the constructor information.
- details->set(kFrameDetailsConstructCallIndex, Heap::ToBoolean(constructor));
+ details->set(kFrameDetailsConstructCallIndex, heap->ToBoolean(constructor));
// Add the at return information.
- details->set(kFrameDetailsAtReturnIndex, Heap::ToBoolean(at_return));
+ details->set(kFrameDetailsAtReturnIndex, heap->ToBoolean(at_return));
// Add information on whether this frame is invoked in the debugger context.
details->set(kFrameDetailsDebuggerFrameIndex,
- Heap::ToBoolean(*save->context() == *Debug::debug_context()));
+ heap->ToBoolean(*save->context() ==
+ *isolate->debug()->debug_context()));
// Fill the dynamic part.
int details_index = kFrameDetailsFirstDynamicIndex;
@@ -9271,7 +10159,7 @@ static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
if (i < info.number_of_parameters()) {
details->set(details_index++, *info.parameter_name(i));
} else {
- details->set(details_index++, Heap::undefined_value());
+ details->set(details_index++, heap->undefined_value());
}
// Parameter value. If we are inspecting an optimized frame, use
@@ -9283,7 +10171,7 @@ static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
(i < it.frame()->ComputeParametersCount())) {
details->set(details_index++, it.frame()->GetParameter(i));
} else {
- details->set(details_index++, Heap::undefined_value());
+ details->set(details_index++, heap->undefined_value());
}
}
@@ -9300,7 +10188,7 @@ static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
// Add the receiver (same as in function frame).
// THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
// THE FRAME ITERATOR TO WRAP THE RECEIVER.
- Handle<Object> receiver(it.frame()->receiver());
+ Handle<Object> receiver(it.frame()->receiver(), isolate);
if (!receiver->IsJSObject()) {
// If the receiver is NOT a JSObject we have hit an optimization
// where a value object is not converted into a wrapped JS object.
@@ -9310,17 +10198,19 @@ static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
it.Advance();
Handle<Context> calling_frames_global_context(
Context::cast(Context::cast(it.frame()->context())->global_context()));
- receiver = Factory::ToObject(receiver, calling_frames_global_context);
+ receiver =
+ isolate->factory()->ToObject(receiver, calling_frames_global_context);
}
details->set(kFrameDetailsReceiverIndex, *receiver);
ASSERT_EQ(details_size, details_index);
- return *Factory::NewJSArrayWithElements(details);
+ return *isolate->factory()->NewJSArrayWithElements(details);
}
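
A worked example of the details-array sizing used above: nine fixed slots (kFrameDetailsFirstDynamicIndex), a name/value pair per argument and per local, plus one extra slot when the frame is stopped at a return.

// frame_details_size_sketch.cc
#include <cstdio>

const int kFrameDetailsFirstDynamicIndex = 9;

int DetailsSize(int argument_count, int local_count, bool at_return) {
  return kFrameDetailsFirstDynamicIndex +
         2 * (argument_count + local_count) +
         (at_return ? 1 : 0);
}

int main() {
  std::printf("%d\n", DetailsSize(2, 3, true));  // 9 + 10 + 1 = 20
  return 0;
}
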
// Copy all the context locals into an object used to materialize a scope.
static bool CopyContextLocalsToScopeObject(
+ Isolate* isolate,
Handle<SerializedScopeInfo> serialized_scope_info,
ScopeInfo<>& scope_info,
Handle<Context> context,
@@ -9332,16 +10222,14 @@ static bool CopyContextLocalsToScopeObject(
int context_index = serialized_scope_info->ContextSlotIndex(
*scope_info.context_slot_name(i), NULL);
- // Don't include the arguments shadow (.arguments) context variable.
- if (*scope_info.context_slot_name(i) != Heap::arguments_shadow_symbol()) {
- RETURN_IF_EMPTY_HANDLE_VALUE(
- SetProperty(scope_object,
- scope_info.context_slot_name(i),
- Handle<Object>(context->get(context_index)),
- NONE,
- kNonStrictMode),
- false);
- }
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
+ SetProperty(scope_object,
+ scope_info.context_slot_name(i),
+ Handle<Object>(context->get(context_index), isolate),
+ NONE,
+ kNonStrictMode),
+ false);
}
return true;
@@ -9350,7 +10238,8 @@ static bool CopyContextLocalsToScopeObject(
// Create a plain JSObject which materializes the local scope for the specified
// frame.
-static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) {
+static Handle<JSObject> MaterializeLocalScope(Isolate* isolate,
+ JavaScriptFrame* frame) {
Handle<JSFunction> function(JSFunction::cast(frame->function()));
Handle<SharedFunctionInfo> shared(function->shared());
Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
@@ -9358,14 +10247,16 @@ static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) {
// Allocate and initialize a JSObject with all the arguments, stack locals
// heap locals and extension properties of the debugged function.
- Handle<JSObject> local_scope = Factory::NewJSObject(Top::object_function());
+ Handle<JSObject> local_scope =
+ isolate->factory()->NewJSObject(isolate->object_function());
// First fill all parameters.
for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
SetProperty(local_scope,
scope_info.parameter_name(i),
- Handle<Object>(frame->GetParameter(i)),
+ Handle<Object>(frame->GetParameter(i), isolate),
NONE,
kNonStrictMode),
Handle<JSObject>());
@@ -9374,9 +10265,10 @@ static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) {
// Second fill all stack locals.
for (int i = 0; i < scope_info.number_of_stack_slots(); i++) {
RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
SetProperty(local_scope,
scope_info.stack_slot_name(i),
- Handle<Object>(frame->GetExpression(i)),
+ Handle<Object>(frame->GetExpression(i), isolate),
NONE,
kNonStrictMode),
Handle<JSObject>());
@@ -9384,8 +10276,9 @@ static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) {
// Third fill all context locals.
Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context(frame_context->fcontext());
- if (!CopyContextLocalsToScopeObject(serialized_scope_info, scope_info,
+ Handle<Context> function_context(frame_context->declaration_context());
+ if (!CopyContextLocalsToScopeObject(isolate,
+ serialized_scope_info, scope_info,
function_context, local_scope)) {
return Handle<JSObject>();
}
@@ -9402,6 +10295,7 @@ static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) {
ASSERT(keys->get(i)->IsString());
Handle<String> key(String::cast(keys->get(i)));
RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
SetProperty(local_scope,
key,
GetProperty(ext, key),
@@ -9417,8 +10311,9 @@ static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) {
// Create a plain JSObject which materializes the closure content for the
// context.
-static Handle<JSObject> MaterializeClosure(Handle<Context> context) {
- ASSERT(context->is_function_context());
+static Handle<JSObject> MaterializeClosure(Isolate* isolate,
+ Handle<Context> context) {
+ ASSERT(context->IsFunctionContext());
Handle<SharedFunctionInfo> shared(context->closure()->shared());
Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
@@ -9426,32 +10321,12 @@ static Handle<JSObject> MaterializeClosure(Handle<Context> context) {
// Allocate and initialize a JSObject with all the content of this function
// closure.
- Handle<JSObject> closure_scope = Factory::NewJSObject(Top::object_function());
-
- // Check whether the arguments shadow object exists.
- int arguments_shadow_index =
- shared->scope_info()->ContextSlotIndex(Heap::arguments_shadow_symbol(),
- NULL);
- if (arguments_shadow_index >= 0) {
- // In this case all the arguments are available in the arguments shadow
- // object.
- Handle<JSObject> arguments_shadow(
- JSObject::cast(context->get(arguments_shadow_index)));
- for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
- // We don't expect exception-throwing getters on the arguments shadow.
- Object* element = arguments_shadow->GetElement(i)->ToObjectUnchecked();
- RETURN_IF_EMPTY_HANDLE_VALUE(
- SetProperty(closure_scope,
- scope_info.parameter_name(i),
- Handle<Object>(element),
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
- }
- }
+ Handle<JSObject> closure_scope =
+ isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals to the context extension.
- if (!CopyContextLocalsToScopeObject(serialized_scope_info, scope_info,
+ if (!CopyContextLocalsToScopeObject(isolate,
+ serialized_scope_info, scope_info,
context, closure_scope)) {
return Handle<JSObject>();
}
@@ -9465,7 +10340,8 @@ static Handle<JSObject> MaterializeClosure(Handle<Context> context) {
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
Handle<String> key(String::cast(keys->get(i)));
- RETURN_IF_EMPTY_HANDLE_VALUE(
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
SetProperty(closure_scope,
key,
GetProperty(ext, key),
@@ -9479,6 +10355,23 @@ static Handle<JSObject> MaterializeClosure(Handle<Context> context) {
}
+// Create a plain JSObject which materializes the scope for the specified
+// catch context.
+static Handle<JSObject> MaterializeCatchScope(Isolate* isolate,
+ Handle<Context> context) {
+ ASSERT(context->IsCatchContext());
+ Handle<String> name(String::cast(context->extension()));
+ Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX));
+ Handle<JSObject> catch_scope =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
+ SetProperty(catch_scope, name, thrown_object, NONE, kNonStrictMode),
+ Handle<JSObject>());
+ return catch_scope;
+}
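+
+MaterializeCatchScope is new in this version because catch blocks now get a
+dedicated catch context instead of an implicit with block: the scope object
+it builds holds exactly one property, the catch variable bound to the thrown
+object. A standalone model of that mapping:
+
+// catch_scope_sketch.cc -- standalone model, not V8 code.
+#include <cstdio>
+#include <map>
+#include <string>
+
+struct CatchContext {
+  std::string name;   // context->extension() in the real code
+  int thrown_object;  // context->get(Context::THROWN_OBJECT_INDEX)
+};
+
+std::map<std::string, int> MaterializeCatchScope(const CatchContext& context) {
+  return {{context.name, context.thrown_object}};  // one binding only
+}
+
+int main() {
+  auto scope = MaterializeCatchScope(CatchContext{"e", 42});
+  std::printf("e=%d\n", scope["e"]);  // the thrown value under the catch name
+  return 0;
+}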
+
+
// Iterate over the actual scopes visible from a stack frame. All scopes are
// backed by an actual context except the local scope, which is inserted
// "artifically" in the context chain.
@@ -9489,15 +10382,12 @@ class ScopeIterator {
ScopeTypeLocal,
ScopeTypeWith,
ScopeTypeClosure,
- // Every catch block contains an implicit with block (its parameter is
- // a JSContextExtensionObject) that extends current scope with a variable
- // holding exception object. Such with blocks are treated as scopes of their
- // own type.
ScopeTypeCatch
};
- explicit ScopeIterator(JavaScriptFrame* frame)
- : frame_(frame),
+ ScopeIterator(Isolate* isolate, JavaScriptFrame* frame)
+ : isolate_(isolate),
+ frame_(frame),
function_(JSFunction::cast(frame->function())),
context_(Context::cast(frame->context())),
local_done_(false),
@@ -9510,9 +10400,13 @@ class ScopeIterator {
// Checking for the existence of .result seems fragile, but the scope info
// saved with the code object does not otherwise have that information.
int index = function_->shared()->scope_info()->
- StackSlotIndex(Heap::result_symbol());
+ StackSlotIndex(isolate_->heap()->result_symbol());
at_local_ = index < 0;
- } else if (context_->is_function_context()) {
+ } else if (context_->IsFunctionContext()) {
+ at_local_ = true;
+ } else if (context_->closure() != *function_) {
+ // The context_ is a with or catch block from the outer function.
+ ASSERT(context_->IsWithContext() || context_->IsCatchContext());
at_local_ = true;
}
}
@@ -9542,16 +10436,12 @@ class ScopeIterator {
}
// Move to the next context.
- if (context_->is_function_context()) {
- context_ = Handle<Context>(Context::cast(context_->closure()->context()));
- } else {
- context_ = Handle<Context>(context_->previous());
- }
+ context_ = Handle<Context>(context_->previous(), isolate_);
// If passing the local scope indicate that the current scope is now the
// local scope.
if (!local_done_ &&
- (context_->IsGlobalContext() || (context_->is_function_context()))) {
+ (context_->IsGlobalContext() || context_->IsFunctionContext())) {
at_local_ = true;
}
}
@@ -9565,18 +10455,13 @@ class ScopeIterator {
ASSERT(context_->global()->IsGlobalObject());
return ScopeTypeGlobal;
}
- if (context_->is_function_context()) {
+ if (context_->IsFunctionContext()) {
return ScopeTypeClosure;
}
- ASSERT(context_->has_extension());
- // Current scope is either an explicit with statement or a with statement
- // implicitely generated for a catch block.
- // If the extension object here is a JSContextExtensionObject then
- // current with statement is one frome a catch block otherwise it's a
- // regular with statement.
- if (context_->extension()->IsJSContextExtensionObject()) {
+ if (context_->IsCatchContext()) {
return ScopeTypeCatch;
}
+ ASSERT(context_->IsWithContext());
return ScopeTypeWith;
}
@@ -9585,20 +10470,17 @@ class ScopeIterator {
switch (Type()) {
case ScopeIterator::ScopeTypeGlobal:
return Handle<JSObject>(CurrentContext()->global());
- break;
case ScopeIterator::ScopeTypeLocal:
// Materialize the content of the local scope into a JSObject.
- return MaterializeLocalScope(frame_);
- break;
+ return MaterializeLocalScope(isolate_, frame_);
case ScopeIterator::ScopeTypeWith:
- case ScopeIterator::ScopeTypeCatch:
// Return the with object.
- return Handle<JSObject>(CurrentContext()->extension());
- break;
+ return Handle<JSObject>(JSObject::cast(CurrentContext()->extension()));
+ case ScopeIterator::ScopeTypeCatch:
+ return MaterializeCatchScope(isolate_, CurrentContext());
case ScopeIterator::ScopeTypeClosure:
// Materialize the content of the closure scope into a JSObject.
- return MaterializeClosure(CurrentContext());
- break;
+ return MaterializeClosure(isolate_, CurrentContext());
}
UNREACHABLE();
return Handle<JSObject>();
@@ -9629,8 +10511,7 @@ class ScopeIterator {
if (!CurrentContext().is_null()) {
CurrentContext()->Print();
if (CurrentContext()->has_extension()) {
- Handle<JSObject> extension =
- Handle<JSObject>(CurrentContext()->extension());
+ Handle<Object> extension(CurrentContext()->extension());
if (extension->IsJSContextExtensionObject()) {
extension->Print();
}
@@ -9639,34 +10520,27 @@ class ScopeIterator {
break;
}
- case ScopeIterator::ScopeTypeWith: {
+ case ScopeIterator::ScopeTypeWith:
PrintF("With:\n");
- Handle<JSObject> extension =
- Handle<JSObject>(CurrentContext()->extension());
- extension->Print();
+ CurrentContext()->extension()->Print();
break;
- }
- case ScopeIterator::ScopeTypeCatch: {
+ case ScopeIterator::ScopeTypeCatch:
PrintF("Catch:\n");
- Handle<JSObject> extension =
- Handle<JSObject>(CurrentContext()->extension());
- extension->Print();
+ CurrentContext()->extension()->Print();
+ CurrentContext()->get(Context::THROWN_OBJECT_INDEX)->Print();
break;
- }
- case ScopeIterator::ScopeTypeClosure: {
+ case ScopeIterator::ScopeTypeClosure:
PrintF("Closure:\n");
CurrentContext()->Print();
if (CurrentContext()->has_extension()) {
- Handle<JSObject> extension =
- Handle<JSObject>(CurrentContext()->extension());
+ Handle<Object> extension(CurrentContext()->extension());
if (extension->IsJSContextExtensionObject()) {
extension->Print();
}
}
break;
- }
default:
UNREACHABLE();
@@ -9676,6 +10550,7 @@ class ScopeIterator {
#endif
private:
+ Isolate* isolate_;
JavaScriptFrame* frame_;
Handle<JSFunction> function_;
Handle<Context> context_;
@@ -9686,25 +10561,26 @@ class ScopeIterator {
};
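
The iterator yields the artificial local scope first and then classifies each context up the previous() chain until the global context. A simplified standalone model of that walk, with plain linked nodes standing in for V8 contexts:

// scope_walk_sketch.cc -- simplified model of the iteration order above.
#include <cstdio>

enum ScopeType { ScopeTypeGlobal, ScopeTypeLocal, ScopeTypeWith,
                 ScopeTypeClosure, ScopeTypeCatch };

struct Context {
  ScopeType type;
  Context* previous;  // context_->previous() in the real iterator
};

void IterateScopes(Context* context) {
  std::printf("%d ", ScopeTypeLocal);  // local scope has no context of its own
  for (Context* c = context; c != nullptr; c = c->previous) {
    std::printf("%d ", c->type);
  }
  std::printf("\n");
}

int main() {
  Context global{ScopeTypeGlobal, nullptr};
  Context closure{ScopeTypeClosure, &global};
  Context catch_context{ScopeTypeCatch, &closure};
  IterateScopes(&catch_context);  // 1 4 3 0: local, catch, closure, global
  return 0;
}
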
-static MaybeObject* Runtime_GetScopeCount(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeCount) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
// Check arguments.
Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
if (!maybe_check->ToObject(&check)) return maybe_check;
}
CONVERT_CHECKED(Smi, wrapped_id, args[1]);
// Get the frame where the debugging is performed.
StackFrame::Id id = UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator it(id);
+ JavaScriptFrameIterator it(isolate, id);
JavaScriptFrame* frame = it.frame();
// Count the visible scopes.
int n = 0;
- for (ScopeIterator it(frame); !it.Done(); it.Next()) {
+ for (ScopeIterator it(isolate, frame); !it.Done(); it.Next()) {
n++;
}
@@ -9724,13 +10600,14 @@ static const int kScopeDetailsSize = 2;
// The array returned contains the following information:
// 0: Scope type
// 1: Scope object
-static MaybeObject* Runtime_GetScopeDetails(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
// Check arguments.
Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
if (!maybe_check->ToObject(&check)) return maybe_check;
}
CONVERT_CHECKED(Smi, wrapped_id, args[1]);
@@ -9738,62 +10615,64 @@ static MaybeObject* Runtime_GetScopeDetails(Arguments args) {
// Get the frame where the debugging is performed.
StackFrame::Id id = UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator frame_it(id);
+ JavaScriptFrameIterator frame_it(isolate, id);
JavaScriptFrame* frame = frame_it.frame();
// Find the requested scope.
int n = 0;
- ScopeIterator it(frame);
+ ScopeIterator it(isolate, frame);
for (; !it.Done() && n < index; it.Next()) {
n++;
}
if (it.Done()) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Calculate the size of the result.
int details_size = kScopeDetailsSize;
- Handle<FixedArray> details = Factory::NewFixedArray(details_size);
+ Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
// Fill in scope details.
details->set(kScopeDetailsTypeIndex, Smi::FromInt(it.Type()));
Handle<JSObject> scope_object = it.ScopeObject();
- RETURN_IF_EMPTY_HANDLE(scope_object);
+ RETURN_IF_EMPTY_HANDLE(isolate, scope_object);
details->set(kScopeDetailsObjectIndex, *scope_object);
- return *Factory::NewJSArrayWithElements(details);
+ return *isolate->factory()->NewJSArrayWithElements(details);
}
-static MaybeObject* Runtime_DebugPrintScopes(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrintScopes) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 0);
#ifdef DEBUG
// Print the scopes for the top frame.
StackFrameLocator locator;
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- for (ScopeIterator it(frame); !it.Done(); it.Next()) {
+ for (ScopeIterator it(isolate, frame); !it.Done(); it.Next()) {
it.DebugPrint();
}
#endif
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_GetThreadCount(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetThreadCount) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
// Check arguments.
Object* result;
- { MaybeObject* maybe_result = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_result = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Count all archived V8 threads.
int n = 0;
- for (ThreadState* thread = ThreadState::FirstInUse();
+ for (ThreadState* thread =
+ isolate->thread_manager()->FirstThreadStateInUse();
thread != NULL;
thread = thread->Next()) {
n++;
@@ -9815,70 +10694,76 @@ static const int kThreadDetailsSize = 2;
// The array returned contains the following information:
// 0: Is current thread?
// 1: Thread id
-static MaybeObject* Runtime_GetThreadDetails(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetThreadDetails) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
// Check arguments.
Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
if (!maybe_check->ToObject(&check)) return maybe_check;
}
CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
// Allocate array for result.
- Handle<FixedArray> details = Factory::NewFixedArray(kThreadDetailsSize);
+ Handle<FixedArray> details =
+ isolate->factory()->NewFixedArray(kThreadDetailsSize);
// Thread index 0 is current thread.
if (index == 0) {
// Fill the details.
- details->set(kThreadDetailsCurrentThreadIndex, Heap::true_value());
+ details->set(kThreadDetailsCurrentThreadIndex,
+ isolate->heap()->true_value());
details->set(kThreadDetailsThreadIdIndex,
- Smi::FromInt(ThreadManager::CurrentId()));
+ Smi::FromInt(ThreadId::Current().ToInteger()));
} else {
// Find the thread with the requested index.
int n = 1;
- ThreadState* thread = ThreadState::FirstInUse();
+ ThreadState* thread =
+ isolate->thread_manager()->FirstThreadStateInUse();
while (index != n && thread != NULL) {
thread = thread->Next();
n++;
}
if (thread == NULL) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Fill the details.
- details->set(kThreadDetailsCurrentThreadIndex, Heap::false_value());
- details->set(kThreadDetailsThreadIdIndex, Smi::FromInt(thread->id()));
+ details->set(kThreadDetailsCurrentThreadIndex,
+ isolate->heap()->false_value());
+ details->set(kThreadDetailsThreadIdIndex,
+ Smi::FromInt(thread->id().ToInteger()));
}
// Convert to JS array and return.
- return *Factory::NewJSArrayWithElements(details);
+ return *isolate->factory()->NewJSArrayWithElements(details);
}
// Sets the disable break state
// args[0]: disable break state
-static MaybeObject* Runtime_SetDisableBreak(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDisableBreak) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_BOOLEAN_CHECKED(disable_break, args[0]);
- Debug::set_disable_break(disable_break);
- return Heap::undefined_value();
+ isolate->debug()->set_disable_break(disable_break);
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_GetBreakLocations(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetBreakLocations) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
Handle<SharedFunctionInfo> shared(fun->shared());
// Find the number of break points
Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared);
- if (break_locations->IsUndefined()) return Heap::undefined_value();
+ if (break_locations->IsUndefined()) return isolate->heap()->undefined_value();
// Return array as JS array
- return *Factory::NewJSArrayWithElements(
+ return *isolate->factory()->NewJSArrayWithElements(
Handle<FixedArray>::cast(break_locations));
}
@@ -9887,8 +10772,8 @@ static MaybeObject* Runtime_GetBreakLocations(Arguments args) {
// args[0]: function
// args[1]: number: break source position (within the function source)
// args[2]: number: break point object
-static MaybeObject* Runtime_SetFunctionBreakPoint(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
Handle<SharedFunctionInfo> shared(fun->shared());
@@ -9897,13 +10782,15 @@ static MaybeObject* Runtime_SetFunctionBreakPoint(Arguments args) {
Handle<Object> break_point_object_arg = args.at<Object>(2);
// Set break point.
- Debug::SetBreakPoint(shared, break_point_object_arg, &source_position);
+ isolate->debug()->SetBreakPoint(shared, break_point_object_arg,
+ &source_position);
return Smi::FromInt(source_position);
}
-Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
+Object* Runtime::FindSharedFunctionInfoInScript(Isolate* isolate,
+ Handle<Script> script,
int position) {
// Iterate the heap looking for SharedFunctionInfo generated from the
// script. The inner most SharedFunctionInfo containing the source position
@@ -9962,7 +10849,7 @@ Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
}
if (target.is_null()) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// If the candidate found is compiled we are done. NOTE: when lazy
@@ -9986,8 +10873,8 @@ Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
// args[0]: script to set break point in
// args[1]: number: break source position (within the script source)
// args[2]: number: break point object
-static MaybeObject* Runtime_SetScriptBreakPoint(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSValue, wrapper, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
@@ -9999,7 +10886,7 @@ static MaybeObject* Runtime_SetScriptBreakPoint(Arguments args) {
Handle<Script> script(Script::cast(wrapper->value()));
Object* result = Runtime::FindSharedFunctionInfoInScript(
- script, source_position);
+ isolate, script, source_position);
if (!result->IsUndefined()) {
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
// Find position within function. The script position might be before the
@@ -10010,33 +10897,33 @@ static MaybeObject* Runtime_SetScriptBreakPoint(Arguments args) {
} else {
position = source_position - shared->start_position();
}
- Debug::SetBreakPoint(shared, break_point_object_arg, &position);
+ isolate->debug()->SetBreakPoint(shared, break_point_object_arg, &position);
position += shared->start_position();
return Smi::FromInt(position);
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Clear a break point
// args[0]: number: break point object
-static MaybeObject* Runtime_ClearBreakPoint(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearBreakPoint) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
Handle<Object> break_point_object_arg = args.at<Object>(0);
// Clear break point.
- Debug::ClearBreakPoint(break_point_object_arg);
+ isolate->debug()->ClearBreakPoint(break_point_object_arg);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Change the state of break on exceptions.
// args[0]: Enum value indicating whether to affect caught/uncaught exceptions.
// args[1]: Boolean indicating on/off.
-static MaybeObject* Runtime_ChangeBreakOnException(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ChangeBreakOnException) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
RUNTIME_ASSERT(args[0]->IsNumber());
CONVERT_BOOLEAN_CHECKED(enable, args[1]);
@@ -10046,21 +10933,21 @@ static MaybeObject* Runtime_ChangeBreakOnException(Arguments args) {
ExceptionBreakType type =
static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
// Update break point state.
- Debug::ChangeBreakOnException(type, enable);
- return Heap::undefined_value();
+ isolate->debug()->ChangeBreakOnException(type, enable);
+ return isolate->heap()->undefined_value();
}
// Returns the state of break on exceptions
// args[0]: boolean indicating uncaught exceptions
-static MaybeObject* Runtime_IsBreakOnException(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsBreakOnException) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
RUNTIME_ASSERT(args[0]->IsNumber());
ExceptionBreakType type =
static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
- bool result = Debug::IsBreakOnException(type);
+ bool result = isolate->debug()->IsBreakOnException(type);
return Smi::FromInt(result);
}
@@ -10070,16 +10957,17 @@ static MaybeObject* Runtime_IsBreakOnException(Arguments args) {
// args[1]: step action from the enumeration StepAction
// args[2]: number of times to perform the step, for step out it is the number
// of frames to step down.
-static MaybeObject* Runtime_PrepareStep(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
// Check arguments.
Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
if (!maybe_check->ToObject(&check)) return maybe_check;
}
if (!args[1]->IsNumber() || !args[2]->IsNumber()) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
// Get the step action and check validity.
@@ -10089,55 +10977,72 @@ static MaybeObject* Runtime_PrepareStep(Arguments args) {
step_action != StepOut &&
step_action != StepInMin &&
step_action != StepMin) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
// Get the number of steps.
int step_count = NumberToInt32(args[2]);
if (step_count < 1) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
// Clear all current stepping setup.
- Debug::ClearStepping();
+ isolate->debug()->ClearStepping();
// Prepare step.
- Debug::PrepareStep(static_cast<StepAction>(step_action), step_count);
- return Heap::undefined_value();
+ isolate->debug()->PrepareStep(static_cast<StepAction>(step_action),
+ step_count);
+ return isolate->heap()->undefined_value();
}
// Clear all stepping set by PrepareStep.
-static MaybeObject* Runtime_ClearStepping(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearStepping) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 0);
- Debug::ClearStepping();
- return Heap::undefined_value();
+ isolate->debug()->ClearStepping();
+ return isolate->heap()->undefined_value();
}
// Creates a copy of the with context chain. The copy of the context chain
// is linked to the function context supplied.
-static Handle<Context> CopyWithContextChain(Handle<Context> context_chain,
- Handle<Context> function_context) {
- // At the bottom of the chain. Return the function context to link to.
- if (context_chain->is_function_context()) {
- return function_context;
+static Handle<Context> CopyWithContextChain(Isolate* isolate,
+ Handle<JSFunction> function,
+ Handle<Context> current,
+ Handle<Context> base) {
+ // At the end of the chain. Return the base context to link to.
+ if (current->IsFunctionContext() || current->IsGlobalContext()) {
+ return base;
+ }
+
+ // Recursively copy the with and catch contexts.
+ HandleScope scope(isolate);
+ Handle<Context> previous(current->previous());
+ Handle<Context> new_previous =
+ CopyWithContextChain(isolate, function, previous, base);
+ Handle<Context> new_current;
+ if (current->IsCatchContext()) {
+ Handle<String> name(String::cast(current->extension()));
+ Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX));
+ new_current =
+ isolate->factory()->NewCatchContext(function,
+ new_previous,
+ name,
+ thrown_object);
+ } else {
+ Handle<JSObject> extension(JSObject::cast(current->extension()));
+ new_current =
+ isolate->factory()->NewWithContext(function, new_previous, extension);
}
-
- // Recursively copy the with contexts.
- Handle<Context> previous(context_chain->previous());
- Handle<JSObject> extension(JSObject::cast(context_chain->extension()));
- Handle<Context> context = CopyWithContextChain(function_context, previous);
- return Factory::NewWithContext(context,
- extension,
- context_chain->IsCatchContext());
+ return scope.CloseAndEscape(new_current);
}
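// Illustrative sketch: for a chain with(w) -> catch(e) -> function and a
// base context B, the recursion above yields fresh contexts
// with(w)' -> catch(e)' -> B, re-linked via new_previous while the original
// chain is left untouched.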
// Helper function to find or create the arguments object for
// Runtime_DebugEvaluate.
-static Handle<Object> GetArgumentsObject(JavaScriptFrame* frame,
+static Handle<Object> GetArgumentsObject(Isolate* isolate,
+ JavaScriptFrame* frame,
Handle<JSFunction> function,
Handle<SerializedScopeInfo> scope_info,
const ScopeInfo<>* sinfo,
@@ -10147,22 +11052,24 @@ static Handle<Object> GetArgumentsObject(JavaScriptFrame* frame,
// does not support eval) then create an 'arguments' object.
int index;
if (sinfo->number_of_stack_slots() > 0) {
- index = scope_info->StackSlotIndex(Heap::arguments_symbol());
+ index = scope_info->StackSlotIndex(isolate->heap()->arguments_symbol());
if (index != -1) {
- return Handle<Object>(frame->GetExpression(index));
+ return Handle<Object>(frame->GetExpression(index), isolate);
}
}
if (sinfo->number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
- index = scope_info->ContextSlotIndex(Heap::arguments_symbol(), NULL);
+ index = scope_info->ContextSlotIndex(isolate->heap()->arguments_symbol(),
+ NULL);
if (index != -1) {
- return Handle<Object>(function_context->get(index));
+ return Handle<Object>(function_context->get(index), isolate);
}
}
const int length = frame->ComputeParametersCount();
- Handle<JSObject> arguments = Factory::NewArgumentsObject(function, length);
- Handle<FixedArray> array = Factory::NewFixedArray(length);
+ Handle<JSObject> arguments =
+ isolate->factory()->NewArgumentsObject(function, length);
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
AssertNoAllocation no_gc;
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
@@ -10174,6 +11081,10 @@ static Handle<Object> GetArgumentsObject(JavaScriptFrame* frame,
}
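// Lookup order above: (1) an 'arguments' stack slot, (2) an 'arguments'
// context slot, (3) otherwise a fresh arguments object materialized from the
// frame's actual parameters.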
+static const char kSourceStr[] =
+ "(function(arguments,__source__){return eval(__source__);})";
+
+
// Evaluate a piece of JavaScript in the context of a stack frame for
// debugging. This is accomplished by creating a new context which in its
// extension part has all the parameters and locals of the function on the
@@ -10185,14 +11096,15 @@ static Handle<Object> GetArgumentsObject(JavaScriptFrame* frame,
// stack frame presenting the same view of the values of parameters and
// local variables as if the piece of JavaScript was evaluated at the point
// where the function on the stack frame is currently stopped.
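// In outline, the code below: (1) materializes the frame's locals into an
// extension object, (2) allocates a function context with that extension and
// re-chains any with/catch contexts in front of it, and (3) compiles the
// wrapper "(function(arguments,__source__){return eval(__source__);})" in
// that context and calls it with the frame's arguments object and the source.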
-static MaybeObject* Runtime_DebugEvaluate(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
+ HandleScope scope(isolate);
// Check the execution state and decode arguments frame and source to be
// evaluated.
ASSERT(args.length() == 5);
Object* check_result;
- { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
if (!maybe_check_result->ToObject(&check_result)) {
return maybe_check_result;
}
@@ -10207,7 +11119,7 @@ static MaybeObject* Runtime_DebugEvaluate(Arguments args) {
// Get the frame where the debugging is performed.
StackFrame::Id id = UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator it(id);
+ JavaScriptFrameIterator it(isolate, id);
JavaScriptFrame* frame = it.frame();
Handle<JSFunction> function(JSFunction::cast(frame->function()));
Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
@@ -10215,13 +11127,13 @@ static MaybeObject* Runtime_DebugEvaluate(Arguments args) {
// Traverse the saved contexts chain to find the active context for the
// selected frame.
- SaveContext* save = Top::save_context();
+ SaveContext* save = isolate->save_context();
while (save != NULL && !save->below(frame)) {
save = save->prev();
}
ASSERT(save != NULL);
- SaveContext savex;
- Top::set_context(*(save->context()));
+ SaveContext savex(isolate);
+ isolate->set_context(*(save->context()));
// Create the (empty) function replacing the function on the stack frame for
// the purpose of evaluating in the context created below. It is important
@@ -10230,7 +11142,8 @@ static MaybeObject* Runtime_DebugEvaluate(Arguments args) {
// in Context::Lookup, where context slots for parameters and local variables
// are looked at before the extension object.
Handle<JSFunction> go_between =
- Factory::NewFunction(Factory::empty_string(), Factory::undefined_value());
+ isolate->factory()->NewFunction(isolate->factory()->empty_string(),
+ isolate->factory()->undefined_value());
go_between->set_context(function->context());
#ifdef DEBUG
ScopeInfo<> go_between_sinfo(go_between->shared()->scope_info());
@@ -10239,22 +11152,24 @@ static MaybeObject* Runtime_DebugEvaluate(Arguments args) {
#endif
// Materialize the content of the local scope into a JSObject.
- Handle<JSObject> local_scope = MaterializeLocalScope(frame);
- RETURN_IF_EMPTY_HANDLE(local_scope);
+ Handle<JSObject> local_scope = MaterializeLocalScope(isolate, frame);
+ RETURN_IF_EMPTY_HANDLE(isolate, local_scope);
// Allocate a new context for the debug evaluation and set the extension
// object build.
Handle<Context> context =
- Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, go_between);
+ isolate->factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS,
+ go_between);
context->set_extension(*local_scope);
// Copy any with contexts present and chain them in front of this context.
Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context(frame_context->fcontext());
- context = CopyWithContextChain(frame_context, context);
+ Handle<Context> function_context(frame_context->declaration_context());
+ context = CopyWithContextChain(isolate, go_between, frame_context, context);
if (additional_context->IsJSObject()) {
- context = Factory::NewWithContext(context,
- Handle<JSObject>::cast(additional_context), false);
+ Handle<JSObject> extension = Handle<JSObject>::cast(additional_context);
+ context =
+ isolate->factory()->NewWithContext(go_between, context, extension);
}
// Wrap the evaluation statement in a new function compiled in the newly
@@ -10262,12 +11177,10 @@ static MaybeObject* Runtime_DebugEvaluate(Arguments args) {
// 'arguments'. This is to have access to what would have been 'arguments' in
// the function being debugged.
// function(arguments,__source__) {return eval(__source__);}
- static const char* source_str =
- "(function(arguments,__source__){return eval(__source__);})";
- static const int source_str_length = StrLength(source_str);
+
Handle<String> function_source =
- Factory::NewStringFromAscii(Vector<const char>(source_str,
- source_str_length));
+ isolate->factory()->NewStringFromAscii(
+ Vector<const char>(kSourceStr, sizeof(kSourceStr) - 1));
// Currently, the eval code will be executed in non-strict mode,
// even in a strict code context.
@@ -10278,17 +11191,18 @@ static MaybeObject* Runtime_DebugEvaluate(Arguments args) {
kNonStrictMode);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> compiled_function =
- Factory::NewFunctionFromSharedFunctionInfo(shared, context);
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context);
// Invoke the result of the compilation to get the evaluation function.
bool has_pending_exception;
- Handle<Object> receiver(frame->receiver());
+ Handle<Object> receiver(frame->receiver(), isolate);
Handle<Object> evaluation_function =
Execution::Call(compiled_function, receiver, 0, NULL,
&has_pending_exception);
if (has_pending_exception) return Failure::Exception();
- Handle<Object> arguments = GetArgumentsObject(frame, function, scope_info,
+ Handle<Object> arguments = GetArgumentsObject(isolate, frame,
+ function, scope_info,
&sinfo, function_context);
// Invoke the evaluation function and return the result.
@@ -10310,14 +11224,15 @@ static MaybeObject* Runtime_DebugEvaluate(Arguments args) {
}
-static MaybeObject* Runtime_DebugEvaluateGlobal(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
+ HandleScope scope(isolate);
// Check the execution state and decode arguments frame and source to be
// evaluated.
ASSERT(args.length() == 4);
Object* check_result;
- { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(args);
+ { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
if (!maybe_check_result->ToObject(&check_result)) {
return maybe_check_result;
}
@@ -10330,28 +11245,30 @@ static MaybeObject* Runtime_DebugEvaluateGlobal(Arguments args) {
DisableBreak disable_break_save(disable_break);
// Enter the top context from before the debugger was invoked.
- SaveContext save;
+ SaveContext save(isolate);
SaveContext* top = &save;
- while (top != NULL && *top->context() == *Debug::debug_context()) {
+ while (top != NULL && *top->context() == *isolate->debug()->debug_context()) {
top = top->prev();
}
if (top != NULL) {
- Top::set_context(*top->context());
+ isolate->set_context(*top->context());
}
// Get the global context now set to the top context from before the
// debugger was invoked.
- Handle<Context> context = Top::global_context();
+ Handle<Context> context = isolate->global_context();
bool is_global = true;
if (additional_context->IsJSObject()) {
// Create a function context first, then put a 'with' context on top of it.
- Handle<JSFunction> go_between = Factory::NewFunction(
- Factory::empty_string(), Factory::undefined_value());
+ Handle<JSFunction> go_between = isolate->factory()->NewFunction(
+ isolate->factory()->empty_string(),
+ isolate->factory()->undefined_value());
go_between->set_context(*context);
context =
- Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, go_between);
+ isolate->factory()->NewFunctionContext(
+ Context::MIN_CONTEXT_SLOTS, go_between);
context->set_extension(JSObject::cast(*additional_context));
is_global = false;
}
@@ -10363,12 +11280,13 @@ static MaybeObject* Runtime_DebugEvaluateGlobal(Arguments args) {
Compiler::CompileEval(source, context, is_global, kNonStrictMode);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> compiled_function =
- Handle<JSFunction>(Factory::NewFunctionFromSharedFunctionInfo(shared,
- context));
+ Handle<JSFunction>(
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
+ context));
// Invoke the result of the compilation to get the evaluation function.
bool has_pending_exception;
- Handle<Object> receiver = Top::global();
+ Handle<Object> receiver = isolate->global();
Handle<Object> result =
Execution::Call(compiled_function, receiver, 0, NULL,
&has_pending_exception);
@@ -10377,12 +11295,12 @@ static MaybeObject* Runtime_DebugEvaluateGlobal(Arguments args) {
}
-static MaybeObject* Runtime_DebugGetLoadedScripts(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetLoadedScripts) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 0);
// Fill the script objects.
- Handle<FixedArray> instances = Debug::GetLoadedScripts();
+ Handle<FixedArray> instances = isolate->debug()->GetLoadedScripts();
// Convert the script objects to proper JS objects.
for (int i = 0; i < instances->length(); i++) {
@@ -10397,7 +11315,8 @@ static MaybeObject* Runtime_DebugGetLoadedScripts(Arguments args) {
}
// Return result as a JS array.
- Handle<JSObject> result = Factory::NewJSObject(Top::array_function());
+ Handle<JSObject> result =
+ isolate->factory()->NewJSObject(isolate->array_function());
Handle<JSArray>::cast(result)->SetContent(*instances);
return *result;
}
@@ -10477,11 +11396,11 @@ static int DebugReferencedBy(JSObject* target,
// args[0]: the object to find references to
// args[1]: constructor function for instances to exclude (Mirror)
// args[2]: the maximum number of objects to return
-static MaybeObject* Runtime_DebugReferencedBy(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
ASSERT(args.length() == 3);
// First perform a full GC in order to avoid references from dead objects.
- Heap::CollectAllGarbage(false);
+ isolate->heap()->CollectAllGarbage(false);
// Check parameters.
CONVERT_CHECKED(JSObject, target, args[0]);
@@ -10493,7 +11412,7 @@ static MaybeObject* Runtime_DebugReferencedBy(Arguments args) {
// Get the constructor function for context extension and arguments array.
JSObject* arguments_boilerplate =
- Top::context()->global_context()->arguments_boilerplate();
+ isolate->context()->global_context()->arguments_boilerplate();
JSFunction* arguments_function =
JSFunction::cast(arguments_boilerplate->map()->constructor());
@@ -10504,7 +11423,7 @@ static MaybeObject* Runtime_DebugReferencedBy(Arguments args) {
// Allocate an array to hold the result.
Object* object;
- { MaybeObject* maybe_object = Heap::AllocateFixedArray(count);
+ { MaybeObject* maybe_object = isolate->heap()->AllocateFixedArray(count);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
FixedArray* instances = FixedArray::cast(object);
@@ -10515,8 +11434,8 @@ static MaybeObject* Runtime_DebugReferencedBy(Arguments args) {
// Return result as JS array.
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateJSObject(
- Top::context()->global_context()->array_function());
+ { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
+ isolate->context()->global_context()->array_function());
if (!maybe_result->ToObject(&result)) return maybe_result;
}
JSArray::cast(result)->SetContent(instances);
@@ -10557,11 +11476,11 @@ static int DebugConstructedBy(JSFunction* constructor, int max_references,
// Scan the heap for objects constructed by a specific function.
// args[0]: the constructor to find instances of
// args[1]: the maximum number of objects to return
-static MaybeObject* Runtime_DebugConstructedBy(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
ASSERT(args.length() == 2);
// First perform a full GC in order to avoid dead objects.
- Heap::CollectAllGarbage(false);
+ isolate->heap()->CollectAllGarbage(false);
// Check parameters.
CONVERT_CHECKED(JSFunction, constructor, args[0]);
@@ -10574,7 +11493,7 @@ static MaybeObject* Runtime_DebugConstructedBy(Arguments args) {
// Allocate an array to hold the result.
Object* object;
- { MaybeObject* maybe_object = Heap::AllocateFixedArray(count);
+ { MaybeObject* maybe_object = isolate->heap()->AllocateFixedArray(count);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
FixedArray* instances = FixedArray::cast(object);
@@ -10584,8 +11503,8 @@ static MaybeObject* Runtime_DebugConstructedBy(Arguments args) {
// Return result as JS array.
Object* result;
- { MaybeObject* maybe_result = Heap::AllocateJSObject(
- Top::context()->global_context()->array_function());
+ { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
+ isolate->context()->global_context()->array_function());
if (!maybe_result->ToObject(&result)) return maybe_result;
}
JSArray::cast(result)->SetContent(instances);
@@ -10595,7 +11514,7 @@ static MaybeObject* Runtime_DebugConstructedBy(Arguments args) {
// Find the effective prototype object as returned by __proto__.
// args[0]: the object to find the prototype for.
-static MaybeObject* Runtime_DebugGetPrototype(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPrototype) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSObject, obj, args[0]);
@@ -10605,16 +11524,16 @@ static MaybeObject* Runtime_DebugGetPrototype(Arguments args) {
}
-static MaybeObject* Runtime_SystemBreak(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) {
ASSERT(args.length() == 0);
CPU::DebugBreak();
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_DebugDisassembleFunction(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) {
#ifdef DEBUG
- HandleScope scope;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_CHECKED(JSFunction, func, 0);
@@ -10624,13 +11543,13 @@ static MaybeObject* Runtime_DebugDisassembleFunction(Arguments args) {
}
func->code()->PrintLn();
#endif // DEBUG
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_DebugDisassembleConstructor(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) {
#ifdef DEBUG
- HandleScope scope;
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_CHECKED(JSFunction, func, 0);
@@ -10640,11 +11559,11 @@ static MaybeObject* Runtime_DebugDisassembleConstructor(Arguments args) {
}
shared->construct_stub()->PrintLn();
#endif // DEBUG
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_FunctionGetInferredName(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -10654,7 +11573,7 @@ static MaybeObject* Runtime_FunctionGetInferredName(Arguments args) {
static int FindSharedFunctionInfosForScript(Script* script,
- FixedArray* buffer) {
+ FixedArray* buffer) {
AssertNoAllocation no_allocations;
int counter = 0;
@@ -10680,10 +11599,10 @@ static int FindSharedFunctionInfosForScript(Script* script,
// For a script, finds all SharedFunctionInfos in the heap that point
// to this script. Returns a JSArray of SharedFunctionInfos wrapped
// in OpaqueReferences.
-static MaybeObject* Runtime_LiveEditFindSharedFunctionInfosForScript(
- Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*,
+ Runtime_LiveEditFindSharedFunctionInfosForScript) {
ASSERT(args.length() == 1);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_CHECKED(JSValue, script_value, args[0]);
Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
@@ -10691,14 +11610,14 @@ static MaybeObject* Runtime_LiveEditFindSharedFunctionInfosForScript(
const int kBufferSize = 32;
Handle<FixedArray> array;
- array = Factory::NewFixedArray(kBufferSize);
+ array = isolate->factory()->NewFixedArray(kBufferSize);
int number = FindSharedFunctionInfosForScript(*script, *array);
if (number > kBufferSize) {
- array = Factory::NewFixedArray(number);
+ array = isolate->factory()->NewFixedArray(number);
FindSharedFunctionInfosForScript(*script, *array);
}
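// Two-pass scan: the fixed 32-entry buffer covers the common case; only on
// overflow is an exactly-sized array allocated and the heap walked again.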
- Handle<JSArray> result = Factory::NewJSArrayWithElements(array);
+ Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(array);
result->set_length(Smi::FromInt(number));
LiveEdit::WrapSharedFunctionInfos(result);
@@ -10713,16 +11632,16 @@ static MaybeObject* Runtime_LiveEditFindSharedFunctionInfosForScript(
// Returns a JSArray of compilation infos. The array is ordered so that
// each function with all its descendants is always stored in a contiguous
// range, with the function itself going first. The root function is a script
// function.
-static MaybeObject* Runtime_LiveEditGatherCompileInfo(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) {
ASSERT(args.length() == 2);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_CHECKED(JSValue, script, args[0]);
CONVERT_ARG_CHECKED(String, source, 1);
Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
JSArray* result = LiveEdit::GatherCompileInfo(script_handle, source);
- if (Top::has_pending_exception()) {
+ if (isolate->has_pending_exception()) {
return Failure::Exception();
}
@@ -10732,12 +11651,12 @@ static MaybeObject* Runtime_LiveEditGatherCompileInfo(Arguments args) {
// Changes the source of the script to a new_source.
// If old_script_name is provided (i.e. is a String), also creates a copy of
// the script with its original source and sends notification to debugger.
-static MaybeObject* Runtime_LiveEditReplaceScript(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) {
ASSERT(args.length() == 3);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_CHECKED(JSValue, original_script_value, args[0]);
CONVERT_ARG_CHECKED(String, new_source, 1);
- Handle<Object> old_script_name(args[2]);
+ Handle<Object> old_script_name(args[2], isolate);
CONVERT_CHECKED(Script, original_script_pointer,
original_script_value->value());
@@ -10751,23 +11670,23 @@ static MaybeObject* Runtime_LiveEditReplaceScript(Arguments args) {
Handle<Script> script_handle(Script::cast(old_script));
return *(GetScriptWrapper(script_handle));
} else {
- return Heap::null_value();
+ return isolate->heap()->null_value();
}
}
-static MaybeObject* Runtime_LiveEditFunctionSourceUpdated(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) {
ASSERT(args.length() == 1);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSArray, shared_info, 0);
return LiveEdit::FunctionSourceUpdated(shared_info);
}
// Replaces code of SharedFunctionInfo with a new one.
-static MaybeObject* Runtime_LiveEditReplaceFunctionCode(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) {
ASSERT(args.length() == 2);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSArray, new_compile_info, 0);
CONVERT_ARG_CHECKED(JSArray, shared_info, 1);
@@ -10775,17 +11694,17 @@ static MaybeObject* Runtime_LiveEditReplaceFunctionCode(Arguments args) {
}
// Connects SharedFunctionInfo to another script.
-static MaybeObject* Runtime_LiveEditFunctionSetScript(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) {
ASSERT(args.length() == 2);
- HandleScope scope;
- Handle<Object> function_object(args[0]);
- Handle<Object> script_object(args[1]);
+ HandleScope scope(isolate);
+ Handle<Object> function_object(args[0], isolate);
+ Handle<Object> script_object(args[1], isolate);
if (function_object->IsJSValue()) {
Handle<JSValue> function_wrapper = Handle<JSValue>::cast(function_object);
if (script_object->IsJSValue()) {
CONVERT_CHECKED(Script, script, JSValue::cast(*script_object)->value());
- script_object = Handle<Object>(script);
+ script_object = Handle<Object>(script, isolate);
}
LiveEdit::SetFunctionScript(function_wrapper, script_object);
@@ -10794,15 +11713,15 @@ static MaybeObject* Runtime_LiveEditFunctionSetScript(Arguments args) {
// and we check it in this function.
}
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// In a code of a parent function replaces original function as embedded object
// with a substitution one.
-static MaybeObject* Runtime_LiveEditReplaceRefToNestedFunction(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) {
ASSERT(args.length() == 3);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSValue, parent_wrapper, 0);
CONVERT_ARG_CHECKED(JSValue, orig_wrapper, 1);
@@ -10811,7 +11730,7 @@ static MaybeObject* Runtime_LiveEditReplaceRefToNestedFunction(Arguments args) {
LiveEdit::ReplaceRefToNestedFunction(parent_wrapper, orig_wrapper,
subst_wrapper);
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
@@ -10820,9 +11739,9 @@ static MaybeObject* Runtime_LiveEditReplaceRefToNestedFunction(Arguments args) {
// array of groups of 3 numbers:
// (change_begin, change_end, change_end_new_position).
// Each group describes a change in text; groups are sorted by change_begin.
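// Example (hypothetical values): replacing the source range [10, 20) with
// replacement text that ends at new position 25 is encoded as (10, 20, 25).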
-static MaybeObject* Runtime_LiveEditPatchFunctionPositions(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) {
ASSERT(args.length() == 2);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
CONVERT_ARG_CHECKED(JSArray, position_change_array, 1);
@@ -10834,9 +11753,9 @@ static MaybeObject* Runtime_LiveEditPatchFunctionPositions(Arguments args) {
// checks that none of them have activations on stacks (of any thread).
// Returns array of the same length with corresponding results of
// LiveEdit::FunctionPatchabilityStatus type.
-static MaybeObject* Runtime_LiveEditCheckAndDropActivations(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) {
ASSERT(args.length() == 2);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
CONVERT_BOOLEAN_CHECKED(do_drop, args[1]);
@@ -10846,9 +11765,9 @@ static MaybeObject* Runtime_LiveEditCheckAndDropActivations(Arguments args) {
// Compares 2 strings line-by-line, then token-wise and returns diff in form
// of JSArray of triplets (pos1, pos1_end, pos2_end) describing list
// of diff chunks.
-static MaybeObject* Runtime_LiveEditCompareStrings(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) {
ASSERT(args.length() == 2);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(String, s1, 0);
CONVERT_ARG_CHECKED(String, s2, 1);
@@ -10856,20 +11775,19 @@ static MaybeObject* Runtime_LiveEditCompareStrings(Arguments args) {
}
-
// A testing entry. Returns statement position which is the closest to
// source_position.
-static MaybeObject* Runtime_GetFunctionCodePositionFromSource(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) {
ASSERT(args.length() == 2);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
- Handle<Code> code(function->code());
+ Handle<Code> code(function->code(), isolate);
if (code->kind() != Code::FUNCTION &&
code->kind() != Code::OPTIMIZED_FUNCTION) {
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
RelocIterator it(*code, RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION));
@@ -10896,9 +11814,9 @@ static MaybeObject* Runtime_GetFunctionCodePositionFromSource(Arguments args) {
// Calls the specified function with or without entering the debugger.
// This is used in unit tests to run code as if the debugger is entered, or
// simply to have a stack with a C++ frame in the middle.
-static MaybeObject* Runtime_ExecuteInDebugContext(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) {
ASSERT(args.length() == 2);
- HandleScope scope;
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
CONVERT_BOOLEAN_CHECKED(without_debugger, args[1]);
@@ -10906,11 +11824,11 @@ static MaybeObject* Runtime_ExecuteInDebugContext(Arguments args) {
bool pending_exception;
{
if (without_debugger) {
- result = Execution::Call(function, Top::global(), 0, NULL,
+ result = Execution::Call(function, isolate->global(), 0, NULL,
&pending_exception);
} else {
EnterDebugger enter_debugger;
- result = Execution::Call(function, Top::global(), 0, NULL,
+ result = Execution::Call(function, isolate->global(), 0, NULL,
&pending_exception);
}
}
@@ -10923,61 +11841,62 @@ static MaybeObject* Runtime_ExecuteInDebugContext(Arguments args) {
// Sets a v8 flag.
-static MaybeObject* Runtime_SetFlags(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
CONVERT_CHECKED(String, arg, args[0]);
SmartPointer<char> flags =
arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
FlagList::SetFlagsFromString(*flags, StrLength(*flags));
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
// Performs a GC.
// Presently, it only does a full GC.
-static MaybeObject* Runtime_CollectGarbage(Arguments args) {
- Heap::CollectAllGarbage(true);
- return Heap::undefined_value();
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) {
+ isolate->heap()->CollectAllGarbage(true);
+ return isolate->heap()->undefined_value();
}
// Gets the current heap usage.
-static MaybeObject* Runtime_GetHeapUsage(Arguments args) {
- int usage = static_cast<int>(Heap::SizeOfObjects());
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) {
+ int usage = static_cast<int>(isolate->heap()->SizeOfObjects());
if (!Smi::IsValid(usage)) {
- return *Factory::NewNumberFromInt(usage);
+ return *isolate->factory()->NewNumberFromInt(usage);
}
return Smi::FromInt(usage);
}
// Tells whether live object list (LOL) support is enabled.
-static MaybeObject* Runtime_HasLOLEnabled(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLOLEnabled) {
#ifdef LIVE_OBJECT_LIST
- return Heap::true_value();
+ return isolate->heap()->true_value();
#else
- return Heap::false_value();
+ return isolate->heap()->false_value();
#endif
}
// Captures a live object list from the present heap.
-static MaybeObject* Runtime_CaptureLOL(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CaptureLOL) {
#ifdef LIVE_OBJECT_LIST
return LiveObjectList::Capture();
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// Deletes the specified live object list.
-static MaybeObject* Runtime_DeleteLOL(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteLOL) {
#ifdef LIVE_OBJECT_LIST
- CONVERT_SMI_CHECKED(id, args[0]);
+ CONVERT_SMI_ARG_CHECKED(id, 0);
bool success = LiveObjectList::Delete(id);
- return success ? Heap::true_value() : Heap::false_value();
+ return success ? isolate->heap()->true_value() :
+ isolate->heap()->false_value();
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
@@ -10987,54 +11906,54 @@ static MaybeObject* Runtime_DeleteLOL(Arguments args) {
// specified by id1 and id2.
// If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be
// dumped.
-static MaybeObject* Runtime_DumpLOL(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DumpLOL) {
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
- CONVERT_SMI_CHECKED(id1, args[0]);
- CONVERT_SMI_CHECKED(id2, args[1]);
- CONVERT_SMI_CHECKED(start, args[2]);
- CONVERT_SMI_CHECKED(count, args[3]);
+ CONVERT_SMI_ARG_CHECKED(id1, 0);
+ CONVERT_SMI_ARG_CHECKED(id2, 1);
+ CONVERT_SMI_ARG_CHECKED(start, 2);
+ CONVERT_SMI_ARG_CHECKED(count, 3);
CONVERT_ARG_CHECKED(JSObject, filter_obj, 4);
EnterDebugger enter_debugger;
return LiveObjectList::Dump(id1, id2, start, count, filter_obj);
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// Gets the specified object as requested by the debugger.
// This is only used for obj ids shown in live object lists.
-static MaybeObject* Runtime_GetLOLObj(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObj) {
#ifdef LIVE_OBJECT_LIST
- CONVERT_SMI_CHECKED(obj_id, args[0]);
+ CONVERT_SMI_ARG_CHECKED(obj_id, 0);
Object* result = LiveObjectList::GetObj(obj_id);
return result;
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// Gets the obj id for the specified address if valid.
// This is only used for obj ids shown in live object lists.
-static MaybeObject* Runtime_GetLOLObjId(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjId) {
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
CONVERT_ARG_CHECKED(String, address, 0);
Object* result = LiveObjectList::GetObjId(address);
return result;
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// Gets the retainers that keep the specified object alive.
-static MaybeObject* Runtime_GetLOLObjRetainers(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjRetainers) {
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
- CONVERT_SMI_CHECKED(obj_id, args[0]);
+ CONVERT_SMI_ARG_CHECKED(obj_id, 0);
RUNTIME_ASSERT(args[1]->IsUndefined() || args[1]->IsJSObject());
RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsBoolean());
RUNTIME_ASSERT(args[3]->IsUndefined() || args[3]->IsSmi());
@@ -11051,11 +11970,11 @@ static MaybeObject* Runtime_GetLOLObjRetainers(Arguments args) {
}
int start = 0;
if (args[3]->IsSmi()) {
- start = Smi::cast(args[3])->value();
+ start = args.smi_at(3);
}
int limit = Smi::kMaxValue;
if (args[4]->IsSmi()) {
- limit = Smi::cast(args[4])->value();
+ limit = args.smi_at(4);
}
return LiveObjectList::GetObjRetainers(obj_id,
@@ -11065,17 +11984,17 @@ static MaybeObject* Runtime_GetLOLObjRetainers(Arguments args) {
limit,
filter_obj);
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// Gets the reference path between 2 objects.
-static MaybeObject* Runtime_GetLOLPath(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLPath) {
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
- CONVERT_SMI_CHECKED(obj_id1, args[0]);
- CONVERT_SMI_CHECKED(obj_id2, args[1]);
+ CONVERT_SMI_ARG_CHECKED(obj_id1, 0);
+ CONVERT_SMI_ARG_CHECKED(obj_id2, 1);
RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsJSObject());
Handle<JSObject> instance_filter;
@@ -11087,45 +12006,45 @@ static MaybeObject* Runtime_GetLOLPath(Arguments args) {
LiveObjectList::GetPath(obj_id1, obj_id2, instance_filter);
return result;
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// Generates the response to a debugger request for a list of all
// previously captured live object lists.
-static MaybeObject* Runtime_InfoLOL(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InfoLOL) {
#ifdef LIVE_OBJECT_LIST
- CONVERT_SMI_CHECKED(start, args[0]);
- CONVERT_SMI_CHECKED(count, args[1]);
+ CONVERT_SMI_ARG_CHECKED(start, 0);
+ CONVERT_SMI_ARG_CHECKED(count, 1);
return LiveObjectList::Info(start, count);
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// Gets a dump of the specified object as requested by the debugger.
// This is only used for obj ids shown in live object lists.
-static MaybeObject* Runtime_PrintLOLObj(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PrintLOLObj) {
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
- CONVERT_SMI_CHECKED(obj_id, args[0]);
+ CONVERT_SMI_ARG_CHECKED(obj_id, 0);
Object* result = LiveObjectList::PrintObj(obj_id);
return result;
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
// Resets and releases all previously captured live object lists.
-static MaybeObject* Runtime_ResetLOL(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ResetLOL) {
#ifdef LIVE_OBJECT_LIST
LiveObjectList::Reset();
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
@@ -11135,17 +12054,17 @@ static MaybeObject* Runtime_ResetLOL(Arguments args) {
// specified by id1 and id2.
// If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be
// summarized.
-static MaybeObject* Runtime_SummarizeLOL(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SummarizeLOL) {
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
- CONVERT_SMI_CHECKED(id1, args[0]);
- CONVERT_SMI_CHECKED(id2, args[1]);
+ CONVERT_SMI_ARG_CHECKED(id1, 0);
+ CONVERT_SMI_ARG_CHECKED(id2, 1);
CONVERT_ARG_CHECKED(JSObject, filter_obj, 2);
EnterDebugger enter_debugger;
return LiveObjectList::Summarize(id1, id2, filter_obj);
#else
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
#endif
}
@@ -11153,25 +12072,25 @@ static MaybeObject* Runtime_SummarizeLOL(Arguments args) {
#ifdef ENABLE_LOGGING_AND_PROFILING
-static MaybeObject* Runtime_ProfilerResume(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(Smi, smi_modules, args[0]);
CONVERT_CHECKED(Smi, smi_tag, args[1]);
v8::V8::ResumeProfilerEx(smi_modules->value(), smi_tag->value());
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_ProfilerPause(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(Smi, smi_modules, args[0]);
CONVERT_CHECKED(Smi, smi_tag, args[1]);
v8::V8::PauseProfilerEx(smi_modules->value(), smi_tag->value());
- return Heap::undefined_value();
+ return isolate->heap()->undefined_value();
}
#endif // ENABLE_LOGGING_AND_PROFILING
@@ -11201,7 +12120,7 @@ static Handle<Object> Runtime_GetScriptFromScriptName(
}
// If no script with the requested script data is found return undefined.
- if (script.is_null()) return Factory::undefined_value();
+ if (script.is_null()) return FACTORY->undefined_value();
// Return the script found.
return GetScriptWrapper(script);
@@ -11211,8 +12130,8 @@ static Handle<Object> Runtime_GetScriptFromScriptName(
// Get the script object from script data. NOTE: Regarding performance
// see the NOTE for GetScriptFromScriptData.
// args[0]: script data for the script to find the source for
-static MaybeObject* Runtime_GetScript(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScript) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -11256,18 +12175,20 @@ static bool ShowFrameInStackTrace(StackFrame* raw_frame, Object* caller,
// Collect the raw data for a stack trace. Returns an array of
// 4-element segments, each containing a receiver, function, code and
// native code offset.
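// Result layout sketch (one segment per frame, including inlined frames):
// [recv0, fun0, code0, offset0, recv1, fun1, code1, offset1, ...]
// truncated after 'limit' frames.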
-static MaybeObject* Runtime_CollectStackTrace(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) {
ASSERT_EQ(args.length(), 2);
Handle<Object> caller = args.at<Object>(0);
CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[1]);
- HandleScope scope;
+ HandleScope scope(isolate);
+ Factory* factory = isolate->factory();
limit = Max(limit, 0); // Ensure that limit is not negative.
int initial_size = Min(limit, 10);
- Handle<JSArray> result = Factory::NewJSArray(initial_size * 4);
+ Handle<FixedArray> elements =
+ factory->NewFixedArrayWithHoles(initial_size * 4);
- StackFrameIterator iter;
+ StackFrameIterator iter(isolate);
// If the caller parameter is a function we skip frames until we're
// under it before starting to collect.
bool seen_caller = !caller->IsJSFunction();
@@ -11278,59 +12199,65 @@ static MaybeObject* Runtime_CollectStackTrace(Arguments args) {
if (ShowFrameInStackTrace(raw_frame, *caller, &seen_caller)) {
frames_seen++;
JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
- List<FrameSummary> frames(3); // Max 2 levels of inlining.
+ // Set initial size to the maximum inlining level + 1 for the outermost
+ // function.
+ List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
frame->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0; i--) {
+ if (cursor + 4 > elements->length()) {
+ int new_capacity = JSObject::NewElementsCapacity(elements->length());
+ Handle<FixedArray> new_elements =
+ factory->NewFixedArrayWithHoles(new_capacity);
+ for (int i = 0; i < cursor; i++) {
+ new_elements->set(i, elements->get(i));
+ }
+ elements = new_elements;
+ }
+ ASSERT(cursor + 4 <= elements->length());
+
Handle<Object> recv = frames[i].receiver();
Handle<JSFunction> fun = frames[i].function();
Handle<Code> code = frames[i].code();
Handle<Smi> offset(Smi::FromInt(frames[i].offset()));
- FixedArray* elements = FixedArray::cast(result->elements());
- if (cursor + 3 < elements->length()) {
- elements->set(cursor++, *recv);
- elements->set(cursor++, *fun);
- elements->set(cursor++, *code);
- elements->set(cursor++, *offset);
- } else {
- SetElement(result, cursor++, recv);
- SetElement(result, cursor++, fun);
- SetElement(result, cursor++, code);
- SetElement(result, cursor++, offset);
- }
+ elements->set(cursor++, *recv);
+ elements->set(cursor++, *fun);
+ elements->set(cursor++, *code);
+ elements->set(cursor++, *offset);
}
}
iter.Advance();
}
-
+ Handle<JSArray> result = factory->NewJSArrayWithElements(elements);
result->set_length(Smi::FromInt(cursor));
return *result;
}
// Returns V8 version as a string.
-static MaybeObject* Runtime_GetV8Version(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
ASSERT_EQ(args.length(), 0);
NoHandleAllocation ha;
const char* version_string = v8::V8::GetVersion();
- return Heap::AllocateStringFromAscii(CStrVector(version_string), NOT_TENURED);
+ return isolate->heap()->AllocateStringFromAscii(CStrVector(version_string),
+ NOT_TENURED);
}
-static MaybeObject* Runtime_Abort(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
ASSERT(args.length() == 2);
- OS::PrintError("abort: %s\n", reinterpret_cast<char*>(args[0]) +
- Smi::cast(args[1])->value());
- Top::PrintStack();
+ OS::PrintError("abort: %s\n",
+ reinterpret_cast<char*>(args[0]) + args.smi_at(1));
+ isolate->PrintStack();
OS::Abort();
UNREACHABLE();
return NULL;
}
-static MaybeObject* Runtime_GetFromCache(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
// This is only called from codegen, so checks might be more lax.
CONVERT_CHECKED(JSFunctionResultCache, cache, args[0]);
Object* key = args[1];
@@ -11364,7 +12291,7 @@ static MaybeObject* Runtime_GetFromCache(Arguments args) {
}
// There is no value in the cache. Invoke the function and cache result.
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<JSFunctionResultCache> cache_handle(cache);
Handle<Object> key_handle(key);
@@ -11373,7 +12300,7 @@ static MaybeObject* Runtime_GetFromCache(Arguments args) {
Handle<JSFunction> factory(JSFunction::cast(
cache_handle->get(JSFunctionResultCache::kFactoryIndex)));
// TODO(antonm): consider passing a receiver when constructing a cache.
- Handle<Object> receiver(Top::global_context()->global());
+ Handle<Object> receiver(isolate->global_context()->global());
// This handle is neither shared nor used later, so it's safe.
Object** argv[] = { key_handle.location() };
bool pending_exception = false;
@@ -11422,39 +12349,40 @@ static MaybeObject* Runtime_GetFromCache(Arguments args) {
}
-static MaybeObject* Runtime_NewMessageObject(Arguments args) {
- HandleScope scope;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewMessageObject) {
+ HandleScope scope(isolate);
CONVERT_ARG_CHECKED(String, type, 0);
CONVERT_ARG_CHECKED(JSArray, arguments, 1);
- return *Factory::NewJSMessageObject(type,
- arguments,
- 0,
- 0,
- Factory::undefined_value(),
- Factory::undefined_value(),
- Factory::undefined_value());
+ return *isolate->factory()->NewJSMessageObject(
+ type,
+ arguments,
+ 0,
+ 0,
+ isolate->factory()->undefined_value(),
+ isolate->factory()->undefined_value(),
+ isolate->factory()->undefined_value());
}
-static MaybeObject* Runtime_MessageGetType(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetType) {
CONVERT_CHECKED(JSMessageObject, message, args[0]);
return message->type();
}
-static MaybeObject* Runtime_MessageGetArguments(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetArguments) {
CONVERT_CHECKED(JSMessageObject, message, args[0]);
return message->arguments();
}
-static MaybeObject* Runtime_MessageGetStartPosition(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetStartPosition) {
CONVERT_CHECKED(JSMessageObject, message, args[0]);
return Smi::FromInt(message->start_position());
}
-static MaybeObject* Runtime_MessageGetScript(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) {
CONVERT_CHECKED(JSMessageObject, message, args[0]);
return message->script();
}
@@ -11463,10 +12391,17 @@ static MaybeObject* Runtime_MessageGetScript(Arguments args) {
#ifdef DEBUG
// ListNatives is ONLY used by fuzz-natives.js in debug mode.
// Exclude the code in release mode.
-static MaybeObject* Runtime_ListNatives(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
ASSERT(args.length() == 0);
HandleScope scope;
- Handle<JSArray> result = Factory::NewJSArray(0);
+#define COUNT_ENTRY(Name, argc, ressize) + 1
+ int entry_count = 0
+ RUNTIME_FUNCTION_LIST(COUNT_ENTRY)
+ INLINE_FUNCTION_LIST(COUNT_ENTRY)
+ INLINE_RUNTIME_FUNCTION_LIST(COUNT_ENTRY);
+#undef COUNT_ENTRY
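+// COUNT_ENTRY expands every list element to "+ 1", so the statement above
+// evaluates to 0 + 1 + 1 + ..., one addend per runtime or inline function.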
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(entry_count);
int index = 0;
bool inline_runtime_functions = false;
#define ADD_ENTRY(Name, argc, ressize) \
@@ -11475,16 +12410,17 @@ static MaybeObject* Runtime_ListNatives(Arguments args) {
Handle<String> name; \
/* Inline runtime functions have an underscore in front of the name. */ \
if (inline_runtime_functions) { \
- name = Factory::NewStringFromAscii( \
+ name = factory->NewStringFromAscii( \
Vector<const char>("_" #Name, StrLength("_" #Name))); \
} else { \
- name = Factory::NewStringFromAscii( \
+ name = factory->NewStringFromAscii( \
Vector<const char>(#Name, StrLength(#Name))); \
} \
- Handle<JSArray> pair = Factory::NewJSArray(0); \
- SetElement(pair, 0, name); \
- SetElement(pair, 1, Handle<Smi>(Smi::FromInt(argc))); \
- SetElement(result, index++, pair); \
+ Handle<FixedArray> pair_elements = factory->NewFixedArray(2); \
+ pair_elements->set(0, *name); \
+ pair_elements->set(1, Smi::FromInt(argc)); \
+ Handle<JSArray> pair = factory->NewJSArrayWithElements(pair_elements); \
+ elements->set(index++, *pair); \
}
inline_runtime_functions = false;
RUNTIME_FUNCTION_LIST(ADD_ENTRY)
@@ -11492,22 +12428,24 @@ static MaybeObject* Runtime_ListNatives(Arguments args) {
INLINE_FUNCTION_LIST(ADD_ENTRY)
INLINE_RUNTIME_FUNCTION_LIST(ADD_ENTRY)
#undef ADD_ENTRY
+ ASSERT_EQ(index, entry_count);
+ Handle<JSArray> result = factory->NewJSArrayWithElements(elements);
return *result;
}
#endif
-static MaybeObject* Runtime_Log(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, format, args[0]);
CONVERT_CHECKED(JSArray, elms, args[1]);
Vector<const char> chars = format->ToAsciiVector();
- Logger::LogRuntime(chars, elms);
- return Heap::undefined_value();
+ LOGGER->LogRuntime(chars, elms);
+ return isolate->heap()->undefined_value();
}
-static MaybeObject* Runtime_IS_VAR(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IS_VAR) {
UNREACHABLE(); // implemented as macro in the parser
return NULL;
}
@@ -11525,20 +12463,22 @@ static MaybeObject* Runtime_IS_VAR(Arguments args) {
{ Runtime::kInline##name, Runtime::INLINE, \
"_" #name, NULL, number_of_args, result_size },
-Runtime::Function kIntrinsicFunctions[] = {
+static const Runtime::Function kIntrinsicFunctions[] = {
RUNTIME_FUNCTION_LIST(F)
INLINE_FUNCTION_LIST(I)
INLINE_RUNTIME_FUNCTION_LIST(I)
};
-MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Object* dictionary) {
+MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap,
+ Object* dictionary) {
+ ASSERT(Isolate::Current()->heap() == heap);
ASSERT(dictionary != NULL);
ASSERT(StringDictionary::cast(dictionary)->NumberOfElements() == 0);
for (int i = 0; i < kNumFunctions; ++i) {
Object* name_symbol;
{ MaybeObject* maybe_name_symbol =
- Heap::LookupAsciiSymbol(kIntrinsicFunctions[i].name);
+ heap->LookupAsciiSymbol(kIntrinsicFunctions[i].name);
if (!maybe_name_symbol->ToObject(&name_symbol)) return maybe_name_symbol;
}
StringDictionary* string_dictionary = StringDictionary::cast(dictionary);
@@ -11557,10 +12497,11 @@ MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Object* dictionary) {
}
-Runtime::Function* Runtime::FunctionForSymbol(Handle<String> name) {
- int entry = Heap::intrinsic_function_names()->FindEntry(*name);
+const Runtime::Function* Runtime::FunctionForSymbol(Handle<String> name) {
+ Heap* heap = name->GetHeap();
+ int entry = heap->intrinsic_function_names()->FindEntry(*name);
if (entry != kNotFound) {
- Object* smi_index = Heap::intrinsic_function_names()->ValueAt(entry);
+ Object* smi_index = heap->intrinsic_function_names()->ValueAt(entry);
int function_index = Smi::cast(smi_index)->value();
return &(kIntrinsicFunctions[function_index]);
}
@@ -11568,22 +12509,23 @@ Runtime::Function* Runtime::FunctionForSymbol(Handle<String> name) {
}
-Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
+const Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
return &(kIntrinsicFunctions[static_cast<int>(id)]);
}
void Runtime::PerformGC(Object* result) {
+ Isolate* isolate = Isolate::Current();
Failure* failure = Failure::cast(result);
if (failure->IsRetryAfterGC()) {
// Try to do a garbage collection; ignore it if it fails. The C
// entry stub will throw an out-of-memory exception in that case.
- Heap::CollectGarbage(failure->allocation_space());
+ isolate->heap()->CollectGarbage(failure->allocation_space());
} else {
// Handle last resort GC and make sure to allow future allocations
// to grow the heap without causing GCs (if possible).
- Counters::gc_last_resort_from_js.Increment();
- Heap::CollectAllGarbage(false);
+ isolate->counters()->gc_last_resort_from_js()->Increment();
+ isolate->heap()->CollectAllGarbage(false);
}
}
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index 9dd6eda0a..5efc057df 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,9 @@
#ifndef V8_RUNTIME_H_
#define V8_RUNTIME_H_
+#include "allocation.h"
+#include "zone.h"
+
namespace v8 {
namespace internal {
@@ -64,6 +67,7 @@ namespace internal {
F(SpecialArrayFunctions, 1, 1) \
F(GetGlobalReceiver, 0, 1) \
\
+ F(GetPrototype, 1, 1) \
F(IsInPrototypeChain, 2, 1) \
F(SetHiddenPrototype, 2, 1) \
\
@@ -78,14 +82,20 @@ namespace internal {
F(GetFunctionDelegate, 1, 1) \
F(GetConstructorDelegate, 1, 1) \
F(NewArgumentsFast, 3, 1) \
+ F(NewStrictArgumentsFast, 3, 1) \
F(LazyCompile, 1, 1) \
F(LazyRecompile, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
F(NotifyOSR, 0, 1) \
- F(DeoptimizeFunction, 1, 1) \
+ F(DeoptimizeFunction, 1, 1) \
+ F(RunningInSimulator, 0, 1) \
+ F(OptimizeFunctionOnNextCall, 1, 1) \
+ F(GetOptimizationStatus, 1, 1) \
+ F(GetOptimizationCount, 1, 1) \
F(CompileForOnStackReplacement, 1, 1) \
F(SetNewFunctionAttributes, 1, 1) \
F(AllocateInNewSpace, 1, 1) \
+ F(SetNativeFlag, 1, 1) \
\
/* Array join support */ \
F(PushIfAbsent, 2, 1) \
@@ -107,6 +117,7 @@ namespace internal {
F(URIUnescape, 1, 1) \
F(QuoteJSONString, 1, 1) \
F(QuoteJSONStringComma, 1, 1) \
+ F(QuoteJSONStringArray, 1, 1) \
\
F(NumberToString, 1, 1) \
F(NumberToStringSkipCache, 1, 1) \
@@ -129,6 +140,7 @@ namespace internal {
F(StringAdd, 2, 1) \
F(StringBuilderConcat, 3, 1) \
F(StringBuilderJoin, 3, 1) \
+ F(SparseJoinWithSeparator, 3, 1) \
\
/* Bit operations */ \
F(NumberOr, 2, 1) \
@@ -200,6 +212,7 @@ namespace internal {
F(FunctionSetPrototype, 2, 1) \
F(FunctionGetName, 1, 1) \
F(FunctionSetName, 2, 1) \
+ F(FunctionSetBound, 1, 1) \
F(FunctionRemovePrototype, 1, 1) \
F(FunctionGetSourceCode, 1, 1) \
F(FunctionGetScript, 1, 1) \
@@ -267,8 +280,10 @@ namespace internal {
F(CreateArrayLiteral, 3, 1) \
F(CreateArrayLiteralShallow, 3, 1) \
\
- /* Catch context extension objects */ \
- F(CreateCatchExtensionObject, 2, 1) \
+ /* Harmony proxies */ \
+ F(CreateJSProxy, 2, 1) \
+ F(IsJSProxy, 1, 1) \
+ F(GetHandler, 1, 1) \
\
/* Statements */ \
F(NewClosure, 3, 1) \
@@ -282,9 +297,9 @@ namespace internal {
F(PromoteScheduledException, 0, 1) \
\
/* Contexts */ \
- F(NewContext, 1, 1) \
- F(PushContext, 1, 1) \
- F(PushCatchContext, 1, 1) \
+ F(NewFunctionContext, 1, 1) \
+ F(PushWithContext, 2, 1) \
+ F(PushCatchContext, 3, 1) \
F(DeleteContextSlot, 2, 1) \
F(LoadContextSlot, 2, 2) \
F(LoadContextSlotNoReferenceError, 2, 2) \
@@ -412,7 +427,6 @@ namespace internal {
#define RUNTIME_FUNCTION_LIST_DEBUG(F)
#endif
-
// ----------------------------------------------------------------------------
// RUNTIME_FUNCTION_LIST defines all runtime functions accessed
// either directly by id (via the code generator), or indirectly
@@ -482,6 +496,48 @@ namespace internal {
//---------------------------------------------------------------------------
// Runtime provides access to all C++ runtime functions.
+class RuntimeState {
+ public:
+ StaticResource<StringInputBuffer>* string_input_buffer() {
+ return &string_input_buffer_;
+ }
+ unibrow::Mapping<unibrow::ToUppercase, 128>* to_upper_mapping() {
+ return &to_upper_mapping_;
+ }
+ unibrow::Mapping<unibrow::ToLowercase, 128>* to_lower_mapping() {
+ return &to_lower_mapping_;
+ }
+ StringInputBuffer* string_input_buffer_compare_bufx() {
+ return &string_input_buffer_compare_bufx_;
+ }
+ StringInputBuffer* string_input_buffer_compare_bufy() {
+ return &string_input_buffer_compare_bufy_;
+ }
+ StringInputBuffer* string_locale_compare_buf1() {
+ return &string_locale_compare_buf1_;
+ }
+ StringInputBuffer* string_locale_compare_buf2() {
+ return &string_locale_compare_buf2_;
+ }
+
+ private:
+ RuntimeState() {}
+ // Non-reentrant string buffer for efficient general use in the runtime.
+ StaticResource<StringInputBuffer> string_input_buffer_;
+ unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
+ unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
+ StringInputBuffer string_input_buffer_compare_bufx_;
+ StringInputBuffer string_input_buffer_compare_bufy_;
+ StringInputBuffer string_locale_compare_buf1_;
+ StringInputBuffer string_locale_compare_buf2_;
+
+ friend class Isolate;
+ friend class Runtime;
+
+ DISALLOW_COPY_AND_ASSIGN(RuntimeState);
+};
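+// Usage sketch (assuming Isolate exposes an accessor along the lines of
+// isolate->runtime_state(), as the friend declaration suggests):
+//   StringInputBuffer* buf =
+//       isolate->runtime_state()->string_locale_compare_buf1();
+// This keeps what were process-wide statics in per-isolate storage.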
+
+
class Runtime : public AllStatic {
public:
enum FunctionId {
@@ -525,58 +581,67 @@ class Runtime : public AllStatic {
// retried with a new, empty StringDictionary, not with the same one.
// Alternatively, heap initialization can be completely restarted.
MUST_USE_RESULT static MaybeObject* InitializeIntrinsicFunctionNames(
- Object* dictionary);
+ Heap* heap, Object* dictionary);
// Get the intrinsic function with the given name, which must be a symbol.
- static Function* FunctionForSymbol(Handle<String> name);
+ static const Function* FunctionForSymbol(Handle<String> name);
// Get the intrinsic function with the given FunctionId.
- static Function* FunctionForId(FunctionId id);
+ static const Function* FunctionForId(FunctionId id);
// General-purpose helper functions for runtime system.
- static int StringMatch(Handle<String> sub, Handle<String> pat, int index);
+ static int StringMatch(Isolate* isolate,
+ Handle<String> sub,
+ Handle<String> pat,
+ int index);
- static bool IsUpperCaseChar(uint16_t ch);
+ static bool IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch);
// TODO(1240886): The following three methods are *not* handle safe,
// but accept handle arguments. This seems fragile.
// Support getting the characters in a string using [] notation as
// in Firefox/SpiderMonkey, Safari and Opera.
- MUST_USE_RESULT static MaybeObject* GetElementOrCharAt(Handle<Object> object,
+ MUST_USE_RESULT static MaybeObject* GetElementOrCharAt(Isolate* isolate,
+ Handle<Object> object,
uint32_t index);
MUST_USE_RESULT static MaybeObject* GetElement(Handle<Object> object,
uint32_t index);
MUST_USE_RESULT static MaybeObject* SetObjectProperty(
+ Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attr,
- StrictModeFlag strict);
+ StrictModeFlag strict_mode);
MUST_USE_RESULT static MaybeObject* ForceSetObjectProperty(
+ Isolate* isolate,
Handle<JSObject> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attr);
MUST_USE_RESULT static MaybeObject* ForceDeleteObjectProperty(
+ Isolate* isolate,
Handle<JSObject> object,
Handle<Object> key);
- MUST_USE_RESULT static MaybeObject* GetObjectProperty(Handle<Object> object,
- Handle<Object> key);
+ MUST_USE_RESULT static MaybeObject* GetObjectProperty(
+ Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> key);
// This function is used in FunctionNameUsing* tests.
- static Object* FindSharedFunctionInfoInScript(Handle<Script> script,
+ static Object* FindSharedFunctionInfoInScript(Isolate* isolate,
+ Handle<Script> script,
int position);
  // Helper functions used by stubs.
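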
static void PerformGC(Object* result);
};
-
} } // namespace v8::internal
#endif // V8_RUNTIME_H_
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index 66d839bec..77b97aed8 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -49,41 +49,47 @@ const $Function = global.Function;
const $Boolean = global.Boolean;
const $NaN = 0/0;
-
-// ECMA-262, section 11.9.1, page 55.
+// ECMA-262 Section 11.9.3.
function EQUALS(y) {
if (IS_STRING(this) && IS_STRING(y)) return %StringEquals(this, y);
var x = this;
- // NOTE: We use iteration instead of recursion, because it is
- // difficult to call EQUALS with the correct setting of 'this' in
- // an efficient way.
while (true) {
if (IS_NUMBER(x)) {
- if (y == null) return 1; // not equal
- return %NumberEquals(x, %ToNumber(y));
+ while (true) {
+ if (IS_NUMBER(y)) return %NumberEquals(x, y);
+ if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
+ if (!IS_SPEC_OBJECT(y)) {
+ // String or boolean.
+ return %NumberEquals(x, %ToNumber(y));
+ }
+ y = %ToPrimitive(y, NO_HINT);
+ }
} else if (IS_STRING(x)) {
- if (IS_STRING(y)) return %StringEquals(x, y);
+ while (true) {
+ if (IS_STRING(y)) return %StringEquals(x, y);
+ if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
+ if (IS_BOOLEAN(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
+ if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
+ y = %ToPrimitive(y, NO_HINT);
+ }
+ } else if (IS_BOOLEAN(x)) {
+ if (IS_BOOLEAN(y)) return %_ObjectEquals(x, y) ? 0 : 1;
+ if (IS_NULL_OR_UNDEFINED(y)) return 1;
if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
- if (IS_BOOLEAN(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
- if (y == null) return 1; // not equal
+ if (IS_STRING(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
+ // y is object.
+ x = %ToNumber(x);
y = %ToPrimitive(y, NO_HINT);
- } else if (IS_BOOLEAN(x)) {
- if (IS_BOOLEAN(y)) {
- return %_ObjectEquals(x, y) ? 0 : 1;
- }
- if (y == null) return 1; // not equal
- return %NumberEquals(%ToNumber(x), %ToNumber(y));
- } else if (x == null) {
- // NOTE: This checks for both null and undefined.
- return (y == null) ? 0 : 1;
+ } else if (IS_NULL_OR_UNDEFINED(x)) {
+ return IS_NULL_OR_UNDEFINED(y) ? 0 : 1;
} else {
- // x is not a number, boolean, null or undefined.
- if (y == null) return 1; // not equal
+ // x is an object.
if (IS_SPEC_OBJECT(y)) {
return %_ObjectEquals(x, y) ? 0 : 1;
}
-
+ if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
+ if (IS_BOOLEAN(y)) y = %ToNumber(y);
x = %ToPrimitive(x, NO_HINT);
}
}
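
Editor's note: the rewritten EQUALS above implements the ES5 11.9.3 abstract-equality algorithm iteratively — each pass of a loop coerces one operand (boolean to number, object to primitive) until both sides are directly comparable, and it returns 0 for "equal" and 1 for "not equal". A compact C++ sketch of that control flow over a toy tagged value; objects and ToPrimitive are omitted and all names are illustrative:

#include <cassert>
#include <string>
#include <variant>

// monostate stands in for null/undefined in this sketch.
using Value = std::variant<std::monostate, double, std::string, bool>;

static double ToNumber(const Value& v) {
  if (auto* d = std::get_if<double>(&v)) return *d;
  if (auto* b = std::get_if<bool>(&v)) return *b ? 1.0 : 0.0;
  if (auto* s = std::get_if<std::string>(&v)) return std::stod(*s);
  return 0.0 / 0.0;  // unreached here; placeholder NaN
}

int Equals(Value x, Value y) {
  while (true) {
    if (std::holds_alternative<double>(x)) {
      if (std::holds_alternative<double>(y))
        return std::get<double>(x) == std::get<double>(y) ? 0 : 1;
      if (std::holds_alternative<std::monostate>(y)) return 1;  // not equal
      y = ToNumber(y);  // string or boolean on the right
    } else if (std::holds_alternative<std::monostate>(x)) {
      return std::holds_alternative<std::monostate>(y) ? 0 : 1;
    } else {
      x = ToNumber(x);  // string or boolean on the left
    }
  }
}

int main() {
  assert(Equals(1.0, std::string("1")) == 0);  // "1" == 1
  assert(Equals(true, 1.0) == 0);              // true == 1
  assert(Equals(Value{}, 0.0) == 1);           // null/undefined != 0
  return 0;
}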
@@ -638,6 +644,6 @@ function DefaultString(x) {
// NOTE: Setting the prototype for Array must take place as early as
// possible due to code generation for array literals. When
// generating code for a array literal a boilerplate array is created
-// that is cloned when running the code. It is essiential that the
+// that is cloned when running the code. It is essential that the
// boilerplate gets the right prototype.
%FunctionSetPrototype($Array, new $Array(0));
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index d2ec54c38..28cf6e64c 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -25,11 +25,14 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
#include "safepoint-table.h"
#include "deoptimizer.h"
#include "disasm.h"
#include "macro-assembler.h"
+#include "zone-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index 8803d06f5..de537f982 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -28,11 +28,10 @@
#ifndef V8_SAFEPOINT_TABLE_H_
#define V8_SAFEPOINT_TABLE_H_
-#include "v8.h"
-
+#include "allocation.h"
#include "heap.h"
+#include "v8memory.h"
#include "zone.h"
-#include "zone-inl.h"
namespace v8 {
namespace internal {
@@ -228,6 +227,14 @@ class SafepointTableBuilder BASE_EMBEDDED {
deoptimization_info_[index].pc_after_gap = pc;
}
+ // Get the end pc offset of the last safepoint, including the code generated
+ // until the end of the gap following it.
+ unsigned GetPcAfterGap() {
+ int index = deoptimization_info_.length();
+ if (index == 0) return 0;
+ return deoptimization_info_[index - 1].pc_after_gap;
+ }
+
// Emit the safepoint table after the body. The number of bits per
// entry must be enough to hold all the pointer indexes.
void Emit(Assembler* assembler, int bits_per_entry);
diff --git a/deps/v8/src/scanner-base.cc b/deps/v8/src/scanner-base.cc
index 80bca4e28..16f8db5a9 100644
--- a/deps/v8/src/scanner-base.cc
+++ b/deps/v8/src/scanner-base.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -35,37 +35,10 @@ namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
-// Character predicates
-
-unibrow::Predicate<IdentifierStart, 128> ScannerConstants::kIsIdentifierStart;
-unibrow::Predicate<IdentifierPart, 128> ScannerConstants::kIsIdentifierPart;
-unibrow::Predicate<unibrow::WhiteSpace, 128> ScannerConstants::kIsWhiteSpace;
-unibrow::Predicate<unibrow::LineTerminator, 128>
- ScannerConstants::kIsLineTerminator;
-
-StaticResource<ScannerConstants::Utf8Decoder> ScannerConstants::utf8_decoder_;
-
-// Compound predicates.
-
-bool ScannerConstants::IsIdentifier(unibrow::CharacterStream* buffer) {
- // Checks whether the buffer contains an identifier (no escape).
- if (!buffer->has_more()) return false;
- if (!kIsIdentifierStart.get(buffer->GetNext())) {
- return false;
- }
- while (buffer->has_more()) {
- if (!kIsIdentifierPart.get(buffer->GetNext())) {
- return false;
- }
- }
- return true;
-}
-
-// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner()
- : octal_pos_(kNoOctalLocation) { }
+Scanner::Scanner(UnicodeCache* unicode_cache)
+ : unicode_cache_(unicode_cache) { }
uc32 Scanner::ScanHexEscape(uc32 c, int length) {
@@ -96,38 +69,30 @@ uc32 Scanner::ScanHexEscape(uc32 c, int length) {
}
-// Octal escapes of the forms '\0xx' and '\xxx' are not a part of
-// ECMA-262. Other JS VMs support them.
-uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
- uc32 x = c - '0';
- int i = 0;
- for (; i < length; i++) {
- int d = c0_ - '0';
- if (d < 0 || d > 7) break;
- int nx = x * 8 + d;
- if (nx >= 256) break;
- x = nx;
- Advance();
- }
- // Anything excelt '\0' is an octal escape sequence, illegal in strict mode.
- // Remember the position of octal escape sequences so that better error
- // can be reported later (in strict mode).
- if (c != '0' || i > 0) {
- octal_pos_ = source_pos() - i - 1; // Already advanced
- }
- return x;
-}
-
// ----------------------------------------------------------------------------
// JavaScriptScanner
-JavaScriptScanner::JavaScriptScanner() : Scanner() {}
+JavaScriptScanner::JavaScriptScanner(UnicodeCache* scanner_constants)
+ : Scanner(scanner_constants), octal_pos_(Location::invalid()) { }
+void JavaScriptScanner::Initialize(UC16CharacterStream* source) {
+ source_ = source;
+ // Need to capture identifiers in order to recognize "get" and "set"
+ // in object literals.
+ Init();
+ // Skip initial whitespace, allowing HTML comment ends just as
+ // after a newline, and scan the first token.
+ has_line_terminator_before_next_ = true;
+ SkipWhiteSpace();
+ Scan();
+}
+
Token::Value JavaScriptScanner::Next() {
current_ = next_;
has_line_terminator_before_next_ = false;
+ has_multiline_comment_before_next_ = false;
Scan();
return current_.token;
}
@@ -151,9 +116,9 @@ bool JavaScriptScanner::SkipWhiteSpace() {
while (true) {
// We treat byte-order marks (BOMs) as whitespace for better
// compatibility with Spidermonkey and other JavaScript engines.
- while (ScannerConstants::kIsWhiteSpace.get(c0_) || IsByteOrderMark(c0_)) {
+ while (unicode_cache_->IsWhiteSpace(c0_) || IsByteOrderMark(c0_)) {
// IsWhiteSpace() includes line terminators!
- if (ScannerConstants::kIsLineTerminator.get(c0_)) {
+ if (unicode_cache_->IsLineTerminator(c0_)) {
// Ignore line terminators, but remember them. This is necessary
// for automatic semicolon insertion.
has_line_terminator_before_next_ = true;
@@ -192,8 +157,8 @@ Token::Value JavaScriptScanner::SkipSingleLineComment() {
// to be part of the single-line comment; it is recognized
// separately by the lexical grammar and becomes part of the
// stream of input elements for the syntactic grammar (see
- // ECMA-262, section 7.4, page 12).
- while (c0_ >= 0 && !ScannerConstants::kIsLineTerminator.get(c0_)) {
+ // ECMA-262, section 7.4).
+ while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
Advance();
}
@@ -208,13 +173,14 @@ Token::Value JavaScriptScanner::SkipMultiLineComment() {
while (c0_ >= 0) {
char ch = c0_;
Advance();
+ if (unicode_cache_->IsLineTerminator(ch)) {
+ // Following ECMA-262, section 7.4, a comment containing
+ // a newline will make the comment count as a line-terminator.
+ has_multiline_comment_before_next_ = true;
+ }
// If we have reached the end of the multi-line comment, we
// consume the '/' and insert a whitespace. This way all
- // multi-line comments are treated as whitespace - even the ones
- // containing line terminators. This contradicts ECMA-262, section
- // 7.4, page 12, that says that multi-line comments containing
- // line terminators should be treated as a line terminator, but it
- // matches the behaviour of SpiderMonkey and KJS.
+ // multi-line comments are treated as whitespace.
if (ch == '*' && c0_ == '/') {
c0_ = ' ';
return Token::WHITESPACE;
@@ -458,7 +424,7 @@ void JavaScriptScanner::Scan() {
break;
default:
- if (ScannerConstants::kIsIdentifierStart.get(c0_)) {
+ if (unicode_cache_->IsIdentifierStart(c0_)) {
token = ScanIdentifierOrKeyword();
} else if (IsDecimalDigit(c0_)) {
token = ScanNumber(false);
@@ -496,6 +462,7 @@ void JavaScriptScanner::SeekForward(int pos) {
// of the end of a function (at the "}" token). It doesn't matter
// whether there was a line terminator in the part we skip.
has_line_terminator_before_next_ = false;
+ has_multiline_comment_before_next_ = false;
}
Scan();
}
@@ -506,7 +473,7 @@ void JavaScriptScanner::ScanEscape() {
Advance();
// Skip escaped newlines.
- if (ScannerConstants::kIsLineTerminator.get(c)) {
+ if (unicode_cache_->IsLineTerminator(c)) {
// Allow CR+LF newlines in multiline string literals.
if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance();
// Allow LF+CR newlines in multiline string literals.
@@ -543,13 +510,38 @@ void JavaScriptScanner::ScanEscape() {
}
+// Octal escapes of the forms '\0xx' and '\xxx' are not a part of
+// ECMA-262. Other JS VMs support them.
+uc32 JavaScriptScanner::ScanOctalEscape(uc32 c, int length) {
+ uc32 x = c - '0';
+ int i = 0;
+ for (; i < length; i++) {
+ int d = c0_ - '0';
+ if (d < 0 || d > 7) break;
+ int nx = x * 8 + d;
+ if (nx >= 256) break;
+ x = nx;
+ Advance();
+ }
+ // Anything except '\0' is an octal escape sequence, illegal in strict mode.
+ // Remember the position of octal escape sequences so that an error
+ // can be reported later (in strict mode).
+ // We don't report the error immediately, because the octal escape can
+ // occur before the "use strict" directive.
+ if (c != '0' || i > 0) {
+ octal_pos_ = Location(source_pos() - i - 1, source_pos() - 1);
+ }
+ return x;
+}
+
+
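Editor's note: ScanOctalEscape moved here (and octal_pos_ became a full Location) so the scanner can decode up to `length` further octal digits, cap the value below 256, and remember where any non-'\0' escape began for a deferred strict-mode error. A standalone sketch of that decoding loop; the helper and struct names are hypothetical:

#include <cassert>

struct OctalScan {
  int value;          // decoded character value, 0..255
  int digits;         // additional digits consumed after the first
  bool strict_error;  // must be reported if "use strict" turns up later
};

// `first` is the digit already consumed; `rest` is the lookahead text.
OctalScan ScanOctal(char first, const char* rest, int length) {
  int x = first - '0';
  int i = 0;
  for (; i < length && rest[i] != '\0'; i++) {
    int d = rest[i] - '0';
    if (d < 0 || d > 7) break;  // non-octal digit ends the escape
    int nx = x * 8 + d;
    if (nx >= 256) break;       // value must stay a single byte
    x = nx;
  }
  // Only a lone '\0' stays legal in strict mode.
  return {x, i, first != '0' || i > 0};
}

int main() {
  OctalScan a = ScanOctal('1', "01", 2);  // the escape "\101"
  assert(a.value == 'A' && a.strict_error);
  OctalScan b = ScanOctal('0', "", 2);    // the escape "\0"
  assert(b.value == 0 && !b.strict_error);
  return 0;
}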
Token::Value JavaScriptScanner::ScanString() {
uc32 quote = c0_;
Advance(); // consume quote
LiteralScope literal(this);
while (c0_ != quote && c0_ >= 0
- && !ScannerConstants::kIsLineTerminator.get(c0_)) {
+ && !unicode_cache_->IsLineTerminator(c0_)) {
uc32 c = c0_;
Advance();
if (c == '\\') {
@@ -587,6 +579,7 @@ Token::Value JavaScriptScanner::ScanNumber(bool seen_period) {
} else {
// if the first character is '0' we must check for octals and hex
if (c0_ == '0') {
+ int start_pos = source_pos(); // For reporting octal positions.
AddLiteralCharAdvance();
// either 0, 0exxx, 0Exxx, 0.xxx, an octal number, or a hex number
@@ -611,7 +604,7 @@ Token::Value JavaScriptScanner::ScanNumber(bool seen_period) {
}
if (c0_ < '0' || '7' < c0_) {
// Octal literal finished.
- octal_pos_ = next_.location.beg_pos;
+ octal_pos_ = Location(start_pos, source_pos());
break;
}
AddLiteralCharAdvance();
@@ -648,7 +641,7 @@ Token::Value JavaScriptScanner::ScanNumber(bool seen_period) {
// not be an identifier start or a decimal digit; see ECMA-262
// section 7.8.3, page 17 (note that we read only one decimal digit
// if the value is 0).
- if (IsDecimalDigit(c0_) || ScannerConstants::kIsIdentifierStart.get(c0_))
+ if (IsDecimalDigit(c0_) || unicode_cache_->IsIdentifierStart(c0_))
return Token::ILLEGAL;
literal.Complete();
@@ -670,14 +663,14 @@ uc32 JavaScriptScanner::ScanIdentifierUnicodeEscape() {
Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
- ASSERT(ScannerConstants::kIsIdentifierStart.get(c0_));
+ ASSERT(unicode_cache_->IsIdentifierStart(c0_));
LiteralScope literal(this);
KeywordMatcher keyword_match;
// Scan identifier start character.
if (c0_ == '\\') {
uc32 c = ScanIdentifierUnicodeEscape();
// Only allow legal identifier start characters.
- if (!ScannerConstants::kIsIdentifierStart.get(c)) return Token::ILLEGAL;
+ if (!unicode_cache_->IsIdentifierStart(c)) return Token::ILLEGAL;
AddLiteralChar(c);
return ScanIdentifierSuffix(&literal);
}
@@ -690,7 +683,7 @@ Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
}
// Scan the rest of the identifier characters.
- while (ScannerConstants::kIsIdentifierPart.get(c0_)) {
+ while (unicode_cache_->IsIdentifierPart(c0_)) {
if (c0_ != '\\') {
uc32 next_char = c0_;
Advance();
@@ -708,11 +701,11 @@ Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
Token::Value JavaScriptScanner::ScanIdentifierSuffix(LiteralScope* literal) {
// Scan the rest of the identifier characters.
- while (ScannerConstants::kIsIdentifierPart.get(c0_)) {
+ while (unicode_cache_->IsIdentifierPart(c0_)) {
if (c0_ == '\\') {
uc32 c = ScanIdentifierUnicodeEscape();
// Only allow legal identifier part characters.
- if (!ScannerConstants::kIsIdentifierPart.get(c)) return Token::ILLEGAL;
+ if (!unicode_cache_->IsIdentifierPart(c)) return Token::ILLEGAL;
AddLiteralChar(c);
} else {
AddLiteralChar(c0_);
@@ -742,10 +735,10 @@ bool JavaScriptScanner::ScanRegExpPattern(bool seen_equal) {
AddLiteralChar('=');
while (c0_ != '/' || in_character_class) {
- if (ScannerConstants::kIsLineTerminator.get(c0_) || c0_ < 0) return false;
+ if (unicode_cache_->IsLineTerminator(c0_) || c0_ < 0) return false;
if (c0_ == '\\') { // Escape sequence.
AddLiteralCharAdvance();
- if (ScannerConstants::kIsLineTerminator.get(c0_) || c0_ < 0) return false;
+ if (unicode_cache_->IsLineTerminator(c0_) || c0_ < 0) return false;
AddLiteralCharAdvance();
// If the escape allows more characters, i.e., \x??, \u????, or \c?,
// only "safe" characters are allowed (letters, digits, underscore),
@@ -754,6 +747,9 @@ bool JavaScriptScanner::ScanRegExpPattern(bool seen_equal) {
// worrying whether the following characters are part of the escape
// or not, since any '/', '\\' or '[' is guaranteed to not be part
// of the escape sequence.
+
+ // TODO(896): At some point, parse RegExps more thoroughly to capture
+ // octal escapes in strict mode.
} else { // Unescaped character.
if (c0_ == '[') in_character_class = true;
if (c0_ == ']') in_character_class = false;
@@ -771,7 +767,7 @@ bool JavaScriptScanner::ScanRegExpPattern(bool seen_equal) {
bool JavaScriptScanner::ScanRegExpFlags() {
// Scan regular expression flags.
LiteralScope literal(this);
- while (ScannerConstants::kIsIdentifierPart.get(c0_)) {
+ while (unicode_cache_->IsIdentifierPart(c0_)) {
if (c0_ == '\\') {
uc32 c = ScanIdentifierUnicodeEscape();
if (c != static_cast<uc32>(unibrow::Utf8::kBadChar)) {
@@ -803,7 +799,7 @@ KeywordMatcher::FirstState KeywordMatcher::first_states_[] = {
{ NULL, I, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
- { "let", KEYWORD_PREFIX, Token::FUTURE_RESERVED_WORD },
+ { "let", KEYWORD_PREFIX, Token::FUTURE_STRICT_RESERVED_WORD },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
{ NULL, N, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
@@ -816,7 +812,7 @@ KeywordMatcher::FirstState KeywordMatcher::first_states_[] = {
{ NULL, V, Token::ILLEGAL },
{ NULL, W, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
- { "yield", KEYWORD_PREFIX, Token::FUTURE_RESERVED_WORD }
+ { "yield", KEYWORD_PREFIX, Token::FUTURE_STRICT_RESERVED_WORD }
};
@@ -853,7 +849,7 @@ void KeywordMatcher::Step(unibrow::uchar input) {
case C:
if (MatchState(input, 'a', CA)) return;
if (MatchKeywordStart(input, "class", 1,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_RESERVED_WORD)) return;
if (MatchState(input, 'o', CO)) return;
break;
case CA:
@@ -879,14 +875,14 @@ void KeywordMatcher::Step(unibrow::uchar input) {
case E:
if (MatchKeywordStart(input, "else", 1, Token::ELSE)) return;
if (MatchKeywordStart(input, "enum", 1,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_RESERVED_WORD)) return;
if (MatchState(input, 'x', EX)) return;
break;
case EX:
if (MatchKeywordStart(input, "export", 2,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_RESERVED_WORD)) return;
if (MatchKeywordStart(input, "extends", 2,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_RESERVED_WORD)) return;
break;
case F:
if (MatchKeywordStart(input, "false", 1, Token::FALSE_LITERAL)) return;
@@ -904,41 +900,40 @@ void KeywordMatcher::Step(unibrow::uchar input) {
break;
case IMP:
if (MatchKeywordStart(input, "implements", 3,
- Token::FUTURE_RESERVED_WORD )) return;
+ Token::FUTURE_STRICT_RESERVED_WORD)) return;
if (MatchKeywordStart(input, "import", 3,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_RESERVED_WORD)) return;
break;
case IN:
token_ = Token::IDENTIFIER;
if (MatchKeywordStart(input, "interface", 2,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_STRICT_RESERVED_WORD)) return;
if (MatchKeywordStart(input, "instanceof", 2, Token::INSTANCEOF)) return;
break;
case N:
- if (MatchKeywordStart(input, "native", 1, Token::NATIVE)) return;
if (MatchKeywordStart(input, "new", 1, Token::NEW)) return;
if (MatchKeywordStart(input, "null", 1, Token::NULL_LITERAL)) return;
break;
case P:
if (MatchKeywordStart(input, "package", 1,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_STRICT_RESERVED_WORD)) return;
if (MatchState(input, 'r', PR)) return;
if (MatchKeywordStart(input, "public", 1,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_STRICT_RESERVED_WORD)) return;
break;
case PR:
if (MatchKeywordStart(input, "private", 2,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_STRICT_RESERVED_WORD)) return;
if (MatchKeywordStart(input, "protected", 2,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_STRICT_RESERVED_WORD)) return;
break;
case S:
if (MatchKeywordStart(input, "static", 1,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_STRICT_RESERVED_WORD)) return;
if (MatchKeywordStart(input, "super", 1,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_RESERVED_WORD)) return;
if (MatchKeywordStart(input, "switch", 1,
- Token::SWITCH)) return;
+ Token::SWITCH)) return;
break;
case T:
if (MatchState(input, 'h', TH)) return;
diff --git a/deps/v8/src/scanner-base.h b/deps/v8/src/scanner-base.h
index f5fe7f7ce..3d67d4e1e 100644
--- a/deps/v8/src/scanner-base.h
+++ b/deps/v8/src/scanner-base.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,14 +30,13 @@
#ifndef V8_SCANNER_BASE_H_
#define V8_SCANNER_BASE_H_
-#include "globals.h"
-#include "checks.h"
#include "allocation.h"
+#include "char-predicates.h"
+#include "checks.h"
+#include "globals.h"
#include "token.h"
#include "unicode-inl.h"
-#include "char-predicates.h"
#include "utils.h"
-#include "list-inl.h"
namespace v8 {
namespace internal {
@@ -119,28 +118,34 @@ class UC16CharacterStream {
};
+class UnicodeCache {
// ---------------------------------------------------------------------
-// Constants used by scanners.
-
-class ScannerConstants : AllStatic {
+// Caching predicates used by scanners.
public:
+ UnicodeCache() {}
typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
- static StaticResource<Utf8Decoder>* utf8_decoder() {
+ StaticResource<Utf8Decoder>* utf8_decoder() {
return &utf8_decoder_;
}
- static unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
- static unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
- static unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
- static unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
-
- static bool IsIdentifier(unibrow::CharacterStream* buffer);
+ bool IsIdentifierStart(unibrow::uchar c) { return kIsIdentifierStart.get(c); }
+ bool IsIdentifierPart(unibrow::uchar c) { return kIsIdentifierPart.get(c); }
+ bool IsLineTerminator(unibrow::uchar c) { return kIsLineTerminator.get(c); }
+ bool IsWhiteSpace(unibrow::uchar c) { return kIsWhiteSpace.get(c); }
private:
- static StaticResource<Utf8Decoder> utf8_decoder_;
+
+ unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
+ unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
+ unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
+ unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
+ StaticResource<Utf8Decoder> utf8_decoder_;
+
+ DISALLOW_COPY_AND_ASSIGN(UnicodeCache);
};
+
// ----------------------------------------------------------------------------
// LiteralBuffer - Collector of chars of literals.
@@ -238,6 +243,8 @@ class LiteralBuffer {
bool is_ascii_;
int position_;
Vector<byte> backing_store_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiteralBuffer);
};
@@ -263,7 +270,7 @@ class Scanner {
bool complete_;
};
- Scanner();
+ explicit Scanner(UnicodeCache* scanner_constants);
// Returns the current token again.
Token::Value current_token() { return current_.token; }
@@ -279,23 +286,17 @@ class Scanner {
return beg_pos >= 0 && end_pos >= beg_pos;
}
+ static Location invalid() { return Location(-1, -1); }
+
int beg_pos;
int end_pos;
};
- static Location NoLocation() {
- return Location(-1, -1);
- }
-
// Returns the location information for the current token
// (the token returned by Next()).
Location location() const { return current_.location; }
Location peek_location() const { return next_.location; }
- // Returns the location of the last seen octal literal
- int octal_position() const { return octal_pos_; }
- void clear_octal_position() { octal_pos_ = -1; }
-
// Returns the literal string, if any, for the current token (the
// token returned by Next()). The string is 0-terminated and in
// UTF-8 format; they may contain 0-characters. Literal strings are
@@ -319,6 +320,16 @@ class Scanner {
return current_.literal_chars->length();
}
+ bool literal_contains_escapes() const {
+ Location location = current_.location;
+ int source_length = (location.end_pos - location.beg_pos);
+ if (current_.token == Token::STRING) {
+ // Subtract delimiters.
+ source_length -= 2;
+ }
+ return current_.literal_chars->length() != source_length;
+ }
+
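Editor's note: literal_contains_escapes() works purely by arithmetic — any escape sequence collects fewer characters into the literal buffer than it occupies in the source, so comparing the two lengths (minus the two quote delimiters for strings) detects escapes without rescanning. A tiny illustration with hypothetical positions:

#include <cassert>
#include <string>

bool ContainsEscapes(int beg_pos, int end_pos, const std::string& literal,
                     bool is_string) {
  int source_length = end_pos - beg_pos;
  if (is_string) source_length -= 2;  // drop the two quote delimiters
  return static_cast<int>(literal.size()) != source_length;
}

int main() {
  // Source "\u0041" is 8 chars with quotes; the collected literal is "A".
  assert(ContainsEscapes(0, 8, "A", true));
  // Source "abc" is 5 chars with quotes; the collected literal is "abc".
  assert(!ContainsEscapes(0, 5, "abc", true));
  return 0;
}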
// Returns the literal string for the next token (the token that
// would be returned if Next() were called).
bool is_next_literal_ascii() {
@@ -410,14 +421,13 @@ class Scanner {
uc32 ScanHexEscape(uc32 c, int length);
- // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
- uc32 ScanOctalEscape(uc32 c, int length);
-
// Return the current source position.
int source_pos() {
return source_->pos() - kCharacterLookaheadBufferSize;
}
+ UnicodeCache* unicode_cache_;
+
// Buffers collecting literal strings, numbers, etc.
LiteralBuffer literal_buffer1_;
LiteralBuffer literal_buffer2_;
@@ -428,9 +438,6 @@ class Scanner {
// Input stream. Must be initialized to an UC16CharacterStream.
UC16CharacterStream* source_;
- // Start position of the octal literal last scanned.
- int octal_pos_;
-
// One Unicode character look-ahead; c0_ < 0 at the end of the input.
uc32 c0_;
};
@@ -462,14 +469,18 @@ class JavaScriptScanner : public Scanner {
bool complete_;
};
- JavaScriptScanner();
+ explicit JavaScriptScanner(UnicodeCache* scanner_constants);
+
+ void Initialize(UC16CharacterStream* source);
// Returns the next token.
Token::Value Next();
- // Returns true if there was a line terminator before the peek'ed token.
- bool has_line_terminator_before_next() const {
- return has_line_terminator_before_next_;
+ // Returns true if there was a line terminator before the peek'ed token,
+ // possibly inside a multi-line comment.
+ bool HasAnyLineTerminatorBeforeNext() const {
+ return has_line_terminator_before_next_ ||
+ has_multiline_comment_before_next_;
}
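
Editor's note: a concrete consequence of combining the two flags here is that a line break hidden inside a block comment now triggers automatic semicolon insertion exactly like a bare newline — `return /* comment spanning a line */ 42;` with a newline inside the comment parses as `return;` followed by the expression statement `42;`, per ECMA-262 section 7.4 as noted in the SkipMultiLineComment change above.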
// Scans the input as a regular expression pattern, previous
@@ -483,6 +494,13 @@ class JavaScriptScanner : public Scanner {
// Used for checking if a property name is an identifier.
static bool IsIdentifier(unibrow::CharacterStream* buffer);
+ // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
+ uc32 ScanOctalEscape(uc32 c, int length);
+
+ // Returns the location of the last seen octal literal.
+ Location octal_position() const { return octal_pos_; }
+ void clear_octal_position() { octal_pos_ = Location::invalid(); }
+
// Seek forward to the given position. This operation does not
// work in general, for instance when there are pushed back
// characters, but works for seeking forward until simple delimiter
@@ -512,7 +530,16 @@ class JavaScriptScanner : public Scanner {
// If the escape sequence cannot be decoded the result is kBadChar.
uc32 ScanIdentifierUnicodeEscape();
+ // Start position of the octal literal last scanned.
+ Location octal_pos_;
+
+ // Whether there is a line terminator whitespace character after
+ // the current token, and before the next. Does not count newlines
+ // inside multiline comments.
bool has_line_terminator_before_next_;
+ // Whether there is a multi-line comment that contains a
+ // line-terminator after the current token, and before the next.
+ bool has_multiline_comment_before_next_;
};
@@ -522,14 +549,26 @@ class JavaScriptScanner : public Scanner {
class KeywordMatcher {
// Incrementally recognize keywords.
//
-// Recognized keywords:
-// break case catch const* continue debugger* default delete do else
-// finally false for function if in instanceof native* new null
-// return switch this throw true try typeof var void while with
+// We distinguish between normal future reserved words and words that are
+// considered to be future reserved words only in strict mode as required by
+// ECMA-262 7.6.1.2.
+//
+// Recognized as keywords:
+// break, case, catch, const*, continue, debugger, default, delete, do,
+// else, finally, false, for, function, if, in, instanceof, new, null,
+// return, switch, this, throw, true, try, typeof, var, void, while, with.
+//
+// Recognized as Future Reserved Keywords:
+// class, enum, export, extends, import, super.
+//
+// Recognized as Future Reserved Keywords (strict mode only):
+// implements, interface, let, package, private, protected, public,
+// static, yield.
+//
+// *: Actually a "future reserved keyword". It's the only one we are
+// recognizing outside of ES5 strict mode, the remaining are allowed
+// as identifiers.
//
-// *: Actually "future reserved keywords". These are the only ones we
-// recognize, the remaining are allowed as identifiers.
-// In ES5 strict mode, we should disallow all reserved keywords.
public:
KeywordMatcher()
: state_(INITIAL),
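
Editor's note: KeywordMatcher consumes a candidate identifier one character at a time and settles on a token without buffering or re-reading. A toy single-keyword version of that incremental idea — V8's real matcher is the table-driven state machine above, and all names here are illustrative:

#include <cassert>

class PrefixMatcher {
 public:
  explicit PrefixMatcher(const char* keyword)
      : keyword_(keyword), pos_(0), failed_(false) {}
  void Step(char c) {
    if (failed_ || keyword_[pos_] != c) { failed_ = true; return; }
    ++pos_;
  }
  bool IsKeyword() const { return !failed_ && keyword_[pos_] == '\0'; }
 private:
  const char* keyword_;
  int pos_;
  bool failed_;
};

int main() {
  PrefixMatcher m("yield");
  for (const char* p = "yield"; *p; ++p) m.Step(*p);
  assert(m.IsKeyword());   // matched: strict-mode-only reserved word now
  PrefixMatcher n("yield");
  for (const char* p = "yields"; *p; ++p) n.Step(*p);
  assert(!n.IsKeyword());  // one character too many: plain identifier
  return 0;
}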
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index d54d9f91f..5919073cd 100755
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -324,263 +324,4 @@ void Scanner::LiteralScope::Complete() {
complete_ = true;
}
-
-// ----------------------------------------------------------------------------
-// V8JavaScriptScanner
-
-V8JavaScriptScanner::V8JavaScriptScanner() : JavaScriptScanner() { }
-
-
-void V8JavaScriptScanner::Initialize(UC16CharacterStream* source) {
- source_ = source;
- // Need to capture identifiers in order to recognize "get" and "set"
- // in object literals.
- Init();
- // Skip initial whitespace allowing HTML comment ends just like
- // after a newline and scan first token.
- has_line_terminator_before_next_ = true;
- SkipWhiteSpace();
- Scan();
-}
-
-
-// ----------------------------------------------------------------------------
-// JsonScanner
-
-JsonScanner::JsonScanner() : Scanner() { }
-
-
-void JsonScanner::Initialize(UC16CharacterStream* source) {
- source_ = source;
- Init();
- // Skip initial whitespace.
- SkipJsonWhiteSpace();
- // Preload first token as look-ahead.
- ScanJson();
-}
-
-
-Token::Value JsonScanner::Next() {
- // BUG 1215673: Find a thread safe way to set a stack limit in
- // pre-parse mode. Otherwise, we cannot safely pre-parse from other
- // threads.
- current_ = next_;
- // Check for stack-overflow before returning any tokens.
- ScanJson();
- return current_.token;
-}
-
-
-bool JsonScanner::SkipJsonWhiteSpace() {
- int start_position = source_pos();
- // JSON WhiteSpace is tab, carrige-return, newline and space.
- while (c0_ == ' ' || c0_ == '\n' || c0_ == '\r' || c0_ == '\t') {
- Advance();
- }
- return source_pos() != start_position;
-}
-
-
-void JsonScanner::ScanJson() {
- next_.literal_chars = NULL;
- Token::Value token;
- do {
- // Remember the position of the next token
- next_.location.beg_pos = source_pos();
- switch (c0_) {
- case '\t':
- case '\r':
- case '\n':
- case ' ':
- Advance();
- token = Token::WHITESPACE;
- break;
- case '{':
- Advance();
- token = Token::LBRACE;
- break;
- case '}':
- Advance();
- token = Token::RBRACE;
- break;
- case '[':
- Advance();
- token = Token::LBRACK;
- break;
- case ']':
- Advance();
- token = Token::RBRACK;
- break;
- case ':':
- Advance();
- token = Token::COLON;
- break;
- case ',':
- Advance();
- token = Token::COMMA;
- break;
- case '"':
- token = ScanJsonString();
- break;
- case '-':
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- token = ScanJsonNumber();
- break;
- case 't':
- token = ScanJsonIdentifier("true", Token::TRUE_LITERAL);
- break;
- case 'f':
- token = ScanJsonIdentifier("false", Token::FALSE_LITERAL);
- break;
- case 'n':
- token = ScanJsonIdentifier("null", Token::NULL_LITERAL);
- break;
- default:
- if (c0_ < 0) {
- Advance();
- token = Token::EOS;
- } else {
- Advance();
- token = Select(Token::ILLEGAL);
- }
- }
- } while (token == Token::WHITESPACE);
-
- next_.location.end_pos = source_pos();
- next_.token = token;
-}
-
-
-Token::Value JsonScanner::ScanJsonString() {
- ASSERT_EQ('"', c0_);
- Advance();
- LiteralScope literal(this);
- while (c0_ != '"') {
- // Check for control character (0x00-0x1f) or unterminated string (<0).
- if (c0_ < 0x20) return Token::ILLEGAL;
- if (c0_ != '\\') {
- AddLiteralCharAdvance();
- } else {
- Advance();
- switch (c0_) {
- case '"':
- case '\\':
- case '/':
- AddLiteralChar(c0_);
- break;
- case 'b':
- AddLiteralChar('\x08');
- break;
- case 'f':
- AddLiteralChar('\x0c');
- break;
- case 'n':
- AddLiteralChar('\x0a');
- break;
- case 'r':
- AddLiteralChar('\x0d');
- break;
- case 't':
- AddLiteralChar('\x09');
- break;
- case 'u': {
- uc32 value = 0;
- for (int i = 0; i < 4; i++) {
- Advance();
- int digit = HexValue(c0_);
- if (digit < 0) {
- return Token::ILLEGAL;
- }
- value = value * 16 + digit;
- }
- AddLiteralChar(value);
- break;
- }
- default:
- return Token::ILLEGAL;
- }
- Advance();
- }
- }
- literal.Complete();
- Advance();
- return Token::STRING;
-}
-
-
-Token::Value JsonScanner::ScanJsonNumber() {
- LiteralScope literal(this);
- bool negative = false;
-
- if (c0_ == '-') {
- AddLiteralCharAdvance();
- negative = true;
- }
- if (c0_ == '0') {
- AddLiteralCharAdvance();
- // Prefix zero is only allowed if it's the only digit before
- // a decimal point or exponent.
- if ('0' <= c0_ && c0_ <= '9') return Token::ILLEGAL;
- } else {
- int i = 0;
- int digits = 0;
- if (c0_ < '1' || c0_ > '9') return Token::ILLEGAL;
- do {
- i = i * 10 + c0_ - '0';
- digits++;
- AddLiteralCharAdvance();
- } while (c0_ >= '0' && c0_ <= '9');
- if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
- number_ = (negative ? -i : i);
- return Token::NUMBER;
- }
- }
- if (c0_ == '.') {
- AddLiteralCharAdvance();
- if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
- do {
- AddLiteralCharAdvance();
- } while (c0_ >= '0' && c0_ <= '9');
- }
- if (AsciiAlphaToLower(c0_) == 'e') {
- AddLiteralCharAdvance();
- if (c0_ == '-' || c0_ == '+') AddLiteralCharAdvance();
- if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
- do {
- AddLiteralCharAdvance();
- } while (c0_ >= '0' && c0_ <= '9');
- }
- literal.Complete();
- ASSERT_NOT_NULL(next_.literal_chars);
- number_ = StringToDouble(next_.literal_chars->ascii_literal(),
- NO_FLAGS, // Hex, octal or trailing junk.
- OS::nan_value());
- return Token::NUMBER;
-}
-
-
-Token::Value JsonScanner::ScanJsonIdentifier(const char* text,
- Token::Value token) {
- LiteralScope literal(this);
- while (*text != '\0') {
- if (c0_ != *text) return Token::ILLEGAL;
- Advance();
- text++;
- }
- if (ScannerConstants::kIsIdentifierPart.get(c0_)) return Token::ILLEGAL;
- literal.Complete();
- return token;
-}
-
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index cf2084f55..e66dd60d8 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -126,69 +126,6 @@ class ExternalTwoByteStringUC16CharacterStream: public UC16CharacterStream {
const uc16* raw_data_; // Pointer to the actual array of characters.
};
-
-// ----------------------------------------------------------------------------
-// V8JavaScriptScanner
-// JavaScript scanner getting its input from either a V8 String or a unicode
-// CharacterStream.
-
-class V8JavaScriptScanner : public JavaScriptScanner {
- public:
- V8JavaScriptScanner();
- void Initialize(UC16CharacterStream* source);
-};
-
-
-class JsonScanner : public Scanner {
- public:
- JsonScanner();
-
- void Initialize(UC16CharacterStream* source);
-
- // Returns the next token.
- Token::Value Next();
-
- // Returns the value of a number token.
- double number() {
- return number_;
- }
-
-
- protected:
- // Skip past JSON whitespace (only space, tab, newline and carrige-return).
- bool SkipJsonWhiteSpace();
-
- // Scan a single JSON token. The JSON lexical grammar is specified in the
- // ECMAScript 5 standard, section 15.12.1.1.
- // Recognizes all of the single-character tokens directly, or calls a function
- // to scan a number, string or identifier literal.
- // The only allowed whitespace characters between tokens are tab,
- // carriage-return, newline and space.
- void ScanJson();
-
- // A JSON number (production JSONNumber) is a subset of the valid JavaScript
- // decimal number literals.
- // It includes an optional minus sign, must have at least one
- // digit before and after a decimal point, may not have prefixed zeros (unless
- // the integer part is zero), and may include an exponent part (e.g., "e-10").
- // Hexadecimal and octal numbers are not allowed.
- Token::Value ScanJsonNumber();
-
- // A JSON string (production JSONString) is subset of valid JavaScript string
- // literals. The string must only be double-quoted (not single-quoted), and
- // the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
- // four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
- Token::Value ScanJsonString();
-
- // Used to recognizes one of the literals "true", "false", or "null". These
- // are the only valid JSON identifiers (productions JSONBooleanLiteral,
- // JSONNullLiteral).
- Token::Value ScanJsonIdentifier(const char* text, Token::Value token);
-
- // Holds the value of a scanned number token.
- double number_;
-};
-
} } // namespace v8::internal
#endif // V8_SCANNER_H_
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index e06235af7..ccc2cc820 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -50,8 +50,9 @@ static int CompareLocal(Variable* const* v, Variable* const* w) {
template<class Allocator>
ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
- : function_name_(Factory::empty_symbol()),
+ : function_name_(FACTORY->empty_symbol()),
calls_eval_(scope->calls_eval()),
+ is_strict_mode_(scope->is_strict_mode()),
parameters_(scope->num_parameters()),
stack_slots_(scope->num_stack_slots()),
context_slots_(scope->num_heap_slots()),
@@ -141,7 +142,7 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
context_slots_.length());
ASSERT(var->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
context_modes_.length());
- context_slots_.Add(Factory::empty_symbol());
+ context_slots_.Add(FACTORY->empty_symbol());
context_modes_.Add(Variable::INTERNAL);
}
}
@@ -238,7 +239,7 @@ static Object** ReadList(Object** p,
template<class Allocator>
ScopeInfo<Allocator>::ScopeInfo(SerializedScopeInfo* data)
- : function_name_(Factory::empty_symbol()),
+ : function_name_(FACTORY->empty_symbol()),
parameters_(4),
stack_slots_(8),
context_slots_(8),
@@ -248,6 +249,7 @@ ScopeInfo<Allocator>::ScopeInfo(SerializedScopeInfo* data)
Object** p = p0;
p = ReadSymbol(p, &function_name_);
p = ReadBool(p, &calls_eval_);
+ p = ReadBool(p, &is_strict_mode_);
p = ReadList<Allocator>(p, &context_slots_, &context_modes_);
p = ReadList<Allocator>(p, &parameters_);
p = ReadList<Allocator>(p, &stack_slots_);
@@ -301,21 +303,22 @@ static Object** WriteList(Object** p,
template<class Allocator>
Handle<SerializedScopeInfo> ScopeInfo<Allocator>::Serialize() {
- // function name, calls eval, length for 3 tables:
- const int extra_slots = 1 + 1 + 3;
+ // function name, calls eval, is_strict_mode, length for 3 tables:
+ const int extra_slots = 1 + 1 + 1 + 3;
int length = extra_slots +
context_slots_.length() * 2 +
parameters_.length() +
stack_slots_.length();
Handle<SerializedScopeInfo> data(
- SerializedScopeInfo::cast(*Factory::NewFixedArray(length, TENURED)));
+ SerializedScopeInfo::cast(*FACTORY->NewFixedArray(length, TENURED)));
AssertNoAllocation nogc;
Object** p0 = data->data_start();
Object** p = p0;
p = WriteSymbol(p, function_name_);
p = WriteBool(p, calls_eval_);
+ p = WriteBool(p, is_strict_mode_);
p = WriteList(p, &context_slots_, &context_modes_);
p = WriteList(p, &parameters_);
p = WriteList(p, &stack_slots_);
@@ -357,13 +360,14 @@ Handle<SerializedScopeInfo> SerializedScopeInfo::Create(Scope* scope) {
SerializedScopeInfo* SerializedScopeInfo::Empty() {
- return reinterpret_cast<SerializedScopeInfo*>(Heap::empty_fixed_array());
+ return reinterpret_cast<SerializedScopeInfo*>(HEAP->empty_fixed_array());
}
Object** SerializedScopeInfo::ContextEntriesAddr() {
ASSERT(length() > 0);
- return data_start() + 2; // +2 for function name and calls eval.
+ // +3 for function name, calls eval, strict mode.
+ return data_start() + 3;
}
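
Editor's note: the serialized layout behind these offset changes is slot 0 for the function name, slot 1 for the calls-eval flag, and — new in this patch — slot 2 for the strict-mode flag, with the three variable tables following. A schematic of the header; the enumerator names are illustrative:

#include <cassert>

enum HeaderSlot {
  kFunctionName = 0,
  kCallsEval = 1,
  kIsStrictMode = 2,  // the slot this patch inserts
  kHeaderSize = 3     // context entries start at this offset
};

int main() {
  // IsStrictMode() reads slot 2; ContextEntriesAddr() skips the header.
  assert(kIsStrictMode == 2 && kHeaderSize == 3);
  return 0;
}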
@@ -392,7 +396,18 @@ bool SerializedScopeInfo::CallsEval() {
p = ReadBool(p, &calls_eval);
return calls_eval;
}
- return true;
+ return false;
+}
+
+
+bool SerializedScopeInfo::IsStrictMode() {
+ if (length() > 0) {
+ Object** p = data_start() + 2; // +2 for function name, calls eval.
+ bool strict_mode;
+ p = ReadBool(p, &strict_mode);
+ return strict_mode;
+ }
+ return false;
}
@@ -448,7 +463,8 @@ int SerializedScopeInfo::StackSlotIndex(String* name) {
int SerializedScopeInfo::ContextSlotIndex(String* name, Variable::Mode* mode) {
ASSERT(name->IsSymbol());
- int result = ContextSlotCache::Lookup(this, name, mode);
+ Isolate* isolate = GetIsolate();
+ int result = isolate->context_slot_cache()->Lookup(this, name, mode);
if (result != ContextSlotCache::kNotFound) return result;
if (length() > 0) {
// Slots start after length entry.
@@ -465,13 +481,13 @@ int SerializedScopeInfo::ContextSlotIndex(String* name, Variable::Mode* mode) {
Variable::Mode mode_value = static_cast<Variable::Mode>(v);
if (mode != NULL) *mode = mode_value;
result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
- ContextSlotCache::Update(this, name, mode_value, result);
+ isolate->context_slot_cache()->Update(this, name, mode_value, result);
return result;
}
p += 2;
}
}
- ContextSlotCache::Update(this, name, Variable::INTERNAL, -1);
+ isolate->context_slot_cache()->Update(this, name, Variable::INTERNAL, -1);
return -1;
}
@@ -547,7 +563,7 @@ void ContextSlotCache::Update(Object* data,
int slot_index) {
String* symbol;
ASSERT(slot_index > kNotFound);
- if (Heap::LookupSymbolIfExists(name, &symbol)) {
+ if (HEAP->LookupSymbolIfExists(name, &symbol)) {
int index = Hash(data, symbol);
Key& key = keys_[index];
key.data = data;
@@ -566,12 +582,6 @@ void ContextSlotCache::Clear() {
}
-ContextSlotCache::Key ContextSlotCache::keys_[ContextSlotCache::kLength];
-
-
-uint32_t ContextSlotCache::values_[ContextSlotCache::kLength];
-
-
#ifdef DEBUG
void ContextSlotCache::ValidateEntry(Object* data,
@@ -579,7 +589,7 @@ void ContextSlotCache::ValidateEntry(Object* data,
Variable::Mode mode,
int slot_index) {
String* symbol;
- if (Heap::LookupSymbolIfExists(name, &symbol)) {
+ if (HEAP->LookupSymbolIfExists(name, &symbol)) {
int index = Hash(data, name);
Key& key = keys_[index];
ASSERT(key.data == data);
diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/scopeinfo.h
index dd49a4e08..86c33f61f 100644
--- a/deps/v8/src/scopeinfo.h
+++ b/deps/v8/src/scopeinfo.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,7 @@
#ifndef V8_SCOPEINFO_H_
#define V8_SCOPEINFO_H_
+#include "allocation.h"
#include "variables.h"
#include "zone-inl.h"
@@ -92,6 +93,7 @@ class ScopeInfo BASE_EMBEDDED {
private:
Handle<String> function_name_;
bool calls_eval_;
+ bool is_strict_mode_;
List<Handle<String>, Allocator > parameters_;
List<Handle<String>, Allocator > stack_slots_;
List<Handle<String>, Allocator > context_slots_;
@@ -112,10 +114,8 @@ class SerializedScopeInfo : public FixedArray {
// Does this scope call eval?
bool CallsEval();
- // Does this scope have an arguments shadow?
- bool HasArgumentsShadow() {
- return StackSlotIndex(Heap::arguments_shadow_symbol()) >= 0;
- }
+ // Is this scope a strict mode scope?
+ bool IsStrictMode();
// Return the number of stack slots for code.
int NumberOfStackSlots();
@@ -173,28 +173,36 @@ class ContextSlotCache {
public:
// Lookup context slot index for (data, name).
// If absent, kNotFound is returned.
- static int Lookup(Object* data,
- String* name,
- Variable::Mode* mode);
+ int Lookup(Object* data,
+ String* name,
+ Variable::Mode* mode);
// Update an element in the cache.
- static void Update(Object* data,
- String* name,
- Variable::Mode mode,
- int slot_index);
+ void Update(Object* data,
+ String* name,
+ Variable::Mode mode,
+ int slot_index);
// Clear the cache.
- static void Clear();
+ void Clear();
static const int kNotFound = -2;
private:
+ ContextSlotCache() {
+ for (int i = 0; i < kLength; ++i) {
+ keys_[i].data = NULL;
+ keys_[i].name = NULL;
+ values_[i] = kNotFound;
+ }
+ }
+
inline static int Hash(Object* data, String* name);
#ifdef DEBUG
- static void ValidateEntry(Object* data,
- String* name,
- Variable::Mode mode,
- int slot_index);
+ void ValidateEntry(Object* data,
+ String* name,
+ Variable::Mode mode,
+ int slot_index);
#endif
static const int kLength = 256;
@@ -212,7 +220,7 @@ class ContextSlotCache {
ASSERT(index == this->index());
}
- inline Value(uint32_t value) : value_(value) {}
+ explicit inline Value(uint32_t value) : value_(value) {}
uint32_t raw() { return value_; }
@@ -228,8 +236,11 @@ class ContextSlotCache {
uint32_t value_;
};
- static Key keys_[kLength];
- static uint32_t values_[kLength];
+ Key keys_[kLength];
+ uint32_t values_[kLength];
+
+ friend class Isolate;
+ DISALLOW_COPY_AND_ASSIGN(ContextSlotCache);
};
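
Editor's note: ContextSlotCache is a direct-mapped, fixed-size memo table — hash (data, name) to one of 256 entries, return the cached slot index on an exact key match, and overwrite the entry after any slow-path lookup. A minimal sketch of that structure with hypothetical raw-integer keys standing in for the object pointers:

#include <cassert>
#include <cstdint>

class SlotCache {
 public:
  static constexpr int kNotFound = -2;
  SlotCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].data = 0;
      keys_[i].name = 0;
      values_[i] = kNotFound;
    }
  }
  int Lookup(std::uintptr_t data, std::uintptr_t name) const {
    int i = Hash(data, name);
    if (keys_[i].data == data && keys_[i].name == name) return values_[i];
    return kNotFound;
  }
  void Update(std::uintptr_t data, std::uintptr_t name, int slot_index) {
    int i = Hash(data, name);  // direct-mapped: collisions just overwrite
    keys_[i].data = data;
    keys_[i].name = name;
    values_[i] = slot_index;
  }
 private:
  static constexpr int kLength = 256;
  static int Hash(std::uintptr_t data, std::uintptr_t name) {
    return static_cast<int>((data ^ name) & (kLength - 1));
  }
  struct Key { std::uintptr_t data, name; };
  Key keys_[kLength];
  int values_[kLength];
};

int main() {
  SlotCache cache;
  assert(cache.Lookup(0x1000, 0x2000) == SlotCache::kNotFound);
  cache.Update(0x1000, 0x2000, 7);  // slow path found slot 7; memoize it
  assert(cache.Lookup(0x1000, 0x2000) == 7);
  return 0;
}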
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 6a720f5f0..74d0c2a2e 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -40,12 +40,14 @@ namespace internal {
// ----------------------------------------------------------------------------
// A Zone allocator for use with LocalsMap.
+// TODO(isolates): It is probably worth it to change the Allocator class to
+// take a pointer to an isolate.
class ZoneAllocator: public Allocator {
public:
/* nothing to do */
virtual ~ZoneAllocator() {}
- virtual void* New(size_t size) { return Zone::New(static_cast<int>(size)); }
+ virtual void* New(size_t size) { return ZONE->New(static_cast<int>(size)); }
/* ignored - Zone is freed in one fell swoop */
virtual void Delete(void* p) {}
@@ -155,24 +157,6 @@ Scope::Scope(Scope* inner_scope, Handle<SerializedScopeInfo> scope_info)
}
AddInnerScope(inner_scope);
-
- // This scope's arguments shadow (if present) is context-allocated if an inner
- // scope accesses this one's parameters. Allocate the arguments_shadow_
- // variable if necessary.
- Variable::Mode mode;
- int arguments_shadow_index =
- scope_info_->ContextSlotIndex(Heap::arguments_shadow_symbol(), &mode);
- if (arguments_shadow_index >= 0) {
- ASSERT(mode == Variable::INTERNAL);
- arguments_shadow_ = new Variable(this,
- Factory::arguments_shadow_symbol(),
- Variable::INTERNAL,
- true,
- Variable::ARGUMENTS);
- arguments_shadow_->set_rewrite(
- new Slot(arguments_shadow_, Slot::CONTEXT, arguments_shadow_index));
- arguments_shadow_->set_is_used(true);
- }
}
@@ -181,20 +165,23 @@ void Scope::SetDefaults(Type type,
Handle<SerializedScopeInfo> scope_info) {
outer_scope_ = outer_scope;
type_ = type;
- scope_name_ = Factory::empty_symbol();
+ scope_name_ = FACTORY->empty_symbol();
dynamics_ = NULL;
receiver_ = NULL;
function_ = NULL;
arguments_ = NULL;
- arguments_shadow_ = NULL;
illegal_redecl_ = NULL;
scope_inside_with_ = false;
scope_contains_with_ = false;
scope_calls_eval_ = false;
+ // Inherit the strict mode from the parent scope.
+ strict_mode_ = (outer_scope != NULL) && outer_scope->strict_mode_;
outer_scope_calls_eval_ = false;
+ outer_scope_calls_non_strict_eval_ = false;
inner_scope_calls_eval_ = false;
outer_scope_is_eval_scope_ = false;
force_eager_compilation_ = false;
+ num_var_or_const_ = 0;
num_stack_slots_ = 0;
num_heap_slots_ = 0;
scope_info_ = scope_info;
@@ -238,7 +225,7 @@ bool Scope::Analyze(CompilationInfo* info) {
top->AllocateVariables(info->calling_context());
#ifdef DEBUG
- if (Bootstrapper::IsActive()
+ if (info->isolate()->bootstrapper()->IsActive()
? FLAG_print_builtin_scopes
: FLAG_print_scopes) {
info->function()->scope()->Print();
@@ -270,7 +257,7 @@ void Scope::Initialize(bool inside_with) {
// such parameter is 'this' which is passed on the stack when
// invoking scripts
Variable* var =
- variables_.Declare(this, Factory::this_symbol(), Variable::VAR,
+ variables_.Declare(this, FACTORY->this_symbol(), Variable::VAR,
false, Variable::THIS);
var->set_rewrite(new Slot(var, Slot::PARAMETER, -1));
receiver_ = var;
@@ -279,7 +266,7 @@ void Scope::Initialize(bool inside_with) {
// Declare 'arguments' variable which exists in all functions.
// Note that it might never be accessed, in which case it won't be
// allocated during variable allocation.
- variables_.Declare(this, Factory::arguments_symbol(), Variable::VAR,
+ variables_.Declare(this, FACTORY->arguments_symbol(), Variable::VAR,
true, Variable::ARGUMENTS);
}
}
@@ -290,52 +277,33 @@ Variable* Scope::LocalLookup(Handle<String> name) {
if (result != NULL || !resolved()) {
return result;
}
- // If the scope is resolved, we can find a variable in serialized scope info.
-
- // We should never lookup 'arguments' in this scope
- // as it is implicitly present in any scope.
- ASSERT(*name != *Factory::arguments_symbol());
-
- // Assert that there is no local slot with the given name.
+ // If the scope is resolved, we can find a variable in serialized scope
+ // info.
+ //
+ // We should never lookup 'arguments' in this scope as it is implicitly
+ // present in every scope.
+ ASSERT(*name != *FACTORY->arguments_symbol());
+ // There should be no local slot with the given name.
ASSERT(scope_info_->StackSlotIndex(*name) < 0);
// Check context slot lookup.
Variable::Mode mode;
int index = scope_info_->ContextSlotIndex(*name, &mode);
- if (index >= 0) {
- Variable* var =
- variables_.Declare(this, name, mode, true, Variable::NORMAL);
- var->set_rewrite(new Slot(var, Slot::CONTEXT, index));
- return var;
- }
-
- index = scope_info_->ParameterIndex(*name);
- if (index >= 0) {
- // ".arguments" must be present in context slots.
- ASSERT(arguments_shadow_ != NULL);
- Variable* var =
- variables_.Declare(this, name, Variable::VAR, true, Variable::NORMAL);
- Property* rewrite =
- new Property(new VariableProxy(arguments_shadow_),
- new Literal(Handle<Object>(Smi::FromInt(index))),
- RelocInfo::kNoPosition,
- Property::SYNTHETIC);
- rewrite->set_is_arguments_access(true);
- var->set_rewrite(rewrite);
- return var;
- }
-
- index = scope_info_->FunctionContextSlotIndex(*name);
- if (index >= 0) {
- // Check that there is no local slot with the given name.
- ASSERT(scope_info_->StackSlotIndex(*name) < 0);
- Variable* var =
- variables_.Declare(this, name, Variable::VAR, true, Variable::NORMAL);
- var->set_rewrite(new Slot(var, Slot::CONTEXT, index));
- return var;
+ if (index < 0) {
+ // Check parameters.
+ mode = Variable::VAR;
+ index = scope_info_->ParameterIndex(*name);
+ if (index < 0) {
+ // Check the function name.
+ index = scope_info_->FunctionContextSlotIndex(*name);
+ if (index < 0) return NULL;
+ }
}
- return NULL;
+ Variable* var =
+ variables_.Declare(this, name, mode, true, Variable::NORMAL);
+ var->set_rewrite(new Slot(var, Slot::CONTEXT, index));
+ return var;
}
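
Editor's note: the refactored lookup above collapses three near-identical branches into one fallthrough chain — try context slots first, then parameters, then the function-name slot, and let the first non-negative index share a single variable-declaration tail. The order in a nutshell, with hypothetical indices:

#include <cassert>

int LookupIndex(int context_slot, int parameter, int function_slot) {
  int index = context_slot;
  if (index < 0) index = parameter;
  if (index < 0) index = function_slot;
  return index;  // still negative means "not found"
}

int main() {
  assert(LookupIndex(-1, 2, -1) == 2);    // found among the parameters
  assert(LookupIndex(-1, -1, -1) == -1);  // nowhere: caller returns NULL
  return 0;
}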
@@ -357,12 +325,22 @@ Variable* Scope::DeclareFunctionVar(Handle<String> name) {
}
+void Scope::DeclareParameter(Handle<String> name) {
+ ASSERT(!resolved());
+ ASSERT(is_function_scope());
+ Variable* var =
+ variables_.Declare(this, name, Variable::VAR, true, Variable::NORMAL);
+ params_.Add(var);
+}
+
+
Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
- // DYNAMIC variables are introduces during variable allocation,
- // INTERNAL variables are allocated explicitly, and TEMPORARY
- // variables are allocated via NewTemporary().
ASSERT(!resolved());
+ // This function handles VAR and CONST modes. DYNAMIC variables are
+ // introduced during variable allocation, INTERNAL variables are allocated
+ // explicitly, and TEMPORARY variables are allocated via NewTemporary().
ASSERT(mode == Variable::VAR || mode == Variable::CONST);
+ ++num_var_or_const_;
return variables_.Declare(this, name, mode, true, Variable::NORMAL);
}
@@ -374,19 +352,14 @@ Variable* Scope::DeclareGlobal(Handle<String> name) {
}
-void Scope::AddParameter(Variable* var) {
- ASSERT(is_function_scope());
- ASSERT(LocalLookup(var->name()) == var);
- params_.Add(var);
-}
-
-
-VariableProxy* Scope::NewUnresolved(Handle<String> name, bool inside_with) {
+VariableProxy* Scope::NewUnresolved(Handle<String> name,
+ bool inside_with,
+ int position) {
// Note that we must not share the unresolved variables with
// the same name because they may be removed selectively via
// RemoveUnresolved().
ASSERT(!resolved());
- VariableProxy* proxy = new VariableProxy(name, false, inside_with);
+ VariableProxy* proxy = new VariableProxy(name, false, inside_with, position);
unresolved_.Add(proxy);
return proxy;
}
@@ -474,8 +447,17 @@ void Scope::AllocateVariables(Handle<Context> context) {
// and assume they may invoke eval themselves. Eventually we could capture
// this information in the ScopeInfo and then use it here (by traversing
// the call chain stack, at compile time).
+
bool eval_scope = is_eval_scope();
- PropagateScopeInfo(eval_scope, eval_scope);
+ bool outer_scope_calls_eval = false;
+ bool outer_scope_calls_non_strict_eval = false;
+ if (!is_global_scope()) {
+ context->ComputeEvalScopeInfo(&outer_scope_calls_eval,
+ &outer_scope_calls_non_strict_eval);
+ }
+ PropagateScopeInfo(outer_scope_calls_eval,
+ outer_scope_calls_non_strict_eval,
+ eval_scope);
// 2) Resolve variables.
Scope* global_scope = NULL;
@@ -607,10 +589,14 @@ void Scope::Print(int n) {
if (HasTrivialOuterContext()) {
Indent(n1, "// scope has trivial outer context\n");
}
+ if (is_strict_mode()) Indent(n1, "// strict mode scope\n");
if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
if (outer_scope_calls_eval_) Indent(n1, "// outer scope calls 'eval'\n");
+ if (outer_scope_calls_non_strict_eval_) {
+ Indent(n1, "// outer scope calls 'eval' in non-strict context\n");
+ }
if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
if (outer_scope_is_eval_scope_) {
Indent(n1, "// outer scope is 'eval' scope\n");
@@ -837,20 +823,30 @@ void Scope::ResolveVariablesRecursively(Scope* global_scope,
bool Scope::PropagateScopeInfo(bool outer_scope_calls_eval,
+ bool outer_scope_calls_non_strict_eval,
bool outer_scope_is_eval_scope) {
if (outer_scope_calls_eval) {
outer_scope_calls_eval_ = true;
}
+ if (outer_scope_calls_non_strict_eval) {
+ outer_scope_calls_non_strict_eval_ = true;
+ }
+
if (outer_scope_is_eval_scope) {
outer_scope_is_eval_scope_ = true;
}
bool calls_eval = scope_calls_eval_ || outer_scope_calls_eval_;
bool is_eval = is_eval_scope() || outer_scope_is_eval_scope_;
+ bool calls_non_strict_eval =
+ (scope_calls_eval_ && !is_strict_mode()) ||
+ outer_scope_calls_non_strict_eval_;
for (int i = 0; i < inner_scopes_.length(); i++) {
Scope* inner_scope = inner_scopes_[i];
- if (inner_scope->PropagateScopeInfo(calls_eval, is_eval)) {
+ if (inner_scope->PropagateScopeInfo(calls_eval,
+ calls_non_strict_eval,
+ is_eval)) {
inner_scope_calls_eval_ = true;
}
if (inner_scope->force_eager_compilation_) {
@@ -893,7 +889,7 @@ bool Scope::MustAllocateInContext(Variable* var) {
bool Scope::HasArgumentsParameter() {
for (int i = 0; i < params_.length(); i++) {
- if (params_[i]->name().is_identical_to(Factory::arguments_symbol()))
+ if (params_[i]->name().is_identical_to(FACTORY->arguments_symbol()))
return true;
}
return false;
@@ -912,104 +908,53 @@ void Scope::AllocateHeapSlot(Variable* var) {
void Scope::AllocateParameterLocals() {
ASSERT(is_function_scope());
- Variable* arguments = LocalLookup(Factory::arguments_symbol());
+ Variable* arguments = LocalLookup(FACTORY->arguments_symbol());
ASSERT(arguments != NULL); // functions have 'arguments' declared implicitly
+
+ bool uses_nonstrict_arguments = false;
+
if (MustAllocate(arguments) && !HasArgumentsParameter()) {
// 'arguments' is used. Unless there is also a parameter called
- // 'arguments', we must be conservative and access all parameters via
- // the arguments object: The i'th parameter is rewritten into
- // '.arguments[i]' (*). If we have a parameter named 'arguments', a
- // (new) value is always assigned to it via the function
- // invocation. Then 'arguments' denotes that specific parameter value
- // and cannot be used to access the parameters, which is why we don't
- // need to rewrite in that case.
- //
- // (*) Instead of having a parameter called 'arguments', we may have an
- // assignment to 'arguments' in the function body, at some arbitrary
- // point in time (possibly through an 'eval()' call!). After that
- // assignment any re-write of parameters would be invalid (was bug
- // 881452). Thus, we introduce a shadow '.arguments'
- // variable which also points to the arguments object. For rewrites we
- // use '.arguments' which remains valid even if we assign to
- // 'arguments'. To summarize: If we need to rewrite, we allocate an
- // 'arguments' object dynamically upon function invocation. The compiler
- // introduces 2 local variables 'arguments' and '.arguments', both of
- // which originally point to the arguments object that was
- // allocated. All parameters are rewritten into property accesses via
- // the '.arguments' variable. Thus, any changes to properties of
- // 'arguments' are reflected in the variables and vice versa. If the
- // 'arguments' variable is changed, '.arguments' still points to the
- // correct arguments object and the rewrites still work.
+ // 'arguments', we must be conservative and allocate all parameters to
+ // the context assuming they will be captured by the arguments object.
+ // If we have a parameter named 'arguments', a (new) value is always
+ // assigned to it via the function invocation. Then 'arguments' denotes
+ // that specific parameter value and cannot be used to access the
+ // parameters, which is why we don't need to allocate an arguments
+ // object in that case.
// We are using 'arguments'. Tell the code generator that it needs to
// allocate the arguments object by setting 'arguments_'.
arguments_ = arguments;
- // We also need the '.arguments' shadow variable. Declare it and create
- // and bind the corresponding proxy. It's ok to declare it only now
- // because it's a local variable that is allocated after the parameters
- // have been allocated.
- //
- // Note: This is "almost" at temporary variable but we cannot use
- // NewTemporary() because the mode needs to be INTERNAL since this
- // variable may be allocated in the heap-allocated context (temporaries
- // are never allocated in the context).
- arguments_shadow_ = new Variable(this,
- Factory::arguments_shadow_symbol(),
- Variable::INTERNAL,
- true,
- Variable::ARGUMENTS);
- arguments_shadow_->set_is_used(true);
- temps_.Add(arguments_shadow_);
-
- // Allocate the parameters by rewriting them into '.arguments[i]' accesses.
- for (int i = 0; i < params_.length(); i++) {
- Variable* var = params_[i];
- ASSERT(var->scope() == this);
- if (MustAllocate(var)) {
- if (MustAllocateInContext(var)) {
- // It is ok to set this only now, because arguments is a local
- // variable that is allocated after the parameters have been
- // allocated.
- arguments_shadow_->MarkAsAccessedFromInnerScope();
- }
- Property* rewrite =
- new Property(new VariableProxy(arguments_shadow_),
- new Literal(Handle<Object>(Smi::FromInt(i))),
- RelocInfo::kNoPosition,
- Property::SYNTHETIC);
- rewrite->set_is_arguments_access(true);
- var->set_rewrite(rewrite);
- }
+ // In strict mode 'arguments' does not alias formal parameters.
+ // Therefore in strict mode we allocate parameters as if 'arguments'
+ // were not used.
+ uses_nonstrict_arguments = !is_strict_mode();
+ }
+
+ // The same parameter may occur multiple times in the parameters_ list.
+ // If it does, and if it is not copied into the context object, it must
+ // receive the highest parameter index for that parameter; thus iteration
+ // order is relevant!
+ for (int i = params_.length() - 1; i >= 0; --i) {
+ Variable* var = params_[i];
+ ASSERT(var->scope() == this);
+ if (uses_nonstrict_arguments) {
+ // Give the parameter a use from an inner scope, to force allocation
+ // to the context.
+ var->MarkAsAccessedFromInnerScope();
}
- } else {
- // The arguments object is not used, so we can access parameters directly.
- // The same parameter may occur multiple times in the parameters_ list.
- // If it does, and if it is not copied into the context object, it must
- // receive the highest parameter index for that parameter; thus iteration
- // order is relevant!
- for (int i = 0; i < params_.length(); i++) {
- Variable* var = params_[i];
- ASSERT(var->scope() == this);
- if (MustAllocate(var)) {
- if (MustAllocateInContext(var)) {
- ASSERT(var->rewrite() == NULL ||
- (var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::CONTEXT));
- if (var->rewrite() == NULL) {
- // Only set the heap allocation if the parameter has not
- // been allocated yet.
- AllocateHeapSlot(var);
- }
- } else {
- ASSERT(var->rewrite() == NULL ||
- (var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::PARAMETER));
- // Set the parameter index always, even if the parameter
- // was seen before! (We need to access the actual parameter
- // supplied for the last occurrence of a multiply declared
- // parameter.)
+ if (MustAllocate(var)) {
+ if (MustAllocateInContext(var)) {
+ ASSERT(var->rewrite() == NULL || var->IsContextSlot());
+ if (var->rewrite() == NULL) {
+ AllocateHeapSlot(var);
+ }
+ } else {
+ ASSERT(var->rewrite() == NULL || var->IsParameter());
+ if (var->rewrite() == NULL) {
var->set_rewrite(new Slot(var, Slot::PARAMETER, i));
}
}
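
The reverse loop above is what lets the rightmost duplicate parameter win: slots are assigned right-to-left and written only while rewrite() is still NULL. A minimal standalone sketch of that rule, outside V8 and with hypothetical names:

#include <cstdio>

struct Param { const char* name; int slot; };

int main() {
  // function f(x, y, x): both 'x' entries alias one variable, so the
  // slot written first during the right-to-left pass is the one kept.
  Param x = { "x", -1 };
  Param y = { "y", -1 };
  Param* params[] = { &x, &y, &x };
  for (int i = 2; i >= 0; --i) {
    // The unset check mirrors the var->rewrite() == NULL guard above.
    if (params[i]->slot == -1) params[i]->slot = i;
  }
  printf("x -> slot %d, y -> slot %d\n", x.slot, y.slot);  // x -> 2, y -> 1
  return 0;
}
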
@@ -1021,8 +966,9 @@ void Scope::AllocateParameterLocals() {
void Scope::AllocateNonParameterLocal(Variable* var) {
ASSERT(var->scope() == this);
ASSERT(var->rewrite() == NULL ||
- (!var->IsVariable(Factory::result_symbol())) ||
- (var->AsSlot() == NULL || var->AsSlot()->type() != Slot::LOCAL));
+ !var->IsVariable(FACTORY->result_symbol()) ||
+ var->AsSlot() == NULL ||
+ var->AsSlot()->type() != Slot::LOCAL);
if (var->rewrite() == NULL && MustAllocate(var)) {
if (MustAllocateInContext(var)) {
AllocateHeapSlot(var);
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index 4a48a4c45..d4e8e2bd9 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -97,8 +97,6 @@ class Scope: public ZoneObject {
Scope(Scope* outer_scope, Type type);
- virtual ~Scope() { }
-
// Compute top scope and allocate variables. For lazy compilation the top
// scope only contains the single lazily compiled function, so this
// doesn't re-allocate variables repeatedly.
@@ -110,31 +108,31 @@ class Scope: public ZoneObject {
// The scope name is only used for printing/debugging.
void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
- virtual void Initialize(bool inside_with);
-
- // Called just before leaving a scope.
- virtual void Leave() {
- // No cleanup or fixup necessary.
- }
+ void Initialize(bool inside_with);
// ---------------------------------------------------------------------------
// Declarations
// Lookup a variable in this scope. Returns the variable or NULL if not found.
- virtual Variable* LocalLookup(Handle<String> name);
+ Variable* LocalLookup(Handle<String> name);
// Lookup a variable in this scope or outer scopes.
// Returns the variable or NULL if not found.
- virtual Variable* Lookup(Handle<String> name);
+ Variable* Lookup(Handle<String> name);
// Declare the function variable for a function literal. This variable
// is in an intermediate scope between this function scope and the
// outer scope. Only possible for function scopes; at most one variable.
Variable* DeclareFunctionVar(Handle<String> name);
+ // Declare a parameter in this scope. When there are duplicated
+ // parameters the rightmost one 'wins'. However, the implementation
+ // expects all parameters to be declared in order, from left to right.
+ void DeclareParameter(Handle<String> name);
+
// Declare a local variable in this scope. If the variable has been
// declared before, the previously declared variable is returned.
- virtual Variable* DeclareLocal(Handle<String> name, Variable::Mode mode);
+ Variable* DeclareLocal(Handle<String> name, Variable::Mode mode);
// Declare an implicit global variable in this scope which must be a
// global scope. The variable was introduced (possibly from an inner
@@ -142,14 +140,10 @@ class Scope: public ZoneObject {
// with statements or eval calls.
Variable* DeclareGlobal(Handle<String> name);
- // Add a parameter to the parameter list. The parameter must have been
- // declared via Declare. The same parameter may occur more than once in
- // the parameter list; they must be added in source order, from left to
- // right.
- void AddParameter(Variable* var);
-
// Create a new unresolved variable.
- virtual VariableProxy* NewUnresolved(Handle<String> name, bool inside_with);
+ VariableProxy* NewUnresolved(Handle<String> name,
+ bool inside_with,
+ int position = RelocInfo::kNoPosition);
// Remove an unresolved variable. During parsing, an unresolved variable
// may have been added optimistically, but then only the variable name
@@ -163,7 +157,7 @@ class Scope: public ZoneObject {
// for printing and cannot be used to find the variable. In particular,
// the only way to get hold of the temporary is by keeping the Variable*
// around.
- virtual Variable* NewTemporary(Handle<String> name);
+ Variable* NewTemporary(Handle<String> name);
// Adds the specific declaration node to the list of declarations in
// this scope. The declarations are processed as part of entering
@@ -196,6 +190,10 @@ class Scope: public ZoneObject {
// Inform the scope that the corresponding code contains an eval call.
void RecordEvalCall() { scope_calls_eval_ = true; }
+ // Enable strict mode for the scope (unless disabled by a global flag).
+ void EnableStrictMode() {
+ strict_mode_ = FLAG_strict_mode;
+ }
// ---------------------------------------------------------------------------
// Predicates.
@@ -204,10 +202,17 @@ class Scope: public ZoneObject {
bool is_eval_scope() const { return type_ == EVAL_SCOPE; }
bool is_function_scope() const { return type_ == FUNCTION_SCOPE; }
bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
+ bool is_strict_mode() const { return strict_mode_; }
+ bool is_strict_mode_eval_scope() const {
+ return is_eval_scope() && is_strict_mode();
+ }
// Information about which scopes call eval.
bool calls_eval() const { return scope_calls_eval_; }
bool outer_scope_calls_eval() const { return outer_scope_calls_eval_; }
+ bool outer_scope_calls_non_strict_eval() const {
+ return outer_scope_calls_non_strict_eval_;
+ }
// Is this scope inside a with statement.
bool inside_with() const { return scope_inside_with_; }
@@ -223,7 +228,7 @@ class Scope: public ZoneObject {
// A new variable proxy corresponding to the (function) receiver.
VariableProxy* receiver() const {
VariableProxy* proxy =
- new VariableProxy(Factory::this_symbol(), true, false);
+ new VariableProxy(FACTORY->this_symbol(), true, false);
proxy->BindTo(receiver_);
return proxy;
}
@@ -246,18 +251,12 @@ class Scope: public ZoneObject {
int num_parameters() const { return params_.length(); }
// The local variable 'arguments' if we need to allocate it; NULL otherwise.
- // If arguments() exist, arguments_shadow() exists, too.
Variable* arguments() const { return arguments_; }
- // The '.arguments' shadow variable if we need to allocate it; NULL otherwise.
- // If arguments_shadow() exist, arguments() exists, too.
- Variable* arguments_shadow() const { return arguments_shadow_; }
-
// Declarations list.
ZoneList<Declaration*>* declarations() { return &decls_; }
-
// ---------------------------------------------------------------------------
// Variable allocation.
@@ -275,6 +274,9 @@ class Scope: public ZoneObject {
// cases the context parameter is an empty handle.
void AllocateVariables(Handle<Context> context);
+ // Current number of var or const locals.
+ int num_var_or_const() { return num_var_or_const_; }
+
// Result of variable allocation.
int num_stack_slots() const { return num_stack_slots_; }
int num_heap_slots() const { return num_heap_slots_; }
@@ -286,7 +288,7 @@ class Scope: public ZoneObject {
bool AllowsLazyCompilation() const;
// True if the outer context of this scope is always the global context.
- virtual bool HasTrivialOuterContext() const;
+ bool HasTrivialOuterContext() const;
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
@@ -348,8 +350,6 @@ class Scope: public ZoneObject {
Variable* function_;
// Convenience variable; function scopes only.
Variable* arguments_;
- // Convenience variable; function scopes only.
- Variable* arguments_shadow_;
// Illegal redeclaration.
Expression* illegal_redecl_;
@@ -358,13 +358,18 @@ class Scope: public ZoneObject {
bool scope_inside_with_; // this scope is inside a 'with' of some outer scope
bool scope_contains_with_; // this scope contains a 'with' statement
bool scope_calls_eval_; // this scope contains an 'eval' call
+ bool strict_mode_; // this scope is a strict mode scope
// Computed via PropagateScopeInfo.
bool outer_scope_calls_eval_;
+ bool outer_scope_calls_non_strict_eval_;
bool inner_scope_calls_eval_;
bool outer_scope_is_eval_scope_;
bool force_eager_compilation_;
+ // Computed as variables are declared.
+ int num_var_or_const_;
+
// Computed via AllocateVariables; function scopes only.
int num_stack_slots_;
int num_heap_slots_;
@@ -389,6 +394,7 @@ class Scope: public ZoneObject {
// Scope analysis.
bool PropagateScopeInfo(bool outer_scope_calls_eval,
+ bool outer_scope_calls_non_strict_eval,
bool outer_scope_is_eval_scope);
bool HasTrivialContext() const;
@@ -420,57 +426,6 @@ class Scope: public ZoneObject {
Handle<SerializedScopeInfo> scope_info);
};
-
-// Scope used during pre-parsing.
-class DummyScope : public Scope {
- public:
- DummyScope()
- : Scope(GLOBAL_SCOPE),
- nesting_level_(1), // Allows us to Leave the initial scope.
- inside_with_level_(kNotInsideWith) {
- outer_scope_ = this;
- scope_inside_with_ = false;
- }
-
- virtual void Initialize(bool inside_with) {
- nesting_level_++;
- if (inside_with && inside_with_level_ == kNotInsideWith) {
- inside_with_level_ = nesting_level_;
- }
- ASSERT(inside_with_level_ <= nesting_level_);
- }
-
- virtual void Leave() {
- nesting_level_--;
- ASSERT(nesting_level_ >= 0);
- if (nesting_level_ < inside_with_level_) {
- inside_with_level_ = kNotInsideWith;
- }
- ASSERT(inside_with_level_ <= nesting_level_);
- }
-
- virtual Variable* Lookup(Handle<String> name) { return NULL; }
-
- virtual VariableProxy* NewUnresolved(Handle<String> name, bool inside_with) {
- return NULL;
- }
-
- virtual Variable* NewTemporary(Handle<String> name) { return NULL; }
-
- virtual bool HasTrivialOuterContext() const {
- return (nesting_level_ == 0 || inside_with_level_ <= 0);
- }
-
- private:
- static const int kNotInsideWith = -1;
- // Number of surrounding scopes of the current scope.
- int nesting_level_;
- // Nesting level of outermost scope that is contained in a with statement,
- // or kNotInsideWith if there are no with's around the current scope.
- int inside_with_level_;
-};
-
-
} } // namespace v8::internal
#endif // V8_SCOPES_H_
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index f8e98d33f..d960afde4 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,6 +29,7 @@
#include "accessors.h"
#include "api.h"
+#include "bootstrapper.h"
#include "execution.h"
#include "global-handles.h"
#include "ic-inl.h"
@@ -38,8 +39,6 @@
#include "serialize.h"
#include "stub-cache.h"
#include "v8threads.h"
-#include "top.h"
-#include "bootstrapper.h"
namespace v8 {
namespace internal {
@@ -68,9 +67,14 @@ static int* GetInternalPointer(StatsCounter* counter) {
// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
class ExternalReferenceTable {
public:
- static ExternalReferenceTable* instance() {
- if (!instance_) instance_ = new ExternalReferenceTable();
- return instance_;
+ static ExternalReferenceTable* instance(Isolate* isolate) {
+ ExternalReferenceTable* external_reference_table =
+ isolate->external_reference_table();
+ if (external_reference_table == NULL) {
+ external_reference_table = new ExternalReferenceTable(isolate);
+ isolate->set_external_reference_table(external_reference_table);
+ }
+ return external_reference_table;
}
int size() const { return refs_.length(); }
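
instance() above replaces a process-wide static with a table cached on the isolate, so each isolate lazily builds its own copy on first use. A sketch of the same get-or-create pattern, with hypothetical stand-in types:

#include <cstddef>

class Table;

// Hypothetical isolate stand-in; holds the lazily created table.
class IsolateLike {
 public:
  IsolateLike() : table_(NULL) {}
  Table* table() const { return table_; }
  void set_table(Table* table) { table_ = table; }
 private:
  Table* table_;
};

class Table {
 public:
  static Table* instance(IsolateLike* isolate) {
    Table* table = isolate->table();
    if (table == NULL) {
      table = new Table();        // first use on this isolate
      isolate->set_table(table);  // cache it for subsequent calls
    }
    return table;
  }
};
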
@@ -84,9 +88,9 @@ class ExternalReferenceTable {
int max_id(int code) { return max_id_[code]; }
private:
- static ExternalReferenceTable* instance_;
-
- ExternalReferenceTable() : refs_(64) { PopulateTable(); }
+ explicit ExternalReferenceTable(Isolate* isolate) : refs_(64) {
+ PopulateTable(isolate);
+ }
~ExternalReferenceTable() { }
struct ExternalReferenceEntry {
@@ -95,10 +99,13 @@ class ExternalReferenceTable {
const char* name;
};
- void PopulateTable();
+ void PopulateTable(Isolate* isolate);
// For a few types of references, we can get their address from their id.
- void AddFromId(TypeCode type, uint16_t id, const char* name);
+ void AddFromId(TypeCode type,
+ uint16_t id,
+ const char* name,
+ Isolate* isolate);
// For other types of references, the caller will figure out the address.
void Add(Address address, TypeCode type, uint16_t id, const char* name);
@@ -108,31 +115,30 @@ class ExternalReferenceTable {
};
-ExternalReferenceTable* ExternalReferenceTable::instance_ = NULL;
-
-
void ExternalReferenceTable::AddFromId(TypeCode type,
uint16_t id,
- const char* name) {
+ const char* name,
+ Isolate* isolate) {
Address address;
switch (type) {
case C_BUILTIN: {
- ExternalReference ref(static_cast<Builtins::CFunctionId>(id));
+ ExternalReference ref(static_cast<Builtins::CFunctionId>(id), isolate);
address = ref.address();
break;
}
case BUILTIN: {
- ExternalReference ref(static_cast<Builtins::Name>(id));
+ ExternalReference ref(static_cast<Builtins::Name>(id), isolate);
address = ref.address();
break;
}
case RUNTIME_FUNCTION: {
- ExternalReference ref(static_cast<Runtime::FunctionId>(id));
+ ExternalReference ref(static_cast<Runtime::FunctionId>(id), isolate);
address = ref.address();
break;
}
case IC_UTILITY: {
- ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)));
+ ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)),
+ isolate);
address = ref.address();
break;
}
@@ -159,7 +165,7 @@ void ExternalReferenceTable::Add(Address address,
}
-void ExternalReferenceTable::PopulateTable() {
+void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
max_id_[type_code] = 0;
}
@@ -190,7 +196,7 @@ void ExternalReferenceTable::PopulateTable() {
#define DEF_ENTRY_C(name, ignored) \
{ BUILTIN, \
- Builtins::name, \
+ Builtins::k##name, \
"Builtins::" #name },
#define DEF_ENTRY_A(name, kind, state, extra) DEF_ENTRY_C(name, ignored)
@@ -220,24 +226,27 @@ void ExternalReferenceTable::PopulateTable() {
}; // end of ref_table[].
for (size_t i = 0; i < ARRAY_SIZE(ref_table); ++i) {
- AddFromId(ref_table[i].type, ref_table[i].id, ref_table[i].name);
+ AddFromId(ref_table[i].type,
+ ref_table[i].id,
+ ref_table[i].name,
+ isolate);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Debug addresses
- Add(Debug_Address(Debug::k_after_break_target_address).address(),
+ Add(Debug_Address(Debug::k_after_break_target_address).address(isolate),
DEBUG_ADDRESS,
Debug::k_after_break_target_address << kDebugIdShift,
"Debug::after_break_target_address()");
- Add(Debug_Address(Debug::k_debug_break_slot_address).address(),
+ Add(Debug_Address(Debug::k_debug_break_slot_address).address(isolate),
DEBUG_ADDRESS,
Debug::k_debug_break_slot_address << kDebugIdShift,
"Debug::debug_break_slot_address()");
- Add(Debug_Address(Debug::k_debug_break_return_address).address(),
+ Add(Debug_Address(Debug::k_debug_break_return_address).address(isolate),
DEBUG_ADDRESS,
Debug::k_debug_break_return_address << kDebugIdShift,
"Debug::debug_break_return_address()");
- Add(Debug_Address(Debug::k_restarter_frame_function_pointer).address(),
+ Add(Debug_Address(Debug::k_restarter_frame_function_pointer).address(isolate),
DEBUG_ADDRESS,
Debug::k_restarter_frame_function_pointer << kDebugIdShift,
"Debug::restarter_frame_function_pointer_address()");
@@ -245,14 +254,14 @@ void ExternalReferenceTable::PopulateTable() {
// Stat counters
struct StatsRefTableEntry {
- StatsCounter* counter;
+ StatsCounter* (Counters::*counter)();
uint16_t id;
const char* name;
};
- static const StatsRefTableEntry stats_ref_table[] = {
+ const StatsRefTableEntry stats_ref_table[] = {
#define COUNTER_ENTRY(name, caption) \
- { &Counters::name, \
+ { &Counters::name, \
Counters::k_##name, \
"Counters::" #name },
@@ -261,33 +270,28 @@ void ExternalReferenceTable::PopulateTable() {
#undef COUNTER_ENTRY
}; // end of stats_ref_table[].
+ Counters* counters = isolate->counters();
for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) {
- Add(reinterpret_cast<Address>(
- GetInternalPointer(stats_ref_table[i].counter)),
+ Add(reinterpret_cast<Address>(GetInternalPointer(
+ (counters->*(stats_ref_table[i].counter))())),
STATS_COUNTER,
stats_ref_table[i].id,
stats_ref_table[i].name);
}
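
The entries above now store pointers to Counters member functions rather than addresses of process-global counters, and (counters->*(entry.counter))() resolves each one against the current isolate. A self-contained sketch of that member-pointer call syntax, with hypothetical names:

#include <cstddef>
#include <cstdio>

// Hypothetical counter holder; per-isolate in V8, local here.
struct CountersLike {
  int gc_count() { return 42; }
  int ic_count() { return 7; }
};

struct Entry {
  int (CountersLike::*counter)();  // pointer to a member function
  const char* name;
};

int main() {
  const Entry table[] = {
    { &CountersLike::gc_count, "gc_count" },
    { &CountersLike::ic_count, "ic_count" },
  };
  CountersLike counters_object;
  CountersLike* counters = &counters_object;
  for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); ++i) {
    // (pointer->*member)() invokes the member function through the pointer.
    printf("%s = %d\n", table[i].name, (counters->*(table[i].counter))());
  }
  return 0;
}
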
// Top addresses
- const char* top_address_format = "Top::%s";
const char* AddressNames[] = {
-#define C(name) #name,
- TOP_ADDRESS_LIST(C)
- TOP_ADDRESS_LIST_PROF(C)
+#define C(name) "Isolate::" #name,
+ ISOLATE_ADDRESS_LIST(C)
+ ISOLATE_ADDRESS_LIST_PROF(C)
NULL
#undef C
};
- int top_format_length = StrLength(top_address_format) - 2;
- for (uint16_t i = 0; i < Top::k_top_address_count; ++i) {
- const char* address_name = AddressNames[i];
- Vector<char> name =
- Vector<char>::New(top_format_length + StrLength(address_name) + 1);
- const char* chars = name.start();
- OS::SNPrintF(name, top_address_format, address_name);
- Add(Top::get_address_from_id((Top::AddressId)i), TOP_ADDRESS, i, chars);
+ for (uint16_t i = 0; i < Isolate::k_isolate_address_count; ++i) {
+ Add(isolate->get_address_from_id((Isolate::AddressId)i),
+ TOP_ADDRESS, i, AddressNames[i]);
}
// Accessors
@@ -300,143 +304,145 @@ void ExternalReferenceTable::PopulateTable() {
ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
#undef ACCESSOR_DESCRIPTOR_DECLARATION
+ StubCache* stub_cache = isolate->stub_cache();
+
// Stub cache tables
- Add(SCTableReference::keyReference(StubCache::kPrimary).address(),
+ Add(stub_cache->key_reference(StubCache::kPrimary).address(),
STUB_CACHE_TABLE,
1,
"StubCache::primary_->key");
- Add(SCTableReference::valueReference(StubCache::kPrimary).address(),
+ Add(stub_cache->value_reference(StubCache::kPrimary).address(),
STUB_CACHE_TABLE,
2,
"StubCache::primary_->value");
- Add(SCTableReference::keyReference(StubCache::kSecondary).address(),
+ Add(stub_cache->key_reference(StubCache::kSecondary).address(),
STUB_CACHE_TABLE,
3,
"StubCache::secondary_->key");
- Add(SCTableReference::valueReference(StubCache::kSecondary).address(),
+ Add(stub_cache->value_reference(StubCache::kSecondary).address(),
STUB_CACHE_TABLE,
4,
"StubCache::secondary_->value");
// Runtime entries
- Add(ExternalReference::perform_gc_function().address(),
+ Add(ExternalReference::perform_gc_function(isolate).address(),
RUNTIME_ENTRY,
1,
"Runtime::PerformGC");
- Add(ExternalReference::fill_heap_number_with_random_function().address(),
+ Add(ExternalReference::fill_heap_number_with_random_function(
+ isolate).address(),
RUNTIME_ENTRY,
2,
"V8::FillHeapNumberWithRandom");
-
- Add(ExternalReference::random_uint32_function().address(),
+ Add(ExternalReference::random_uint32_function(isolate).address(),
RUNTIME_ENTRY,
3,
"V8::Random");
-
- Add(ExternalReference::delete_handle_scope_extensions().address(),
+ Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
RUNTIME_ENTRY,
4,
"HandleScope::DeleteExtensions");
// Miscellaneous
- Add(ExternalReference::the_hole_value_location().address(),
+ Add(ExternalReference::the_hole_value_location(isolate).address(),
UNCLASSIFIED,
2,
"Factory::the_hole_value().location()");
- Add(ExternalReference::roots_address().address(),
+ Add(ExternalReference::roots_address(isolate).address(),
UNCLASSIFIED,
3,
"Heap::roots_address()");
- Add(ExternalReference::address_of_stack_limit().address(),
+ Add(ExternalReference::address_of_stack_limit(isolate).address(),
UNCLASSIFIED,
4,
"StackGuard::address_of_jslimit()");
- Add(ExternalReference::address_of_real_stack_limit().address(),
+ Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
UNCLASSIFIED,
5,
"StackGuard::address_of_real_jslimit()");
#ifndef V8_INTERPRETED_REGEXP
- Add(ExternalReference::address_of_regexp_stack_limit().address(),
+ Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
UNCLASSIFIED,
6,
"RegExpStack::limit_address()");
- Add(ExternalReference::address_of_regexp_stack_memory_address().address(),
+ Add(ExternalReference::address_of_regexp_stack_memory_address(
+ isolate).address(),
UNCLASSIFIED,
7,
"RegExpStack::memory_address()");
- Add(ExternalReference::address_of_regexp_stack_memory_size().address(),
+ Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
UNCLASSIFIED,
8,
"RegExpStack::memory_size()");
- Add(ExternalReference::address_of_static_offsets_vector().address(),
+ Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
UNCLASSIFIED,
9,
"OffsetsVector::static_offsets_vector");
#endif // V8_INTERPRETED_REGEXP
- Add(ExternalReference::new_space_start().address(),
+ Add(ExternalReference::new_space_start(isolate).address(),
UNCLASSIFIED,
10,
"Heap::NewSpaceStart()");
- Add(ExternalReference::new_space_mask().address(),
+ Add(ExternalReference::new_space_mask(isolate).address(),
UNCLASSIFIED,
11,
"Heap::NewSpaceMask()");
- Add(ExternalReference::heap_always_allocate_scope_depth().address(),
+ Add(ExternalReference::heap_always_allocate_scope_depth(isolate).address(),
UNCLASSIFIED,
12,
"Heap::always_allocate_scope_depth()");
- Add(ExternalReference::new_space_allocation_limit_address().address(),
+ Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
UNCLASSIFIED,
13,
"Heap::NewSpaceAllocationLimitAddress()");
- Add(ExternalReference::new_space_allocation_top_address().address(),
+ Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
UNCLASSIFIED,
14,
"Heap::NewSpaceAllocationTopAddress()");
#ifdef ENABLE_DEBUGGER_SUPPORT
- Add(ExternalReference::debug_break().address(),
+ Add(ExternalReference::debug_break(isolate).address(),
UNCLASSIFIED,
15,
"Debug::Break()");
- Add(ExternalReference::debug_step_in_fp_address().address(),
+ Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
UNCLASSIFIED,
16,
"Debug::step_in_fp_addr()");
#endif
- Add(ExternalReference::double_fp_operation(Token::ADD).address(),
+ Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(),
UNCLASSIFIED,
17,
"add_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::SUB).address(),
+ Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(),
UNCLASSIFIED,
18,
"sub_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::MUL).address(),
+ Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(),
UNCLASSIFIED,
19,
"mul_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::DIV).address(),
+ Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(),
UNCLASSIFIED,
20,
"div_two_doubles");
- Add(ExternalReference::double_fp_operation(Token::MOD).address(),
+ Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(),
UNCLASSIFIED,
21,
"mod_two_doubles");
- Add(ExternalReference::compare_doubles().address(),
+ Add(ExternalReference::compare_doubles(isolate).address(),
UNCLASSIFIED,
22,
"compare_doubles");
#ifndef V8_INTERPRETED_REGEXP
- Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
+ Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
UNCLASSIFIED,
23,
"NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
- Add(ExternalReference::re_check_stack_guard_state().address(),
+ Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
UNCLASSIFIED,
24,
"RegExpMacroAssembler*::CheckStackGuardState()");
- Add(ExternalReference::re_grow_stack().address(),
+ Add(ExternalReference::re_grow_stack(isolate).address(),
UNCLASSIFIED,
25,
"NativeRegExpMacroAssembler::GrowStack()");
@@ -446,15 +452,15 @@ void ExternalReferenceTable::PopulateTable() {
"NativeRegExpMacroAssembler::word_character_map");
#endif // V8_INTERPRETED_REGEXP
// Keyed lookup cache.
- Add(ExternalReference::keyed_lookup_cache_keys().address(),
+ Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
UNCLASSIFIED,
27,
"KeyedLookupCache::keys()");
- Add(ExternalReference::keyed_lookup_cache_field_offsets().address(),
+ Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
UNCLASSIFIED,
28,
"KeyedLookupCache::field_offsets()");
- Add(ExternalReference::transcendental_cache_array_address().address(),
+ Add(ExternalReference::transcendental_cache_array_address(isolate).address(),
UNCLASSIFIED,
29,
"TranscendentalCache::caches()");
@@ -470,11 +476,11 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
32,
"HandleScope::level");
- Add(ExternalReference::new_deoptimizer_function().address(),
+ Add(ExternalReference::new_deoptimizer_function(isolate).address(),
UNCLASSIFIED,
33,
"Deoptimizer::New()");
- Add(ExternalReference::compute_output_frames_function().address(),
+ Add(ExternalReference::compute_output_frames_function(isolate).address(),
UNCLASSIFIED,
34,
"Deoptimizer::ComputeOutputFrames()");
@@ -486,33 +492,38 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
36,
"LDoubleConstant::one_half");
- Add(ExternalReference::address_of_minus_zero().address(),
+ Add(ExternalReference::isolate_address().address(),
UNCLASSIFIED,
37,
+ "isolate");
+ Add(ExternalReference::address_of_minus_zero().address(),
+ UNCLASSIFIED,
+ 38,
"LDoubleConstant::minus_zero");
Add(ExternalReference::address_of_negative_infinity().address(),
UNCLASSIFIED,
- 38,
+ 39,
"LDoubleConstant::negative_infinity");
- Add(ExternalReference::power_double_double_function().address(),
+ Add(ExternalReference::power_double_double_function(isolate).address(),
UNCLASSIFIED,
- 39,
+ 40,
"power_double_double_function");
- Add(ExternalReference::power_double_int_function().address(),
+ Add(ExternalReference::power_double_int_function(isolate).address(),
UNCLASSIFIED,
- 40,
+ 41,
"power_double_int_function");
- Add(ExternalReference::arguments_marker_location().address(),
+ Add(ExternalReference::arguments_marker_location(isolate).address(),
UNCLASSIFIED,
- 41,
+ 42,
"Factory::arguments_marker().location()");
}
ExternalReferenceEncoder::ExternalReferenceEncoder()
- : encodings_(Match) {
+ : encodings_(Match),
+ isolate_(Isolate::Current()) {
ExternalReferenceTable* external_references =
- ExternalReferenceTable::instance();
+ ExternalReferenceTable::instance(isolate_);
for (int i = 0; i < external_references->size(); ++i) {
Put(external_references->address(i), i);
}
@@ -522,20 +533,22 @@ ExternalReferenceEncoder::ExternalReferenceEncoder()
uint32_t ExternalReferenceEncoder::Encode(Address key) const {
int index = IndexOf(key);
ASSERT(key == NULL || index >= 0);
- return index >=0 ? ExternalReferenceTable::instance()->code(index) : 0;
+ return index >= 0 ?
+ ExternalReferenceTable::instance(isolate_)->code(index) : 0;
}
const char* ExternalReferenceEncoder::NameOfAddress(Address key) const {
int index = IndexOf(key);
- return index >=0 ? ExternalReferenceTable::instance()->name(index) : NULL;
+ return index >= 0 ?
+ ExternalReferenceTable::instance(isolate_)->name(index) : NULL;
}
int ExternalReferenceEncoder::IndexOf(Address key) const {
if (key == NULL) return -1;
HashMap::Entry* entry =
- const_cast<HashMap &>(encodings_).Lookup(key, Hash(key), false);
+ const_cast<HashMap&>(encodings_).Lookup(key, Hash(key), false);
return entry == NULL
? -1
: static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
@@ -549,9 +562,10 @@ void ExternalReferenceEncoder::Put(Address key, int index) {
ExternalReferenceDecoder::ExternalReferenceDecoder()
- : encodings_(NewArray<Address*>(kTypeCodeCount)) {
+ : encodings_(NewArray<Address*>(kTypeCodeCount)),
+ isolate_(Isolate::Current()) {
ExternalReferenceTable* external_references =
- ExternalReferenceTable::instance();
+ ExternalReferenceTable::instance(isolate_);
for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
int max = external_references->max_id(type) + 1;
encodings_[type] = NewArray<Address>(max + 1);
@@ -572,10 +586,12 @@ ExternalReferenceDecoder::~ExternalReferenceDecoder() {
bool Serializer::serialization_enabled_ = false;
bool Serializer::too_late_to_enable_now_ = false;
-ExternalReferenceDecoder* Deserializer::external_reference_decoder_ = NULL;
-Deserializer::Deserializer(SnapshotByteSource* source) : source_(source) {
+Deserializer::Deserializer(SnapshotByteSource* source)
+ : isolate_(NULL),
+ source_(source),
+ external_reference_decoder_(NULL) {
}
@@ -601,7 +617,6 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) {
high_water_[space_index] = address + size;
} else {
ASSERT(SpaceIsLarge(space_index));
- ASSERT(size > Page::kPageSize - Page::kObjectStartOffset);
LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
Object* new_allocation;
if (space_index == kLargeData) {
@@ -655,27 +670,31 @@ HeapObject* Deserializer::GetAddressFromStart(int space) {
void Deserializer::Deserialize() {
+ isolate_ = Isolate::Current();
// Don't GC while deserializing - just expand the heap.
AlwaysAllocateScope always_allocate;
// Don't use the free lists while deserializing.
LinearAllocationScope allocate_linearly;
// No active threads.
- ASSERT_EQ(NULL, ThreadState::FirstInUse());
+ ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
// No active handles.
- ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
+ ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
// Make sure the entire partial snapshot cache is traversed, filling it with
// valid object pointers.
- partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
+ isolate_->set_serialize_partial_snapshot_cache_length(
+ Isolate::kPartialSnapshotCacheCapacity);
ASSERT_EQ(NULL, external_reference_decoder_);
external_reference_decoder_ = new ExternalReferenceDecoder();
- Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
- Heap::IterateWeakRoots(this, VISIT_ALL);
+ isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
+ isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
- Heap::set_global_contexts_list(Heap::undefined_value());
+ isolate_->heap()->set_global_contexts_list(
+ isolate_->heap()->undefined_value());
}
void Deserializer::DeserializePartial(Object** root) {
+ isolate_ = Isolate::Current();
// Don't GC while deserializing - just expand the heap.
AlwaysAllocateScope always_allocate;
// Don't use the free lists while deserializing.
@@ -689,7 +708,7 @@ void Deserializer::DeserializePartial(Object** root) {
Deserializer::~Deserializer() {
ASSERT(source_->AtEOF());
- if (external_reference_decoder_ != NULL) {
+ if (external_reference_decoder_) {
delete external_reference_decoder_;
external_reference_decoder_ = NULL;
}
@@ -720,9 +739,14 @@ void Deserializer::ReadObject(int space_number,
Object** current = reinterpret_cast<Object**>(address);
Object** limit = current + (size >> kPointerSizeLog2);
if (FLAG_log_snapshot_positions) {
- LOG(SnapshotPositionEvent(address, source_->position()));
+ LOG(isolate_, SnapshotPositionEvent(address, source_->position()));
}
ReadChunk(current, limit, space_number, address);
+#ifdef DEBUG
+ bool is_codespace = (space == HEAP->code_space()) ||
+ ((space == HEAP->lo_space()) && (space_number == kLargeCode));
+ ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace);
+#endif
}
@@ -732,20 +756,20 @@ void Deserializer::ReadObject(int space_number,
#define ASSIGN_DEST_SPACE(space_number) \
Space* dest_space; \
if (space_number == NEW_SPACE) { \
- dest_space = Heap::new_space(); \
+ dest_space = isolate->heap()->new_space(); \
} else if (space_number == OLD_POINTER_SPACE) { \
- dest_space = Heap::old_pointer_space(); \
+ dest_space = isolate->heap()->old_pointer_space(); \
} else if (space_number == OLD_DATA_SPACE) { \
- dest_space = Heap::old_data_space(); \
+ dest_space = isolate->heap()->old_data_space(); \
} else if (space_number == CODE_SPACE) { \
- dest_space = Heap::code_space(); \
+ dest_space = isolate->heap()->code_space(); \
} else if (space_number == MAP_SPACE) { \
- dest_space = Heap::map_space(); \
+ dest_space = isolate->heap()->map_space(); \
} else if (space_number == CELL_SPACE) { \
- dest_space = Heap::cell_space(); \
+ dest_space = isolate->heap()->cell_space(); \
} else { \
ASSERT(space_number >= LO_SPACE); \
- dest_space = Heap::lo_space(); \
+ dest_space = isolate->heap()->lo_space(); \
}
@@ -756,6 +780,7 @@ void Deserializer::ReadChunk(Object** current,
Object** limit,
int source_space,
Address address) {
+ Isolate* const isolate = isolate_;
while (current < limit) {
int data = source_->Get();
switch (data) {
@@ -784,14 +809,15 @@ void Deserializer::ReadChunk(Object** current,
ReadObject(space_number, dest_space, &new_object); \
} else if (where == kRootArray) { \
int root_id = source_->GetInt(); \
- new_object = Heap::roots_address()[root_id]; \
+ new_object = isolate->heap()->roots_address()[root_id]; \
} else if (where == kPartialSnapshotCache) { \
int cache_index = source_->GetInt(); \
- new_object = partial_snapshot_cache_[cache_index]; \
+ new_object = isolate->serialize_partial_snapshot_cache() \
+ [cache_index]; \
} else if (where == kExternalReference) { \
int reference_id = source_->GetInt(); \
- Address address = \
- external_reference_decoder_->Decode(reference_id); \
+ Address address = external_reference_decoder_-> \
+ Decode(reference_id); \
new_object = reinterpret_cast<Object*>(address); \
} else if (where == kBackref) { \
emit_write_barrier = \
@@ -829,7 +855,7 @@ void Deserializer::ReadChunk(Object** current,
} \
} \
if (emit_write_barrier) { \
- Heap::RecordWrite(address, static_cast<int>( \
+ isolate->heap()->RecordWrite(address, static_cast<int>( \
reinterpret_cast<Address>(current) - address)); \
} \
if (!current_was_incremented) { \
@@ -878,7 +904,7 @@ void Deserializer::ReadChunk(Object** current,
CASE_STATEMENT(where, how, within, CODE_SPACE) \
CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \
CASE_STATEMENT(where, how, within, kLargeCode) \
- CASE_BODY(where, how, within, LO_SPACE, kUnknownOffsetFromStart)
+ CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart)
#define EMIT_COMMON_REFERENCE_PATTERNS(pseudo_space_number, \
space_number, \
@@ -991,9 +1017,11 @@ void Deserializer::ReadChunk(Object** current,
case kNativesStringResource: {
int index = source_->Get();
- Vector<const char> source_vector = Natives::GetScriptSource(index);
+ Vector<const char> source_vector = Natives::GetRawScriptSource(index);
NativesExternalStringResource* resource =
- new NativesExternalStringResource(source_vector.start());
+ new NativesExternalStringResource(isolate->bootstrapper(),
+ source_vector.start(),
+ source_vector.length());
*current++ = reinterpret_cast<Object*>(resource);
break;
}
@@ -1058,6 +1086,9 @@ Serializer::Serializer(SnapshotByteSink* sink)
current_root_index_(0),
external_reference_encoder_(new ExternalReferenceEncoder),
large_object_total_(0) {
+ // The serializer is meant to be used only to generate initial heap images
+ // from a context in which there is only one isolate.
+ ASSERT(Isolate::Current()->IsDefaultIsolate());
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
@@ -1070,35 +1101,40 @@ Serializer::~Serializer() {
void StartupSerializer::SerializeStrongReferences() {
+ Isolate* isolate = Isolate::Current();
// No active threads.
- CHECK_EQ(NULL, ThreadState::FirstInUse());
+ CHECK_EQ(NULL, Isolate::Current()->thread_manager()->FirstThreadStateInUse());
// No active or weak handles.
- CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
- CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
+ CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
+ CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
// We don't support serializing installed extensions.
- for (RegisteredExtension* ext = RegisteredExtension::first_extension();
+ for (RegisteredExtension* ext = v8::RegisteredExtension::first_extension();
ext != NULL;
ext = ext->next()) {
CHECK_NE(v8::INSTALLED, ext->state());
}
- Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
+ HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG);
}
void PartialSerializer::Serialize(Object** object) {
this->VisitPointer(object);
+ Isolate* isolate = Isolate::Current();
// After we have done the partial serialization the partial snapshot cache
// will contain some references needed to decode the partial snapshot. We
// fill it up with undefineds so it has a predictable length so the
// deserialization code doesn't need to know the length.
- for (int index = partial_snapshot_cache_length_;
- index < kPartialSnapshotCacheCapacity;
+ for (int index = isolate->serialize_partial_snapshot_cache_length();
+ index < Isolate::kPartialSnapshotCacheCapacity;
index++) {
- partial_snapshot_cache_[index] = Heap::undefined_value();
- startup_serializer_->VisitPointer(&partial_snapshot_cache_[index]);
+ isolate->serialize_partial_snapshot_cache()[index] =
+ isolate->heap()->undefined_value();
+ startup_serializer_->VisitPointer(
+ &isolate->serialize_partial_snapshot_cache()[index]);
}
- partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
+ isolate->set_serialize_partial_snapshot_cache_length(
+ Isolate::kPartialSnapshotCacheCapacity);
}
@@ -1117,11 +1153,6 @@ void Serializer::VisitPointers(Object** start, Object** end) {
}
-Object* SerializerDeserializer::partial_snapshot_cache_[
- kPartialSnapshotCacheCapacity];
-int SerializerDeserializer::partial_snapshot_cache_length_ = 0;
-
-
// This ensures that the partial snapshot cache keeps things alive during GC and
// tracks their movement. When it is called during serialization of the startup
// snapshot the partial snapshot is empty, so nothing happens. When the partial
@@ -1131,9 +1162,11 @@ int SerializerDeserializer::partial_snapshot_cache_length_ = 0;
// deserialization we therefore need to visit the cache array. This fills it up
// with pointers to deserialized objects.
void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
+ Isolate* isolate = Isolate::Current();
visitor->VisitPointers(
- &partial_snapshot_cache_[0],
- &partial_snapshot_cache_[partial_snapshot_cache_length_]);
+ isolate->serialize_partial_snapshot_cache(),
+ &isolate->serialize_partial_snapshot_cache()[
+ isolate->serialize_partial_snapshot_cache_length()]);
}
@@ -1141,33 +1174,39 @@ void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
// the root iteration code (above) will iterate over array elements, writing the
// references to deserialized objects in them.
void SerializerDeserializer::SetSnapshotCacheSize(int size) {
- partial_snapshot_cache_length_ = size;
+ Isolate::Current()->set_serialize_partial_snapshot_cache_length(size);
}
int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
- for (int i = 0; i < partial_snapshot_cache_length_; i++) {
- Object* entry = partial_snapshot_cache_[i];
+ Isolate* isolate = Isolate::Current();
+
+ for (int i = 0;
+ i < isolate->serialize_partial_snapshot_cache_length();
+ i++) {
+ Object* entry = isolate->serialize_partial_snapshot_cache()[i];
if (entry == heap_object) return i;
}
// We didn't find the object in the cache. So we add it to the cache and
// then visit the pointer so that it becomes part of the startup snapshot
// and we can refer to it from the partial snapshot.
- int length = partial_snapshot_cache_length_;
- CHECK(length < kPartialSnapshotCacheCapacity);
- partial_snapshot_cache_[length] = heap_object;
- startup_serializer_->VisitPointer(&partial_snapshot_cache_[length]);
+ int length = isolate->serialize_partial_snapshot_cache_length();
+ CHECK(length < Isolate::kPartialSnapshotCacheCapacity);
+ isolate->serialize_partial_snapshot_cache()[length] = heap_object;
+ startup_serializer_->VisitPointer(
+ &isolate->serialize_partial_snapshot_cache()[length]);
// We don't recurse from the startup snapshot generator into the partial
// snapshot generator.
- ASSERT(length == partial_snapshot_cache_length_);
- return partial_snapshot_cache_length_++;
+ ASSERT(length == isolate->serialize_partial_snapshot_cache_length());
+ isolate->set_serialize_partial_snapshot_cache_length(length + 1);
+ return length;
}
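
PartialSnapshotCacheIndex above is a linear find-or-append over the per-isolate cache, which is acceptable at snapshot-build time because the cache holds at most kPartialSnapshotCacheCapacity entries. A standalone sketch of the pattern, with hypothetical names and assert standing in for CHECK:

#include <cassert>

static const int kCapacity = 1400;
static void* cache[kCapacity];
static int cache_length = 0;

int CacheIndex(void* object) {
  for (int i = 0; i < cache_length; i++) {
    if (cache[i] == object) return i;  // already cached: reuse its index
  }
  assert(cache_length < kCapacity);    // capacity is fixed at compile time
  cache[cache_length] = object;        // append, then hand out the new index
  return cache_length++;
}
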
int PartialSerializer::RootIndex(HeapObject* heap_object) {
for (int i = 0; i < Heap::kRootListLength; i++) {
- Object* root = Heap::roots_address()[i];
+ Object* root = HEAP->roots_address()[i];
if (root == heap_object) return i;
}
return kInvalidRootIndex;
@@ -1250,13 +1289,13 @@ void StartupSerializer::SerializeObject(
void StartupSerializer::SerializeWeakReferences() {
- for (int i = partial_snapshot_cache_length_;
- i < kPartialSnapshotCacheCapacity;
+ for (int i = Isolate::Current()->serialize_partial_snapshot_cache_length();
+ i < Isolate::kPartialSnapshotCacheCapacity;
i++) {
sink_->Put(kRootArray + kPlain + kStartOfObject, "RootSerialization");
sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
}
- Heap::IterateWeakRoots(this, VISIT_ALL);
+ HEAP->IterateWeakRoots(this, VISIT_ALL);
}
@@ -1317,7 +1356,8 @@ void Serializer::ObjectSerializer::Serialize() {
"ObjectSerialization");
sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
- LOG(SnapshotPositionEvent(object_->address(), sink_->Position()));
+ LOG(i::Isolate::Current(),
+ SnapshotPositionEvent(object_->address(), sink_->Position()));
// Mark this object as already serialized.
bool start_new_page;
@@ -1418,7 +1458,7 @@ void Serializer::ObjectSerializer::VisitExternalAsciiString(
Address references_start = reinterpret_cast<Address>(resource_pointer);
OutputRawData(references_start);
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Object* source = Heap::natives_source_cache()->get(i);
+ Object* source = HEAP->natives_source_cache()->get(i);
if (!source->IsUndefined()) {
ExternalAsciiString* string = ExternalAsciiString::cast(source);
typedef v8::String::ExternalAsciiStringResource Resource;
@@ -1468,7 +1508,7 @@ void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
int Serializer::SpaceOfObject(HeapObject* object) {
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
AllocationSpace s = static_cast<AllocationSpace>(i);
- if (Heap::InSpace(object, s)) {
+ if (HEAP->InSpace(object, s)) {
if (i == LO_SPACE) {
if (object->IsCode()) {
return kLargeCode;
@@ -1489,7 +1529,7 @@ int Serializer::SpaceOfObject(HeapObject* object) {
int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) {
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
AllocationSpace s = static_cast<AllocationSpace>(i);
- if (Heap::InSpace(object, s)) {
+ if (HEAP->InSpace(object, s)) {
return i;
}
}
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index e80c302d0..d83722d00 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -79,6 +79,8 @@ class ExternalReferenceEncoder {
static bool Match(void* key1, void* key2) { return key1 == key2; }
void Put(Address key, int index);
+
+ Isolate* isolate_;
};
@@ -105,6 +107,8 @@ class ExternalReferenceDecoder {
void Put(uint32_t key, Address value) {
*Lookup(key) = value;
}
+
+ Isolate* isolate_;
};
@@ -246,10 +250,6 @@ class SerializerDeserializer: public ObjectVisitor {
static inline bool SpaceIsPaged(int space) {
return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
}
-
- static int partial_snapshot_cache_length_;
- static const int kPartialSnapshotCacheCapacity = 1400;
- static Object* partial_snapshot_cache_[];
};
@@ -313,6 +313,9 @@ class Deserializer: public SerializerDeserializer {
Address Allocate(int space_number, Space* space, int size);
void ReadObject(int space_number, Space* space, Object** write_back);
+ // Cached current isolate.
+ Isolate* isolate_;
+
// Keep track of the pages in the paged spaces.
// (In large object space we are keeping track of individual objects
// rather than pages.) In new space we just need the address of the
@@ -320,7 +323,6 @@ class Deserializer: public SerializerDeserializer {
List<Address> pages_[SerializerDeserializer::kNumberOfSpaces];
SnapshotByteSource* source_;
- static ExternalReferenceDecoder* external_reference_decoder_;
// This is the address of the next object that will be allocated in each
// space. It is used to calculate the addresses of back-references.
Address high_water_[LAST_SPACE + 1];
@@ -329,6 +331,8 @@ class Deserializer: public SerializerDeserializer {
// START_NEW_PAGE_SERIALIZATION tag.
Address last_object_address_;
+ ExternalReferenceDecoder* external_reference_decoder_;
+
DISALLOW_COPY_AND_ASSIGN(Deserializer);
};
@@ -398,6 +402,7 @@ class SerializationAddressMapper {
};
+// There can be only one serializer per V8 process.
class Serializer : public SerializerDeserializer {
public:
explicit Serializer(SnapshotByteSink* sink);
@@ -539,7 +544,7 @@ class PartialSerializer : public Serializer {
ASSERT(!o->IsScript());
return o->IsString() || o->IsSharedFunctionInfo() ||
o->IsHeapNumber() || o->IsCode() ||
- o->map() == Heap::fixed_cow_array_map();
+ o->map() == HEAP->fixed_cow_array_map();
}
private:
@@ -555,7 +560,7 @@ class StartupSerializer : public Serializer {
// strong roots have been serialized we can create a partial snapshot
// which will repopulate the cache with objects needed by that partial
// snapshot.
- partial_snapshot_cache_length_ = 0;
+ Isolate::Current()->set_serialize_partial_snapshot_cache_length(0);
}
// Serialize the current state of the heap. The order is:
// 1) Strong references.
diff --git a/deps/v8/src/small-pointer-list.h b/deps/v8/src/small-pointer-list.h
new file mode 100644
index 000000000..6291d9ee8
--- /dev/null
+++ b/deps/v8/src/small-pointer-list.h
@@ -0,0 +1,163 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SMALL_POINTER_LIST_H_
+#define V8_SMALL_POINTER_LIST_H_
+
+#include "checks.h"
+#include "v8globals.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// SmallPointerList is a list optimized for storing zero or one value.
+// When more values are added it falls back to ZoneList.
+//
+// The interface tries to be as close to List from list.h as possible.
+template <typename T>
+class SmallPointerList {
+ public:
+ SmallPointerList() : data_(kEmptyTag) {}
+
+ bool is_empty() const { return length() == 0; }
+
+ int length() const {
+ if ((data_ & kTagMask) == kEmptyTag) return 0;
+ if ((data_ & kTagMask) == kSingletonTag) return 1;
+ return list()->length();
+ }
+
+ void Add(T* pointer) {
+ ASSERT(IsAligned(reinterpret_cast<intptr_t>(pointer), kPointerAlignment));
+ if ((data_ & kTagMask) == kEmptyTag) {
+ data_ = reinterpret_cast<intptr_t>(pointer) | kSingletonTag;
+ return;
+ }
+ if ((data_ & kTagMask) == kSingletonTag) {
+ PointerList* list = new PointerList(2);
+ list->Add(single_value());
+ list->Add(pointer);
+ ASSERT(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment));
+ data_ = reinterpret_cast<intptr_t>(list) | kListTag;
+ return;
+ }
+ list()->Add(pointer);
+ }
+
+ // Note: returns T* and not T*& (unlike List from list.h).
+ // This makes the implementation simpler and more const correct.
+ T* at(int i) const {
+ ASSERT((data_ & kTagMask) != kEmptyTag);
+ if ((data_ & kTagMask) == kSingletonTag) {
+ ASSERT(i == 0);
+ return single_value();
+ }
+ return list()->at(i);
+ }
+
+ // See the note above.
+ T* operator[](int i) const { return at(i); }
+
+ // Remove the given element from the list (if present).
+ void RemoveElement(T* pointer) {
+ if ((data_ & kTagMask) == kEmptyTag) return;
+ if ((data_ & kTagMask) == kSingletonTag) {
+ if (pointer == single_value()) {
+ data_ = kEmptyTag;
+ }
+ return;
+ }
+ list()->RemoveElement(pointer);
+ }
+
+ T* RemoveLast() {
+ ASSERT((data_ & kTagMask) != kEmptyTag);
+ if ((data_ & kTagMask) == kSingletonTag) {
+ T* result = single_value();
+ data_ = kEmptyTag;
+ return result;
+ }
+ return list()->RemoveLast();
+ }
+
+ void Rewind(int pos) {
+ if ((data_ & kTagMask) == kEmptyTag) {
+ ASSERT(pos == 0);
+ return;
+ }
+ if ((data_ & kTagMask) == kSingletonTag) {
+ ASSERT(pos == 0 || pos == 1);
+ if (pos == 0) {
+ data_ = kEmptyTag;
+ }
+ return;
+ }
+ list()->Rewind(pos);
+ }
+
+ int CountOccurrences(T* pointer, int start, int end) const {
+ if ((data_ & kTagMask) == kEmptyTag) return 0;
+ if ((data_ & kTagMask) == kSingletonTag) {
+ if (start == 0 && end >= 0) {
+ return (single_value() == pointer) ? 1 : 0;
+ }
+ return 0;
+ }
+ return list()->CountOccurrences(pointer, start, end);
+ }
+
+ private:
+ typedef ZoneList<T*> PointerList;
+
+ static const intptr_t kEmptyTag = 1;
+ static const intptr_t kSingletonTag = 0;
+ static const intptr_t kListTag = 2;
+ static const intptr_t kTagMask = 3;
+ static const intptr_t kValueMask = ~kTagMask;
+
+ STATIC_ASSERT(kTagMask + 1 <= kPointerAlignment);
+
+ T* single_value() const {
+ ASSERT((data_ & kTagMask) == kSingletonTag);
+ STATIC_ASSERT(kSingletonTag == 0);
+ return reinterpret_cast<T*>(data_);
+ }
+
+ PointerList* list() const {
+ ASSERT((data_ & kTagMask) == kListTag);
+ return reinterpret_cast<PointerList*>(data_ & kValueMask);
+ }
+
+ intptr_t data_;
+
+ DISALLOW_COPY_AND_ASSIGN(SmallPointerList);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_SMALL_POINTER_LIST_H_
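
For orientation before the snapshot changes below, a minimal sketch of how
the tagging above behaves in use (illustrative only; assumes the
v8::internal context of this header and an active Zone, which ZoneList
needs once the list outgrows one element):

    struct Node { int payload; };   // any pointer-aligned type can be T
    SmallPointerList<Node> uses;    // data_ == kEmptyTag, length() == 0
    Node a, b;
    uses.Add(&a);   // singleton: data_ stores &a directly (kSingletonTag == 0)
    uses.Add(&b);   // fallback: data_ now tags a freshly allocated ZoneList
    ASSERT(uses.length() == 2 && uses[0] == &a && uses.at(1) == &b);
    uses.RemoveLast();              // stays on the list path; list holds {&a}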
diff --git a/deps/v8/src/snapshot-common.cc b/deps/v8/src/snapshot-common.cc
index f1106e138..ef89a5ef7 100644
--- a/deps/v8/src/snapshot-common.cc
+++ b/deps/v8/src/snapshot-common.cc
@@ -53,7 +53,7 @@ bool Snapshot::Initialize(const char* snapshot_file) {
DeleteArray(str);
return true;
} else if (size_ > 0) {
- Deserialize(data_, size_);
+ Deserialize(raw_data_, raw_size_);
return true;
}
return false;
@@ -64,14 +64,15 @@ Handle<Context> Snapshot::NewContextFromSnapshot() {
if (context_size_ == 0) {
return Handle<Context>();
}
- Heap::ReserveSpace(new_space_used_,
+ HEAP->ReserveSpace(new_space_used_,
pointer_space_used_,
data_space_used_,
code_space_used_,
map_space_used_,
cell_space_used_,
large_space_used_);
- SnapshotByteSource source(context_data_, context_size_);
+ SnapshotByteSource source(context_raw_data_,
+ context_raw_size_);
Deserializer deserializer(&source);
Object* root;
deserializer.DeserializePartial(&root);
diff --git a/deps/v8/src/snapshot-empty.cc b/deps/v8/src/snapshot-empty.cc
index cb26eb8c5..0b35720cc 100644
--- a/deps/v8/src/snapshot-empty.cc
+++ b/deps/v8/src/snapshot-empty.cc
@@ -35,9 +35,13 @@ namespace v8 {
namespace internal {
const byte Snapshot::data_[] = { 0 };
+const byte* Snapshot::raw_data_ = NULL;
const int Snapshot::size_ = 0;
+const int Snapshot::raw_size_ = 0;
const byte Snapshot::context_data_[] = { 0 };
+const byte* Snapshot::context_raw_data_ = NULL;
const int Snapshot::context_size_ = 0;
+const int Snapshot::context_raw_size_ = 0;
const int Snapshot::new_space_used_ = 0;
const int Snapshot::pointer_space_used_ = 0;
diff --git a/deps/v8/src/snapshot.h b/deps/v8/src/snapshot.h
index 9f77c20f5..4f01a2d62 100644
--- a/deps/v8/src/snapshot.h
+++ b/deps/v8/src/snapshot.h
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "isolate.h"
+
#ifndef V8_SNAPSHOT_H_
#define V8_SNAPSHOT_H_
@@ -48,9 +50,25 @@ class Snapshot {
// successfully.
static bool WriteToFile(const char* snapshot_file);
+ static const byte* data() { return data_; }
+ static int size() { return size_; }
+ static int raw_size() { return raw_size_; }
+ static void set_raw_data(const byte* raw_data) {
+ raw_data_ = raw_data;
+ }
+ static const byte* context_data() { return context_data_; }
+ static int context_size() { return context_size_; }
+ static int context_raw_size() { return context_raw_size_; }
+ static void set_context_raw_data(
+ const byte* context_raw_data) {
+ context_raw_data_ = context_raw_data;
+ }
+
private:
static const byte data_[];
+ static const byte* raw_data_;
static const byte context_data_[];
+ static const byte* context_raw_data_;
static const int new_space_used_;
static const int pointer_space_used_;
static const int data_space_used_;
@@ -59,7 +77,9 @@ class Snapshot {
static const int cell_space_used_;
static const int large_space_used_;
static const int size_;
+ static const int raw_size_;
static const int context_size_;
+ static const int context_raw_size_;
static bool Deserialize(const byte* content, int len);
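
The raw_* members exist so the snapshot blob can be linked in packed form
and expanded once at startup; the caller is not part of this diff, so the
wiring below is a hedged sketch in which Decompress is a hypothetical
embedder-side helper:

    // Hypothetical startup path: expand the packed bytes, then install them
    // so Initialize() can call Deserialize(raw_data_, raw_size_) above.
    byte* raw = NewArray<byte>(Snapshot::raw_size());
    Decompress(Snapshot::data(), Snapshot::size(),   // packed, linked-in blob
               raw, Snapshot::raw_size());           // hypothetical helper
    Snapshot::set_raw_data(raw);
    // The context snapshot gets the same treatment via set_context_raw_data().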
diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h
index b5ee1e40e..070f97057 100644
--- a/deps/v8/src/spaces-inl.h
+++ b/deps/v8/src/spaces-inl.h
@@ -28,8 +28,9 @@
#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_
-#include "memory.h"
+#include "isolate.h"
#include "spaces.h"
+#include "v8memory.h"
namespace v8 {
namespace internal {
@@ -56,18 +57,18 @@ Page* PageIterator::next() {
// Page
Page* Page::next_page() {
- return MemoryAllocator::GetNextPage(this);
+ return heap_->isolate()->memory_allocator()->GetNextPage(this);
}
Address Page::AllocationTop() {
- PagedSpace* owner = MemoryAllocator::PageOwner(this);
+ PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
return owner->PageAllocationTop(this);
}
Address Page::AllocationWatermark() {
- PagedSpace* owner = MemoryAllocator::PageOwner(this);
+ PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
if (this == owner->AllocationTopPage()) {
return owner->top();
}
@@ -82,7 +83,7 @@ uint32_t Page::AllocationWatermarkOffset() {
void Page::SetAllocationWatermark(Address allocation_watermark) {
- if ((Heap::gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
+ if ((heap_->gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
// When iterating intergenerational references during scavenge
// we might decide to promote an encountered young object.
// We will allocate a space for such an object and put it
@@ -219,23 +220,26 @@ void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
}
-void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
- watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
+void Page::FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap) {
+ heap->page_watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
}
bool Page::IsWatermarkValid() {
- return (flags_ & (1 << WATERMARK_INVALIDATED)) != watermark_invalidated_mark_;
+ return (flags_ & (1 << WATERMARK_INVALIDATED)) !=
+ heap_->page_watermark_invalidated_mark_;
}
void Page::InvalidateWatermark(bool value) {
if (value) {
flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
- watermark_invalidated_mark_;
+ heap_->page_watermark_invalidated_mark_;
} else {
- flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
- (watermark_invalidated_mark_ ^ (1 << WATERMARK_INVALIDATED));
+ flags_ =
+ (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
+ (heap_->page_watermark_invalidated_mark_ ^
+ (1 << WATERMARK_INVALIDATED));
}
ASSERT(IsWatermarkValid() == !value);
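
FlipMeaningOfInvalidatedWatermarkFlag() is the notable trick in this hunk:
instead of visiting every page, the heap flips what the
WATERMARK_INVALIDATED bit denotes, invalidating all watermarks at once. The
same trick in isolation (generic names, no V8 types):

    // A page's watermark is valid iff its flag bit differs from the
    // heap-wide mark, so XOR-ing the mark invalidates every page in O(1).
    static const uintptr_t kBit = 1u << 3;       // stands in for the flag bit
    struct MiniHeap { uintptr_t invalidated_mark; };   // 0 or kBit
    static bool WatermarkValid(uintptr_t page_flags, const MiniHeap& heap) {
      return (page_flags & kBit) != heap.invalidated_mark;
    }
    static void InvalidateAllWatermarks(MiniHeap* heap) {
      heap->invalidated_mark ^= kBit;            // no per-page work needed
    }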
@@ -264,7 +268,7 @@ void Page::ClearPageFlags() {
void Page::ClearGCFields() {
InvalidateWatermark(true);
SetAllocationWatermark(ObjectAreaStart());
- if (Heap::gc_state() == Heap::SCAVENGE) {
+ if (heap_->gc_state() == Heap::SCAVENGE) {
SetCachedAllocationWatermark(ObjectAreaStart());
}
SetRegionMarks(kAllRegionsCleanMarks);
@@ -308,6 +312,7 @@ void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) {
size_ = s;
owner_ = o;
executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
+ owner_identity_ = (o == NULL) ? FIRST_SPACE : o->identity();
}
@@ -408,15 +413,7 @@ void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
bool PagedSpace::Contains(Address addr) {
Page* p = Page::FromAddress(addr);
if (!p->is_valid()) return false;
- return MemoryAllocator::IsPageInSpace(p, this);
-}
-
-
-bool PagedSpace::SafeContains(Address addr) {
- if (!MemoryAllocator::SafeIsInAPageChunk(addr)) return false;
- Page* p = Page::FromAddress(addr);
- if (!p->is_valid()) return false;
- return MemoryAllocator::IsPageInSpace(p, this);
+ return heap()->isolate()->memory_allocator()->IsPageInSpace(p, this);
}
@@ -477,7 +474,9 @@ Address LargeObjectChunk::GetStartAddress() {
void LargeObjectChunk::Free(Executability executable) {
- MemoryAllocator::FreeRawMemory(address(), size(), executable);
+ Isolate* isolate =
+ Page::FromAddress(RoundUp(address(), Page::kPageSize))->heap_->isolate();
+ isolate->memory_allocator()->FreeRawMemory(address(), size(), executable);
}
// -----------------------------------------------------------------------------
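
Free() takes no isolate, so it recovers one from the memory layout: rounding
the chunk address up to a page boundary lands inside the chunk's first page,
whose heap_ back pointer is assigned when the chunk is created (see the
LargeObjectChunk::New hunk in spaces.cc below). Condensed, with
chunk_address standing in for address():

    // Page::FromAddress masks down to a page boundary, so the rounded-up
    // chunk address resolves to the first real page of the chunk.
    Page* first = Page::FromAddress(RoundUp(chunk_address, Page::kPageSize));
    Isolate* isolate = first->heap_->isolate();   // heap_ set in New()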
@@ -501,6 +500,12 @@ MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
}
+intptr_t LargeObjectSpace::Available() {
+ return LargeObjectChunk::ObjectSizeFor(
+ heap()->isolate()->memory_allocator()->Available());
+}
+
+
template <typename StringType>
void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
ASSERT(length <= string->length());
@@ -514,9 +519,9 @@ void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
bool FreeListNode::IsFreeListNode(HeapObject* object) {
- return object->map() == Heap::raw_unchecked_byte_array_map()
- || object->map() == Heap::raw_unchecked_one_pointer_filler_map()
- || object->map() == Heap::raw_unchecked_two_pointer_filler_map();
+ return object->map() == HEAP->raw_unchecked_byte_array_map()
+ || object->map() == HEAP->raw_unchecked_one_pointer_filler_map()
+ || object->map() == HEAP->raw_unchecked_two_pointer_filler_map();
}
} } // namespace v8::internal
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index a586fbf90..23c87cd0c 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -42,8 +42,6 @@ namespace internal {
&& (info).top <= (space).high() \
&& (info).limit == (space).high())
-intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED;
-
// ----------------------------------------------------------------------------
// HeapObjectIterator
@@ -149,10 +147,14 @@ PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
// -----------------------------------------------------------------------------
// CodeRange
-List<CodeRange::FreeBlock> CodeRange::free_list_(0);
-List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
-int CodeRange::current_allocation_block_index_ = 0;
-VirtualMemory* CodeRange::code_range_ = NULL;
+
+CodeRange::CodeRange()
+ : code_range_(NULL),
+ free_list_(0),
+ allocation_list_(0),
+ current_allocation_block_index_(0),
+ isolate_(NULL) {
+}
bool CodeRange::Setup(const size_t requested) {
@@ -168,7 +170,7 @@ bool CodeRange::Setup(const size_t requested) {
// We are sure that we have mapped a block of requested addresses.
ASSERT(code_range_->size() == requested);
- LOG(NewEvent("CodeRange", code_range_->address(), requested));
+ LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
current_allocation_block_index_ = 0;
return true;
@@ -271,24 +273,24 @@ void CodeRange::TearDown() {
// -----------------------------------------------------------------------------
// MemoryAllocator
//
-intptr_t MemoryAllocator::capacity_ = 0;
-intptr_t MemoryAllocator::capacity_executable_ = 0;
-intptr_t MemoryAllocator::size_ = 0;
-intptr_t MemoryAllocator::size_executable_ = 0;
-
-List<MemoryAllocator::MemoryAllocationCallbackRegistration>
- MemoryAllocator::memory_allocation_callbacks_;
-
-VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;
// 270 is an estimate based on the static default heap size of a pair of 256K
// semispaces and a 64M old generation.
const int kEstimatedNumberOfChunks = 270;
-List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
- kEstimatedNumberOfChunks);
-List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
-int MemoryAllocator::max_nof_chunks_ = 0;
-int MemoryAllocator::top_ = 0;
+
+
+MemoryAllocator::MemoryAllocator()
+ : capacity_(0),
+ capacity_executable_(0),
+ size_(0),
+ size_executable_(0),
+ initial_chunk_(NULL),
+ chunks_(kEstimatedNumberOfChunks),
+ free_chunk_ids_(kEstimatedNumberOfChunks),
+ max_nof_chunks_(0),
+ top_(0),
+ isolate_(NULL) {
+}
void MemoryAllocator::Push(int free_chunk_id) {
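
These constructors replace the file-level statics deleted above and follow
the commit-wide recipe for isolate support: every process-global becomes an
instance member, reached through an Isolate and paired with an isolate_
back pointer for logging and counters. In miniature (sketch, assuming the
v8::internal context):

    // Before: one copy of the state per process.
    //   static intptr_t capacity_;            // inside class MemoryAllocator
    //   MemoryAllocator::AllocateRawMemory(...);
    // After: one copy per isolate, reached through the isolate.
    //   isolate->memory_allocator()->AllocateRawMemory(...);
    class MiniAllocator {
     public:
      MiniAllocator() : capacity_(0), isolate_(NULL) {}
     private:
      intptr_t capacity_;    // was a static; now per-isolate instance state
      Isolate* isolate_;     // back pointer, set when the isolate wires up
      DISALLOW_COPY_AND_ASSIGN(MiniAllocator);
    };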
@@ -334,11 +336,6 @@ bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
}
-bool MemoryAllocator::SafeIsInAPageChunk(Address addr) {
- return InInitialChunk(addr) || InAllocatedChunks(addr);
-}
-
-
void MemoryAllocator::TearDown() {
for (int i = 0; i < max_nof_chunks_; i++) {
if (chunks_[i].address() != NULL) DeleteChunk(i);
@@ -347,15 +344,11 @@ void MemoryAllocator::TearDown() {
free_chunk_ids_.Clear();
if (initial_chunk_ != NULL) {
- LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
+ LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address()));
delete initial_chunk_;
initial_chunk_ = NULL;
}
- FreeChunkTables(&chunk_table_[0],
- kChunkTableTopLevelEntries,
- kChunkTableLevels);
-
ASSERT(top_ == max_nof_chunks_); // all chunks are free
top_ = 0;
capacity_ = 0;
@@ -365,22 +358,6 @@ void MemoryAllocator::TearDown() {
}
-void MemoryAllocator::FreeChunkTables(uintptr_t* array, int len, int level) {
- for (int i = 0; i < len; i++) {
- if (array[i] != kUnusedChunkTableEntry) {
- uintptr_t* subarray = reinterpret_cast<uintptr_t*>(array[i]);
- if (level > 1) {
- array[i] = kUnusedChunkTableEntry;
- FreeChunkTables(subarray, 1 << kChunkTableBitsPerLevel, level - 1);
- } else {
- array[i] = kUnusedChunkTableEntry;
- }
- delete[] subarray;
- }
- }
-}
-
-
void* MemoryAllocator::AllocateRawMemory(const size_t requested,
size_t* allocated,
Executability executable) {
@@ -393,14 +370,15 @@ void* MemoryAllocator::AllocateRawMemory(const size_t requested,
// Check executable memory limit.
if (size_executable_ + requested >
static_cast<size_t>(capacity_executable_)) {
- LOG(StringEvent("MemoryAllocator::AllocateRawMemory",
+ LOG(isolate_,
+ StringEvent("MemoryAllocator::AllocateRawMemory",
"V8 Executable Allocation capacity exceeded"));
return NULL;
}
// Allocate executable memory either from code range or from the
// OS.
- if (CodeRange::exists()) {
- mem = CodeRange::AllocateRawMemory(requested, allocated);
+ if (isolate_->code_range()->exists()) {
+ mem = isolate_->code_range()->AllocateRawMemory(requested, allocated);
} else {
mem = OS::Allocate(requested, allocated, true);
}
@@ -415,7 +393,7 @@ void* MemoryAllocator::AllocateRawMemory(const size_t requested,
#ifdef DEBUG
ZapBlock(reinterpret_cast<Address>(mem), alloced);
#endif
- Counters::memory_allocated.Increment(alloced);
+ isolate_->counters()->memory_allocated()->Increment(alloced);
return mem;
}
@@ -426,12 +404,12 @@ void MemoryAllocator::FreeRawMemory(void* mem,
#ifdef DEBUG
ZapBlock(reinterpret_cast<Address>(mem), length);
#endif
- if (CodeRange::contains(static_cast<Address>(mem))) {
- CodeRange::FreeRawMemory(mem, length);
+ if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
+ isolate_->code_range()->FreeRawMemory(mem, length);
} else {
OS::Free(mem, length);
}
- Counters::memory_allocated.Decrement(static_cast<int>(length));
+ isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length));
size_ -= static_cast<int>(length);
if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
@@ -498,7 +476,8 @@ void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
// We are sure that we have mapped a block of requested addresses.
ASSERT(initial_chunk_->size() == requested);
- LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
+ LOG(isolate_,
+ NewEvent("InitialChunk", initial_chunk_->address(), requested));
size_ += static_cast<int>(requested);
return initial_chunk_->address();
}
@@ -522,14 +501,14 @@ Page* MemoryAllocator::AllocatePages(int requested_pages,
void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
if (chunk == NULL) return Page::FromAddress(NULL);
- LOG(NewEvent("PagedChunk", chunk, chunk_size));
+ LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
*allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
// We may 'lose' a page due to alignment.
ASSERT(*allocated_pages >= kPagesPerChunk - 1);
if (*allocated_pages == 0) {
FreeRawMemory(chunk, chunk_size, owner->executable());
- LOG(DeleteEvent("PagedChunk", chunk));
+ LOG(isolate_, DeleteEvent("PagedChunk", chunk));
return Page::FromAddress(NULL);
}
@@ -540,8 +519,6 @@ Page* MemoryAllocator::AllocatePages(int requested_pages,
PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
- AddToAllocatedChunks(static_cast<Address>(chunk), chunk_size);
-
return new_pages;
}
@@ -560,7 +537,7 @@ Page* MemoryAllocator::CommitPages(Address start, size_t size,
#ifdef DEBUG
ZapBlock(start, size);
#endif
- Counters::memory_allocated.Increment(static_cast<int>(size));
+ isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
// So long as we correctly overestimated the number of chunks we should not
// run out of chunk ids.
@@ -584,7 +561,7 @@ bool MemoryAllocator::CommitBlock(Address start,
#ifdef DEBUG
ZapBlock(start, size);
#endif
- Counters::memory_allocated.Increment(static_cast<int>(size));
+ isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
return true;
}
@@ -597,7 +574,7 @@ bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
ASSERT(InInitialChunk(start + size - 1));
if (!initial_chunk_->Uncommit(start, size)) return false;
- Counters::memory_allocated.Decrement(static_cast<int>(size));
+ isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
return true;
}
@@ -628,6 +605,7 @@ Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
Address page_addr = low;
for (int i = 0; i < pages_in_chunk; i++) {
Page* p = Page::FromAddress(page_addr);
+ p->heap_ = owner->heap();
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
p->InvalidateWatermark(true);
p->SetIsLargeObjectPage(false);
@@ -697,11 +675,11 @@ void MemoryAllocator::DeleteChunk(int chunk_id) {
// TODO(1240712): VirtualMemory::Uncommit has a return value which
// is ignored here.
initial_chunk_->Uncommit(c.address(), c.size());
- Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
+ Counters* counters = isolate_->counters();
+ counters->memory_allocated()->Decrement(static_cast<int>(c.size()));
} else {
- RemoveFromAllocatedChunks(c.address(), c.size());
- LOG(DeleteEvent("PagedChunk", c.address()));
- ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner()->identity());
+ LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
+ ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
size_t size = c.size();
FreeRawMemory(c.address(), size, c.executable());
PerformAllocationCallback(space, kAllocationActionFree, size);
@@ -813,131 +791,14 @@ Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
}
-void MemoryAllocator::AddToAllocatedChunks(Address addr, intptr_t size) {
- ASSERT(size == kChunkSize);
- uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
- AddChunkUsingAddress(int_address, int_address);
- AddChunkUsingAddress(int_address, int_address + size - 1);
-}
-
-
-void MemoryAllocator::AddChunkUsingAddress(uintptr_t chunk_start,
- uintptr_t chunk_index_base) {
- uintptr_t* fine_grained = AllocatedChunksFinder(
- chunk_table_,
- chunk_index_base,
- kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
- kCreateTablesAsNeeded);
- int index = FineGrainedIndexForAddress(chunk_index_base);
- if (fine_grained[index] != kUnusedChunkTableEntry) index++;
- ASSERT(fine_grained[index] == kUnusedChunkTableEntry);
- fine_grained[index] = chunk_start;
-}
-
-
-void MemoryAllocator::RemoveFromAllocatedChunks(Address addr, intptr_t size) {
- ASSERT(size == kChunkSize);
- uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
- RemoveChunkFoundUsingAddress(int_address, int_address);
- RemoveChunkFoundUsingAddress(int_address, int_address + size - 1);
-}
-
-
-void MemoryAllocator::RemoveChunkFoundUsingAddress(
- uintptr_t chunk_start,
- uintptr_t chunk_index_base) {
- uintptr_t* fine_grained = AllocatedChunksFinder(
- chunk_table_,
- chunk_index_base,
- kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
- kDontCreateTables);
- // Can't remove an entry that's not there.
- ASSERT(fine_grained != kUnusedChunkTableEntry);
- int index = FineGrainedIndexForAddress(chunk_index_base);
- ASSERT(fine_grained[index] != kUnusedChunkTableEntry);
- if (fine_grained[index] != chunk_start) {
- index++;
- ASSERT(fine_grained[index] == chunk_start);
- fine_grained[index] = kUnusedChunkTableEntry;
- } else {
- // If only one of the entries is used it must be the first, since
- // InAllocatedChunks relies on that. Move things around so that this is
- // the case.
- fine_grained[index] = fine_grained[index + 1];
- fine_grained[index + 1] = kUnusedChunkTableEntry;
- }
-}
-
-
-bool MemoryAllocator::InAllocatedChunks(Address addr) {
- uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
- uintptr_t* fine_grained = AllocatedChunksFinder(
- chunk_table_,
- int_address,
- kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
- kDontCreateTables);
- if (fine_grained == NULL) return false;
- int index = FineGrainedIndexForAddress(int_address);
- if (fine_grained[index] == kUnusedChunkTableEntry) return false;
- uintptr_t entry = fine_grained[index];
- if (entry <= int_address && entry + kChunkSize > int_address) return true;
- index++;
- if (fine_grained[index] == kUnusedChunkTableEntry) return false;
- entry = fine_grained[index];
- if (entry <= int_address && entry + kChunkSize > int_address) return true;
- return false;
-}
-
-
-uintptr_t* MemoryAllocator::AllocatedChunksFinder(
- uintptr_t* table,
- uintptr_t address,
- int bit_position,
- CreateTables create_as_needed) {
- if (bit_position == kChunkSizeLog2) {
- return table;
- }
- ASSERT(bit_position >= kChunkSizeLog2 + kChunkTableBitsPerLevel);
- int index =
- ((address >> bit_position) &
- ((V8_INTPTR_C(1) << kChunkTableBitsPerLevel) - 1));
- uintptr_t more_fine_grained_address =
- address & ((V8_INTPTR_C(1) << bit_position) - 1);
- ASSERT((table == chunk_table_ && index < kChunkTableTopLevelEntries) ||
- (table != chunk_table_ && index < 1 << kChunkTableBitsPerLevel));
- uintptr_t* more_fine_grained_table =
- reinterpret_cast<uintptr_t*>(table[index]);
- if (more_fine_grained_table == kUnusedChunkTableEntry) {
- if (create_as_needed == kDontCreateTables) return NULL;
- int words_needed = 1 << kChunkTableBitsPerLevel;
- if (bit_position == kChunkTableBitsPerLevel + kChunkSizeLog2) {
- words_needed =
- (1 << kChunkTableBitsPerLevel) * kChunkTableFineGrainedWordsPerEntry;
- }
- more_fine_grained_table = new uintptr_t[words_needed];
- for (int i = 0; i < words_needed; i++) {
- more_fine_grained_table[i] = kUnusedChunkTableEntry;
- }
- table[index] = reinterpret_cast<uintptr_t>(more_fine_grained_table);
- }
- return AllocatedChunksFinder(
- more_fine_grained_table,
- more_fine_grained_address,
- bit_position - kChunkTableBitsPerLevel,
- create_as_needed);
-}
-
-
-uintptr_t MemoryAllocator::chunk_table_[kChunkTableTopLevelEntries];
-
-
// -----------------------------------------------------------------------------
// PagedSpace implementation
-PagedSpace::PagedSpace(intptr_t max_capacity,
+PagedSpace::PagedSpace(Heap* heap,
+ intptr_t max_capacity,
AllocationSpace id,
Executability executable)
- : Space(id, executable) {
+ : Space(heap, id, executable) {
max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
* Page::kObjectAreaSize;
accounting_stats_.Clear();
@@ -958,15 +819,17 @@ bool PagedSpace::Setup(Address start, size_t size) {
// contain at least one page, ignore it and allocate instead.
int pages_in_chunk = PagesInChunk(start, size);
if (pages_in_chunk > 0) {
- first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
- Page::kPageSize * pages_in_chunk,
- this, &num_pages);
+ first_page_ = Isolate::Current()->memory_allocator()->CommitPages(
+ RoundUp(start, Page::kPageSize),
+ Page::kPageSize * pages_in_chunk,
+ this, &num_pages);
} else {
int requested_pages =
Min(MemoryAllocator::kPagesPerChunk,
static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
first_page_ =
- MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
+ Isolate::Current()->memory_allocator()->AllocatePages(
+ requested_pages, &num_pages, this);
if (!first_page_->is_valid()) return false;
}
@@ -999,7 +862,7 @@ bool PagedSpace::HasBeenSetup() {
void PagedSpace::TearDown() {
- MemoryAllocator::FreeAllPages(this);
+ Isolate::Current()->memory_allocator()->FreeAllPages(this);
first_page_ = NULL;
accounting_stats_.Clear();
}
@@ -1010,8 +873,9 @@ void PagedSpace::TearDown() {
void PagedSpace::Protect() {
Page* page = first_page_;
while (page->is_valid()) {
- MemoryAllocator::ProtectChunkFromPage(page);
- page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
+ Isolate::Current()->memory_allocator()->ProtectChunkFromPage(page);
+ page = Isolate::Current()->memory_allocator()->
+ FindLastPageInSameChunk(page)->next_page();
}
}
@@ -1019,8 +883,9 @@ void PagedSpace::Protect() {
void PagedSpace::Unprotect() {
Page* page = first_page_;
while (page->is_valid()) {
- MemoryAllocator::UnprotectChunkFromPage(page);
- page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
+ Isolate::Current()->memory_allocator()->UnprotectChunkFromPage(page);
+ page = Isolate::Current()->memory_allocator()->
+ FindLastPageInSameChunk(page)->next_page();
}
}
@@ -1038,7 +903,7 @@ void PagedSpace::MarkAllPagesClean() {
MaybeObject* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called before or after mark-compact GC
// because it accesses map pointers.
- ASSERT(!MarkCompactCollector::in_use());
+ ASSERT(!heap()->mark_compact_collector()->in_use());
if (!Contains(addr)) return Failure::Exception();
@@ -1158,13 +1023,14 @@ bool PagedSpace::Expand(Page* last_page) {
if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
- Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
+ Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
+ desired_pages, &desired_pages, this);
if (!p->is_valid()) return false;
accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
ASSERT(Capacity() <= max_capacity_);
- MemoryAllocator::SetNextPage(last_page, p);
+ heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);
  // Sequentially clear region marks of new pages and cache the
// new last page in the space.
@@ -1207,8 +1073,9 @@ void PagedSpace::Shrink() {
}
// Free pages after top_page.
- Page* p = MemoryAllocator::FreePages(top_page->next_page());
- MemoryAllocator::SetNextPage(top_page, p);
+ Page* p = heap()->isolate()->memory_allocator()->
+ FreePages(top_page->next_page());
+ heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);
// Find out how many pages we failed to free and update last_page_.
// Please note pages can only be freed in whole chunks.
@@ -1230,7 +1097,8 @@ bool PagedSpace::EnsureCapacity(int capacity) {
Page* last_page = AllocationTopPage();
Page* next_page = last_page->next_page();
while (next_page->is_valid()) {
- last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
+ last_page = heap()->isolate()->memory_allocator()->
+ FindLastPageInSameChunk(next_page);
next_page = last_page->next_page();
}
@@ -1239,7 +1107,8 @@ bool PagedSpace::EnsureCapacity(int capacity) {
if (!Expand(last_page)) return false;
ASSERT(last_page->next_page()->is_valid());
last_page =
- MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
+ heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
+ last_page->next_page());
} while (Capacity() < capacity);
return true;
@@ -1259,7 +1128,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// space.
ASSERT(allocation_info_.VerifyPagedAllocation());
Page* top_page = Page::FromAllocationTop(allocation_info_.top);
- ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
+ ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));
// Loop over all the pages.
bool above_allocation_top = false;
@@ -1284,7 +1153,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// be in map space.
Map* map = object->map();
ASSERT(map->IsMap());
- ASSERT(Heap::map_space()->Contains(map));
+ ASSERT(heap()->map_space()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
@@ -1320,8 +1189,8 @@ bool NewSpace::Setup(Address start, int size) {
// start and size. The provided space is divided into two semi-spaces.
// To support fast containment testing in the new space, the size of
// this chunk must be a power of two and it must be aligned to its size.
- int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
- int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();
+ int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
+ int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();
ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
ASSERT(IsPowerOf2(maximum_semispace_capacity));
@@ -1337,7 +1206,7 @@ bool NewSpace::Setup(Address start, int size) {
#undef SET_NAME
#endif
- ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
+ ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
ASSERT(IsAddressAligned(start, size, 0));
if (!to_space_.Setup(start,
@@ -1392,16 +1261,16 @@ void NewSpace::TearDown() {
#ifdef ENABLE_HEAP_PROTECTION
void NewSpace::Protect() {
- MemoryAllocator::Protect(ToSpaceLow(), Capacity());
- MemoryAllocator::Protect(FromSpaceLow(), Capacity());
+ heap()->isolate()->memory_allocator()->Protect(ToSpaceLow(), Capacity());
+ heap()->isolate()->memory_allocator()->Protect(FromSpaceLow(), Capacity());
}
void NewSpace::Unprotect() {
- MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
- to_space_.executable());
- MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
- from_space_.executable());
+ heap()->isolate()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(),
+ to_space_.executable());
+ heap()->isolate()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(),
+ from_space_.executable());
}
#endif
@@ -1495,7 +1364,7 @@ void NewSpace::Verify() {
// be in map space.
Map* map = object->map();
ASSERT(map->IsMap());
- ASSERT(Heap::map_space()->Contains(map));
+ ASSERT(heap()->map_space()->Contains(map));
// The object should not be code or a map.
ASSERT(!object->IsMap());
@@ -1520,7 +1389,8 @@ void NewSpace::Verify() {
bool SemiSpace::Commit() {
ASSERT(!is_committed());
- if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) {
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(
+ start_, capacity_, executable())) {
return false;
}
committed_ = true;
@@ -1530,7 +1400,8 @@ bool SemiSpace::Commit() {
bool SemiSpace::Uncommit() {
ASSERT(is_committed());
- if (!MemoryAllocator::UncommitBlock(start_, capacity_)) {
+ if (!heap()->isolate()->memory_allocator()->UncommitBlock(
+ start_, capacity_)) {
return false;
}
committed_ = false;
@@ -1576,7 +1447,8 @@ bool SemiSpace::Grow() {
int maximum_extra = maximum_capacity_ - capacity_;
int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
maximum_extra);
- if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(
+ high(), extra, executable())) {
return false;
}
capacity_ += extra;
@@ -1589,7 +1461,8 @@ bool SemiSpace::GrowTo(int new_capacity) {
ASSERT(new_capacity > capacity_);
size_t delta = new_capacity - capacity_;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(
+ high(), delta, executable())) {
return false;
}
capacity_ = new_capacity;
@@ -1602,7 +1475,8 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
ASSERT(new_capacity < capacity_);
size_t delta = capacity_ - new_capacity;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
+ if (!heap()->isolate()->memory_allocator()->UncommitBlock(
+ high() - delta, delta)) {
return false;
}
capacity_ = new_capacity;
@@ -1650,36 +1524,32 @@ void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
#ifdef DEBUG
-// A static array of histogram info for each type.
-static HistogramInfo heap_histograms[LAST_TYPE+1];
-static JSObject::SpillInformation js_spill_information;
-
 // heap_histograms is shared; always clear it before using it.
static void ClearHistograms() {
+ Isolate* isolate = Isolate::Current();
// We reset the name each time, though it hasn't changed.
-#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
+#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME
-#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
+#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM
- js_spill_information.Clear();
+ isolate->js_spill_information()->Clear();
}
-static int code_kind_statistics[Code::NUMBER_OF_KINDS];
-
-
static void ClearCodeKindStatistics() {
+ Isolate* isolate = Isolate::Current();
for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- code_kind_statistics[i] = 0;
+ isolate->code_kind_statistics()[i] = 0;
}
}
static void ReportCodeKindStatistics() {
+ Isolate* isolate = Isolate::Current();
const char* table[Code::NUMBER_OF_KINDS] = { NULL };
#define CASE(name) \
@@ -1698,8 +1568,8 @@ static void ReportCodeKindStatistics() {
CASE(KEYED_STORE_IC);
CASE(CALL_IC);
CASE(KEYED_CALL_IC);
+ CASE(UNARY_OP_IC);
CASE(BINARY_OP_IC);
- CASE(TYPE_RECORDING_BINARY_OP_IC);
CASE(COMPARE_IC);
}
}
@@ -1708,8 +1578,9 @@ static void ReportCodeKindStatistics() {
PrintF("\n Code kind histograms: \n");
for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- if (code_kind_statistics[i] > 0) {
- PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
+ if (isolate->code_kind_statistics()[i] > 0) {
+ PrintF(" %-20s: %10d bytes\n", table[i],
+ isolate->code_kind_statistics()[i]);
}
}
PrintF("\n");
@@ -1717,14 +1588,16 @@ static void ReportCodeKindStatistics() {
static int CollectHistogramInfo(HeapObject* obj) {
+ Isolate* isolate = Isolate::Current();
InstanceType type = obj->map()->instance_type();
ASSERT(0 <= type && type <= LAST_TYPE);
- ASSERT(heap_histograms[type].name() != NULL);
- heap_histograms[type].increment_number(1);
- heap_histograms[type].increment_bytes(obj->Size());
+ ASSERT(isolate->heap_histograms()[type].name() != NULL);
+ isolate->heap_histograms()[type].increment_number(1);
+ isolate->heap_histograms()[type].increment_bytes(obj->Size());
if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
- JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
+ JSObject::cast(obj)->IncrementSpillStatistics(
+ isolate->js_spill_information());
}
return obj->Size();
@@ -1732,13 +1605,14 @@ static int CollectHistogramInfo(HeapObject* obj) {
static void ReportHistogram(bool print_spill) {
+ Isolate* isolate = Isolate::Current();
PrintF("\n Object Histogram:\n");
for (int i = 0; i <= LAST_TYPE; i++) {
- if (heap_histograms[i].number() > 0) {
+ if (isolate->heap_histograms()[i].number() > 0) {
PrintF(" %-34s%10d (%10d bytes)\n",
- heap_histograms[i].name(),
- heap_histograms[i].number(),
- heap_histograms[i].bytes());
+ isolate->heap_histograms()[i].name(),
+ isolate->heap_histograms()[i].number(),
+ isolate->heap_histograms()[i].bytes());
}
}
PrintF("\n");
@@ -1747,8 +1621,8 @@ static void ReportHistogram(bool print_spill) {
int string_number = 0;
int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name) \
- string_number += heap_histograms[type].number(); \
- string_bytes += heap_histograms[type].bytes();
+ string_number += isolate->heap_histograms()[type].number(); \
+ string_bytes += isolate->heap_histograms()[type].bytes();
STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
if (string_number > 0) {
@@ -1757,7 +1631,7 @@ static void ReportHistogram(bool print_spill) {
}
if (FLAG_collect_heap_spill_statistics && print_spill) {
- js_spill_information.Print();
+ isolate->js_spill_information()->Print();
}
}
#endif // DEBUG
@@ -1786,8 +1660,9 @@ void NewSpace::CollectStatistics() {
#ifdef ENABLE_LOGGING_AND_PROFILING
-static void DoReportStatistics(HistogramInfo* info, const char* description) {
- LOG(HeapSampleBeginEvent("NewSpace", description));
+static void DoReportStatistics(Isolate* isolate,
+ HistogramInfo* info, const char* description) {
+ LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
// Lump all the string types together.
int string_number = 0;
int string_bytes = 0;
@@ -1797,17 +1672,19 @@ static void DoReportStatistics(HistogramInfo* info, const char* description) {
STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
if (string_number > 0) {
- LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
+ LOG(isolate,
+ HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
}
// Then do the other types.
for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
if (info[i].number() > 0) {
- LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
+ LOG(isolate,
+ HeapSampleItemEvent(info[i].name(), info[i].number(),
info[i].bytes()));
}
}
- LOG(HeapSampleEndEvent("NewSpace", description));
+ LOG(isolate, HeapSampleEndEvent("NewSpace", description));
}
#endif // ENABLE_LOGGING_AND_PROFILING
@@ -1834,8 +1711,9 @@ void NewSpace::ReportStatistics() {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_gc) {
- DoReportStatistics(allocated_histogram_, "allocated");
- DoReportStatistics(promoted_histogram_, "promoted");
+ Isolate* isolate = ISOLATE;
+ DoReportStatistics(isolate, allocated_histogram_, "allocated");
+ DoReportStatistics(isolate, promoted_histogram_, "promoted");
}
#endif // ENABLE_LOGGING_AND_PROFILING
}
@@ -1861,7 +1739,7 @@ void NewSpace::RecordPromotion(HeapObject* obj) {
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
-void FreeListNode::set_size(int size_in_bytes) {
+void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
ASSERT(size_in_bytes > 0);
ASSERT(IsAligned(size_in_bytes, kPointerSize));
@@ -1873,14 +1751,14 @@ void FreeListNode::set_size(int size_in_bytes) {
// field and a next pointer, we give it a filler map that gives it the
// correct size.
if (size_in_bytes > ByteArray::kHeaderSize) {
- set_map(Heap::raw_unchecked_byte_array_map());
+ set_map(heap->raw_unchecked_byte_array_map());
// Can't use ByteArray::cast because it fails during deserialization.
ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
} else if (size_in_bytes == kPointerSize) {
- set_map(Heap::raw_unchecked_one_pointer_filler_map());
+ set_map(heap->raw_unchecked_one_pointer_filler_map());
} else if (size_in_bytes == 2 * kPointerSize) {
- set_map(Heap::raw_unchecked_two_pointer_filler_map());
+ set_map(heap->raw_unchecked_two_pointer_filler_map());
} else {
UNREACHABLE();
}
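
The Heap* now threaded through set_size() is needed because the borrowed
maps live on a per-isolate heap, but the trick itself is unchanged: a free
block wears a map whose size protocol matches, so sweeps can step over free
space exactly like live objects. A hedged invariant check (not in the
patch):

    // The invariant the three branches above maintain: whatever map a free
    // block borrows, HeapObject::Size() must report the block's true extent.
    void CheckFreeBlock(FreeListNode* node, int size_in_bytes) {
      ASSERT(FreeListNode::IsFreeListNode(node));  // one of the three maps
      ASSERT(node->Size() == size_in_bytes);       // byte array encodes its
                                                   // length; fillers are fixed
    }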
@@ -1889,9 +1767,9 @@ void FreeListNode::set_size(int size_in_bytes) {
}
-Address FreeListNode::next() {
+Address FreeListNode::next(Heap* heap) {
ASSERT(IsFreeListNode(this));
- if (map() == Heap::raw_unchecked_byte_array_map()) {
+ if (map() == heap->raw_unchecked_byte_array_map()) {
ASSERT(Size() >= kNextOffset + kPointerSize);
return Memory::Address_at(address() + kNextOffset);
} else {
@@ -1900,9 +1778,9 @@ Address FreeListNode::next() {
}
-void FreeListNode::set_next(Address next) {
+void FreeListNode::set_next(Heap* heap, Address next) {
ASSERT(IsFreeListNode(this));
- if (map() == Heap::raw_unchecked_byte_array_map()) {
+ if (map() == heap->raw_unchecked_byte_array_map()) {
ASSERT(Size() >= kNextOffset + kPointerSize);
Memory::Address_at(address() + kNextOffset) = next;
} else {
@@ -1911,7 +1789,9 @@ void FreeListNode::set_next(Address next) {
}
-OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
+OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner)
+ : heap_(heap),
+ owner_(owner) {
Reset();
}
@@ -1943,10 +1823,10 @@ void OldSpaceFreeList::RebuildSizeList() {
int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
#ifdef DEBUG
- MemoryAllocator::ZapBlock(start, size_in_bytes);
+ Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes);
#endif
FreeListNode* node = FreeListNode::FromAddress(start);
- node->set_size(size_in_bytes);
+ node->set_size(heap_, size_in_bytes);
// We don't use the freelists in compacting mode. This makes it more like a
// GC that only has mark-sweep-compact and doesn't have a mark-sweep
@@ -1964,7 +1844,7 @@ int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
// Insert other blocks at the head of an exact free list.
int index = size_in_bytes >> kPointerSizeLog2;
- node->set_next(free_[index].head_node_);
+ node->set_next(heap_, free_[index].head_node_);
free_[index].head_node_ = node->address();
available_ += size_in_bytes;
needs_rebuild_ = true;
@@ -1983,7 +1863,8 @@ MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
if (free_[index].head_node_ != NULL) {
FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
// If this was the last block of its size, remove the size.
- if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
+ if ((free_[index].head_node_ = node->next(heap_)) == NULL)
+ RemoveSize(index);
available_ -= size_in_bytes;
*wasted_bytes = 0;
ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
@@ -2012,33 +1893,33 @@ MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
finger_ = prev;
free_[prev].next_size_ = rem;
// If this was the last block of size cur, remove the size.
- if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
+ if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
free_[rem].next_size_ = free_[cur].next_size_;
} else {
free_[rem].next_size_ = cur;
}
// Add the remainder block.
- rem_node->set_size(rem_bytes);
- rem_node->set_next(free_[rem].head_node_);
+ rem_node->set_size(heap_, rem_bytes);
+ rem_node->set_next(heap_, free_[rem].head_node_);
free_[rem].head_node_ = rem_node->address();
} else {
// If this was the last block of size cur, remove the size.
- if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
+ if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
finger_ = prev;
free_[prev].next_size_ = free_[cur].next_size_;
}
if (rem_bytes < kMinBlockSize) {
// Too-small remainder is wasted.
- rem_node->set_size(rem_bytes);
+ rem_node->set_size(heap_, rem_bytes);
available_ -= size_in_bytes + rem_bytes;
*wasted_bytes = rem_bytes;
return cur_node;
}
// Add the remainder block and, if needed, insert its size.
- rem_node->set_size(rem_bytes);
- rem_node->set_next(free_[rem].head_node_);
+ rem_node->set_size(heap_, rem_bytes);
+ rem_node->set_next(heap_, free_[rem].head_node_);
free_[rem].head_node_ = rem_node->address();
- if (rem_node->next() == NULL) InsertSize(rem);
+ if (rem_node->next(heap_) == NULL) InsertSize(rem);
}
available_ -= size_in_bytes;
*wasted_bytes = 0;
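
Context for the heap_ threading in Allocate() above: free_ is an array of
exact-size buckets (index == block size in words), next_size_ chains the
non-empty buckets in ascending order, and finger_ caches where the last
search stopped. A standalone toy of the same shape (invented names):

    // Toy model of the segregated exact-fit structure; no V8 types.
    struct Bucket {
      void* head_node;   // first free block of exactly this size, or NULL
      int next_size;     // index of the next non-empty (larger) bucket
    };
    struct ToySegregatedList {
      Bucket free_[512];   // bucket i holds blocks of i * kPointerSize bytes
      int finger_;         // search hint: last bucket a lookup walked past
    };
    // Allocate(n): take free_[n]'s head if present; otherwise follow
    // next_size links from finger_ upward to the first bucket >= n, split
    // that block, and file the remainder under its own exact size.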
@@ -2051,7 +1932,7 @@ void OldSpaceFreeList::MarkNodes() {
Address cur_addr = free_[i].head_node_;
while (cur_addr != NULL) {
FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
- cur_addr = cur_node->next();
+ cur_addr = cur_node->next(heap_);
cur_node->SetMark();
}
}
@@ -2065,7 +1946,7 @@ bool OldSpaceFreeList::Contains(FreeListNode* node) {
while (cur_addr != NULL) {
FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
if (cur_node == node) return true;
- cur_addr = cur_node->next();
+ cur_addr = cur_node->next(heap_);
}
}
return false;
@@ -2073,8 +1954,10 @@ bool OldSpaceFreeList::Contains(FreeListNode* node) {
#endif
-FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
- : owner_(owner), object_size_(object_size) {
+FixedSizeFreeList::FixedSizeFreeList(Heap* heap,
+ AllocationSpace owner,
+ int object_size)
+ : heap_(heap), owner_(owner), object_size_(object_size) {
Reset();
}
@@ -2087,17 +1970,17 @@ void FixedSizeFreeList::Reset() {
void FixedSizeFreeList::Free(Address start) {
#ifdef DEBUG
- MemoryAllocator::ZapBlock(start, object_size_);
+ Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_);
#endif
// We only use the freelists with mark-sweep.
- ASSERT(!MarkCompactCollector::IsCompacting());
+ ASSERT(!HEAP->mark_compact_collector()->IsCompacting());
FreeListNode* node = FreeListNode::FromAddress(start);
- node->set_size(object_size_);
- node->set_next(NULL);
+ node->set_size(heap_, object_size_);
+ node->set_next(heap_, NULL);
if (head_ == NULL) {
tail_ = head_ = node->address();
} else {
- FreeListNode::FromAddress(tail_)->set_next(node->address());
+ FreeListNode::FromAddress(tail_)->set_next(heap_, node->address());
tail_ = node->address();
}
available_ += object_size_;
@@ -2111,7 +1994,7 @@ MaybeObject* FixedSizeFreeList::Allocate() {
ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
FreeListNode* node = FreeListNode::FromAddress(head_);
- head_ = node->next();
+ head_ = node->next(heap_);
available_ -= object_size_;
return node;
}
@@ -2121,7 +2004,7 @@ void FixedSizeFreeList::MarkNodes() {
Address cur_addr = head_;
while (cur_addr != NULL && cur_addr != tail_) {
FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
- cur_addr = cur_node->next();
+ cur_addr = cur_node->next(heap_);
cur_node->SetMark();
}
}
@@ -2217,13 +2100,14 @@ void PagedSpace::FreePages(Page* prev, Page* last) {
first_page_ = last->next_page();
} else {
first = prev->next_page();
- MemoryAllocator::SetNextPage(prev, last->next_page());
+ heap()->isolate()->memory_allocator()->SetNextPage(
+ prev, last->next_page());
}
// Attach it after the last page.
- MemoryAllocator::SetNextPage(last_page_, first);
+ heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
last_page_ = last;
- MemoryAllocator::SetNextPage(last, NULL);
+ heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);
// Clean them up.
do {
@@ -2262,10 +2146,8 @@ void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
if (page_list_is_chunk_ordered_) return;
Page* new_last_in_use = Page::FromAddress(NULL);
- MemoryAllocator::RelinkPageListInChunkOrder(this,
- &first_page_,
- &last_page_,
- &new_last_in_use);
+ heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
+ this, &first_page_, &last_page_, &new_last_in_use);
ASSERT(new_last_in_use->is_valid());
if (new_last_in_use != last_in_use) {
@@ -2282,7 +2164,7 @@ void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
accounting_stats_.AllocateBytes(size_in_bytes);
DeallocateBlock(start, size_in_bytes, add_to_freelist);
} else {
- Heap::CreateFillerObjectAt(start, size_in_bytes);
+ heap()->CreateFillerObjectAt(start, size_in_bytes);
}
}
@@ -2309,7 +2191,7 @@ void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
accounting_stats_.AllocateBytes(size_in_bytes);
DeallocateBlock(start, size_in_bytes, add_to_freelist);
} else {
- Heap::CreateFillerObjectAt(start, size_in_bytes);
+ heap()->CreateFillerObjectAt(start, size_in_bytes);
}
}
}
@@ -2338,7 +2220,7 @@ bool PagedSpace::ReserveSpace(int bytes) {
int bytes_left_to_reserve = bytes;
while (bytes_left_to_reserve > 0) {
if (!reserved_page->next_page()->is_valid()) {
- if (Heap::OldGenerationAllocationLimitReached()) return false;
+ if (heap()->OldGenerationAllocationLimitReached()) return false;
Expand(reserved_page);
}
bytes_left_to_reserve -= Page::kPageSize;
@@ -2356,7 +2238,7 @@ bool PagedSpace::ReserveSpace(int bytes) {
// You have to call this last, since the implementation from PagedSpace
// doesn't know that memory was 'promised' to large object space.
bool LargeObjectSpace::ReserveSpace(int bytes) {
- return Heap::OldGenerationSpaceAvailable() >= bytes;
+ return heap()->OldGenerationSpaceAvailable() >= bytes;
}
@@ -2375,7 +2257,7 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
// There is no next page in this space. Try free list allocation unless that
// is currently forbidden.
- if (!Heap::linear_allocation()) {
+ if (!heap()->linear_allocation()) {
int wasted_bytes;
Object* result;
MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
@@ -2402,7 +2284,8 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection.
- if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
+ if (!heap()->always_allocate() &&
+ heap()->OldGenerationAllocationLimitReached()) {
return NULL;
}
@@ -2465,28 +2348,14 @@ void OldSpace::DeallocateBlock(Address start,
#ifdef DEBUG
-struct CommentStatistic {
- const char* comment;
- int size;
- int count;
- void Clear() {
- comment = NULL;
- size = 0;
- count = 0;
- }
-};
-
-
-// must be small, since an iteration is used for lookup
-const int kMaxComments = 64;
-static CommentStatistic comments_statistics[kMaxComments+1];
-
-
void PagedSpace::ReportCodeStatistics() {
+ Isolate* isolate = Isolate::Current();
+ CommentStatistic* comments_statistics =
+ isolate->paged_space_comments_statistics();
ReportCodeKindStatistics();
PrintF("Code comment statistics (\" [ comment-txt : size/ "
"count (average)\"):\n");
- for (int i = 0; i <= kMaxComments; i++) {
+ for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
const CommentStatistic& cs = comments_statistics[i];
if (cs.size > 0) {
PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
@@ -2498,23 +2367,30 @@ void PagedSpace::ReportCodeStatistics() {
void PagedSpace::ResetCodeStatistics() {
+ Isolate* isolate = Isolate::Current();
+ CommentStatistic* comments_statistics =
+ isolate->paged_space_comments_statistics();
ClearCodeKindStatistics();
- for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
- comments_statistics[kMaxComments].comment = "Unknown";
- comments_statistics[kMaxComments].size = 0;
- comments_statistics[kMaxComments].count = 0;
+ for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
+ comments_statistics[i].Clear();
+ }
+ comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
+ comments_statistics[CommentStatistic::kMaxComments].size = 0;
+ comments_statistics[CommentStatistic::kMaxComments].count = 0;
}
-// Adds comment to 'comment_statistics' table. Performance OK sa long as
+// Adds comment to 'comment_statistics' table. Performance OK as long as
// 'kMaxComments' is small
-static void EnterComment(const char* comment, int delta) {
+static void EnterComment(Isolate* isolate, const char* comment, int delta) {
+ CommentStatistic* comments_statistics =
+ isolate->paged_space_comments_statistics();
// Do not count empty comments
if (delta <= 0) return;
- CommentStatistic* cs = &comments_statistics[kMaxComments];
+ CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
// Search for a free or matching entry in 'comments_statistics': 'cs'
// points to result.
- for (int i = 0; i < kMaxComments; i++) {
+ for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
if (comments_statistics[i].comment == NULL) {
cs = &comments_statistics[i];
cs->comment = comment;
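
EnterComment() fills a fixed table by linear scan, with the final slot
acting as the "Unknown" overflow bucket set up in ResetCodeStatistics()
above. The scheme in isolation (matching by pointer identity here for
brevity; the real code may compare comment text):

    // Standalone model of the bounded statistics table (names invented).
    struct Stat { const char* comment; int size; int count; };
    static const int kMax = 64;             // CommentStatistic::kMaxComments
    static Stat table[kMax + 1];            // slot kMax aggregates overflow
    void Enter(const char* comment, int delta) {
      if (delta <= 0) return;               // empty comments are not counted
      Stat* s = &table[kMax];               // default: the "Unknown" bucket
      for (int i = 0; i < kMax; i++) {
        if (table[i].comment == NULL) {     // free slot: claim it
          s = &table[i];
          s->comment = comment;
          break;
        }
        if (table[i].comment == comment) {  // existing entry: reuse it
          s = &table[i];
          break;
        }
      }
      s->size += delta;
      s->count += 1;
    }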
@@ -2532,7 +2408,7 @@ static void EnterComment(const char* comment, int delta) {
// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']'). RelocIterator 'it' must point to a comment reloc info.
-static void CollectCommentStatistics(RelocIterator* it) {
+static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
ASSERT(!it->done());
ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
@@ -2557,13 +2433,13 @@ static void CollectCommentStatistics(RelocIterator* it) {
flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
if (txt[0] == ']') break; // End of nested comment
// A new comment
- CollectCommentStatistics(it);
+ CollectCommentStatistics(isolate, it);
// Skip code that was covered with previous comment
prev_pc = it->rinfo()->pc();
}
it->next();
}
- EnterComment(comment_txt, flat_delta);
+ EnterComment(isolate, comment_txt, flat_delta);
}
@@ -2571,18 +2447,19 @@ static void CollectCommentStatistics(RelocIterator* it) {
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
+ Isolate* isolate = heap()->isolate();
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
if (obj->IsCode()) {
Code* code = Code::cast(obj);
- code_kind_statistics[code->kind()] += code->Size();
+ isolate->code_kind_statistics()[code->kind()] += code->Size();
RelocIterator it(code);
int delta = 0;
const byte* prev_pc = code->instruction_start();
while (!it.done()) {
if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
- CollectCommentStatistics(&it);
+ CollectCommentStatistics(isolate, &it);
prev_pc = it.rinfo()->pc();
}
it.next();
@@ -2591,7 +2468,7 @@ void PagedSpace::CollectCodeStatistics() {
ASSERT(code->instruction_start() <= prev_pc &&
prev_pc <= code->instruction_end());
delta += static_cast<int>(code->instruction_end() - prev_pc);
- EnterComment("NoComment", delta);
+ EnterComment(isolate, "NoComment", delta);
}
}
}
@@ -2685,7 +2562,7 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
// There is no next page in this space. Try free list allocation unless
// that is currently forbidden. The fixed space free list implicitly assumes
// that all free blocks are of the fixed size.
- if (!Heap::linear_allocation()) {
+ if (!heap()->linear_allocation()) {
Object* result;
MaybeObject* maybe = free_list_.Allocate();
if (maybe->ToObject(&result)) {
@@ -2709,7 +2586,8 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection.
- if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
+ if (!heap()->always_allocate() &&
+ heap()->OldGenerationAllocationLimitReached()) {
return NULL;
}
@@ -2811,7 +2689,7 @@ void MapSpace::VerifyObject(HeapObject* object) {
void CellSpace::VerifyObject(HeapObject* object) {
// The object should be a global object property cell or a free-list node.
ASSERT(object->IsJSGlobalPropertyCell() ||
- object->map() == Heap::two_pointer_filler_map());
+ object->map() == heap()->two_pointer_filler_map());
}
#endif
@@ -2848,28 +2726,33 @@ LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
Executability executable) {
size_t requested = ChunkSizeFor(size_in_bytes);
size_t size;
- void* mem = MemoryAllocator::AllocateRawMemory(requested, &size, executable);
+ Isolate* isolate = Isolate::Current();
+ void* mem = isolate->memory_allocator()->AllocateRawMemory(
+ requested, &size, executable);
if (mem == NULL) return NULL;
  // The start of the chunk may be overlaid with a page, so we have to
// make sure that the page flags fit in the size field.
ASSERT((size & Page::kPageFlagMask) == 0);
- LOG(NewEvent("LargeObjectChunk", mem, size));
+ LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
if (size < requested) {
- MemoryAllocator::FreeRawMemory(mem, size, executable);
- LOG(DeleteEvent("LargeObjectChunk", mem));
+ isolate->memory_allocator()->FreeRawMemory(
+ mem, size, executable);
+ LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
return NULL;
}
ObjectSpace space = (executable == EXECUTABLE)
? kObjectSpaceCodeSpace
: kObjectSpaceLoSpace;
- MemoryAllocator::PerformAllocationCallback(
+ isolate->memory_allocator()->PerformAllocationCallback(
space, kAllocationActionAllocate, size);
LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
chunk->size_ = size;
+ Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
+ page->heap_ = isolate->heap();
return chunk;
}
@@ -2885,8 +2768,8 @@ int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
// -----------------------------------------------------------------------------
// LargeObjectSpace
-LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
- : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis
+LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
+ : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
first_chunk_(NULL),
size_(0),
page_count_(0),
@@ -2906,15 +2789,17 @@ void LargeObjectSpace::TearDown() {
while (first_chunk_ != NULL) {
LargeObjectChunk* chunk = first_chunk_;
first_chunk_ = first_chunk_->next();
- LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
+ LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address()));
Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
Executability executable =
page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
ObjectSpace space = kObjectSpaceLoSpace;
if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
size_t size = chunk->size();
- MemoryAllocator::FreeRawMemory(chunk->address(), size, executable);
- MemoryAllocator::PerformAllocationCallback(
+ heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
+ size,
+ executable);
+ heap()->isolate()->memory_allocator()->PerformAllocationCallback(
space, kAllocationActionFree, size);
}
@@ -2929,7 +2814,8 @@ void LargeObjectSpace::TearDown() {
void LargeObjectSpace::Protect() {
LargeObjectChunk* chunk = first_chunk_;
while (chunk != NULL) {
- MemoryAllocator::Protect(chunk->address(), chunk->size());
+ heap()->isolate()->memory_allocator()->Protect(chunk->address(),
+ chunk->size());
chunk = chunk->next();
}
}
@@ -2939,8 +2825,8 @@ void LargeObjectSpace::Unprotect() {
LargeObjectChunk* chunk = first_chunk_;
while (chunk != NULL) {
bool is_code = chunk->GetObject()->IsCode();
- MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
- is_code ? EXECUTABLE : NOT_EXECUTABLE);
+ heap()->isolate()->memory_allocator()->Unprotect(chunk->address(),
+ chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE);
chunk = chunk->next();
}
}
@@ -2955,7 +2841,8 @@ MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
- if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
+ if (!heap()->always_allocate() &&
+ heap()->OldGenerationAllocationLimitReached()) {
return Failure::RetryAfterGC(identity());
}
@@ -3060,22 +2947,22 @@ void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
// Iterate regions of the first normal page covering object.
uint32_t first_region_number = page->GetRegionNumberForAddress(start);
newmarks |=
- Heap::IterateDirtyRegions(marks >> first_region_number,
- start,
- end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object) << first_region_number;
+ heap()->IterateDirtyRegions(marks >> first_region_number,
+ start,
+ end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object) << first_region_number;
start = end;
end = start + Page::kPageSize;
while (end <= object_end) {
// Iterate next 32 regions.
newmarks |=
- Heap::IterateDirtyRegions(marks,
- start,
- end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object);
+ heap()->IterateDirtyRegions(marks,
+ start,
+ end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object);
start = end;
end = start + Page::kPageSize;
}
@@ -3084,11 +2971,11 @@ void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
// Iterate the last piece of an object which is less than
// Page::kPageSize.
newmarks |=
- Heap::IterateDirtyRegions(marks,
- start,
- object_end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object);
+ heap()->IterateDirtyRegions(marks,
+ start,
+ object_end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object);
}
page->SetRegionMarks(newmarks);
@@ -3105,7 +2992,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
HeapObject* object = current->GetObject();
if (object->IsMarked()) {
object->ClearMark();
- MarkCompactCollector::tracer()->decrement_marked_count();
+ heap()->mark_compact_collector()->tracer()->decrement_marked_count();
previous = current;
current = current->next();
} else {
@@ -3125,7 +3012,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
}
// Free the chunk.
- MarkCompactCollector::ReportDeleteIfNeeded(object);
+ heap()->mark_compact_collector()->ReportDeleteIfNeeded(
+ object, heap()->isolate());
LiveObjectList::ProcessNonLive(object);
size_ -= static_cast<int>(chunk_size);
@@ -3133,10 +3021,12 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
page_count_--;
ObjectSpace space = kObjectSpaceLoSpace;
if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
- MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable);
- MemoryAllocator::PerformAllocationCallback(space, kAllocationActionFree,
- size_);
- LOG(DeleteEvent("LargeObjectChunk", chunk_address));
+ heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
+ chunk_size,
+ executable);
+ heap()->isolate()->memory_allocator()->PerformAllocationCallback(
+ space, kAllocationActionFree, size_);
+ LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
}
}
}
@@ -3144,7 +3034,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
bool LargeObjectSpace::Contains(HeapObject* object) {
Address address = object->address();
- if (Heap::new_space()->Contains(address)) {
+ if (heap()->new_space()->Contains(address)) {
return false;
}
Page* page = Page::FromAddress(address);
@@ -3173,14 +3063,14 @@ void LargeObjectSpace::Verify() {
// in map space.
Map* map = object->map();
ASSERT(map->IsMap());
- ASSERT(Heap::map_space()->Contains(map));
+ ASSERT(heap()->map_space()->Contains(map));
// We have only code, sequential strings, external strings
// (sequential strings that have been morphed into external
// strings), fixed arrays, and byte arrays in large object space.
ASSERT(object->IsCode() || object->IsSeqString() ||
object->IsExternalString() || object->IsFixedArray() ||
- object->IsByteArray());
+ object->IsFixedDoubleArray() || object->IsByteArray());
// The object itself should look OK.
object->Verify();
@@ -3200,9 +3090,9 @@ void LargeObjectSpace::Verify() {
Object* element = array->get(j);
if (element->IsHeapObject()) {
HeapObject* element_object = HeapObject::cast(element);
- ASSERT(Heap::Contains(element_object));
+ ASSERT(heap()->Contains(element_object));
ASSERT(element_object->map()->IsMap());
- if (Heap::InNewSpace(element_object)) {
+ if (heap()->InNewSpace(element_object)) {
Address array_addr = object->address();
Address element_addr = array_addr + FixedArray::kHeaderSize +
j * kPointerSize;
@@ -3241,11 +3131,12 @@ void LargeObjectSpace::ReportStatistics() {
void LargeObjectSpace::CollectCodeStatistics() {
+ Isolate* isolate = heap()->isolate();
LargeObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
if (obj->IsCode()) {
Code* code = Code::cast(obj);
- code_kind_statistics[code->kind()] += code->Size();
+ isolate->code_kind_statistics()[code->kind()] += code->Size();
}
}
}
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 6165255fd..4024387cd 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,12 +28,15 @@
#ifndef V8_SPACES_H_
#define V8_SPACES_H_
-#include "list-inl.h"
+#include "allocation.h"
+#include "list.h"
#include "log.h"
namespace v8 {
namespace internal {
+class Isolate;
+
// -----------------------------------------------------------------------------
// Heap structures:
//
@@ -241,7 +244,7 @@ class Page {
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
- kIntSize + kPointerSize;
+ kIntSize + kPointerSize + kPointerSize;
// The start offset of the object area in a page. Aligned to both maps and
// code alignment to be suitable for both.
@@ -286,7 +289,7 @@ class Page {
// This invariant guarantees that after flipping flag meaning at the
// beginning of scavenge all pages in use will be marked as having valid
// watermark.
- static inline void FlipMeaningOfInvalidatedWatermarkFlag();
+ static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);
// Returns true if the page allocation watermark was not altered during
// scavenge.
@@ -312,11 +315,6 @@ class Page {
STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
kAllocationWatermarkOffsetBits);
- // This field contains the meaning of the WATERMARK_INVALIDATED flag.
- // Instead of clearing this flag from all pages we just flip
- // its meaning at the beginning of a scavenge.
- static intptr_t watermark_invalidated_mark_;
-
//---------------------------------------------------------------------------
// Page header description.
//
@@ -353,6 +351,8 @@ class Page {
// During scavenge collection this field is used to store allocation watermark
// if it is altered during scavenge.
Address mc_first_forwarded;
+
+ Heap* heap_;
};
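
The deleted watermark_invalidated_mark_ static was the global half of the trick the comments above describe: rather than clearing WATERMARK_INVALIDATED from every page, the collector flips what the flag means at the start of each scavenge, revalidating all pages in O(1). A minimal sketch of the idea with hypothetical names (the real mark now lives per heap):

  #include <cassert>

  struct PageSketch {
    static const int kWatermarkBit = 1 << 0;
    int flags = 0;

    // A watermark is valid while the page's bit differs from the current
    // "invalidated" meaning.
    bool WatermarkValid(int invalidated_meaning) const {
      return (flags & kWatermarkBit) != invalidated_meaning;
    }
  };

  int main() {
    int invalidated_meaning = 0;  // the per-heap mark (was a static)
    PageSketch page;              // bit == 0: currently counts as invalidated
    assert(!page.WatermarkValid(invalidated_meaning));

    // Beginning of scavenge: flip the meaning instead of touching pages;
    // every page's watermark instantly counts as valid again.
    invalidated_meaning ^= PageSketch::kWatermarkBit;
    assert(page.WatermarkValid(invalidated_meaning));
    return 0;
  }
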
@@ -360,11 +360,13 @@ class Page {
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
public:
- Space(AllocationSpace id, Executability executable)
- : id_(id), executable_(executable) {}
+ Space(Heap* heap, AllocationSpace id, Executability executable)
+ : heap_(heap), id_(id), executable_(executable) {}
virtual ~Space() {}
+ Heap* heap() const { return heap_; }
+
// Does the space need executable memory?
Executability executable() { return executable_; }
@@ -397,6 +399,7 @@ class Space : public Malloced {
virtual bool ReserveSpace(int bytes) = 0;
private:
+ Heap* heap_;
AllocationSpace id_;
Executability executable_;
};
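
This constructor change is the pivot of the whole patch: every Space now carries a Heap*, so the static Heap::...() calls rewritten throughout the spaces.cc hunks above become heap()->...(), and two isolates can own fully independent heaps. A minimal sketch of the access pattern, with a hypothetical one-field Heap:

  // Hypothetical Heap with a single query, to show the pattern.
  class Heap {
   public:
    bool linear_allocation() const { return linear_allocation_; }
   private:
    bool linear_allocation_ = false;
  };

  class Space {
   public:
    explicit Space(Heap* heap) : heap_(heap) {}
    Heap* heap() const { return heap_; }

    bool CanUseFreeList() const {
      // Before: !Heap::linear_allocation() consulted one process-wide heap.
      // After: each space asks the heap it belongs to, so isolates do not
      // observe each other's allocation state.
      return !heap()->linear_allocation();
    }

   private:
    Heap* heap_;
  };
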
@@ -409,19 +412,19 @@ class Space : public Malloced {
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
-class CodeRange : public AllStatic {
+class CodeRange {
public:
// Reserves a range of virtual memory, but does not commit any of it.
// Can only be called once, at heap initialization time.
// Returns false on failure.
- static bool Setup(const size_t requested_size);
+ bool Setup(const size_t requested_size);
// Frees the range of virtual memory, and frees the data structures used to
// manage it.
- static void TearDown();
+ void TearDown();
- static bool exists() { return code_range_ != NULL; }
- static bool contains(Address address) {
+ bool exists() { return code_range_ != NULL; }
+ bool contains(Address address) {
if (code_range_ == NULL) return false;
Address start = static_cast<Address>(code_range_->address());
return start <= address && address < start + code_range_->size();
@@ -430,13 +433,15 @@ class CodeRange : public AllStatic {
// Allocates a chunk of memory from the large-object portion of
// the code range. On platforms with no separate code range, should
// not be called.
- MUST_USE_RESULT static void* AllocateRawMemory(const size_t requested,
- size_t* allocated);
- static void FreeRawMemory(void* buf, size_t length);
+ MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
+ size_t* allocated);
+ void FreeRawMemory(void* buf, size_t length);
private:
+ CodeRange();
+
// The reserved range of virtual memory that all code objects are put in.
- static VirtualMemory* code_range_;
+ VirtualMemory* code_range_;
// Plain old data class, just a struct plus a constructor.
class FreeBlock {
public:
@@ -452,20 +457,26 @@ class CodeRange : public AllStatic {
// Freed blocks of memory are added to the free list. When the allocation
// list is exhausted, the free list is sorted and merged to make the new
// allocation list.
- static List<FreeBlock> free_list_;
+ List<FreeBlock> free_list_;
// Memory is allocated from the free blocks on the allocation list.
// The block at current_allocation_block_index_ is the current block.
- static List<FreeBlock> allocation_list_;
- static int current_allocation_block_index_;
+ List<FreeBlock> allocation_list_;
+ int current_allocation_block_index_;
// Finds a block on the allocation list that contains at least the
// requested amount of memory. If none is found, sorts and merges
// the existing free memory blocks, and searches again.
// If none can be found, terminates V8 with FatalProcessOutOfMemory.
- static void GetNextAllocationBlock(size_t requested);
+ void GetNextAllocationBlock(size_t requested);
// Compares the start addresses of two free blocks.
static int CompareFreeBlockAddress(const FreeBlock* left,
const FreeBlock* right);
+
+ friend class Isolate;
+
+ Isolate* isolate_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeRange);
};
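
The strategy spelled out in the comments above — serve from allocation_list_, and only when it runs dry sort free_list_ by address, coalesce neighbours, and adopt the result as the new allocation list — is easy to sketch outside V8 (hypothetical FreeBlock; the real GetNextAllocationBlock also terminates V8 on exhaustion):

  #include <algorithm>
  #include <cstddef>
  #include <vector>

  struct FreeBlock {
    char* start;
    size_t size;
  };

  // Sort free blocks by address and merge adjacent ones, mirroring what
  // CodeRange does when the allocation list is exhausted.
  std::vector<FreeBlock> SortAndMerge(std::vector<FreeBlock> free_list) {
    std::sort(free_list.begin(), free_list.end(),
              [](const FreeBlock& a, const FreeBlock& b) {
                return a.start < b.start;  // cf. CompareFreeBlockAddress
              });
    std::vector<FreeBlock> merged;
    for (const FreeBlock& block : free_list) {
      if (!merged.empty() &&
          merged.back().start + merged.back().size == block.start) {
        merged.back().size += block.size;  // coalesce with predecessor
      } else {
        merged.push_back(block);
      }
    }
    return merged;  // becomes the new allocation list
  }
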
@@ -493,14 +504,14 @@ class CodeRange : public AllStatic {
//
-class MemoryAllocator : public AllStatic {
+class MemoryAllocator {
public:
// Initializes its internal bookkeeping structures.
// Max capacity of the total space and executable memory limit.
- static bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
+ bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
// Deletes valid chunks.
- static void TearDown();
+ void TearDown();
// Reserves an initial address range of virtual memory to be split between
// the two new space semispaces, the old space, and the map space. The
@@ -511,7 +522,7 @@ class MemoryAllocator : public AllStatic {
// address of the initial chunk if successful, with the side effect of
// setting the initial chunk, or else NULL if unsuccessful and leaves the
// initial chunk NULL.
- static void* ReserveInitialChunk(const size_t requested);
+ void* ReserveInitialChunk(const size_t requested);
// Commits pages from an as-yet-unmanaged block of virtual memory into a
// paged space. The block should be part of the initial chunk reserved via
@@ -520,24 +531,24 @@ class MemoryAllocator : public AllStatic {
// address is non-null and that it is big enough to hold at least one
// page-aligned page. The call always succeeds, and num_pages is always
// greater than zero.
- static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
- int* num_pages);
+ Page* CommitPages(Address start, size_t size, PagedSpace* owner,
+ int* num_pages);
// Commit a contiguous block of memory from the initial chunk. Assumes that
// the address is not NULL, the size is greater than zero, and that the
// block is contained in the initial chunk. Returns true if it succeeded
// and false otherwise.
- static bool CommitBlock(Address start, size_t size, Executability executable);
+ bool CommitBlock(Address start, size_t size, Executability executable);
// Uncommit a contiguous block of memory [start..(start+size)[.
// start is not NULL, the size is greater than zero, and the
// block is contained in the initial chunk. Returns true if it succeeded
// and false otherwise.
- static bool UncommitBlock(Address start, size_t size);
+ bool UncommitBlock(Address start, size_t size);
// Zaps a contiguous block of memory [start..(start+size)[ thus
// filling it up with a recognizable non-NULL bit pattern.
- static void ZapBlock(Address start, size_t size);
+ void ZapBlock(Address start, size_t size);
// Attempts to allocate the requested (non-zero) number of pages from the
// OS. Fewer pages might be allocated than requested. If it fails to
@@ -548,8 +559,8 @@ class MemoryAllocator : public AllStatic {
// number of allocated pages is returned in the output parameter
// allocated_pages. If the PagedSpace owner is executable and there is
// a code range, the pages are allocated from the code range.
- static Page* AllocatePages(int requested_pages, int* allocated_pages,
- PagedSpace* owner);
+ Page* AllocatePages(int requested_pages, int* allocated_pages,
+ PagedSpace* owner);
// Frees pages from a given page and after. Requires pages to be
// linked in chunk-order (see comment for class).
@@ -558,10 +569,10 @@ class MemoryAllocator : public AllStatic {
// Otherwise, the function searches a page after 'p' that is
// the first page of a chunk. Pages after the found page
// are freed and the function returns 'p'.
- static Page* FreePages(Page* p);
+ Page* FreePages(Page* p);
// Frees all pages owned by given space.
- static void FreeAllPages(PagedSpace* space);
+ void FreeAllPages(PagedSpace* space);
// Allocates and frees raw memory of certain size.
// These are just thin wrappers around OS::Allocate and OS::Free,
@@ -569,96 +580,83 @@ class MemoryAllocator : public AllStatic {
// If the flag is EXECUTABLE and a code range exists, the requested
// memory is allocated from the code range. If a code range exists
// and the freed memory is in it, the code range manages the freed memory.
- MUST_USE_RESULT static void* AllocateRawMemory(const size_t requested,
- size_t* allocated,
- Executability executable);
- static void FreeRawMemory(void* buf,
- size_t length,
- Executability executable);
- static void PerformAllocationCallback(ObjectSpace space,
- AllocationAction action,
- size_t size);
-
- static void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action);
- static void RemoveMemoryAllocationCallback(
- MemoryAllocationCallback callback);
- static bool MemoryAllocationCallbackRegistered(
- MemoryAllocationCallback callback);
+ MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
+ size_t* allocated,
+ Executability executable);
+ void FreeRawMemory(void* buf,
+ size_t length,
+ Executability executable);
+ void PerformAllocationCallback(ObjectSpace space,
+ AllocationAction action,
+ size_t size);
+
+ void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+ ObjectSpace space,
+ AllocationAction action);
+ void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
+ bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
// Returns the maximum available bytes of heaps.
- static intptr_t Available() {
- return capacity_ < size_ ? 0 : capacity_ - size_;
- }
+ intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
// Returns allocated spaces in bytes.
- static intptr_t Size() { return size_; }
+ intptr_t Size() { return size_; }
// Returns the maximum available executable bytes of heaps.
- static intptr_t AvailableExecutable() {
+ intptr_t AvailableExecutable() {
if (capacity_executable_ < size_executable_) return 0;
return capacity_executable_ - size_executable_;
}
// Returns allocated executable spaces in bytes.
- static intptr_t SizeExecutable() { return size_executable_; }
+ intptr_t SizeExecutable() { return size_executable_; }
// Returns maximum available bytes that the old space can have.
- static intptr_t MaxAvailable() {
+ intptr_t MaxAvailable() {
return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
}
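
MaxAvailable() converts raw available bytes into usable object-area bytes, since every page spends a header before objects start. A worked example with hypothetical round numbers (V8's real constants differ):

  #include <cassert>
  #include <cstdint>

  int main() {
    const intptr_t kPageSize = 8192;                   // assumed 8 KB pages
    const intptr_t kObjectAreaSize = kPageSize - 256;  // assumed 256 B header

    intptr_t available = 10 * kPageSize;               // 81920 raw bytes
    intptr_t max_available = (available / kPageSize) * kObjectAreaSize;

    assert(max_available == 10 * 7936);  // 79360 bytes usable for objects
    return 0;
  }
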
- // Sanity check on a pointer.
- static bool SafeIsInAPageChunk(Address addr);
-
// Links two pages.
- static inline void SetNextPage(Page* prev, Page* next);
+ inline void SetNextPage(Page* prev, Page* next);
// Returns the next page of a given page.
- static inline Page* GetNextPage(Page* p);
+ inline Page* GetNextPage(Page* p);
// Checks whether a page belongs to a space.
- static inline bool IsPageInSpace(Page* p, PagedSpace* space);
+ inline bool IsPageInSpace(Page* p, PagedSpace* space);
// Returns the space that owns the given page.
- static inline PagedSpace* PageOwner(Page* page);
+ inline PagedSpace* PageOwner(Page* page);
// Finds the first/last page in the same chunk as a given page.
- static Page* FindFirstPageInSameChunk(Page* p);
- static Page* FindLastPageInSameChunk(Page* p);
+ Page* FindFirstPageInSameChunk(Page* p);
+ Page* FindLastPageInSameChunk(Page* p);
// Relinks list of pages owned by space to make it chunk-ordered.
// Returns new first and last pages of space.
// Also returns last page in relinked list which has WasInUsedBeforeMC
// flag set.
- static void RelinkPageListInChunkOrder(PagedSpace* space,
- Page** first_page,
- Page** last_page,
- Page** last_page_in_use);
+ void RelinkPageListInChunkOrder(PagedSpace* space,
+ Page** first_page,
+ Page** last_page,
+ Page** last_page_in_use);
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect a block of memory by marking it read-only/writable.
- static inline void Protect(Address start, size_t size);
- static inline void Unprotect(Address start, size_t size,
- Executability executable);
+ inline void Protect(Address start, size_t size);
+ inline void Unprotect(Address start, size_t size,
+ Executability executable);
// Protect/unprotect a chunk given a page in the chunk.
- static inline void ProtectChunkFromPage(Page* page);
- static inline void UnprotectChunkFromPage(Page* page);
+ inline void ProtectChunkFromPage(Page* page);
+ inline void UnprotectChunkFromPage(Page* page);
#endif
#ifdef DEBUG
// Reports statistic info of the space.
- static void ReportStatistics();
+ void ReportStatistics();
#endif
- static void AddToAllocatedChunks(Address addr, intptr_t size);
- static void RemoveFromAllocatedChunks(Address addr, intptr_t size);
- // Note: This only checks the regular chunks, not the odd-sized initial
- // chunk.
- static bool InAllocatedChunks(Address addr);
-
// Due to an encoding limitation, we can only have 8K chunks.
static const int kMaxNofChunks = 1 << kPageSizeBits;
// If a chunk has at least 16 pages, the maximum heap size is about
@@ -678,29 +676,21 @@ class MemoryAllocator : public AllStatic {
#endif
private:
+ MemoryAllocator();
+
static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;
- static const int kChunkTableTopLevelEntries =
- 1 << (sizeof(intptr_t) * kBitsPerByte - kChunkSizeLog2 -
- (kChunkTableLevels - 1) * kChunkTableBitsPerLevel);
-
- // The chunks are not chunk-size aligned so for a given chunk-sized area of
- // memory there can be two chunks that cover it.
- static const int kChunkTableFineGrainedWordsPerEntry = 2;
- static const uintptr_t kUnusedChunkTableEntry = 0;
// Maximum space size in bytes.
- static intptr_t capacity_;
+ intptr_t capacity_;
// Maximum subset of capacity_ that can be executable
- static intptr_t capacity_executable_;
-
- // Top level table to track whether memory is part of a chunk or not.
- static uintptr_t chunk_table_[kChunkTableTopLevelEntries];
+ intptr_t capacity_executable_;
// Allocated space size in bytes.
- static intptr_t size_;
+ intptr_t size_;
+
// Allocated executable space size in bytes.
- static intptr_t size_executable_;
+ intptr_t size_executable_;
struct MemoryAllocationCallbackRegistration {
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -713,11 +703,11 @@ class MemoryAllocator : public AllStatic {
AllocationAction action;
};
// A list of callbacks that are triggered when memory is allocated or freed
- static List<MemoryAllocationCallbackRegistration>
+ List<MemoryAllocationCallbackRegistration>
memory_allocation_callbacks_;
// The initial chunk of virtual memory.
- static VirtualMemory* initial_chunk_;
+ VirtualMemory* initial_chunk_;
// Allocated chunk info: chunk start address, chunk size, and owning space.
class ChunkInfo BASE_EMBEDDED {
@@ -725,7 +715,8 @@ class MemoryAllocator : public AllStatic {
ChunkInfo() : address_(NULL),
size_(0),
owner_(NULL),
- executable_(NOT_EXECUTABLE) {}
+ executable_(NOT_EXECUTABLE),
+ owner_identity_(FIRST_SPACE) {}
inline void init(Address a, size_t s, PagedSpace* o);
Address address() { return address_; }
size_t size() { return size_; }
@@ -733,74 +724,60 @@ class MemoryAllocator : public AllStatic {
// We save executability of the owner to allow using it
// when collecting stats after the owner has been destroyed.
Executability executable() const { return executable_; }
+ AllocationSpace owner_identity() const { return owner_identity_; }
private:
Address address_;
size_t size_;
PagedSpace* owner_;
Executability executable_;
+ AllocationSpace owner_identity_;
};
// Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
- static List<ChunkInfo> chunks_;
- static List<int> free_chunk_ids_;
- static int max_nof_chunks_;
- static int top_;
+ List<ChunkInfo> chunks_;
+ List<int> free_chunk_ids_;
+ int max_nof_chunks_;
+ int top_;
// Push/pop a free chunk id onto/from the stack.
- static void Push(int free_chunk_id);
- static int Pop();
- static bool OutOfChunkIds() { return top_ == 0; }
+ void Push(int free_chunk_id);
+ int Pop();
+ bool OutOfChunkIds() { return top_ == 0; }
// Frees a chunk.
- static void DeleteChunk(int chunk_id);
-
- // Helpers to maintain and query the chunk tables.
- static void AddChunkUsingAddress(
- uintptr_t chunk_start, // Where the chunk starts.
- uintptr_t chunk_index_base); // Used to place the chunk in the tables.
- static void RemoveChunkFoundUsingAddress(
- uintptr_t chunk_start, // Where the chunk starts.
- uintptr_t chunk_index_base); // Used to locate the entry in the tables.
- // Controls whether the lookup creates intermediate levels of tables as
- // needed.
- enum CreateTables { kDontCreateTables, kCreateTablesAsNeeded };
- static uintptr_t* AllocatedChunksFinder(uintptr_t* table,
- uintptr_t address,
- int bit_position,
- CreateTables create_as_needed);
- static void FreeChunkTables(uintptr_t* array, int length, int level);
- static int FineGrainedIndexForAddress(uintptr_t address) {
- int index = ((address >> kChunkSizeLog2) &
- ((1 << kChunkTableBitsPerLevel) - 1));
- return index * kChunkTableFineGrainedWordsPerEntry;
- }
-
+ void DeleteChunk(int chunk_id);
// Basic check whether a chunk id is in the valid range.
- static inline bool IsValidChunkId(int chunk_id);
+ inline bool IsValidChunkId(int chunk_id);
// Checks whether a chunk id identifies an allocated chunk.
- static inline bool IsValidChunk(int chunk_id);
+ inline bool IsValidChunk(int chunk_id);
// Returns the chunk id that a page belongs to.
- static inline int GetChunkId(Page* p);
+ inline int GetChunkId(Page* p);
// True if the address lies in the initial chunk.
- static inline bool InInitialChunk(Address address);
+ inline bool InInitialChunk(Address address);
// Initializes pages in a chunk. Returns the first page address.
// This function and GetChunkId() are provided for the mark-compact
// collector to rebuild page headers in the from space, which is
// used as a marking stack and its page headers are destroyed.
- static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
- PagedSpace* owner);
+ Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
+ PagedSpace* owner);
- static Page* RelinkPagesInChunk(int chunk_id,
- Address chunk_start,
- size_t chunk_size,
- Page* prev,
- Page** last_page_in_use);
+ Page* RelinkPagesInChunk(int chunk_id,
+ Address chunk_start,
+ size_t chunk_size,
+ Page* prev,
+ Page** last_page_in_use);
+
+ friend class Isolate;
+
+ Isolate* isolate_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
};
@@ -1048,7 +1025,8 @@ class AllocationStats BASE_EMBEDDED {
class PagedSpace : public Space {
public:
// Creates a space with a maximum capacity, and an id.
- PagedSpace(intptr_t max_capacity,
+ PagedSpace(Heap* heap,
+ intptr_t max_capacity,
AllocationSpace id,
Executability executable);
@@ -1341,7 +1319,7 @@ class HistogramInfo: public NumberAndSizeInfo {
class SemiSpace : public Space {
public:
// Constructor.
- SemiSpace() :Space(NEW_SPACE, NOT_EXECUTABLE) {
+ explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
start_ = NULL;
age_mark_ = NULL;
}
@@ -1508,7 +1486,10 @@ class SemiSpaceIterator : public ObjectIterator {
class NewSpace : public Space {
public:
// Constructor.
- NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}
+ explicit NewSpace(Heap* heap)
+ : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+ to_space_(heap),
+ from_space_(heap) {}
// Sets up the new space using the given chunk.
bool Setup(Address start, int size);
@@ -1741,11 +1722,11 @@ class FreeListNode: public HeapObject {
// function also writes a map to the first word of the block so that it
// looks like a heap object to the garbage collector and heap iteration
// functions.
- void set_size(int size_in_bytes);
+ void set_size(Heap* heap, int size_in_bytes);
// Accessors for the next field.
- inline Address next();
- inline void set_next(Address next);
+ inline Address next(Heap* heap);
+ inline void set_next(Heap* heap, Address next);
private:
static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
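
The set_size() comment above hides the neatest trick in this file: a free block masquerades as a real heap object (e.g. a ByteArray) by writing a map into its first word, so the GC and heap iterators can walk over free memory without special cases. A rough sketch of the invariant, with a hypothetical two-word header:

  #include <cassert>
  #include <cstdint>

  // Hypothetical object header: a map pointer, then a length field.
  struct FakeHeapObject {
    const void* map;  // tells the iterator what kind of object this is
    intptr_t length;  // lets the iterator skip the whole free block
  };

  const char kByteArrayMap[] = "byte-array-map";  // stand-in for a Map*

  void SetSize(FakeHeapObject* block, intptr_t size_in_bytes) {
    // Write a map into the first word so the iterator sees a valid object,
    // and a length so iteration advances exactly size_in_bytes.
    block->map = kByteArrayMap;
    block->length = size_in_bytes - sizeof(FakeHeapObject);
  }

  int main() {
    alignas(FakeHeapObject) char free_block[64];
    FakeHeapObject* node = reinterpret_cast<FakeHeapObject*>(free_block);
    SetSize(node, sizeof(free_block));
    assert(node->map == kByteArrayMap);
    assert(node->length + static_cast<intptr_t>(sizeof(FakeHeapObject)) == 64);
    return 0;
  }
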
@@ -1757,7 +1738,7 @@ class FreeListNode: public HeapObject {
// The free list for the old space.
class OldSpaceFreeList BASE_EMBEDDED {
public:
- explicit OldSpaceFreeList(AllocationSpace owner);
+ OldSpaceFreeList(Heap* heap, AllocationSpace owner);
// Clear the free list.
void Reset();
@@ -1787,6 +1768,8 @@ class OldSpaceFreeList BASE_EMBEDDED {
static const int kMinBlockSize = 2 * kPointerSize;
static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
+ Heap* heap_;
+
// The identity of the owning space, for building allocation Failure
// objects.
AllocationSpace owner_;
@@ -1861,7 +1844,7 @@ class OldSpaceFreeList BASE_EMBEDDED {
// The free list for the map space.
class FixedSizeFreeList BASE_EMBEDDED {
public:
- FixedSizeFreeList(AllocationSpace owner, int object_size);
+ FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);
// Clear the free list.
void Reset();
@@ -1882,6 +1865,9 @@ class FixedSizeFreeList BASE_EMBEDDED {
void MarkNodes();
private:
+
+ Heap* heap_;
+
// Available bytes on the free list.
intptr_t available_;
@@ -1909,10 +1895,12 @@ class OldSpace : public PagedSpace {
public:
// Creates an old space object with a given maximum capacity.
// The constructor does not allocate pages from OS.
- explicit OldSpace(intptr_t max_capacity,
- AllocationSpace id,
- Executability executable)
- : PagedSpace(max_capacity, id, executable), free_list_(id) {
+ OldSpace(Heap* heap,
+ intptr_t max_capacity,
+ AllocationSpace id,
+ Executability executable)
+ : PagedSpace(heap, max_capacity, id, executable),
+ free_list_(heap, id) {
page_extra_ = 0;
}
@@ -1981,14 +1969,15 @@ class OldSpace : public PagedSpace {
class FixedSpace : public PagedSpace {
public:
- FixedSpace(intptr_t max_capacity,
+ FixedSpace(Heap* heap,
+ intptr_t max_capacity,
AllocationSpace id,
int object_size_in_bytes,
const char* name)
- : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
+ : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
object_size_in_bytes_(object_size_in_bytes),
name_(name),
- free_list_(id, object_size_in_bytes) {
+ free_list_(heap, id, object_size_in_bytes) {
page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
}
@@ -2059,8 +2048,11 @@ class FixedSpace : public PagedSpace {
class MapSpace : public FixedSpace {
public:
// Creates a map space object with a maximum capacity.
- MapSpace(intptr_t max_capacity, int max_map_space_pages, AllocationSpace id)
- : FixedSpace(max_capacity, id, Map::kSize, "map"),
+ MapSpace(Heap* heap,
+ intptr_t max_capacity,
+ int max_map_space_pages,
+ AllocationSpace id)
+ : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
max_map_space_pages_(max_map_space_pages) {
ASSERT(max_map_space_pages < kMaxMapPageIndex);
}
@@ -2170,8 +2162,9 @@ class MapSpace : public FixedSpace {
class CellSpace : public FixedSpace {
public:
// Creates a property cell space object with a maximum capacity.
- CellSpace(intptr_t max_capacity, AllocationSpace id)
- : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
+ CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
+ : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
+ {}
protected:
#ifdef DEBUG
@@ -2246,7 +2239,7 @@ class LargeObjectChunk {
class LargeObjectSpace : public Space {
public:
- explicit LargeObjectSpace(AllocationSpace id);
+ LargeObjectSpace(Heap* heap, AllocationSpace id);
virtual ~LargeObjectSpace() {}
// Initializes internal data structures.
@@ -2263,9 +2256,7 @@ class LargeObjectSpace : public Space {
MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
// Available bytes for objects in this space.
- intptr_t Available() {
- return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
- }
+ inline intptr_t Available();
virtual intptr_t Size() {
return size_;
@@ -2357,6 +2348,22 @@ class LargeObjectIterator: public ObjectIterator {
};
+#ifdef DEBUG
+struct CommentStatistic {
+ const char* comment;
+ int size;
+ int count;
+ void Clear() {
+ comment = NULL;
+ size = 0;
+ count = 0;
+ }
+ // Must be small, since an iteration is used for lookup.
+ static const int kMaxComments = 64;
+};
+#endif
+
+
} } // namespace v8::internal
#endif // V8_SPACES_H_
diff --git a/deps/v8/src/splay-tree.h b/deps/v8/src/splay-tree.h
index c26527600..0cb9ea840 100644
--- a/deps/v8/src/splay-tree.h
+++ b/deps/v8/src/splay-tree.h
@@ -28,6 +28,8 @@
#ifndef V8_SPLAY_TREE_H_
#define V8_SPLAY_TREE_H_
+#include "allocation.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/string-search.cc b/deps/v8/src/string-search.cc
index 56874432f..3ae68b5d4 100644
--- a/deps/v8/src/string-search.cc
+++ b/deps/v8/src/string-search.cc
@@ -33,8 +33,9 @@ namespace internal {
// Storage for constants used by string-search.
-int StringSearchBase::kBadCharShiftTable[kUC16AlphabetSize];
-int StringSearchBase::kGoodSuffixShiftTable[kBMMaxShift + 1];
-int StringSearchBase::kSuffixTable[kBMMaxShift + 1];
+// Now in Isolate:
+// bad_char_shift_table()
+// good_suffix_shift_table()
+// suffix_table()
}} // namespace v8::internal
diff --git a/deps/v8/src/string-search.h b/deps/v8/src/string-search.h
index 5de3c0951..1223db0f9 100644
--- a/deps/v8/src/string-search.h
+++ b/deps/v8/src/string-search.h
@@ -44,7 +44,7 @@ class StringSearchBase {
// limit, we can fix the size of tables. For a needle longer than this limit,
// search will not be optimal, since we only build tables for a suffix
// of the string, but it is a safe approximation.
- static const int kBMMaxShift = 250;
+ static const int kBMMaxShift = Isolate::kBMMaxShift;
// Reduce alphabet to this size.
// One of the tables used by Boyer-Moore and Boyer-Moore-Horspool has size
@@ -54,7 +54,7 @@ class StringSearchBase {
// For needles using only characters in the same Unicode 256-code point page,
// there is no search speed degradation.
static const int kAsciiAlphabetSize = 128;
- static const int kUC16AlphabetSize = 256;
+ static const int kUC16AlphabetSize = Isolate::kUC16AlphabetSize;
// Bad-char shift table stored in the state. Its length is the alphabet size.
// For patterns below this length, the skip length of Boyer-Moore is too short
@@ -69,25 +69,16 @@ class StringSearchBase {
return String::IsAscii(string.start(), string.length());
}
- // The following tables are shared by all searches.
- // TODO(lrn): Introduce a way for a pattern to keep its tables
- // between searches (e.g., for an Atom RegExp).
-
- // Store for the BoyerMoore(Horspool) bad char shift table.
- static int kBadCharShiftTable[kUC16AlphabetSize];
- // Store for the BoyerMoore good suffix shift table.
- static int kGoodSuffixShiftTable[kBMMaxShift + 1];
- // Table used temporarily while building the BoyerMoore good suffix
- // shift table.
- static int kSuffixTable[kBMMaxShift + 1];
+ friend class Isolate;
};
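
The tables this class moves into Isolate drive a Boyer-Moore(-Horspool) search, and the kBMMaxShift comment explains the deliberate trade-off of capping them. To make the bad-character table's role concrete, here is the textbook Horspool search over bytes, uncapped and untuned (not V8's implementation):

  #include <cstring>
  #include <string>

  // On a mismatch, shift the pattern by the distance from the last
  // occurrence of the subject character in pattern[0..m-2] to the end of
  // the pattern (or the full pattern length if it never occurs).
  int Search(const std::string& subject, const std::string& pattern) {
    const int m = static_cast<int>(pattern.size());
    const int n = static_cast<int>(subject.size());
    if (m == 0) return 0;
    if (m > n) return -1;

    int bad_char_shift[256];
    for (int i = 0; i < 256; i++) bad_char_shift[i] = m;
    for (int i = 0; i < m - 1; i++) {
      bad_char_shift[static_cast<unsigned char>(pattern[i])] = m - 1 - i;
    }

    for (int pos = 0; pos + m <= n;) {
      if (memcmp(subject.data() + pos, pattern.data(), m) == 0) return pos;
      pos += bad_char_shift[static_cast<unsigned char>(subject[pos + m - 1])];
    }
    return -1;
  }

V8's twist, per the comments above, is that for needles longer than kBMMaxShift the tables only cover the pattern's suffix: shifts get shorter but remain safe.
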
template <typename PatternChar, typename SubjectChar>
class StringSearch : private StringSearchBase {
public:
- explicit StringSearch(Vector<const PatternChar> pattern)
- : pattern_(pattern),
+ StringSearch(Isolate* isolate, Vector<const PatternChar> pattern)
+ : isolate_(isolate),
+ pattern_(pattern),
start_(Max(0, pattern.length() - kBMMaxShift)) {
if (sizeof(PatternChar) > sizeof(SubjectChar)) {
if (!IsAsciiString(pattern_)) {
@@ -175,24 +166,33 @@ class StringSearch : private StringSearchBase {
return bad_char_occurrence[equiv_class];
}
+ // The following tables are shared by all searches.
+ // TODO(lrn): Introduce a way for a pattern to keep its tables
+ // between searches (e.g., for an Atom RegExp).
+
+ // Store for the BoyerMoore(Horspool) bad char shift table.
// Return a table covering the last kBMMaxShift+1 positions of
// pattern.
int* bad_char_table() {
- return kBadCharShiftTable;
+ return isolate_->bad_char_shift_table();
}
+ // Store for the BoyerMoore good suffix shift table.
int* good_suffix_shift_table() {
// Return a biased pointer that maps the range [start_..pattern_.length())
// to the kGoodSuffixShiftTable array.
- return kGoodSuffixShiftTable - start_;
+ return isolate_->good_suffix_shift_table() - start_;
}
+ // Table used temporarily while building the BoyerMoore good suffix
+ // shift table.
int* suffix_table() {
// Return a biased pointer that maps the range [start_..pattern_.length())
// to the kSuffixTable array.
- return kSuffixTable - start_;
+ return isolate_->suffix_table() - start_;
}
+ Isolate* isolate_;
// The pattern to search for.
Vector<const PatternChar> pattern_;
// Pointer to implementation of the search.
@@ -555,10 +555,11 @@ int StringSearch<PatternChar, SubjectChar>::InitialSearch(
// object should be constructed once and the Search function then called
// for each search.
template <typename SubjectChar, typename PatternChar>
-static int SearchString(Vector<const SubjectChar> subject,
+static int SearchString(Isolate* isolate,
+ Vector<const SubjectChar> subject,
Vector<const PatternChar> pattern,
int start_index) {
- StringSearch<PatternChar, SubjectChar> search(pattern);
+ StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
return search.Search(subject, start_index);
}
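
The comment above the template spells out the intended amortization: build the tables once per pattern, then call Search repeatedly. A hypothetical usage sketch using only the two entry points visible in this diff (the StringSearch constructor and Search):

  // Hypothetical usage; `isolate`, `pattern` and `subjects` are assumed to
  // exist. The tables are built once in the constructor, not per call.
  //
  //   StringSearch<uc16, uc16> search(isolate, pattern);  // tables built
  //   for (int i = 0; i < subjects.length(); i++) {
  //     int index = search.Search(subjects[i], 0);        // no rebuild
  //   }
  //
  // SearchString() is the one-shot convenience wrapper: it constructs a
  // fresh StringSearch per call, so it cannot amortize table construction.
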
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 7abd1bbe8..aea142042 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -34,9 +34,6 @@ namespace v8 {
namespace internal {
static const int kMentionedObjectCacheMaxSize = 256;
-static List<HeapObject*, PreallocatedStorage>* debug_object_cache = NULL;
-static Object* current_security_token = NULL;
-
char* HeapStringAllocator::allocate(unsigned bytes) {
space_ = NewArray<char>(bytes);
@@ -195,6 +192,8 @@ void StringStream::PrintObject(Object* o) {
return;
}
if (o->IsHeapObject()) {
+ DebugObjectCache* debug_object_cache = Isolate::Current()->
+ string_stream_debug_object_cache();
for (int i = 0; i < debug_object_cache->length(); i++) {
if ((*debug_object_cache)[i] == o) {
Add("#%d#", i);
@@ -260,7 +259,7 @@ SmartPointer<const char> StringStream::ToCString() const {
void StringStream::Log() {
- LOG(StringEvent("StackDump", buffer_));
+ LOG(ISOLATE, StringEvent("StackDump", buffer_));
}
@@ -281,22 +280,25 @@ void StringStream::OutputToFile(FILE* out) {
Handle<String> StringStream::ToString() {
- return Factory::NewStringFromUtf8(Vector<const char>(buffer_, length_));
+ return FACTORY->NewStringFromUtf8(Vector<const char>(buffer_, length_));
}
void StringStream::ClearMentionedObjectCache() {
- current_security_token = NULL;
- if (debug_object_cache == NULL) {
- debug_object_cache = new List<HeapObject*, PreallocatedStorage>(0);
+ Isolate* isolate = Isolate::Current();
+ isolate->set_string_stream_current_security_token(NULL);
+ if (isolate->string_stream_debug_object_cache() == NULL) {
+ isolate->set_string_stream_debug_object_cache(
+ new List<HeapObject*, PreallocatedStorage>(0));
}
- debug_object_cache->Clear();
+ isolate->string_stream_debug_object_cache()->Clear();
}
#ifdef DEBUG
bool StringStream::IsMentionedObjectCacheClear() {
- return (debug_object_cache->length() == 0);
+ return (
+ Isolate::Current()->string_stream_debug_object_cache()->length() == 0);
}
#endif
@@ -338,7 +340,7 @@ void StringStream::PrintName(Object* name) {
void StringStream::PrintUsingMap(JSObject* js_object) {
Map* map = js_object->map();
- if (!Heap::Contains(map) ||
+ if (!HEAP->Contains(map) ||
!map->IsHeapObject() ||
!map->IsMap()) {
Add("<Invalid map>\n");
@@ -375,9 +377,10 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
void StringStream::PrintFixedArray(FixedArray* array, unsigned int limit) {
+ Heap* heap = HEAP;
for (unsigned int i = 0; i < 10 && i < limit; i++) {
Object* element = array->get(i);
- if (element != Heap::the_hole_value()) {
+ if (element != heap->the_hole_value()) {
for (int len = 1; len < 18; len++)
Put(' ');
Add("%d: %o\n", i, array->get(i));
@@ -412,6 +415,8 @@ void StringStream::PrintByteArray(ByteArray* byte_array) {
void StringStream::PrintMentionedObjectCache() {
+ DebugObjectCache* debug_object_cache =
+ Isolate::Current()->string_stream_debug_object_cache();
Add("==== Key ============================================\n\n");
for (int i = 0; i < debug_object_cache->length(); i++) {
HeapObject* printee = (*debug_object_cache)[i];
@@ -444,12 +449,14 @@ void StringStream::PrintMentionedObjectCache() {
void StringStream::PrintSecurityTokenIfChanged(Object* f) {
- if (!f->IsHeapObject() || !Heap::Contains(HeapObject::cast(f))) {
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+ if (!f->IsHeapObject() || !heap->Contains(HeapObject::cast(f))) {
return;
}
Map* map = HeapObject::cast(f)->map();
if (!map->IsHeapObject() ||
- !Heap::Contains(map) ||
+ !heap->Contains(map) ||
!map->IsMap() ||
!f->IsJSFunction()) {
return;
@@ -458,17 +465,17 @@ void StringStream::PrintSecurityTokenIfChanged(Object* f) {
JSFunction* fun = JSFunction::cast(f);
Object* perhaps_context = fun->unchecked_context();
if (perhaps_context->IsHeapObject() &&
- Heap::Contains(HeapObject::cast(perhaps_context)) &&
+ heap->Contains(HeapObject::cast(perhaps_context)) &&
perhaps_context->IsContext()) {
Context* context = fun->context();
- if (!Heap::Contains(context)) {
+ if (!heap->Contains(context)) {
Add("(Function context is outside heap)\n");
return;
}
Object* token = context->global_context()->security_token();
- if (token != current_security_token) {
+ if (token != isolate->string_stream_current_security_token()) {
Add("Security context: %o\n", token);
- current_security_token = token;
+ isolate->set_string_stream_current_security_token(token);
}
} else {
Add("(Function context is corrupt)\n");
@@ -478,8 +485,8 @@ void StringStream::PrintSecurityTokenIfChanged(Object* f) {
void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) {
if (f->IsHeapObject() &&
- Heap::Contains(HeapObject::cast(f)) &&
- Heap::Contains(HeapObject::cast(f)->map()) &&
+ HEAP->Contains(HeapObject::cast(f)) &&
+ HEAP->Contains(HeapObject::cast(f)->map()) &&
HeapObject::cast(f)->map()->IsMap()) {
if (f->IsJSFunction()) {
JSFunction* fun = JSFunction::cast(f);
@@ -506,11 +513,11 @@ void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) {
Add("/* warning: 'function' was not a heap object */ ");
return;
}
- if (!Heap::Contains(HeapObject::cast(f))) {
+ if (!HEAP->Contains(HeapObject::cast(f))) {
Add("/* warning: 'function' was not on the heap */ ");
return;
}
- if (!Heap::Contains(HeapObject::cast(f)->map())) {
+ if (!HEAP->Contains(HeapObject::cast(f)->map())) {
Add("/* warning: function's map was not on the heap */ ");
return;
}
@@ -526,10 +533,11 @@ void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) {
void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
Object* name = fun->shared()->name();
bool print_name = false;
- for (Object* p = receiver; p != Heap::null_value(); p = p->GetPrototype()) {
+ Heap* heap = HEAP;
+ for (Object* p = receiver; p != heap->null_value(); p = p->GetPrototype()) {
if (p->IsJSObject()) {
Object* key = JSObject::cast(p)->SlowReverseLookup(fun);
- if (key != Heap::undefined_value()) {
+ if (key != heap->undefined_value()) {
if (!name->IsString() ||
!key->IsString() ||
!String::cast(name)->Equals(String::cast(key))) {
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 2b73e0f6e..bed211a03 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -62,6 +62,10 @@ function StringValueOf() {
// ECMA-262, section 15.5.4.4
function StringCharAt(pos) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.charAt"]);
+ }
var result = %_StringCharAt(this, pos);
if (%_IsSmi(result)) {
result = %_StringCharAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
@@ -72,6 +76,10 @@ function StringCharAt(pos) {
// ECMA-262 section 15.5.4.5
function StringCharCodeAt(pos) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.charCodeAt"]);
+ }
var result = %_StringCharCodeAt(this, pos);
if (!%_IsSmi(result)) {
result = %_StringCharCodeAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
@@ -82,12 +90,15 @@ function StringCharCodeAt(pos) {
// ECMA-262, section 15.5.4.6
function StringConcat() {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined", ["String.prototype.concat"]);
+ }
var len = %_ArgumentsLength();
var this_as_string = TO_STRING_INLINE(this);
if (len === 1) {
return this_as_string + %_Arguments(0);
}
- var parts = new $Array(len + 1);
+ var parts = new InternalArray(len + 1);
parts[0] = this_as_string;
for (var i = 0; i < len; i++) {
var part = %_Arguments(i);
@@ -102,6 +113,10 @@ function StringConcat() {
// ECMA-262 section 15.5.4.7
function StringIndexOf(pattern /* position */) { // length == 1
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.indexOf"]);
+ }
var subject = TO_STRING_INLINE(this);
pattern = TO_STRING_INLINE(pattern);
var index = 0;
@@ -117,6 +132,10 @@ function StringIndexOf(pattern /* position */) { // length == 1
// ECMA-262 section 15.5.4.8
function StringLastIndexOf(pat /* position */) { // length == 1
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.lastIndexOf"]);
+ }
var sub = TO_STRING_INLINE(this);
var subLength = sub.length;
var pat = TO_STRING_INLINE(pat);
@@ -146,6 +165,10 @@ function StringLastIndexOf(pat /* position */) { // length == 1
// This function is implementation specific. For now, we do not
// do anything locale specific.
function StringLocaleCompare(other) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.localeCompare"]);
+ }
if (%_ArgumentsLength() === 0) return 0;
return %StringLocaleCompare(TO_STRING_INLINE(this),
TO_STRING_INLINE(other));
@@ -154,6 +177,10 @@ function StringLocaleCompare(other) {
// ECMA-262 section 15.5.4.10
function StringMatch(regexp) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.match"]);
+ }
var subject = TO_STRING_INLINE(this);
if (IS_REGEXP(regexp)) {
if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
@@ -187,6 +214,10 @@ var reusableMatchInfo = [2, "", "", -1, -1];
// ECMA-262, section 15.5.4.11
function StringReplace(search, replace) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.replace"]);
+ }
var subject = TO_STRING_INLINE(this);
// Delegate to one of the regular expression variants if necessary.
@@ -357,7 +388,7 @@ function addCaptureString(builder, matchInfo, index) {
// TODO(lrn): This array will survive indefinitely if replace is never
// called again. However, it will be empty, since the contents are cleared
// in the finally block.
-var reusableReplaceArray = $Array(16);
+var reusableReplaceArray = new InternalArray(16);
// Helper function for replacing regular expressions with the result of a
// function application in String.prototype.replace.
@@ -370,7 +401,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
// of another replace) or we have failed to set the reusable array
// back due to an exception in a replacement function. Create a new
// array to use in the future, or until the original is written back.
- resultArray = $Array(16);
+ resultArray = new InternalArray(16);
}
var res = %RegExpExecMultiple(regexp,
subject,
@@ -386,7 +417,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
var i = 0;
if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
var match_start = 0;
- var override = [null, 0, subject];
+ var override = new InternalArray(null, 0, subject);
var receiver = %GetGlobalReceiver();
while (i < len) {
var elem = res[i];
@@ -447,7 +478,7 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
replacement =
%_CallFunction(%GetGlobalReceiver(), s, index, subject, replace);
} else {
- var parameters = $Array(m + 2);
+ var parameters = new InternalArray(m + 2);
for (var j = 0; j < m; j++) {
parameters[j] = CaptureString(subject, matchInfo, j);
}
@@ -467,6 +498,10 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
// ECMA-262 section 15.5.4.12
function StringSearch(re) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.search"]);
+ }
var regexp;
if (IS_STRING(re)) {
regexp = %_GetFromCache(STRING_TO_REGEXP_CACHE_ID, re);
@@ -485,6 +520,10 @@ function StringSearch(re) {
// ECMA-262 section 15.5.4.13
function StringSlice(start, end) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.slice"]);
+ }
var s = TO_STRING_INLINE(this);
var s_len = s.length;
var start_i = TO_INTEGER(start);
@@ -520,6 +559,10 @@ function StringSlice(start, end) {
// ECMA-262 section 15.5.4.14
function StringSplit(separator, limit) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.split"]);
+ }
var subject = TO_STRING_INLINE(this);
limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
if (limit === 0) return [];
@@ -613,6 +656,10 @@ function StringSplit(separator, limit) {
// ECMA-262 section 15.5.4.15
function StringSubstring(start, end) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.subString"]);
+ }
var s = TO_STRING_INLINE(this);
var s_len = s.length;
@@ -646,6 +693,10 @@ function StringSubstring(start, end) {
// This is not a part of ECMA-262.
function StringSubstr(start, n) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.substr"]);
+ }
var s = TO_STRING_INLINE(this);
var len;
@@ -686,41 +737,69 @@ function StringSubstr(start, n) {
// ECMA-262, 15.5.4.16
function StringToLowerCase() {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.toLowerCase"]);
+ }
return %StringToLowerCase(TO_STRING_INLINE(this));
}
// ECMA-262, 15.5.4.17
function StringToLocaleLowerCase() {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.toLocaleLowerCase"]);
+ }
return %StringToLowerCase(TO_STRING_INLINE(this));
}
// ECMA-262, 15.5.4.18
function StringToUpperCase() {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.toUpperCase"]);
+ }
return %StringToUpperCase(TO_STRING_INLINE(this));
}
// ECMA-262, 15.5.4.19
function StringToLocaleUpperCase() {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.toLocaleUpperCase"]);
+ }
return %StringToUpperCase(TO_STRING_INLINE(this));
}
// ES5, 15.5.4.20
function StringTrim() {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.trim"]);
+ }
return %StringTrim(TO_STRING_INLINE(this), true, true);
}
function StringTrimLeft() {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.trimLeft"]);
+ }
return %StringTrim(TO_STRING_INLINE(this), true, false);
}
function StringTrimRight() {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["String.prototype.trimRight"]);
+ }
return %StringTrim(TO_STRING_INLINE(this), false, true);
}
-var static_charcode_array = new $Array(4);
+var static_charcode_array = new InternalArray(4);
// ECMA-262, section 15.5.3.2
function StringFromCharCode(code) {
@@ -825,7 +904,7 @@ function ReplaceResultBuilder(str) {
if (%_ArgumentsLength() > 1) {
this.elements = %_Arguments(1);
} else {
- this.elements = new $Array();
+ this.elements = new InternalArray();
}
this.special_string = str;
}
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 360f0b743..d5392d95b 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,6 +29,7 @@
#include "api.h"
#include "arguments.h"
+#include "code-stubs.h"
#include "gdb-jit.h"
#include "ic-inl.h"
#include "stub-cache.h"
@@ -41,8 +42,12 @@ namespace internal {
// StubCache implementation.
-StubCache::Entry StubCache::primary_[StubCache::kPrimaryTableSize];
-StubCache::Entry StubCache::secondary_[StubCache::kSecondaryTableSize];
+StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {
+ ASSERT(isolate == Isolate::Current());
+ memset(primary_, 0, sizeof(primary_[0]) * StubCache::kPrimaryTableSize);
+ memset(secondary_, 0, sizeof(secondary_[0]) * StubCache::kSecondaryTableSize);
+}
+
void StubCache::Initialize(bool create_heap_objects) {
ASSERT(IsPowerOf2(kPrimaryTableSize));
@@ -60,7 +65,7 @@ Code* StubCache::Set(String* name, Map* map, Code* code) {
// Validate that the name does not move on scavenge, and that we
// can use identity checks instead of string equality checks.
- ASSERT(!Heap::InNewSpace(name));
+ ASSERT(!heap()->InNewSpace(name));
ASSERT(name->IsSymbol());
// The state bits are not important to the hash function because
@@ -80,7 +85,7 @@ Code* StubCache::Set(String* name, Map* map, Code* code) {
// If the primary entry has useful data in it, we retire it to the
// secondary cache before overwriting it.
- if (hit != Builtins::builtin(Builtins::Illegal)) {
+ if (hit != isolate_->builtins()->builtin(Builtins::kIllegal)) {
Code::Flags primary_flags = Code::RemoveTypeFromFlags(hit->flags());
int secondary_offset =
SecondaryOffset(primary->key, primary_flags, primary_offset);
@@ -104,10 +109,10 @@ MaybeObject* StubCache::ComputeLoadNonexistent(String* name,
// there are global objects involved, we need to check global
// property cells in the stub and therefore the stub will be
// specific to the name.
- String* cache_name = Heap::empty_string();
+ String* cache_name = heap()->empty_string();
if (receiver->IsGlobalObject()) cache_name = name;
JSObject* last = receiver;
- while (last->GetPrototype() != Heap::null_value()) {
+ while (last->GetPrototype() != heap()->null_value()) {
last = JSObject::cast(last->GetPrototype());
if (last->IsGlobalObject()) cache_name = name;
}
@@ -122,7 +127,8 @@ MaybeObject* StubCache::ComputeLoadNonexistent(String* name,
compiler.CompileLoadNonexistent(cache_name, receiver, last);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), cache_name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), cache_name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, cache_name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -147,7 +153,8 @@ MaybeObject* StubCache::ComputeLoadField(String* name,
compiler.CompileLoadField(receiver, holder, field_index, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -173,7 +180,8 @@ MaybeObject* StubCache::ComputeLoadCallback(String* name,
compiler.CompileLoadCallback(name, receiver, holder, callback);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -199,7 +207,8 @@ MaybeObject* StubCache::ComputeLoadConstant(String* name,
compiler.CompileLoadConstant(receiver, holder, value, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -223,7 +232,8 @@ MaybeObject* StubCache::ComputeLoadInterceptor(String* name,
compiler.CompileLoadInterceptor(receiver, holder, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -236,7 +246,7 @@ MaybeObject* StubCache::ComputeLoadInterceptor(String* name,
MaybeObject* StubCache::ComputeLoadNormal() {
- return Builtins::builtin(Builtins::LoadIC_Normal);
+ return isolate_->builtins()->builtin(Builtins::kLoadIC_Normal);
}
@@ -257,7 +267,8 @@ MaybeObject* StubCache::ComputeLoadGlobal(String* name,
is_dont_delete);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -282,7 +293,8 @@ MaybeObject* StubCache::ComputeKeyedLoadField(String* name,
compiler.CompileLoadField(name, receiver, holder, field_index);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -308,7 +320,8 @@ MaybeObject* StubCache::ComputeKeyedLoadConstant(String* name,
compiler.CompileLoadConstant(name, receiver, holder, value);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -333,7 +346,8 @@ MaybeObject* StubCache::ComputeKeyedLoadInterceptor(String* name,
compiler.CompileLoadInterceptor(receiver, holder, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -359,7 +373,8 @@ MaybeObject* StubCache::ComputeKeyedLoadCallback(String* name,
compiler.CompileLoadCallback(name, receiver, holder, callback);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -383,7 +398,8 @@ MaybeObject* StubCache::ComputeKeyedLoadArrayLength(String* name,
{ MaybeObject* maybe_code = compiler.CompileLoadArrayLength(name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -406,7 +422,8 @@ MaybeObject* StubCache::ComputeKeyedLoadStringLength(String* name,
{ MaybeObject* maybe_code = compiler.CompileLoadStringLength(name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result = map->UpdateCodeCache(name, Code::cast(code));
@@ -428,7 +445,8 @@ MaybeObject* StubCache::ComputeKeyedLoadFunctionPrototype(
{ MaybeObject* maybe_code = compiler.CompileLoadFunctionPrototype(name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -440,60 +458,6 @@ MaybeObject* StubCache::ComputeKeyedLoadFunctionPrototype(
}
-MaybeObject* StubCache::ComputeKeyedLoadSpecialized(JSObject* receiver) {
- // Using NORMAL as the PropertyType for array element loads is a misuse. The
- // generated stub always accesses fast elements, not slow-mode fields, but
- // some property type is required for the stub lookup. Note that overloading
- // the NORMAL PropertyType is only safe as long as no stubs are generated for
- // other keyed field loads. This is guaranteed to be the case since all field
- // keyed loads that are not array elements go through a generic builtin stub.
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
- String* name = Heap::KeyedLoadSpecialized_symbol();
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code = compiler.CompileLoadSpecialized(receiver);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeKeyedLoadPixelArray(JSObject* receiver) {
- // Using NORMAL as the PropertyType for array element loads is a misuse. The
- // generated stub always accesses fast elements, not slow-mode fields, but
- // some property type is required for the stub lookup. Note that overloading
- // the NORMAL PropertyType is only safe as long as no stubs are generated for
- // other keyed field loads. This is guaranteed to be the case since all field
- // keyed loads that are not array elements go through a generic builtin stub.
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
- String* name = Heap::KeyedLoadPixelArray_symbol();
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedLoadStubCompiler compiler;
- { MaybeObject* maybe_code = compiler.CompileLoadPixelArray(receiver);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
MaybeObject* StubCache::ComputeStoreField(String* name,
JSObject* receiver,
int field_index,
@@ -509,7 +473,8 @@ MaybeObject* StubCache::ComputeStoreField(String* name,
compiler.CompileStoreField(receiver, field_index, transition, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -521,132 +486,56 @@ MaybeObject* StubCache::ComputeStoreField(String* name,
}
-MaybeObject* StubCache::ComputeKeyedStoreSpecialized(
- JSObject* receiver,
- StrictModeFlag strict_mode) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL, strict_mode);
- String* name = Heap::KeyedStoreSpecialized_symbol();
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedStoreStubCompiler compiler(strict_mode);
- { MaybeObject* maybe_code = compiler.CompileStoreSpecialized(receiver);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeKeyedStorePixelArray(
- JSObject* receiver,
- StrictModeFlag strict_mode) {
- // Using NORMAL as the PropertyType for array element stores is a misuse. The
- // generated stub always accesses fast elements, not slow-mode fields, but
- // some property type is required for the stub lookup. Note that overloading
- // the NORMAL PropertyType is only safe as long as no stubs are generated for
- // other keyed field stores. This is guaranteed to be the case since all field
- // keyed stores that are not array elements go through a generic builtin stub.
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL, strict_mode);
- String* name = Heap::KeyedStorePixelArray_symbol();
- Object* code = receiver->map()->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- KeyedStoreStubCompiler compiler(strict_mode);
- { MaybeObject* maybe_code = compiler.CompileStorePixelArray(receiver);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- PROFILE(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0));
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- }
- return code;
-}
-
-
-namespace {
-
-ExternalArrayType ElementsKindToExternalArrayType(JSObject::ElementsKind kind) {
- switch (kind) {
- case JSObject::EXTERNAL_BYTE_ELEMENTS:
- return kExternalByteArray;
- case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- return kExternalUnsignedByteArray;
- case JSObject::EXTERNAL_SHORT_ELEMENTS:
- return kExternalShortArray;
- case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- return kExternalUnsignedShortArray;
- case JSObject::EXTERNAL_INT_ELEMENTS:
- return kExternalIntArray;
- case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
- return kExternalUnsignedIntArray;
- case JSObject::EXTERNAL_FLOAT_ELEMENTS:
- return kExternalFloatArray;
- default:
- UNREACHABLE();
- return static_cast<ExternalArrayType>(0);
- }
-}
-
-} // anonymous namespace
-
-
-MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray(
+MaybeObject* StubCache::ComputeKeyedLoadOrStoreElement(
JSObject* receiver,
bool is_store,
StrictModeFlag strict_mode) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(
- is_store ? Code::KEYED_STORE_IC : Code::KEYED_LOAD_IC,
+ is_store ? Code::KEYED_STORE_IC :
+ Code::KEYED_LOAD_IC,
NORMAL,
strict_mode);
- ExternalArrayType array_type =
- ElementsKindToExternalArrayType(receiver->GetElementsKind());
- String* name =
- is_store ? Heap::KeyedStoreExternalArray_symbol()
- : Heap::KeyedLoadExternalArray_symbol();
- // Use the global maps for the particular external array types,
- // rather than the receiver's map, when looking up the cached code,
- // so that we actually canonicalize these stubs.
- Map* map = Heap::MapForExternalArrayType(array_type);
- Object* code = map->FindInCodeCache(name, flags);
- if (code->IsUndefined()) {
- ExternalArrayStubCompiler compiler;
- { MaybeObject* maybe_code = is_store
- ? compiler.CompileKeyedStoreStub(array_type, flags)
- : compiler.CompileKeyedLoadStub(array_type, flags);
- if (!maybe_code->ToObject(&code)) return maybe_code;
- }
- if (is_store) {
- PROFILE(
- CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0));
- } else {
- PROFILE(
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
- }
- Object* result;
- { MaybeObject* maybe_result =
- map->UpdateCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ String* name = is_store
+ ? isolate()->heap()->KeyedStoreElementMonomorphic_symbol()
+ : isolate()->heap()->KeyedLoadElementMonomorphic_symbol();
+ Object* maybe_code = receiver->map()->FindInCodeCache(name, flags);
+ if (!maybe_code->IsUndefined()) return Code::cast(maybe_code);
+
+ MaybeObject* maybe_new_code = NULL;
+ Map* receiver_map = receiver->map();
+ if (is_store) {
+ KeyedStoreStubCompiler compiler(strict_mode);
+ maybe_new_code = compiler.CompileStoreElement(receiver_map);
+ } else {
+ KeyedLoadStubCompiler compiler;
+ maybe_new_code = compiler.CompileLoadElement(receiver_map);
+ }
+ Code* code;
+ if (!maybe_new_code->To(&code)) return maybe_new_code;
+ if (is_store) {
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
+ Code::cast(code), 0));
+ } else {
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
+ Code::cast(code), 0));
+ }
+ ASSERT(code->IsCode());
+ Object* result;
+ { MaybeObject* maybe_result =
+ receiver->UpdateMapCodeCache(name, Code::cast(code));
+ if (!maybe_result->ToObject(&result)) return maybe_result;
}
return code;
}
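
Like every Compute* helper in this file, the function above follows one lookup-or-compile idiom: probe a code cache, compile the stub on a miss, and record it before returning so the next request hits. A schematic sketch of the idiom, with a plain std::map standing in for the per-map code cache:

#include <map>

typedef int Flags;
struct Code {};

static Code* Compile(Flags flags) {  // stand-in for the stub compilers
  (void)flags;
  return new Code();
}

static std::map<Flags, Code*> code_cache;

Code* ComputeStub(Flags flags) {
  std::map<Flags, Code*>::iterator it = code_cache.find(flags);
  if (it != code_cache.end()) return it->second;  // hit: reuse the stub
  Code* code = Compile(flags);                    // miss: compile once
  code_cache[flags] = code;                       // cache before returning
  return code;
}
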
MaybeObject* StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
- return Builtins::builtin((strict_mode == kStrictMode)
- ? Builtins::StoreIC_Normal_Strict
- : Builtins::StoreIC_Normal);
+ return isolate_->builtins()->builtin((strict_mode == kStrictMode)
+ ? Builtins::kStoreIC_Normal_Strict
+ : Builtins::kStoreIC_Normal);
}
@@ -663,7 +552,8 @@ MaybeObject* StubCache::ComputeStoreGlobal(String* name,
compiler.CompileStoreGlobal(receiver, cell, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -690,7 +580,8 @@ MaybeObject* StubCache::ComputeStoreCallback(
compiler.CompileStoreCallback(receiver, callback, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -715,7 +606,8 @@ MaybeObject* StubCache::ComputeStoreInterceptor(
compiler.CompileStoreInterceptor(receiver, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate_,
+ CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -742,8 +634,9 @@ MaybeObject* StubCache::ComputeKeyedStoreField(String* name,
compiler.CompileStoreField(receiver, field_index, transition, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(CodeCreateEvent(
- Logger::KEYED_STORE_IC_TAG, Code::cast(code), name));
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
+ Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
@@ -802,7 +695,8 @@ MaybeObject* StubCache::ComputeCallConstant(int argc,
}
Code::cast(code)->set_check_type(check);
ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
+ PROFILE(isolate_,
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
Object* result;
@@ -818,6 +712,7 @@ MaybeObject* StubCache::ComputeCallConstant(int argc,
MaybeObject* StubCache::ComputeCallField(int argc,
InLoopFlag in_loop,
Code::Kind kind,
+ Code::ExtraICState extra_ic_state,
String* name,
Object* object,
JSObject* holder,
@@ -836,14 +731,14 @@ MaybeObject* StubCache::ComputeCallField(int argc,
Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
FIELD,
- Code::kNoExtraICState,
+ extra_ic_state,
cache_holder,
in_loop,
argc);
Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(
- argc, in_loop, kind, Code::kNoExtraICState, cache_holder);
+ argc, in_loop, kind, extra_ic_state, cache_holder);
{ MaybeObject* maybe_code =
compiler.CompileCallField(JSObject::cast(object),
holder,
@@ -852,7 +747,8 @@ MaybeObject* StubCache::ComputeCallField(int argc,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
+ PROFILE(isolate_,
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
Object* result;
@@ -865,11 +761,13 @@ MaybeObject* StubCache::ComputeCallField(int argc,
}
-MaybeObject* StubCache::ComputeCallInterceptor(int argc,
- Code::Kind kind,
- String* name,
- Object* object,
- JSObject* holder) {
+MaybeObject* StubCache::ComputeCallInterceptor(
+ int argc,
+ Code::Kind kind,
+ Code::ExtraICState extra_ic_state,
+ String* name,
+ Object* object,
+ JSObject* holder) {
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(object, holder);
@@ -884,20 +782,21 @@ MaybeObject* StubCache::ComputeCallInterceptor(int argc,
Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
INTERCEPTOR,
- Code::kNoExtraICState,
+ extra_ic_state,
cache_holder,
NOT_IN_LOOP,
argc);
Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(
- argc, NOT_IN_LOOP, kind, Code::kNoExtraICState, cache_holder);
+ argc, NOT_IN_LOOP, kind, extra_ic_state, cache_holder);
{ MaybeObject* maybe_code =
compiler.CompileCallInterceptor(JSObject::cast(object), holder, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
Object* result;
@@ -913,10 +812,12 @@ MaybeObject* StubCache::ComputeCallInterceptor(int argc,
MaybeObject* StubCache::ComputeCallNormal(int argc,
InLoopFlag in_loop,
Code::Kind kind,
+ Code::ExtraICState extra_ic_state,
String* name,
JSObject* receiver) {
Object* code;
- { MaybeObject* maybe_code = ComputeCallNormal(argc, in_loop, kind);
+ { MaybeObject* maybe_code =
+ ComputeCallNormal(argc, in_loop, kind, extra_ic_state);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
return code;
@@ -926,6 +827,7 @@ MaybeObject* StubCache::ComputeCallNormal(int argc,
MaybeObject* StubCache::ComputeCallGlobal(int argc,
InLoopFlag in_loop,
Code::Kind kind,
+ Code::ExtraICState extra_ic_state,
String* name,
JSObject* receiver,
GlobalObject* holder,
@@ -936,7 +838,7 @@ MaybeObject* StubCache::ComputeCallGlobal(int argc,
JSObject* map_holder = IC::GetCodeCacheHolder(receiver, cache_holder);
Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
NORMAL,
- Code::kNoExtraICState,
+ extra_ic_state,
cache_holder,
in_loop,
argc);
@@ -948,13 +850,14 @@ MaybeObject* StubCache::ComputeCallGlobal(int argc,
// caches.
if (!function->is_compiled()) return Failure::InternalError();
CallStubCompiler compiler(
- argc, in_loop, kind, Code::kNoExtraICState, cache_holder);
+ argc, in_loop, kind, extra_ic_state, cache_holder);
{ MaybeObject* maybe_code =
compiler.CompileCallGlobal(receiver, holder, cell, function, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
Object* result;
@@ -967,45 +870,48 @@ MaybeObject* StubCache::ComputeCallGlobal(int argc,
}
-static Object* GetProbeValue(Code::Flags flags) {
+static Object* GetProbeValue(Isolate* isolate, Code::Flags flags) {
// Use raw_unchecked... so we don't get assert failures during GC.
- NumberDictionary* dictionary = Heap::raw_unchecked_non_monomorphic_cache();
- int entry = dictionary->FindEntry(flags);
+ NumberDictionary* dictionary =
+ isolate->heap()->raw_unchecked_non_monomorphic_cache();
+ int entry = dictionary->FindEntry(isolate, flags);
if (entry != -1) return dictionary->ValueAt(entry);
- return Heap::raw_unchecked_undefined_value();
+ return isolate->heap()->raw_unchecked_undefined_value();
}
-MUST_USE_RESULT static MaybeObject* ProbeCache(Code::Flags flags) {
- Object* probe = GetProbeValue(flags);
- if (probe != Heap::undefined_value()) return probe;
+MUST_USE_RESULT static MaybeObject* ProbeCache(Isolate* isolate,
+ Code::Flags flags) {
+ Heap* heap = isolate->heap();
+ Object* probe = GetProbeValue(isolate, flags);
+ if (probe != heap->undefined_value()) return probe;
// Seed the cache with an undefined value to make sure that any
// generated code object can always be inserted into the cache
// without causing allocation failures.
Object* result;
{ MaybeObject* maybe_result =
- Heap::non_monomorphic_cache()->AtNumberPut(flags,
- Heap::undefined_value());
+ heap->non_monomorphic_cache()->AtNumberPut(flags,
+ heap->undefined_value());
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Heap::public_set_non_monomorphic_cache(NumberDictionary::cast(result));
+ heap->public_set_non_monomorphic_cache(NumberDictionary::cast(result));
return probe;
}
-static MaybeObject* FillCache(MaybeObject* maybe_code) {
+static MaybeObject* FillCache(Isolate* isolate, MaybeObject* maybe_code) {
Object* code;
if (maybe_code->ToObject(&code)) {
if (code->IsCode()) {
- int entry =
- Heap::non_monomorphic_cache()->FindEntry(
- Code::cast(code)->flags());
+ Heap* heap = isolate->heap();
+ int entry = heap->non_monomorphic_cache()->FindEntry(
+ Code::cast(code)->flags());
      // The entry must be present; see the comment in ProbeCache.
ASSERT(entry != -1);
- ASSERT(Heap::non_monomorphic_cache()->ValueAt(entry) ==
- Heap::undefined_value());
- Heap::non_monomorphic_cache()->ValueAtPut(entry, code);
- CHECK(GetProbeValue(Code::cast(code)->flags()) == code);
+ ASSERT(heap->non_monomorphic_cache()->ValueAt(entry) ==
+ heap->undefined_value());
+ heap->non_monomorphic_cache()->ValueAtPut(entry, code);
+ CHECK(GetProbeValue(isolate, Code::cast(code)->flags()) == code);
}
}
return maybe_code;
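
ProbeCache and FillCache above cooperate in a seed-then-fill protocol: the probe reserves a dictionary slot (seeded with undefined) so that inserting the freshly compiled code later can never fail for lack of memory. A condensed sketch of the idea, with a plain map standing in for the NumberDictionary:

#include <cstddef>
#include <map>

static std::map<int, void*> nm_cache;  // flags -> code; NULL means "seeded"

void* Probe(int flags) {
  std::map<int, void*>::iterator it = nm_cache.find(flags);
  if (it != nm_cache.end()) return it->second;
  // Seed the slot now, while an allocation failure is still recoverable.
  nm_cache[flags] = NULL;
  return NULL;
}

void Fill(int flags, void* code) {
  // The slot was seeded by Probe, so this assignment cannot allocate.
  nm_cache[flags] = code;
}
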
@@ -1014,15 +920,19 @@ static MaybeObject* FillCache(MaybeObject* maybe_code) {
Code* StubCache::FindCallInitialize(int argc,
InLoopFlag in_loop,
+ RelocInfo::Mode mode,
Code::Kind kind) {
+ Code::ExtraICState extra_state =
+ CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
+ CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
Code::Flags flags = Code::ComputeFlags(kind,
in_loop,
UNINITIALIZED,
- Code::kNoExtraICState,
+ extra_state,
NORMAL,
argc);
- Object* result = ProbeCache(flags)->ToObjectUnchecked();
- ASSERT(!result->IsUndefined());
+ Object* result = ProbeCache(isolate(), flags)->ToObjectUnchecked();
+ ASSERT(result != heap()->undefined_value());
// This might be called during the marking phase of the collector
// hence the unchecked cast.
return reinterpret_cast<Code*>(result);
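
The extra_state value built above packs several small fields into one integer with BitField-style encode/decode helpers. A hedged sketch of that encoding; the field positions and widths below are assumptions for illustration, not the real ExtraICState layout.

#include <cstdint>

template <class T, int shift, int size>
struct BitField {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t state) {
    return static_cast<T>((state >> shift) & ((1U << size) - 1));
  }
};

enum StringStubMode { DEFAULT_STRING_STUB, STRING_INDEX_OUT_OF_BOUNDS };
typedef BitField<StringStubMode, 0, 1> StringStubState;  // bit 0 (assumed)
typedef BitField<bool, 1, 1> Contextual;                 // bit 1 (assumed)

// Mirrors the construction in FindCallInitialize above.
static const uint32_t kExtraState =
    StringStubState::encode(DEFAULT_STRING_STUB) |
    Contextual::encode(true);  // true iff mode == CODE_TARGET_CONTEXT
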
@@ -1031,33 +941,41 @@ Code* StubCache::FindCallInitialize(int argc,
MaybeObject* StubCache::ComputeCallInitialize(int argc,
InLoopFlag in_loop,
+ RelocInfo::Mode mode,
Code::Kind kind) {
+ Code::ExtraICState extra_state =
+ CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
+ CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
Code::Flags flags = Code::ComputeFlags(kind,
in_loop,
UNINITIALIZED,
- Code::kNoExtraICState,
+ extra_state,
NORMAL,
argc);
Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(flags);
+ { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
}
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
- return FillCache(compiler.CompileCallInitialize(flags));
+ return FillCache(isolate_, compiler.CompileCallInitialize(flags));
}
-Handle<Code> StubCache::ComputeCallInitialize(int argc, InLoopFlag in_loop) {
+Handle<Code> StubCache::ComputeCallInitialize(int argc,
+ InLoopFlag in_loop,
+ RelocInfo::Mode mode) {
if (in_loop == IN_LOOP) {
// Force the creation of the corresponding stub outside loops,
// because it may be used when clearing the ICs later - it is
// possible for a series of IC transitions to lose the in-loop
// information, and the IC clearing code can't generate a stub
    // that it needs, so we need to ensure it is generated already.
- ComputeCallInitialize(argc, NOT_IN_LOOP);
+ ComputeCallInitialize(argc, NOT_IN_LOOP, mode);
}
- CALL_HEAP_FUNCTION(ComputeCallInitialize(argc, in_loop, Code::CALL_IC), Code);
+ CALL_HEAP_FUNCTION(isolate_,
+ ComputeCallInitialize(argc, in_loop, mode, Code::CALL_IC),
+ Code);
}
@@ -1072,51 +990,60 @@ Handle<Code> StubCache::ComputeKeyedCallInitialize(int argc,
ComputeKeyedCallInitialize(argc, NOT_IN_LOOP);
}
CALL_HEAP_FUNCTION(
- ComputeCallInitialize(argc, in_loop, Code::KEYED_CALL_IC), Code);
+ isolate_,
+ ComputeCallInitialize(argc,
+ in_loop,
+ RelocInfo::CODE_TARGET,
+ Code::KEYED_CALL_IC),
+ Code);
}
-MaybeObject* StubCache::ComputeCallPreMonomorphic(int argc,
- InLoopFlag in_loop,
- Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallPreMonomorphic(
+ int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind,
+ Code::ExtraICState extra_ic_state) {
Code::Flags flags = Code::ComputeFlags(kind,
in_loop,
PREMONOMORPHIC,
- Code::kNoExtraICState,
+ extra_ic_state,
NORMAL,
argc);
Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(flags);
+ { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
}
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
- return FillCache(compiler.CompileCallPreMonomorphic(flags));
+ return FillCache(isolate_, compiler.CompileCallPreMonomorphic(flags));
}
MaybeObject* StubCache::ComputeCallNormal(int argc,
InLoopFlag in_loop,
- Code::Kind kind) {
+ Code::Kind kind,
+ Code::ExtraICState extra_ic_state) {
Code::Flags flags = Code::ComputeFlags(kind,
in_loop,
MONOMORPHIC,
- Code::kNoExtraICState,
+ extra_ic_state,
NORMAL,
argc);
Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(flags);
+ { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
}
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
- return FillCache(compiler.CompileCallNormal(flags));
+ return FillCache(isolate_, compiler.CompileCallNormal(flags));
}
-MaybeObject* StubCache::ComputeCallMegamorphic(int argc,
- InLoopFlag in_loop,
- Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallArguments(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind) {
+ ASSERT(kind == Code::KEYED_CALL_IC);
Code::Flags flags = Code::ComputeFlags(kind,
in_loop,
MEGAMORPHIC,
@@ -1124,37 +1051,64 @@ MaybeObject* StubCache::ComputeCallMegamorphic(int argc,
NORMAL,
argc);
Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(flags);
+ { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
}
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
- return FillCache(compiler.CompileCallMegamorphic(flags));
+ return FillCache(isolate_, compiler.CompileCallArguments(flags));
}
-MaybeObject* StubCache::ComputeCallMiss(int argc, Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallMegamorphic(
+ int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind,
+ Code::ExtraICState extra_ic_state) {
+ Code::Flags flags = Code::ComputeFlags(kind,
+ in_loop,
+ MEGAMORPHIC,
+ extra_ic_state,
+ NORMAL,
+ argc);
+ Object* probe;
+ { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
+ if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+ }
+ if (!probe->IsUndefined()) return probe;
+ StubCompiler compiler;
+ return FillCache(isolate_, compiler.CompileCallMegamorphic(flags));
+}
+
+
+MaybeObject* StubCache::ComputeCallMiss(int argc,
+ Code::Kind kind,
+ Code::ExtraICState extra_ic_state) {
// MONOMORPHIC_PROTOTYPE_FAILURE state is used to make sure that miss stubs
// and monomorphic stubs are not mixed up together in the stub cache.
Code::Flags flags = Code::ComputeFlags(kind,
NOT_IN_LOOP,
MONOMORPHIC_PROTOTYPE_FAILURE,
- Code::kNoExtraICState,
+ extra_ic_state,
NORMAL,
argc,
OWN_MAP);
Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(flags);
+ { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
}
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
- return FillCache(compiler.CompileCallMiss(flags));
+ return FillCache(isolate_, compiler.CompileCallMiss(flags));
}
#ifdef ENABLE_DEBUGGER_SUPPORT
-MaybeObject* StubCache::ComputeCallDebugBreak(int argc, Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallDebugBreak(
+ int argc,
+ Code::Kind kind) {
+ // Extra IC state is irrelevant for debug break ICs. They jump to
+  // the actual call IC to carry out the work.
Code::Flags flags = Code::ComputeFlags(kind,
NOT_IN_LOOP,
DEBUG_BREAK,
@@ -1162,17 +1116,20 @@ MaybeObject* StubCache::ComputeCallDebugBreak(int argc, Code::Kind kind) {
NORMAL,
argc);
Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(flags);
+ { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
}
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
- return FillCache(compiler.CompileCallDebugBreak(flags));
+ return FillCache(isolate_, compiler.CompileCallDebugBreak(flags));
}
-MaybeObject* StubCache::ComputeCallDebugPrepareStepIn(int argc,
- Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallDebugPrepareStepIn(
+ int argc,
+ Code::Kind kind) {
+ // Extra IC state is irrelevant for debug break ICs. They jump to
+  // the actual call IC to carry out the work.
Code::Flags flags = Code::ComputeFlags(kind,
NOT_IN_LOOP,
DEBUG_PREPARE_STEP_IN,
@@ -1180,24 +1137,26 @@ MaybeObject* StubCache::ComputeCallDebugPrepareStepIn(int argc,
NORMAL,
argc);
Object* probe;
- { MaybeObject* maybe_probe = ProbeCache(flags);
+ { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
}
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
- return FillCache(compiler.CompileCallDebugPrepareStepIn(flags));
+ return FillCache(isolate_, compiler.CompileCallDebugPrepareStepIn(flags));
}
#endif
void StubCache::Clear() {
for (int i = 0; i < kPrimaryTableSize; i++) {
- primary_[i].key = Heap::empty_string();
- primary_[i].value = Builtins::builtin(Builtins::Illegal);
+ primary_[i].key = heap()->empty_string();
+ primary_[i].value = isolate_->builtins()->builtin(
+ Builtins::kIllegal);
}
for (int j = 0; j < kSecondaryTableSize; j++) {
- secondary_[j].key = Heap::empty_string();
- secondary_[j].value = Builtins::builtin(Builtins::Illegal);
+ secondary_[j].key = heap()->empty_string();
+ secondary_[j].value = isolate_->builtins()->builtin(
+ Builtins::kIllegal);
}
}
@@ -1248,7 +1207,7 @@ void StubCache::CollectMatchingMaps(ZoneMapList* types,
// StubCompiler implementation.
-MaybeObject* LoadCallbackProperty(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty) {
ASSERT(args[0]->IsJSObject());
ASSERT(args[1]->IsJSObject());
AccessorInfo* callback = AccessorInfo::cast(args[3]);
@@ -1256,21 +1215,21 @@ MaybeObject* LoadCallbackProperty(Arguments args) {
v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
ASSERT(fun != NULL);
v8::AccessorInfo info(&args[0]);
- HandleScope scope;
+ HandleScope scope(isolate);
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
- ExternalCallbackScope call_scope(getter_address);
+ VMState state(isolate, EXTERNAL);
+ ExternalCallbackScope call_scope(isolate, getter_address);
result = fun(v8::Utils::ToLocal(args.at<String>(4)), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
- if (result.IsEmpty()) return Heap::undefined_value();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ if (result.IsEmpty()) return HEAP->undefined_value();
return *v8::Utils::OpenHandle(*result);
}
-MaybeObject* StoreCallbackProperty(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
JSObject* recv = JSObject::cast(args[0]);
AccessorInfo* callback = AccessorInfo::cast(args[1]);
Address setter_address = v8::ToCData<Address>(callback->setter());
@@ -1278,17 +1237,17 @@ MaybeObject* StoreCallbackProperty(Arguments args) {
ASSERT(fun != NULL);
Handle<String> name = args.at<String>(2);
Handle<Object> value = args.at<Object>(3);
- HandleScope scope;
- LOG(ApiNamedPropertyAccess("store", recv, *name));
- CustomArguments custom_args(callback->data(), recv, recv);
+ HandleScope scope(isolate);
+ LOG(isolate, ApiNamedPropertyAccess("store", recv, *name));
+ CustomArguments custom_args(isolate, callback->data(), recv, recv);
v8::AccessorInfo info(custom_args.end());
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
- ExternalCallbackScope call_scope(setter_address);
+ VMState state(isolate, EXTERNAL);
+ ExternalCallbackScope call_scope(isolate, setter_address);
fun(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *value;
}
@@ -1303,7 +1262,7 @@ static const int kAccessorInfoOffsetInInterceptorArgs = 2;
 * Returns |Heap::no_interceptor_result_sentinel()| if the interceptor doesn't
* provide any value for the given name.
*/
-MaybeObject* LoadPropertyWithInterceptorOnly(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
Handle<String> name_handle = args.at<String>(0);
Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(1);
ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
@@ -1320,20 +1279,20 @@ MaybeObject* LoadPropertyWithInterceptorOnly(Arguments args) {
// Use the interceptor getter.
v8::AccessorInfo info(args.arguments() -
kAccessorInfoOffsetInInterceptorArgs);
- HandleScope scope;
+ HandleScope scope(isolate);
v8::Handle<v8::Value> r;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
r = getter(v8::Utils::ToLocal(name_handle), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!r.IsEmpty()) {
return *v8::Utils::OpenHandle(*r);
}
}
- return Heap::no_interceptor_result_sentinel();
+ return isolate->heap()->no_interceptor_result_sentinel();
}
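
The sentinel above lets the caller tell "the interceptor declined" apart from "the interceptor returned undefined", which is a legitimate value. A minimal sketch of the pattern, with hypothetical singleton objects standing in for the heap roots:

struct Object {};
static Object undefined_value;        // ordinary value an interceptor may return
static Object no_interceptor_result;  // stand-in for the sentinel root

// Returns &no_interceptor_result when the getter produces nothing, so the
// caller can fall back to a normal property lookup instead of treating the
// result as undefined and stopping there.
Object* LoadWithInterceptorOnly(Object* (*getter)(const char*),
                                const char* name) {
  Object* result = getter(name);
  return (result != 0) ? result : &no_interceptor_result;
}
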
@@ -1341,17 +1300,17 @@ static MaybeObject* ThrowReferenceError(String* name) {
// If the load is non-contextual, just return the undefined result.
// Note that both keyed and non-keyed loads may end up here, so we
// can't use either LoadIC or KeyedLoadIC constructors.
- IC ic(IC::NO_EXTRA_FRAME);
+ IC ic(IC::NO_EXTRA_FRAME, Isolate::Current());
ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
- if (!ic.SlowIsContextual()) return Heap::undefined_value();
+ if (!ic.SlowIsContextual()) return HEAP->undefined_value();
// Throw a reference error.
HandleScope scope;
Handle<String> name_handle(name);
Handle<Object> error =
- Factory::NewReferenceError("not_defined",
+ FACTORY->NewReferenceError("not_defined",
HandleVector(&name_handle, 1));
- return Top::Throw(*error);
+ return Isolate::Current()->Throw(*error);
}
@@ -1364,6 +1323,8 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
Handle<JSObject> holder_handle = args->at<JSObject>(3);
ASSERT(args->length() == 5); // Last arg is data object.
+ Isolate* isolate = receiver_handle->GetIsolate();
+
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
v8::NamedPropertyGetter getter =
FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
@@ -1373,14 +1334,14 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
// Use the interceptor getter.
v8::AccessorInfo info(args->arguments() -
kAccessorInfoOffsetInInterceptorArgs);
- HandleScope scope;
+ HandleScope scope(isolate);
v8::Handle<v8::Value> r;
{
// Leaving JavaScript.
- VMState state(EXTERNAL);
+ VMState state(isolate, EXTERNAL);
r = getter(v8::Utils::ToLocal(name_handle), info);
}
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!r.IsEmpty()) {
*attrs = NONE;
return *v8::Utils::OpenHandle(*r);
@@ -1391,7 +1352,7 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
*receiver_handle,
*name_handle,
attrs);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return result;
}
@@ -1400,7 +1361,7 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
* Loads a property with an interceptor performing post interceptor
* lookup if interceptor failed.
*/
-MaybeObject* LoadPropertyWithInterceptorForLoad(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad) {
PropertyAttributes attr = NONE;
Object* result;
{ MaybeObject* maybe_result = LoadWithInterceptor(&args, &attr);
@@ -1413,10 +1374,10 @@ MaybeObject* LoadPropertyWithInterceptorForLoad(Arguments args) {
}
-MaybeObject* LoadPropertyWithInterceptorForCall(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall) {
PropertyAttributes attr;
MaybeObject* result = LoadWithInterceptor(&args, &attr);
- RETURN_IF_SCHEDULED_EXCEPTION();
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
// This is call IC. In this case, we simply return the undefined result which
// will lead to an exception when trying to invoke the result as a
// function.
@@ -1424,36 +1385,36 @@ MaybeObject* LoadPropertyWithInterceptorForCall(Arguments args) {
}
-MaybeObject* StoreInterceptorProperty(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) {
ASSERT(args.length() == 4);
JSObject* recv = JSObject::cast(args[0]);
String* name = String::cast(args[1]);
Object* value = args[2];
- StrictModeFlag strict =
- static_cast<StrictModeFlag>(Smi::cast(args[3])->value());
- ASSERT(strict == kStrictMode || strict == kNonStrictMode);
+ StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
+ ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
ASSERT(recv->HasNamedInterceptor());
PropertyAttributes attr = NONE;
MaybeObject* result = recv->SetPropertyWithInterceptor(
- name, value, attr, strict);
+ name, value, attr, strict_mode);
return result;
}
-MaybeObject* KeyedLoadPropertyWithInterceptor(Arguments args) {
+RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor) {
JSObject* receiver = JSObject::cast(args[0]);
- ASSERT(Smi::cast(args[1])->value() >= 0);
- uint32_t index = Smi::cast(args[1])->value();
+ ASSERT(args.smi_at(1) >= 0);
+ uint32_t index = args.smi_at(1);
return receiver->GetElementWithInterceptor(receiver, index);
}
MaybeObject* StubCompiler::CompileCallInitialize(Code::Flags flags) {
- HandleScope scope;
+ HandleScope scope(isolate());
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
+ Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
if (kind == Code::CALL_IC) {
- CallIC::GenerateInitialize(masm(), argc);
+ CallIC::GenerateInitialize(masm(), argc, extra_ic_state);
} else {
KeyedCallIC::GenerateInitialize(masm(), argc);
}
@@ -1462,10 +1423,11 @@ MaybeObject* StubCompiler::CompileCallInitialize(Code::Flags flags) {
GetCodeWithFlags(flags, "CompileCallInitialize");
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Counters::call_initialize_stubs.Increment();
+ isolate()->counters()->call_initialize_stubs()->Increment();
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_INITIALIZE_TAG),
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_INITIALIZE_TAG),
code, code->arguments_count()));
GDBJIT(AddCode(GDBJITInterface::CALL_INITIALIZE, Code::cast(code)));
return result;
@@ -1473,13 +1435,14 @@ MaybeObject* StubCompiler::CompileCallInitialize(Code::Flags flags) {
MaybeObject* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
- HandleScope scope;
+ HandleScope scope(isolate());
int argc = Code::ExtractArgumentsCountFromFlags(flags);
// The code of the PreMonomorphic stub is the same as the code
// of the Initialized stub. They just differ on the code object flags.
Code::Kind kind = Code::ExtractKindFromFlags(flags);
+ Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
if (kind == Code::CALL_IC) {
- CallIC::GenerateInitialize(masm(), argc);
+ CallIC::GenerateInitialize(masm(), argc, extra_ic_state);
} else {
KeyedCallIC::GenerateInitialize(masm(), argc);
}
@@ -1488,10 +1451,11 @@ MaybeObject* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Counters::call_premonomorphic_stubs.Increment();
+ isolate()->counters()->call_premonomorphic_stubs()->Increment();
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_PRE_MONOMORPHIC_TAG),
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_PRE_MONOMORPHIC_TAG),
code, code->arguments_count()));
GDBJIT(AddCode(GDBJITInterface::CALL_PRE_MONOMORPHIC, Code::cast(code)));
return result;
@@ -1499,10 +1463,13 @@ MaybeObject* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
MaybeObject* StubCompiler::CompileCallNormal(Code::Flags flags) {
- HandleScope scope;
+ HandleScope scope(isolate());
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
if (kind == Code::CALL_IC) {
+    // Call normal is always with an explicit receiver.
+ ASSERT(!CallIC::Contextual::decode(
+ Code::ExtractExtraICStateFromFlags(flags)));
CallIC::GenerateNormal(masm(), argc);
} else {
KeyedCallIC::GenerateNormal(masm(), argc);
@@ -1511,10 +1478,11 @@ MaybeObject* StubCompiler::CompileCallNormal(Code::Flags flags) {
{ MaybeObject* maybe_result = GetCodeWithFlags(flags, "CompileCallNormal");
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Counters::call_normal_stubs.Increment();
+ isolate()->counters()->call_normal_stubs()->Increment();
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_NORMAL_TAG),
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_NORMAL_TAG),
code, code->arguments_count()));
GDBJIT(AddCode(GDBJITInterface::CALL_NORMAL, Code::cast(code)));
return result;
@@ -1522,24 +1490,45 @@ MaybeObject* StubCompiler::CompileCallNormal(Code::Flags flags) {
MaybeObject* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
- HandleScope scope;
+ HandleScope scope(isolate());
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
+ Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
if (kind == Code::CALL_IC) {
- CallIC::GenerateMegamorphic(masm(), argc);
+ CallIC::GenerateMegamorphic(masm(), argc, extra_ic_state);
} else {
KeyedCallIC::GenerateMegamorphic(masm(), argc);
}
-
Object* result;
{ MaybeObject* maybe_result =
GetCodeWithFlags(flags, "CompileCallMegamorphic");
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Counters::call_megamorphic_stubs.Increment();
+ isolate()->counters()->call_megamorphic_stubs()->Increment();
+ Code* code = Code::cast(result);
+ USE(code);
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
+ code, code->arguments_count()));
+ GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, Code::cast(code)));
+ return result;
+}
+
+
+MaybeObject* StubCompiler::CompileCallArguments(Code::Flags flags) {
+ HandleScope scope(isolate());
+ int argc = Code::ExtractArgumentsCountFromFlags(flags);
+ KeyedCallIC::GenerateNonStrictArguments(masm(), argc);
+ Code::Kind kind = Code::ExtractKindFromFlags(flags);
+ Object* result;
+ { MaybeObject* maybe_result =
+ GetCodeWithFlags(flags, "CompileCallArguments");
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
code, code->arguments_count()));
GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, Code::cast(code)));
return result;
@@ -1547,11 +1536,12 @@ MaybeObject* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
MaybeObject* StubCompiler::CompileCallMiss(Code::Flags flags) {
- HandleScope scope;
+ HandleScope scope(isolate());
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
+ Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
if (kind == Code::CALL_IC) {
- CallIC::GenerateMiss(masm(), argc);
+ CallIC::GenerateMiss(masm(), argc, extra_ic_state);
} else {
KeyedCallIC::GenerateMiss(masm(), argc);
}
@@ -1559,10 +1549,11 @@ MaybeObject* StubCompiler::CompileCallMiss(Code::Flags flags) {
{ MaybeObject* maybe_result = GetCodeWithFlags(flags, "CompileCallMiss");
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Counters::call_megamorphic_stubs.Increment();
+ isolate()->counters()->call_megamorphic_stubs()->Increment();
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
code, code->arguments_count()));
GDBJIT(AddCode(GDBJITInterface::CALL_MISS, Code::cast(code)));
return result;
@@ -1571,7 +1562,7 @@ MaybeObject* StubCompiler::CompileCallMiss(Code::Flags flags) {
#ifdef ENABLE_DEBUGGER_SUPPORT
MaybeObject* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
- HandleScope scope;
+ HandleScope scope(isolate());
Debug::GenerateCallICDebugBreak(masm());
Object* result;
{ MaybeObject* maybe_result =
@@ -1582,20 +1573,22 @@ MaybeObject* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
USE(code);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
USE(kind);
- PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_DEBUG_BREAK_TAG),
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_DEBUG_BREAK_TAG),
code, code->arguments_count()));
return result;
}
MaybeObject* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
- HandleScope scope;
+ HandleScope scope(isolate());
  // Use the same code for the step-in preparations as we do for
// the miss case.
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
if (kind == Code::CALL_IC) {
- CallIC::GenerateMiss(masm(), argc);
+    // For the debugger, extra IC state is irrelevant.
+ CallIC::GenerateMiss(masm(), argc, Code::kNoExtraICState);
} else {
KeyedCallIC::GenerateMiss(masm(), argc);
}
@@ -1606,10 +1599,11 @@ MaybeObject* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
}
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(
- CALL_LOGGER_TAG(kind, CALL_DEBUG_PREPARE_STEP_IN_TAG),
- code,
- code->arguments_count()));
+ PROFILE(isolate(),
+ CodeCreateEvent(
+ CALL_LOGGER_TAG(kind, CALL_DEBUG_PREPARE_STEP_IN_TAG),
+ code,
+ code->arguments_count()));
return result;
}
#endif
@@ -1624,7 +1618,7 @@ MaybeObject* StubCompiler::GetCodeWithFlags(Code::Flags flags,
// Create code object in the heap.
CodeDesc desc;
masm_.GetCode(&desc);
- MaybeObject* result = Heap::CreateCode(desc, flags, masm_.CodeObject());
+ MaybeObject* result = heap()->CreateCode(desc, flags, masm_.CodeObject());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs && !result->IsFailure()) {
Code::cast(result->ToObjectUnchecked())->Disassemble(name);
@@ -1649,7 +1643,7 @@ void StubCompiler::LookupPostInterceptor(JSObject* holder,
if (!lookup->IsProperty()) {
lookup->NotFound();
Object* proto = holder->GetPrototype();
- if (proto != Heap::null_value()) {
+ if (!proto->IsNull()) {
proto->Lookup(name, lookup);
}
}
@@ -1661,7 +1655,8 @@ MaybeObject* LoadStubCompiler::GetCode(PropertyType type, String* name) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
MaybeObject* result = GetCodeWithFlags(flags, name);
if (!result->IsFailure()) {
- PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG,
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::LOAD_IC_TAG,
Code::cast(result->ToObjectUnchecked()),
name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
@@ -1672,11 +1667,15 @@ MaybeObject* LoadStubCompiler::GetCode(PropertyType type, String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type, String* name) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, type);
+MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type,
+ String* name,
+ InlineCacheState state) {
+ Code::Flags flags = Code::ComputeFlags(
+ Code::KEYED_LOAD_IC, NOT_IN_LOOP, state, Code::kNoExtraICState, type);
MaybeObject* result = GetCodeWithFlags(flags, name);
if (!result->IsFailure()) {
- PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
Code::cast(result->ToObjectUnchecked()),
name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
@@ -1687,12 +1686,28 @@ MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type, String* name) {
}
+MaybeObject* KeyedLoadStubCompiler::ComputeSharedKeyedLoadElementStub(
+ Map* receiver_map) {
+ MaybeObject* maybe_stub = NULL;
+ if (receiver_map->has_fast_elements()) {
+ maybe_stub = KeyedLoadFastElementStub().TryGetCode();
+ } else if (receiver_map->has_external_array_elements()) {
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ maybe_stub = KeyedLoadExternalArrayStub(elements_kind).TryGetCode();
+ } else {
+ UNREACHABLE();
+ }
+ return maybe_stub;
+}
+
+
MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, type, strict_mode_);
MaybeObject* result = GetCodeWithFlags(flags, name);
if (!result->IsFailure()) {
- PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG,
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::STORE_IC_TAG,
Code::cast(result->ToObjectUnchecked()),
name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC,
@@ -1703,12 +1718,15 @@ MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) {
}
-MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::KEYED_STORE_IC, type, strict_mode_);
+MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type,
+ String* name,
+ InlineCacheState state) {
+ Code::Flags flags = Code::ComputeFlags(
+ Code::KEYED_STORE_IC, NOT_IN_LOOP, state, strict_mode_, type);
MaybeObject* result = GetCodeWithFlags(flags, name);
if (!result->IsFailure()) {
- PROFILE(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
Code::cast(result->ToObjectUnchecked()),
name));
GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC,
@@ -1719,6 +1737,22 @@ MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
}
+MaybeObject* KeyedStoreStubCompiler::ComputeSharedKeyedStoreElementStub(
+ Map* receiver_map) {
+ MaybeObject* maybe_stub = NULL;
+ if (receiver_map->has_fast_elements()) {
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ maybe_stub = KeyedStoreFastElementStub(is_js_array).TryGetCode();
+ } else if (receiver_map->has_external_array_elements()) {
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ maybe_stub = KeyedStoreExternalArrayStub(elements_kind).TryGetCode();
+ } else {
+ UNREACHABLE();
+ }
+ return maybe_stub;
+}
+
+
CallStubCompiler::CallStubCompiler(int argc,
InLoopFlag in_loop,
Code::Kind kind,
@@ -1732,32 +1766,51 @@ CallStubCompiler::CallStubCompiler(int argc,
}
-bool CallStubCompiler::HasCustomCallGenerator(BuiltinFunctionId id) {
+bool CallStubCompiler::HasCustomCallGenerator(JSFunction* function) {
+ SharedFunctionInfo* info = function->shared();
+ if (info->HasBuiltinFunctionId()) {
+ BuiltinFunctionId id = info->builtin_function_id();
#define CALL_GENERATOR_CASE(name) if (id == k##name) return true;
- CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
+ CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
#undef CALL_GENERATOR_CASE
+ }
+ CallOptimization optimization(function);
+ if (optimization.is_simple_api_call()) {
+ return true;
+ }
return false;
}
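
The CALL_GENERATOR_CASE expansion above is an X-macro: one list macro, CUSTOM_CALL_IC_GENERATORS, is instantiated with different per-entry macros to generate both the enum of builtin ids and the dispatch code. A self-contained sketch of the technique with a made-up generator list:

#define CUSTOM_GENERATORS(V) V(ArrayPush) V(ArrayPop) V(StringCharAt)

enum BuiltinFunctionId {
#define DEFINE_ID(name) k##name,
  CUSTOM_GENERATORS(DEFINE_ID)
#undef DEFINE_ID
  kNumIds
};

bool HasCustomGenerator(BuiltinFunctionId id) {
#define GENERATOR_CASE(name) if (id == k##name) return true;
  CUSTOM_GENERATORS(GENERATOR_CASE)
#undef GENERATOR_CASE
  return false;
}
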
-MaybeObject* CallStubCompiler::CompileCustomCall(BuiltinFunctionId id,
- Object* object,
+MaybeObject* CallStubCompiler::CompileCustomCall(Object* object,
JSObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function,
String* fname) {
-#define CALL_GENERATOR_CASE(name) \
- if (id == k##name) { \
- return CallStubCompiler::Compile##name##Call(object, \
- holder, \
- cell, \
- function, \
- fname); \
- }
- CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
+ ASSERT(HasCustomCallGenerator(function));
+
+ SharedFunctionInfo* info = function->shared();
+ if (info->HasBuiltinFunctionId()) {
+ BuiltinFunctionId id = info->builtin_function_id();
+#define CALL_GENERATOR_CASE(name) \
+ if (id == k##name) { \
+ return CallStubCompiler::Compile##name##Call(object, \
+ holder, \
+ cell, \
+ function, \
+ fname); \
+ }
+ CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
#undef CALL_GENERATOR_CASE
- ASSERT(!HasCustomCallGenerator(id));
- return Heap::undefined_value();
+ }
+ CallOptimization optimization(function);
+ ASSERT(optimization.is_simple_api_call());
+ return CompileFastApiCall(optimization,
+ object,
+ holder,
+ cell,
+ function,
+ fname);
}
@@ -1790,7 +1843,7 @@ MaybeObject* ConstructStubCompiler::GetCode() {
}
Code* code = Code::cast(result);
USE(code);
- PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
+ PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", Code::cast(code)));
return result;
}
@@ -1863,16 +1916,4 @@ void CallOptimization::AnalyzePossibleApiFunction(JSFunction* function) {
}
-MaybeObject* ExternalArrayStubCompiler::GetCode(Code::Flags flags) {
- Object* result;
- { MaybeObject* maybe_result = GetCodeWithFlags(flags, "ExternalArrayStub");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Code* code = Code::cast(result);
- USE(code);
- PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, "ExternalArrayStub"));
- return result;
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 6927076c1..fa2676061 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,7 +28,10 @@
#ifndef V8_STUB_CACHE_H_
#define V8_STUB_CACHE_H_
+#include "allocation.h"
+#include "arguments.h"
#include "macro-assembler.h"
+#include "objects.h"
#include "zone-inl.h"
namespace v8 {
@@ -43,50 +46,62 @@ namespace internal {
// invalidate the cache whenever a prototype map is changed. The stub
// validates the map chain as in the monomorphic case.
-class SCTableReference;
+class StubCache;
+
+class SCTableReference {
+ public:
+ Address address() const { return address_; }
+
+ private:
+ explicit SCTableReference(Address address) : address_(address) {}
+
+ Address address_;
+
+ friend class StubCache;
+};
-class StubCache : public AllStatic {
+class StubCache {
public:
struct Entry {
String* key;
Code* value;
};
+ void Initialize(bool create_heap_objects);
- static void Initialize(bool create_heap_objects);
  // Computes the right matching stub. Inserts the result in the
// cache before returning. This might compile a stub if needed.
- MUST_USE_RESULT static MaybeObject* ComputeLoadNonexistent(
+ MUST_USE_RESULT MaybeObject* ComputeLoadNonexistent(
String* name,
JSObject* receiver);
- MUST_USE_RESULT static MaybeObject* ComputeLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int field_index);
+ MUST_USE_RESULT MaybeObject* ComputeLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int field_index);
- MUST_USE_RESULT static MaybeObject* ComputeLoadCallback(
+ MUST_USE_RESULT MaybeObject* ComputeLoadCallback(
String* name,
JSObject* receiver,
JSObject* holder,
AccessorInfo* callback);
- MUST_USE_RESULT static MaybeObject* ComputeLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value);
+ MUST_USE_RESULT MaybeObject* ComputeLoadConstant(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ Object* value);
- MUST_USE_RESULT static MaybeObject* ComputeLoadInterceptor(
+ MUST_USE_RESULT MaybeObject* ComputeLoadInterceptor(
String* name,
JSObject* receiver,
JSObject* holder);
- MUST_USE_RESULT static MaybeObject* ComputeLoadNormal();
+ MUST_USE_RESULT MaybeObject* ComputeLoadNormal();
- MUST_USE_RESULT static MaybeObject* ComputeLoadGlobal(
+ MUST_USE_RESULT MaybeObject* ComputeLoadGlobal(
String* name,
JSObject* receiver,
GlobalObject* holder,
@@ -96,108 +111,96 @@ class StubCache : public AllStatic {
// ---
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int field_index);
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int field_index);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadCallback(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadCallback(
String* name,
JSObject* receiver,
JSObject* holder,
AccessorInfo* callback);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadConstant(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadConstant(
String* name,
JSObject* receiver,
JSObject* holder,
Object* value);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadInterceptor(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadInterceptor(
String* name,
JSObject* receiver,
JSObject* holder);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadArrayLength(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadArrayLength(
String* name,
JSArray* receiver);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadStringLength(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadStringLength(
String* name,
String* receiver);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadFunctionPrototype(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadFunctionPrototype(
String* name,
JSFunction* receiver);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadSpecialized(
- JSObject* receiver);
-
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadPixelArray(
- JSObject* receiver);
-
// ---
- MUST_USE_RESULT static MaybeObject* ComputeStoreField(
+ MUST_USE_RESULT MaybeObject* ComputeStoreField(
String* name,
JSObject* receiver,
int field_index,
Map* transition,
StrictModeFlag strict_mode);
- MUST_USE_RESULT static MaybeObject* ComputeStoreNormal(
+ MUST_USE_RESULT MaybeObject* ComputeStoreNormal(
StrictModeFlag strict_mode);
- MUST_USE_RESULT static MaybeObject* ComputeStoreGlobal(
+ MUST_USE_RESULT MaybeObject* ComputeStoreGlobal(
String* name,
GlobalObject* receiver,
JSGlobalPropertyCell* cell,
StrictModeFlag strict_mode);
- MUST_USE_RESULT static MaybeObject* ComputeStoreCallback(
+ MUST_USE_RESULT MaybeObject* ComputeStoreCallback(
String* name,
JSObject* receiver,
AccessorInfo* callback,
StrictModeFlag strict_mode);
- MUST_USE_RESULT static MaybeObject* ComputeStoreInterceptor(
+ MUST_USE_RESULT MaybeObject* ComputeStoreInterceptor(
String* name,
JSObject* receiver,
StrictModeFlag strict_mode);
// ---
- MUST_USE_RESULT static MaybeObject* ComputeKeyedStoreField(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedStoreField(
String* name,
JSObject* receiver,
int field_index,
Map* transition,
StrictModeFlag strict_mode);
- MUST_USE_RESULT static MaybeObject* ComputeKeyedStoreSpecialized(
- JSObject* receiver,
- StrictModeFlag strict_mode);
-
- MUST_USE_RESULT static MaybeObject* ComputeKeyedStorePixelArray(
- JSObject* receiver,
- StrictModeFlag strict_mode);
-
- MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadOrStoreExternalArray(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadOrStoreElement(
JSObject* receiver,
bool is_store,
StrictModeFlag strict_mode);
// ---
- MUST_USE_RESULT static MaybeObject* ComputeCallField(int argc,
- InLoopFlag in_loop,
- Code::Kind,
- String* name,
- Object* object,
- JSObject* holder,
- int index);
+ MUST_USE_RESULT MaybeObject* ComputeCallField(
+ int argc,
+ InLoopFlag in_loop,
+ Code::Kind,
+ Code::ExtraICState extra_ic_state,
+ String* name,
+ Object* object,
+ JSObject* holder,
+ int index);
- MUST_USE_RESULT static MaybeObject* ComputeCallConstant(
+ MUST_USE_RESULT MaybeObject* ComputeCallConstant(
int argc,
InLoopFlag in_loop,
Code::Kind,
@@ -207,22 +210,27 @@ class StubCache : public AllStatic {
JSObject* holder,
JSFunction* function);
- MUST_USE_RESULT static MaybeObject* ComputeCallNormal(int argc,
- InLoopFlag in_loop,
- Code::Kind,
- String* name,
- JSObject* receiver);
+ MUST_USE_RESULT MaybeObject* ComputeCallNormal(
+ int argc,
+ InLoopFlag in_loop,
+ Code::Kind,
+ Code::ExtraICState extra_ic_state,
+ String* name,
+ JSObject* receiver);
- MUST_USE_RESULT static MaybeObject* ComputeCallInterceptor(int argc,
- Code::Kind,
- String* name,
- Object* object,
- JSObject* holder);
+ MUST_USE_RESULT MaybeObject* ComputeCallInterceptor(
+ int argc,
+ Code::Kind,
+ Code::ExtraICState extra_ic_state,
+ String* name,
+ Object* object,
+ JSObject* holder);
- MUST_USE_RESULT static MaybeObject* ComputeCallGlobal(
+ MUST_USE_RESULT MaybeObject* ComputeCallGlobal(
int argc,
InLoopFlag in_loop,
Code::Kind,
+ Code::ExtraICState extra_ic_state,
String* name,
JSObject* receiver,
GlobalObject* holder,
@@ -231,77 +239,115 @@ class StubCache : public AllStatic {
// ---
- MUST_USE_RESULT static MaybeObject* ComputeCallInitialize(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT MaybeObject* ComputeCallInitialize(int argc,
+ InLoopFlag in_loop,
+ RelocInfo::Mode mode,
+ Code::Kind kind);
- static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+ Handle<Code> ComputeCallInitialize(int argc,
+ InLoopFlag in_loop,
+ RelocInfo::Mode mode);
- static Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
+ Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
- MUST_USE_RESULT static MaybeObject* ComputeCallPreMonomorphic(
+ MUST_USE_RESULT MaybeObject* ComputeCallPreMonomorphic(
int argc,
InLoopFlag in_loop,
- Code::Kind kind);
+ Code::Kind kind,
+ Code::ExtraICState extra_ic_state);
- MUST_USE_RESULT static MaybeObject* ComputeCallNormal(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT MaybeObject* ComputeCallNormal(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind,
+ Code::ExtraICState state);
- MUST_USE_RESULT static MaybeObject* ComputeCallMegamorphic(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT MaybeObject* ComputeCallArguments(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
+
+ MUST_USE_RESULT MaybeObject* ComputeCallMegamorphic(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind,
+ Code::ExtraICState state);
- MUST_USE_RESULT static MaybeObject* ComputeCallMiss(int argc,
- Code::Kind kind);
+ MUST_USE_RESULT MaybeObject* ComputeCallMiss(int argc,
+ Code::Kind kind,
+ Code::ExtraICState state);
// Finds the Code object stored in the Heap::non_monomorphic_cache().
- MUST_USE_RESULT static Code* FindCallInitialize(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT Code* FindCallInitialize(int argc,
+ InLoopFlag in_loop,
+ RelocInfo::Mode mode,
+ Code::Kind kind);
#ifdef ENABLE_DEBUGGER_SUPPORT
- MUST_USE_RESULT static MaybeObject* ComputeCallDebugBreak(int argc,
- Code::Kind kind);
+ MUST_USE_RESULT MaybeObject* ComputeCallDebugBreak(int argc, Code::Kind kind);
- MUST_USE_RESULT static MaybeObject* ComputeCallDebugPrepareStepIn(
- int argc,
- Code::Kind kind);
+ MUST_USE_RESULT MaybeObject* ComputeCallDebugPrepareStepIn(int argc,
+ Code::Kind kind);
#endif
// Update cache for entry hash(name, map).
- static Code* Set(String* name, Map* map, Code* code);
+ Code* Set(String* name, Map* map, Code* code);
// Clear the lookup table (@ mark compact collection).
- static void Clear();
+ void Clear();
// Collect all maps that match the name and flags.
- static void CollectMatchingMaps(ZoneMapList* types,
- String* name,
- Code::Flags flags);
+ void CollectMatchingMaps(ZoneMapList* types,
+ String* name,
+ Code::Flags flags);
// Generate code for probing the stub cache table.
// Arguments extra and extra2 may be used to pass additional scratch
// registers. Set to no_reg if not needed.
- static void GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2 = no_reg);
+ void GenerateProbe(MacroAssembler* masm,
+ Code::Flags flags,
+ Register receiver,
+ Register name,
+ Register scratch,
+ Register extra,
+ Register extra2 = no_reg);
enum Table {
kPrimary,
kSecondary
};
+
+ SCTableReference key_reference(StubCache::Table table) {
+ return SCTableReference(
+ reinterpret_cast<Address>(&first_entry(table)->key));
+ }
+
+
+ SCTableReference value_reference(StubCache::Table table) {
+ return SCTableReference(
+ reinterpret_cast<Address>(&first_entry(table)->value));
+ }
+
+
+ StubCache::Entry* first_entry(StubCache::Table table) {
+ switch (table) {
+ case StubCache::kPrimary: return StubCache::primary_;
+ case StubCache::kSecondary: return StubCache::secondary_;
+ }
+ UNREACHABLE();
+ return NULL;
+ }
+
+ Isolate* isolate() { return isolate_; }
+ Heap* heap() { return isolate()->heap(); }
+
private:
+ explicit StubCache(Isolate* isolate);
+
+ friend class Isolate;
friend class SCTableReference;
static const int kPrimaryTableSize = 2048;
static const int kSecondaryTableSize = 512;
- static Entry primary_[];
- static Entry secondary_[];
+ Entry primary_[kPrimaryTableSize];
+ Entry secondary_[kSecondaryTableSize];
// Computes the hashed offsets for primary and secondary caches.
static int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
@@ -349,65 +395,41 @@ class StubCache : public AllStatic {
return reinterpret_cast<Entry*>(
reinterpret_cast<Address>(table) + (offset << shift_amount));
}
-};
-
-
-class SCTableReference {
- public:
- static SCTableReference keyReference(StubCache::Table table) {
- return SCTableReference(
- reinterpret_cast<Address>(&first_entry(table)->key));
- }
+ Isolate* isolate_;
- static SCTableReference valueReference(StubCache::Table table) {
- return SCTableReference(
- reinterpret_cast<Address>(&first_entry(table)->value));
- }
-
- Address address() const { return address_; }
-
- private:
- explicit SCTableReference(Address address) : address_(address) {}
-
- static StubCache::Entry* first_entry(StubCache::Table table) {
- switch (table) {
- case StubCache::kPrimary: return StubCache::primary_;
- case StubCache::kSecondary: return StubCache::secondary_;
- }
- UNREACHABLE();
- return NULL;
- }
-
- Address address_;
+ DISALLOW_COPY_AND_ASSIGN(StubCache);
};
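
The net effect of these hunks: StubCache stops being AllStatic and becomes per-isolate state, constructed by its friend Isolate and reached through it. A hedged usage sketch, assuming the Isolate::stub_cache() accessor this upgrade introduces elsewhere:

```c++
// Before: StubCache::Set(name, map, code);  // process-global
// After (sketch): every lookup goes through the owning isolate.
Isolate* isolate = Isolate::Current();
Code* code_in_cache = isolate->stub_cache()->Set(name, map, code);

// The friend-gated SCTableReference hands raw table addresses to the
// architecture-specific probe code (assumed call site):
SCTableReference key_ref =
    isolate->stub_cache()->key_reference(StubCache::kPrimary);
Address primary_keys = key_ref.address();
```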
+
// ------------------------------------------------------------------------
// Support functions for IC stubs for callbacks.
-MaybeObject* LoadCallbackProperty(Arguments args);
-MaybeObject* StoreCallbackProperty(Arguments args);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty);
// Support functions for IC stubs for interceptors.
-MaybeObject* LoadPropertyWithInterceptorOnly(Arguments args);
-MaybeObject* LoadPropertyWithInterceptorForLoad(Arguments args);
-MaybeObject* LoadPropertyWithInterceptorForCall(Arguments args);
-MaybeObject* StoreInterceptorProperty(Arguments args);
-MaybeObject* CallInterceptorProperty(Arguments args);
-MaybeObject* KeyedLoadPropertyWithInterceptor(Arguments args);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, CallInterceptorProperty);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor);
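
These declarations switch from plain `MaybeObject* Fn(Arguments)` signatures to the DECLARE_RUNTIME_FUNCTION macro, which (to the best of my reading of arguments.h in this revision, so treat the exact shape as an assumption) threads the isolate through explicitly:

```c++
// Assumed definition from arguments.h (hedged):
#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
  Type Name(Arguments args, Isolate* isolate)

// So the first declaration above is roughly equivalent to:
//   MaybeObject* LoadCallbackProperty(Arguments args, Isolate* isolate);
```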
// The stub compiler compiles stubs for the stub cache.
class StubCompiler BASE_EMBEDDED {
public:
- StubCompiler() : scope_(), masm_(NULL, 256), failure_(NULL) { }
+ StubCompiler()
+ : scope_(), masm_(Isolate::Current(), NULL, 256), failure_(NULL) { }
MUST_USE_RESULT MaybeObject* CompileCallInitialize(Code::Flags flags);
MUST_USE_RESULT MaybeObject* CompileCallPreMonomorphic(Code::Flags flags);
MUST_USE_RESULT MaybeObject* CompileCallNormal(Code::Flags flags);
MUST_USE_RESULT MaybeObject* CompileCallMegamorphic(Code::Flags flags);
+ MUST_USE_RESULT MaybeObject* CompileCallArguments(Code::Flags flags);
MUST_USE_RESULT MaybeObject* CompileCallMiss(Code::Flags flags);
#ifdef ENABLE_DEBUGGER_SUPPORT
MUST_USE_RESULT MaybeObject* CompileCallDebugBreak(Code::Flags flags);
@@ -461,7 +483,10 @@ class StubCompiler BASE_EMBEDDED {
Register scratch,
Label* miss_label);
- static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);
+ static void GenerateLoadMiss(MacroAssembler* masm,
+ Code::Kind kind);
+
+ static void GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm);
// Generates code that verifies that the property holder has not changed
// (checking maps of objects in the prototype chain for fast and global
@@ -554,6 +579,10 @@ class StubCompiler BASE_EMBEDDED {
String* name,
LookupResult* lookup);
+ Isolate* isolate() { return scope_.isolate(); }
+ Heap* heap() { return isolate()->heap(); }
+ Factory* factory() { return isolate()->factory(); }
+
private:
HandleScope scope_;
MacroAssembler masm_;
@@ -622,11 +651,23 @@ class KeyedLoadStubCompiler: public StubCompiler {
MUST_USE_RESULT MaybeObject* CompileLoadStringLength(String* name);
MUST_USE_RESULT MaybeObject* CompileLoadFunctionPrototype(String* name);
- MUST_USE_RESULT MaybeObject* CompileLoadSpecialized(JSObject* receiver);
- MUST_USE_RESULT MaybeObject* CompileLoadPixelArray(JSObject* receiver);
+ MUST_USE_RESULT MaybeObject* CompileLoadElement(Map* receiver_map);
+
+ MUST_USE_RESULT MaybeObject* CompileLoadMegamorphic(
+ MapList* receiver_maps,
+ CodeList* handler_ics);
+
+ static void GenerateLoadExternalArray(MacroAssembler* masm,
+ JSObject::ElementsKind elements_kind);
+
+ static void GenerateLoadFastElement(MacroAssembler* masm);
private:
- MaybeObject* GetCode(PropertyType type, String* name);
+ MaybeObject* GetCode(PropertyType type,
+ String* name,
+ InlineCacheState state = MONOMORPHIC);
+
+ MaybeObject* ComputeSharedKeyedLoadElementStub(Map* receiver_map);
};
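
CompileLoadMegamorphic pairs each receiver map with a handler IC. Conceptually the emitted stub performs the dispatch below, shown here as a plain C++ sketch rather than the actual generated code, and assuming MapList/CodeList are the usual List<Map*>/List<Code*> typedefs:

```c++
// Conceptual dispatch of a megamorphic keyed-load stub (sketch):
static Code* DispatchKeyedLoad(Map* receiver_map,
                               MapList* receiver_maps,
                               CodeList* handler_ics) {
  for (int i = 0; i < receiver_maps->length(); i++) {
    if (receiver_maps->at(i) == receiver_map) return handler_ics->at(i);
  }
  return NULL;  // miss: fall through to the generic stub
}
```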
@@ -667,12 +708,24 @@ class KeyedStoreStubCompiler: public StubCompiler {
Map* transition,
String* name);
- MUST_USE_RESULT MaybeObject* CompileStoreSpecialized(JSObject* receiver);
+ MUST_USE_RESULT MaybeObject* CompileStoreElement(Map* receiver_map);
- MUST_USE_RESULT MaybeObject* CompileStorePixelArray(JSObject* receiver);
+ MUST_USE_RESULT MaybeObject* CompileStoreMegamorphic(
+ MapList* receiver_maps,
+ CodeList* handler_ics);
+
+ static void GenerateStoreFastElement(MacroAssembler* masm,
+ bool is_js_array);
+
+ static void GenerateStoreExternalArray(MacroAssembler* masm,
+ JSObject::ElementsKind elements_kind);
private:
- MaybeObject* GetCode(PropertyType type, String* name);
+ MaybeObject* GetCode(PropertyType type,
+ String* name,
+ InlineCacheState state = MONOMORPHIC);
+
+ MaybeObject* ComputeSharedKeyedStoreElementStub(Map* receiver_map);
StrictModeFlag strict_mode_;
};
@@ -690,6 +743,8 @@ class KeyedStoreStubCompiler: public StubCompiler {
V(MathAbs)
+class CallOptimization;
+
class CallStubCompiler: public StubCompiler {
public:
CallStubCompiler(int argc,
@@ -698,32 +753,38 @@ class CallStubCompiler: public StubCompiler {
Code::ExtraICState extra_ic_state,
InlineCacheHolderFlag cache_holder);
- MUST_USE_RESULT MaybeObject* CompileCallField(JSObject* object,
- JSObject* holder,
- int index,
- String* name);
- MUST_USE_RESULT MaybeObject* CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check);
- MUST_USE_RESULT MaybeObject* CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name);
- MUST_USE_RESULT MaybeObject* CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name);
+ MUST_USE_RESULT MaybeObject* CompileCallField(
+ JSObject* object,
+ JSObject* holder,
+ int index,
+ String* name);
- static bool HasCustomCallGenerator(BuiltinFunctionId id);
+ MUST_USE_RESULT MaybeObject* CompileCallConstant(
+ Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check);
+
+ MUST_USE_RESULT MaybeObject* CompileCallInterceptor(
+ JSObject* object,
+ JSObject* holder,
+ String* name);
+
+ MUST_USE_RESULT MaybeObject* CompileCallGlobal(
+ JSObject* object,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name);
+
+ static bool HasCustomCallGenerator(JSFunction* function);
private:
  // Compiles a custom call constant/global IC. For constant calls,
// cell is NULL. Returns undefined if there is no custom call code
// for the given function or it can't be generated.
- MUST_USE_RESULT MaybeObject* CompileCustomCall(BuiltinFunctionId id,
- Object* object,
+ MUST_USE_RESULT MaybeObject* CompileCustomCall(Object* object,
JSObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function,
@@ -738,6 +799,14 @@ class CallStubCompiler: public StubCompiler {
CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
#undef DECLARE_CALL_GENERATOR
+ MUST_USE_RESULT MaybeObject* CompileFastApiCall(
+ const CallOptimization& optimization,
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name);
+
const ParameterCount arguments_;
const InLoopFlag in_loop_;
const Code::Kind kind_;
@@ -830,19 +899,6 @@ class CallOptimization BASE_EMBEDDED {
CallHandlerInfo* api_call_info_;
};
-class ExternalArrayStubCompiler: public StubCompiler {
- public:
- explicit ExternalArrayStubCompiler() {}
-
- MUST_USE_RESULT MaybeObject* CompileKeyedLoadStub(
- ExternalArrayType array_type, Code::Flags flags);
-
- MUST_USE_RESULT MaybeObject* CompileKeyedStoreStub(
- ExternalArrayType array_type, Code::Flags flags);
-
- private:
- MaybeObject* GetCode(Code::Flags flags);
-};
} } // namespace v8::internal
diff --git a/deps/v8/src/token.cc b/deps/v8/src/token.cc
index 488e90979..feca7beb9 100644
--- a/deps/v8/src/token.cc
+++ b/deps/v8/src/token.cc
@@ -32,21 +32,21 @@ namespace v8 {
namespace internal {
#define T(name, string, precedence) #name,
-const char* Token::name_[NUM_TOKENS] = {
+const char* const Token::name_[NUM_TOKENS] = {
TOKEN_LIST(T, T, IGNORE_TOKEN)
};
#undef T
#define T(name, string, precedence) string,
-const char* Token::string_[NUM_TOKENS] = {
+const char* const Token::string_[NUM_TOKENS] = {
TOKEN_LIST(T, T, IGNORE_TOKEN)
};
#undef T
#define T(name, string, precedence) precedence,
-int8_t Token::precedence_[NUM_TOKENS] = {
+const int8_t Token::precedence_[NUM_TOKENS] = {
TOKEN_LIST(T, T, IGNORE_TOKEN)
};
#undef T
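
The token tables gain a second const: with `const char* const`, both the pointed-to characters and the pointer array itself are immutable, so the tables can be placed in read-only data. A minimal illustration of the difference:

```c++
const char* names[] = { "LT" };         // array of mutable pointers
const char* const cnames[] = { "LT" };  // array of immutable pointers
names[0] = "GT";                        // allowed
// cnames[0] = "GT";                    // error: assignment of read-only location
```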
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index 776d9f3bc..77333bc0b 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -167,8 +167,8 @@ namespace internal {
\
/* Future reserved words (ECMA-262, section 7.6.1.2). */ \
T(FUTURE_RESERVED_WORD, NULL, 0) \
+ T(FUTURE_STRICT_RESERVED_WORD, NULL, 0) \
K(CONST, "const", 0) \
- K(NATIVE, "native", 0) \
\
/* Illegal token - not able to scan. */ \
T(ILLEGAL, "ILLEGAL", 0) \
@@ -277,9 +277,9 @@ class Token {
}
private:
- static const char* name_[NUM_TOKENS];
- static const char* string_[NUM_TOKENS];
- static int8_t precedence_[NUM_TOKENS];
+ static const char* const name_[NUM_TOKENS];
+ static const char* const string_[NUM_TOKENS];
+ static const int8_t precedence_[NUM_TOKENS];
static const char token_type[NUM_TOKENS];
};
diff --git a/deps/v8/src/top.cc b/deps/v8/src/top.cc
deleted file mode 100644
index d6fcf1009..000000000
--- a/deps/v8/src/top.cc
+++ /dev/null
@@ -1,1153 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "bootstrapper.h"
-#include "debug.h"
-#include "execution.h"
-#include "messages.h"
-#include "platform.h"
-#include "simulator.h"
-#include "string-stream.h"
-#include "vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-Semaphore* Top::runtime_profiler_semaphore_ = NULL;
-#endif
-ThreadLocalTop Top::thread_local_;
-Mutex* Top::break_access_ = OS::CreateMutex();
-
-NoAllocationStringAllocator* preallocated_message_space = NULL;
-
-bool capture_stack_trace_for_uncaught_exceptions = false;
-int stack_trace_for_uncaught_exceptions_frame_limit = 0;
-StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options =
- StackTrace::kOverview;
-
-Address top_addresses[] = {
-#define C(name) reinterpret_cast<Address>(Top::name()),
- TOP_ADDRESS_LIST(C)
- TOP_ADDRESS_LIST_PROF(C)
-#undef C
- NULL
-};
-
-
-v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
- return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
-}
-
-
-void ThreadLocalTop::Initialize() {
- c_entry_fp_ = 0;
- handler_ = 0;
-#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
- simulator_ = Simulator::current();
-#elif V8_TARGET_ARCH_MIPS
- simulator_ = assembler::mips::Simulator::current();
-#endif
-#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
- js_entry_sp_ = NULL;
- external_callback_ = NULL;
-#endif
-#ifdef ENABLE_VMSTATE_TRACKING
- current_vm_state_ = EXTERNAL;
- runtime_profiler_state_ = Top::PROF_NOT_IN_JS;
-#endif
- try_catch_handler_address_ = NULL;
- context_ = NULL;
- int id = ThreadManager::CurrentId();
- thread_id_ = (id == 0) ? ThreadManager::kInvalidId : id;
- external_caught_exception_ = false;
- failed_access_check_callback_ = NULL;
- save_context_ = NULL;
- catcher_ = NULL;
-}
-
-
-Address Top::get_address_from_id(Top::AddressId id) {
- return top_addresses[id];
-}
-
-
-char* Top::Iterate(ObjectVisitor* v, char* thread_storage) {
- ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
- Iterate(v, thread);
- return thread_storage + sizeof(ThreadLocalTop);
-}
-
-
-void Top::IterateThread(ThreadVisitor* v) {
- v->VisitThread(&thread_local_);
-}
-
-
-void Top::IterateThread(ThreadVisitor* v, char* t) {
- ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
- v->VisitThread(thread);
-}
-
-
-void Top::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
- // Visit the roots from the top for a given thread.
- Object* pending;
- // The pending exception can sometimes be a failure. We can't show
- // that to the GC, which only understands objects.
- if (thread->pending_exception_->ToObject(&pending)) {
- v->VisitPointer(&pending);
- thread->pending_exception_ = pending; // In case GC updated it.
- }
- v->VisitPointer(&(thread->pending_message_obj_));
- v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
- v->VisitPointer(BitCast<Object**>(&(thread->context_)));
- Object* scheduled;
- if (thread->scheduled_exception_->ToObject(&scheduled)) {
- v->VisitPointer(&scheduled);
- thread->scheduled_exception_ = scheduled;
- }
-
- for (v8::TryCatch* block = thread->TryCatchHandler();
- block != NULL;
- block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
- v->VisitPointer(BitCast<Object**>(&(block->exception_)));
- v->VisitPointer(BitCast<Object**>(&(block->message_)));
- }
-
- // Iterate over pointers on native execution stack.
- for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
- it.frame()->Iterate(v);
- }
-}
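
Top::Iterate exposes each thread's pending/scheduled exceptions, pending message, TryCatch chain, and stack frames as GC roots. A hedged sketch of the visitor contract it relies on (the real interface lives in objects.h):

```c++
// Minimal ObjectVisitor sketch: counts the root pointers it is shown.
class CountingVisitor : public ObjectVisitor {
 public:
  CountingVisitor() : count_(0) {}
  virtual void VisitPointers(Object** start, Object** end) {
    count_ += static_cast<int>(end - start);
  }
  int count_;
};
```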
-
-
-void Top::Iterate(ObjectVisitor* v) {
- ThreadLocalTop* current_t = &thread_local_;
- Iterate(v, current_t);
-}
-
-
-void Top::InitializeThreadLocal() {
- thread_local_.Initialize();
- clear_pending_exception();
- clear_pending_message();
- clear_scheduled_exception();
-}
-
-
-// Create a dummy thread that will wait forever on a semaphore. The only
-// purpose for this thread is to have some stack area to save essential data
-// into for use by a stacks-only core dump (aka minidump).
-class PreallocatedMemoryThread: public Thread {
- public:
- PreallocatedMemoryThread()
- : Thread("v8:PreallocMem"),
- keep_running_(true) {
- wait_for_ever_semaphore_ = OS::CreateSemaphore(0);
- data_ready_semaphore_ = OS::CreateSemaphore(0);
- }
-
- // When the thread starts running it will allocate a fixed number of bytes
- // on the stack and publish the location of this memory for others to use.
- void Run() {
- EmbeddedVector<char, 15 * 1024> local_buffer;
-
- // Initialize the buffer with a known good value.
- OS::StrNCpy(local_buffer, "Trace data was not generated.\n",
- local_buffer.length());
-
- // Publish the local buffer and signal its availability.
- data_ = local_buffer.start();
- length_ = local_buffer.length();
- data_ready_semaphore_->Signal();
-
- while (keep_running_) {
- // This thread will wait here until the end of time.
- wait_for_ever_semaphore_->Wait();
- }
-
- // Make sure we access the buffer after the wait to remove all possibility
- // of it being optimized away.
- OS::StrNCpy(local_buffer, "PreallocatedMemoryThread shutting down.\n",
- local_buffer.length());
- }
-
- static char* data() {
- if (data_ready_semaphore_ != NULL) {
- // Initial access is guarded until the data has been published.
- data_ready_semaphore_->Wait();
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
- return data_;
- }
-
- static unsigned length() {
- if (data_ready_semaphore_ != NULL) {
- // Initial access is guarded until the data has been published.
- data_ready_semaphore_->Wait();
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
- return length_;
- }
-
- static void StartThread() {
- if (the_thread_ != NULL) return;
-
- the_thread_ = new PreallocatedMemoryThread();
- the_thread_->Start();
- }
-
- // Stop the PreallocatedMemoryThread and release its resources.
- static void StopThread() {
- if (the_thread_ == NULL) return;
-
- the_thread_->keep_running_ = false;
- wait_for_ever_semaphore_->Signal();
-
- // Wait for the thread to terminate.
- the_thread_->Join();
-
- if (data_ready_semaphore_ != NULL) {
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
-
- delete wait_for_ever_semaphore_;
- wait_for_ever_semaphore_ = NULL;
-
- // Done with the thread entirely.
- delete the_thread_;
- the_thread_ = NULL;
- }
-
- private:
- // Used to make sure that the thread keeps looping even for spurious wakeups.
- bool keep_running_;
-
- // The preallocated memory thread singleton.
- static PreallocatedMemoryThread* the_thread_;
- // This semaphore is used by the PreallocatedMemoryThread to wait forever.
- static Semaphore* wait_for_ever_semaphore_;
- // Semaphore to signal that the data has been initialized.
- static Semaphore* data_ready_semaphore_;
-
- // Location and size of the preallocated memory block.
- static char* data_;
- static unsigned length_;
-
- DISALLOW_COPY_AND_ASSIGN(PreallocatedMemoryThread);
-};
-
-PreallocatedMemoryThread* PreallocatedMemoryThread::the_thread_ = NULL;
-Semaphore* PreallocatedMemoryThread::wait_for_ever_semaphore_ = NULL;
-Semaphore* PreallocatedMemoryThread::data_ready_semaphore_ = NULL;
-char* PreallocatedMemoryThread::data_ = NULL;
-unsigned PreallocatedMemoryThread::length_ = 0;
-
-static bool initialized = false;
-
-void Top::Initialize() {
- CHECK(!initialized);
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- ASSERT(runtime_profiler_semaphore_ == NULL);
- runtime_profiler_semaphore_ = OS::CreateSemaphore(0);
-#endif
-
- InitializeThreadLocal();
-
- // Only preallocate on the first initialization.
- if (FLAG_preallocate_message_memory && (preallocated_message_space == NULL)) {
- // Start the thread which will set aside some memory.
- PreallocatedMemoryThread::StartThread();
- preallocated_message_space =
- new NoAllocationStringAllocator(PreallocatedMemoryThread::data(),
- PreallocatedMemoryThread::length());
- PreallocatedStorage::Init(PreallocatedMemoryThread::length() / 4);
- }
- initialized = true;
-}
-
-
-void Top::TearDown() {
- if (initialized) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- delete runtime_profiler_semaphore_;
- runtime_profiler_semaphore_ = NULL;
-#endif
-
- // Remove the external reference to the preallocated stack memory.
- if (preallocated_message_space != NULL) {
- delete preallocated_message_space;
- preallocated_message_space = NULL;
- }
-
- PreallocatedMemoryThread::StopThread();
- initialized = false;
- }
-}
-
-
-void Top::RegisterTryCatchHandler(v8::TryCatch* that) {
- // The ARM simulator has a separate JS stack. We therefore register
- // the C++ try catch handler with the simulator and get back an
- // address that can be used for comparisons with addresses into the
- // JS stack. When running without the simulator, the address
- // returned will be the address of the C++ try catch handler itself.
- Address address = reinterpret_cast<Address>(
- SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that)));
- thread_local_.set_try_catch_handler_address(address);
-}
-
-
-void Top::UnregisterTryCatchHandler(v8::TryCatch* that) {
- ASSERT(try_catch_handler() == that);
- thread_local_.set_try_catch_handler_address(
- reinterpret_cast<Address>(that->next_));
- thread_local_.catcher_ = NULL;
- SimulatorStack::UnregisterCTryCatch();
-}
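
Register/UnregisterTryCatchHandler back the public v8::TryCatch object, which registers itself in its constructor and unregisters in its destructor. A hedged embedder-side sketch against the public API of this era, assuming `script` is a previously compiled v8::Script:

```c++
v8::HandleScope scope;
v8::TryCatch try_catch;  // ends up in RegisterTryCatchHandler above
v8::Local<v8::Value> result = script->Run();
if (result.IsEmpty() && try_catch.HasCaught()) {
  v8::String::Utf8Value error(try_catch.Exception());
  fprintf(stderr, "uncaught exception: %s\n", *error);
}  // destructor ends up in UnregisterTryCatchHandler
```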
-
-
-
-static int stack_trace_nesting_level = 0;
-static StringStream* incomplete_message = NULL;
-
-
-Handle<String> Top::StackTraceString() {
- if (stack_trace_nesting_level == 0) {
- stack_trace_nesting_level++;
- HeapStringAllocator allocator;
- StringStream::ClearMentionedObjectCache();
- StringStream accumulator(&allocator);
- incomplete_message = &accumulator;
- PrintStack(&accumulator);
- Handle<String> stack_trace = accumulator.ToString();
- incomplete_message = NULL;
- stack_trace_nesting_level = 0;
- return stack_trace;
- } else if (stack_trace_nesting_level == 1) {
- stack_trace_nesting_level++;
- OS::PrintError(
- "\n\nAttempt to print stack while printing stack (double fault)\n");
- OS::PrintError(
- "If you are lucky you may find a partial stack dump on stdout.\n\n");
- incomplete_message->OutputToStdOut();
- return Factory::empty_symbol();
- } else {
- OS::Abort();
- // Unreachable
- return Factory::empty_symbol();
- }
-}
-
-
-Handle<JSArray> Top::CaptureCurrentStackTrace(
- int frame_limit, StackTrace::StackTraceOptions options) {
- // Ensure no negative values.
- int limit = Max(frame_limit, 0);
- Handle<JSArray> stack_trace = Factory::NewJSArray(frame_limit);
-
- Handle<String> column_key = Factory::LookupAsciiSymbol("column");
- Handle<String> line_key = Factory::LookupAsciiSymbol("lineNumber");
- Handle<String> script_key = Factory::LookupAsciiSymbol("scriptName");
- Handle<String> name_or_source_url_key =
- Factory::LookupAsciiSymbol("nameOrSourceURL");
- Handle<String> script_name_or_source_url_key =
- Factory::LookupAsciiSymbol("scriptNameOrSourceURL");
- Handle<String> function_key = Factory::LookupAsciiSymbol("functionName");
- Handle<String> eval_key = Factory::LookupAsciiSymbol("isEval");
- Handle<String> constructor_key = Factory::LookupAsciiSymbol("isConstructor");
-
- StackTraceFrameIterator it;
- int frames_seen = 0;
- while (!it.done() && (frames_seen < limit)) {
- JavaScriptFrame* frame = it.frame();
-
- List<FrameSummary> frames(3); // Max 2 levels of inlining.
- frame->Summarize(&frames);
- for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
- // Create a JSObject to hold the information for the StackFrame.
- Handle<JSObject> stackFrame = Factory::NewJSObject(object_function());
-
- Handle<JSFunction> fun = frames[i].function();
- Handle<Script> script(Script::cast(fun->shared()->script()));
-
- if (options & StackTrace::kLineNumber) {
- int script_line_offset = script->line_offset()->value();
- int position = frames[i].code()->SourcePosition(frames[i].pc());
- int line_number = GetScriptLineNumber(script, position);
- // line_number is already shifted by the script_line_offset.
- int relative_line_number = line_number - script_line_offset;
- if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
- Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
- int start = (relative_line_number == 0) ? 0 :
- Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
- int column_offset = position - start;
- if (relative_line_number == 0) {
- // For the case where the code is on the same line as the script
- // tag.
- column_offset += script->column_offset()->value();
- }
- SetLocalPropertyNoThrow(stackFrame, column_key,
- Handle<Smi>(Smi::FromInt(column_offset + 1)));
- }
- SetLocalPropertyNoThrow(stackFrame, line_key,
- Handle<Smi>(Smi::FromInt(line_number + 1)));
- }
-
- if (options & StackTrace::kScriptName) {
- Handle<Object> script_name(script->name());
- SetLocalPropertyNoThrow(stackFrame, script_key, script_name);
- }
-
- if (options & StackTrace::kScriptNameOrSourceURL) {
- Handle<Object> script_name(script->name());
- Handle<JSValue> script_wrapper = GetScriptWrapper(script);
- Handle<Object> property = GetProperty(script_wrapper,
- name_or_source_url_key);
- ASSERT(property->IsJSFunction());
- Handle<JSFunction> method = Handle<JSFunction>::cast(property);
- bool caught_exception;
- Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
- NULL, &caught_exception);
- if (caught_exception) {
- result = Factory::undefined_value();
- }
- SetLocalPropertyNoThrow(stackFrame, script_name_or_source_url_key,
- result);
- }
-
- if (options & StackTrace::kFunctionName) {
- Handle<Object> fun_name(fun->shared()->name());
- if (fun_name->ToBoolean()->IsFalse()) {
- fun_name = Handle<Object>(fun->shared()->inferred_name());
- }
- SetLocalPropertyNoThrow(stackFrame, function_key, fun_name);
- }
-
- if (options & StackTrace::kIsEval) {
- int type = Smi::cast(script->compilation_type())->value();
- Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
- Factory::true_value() : Factory::false_value();
- SetLocalPropertyNoThrow(stackFrame, eval_key, is_eval);
- }
-
- if (options & StackTrace::kIsConstructor) {
- Handle<Object> is_constructor = (frames[i].is_constructor()) ?
- Factory::true_value() : Factory::false_value();
- SetLocalPropertyNoThrow(stackFrame, constructor_key, is_constructor);
- }
-
- FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
- frames_seen++;
- }
- it.Advance();
- }
-
- stack_trace->set_length(Smi::FromInt(frames_seen));
- return stack_trace;
-}
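
CaptureCurrentStackTrace is the engine-side half of the public stack-trace API; the frame fields built above (column, lineNumber, scriptName, functionName, isEval, isConstructor) surface through v8::StackFrame. A hedged sketch of the embedder-facing call:

```c++
v8::HandleScope scope;
v8::Local<v8::StackTrace> trace =
    v8::StackTrace::CurrentStackTrace(10, v8::StackTrace::kDetailed);
for (int i = 0; i < trace->GetFrameCount(); i++) {
  v8::Local<v8::StackFrame> frame = trace->GetFrame(i);
  printf("#%d line %d column %d\n",
         i, frame->GetLineNumber(), frame->GetColumn());
}
```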
-
-
-void Top::PrintStack() {
- if (stack_trace_nesting_level == 0) {
- stack_trace_nesting_level++;
-
- StringAllocator* allocator;
- if (preallocated_message_space == NULL) {
- allocator = new HeapStringAllocator();
- } else {
- allocator = preallocated_message_space;
- }
-
- NativeAllocationChecker allocation_checker(
- !FLAG_preallocate_message_memory ?
- NativeAllocationChecker::ALLOW :
- NativeAllocationChecker::DISALLOW);
-
- StringStream::ClearMentionedObjectCache();
- StringStream accumulator(allocator);
- incomplete_message = &accumulator;
- PrintStack(&accumulator);
- accumulator.OutputToStdOut();
- accumulator.Log();
- incomplete_message = NULL;
- stack_trace_nesting_level = 0;
- if (preallocated_message_space == NULL) {
- // Remove the HeapStringAllocator created above.
- delete allocator;
- }
- } else if (stack_trace_nesting_level == 1) {
- stack_trace_nesting_level++;
- OS::PrintError(
- "\n\nAttempt to print stack while printing stack (double fault)\n");
- OS::PrintError(
- "If you are lucky you may find a partial stack dump on stdout.\n\n");
- incomplete_message->OutputToStdOut();
- }
-}
-
-
-static void PrintFrames(StringStream* accumulator,
- StackFrame::PrintMode mode) {
- StackFrameIterator it;
- for (int i = 0; !it.done(); it.Advance()) {
- it.frame()->Print(accumulator, mode, i++);
- }
-}
-
-
-void Top::PrintStack(StringStream* accumulator) {
- // The MentionedObjectCache is not GC-proof at the moment.
- AssertNoAllocation nogc;
- ASSERT(StringStream::IsMentionedObjectCacheClear());
-
- // Avoid printing anything if there are no frames.
- if (c_entry_fp(GetCurrentThread()) == 0) return;
-
- accumulator->Add(
- "\n==== Stack trace ============================================\n\n");
- PrintFrames(accumulator, StackFrame::OVERVIEW);
-
- accumulator->Add(
- "\n==== Details ================================================\n\n");
- PrintFrames(accumulator, StackFrame::DETAILS);
-
- accumulator->PrintMentionedObjectCache();
- accumulator->Add("=====================\n\n");
-}
-
-
-void Top::SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback) {
- thread_local_.failed_access_check_callback_ = callback;
-}
-
-
-void Top::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
- if (!thread_local_.failed_access_check_callback_) return;
-
- ASSERT(receiver->IsAccessCheckNeeded());
- ASSERT(Top::context());
-
- // Get the data object from access check info.
- JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
- if (!constructor->shared()->IsApiFunction()) return;
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == Heap::undefined_value()) return;
-
- HandleScope scope;
- Handle<JSObject> receiver_handle(receiver);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
- thread_local_.failed_access_check_callback_(
- v8::Utils::ToLocal(receiver_handle),
- type,
- v8::Utils::ToLocal(data));
-}
-
-
-enum MayAccessDecision {
- YES, NO, UNKNOWN
-};
-
-
-static MayAccessDecision MayAccessPreCheck(JSObject* receiver,
- v8::AccessType type) {
- // During bootstrapping, callback functions are not enabled yet.
- if (Bootstrapper::IsActive()) return YES;
-
- if (receiver->IsJSGlobalProxy()) {
- Object* receiver_context = JSGlobalProxy::cast(receiver)->context();
- if (!receiver_context->IsContext()) return NO;
-
- // Get the global context of the current top context.
- // Avoid using Top::global_context() because it uses a Handle.
- Context* global_context = Top::context()->global()->global_context();
- if (receiver_context == global_context) return YES;
-
- if (Context::cast(receiver_context)->security_token() ==
- global_context->security_token())
- return YES;
- }
-
- return UNKNOWN;
-}
-
-
-bool Top::MayNamedAccess(JSObject* receiver, Object* key, v8::AccessType type) {
- ASSERT(receiver->IsAccessCheckNeeded());
-
- // The callers of this method are not expecting a GC.
- AssertNoAllocation no_gc;
-
- // Skip checks for hidden properties access. Note, we do not
- // require existence of a context in this case.
- if (key == Heap::hidden_symbol()) return true;
-
- // Check for compatibility between the security tokens in the
- // current lexical context and the accessed object.
- ASSERT(Top::context());
-
- MayAccessDecision decision = MayAccessPreCheck(receiver, type);
- if (decision != UNKNOWN) return decision == YES;
-
- // Get named access check callback
- JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
- if (!constructor->shared()->IsApiFunction()) return false;
-
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == Heap::undefined_value()) return false;
-
- Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
- v8::NamedSecurityCallback callback =
- v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
-
- if (!callback) return false;
-
- HandleScope scope;
- Handle<JSObject> receiver_handle(receiver);
- Handle<Object> key_handle(key);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
- LOG(ApiNamedSecurityCheck(key));
- bool result = false;
- {
- // Leaving JavaScript.
- VMState state(EXTERNAL);
- result = callback(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(key_handle),
- type,
- v8::Utils::ToLocal(data));
- }
- return result;
-}
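
The callback extracted above has the public v8::NamedSecurityCallback shape. A hedged embedder-side example that allows property reads and blocks all other access (signature per v8.h of this era, treated as an assumption):

```c++
static bool NamedAccessBlocker(v8::Local<v8::Object> host,
                               v8::Local<v8::Value> key,
                               v8::AccessType type,
                               v8::Local<v8::Value> data) {
  return type == v8::ACCESS_GET;  // allow reads only
}
// Installed (together with an indexed counterpart) via
//   ObjectTemplate::SetAccessCheckCallbacks(NamedAccessBlocker, ...);
```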
-
-
-bool Top::MayIndexedAccess(JSObject* receiver,
- uint32_t index,
- v8::AccessType type) {
- ASSERT(receiver->IsAccessCheckNeeded());
- // Check for compatibility between the security tokens in the
- // current lexical context and the accessed object.
- ASSERT(Top::context());
- // The callers of this method are not expecting a GC.
- AssertNoAllocation no_gc;
-
- MayAccessDecision decision = MayAccessPreCheck(receiver, type);
- if (decision != UNKNOWN) return decision == YES;
-
- // Get indexed access check callback
- JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
- if (!constructor->shared()->IsApiFunction()) return false;
-
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == Heap::undefined_value()) return false;
-
- Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
- v8::IndexedSecurityCallback callback =
- v8::ToCData<v8::IndexedSecurityCallback>(fun_obj);
-
- if (!callback) return false;
-
- HandleScope scope;
- Handle<JSObject> receiver_handle(receiver);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
- LOG(ApiIndexedSecurityCheck(index));
- bool result = false;
- {
- // Leaving JavaScript.
- VMState state(EXTERNAL);
- result = callback(v8::Utils::ToLocal(receiver_handle),
- index,
- type,
- v8::Utils::ToLocal(data));
- }
- return result;
-}
-
-
-const char* Top::kStackOverflowMessage =
- "Uncaught RangeError: Maximum call stack size exceeded";
-
-
-Failure* Top::StackOverflow() {
- HandleScope scope;
- Handle<String> key = Factory::stack_overflow_symbol();
- Handle<JSObject> boilerplate =
- Handle<JSObject>::cast(GetProperty(Top::builtins(), key));
- Handle<Object> exception = Copy(boilerplate);
- // TODO(1240995): To avoid having to call JavaScript code to compute
- // the message for stack overflow exceptions which is very likely to
- // double fault with another stack overflow exception, we use a
- // precomputed message.
- DoThrow(*exception, NULL, kStackOverflowMessage);
- return Failure::Exception();
-}
-
-
-Failure* Top::TerminateExecution() {
- DoThrow(Heap::termination_exception(), NULL, NULL);
- return Failure::Exception();
-}
-
-
-Failure* Top::Throw(Object* exception, MessageLocation* location) {
- DoThrow(exception, location, NULL);
- return Failure::Exception();
-}
-
-
-Failure* Top::ReThrow(MaybeObject* exception, MessageLocation* location) {
- bool can_be_caught_externally = false;
- ShouldReportException(&can_be_caught_externally,
- is_catchable_by_javascript(exception));
- thread_local_.catcher_ = can_be_caught_externally ?
- try_catch_handler() : NULL;
-
- // Set the exception being re-thrown.
- set_pending_exception(exception);
- if (exception->IsFailure()) return exception->ToFailureUnchecked();
- return Failure::Exception();
-}
-
-
-Failure* Top::ThrowIllegalOperation() {
- return Throw(Heap::illegal_access_symbol());
-}
-
-
-void Top::ScheduleThrow(Object* exception) {
- // When scheduling a throw we first throw the exception to get the
- // error reporting if it is uncaught before rescheduling it.
- Throw(exception);
- thread_local_.scheduled_exception_ = pending_exception();
- thread_local_.external_caught_exception_ = false;
- clear_pending_exception();
-}
-
-
-Failure* Top::PromoteScheduledException() {
- MaybeObject* thrown = scheduled_exception();
- clear_scheduled_exception();
- // Re-throw the exception to avoid getting repeated error reporting.
- return ReThrow(thrown);
-}
-
-
-void Top::PrintCurrentStackTrace(FILE* out) {
- StackTraceFrameIterator it;
- while (!it.done()) {
- HandleScope scope;
- // Find code position if recorded in relocation info.
- JavaScriptFrame* frame = it.frame();
- int pos = frame->code()->SourcePosition(frame->pc());
- Handle<Object> pos_obj(Smi::FromInt(pos));
- // Fetch function and receiver.
- Handle<JSFunction> fun(JSFunction::cast(frame->function()));
- Handle<Object> recv(frame->receiver());
- // Advance to the next JavaScript frame and determine if the
- // current frame is the top-level frame.
- it.Advance();
- Handle<Object> is_top_level = it.done()
- ? Factory::true_value()
- : Factory::false_value();
- // Generate and print stack trace line.
- Handle<String> line =
- Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
- if (line->length() > 0) {
- line->PrintOn(out);
- fprintf(out, "\n");
- }
- }
-}
-
-
-void Top::ComputeLocation(MessageLocation* target) {
- *target = MessageLocation(Handle<Script>(Heap::empty_script()), -1, -1);
- StackTraceFrameIterator it;
- if (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- JSFunction* fun = JSFunction::cast(frame->function());
- Object* script = fun->shared()->script();
- if (script->IsScript() &&
- !(Script::cast(script)->source()->IsUndefined())) {
- int pos = frame->code()->SourcePosition(frame->pc());
- // Compute the location from the function and the reloc info.
- Handle<Script> casted_script(Script::cast(script));
- *target = MessageLocation(casted_script, pos, pos + 1);
- }
- }
-}
-
-
-bool Top::ShouldReportException(bool* can_be_caught_externally,
- bool catchable_by_javascript) {
- // Find the top-most try-catch handler.
- StackHandler* handler =
- StackHandler::FromAddress(Top::handler(Top::GetCurrentThread()));
- while (handler != NULL && !handler->is_try_catch()) {
- handler = handler->next();
- }
-
- // Get the address of the external handler so we can compare the address to
- // determine which one is closer to the top of the stack.
- Address external_handler_address = thread_local_.try_catch_handler_address();
-
- // The exception has been externally caught if and only if there is
- // an external handler which is on top of the top-most try-catch
- // handler.
- *can_be_caught_externally = external_handler_address != NULL &&
- (handler == NULL || handler->address() > external_handler_address ||
- !catchable_by_javascript);
-
- if (*can_be_caught_externally) {
- // Only report the exception if the external handler is verbose.
- return try_catch_handler()->is_verbose_;
- } else {
- // Report the exception if it isn't caught by JavaScript code.
- return handler == NULL;
- }
-}
-
-
-void Top::DoThrow(MaybeObject* exception,
- MessageLocation* location,
- const char* message) {
- ASSERT(!has_pending_exception());
-
- HandleScope scope;
- Object* exception_object = Smi::FromInt(0);
- bool is_object = exception->ToObject(&exception_object);
- Handle<Object> exception_handle(exception_object);
-
- // Determine reporting and whether the exception is caught externally.
- bool catchable_by_javascript = is_catchable_by_javascript(exception);
- // Only real objects can be caught by JS.
- ASSERT(!catchable_by_javascript || is_object);
- bool can_be_caught_externally = false;
- bool should_report_exception =
- ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
- bool report_exception = catchable_by_javascript && should_report_exception;
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Notify debugger of exception.
- if (catchable_by_javascript) {
- Debugger::OnException(exception_handle, report_exception);
- }
-#endif
-
- // Generate the message.
- Handle<Object> message_obj;
- MessageLocation potential_computed_location;
- bool try_catch_needs_message =
- can_be_caught_externally &&
- try_catch_handler()->capture_message_;
- if (report_exception || try_catch_needs_message) {
- if (location == NULL) {
- // If no location was specified, we use a computed one instead.
- ComputeLocation(&potential_computed_location);
- location = &potential_computed_location;
- }
- if (!Bootstrapper::IsActive()) {
- // It's not safe to try to make message objects or collect stack
- // traces while the bootstrapper is active since the infrastructure
- // may not have been properly initialized.
- Handle<String> stack_trace;
- if (FLAG_trace_exception) stack_trace = StackTraceString();
- Handle<JSArray> stack_trace_object;
- if (report_exception && capture_stack_trace_for_uncaught_exceptions) {
- stack_trace_object = Top::CaptureCurrentStackTrace(
- stack_trace_for_uncaught_exceptions_frame_limit,
- stack_trace_for_uncaught_exceptions_options);
- }
- ASSERT(is_object); // Can't use the handle unless there's a real object.
- message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
- location, HandleVector<Object>(&exception_handle, 1), stack_trace,
- stack_trace_object);
- }
- }
-
- // Save the message for reporting if the exception remains uncaught.
- thread_local_.has_pending_message_ = report_exception;
- thread_local_.pending_message_ = message;
- if (!message_obj.is_null()) {
- thread_local_.pending_message_obj_ = *message_obj;
- if (location != NULL) {
- thread_local_.pending_message_script_ = *location->script();
- thread_local_.pending_message_start_pos_ = location->start_pos();
- thread_local_.pending_message_end_pos_ = location->end_pos();
- }
- }
-
- // Do not forget to clean catcher_ if the currently thrown exception cannot
- // be caught. If necessary, ReThrow will update the catcher.
- thread_local_.catcher_ = can_be_caught_externally ?
- try_catch_handler() : NULL;
-
- // NOTE: Notifying the debugger or generating the message
- // may have caused new exceptions. For now, we just ignore
- // that and set the pending exception to the original one.
- if (is_object) {
- set_pending_exception(*exception_handle);
- } else {
- // Failures are not on the heap so they neither need nor work with handles.
- ASSERT(exception_handle->IsFailure());
- set_pending_exception(exception);
- }
-}
-
-
-bool Top::IsExternallyCaught() {
- ASSERT(has_pending_exception());
-
- if ((thread_local_.catcher_ == NULL) ||
- (try_catch_handler() != thread_local_.catcher_)) {
- // When throwing the exception, we found no v8::TryCatch
- // that should care about this exception.
- return false;
- }
-
- if (!is_catchable_by_javascript(pending_exception())) {
- return true;
- }
-
- // Get the address of the external handler so we can compare the address to
- // determine which one is closer to the top of the stack.
- Address external_handler_address = thread_local_.try_catch_handler_address();
- ASSERT(external_handler_address != NULL);
-
- // The exception has been externally caught if and only if there is
- // an external handler which is on top of the top-most try-finally
- // handler.
- // There should be no try-catch blocks as they would prohibit us from
- // finding the external catcher in the first place (see the catcher_ check
- // above).
- //
- // Note that a finally clause would rethrow an exception unless it's aborted
- // by jumps in control flow like return, break, etc., and we'll have another
- // chance to set a proper v8::TryCatch.
- StackHandler* handler =
- StackHandler::FromAddress(Top::handler(Top::GetCurrentThread()));
- while (handler != NULL && handler->address() < external_handler_address) {
- ASSERT(!handler->is_try_catch());
- if (handler->is_try_finally()) return false;
-
- handler = handler->next();
- }
-
- return true;
-}
-
-
-void Top::ReportPendingMessages() {
- ASSERT(has_pending_exception());
- // If the pending exception is an OutOfMemoryException, set out_of_memory
- // in the global context. Note: we have to mark the global context here
- // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
- // set it.
- bool external_caught = IsExternallyCaught();
- thread_local_.external_caught_exception_ = external_caught;
- HandleScope scope;
- if (thread_local_.pending_exception_ == Failure::OutOfMemoryException()) {
- context()->mark_out_of_memory();
- } else if (thread_local_.pending_exception_ ==
- Heap::termination_exception()) {
- if (external_caught) {
- try_catch_handler()->can_continue_ = false;
- try_catch_handler()->exception_ = Heap::null_value();
- }
- } else {
- // At this point all non-object (failure) exceptions have
- // been dealt with so this shouldn't fail.
- Object* pending_exception_object = pending_exception()->ToObjectUnchecked();
- Handle<Object> exception(pending_exception_object);
- thread_local_.external_caught_exception_ = false;
- if (external_caught) {
- try_catch_handler()->can_continue_ = true;
- try_catch_handler()->exception_ = thread_local_.pending_exception_;
- if (!thread_local_.pending_message_obj_->IsTheHole()) {
- try_catch_handler()->message_ = thread_local_.pending_message_obj_;
- }
- }
- if (thread_local_.has_pending_message_) {
- thread_local_.has_pending_message_ = false;
- if (thread_local_.pending_message_ != NULL) {
- MessageHandler::ReportMessage(thread_local_.pending_message_);
- } else if (!thread_local_.pending_message_obj_->IsTheHole()) {
- Handle<Object> message_obj(thread_local_.pending_message_obj_);
- if (thread_local_.pending_message_script_ != NULL) {
- Handle<Script> script(thread_local_.pending_message_script_);
- int start_pos = thread_local_.pending_message_start_pos_;
- int end_pos = thread_local_.pending_message_end_pos_;
- MessageLocation location(script, start_pos, end_pos);
- MessageHandler::ReportMessage(&location, message_obj);
- } else {
- MessageHandler::ReportMessage(NULL, message_obj);
- }
- }
- }
- thread_local_.external_caught_exception_ = external_caught;
- set_pending_exception(*exception);
- }
- clear_pending_message();
-}
-
-
-void Top::TraceException(bool flag) {
- FLAG_trace_exception = flag;
-}
-
-
-bool Top::OptionalRescheduleException(bool is_bottom_call) {
- // Always reschedule out-of-memory exceptions.
- if (!is_out_of_memory()) {
- bool is_termination_exception =
- pending_exception() == Heap::termination_exception();
-
- // Do not reschedule the exception if this is the bottom call.
- bool clear_exception = is_bottom_call;
-
- if (is_termination_exception) {
- if (is_bottom_call) {
- thread_local_.external_caught_exception_ = false;
- clear_pending_exception();
- return false;
- }
- } else if (thread_local_.external_caught_exception_) {
- // If the exception is externally caught, clear it if there are no
- // JavaScript frames on the way to the C++ frame that has the
- // external handler.
- ASSERT(thread_local_.try_catch_handler_address() != NULL);
- Address external_handler_address =
- thread_local_.try_catch_handler_address();
- JavaScriptFrameIterator it;
- if (it.done() || (it.frame()->sp() > external_handler_address)) {
- clear_exception = true;
- }
- }
-
- // Clear the exception if needed.
- if (clear_exception) {
- thread_local_.external_caught_exception_ = false;
- clear_pending_exception();
- return false;
- }
- }
-
- // Reschedule the exception.
- thread_local_.scheduled_exception_ = pending_exception();
- clear_pending_exception();
- return true;
-}
-
-
-void Top::SetCaptureStackTraceForUncaughtExceptions(
- bool capture,
- int frame_limit,
- StackTrace::StackTraceOptions options) {
- capture_stack_trace_for_uncaught_exceptions = capture;
- stack_trace_for_uncaught_exceptions_frame_limit = frame_limit;
- stack_trace_for_uncaught_exceptions_options = options;
-}
-
-
-bool Top::is_out_of_memory() {
- if (has_pending_exception()) {
- MaybeObject* e = pending_exception();
- if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
- return true;
- }
- }
- if (has_scheduled_exception()) {
- MaybeObject* e = scheduled_exception();
- if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
- return true;
- }
- }
- return false;
-}
-
-
-Handle<Context> Top::global_context() {
- GlobalObject* global = thread_local_.context_->global();
- return Handle<Context>(global->global_context());
-}
-
-
-Handle<Context> Top::GetCallingGlobalContext() {
- JavaScriptFrameIterator it;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (Debug::InDebugger()) {
- while (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- Context* context = Context::cast(frame->context());
- if (context->global_context() == *Debug::debug_context()) {
- it.Advance();
- } else {
- break;
- }
- }
- }
-#endif // ENABLE_DEBUGGER_SUPPORT
- if (it.done()) return Handle<Context>::null();
- JavaScriptFrame* frame = it.frame();
- Context* context = Context::cast(frame->context());
- return Handle<Context>(context->global_context());
-}
-
-
-char* Top::ArchiveThread(char* to) {
- memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(thread_local_));
- InitializeThreadLocal();
- return to + sizeof(thread_local_);
-}
-
-
-char* Top::RestoreThread(char* from) {
- memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(thread_local_));
- // This might be just paranoia, but it seems to be needed in case
- // thread_local_ is restored on a different OS thread.
-#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
- thread_local_.simulator_ = Simulator::current();
-#elif V8_TARGET_ARCH_MIPS
- thread_local_.simulator_ = assembler::mips::Simulator::current();
-#endif
-#endif
- return from + sizeof(thread_local_);
-}
-
-} } // namespace v8::internal
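
Before moving on to the header, the archiving idiom used by the two functions above is worth spelling out. The sketch below is standalone with stand-in types (not V8 code); the memcpy round-trip is only safe because the per-thread state struct is trivially copyable:

    #include <cstring>

    struct ThreadState {              // stand-in for ThreadLocalTop
      int thread_id;
      void* pending_exception;
    };

    static ThreadState thread_state;  // stand-in for thread_local_

    char* ArchiveThread(char* to) {
      std::memcpy(to, &thread_state, sizeof(thread_state));
      thread_state = ThreadState{};   // reinitialize for the next user
      return to + sizeof(thread_state);
    }

    char* RestoreThread(char* from) {
      std::memcpy(&thread_state, from, sizeof(thread_state));
      return from + sizeof(thread_state);
    }
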
diff --git a/deps/v8/src/top.h b/deps/v8/src/top.h
deleted file mode 100644
index 26ae542f5..000000000
--- a/deps/v8/src/top.h
+++ /dev/null
@@ -1,608 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_TOP_H_
-#define V8_TOP_H_
-
-#include "atomicops.h"
-#include "compilation-cache.h"
-#include "frames-inl.h"
-#include "runtime-profiler.h"
-
-namespace v8 {
-namespace internal {
-
-class Simulator;
-
-#define RETURN_IF_SCHEDULED_EXCEPTION() \
- if (Top::has_scheduled_exception()) return Top::PromoteScheduledException()
-
-#define RETURN_IF_EMPTY_HANDLE_VALUE(call, value) \
- if (call.is_null()) { \
- ASSERT(Top::has_pending_exception()); \
- return value; \
- }
-
-#define RETURN_IF_EMPTY_HANDLE(call) \
- RETURN_IF_EMPTY_HANDLE_VALUE(call, Failure::Exception())
-
-// Top has static variables used for JavaScript execution.
-
-class SaveContext; // Forward declaration.
-class ThreadVisitor; // Defined in v8threads.h
-class VMState; // Defined in vm-state.h
-
-class ThreadLocalTop BASE_EMBEDDED {
- public:
- // Initialize the thread data.
- void Initialize();
-
- // Get the top C++ try catch handler or NULL if none are registered.
- //
- // This method is not guaranteed to return an address that can be
- // used for comparison with addresses into the JS stack. If such an
- // address is needed, use try_catch_handler_address.
- v8::TryCatch* TryCatchHandler();
-
- // Get the address of the top C++ try catch handler or NULL if
- // none are registered.
- //
- // This method always returns an address that can be compared to
- // pointers into the JavaScript stack. When running on actual
- // hardware, try_catch_handler_address and TryCatchHandler return
- // the same pointer. When running on a simulator with a separate JS
- // stack, try_catch_handler_address returns a JS stack address that
- // corresponds to the place on the JS stack where the C++ handler
- // would have been if the stack were not separate.
- inline Address try_catch_handler_address() {
- return try_catch_handler_address_;
- }
-
- // Set the address of the top C++ try catch handler.
- inline void set_try_catch_handler_address(Address address) {
- try_catch_handler_address_ = address;
- }
-
- void Free() {
- ASSERT(!has_pending_message_);
- ASSERT(!external_caught_exception_);
- ASSERT(try_catch_handler_address_ == NULL);
- }
-
- // The context in which the current function object was created;
- // it is also used for variable lookups.
- Context* context_;
- int thread_id_;
- MaybeObject* pending_exception_;
- bool has_pending_message_;
- const char* pending_message_;
- Object* pending_message_obj_;
- Script* pending_message_script_;
- int pending_message_start_pos_;
- int pending_message_end_pos_;
- // Use a separate value for scheduled exceptions to preserve the
- // invariants that hold about pending_exception. We may want to
- // unify them later.
- MaybeObject* scheduled_exception_;
- bool external_caught_exception_;
- SaveContext* save_context_;
- v8::TryCatch* catcher_;
-
- // Stack.
- Address c_entry_fp_; // the frame pointer of the top c entry frame
- Address handler_; // try-blocks are chained through the stack
-
-#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
- Simulator* simulator_;
-#elif V8_TARGET_ARCH_MIPS
- assembler::mips::Simulator* simulator_;
-#endif
-#endif // USE_SIMULATOR
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Address js_entry_sp_; // the stack pointer of the bottom js entry frame
- Address external_callback_; // the external callback we're currently in
-#endif
-
-#ifdef ENABLE_VMSTATE_TRACKING
- StateTag current_vm_state_;
-
- // Used for communication with the runtime profiler thread.
- // Possible values are specified in RuntimeProfilerState.
- Atomic32 runtime_profiler_state_;
-#endif
-
- // Generated code scratch locations.
- int32_t formal_count_;
-
- // Call back function to report unsafe JS accesses.
- v8::FailedAccessCheckCallback failed_access_check_callback_;
-
- private:
- Address try_catch_handler_address_;
-};
-
-#define TOP_ADDRESS_LIST(C) \
- C(handler_address) \
- C(c_entry_fp_address) \
- C(context_address) \
- C(pending_exception_address) \
- C(external_caught_exception_address)
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-#define TOP_ADDRESS_LIST_PROF(C) \
- C(js_entry_sp_address)
-#else
-#define TOP_ADDRESS_LIST_PROF(C)
-#endif
-
-
-class Top {
- public:
- enum AddressId {
-#define C(name) k_##name,
- TOP_ADDRESS_LIST(C)
- TOP_ADDRESS_LIST_PROF(C)
-#undef C
- k_top_address_count
- };
-
- static Address get_address_from_id(AddressId id);
-
- // Access to top context (where the current function object was created).
- static Context* context() { return thread_local_.context_; }
- static void set_context(Context* context) {
- thread_local_.context_ = context;
- }
- static Context** context_address() { return &thread_local_.context_; }
-
- static SaveContext* save_context() { return thread_local_.save_context_; }
- static void set_save_context(SaveContext* save) {
- thread_local_.save_context_ = save;
- }
-
- // Access to current thread id.
- static int thread_id() { return thread_local_.thread_id_; }
- static void set_thread_id(int id) { thread_local_.thread_id_ = id; }
-
- // Interface to pending exception.
- static MaybeObject* pending_exception() {
- ASSERT(has_pending_exception());
- return thread_local_.pending_exception_;
- }
- static bool external_caught_exception() {
- return thread_local_.external_caught_exception_;
- }
- static void set_pending_exception(MaybeObject* exception) {
- thread_local_.pending_exception_ = exception;
- }
- static void clear_pending_exception() {
- thread_local_.pending_exception_ = Heap::the_hole_value();
- }
-
- static MaybeObject** pending_exception_address() {
- return &thread_local_.pending_exception_;
- }
- static bool has_pending_exception() {
- return !thread_local_.pending_exception_->IsTheHole();
- }
- static void clear_pending_message() {
- thread_local_.has_pending_message_ = false;
- thread_local_.pending_message_ = NULL;
- thread_local_.pending_message_obj_ = Heap::the_hole_value();
- thread_local_.pending_message_script_ = NULL;
- }
- static v8::TryCatch* try_catch_handler() {
- return thread_local_.TryCatchHandler();
- }
- static Address try_catch_handler_address() {
- return thread_local_.try_catch_handler_address();
- }
- // This method is called by the API after operations that may throw
- // exceptions. If an exception was thrown and not handled by an external
- // handler, the exception is scheduled to be rethrown when we return to
- // running JavaScript code. If an exception is scheduled, true is returned.
- static bool OptionalRescheduleException(bool is_bottom_call);
-
-
- static bool* external_caught_exception_address() {
- return &thread_local_.external_caught_exception_;
- }
-
- static MaybeObject** scheduled_exception_address() {
- return &thread_local_.scheduled_exception_;
- }
-
- static MaybeObject* scheduled_exception() {
- ASSERT(has_scheduled_exception());
- return thread_local_.scheduled_exception_;
- }
- static bool has_scheduled_exception() {
- return !thread_local_.scheduled_exception_->IsTheHole();
- }
- static void clear_scheduled_exception() {
- thread_local_.scheduled_exception_ = Heap::the_hole_value();
- }
-
- static bool IsExternallyCaught();
-
- static void SetCaptureStackTraceForUncaughtExceptions(
- bool capture,
- int frame_limit,
- StackTrace::StackTraceOptions options);
-
- // Tells whether the current context has experienced an out of memory
- // exception.
- static bool is_out_of_memory();
-
- static bool is_catchable_by_javascript(MaybeObject* exception) {
- return (exception != Failure::OutOfMemoryException()) &&
- (exception != Heap::termination_exception());
- }
-
- // JS execution stack (see frames.h).
- static Address c_entry_fp(ThreadLocalTop* thread) {
- return thread->c_entry_fp_;
- }
- static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
-
- static inline Address* c_entry_fp_address() {
- return &thread_local_.c_entry_fp_;
- }
- static inline Address* handler_address() { return &thread_local_.handler_; }
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // Bottom JS entry (see StackTracer::Trace in log.cc).
- static Address js_entry_sp(ThreadLocalTop* thread) {
- return thread->js_entry_sp_;
- }
- static inline Address* js_entry_sp_address() {
- return &thread_local_.js_entry_sp_;
- }
-
- static Address external_callback() {
- return thread_local_.external_callback_;
- }
- static void set_external_callback(Address callback) {
- thread_local_.external_callback_ = callback;
- }
-#endif
-
-#ifdef ENABLE_VMSTATE_TRACKING
- static StateTag current_vm_state() {
- return thread_local_.current_vm_state_;
- }
-
- static void SetCurrentVMState(StateTag state) {
- if (RuntimeProfiler::IsEnabled()) {
- if (state == JS) {
- // JS or non-JS -> JS transition.
- RuntimeProfilerState old_state = SwapRuntimeProfilerState(PROF_IN_JS);
- if (old_state == PROF_NOT_IN_JS_WAITING_FOR_JS) {
- // If the runtime profiler was waiting, we reset the eager
- // optimizing data in the compilation cache to get a fresh
- // start after not running JavaScript code for a while and
- // signal the runtime profiler so it can resume.
- CompilationCache::ResetEagerOptimizingData();
- runtime_profiler_semaphore_->Signal();
- }
- } else if (thread_local_.current_vm_state_ == JS) {
- // JS -> non-JS transition. Update the runtime profiler state.
- ASSERT(IsInJSState());
- SetRuntimeProfilerState(PROF_NOT_IN_JS);
- }
- }
- thread_local_.current_vm_state_ = state;
- }
-
- // Called in the runtime profiler thread.
- // Returns whether the current VM state is set to JS.
- static bool IsInJSState() {
- ASSERT(RuntimeProfiler::IsEnabled());
- return static_cast<RuntimeProfilerState>(
- NoBarrier_Load(&thread_local_.runtime_profiler_state_)) == PROF_IN_JS;
- }
-
- // Called in the runtime profiler thread.
- // Waits for the VM state to transition from non-JS to JS. Returns
- // true when notified of the transition, false when the current
- // state is not the expected non-JS state.
- static bool WaitForJSState() {
- ASSERT(RuntimeProfiler::IsEnabled());
- // Try to switch to waiting state.
- RuntimeProfilerState old_state = CompareAndSwapRuntimeProfilerState(
- PROF_NOT_IN_JS, PROF_NOT_IN_JS_WAITING_FOR_JS);
- if (old_state == PROF_NOT_IN_JS) {
- runtime_profiler_semaphore_->Wait();
- return true;
- }
- return false;
- }
-
- // When shutting down we join the profiler thread. Doing so while
- // it's waiting on a semaphore will cause a deadlock, so we have to
- // wake it up first.
- static void WakeUpRuntimeProfilerThreadBeforeShutdown() {
- runtime_profiler_semaphore_->Signal();
- }
-#endif
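
The wake-before-join rule in the comment above generalizes well beyond V8. A self-contained illustration using C++20 standard primitives as stand-ins for V8's Semaphore and profiler thread (the names and the binary semaphore are assumptions of this sketch, not the V8 API):

    #include <semaphore>  // C++20
    #include <thread>

    std::binary_semaphore wake{0};
    bool shutting_down = false;

    void ProfilerLoop() {
      for (;;) {
        wake.acquire();             // blocks until signalled
        if (shutting_down) return;  // woken up solely so it can exit
        // ... one round of profiling work ...
      }
    }

    int main() {
      std::thread profiler(ProfilerLoop);
      shutting_down = true;
      wake.release();   // wake the sleeper first...
      profiler.join();  // ...then joining cannot deadlock
      return 0;
    }
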
-
- // Generated code scratch locations.
- static void* formal_count_address() { return &thread_local_.formal_count_; }
-
- static void PrintCurrentStackTrace(FILE* out);
- static void PrintStackTrace(FILE* out, char* thread_data);
- static void PrintStack(StringStream* accumulator);
- static void PrintStack();
- static Handle<String> StackTraceString();
- static Handle<JSArray> CaptureCurrentStackTrace(
- int frame_limit,
- StackTrace::StackTraceOptions options);
-
- // Returns whether the top context may access the given global object. If
- // the result is false, the pending exception is guaranteed to be
- // set.
- static bool MayNamedAccess(JSObject* receiver,
- Object* key,
- v8::AccessType type);
- static bool MayIndexedAccess(JSObject* receiver,
- uint32_t index,
- v8::AccessType type);
-
- static void SetFailedAccessCheckCallback(
- v8::FailedAccessCheckCallback callback);
- static void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);
-
- // Exception throwing support. The caller should use the result
- // of Throw() as its return value.
- static Failure* Throw(Object* exception, MessageLocation* location = NULL);
- // Re-throw an exception. This involves no error reporting since
- // error reporting was handled when the exception was thrown
- // originally.
- static Failure* ReThrow(MaybeObject* exception,
- MessageLocation* location = NULL);
- static void ScheduleThrow(Object* exception);
- static void ReportPendingMessages();
- static Failure* ThrowIllegalOperation();
-
- // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
- static Failure* PromoteScheduledException();
- static void DoThrow(MaybeObject* exception,
- MessageLocation* location,
- const char* message);
- // Checks if exception should be reported and finds out if it's
- // caught externally.
- static bool ShouldReportException(bool* can_be_caught_externally,
- bool catchable_by_javascript);
-
- // Attempts to compute the current source location, storing the
- // result in the target out parameter.
- static void ComputeLocation(MessageLocation* target);
-
- // Override command line flag.
- static void TraceException(bool flag);
-
- // Out of resource exception helpers.
- static Failure* StackOverflow();
- static Failure* TerminateExecution();
-
- // Administration
- static void Initialize();
- static void TearDown();
- static void Iterate(ObjectVisitor* v);
- static void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
- static char* Iterate(ObjectVisitor* v, char* t);
- static void IterateThread(ThreadVisitor* v);
- static void IterateThread(ThreadVisitor* v, char* t);
-
- // Returns the global object of the current context. It could be
- // a builtin object or a JS global object.
- static Handle<GlobalObject> global() {
- return Handle<GlobalObject>(context()->global());
- }
-
- // Returns the global proxy object of the current context.
- static Object* global_proxy() {
- return context()->global_proxy();
- }
-
- // Returns the current global context.
- static Handle<Context> global_context();
-
- // Returns the global context of the calling JavaScript code. That
- // is, the global context of the top-most JavaScript frame.
- static Handle<Context> GetCallingGlobalContext();
-
- static Handle<JSBuiltinsObject> builtins() {
- return Handle<JSBuiltinsObject>(thread_local_.context_->builtins());
- }
-
- static void RegisterTryCatchHandler(v8::TryCatch* that);
- static void UnregisterTryCatchHandler(v8::TryCatch* that);
-
-#define TOP_GLOBAL_CONTEXT_FIELD_ACCESSOR(index, type, name) \
- static Handle<type> name() { \
- return Handle<type>(context()->global_context()->name()); \
- }
- GLOBAL_CONTEXT_FIELDS(TOP_GLOBAL_CONTEXT_FIELD_ACCESSOR)
-#undef TOP_GLOBAL_CONTEXT_FIELD_ACCESSOR
-
- static inline ThreadLocalTop* GetCurrentThread() { return &thread_local_; }
- static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
- static char* ArchiveThread(char* to);
- static char* RestoreThread(char* from);
- static void FreeThreadResources() { thread_local_.Free(); }
-
- static const char* kStackOverflowMessage;
-
- private:
-#ifdef ENABLE_VMSTATE_TRACKING
- // Set of states used when communicating with the runtime profiler.
- //
- // The set of possible transitions is divided between the VM and the
- // profiler threads.
- //
- // The VM thread can perform these transitions:
- // o IN_JS -> NOT_IN_JS
- // o NOT_IN_JS -> IN_JS
- // o NOT_IN_JS_WAITING_FOR_JS -> IN_JS notifying the profiler thread
- // using the semaphore.
- // All the above transitions are caused by VM state changes.
- //
- // The profiler thread can only perform a single transition
- // NOT_IN_JS -> NOT_IN_JS_WAITING_FOR_JS before it starts waiting on
- // the semaphore.
- enum RuntimeProfilerState {
- PROF_NOT_IN_JS,
- PROF_NOT_IN_JS_WAITING_FOR_JS,
- PROF_IN_JS
- };
-
- static void SetRuntimeProfilerState(RuntimeProfilerState state) {
- NoBarrier_Store(&thread_local_.runtime_profiler_state_, state);
- }
-
- static RuntimeProfilerState SwapRuntimeProfilerState(
- RuntimeProfilerState state) {
- return static_cast<RuntimeProfilerState>(
- NoBarrier_AtomicExchange(&thread_local_.runtime_profiler_state_,
- state));
- }
-
- static RuntimeProfilerState CompareAndSwapRuntimeProfilerState(
- RuntimeProfilerState old_state,
- RuntimeProfilerState state) {
- return static_cast<RuntimeProfilerState>(
- NoBarrier_CompareAndSwap(&thread_local_.runtime_profiler_state_,
- old_state,
- state));
- }
-
- static Semaphore* runtime_profiler_semaphore_;
-#endif // ENABLE_VMSTATE_TRACKING
-
- // The context that initiated this JS execution.
- static ThreadLocalTop thread_local_;
- static void InitializeThreadLocal();
- static void PrintStackTrace(FILE* out, ThreadLocalTop* thread);
- static void MarkCompactPrologue(bool is_compacting,
- ThreadLocalTop* archived_thread_data);
- static void MarkCompactEpilogue(bool is_compacting,
- ThreadLocalTop* archived_thread_data);
-
- // Debug.
- // Mutex for serializing access to break control structures.
- static Mutex* break_access_;
-
- friend class SaveContext;
- friend class AssertNoContextChange;
- friend class ExecutionAccess;
- friend class ThreadLocalTop;
-
- static void FillCache();
-};
-
-
-// If the GCC version is 4.1.x or 4.2.x, an additional field is added to the
-// class as a workaround for a bug in the generated code found with these
-// versions of GCC. See V8 issue 122 for details.
-class SaveContext BASE_EMBEDDED {
- public:
- SaveContext()
- : context_(Top::context()),
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- dummy_(Top::context()),
-#endif
- prev_(Top::save_context()) {
- Top::set_save_context(this);
-
- // If there is no JS frame under the current C frame, use the value 0.
- JavaScriptFrameIterator it;
- js_sp_ = it.done() ? 0 : it.frame()->sp();
- }
-
- ~SaveContext() {
- Top::set_context(*context_);
- Top::set_save_context(prev_);
- }
-
- Handle<Context> context() { return context_; }
- SaveContext* prev() { return prev_; }
-
- // Returns true if this save context is below a given JavaScript frame.
- bool below(JavaScriptFrame* frame) {
- return (js_sp_ == 0) || (frame->sp() < js_sp_);
- }
-
- private:
- Handle<Context> context_;
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- Handle<Context> dummy_;
-#endif
- SaveContext* prev_;
- Address js_sp_; // The top JS frame's sp when saving context.
-};
-
-
-class AssertNoContextChange BASE_EMBEDDED {
-#ifdef DEBUG
- public:
- AssertNoContextChange() :
- context_(Top::context()) {
- }
-
- ~AssertNoContextChange() {
- ASSERT(Top::context() == *context_);
- }
-
- private:
- HandleScope scope_;
- Handle<Context> context_;
-#else
- public:
- AssertNoContextChange() { }
-#endif
-};
-
-
-class ExecutionAccess BASE_EMBEDDED {
- public:
- ExecutionAccess() { Lock(); }
- ~ExecutionAccess() { Unlock(); }
-
- static void Lock() { Top::break_access_->Lock(); }
- static void Unlock() { Top::break_access_->Unlock(); }
-
- static bool TryLock() {
- return Top::break_access_->TryLock();
- }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_TOP_H_
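
One deleted idiom worth keeping in mind for the isolate-based code that replaces this header is SaveContext's RAII shape: save in the constructor, restore in the destructor, and chain savers so nesting works. A reduced sketch with hypothetical names, not the V8 API:

    struct Context;

    static Context* current_context = nullptr;

    class ContextSaver {
     public:
      ContextSaver() : saved_(current_context), prev_(top_) { top_ = this; }
      ~ContextSaver() {          // restores on scope exit, even on early return
        current_context = saved_;
        top_ = prev_;
      }
      ContextSaver* prev() { return prev_; }

     private:
      Context* saved_;
      ContextSaver* prev_;       // savers form an intrusive stack
      static ContextSaver* top_;
    };

    ContextSaver* ContextSaver::top_ = nullptr;
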
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 0bb726212..4c34ff8fb 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,7 @@
#include "v8.h"
#include "ast.h"
+#include "code-stubs.h"
#include "compiler.h"
#include "ic.h"
#include "macro-assembler.h"
@@ -58,91 +59,147 @@ TypeInfo TypeInfo::TypeFromValue(Handle<Object> value) {
}
-STATIC_ASSERT(DEFAULT_STRING_STUB == Code::kNoExtraICState);
-
-
TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
Handle<Context> global_context) {
global_context_ = global_context;
- Initialize(code);
+ BuildDictionary(code);
+ ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue);
+}
+
+
+Handle<Object> TypeFeedbackOracle::GetInfo(unsigned ast_id) {
+ int entry = dictionary_->FindEntry(ast_id);
+ return entry != NumberDictionary::kNotFound
+ ? Handle<Object>(dictionary_->ValueAt(entry))
+ : Isolate::Current()->factory()->undefined_value();
+}
+
+
+bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
+ Handle<Object> map_or_code(GetInfo(expr->id()));
+ if (map_or_code->IsMap()) return true;
+ if (map_or_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(map_or_code);
+ return code->is_keyed_load_stub() &&
+ code->ic_state() == MONOMORPHIC &&
+ Code::ExtractTypeFromFlags(code->flags()) == NORMAL &&
+ code->FindFirstMap() != NULL;
+ }
+ return false;
}
-void TypeFeedbackOracle::Initialize(Handle<Code> code) {
- ASSERT(map_.is_null()); // Only initialize once.
- map_ = Factory::NewJSObject(Top::object_function());
- PopulateMap(code);
+bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
+ Handle<Object> map_or_code(GetInfo(expr->id()));
+ if (map_or_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(map_or_code);
+ Builtins* builtins = Isolate::Current()->builtins();
+ return code->is_keyed_load_stub() &&
+ *code != builtins->builtin(Builtins::kKeyedLoadIC_Generic) &&
+ code->ic_state() == MEGAMORPHIC;
+ }
+ return false;
}
-bool TypeFeedbackOracle::LoadIsMonomorphic(Property* expr) {
- return GetElement(map_, expr->position())->IsMap();
+bool TypeFeedbackOracle::StoreIsMonomorphicNormal(Expression* expr) {
+ Handle<Object> map_or_code(GetInfo(expr->id()));
+ if (map_or_code->IsMap()) return true;
+ if (map_or_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(map_or_code);
+ return code->is_keyed_store_stub() &&
+ code->ic_state() == MONOMORPHIC &&
+ Code::ExtractTypeFromFlags(code->flags()) == NORMAL;
+ }
+ return false;
}
-bool TypeFeedbackOracle::StoreIsMonomorphic(Assignment* expr) {
- return GetElement(map_, expr->position())->IsMap();
+bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
+ Handle<Object> map_or_code(GetInfo(expr->id()));
+ if (map_or_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(map_or_code);
+ Builtins* builtins = Isolate::Current()->builtins();
+ return code->is_keyed_store_stub() &&
+ *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic) &&
+ code->ic_state() == MEGAMORPHIC;
+ }
+ return false;
}
bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
- Handle<Object> value = GetElement(map_, expr->position());
+ Handle<Object> value = GetInfo(expr->id());
return value->IsMap() || value->IsSmi();
}
Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
- ASSERT(LoadIsMonomorphic(expr));
- return Handle<Map>::cast(GetElement(map_, expr->position()));
+ ASSERT(LoadIsMonomorphicNormal(expr));
+ Handle<Object> map_or_code(GetInfo(expr->id()));
+ if (map_or_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(map_or_code);
+ Map* first_map = code->FindFirstMap();
+ ASSERT(first_map != NULL);
+ return Handle<Map>(first_map);
+ }
+ return Handle<Map>::cast(map_or_code);
}
-Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Assignment* expr) {
- ASSERT(StoreIsMonomorphic(expr));
- return Handle<Map>::cast(GetElement(map_, expr->position()));
+Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) {
+ ASSERT(StoreIsMonomorphicNormal(expr));
+ Handle<Object> map_or_code(GetInfo(expr->id()));
+ if (map_or_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(map_or_code);
+ return Handle<Map>(code->FindFirstMap());
+ }
+ return Handle<Map>::cast(map_or_code);
}
ZoneMapList* TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
Handle<String> name) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
- return CollectReceiverTypes(expr->position(), name, flags);
+ return CollectReceiverTypes(expr->id(), name, flags);
}
ZoneMapList* TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
Handle<String> name) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, NORMAL);
- return CollectReceiverTypes(expr->position(), name, flags);
+ return CollectReceiverTypes(expr->id(), name, flags);
}
ZoneMapList* TypeFeedbackOracle::CallReceiverTypes(Call* expr,
- Handle<String> name) {
+ Handle<String> name,
+ CallKind call_kind) {
int arity = expr->arguments()->length();
- // Note: these flags won't let us get maps from stubs with
- // non-default extra ic state in the megamorphic case. In the more
- // important monomorphic case the map is obtained directly, so it's
- // not a problem until we decide to emit more polymorphic code.
+
+ // Note: Currently we do not take string extra ic data into account
+ // here.
+ Code::ExtraICState extra_ic_state =
+ CallIC::Contextual::encode(call_kind == CALL_AS_FUNCTION);
+
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
NORMAL,
- Code::kNoExtraICState,
+ extra_ic_state,
OWN_MAP,
NOT_IN_LOOP,
arity);
- return CollectReceiverTypes(expr->position(), name, flags);
+ return CollectReceiverTypes(expr->id(), name, flags);
}
CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
- Handle<Object> value = GetElement(map_, expr->position());
+ Handle<Object> value = GetInfo(expr->id());
if (!value->IsSmi()) return RECEIVER_MAP_CHECK;
CheckType check = static_cast<CheckType>(Smi::cast(*value)->value());
ASSERT(check != RECEIVER_MAP_CHECK);
return check;
}
-
Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
CheckType check) {
JSFunction* function = NULL;
@@ -166,13 +223,13 @@ Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
- Handle<Object> object = GetElement(map_, expr->position());
- return *object == Builtins::builtin(id);
+ return *GetInfo(expr->id()) ==
+ Isolate::Current()->builtins()->builtin(id);
}
TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
- Handle<Object> object = GetElement(map_, expr->position());
+ Handle<Object> object = GetInfo(expr->id());
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
@@ -182,12 +239,14 @@ TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
switch (state) {
case CompareIC::UNINITIALIZED:
// Uninitialized means never executed.
- // TODO(fschneider): Introduce a separate value for never-executed ICs.
- return unknown;
+ return TypeInfo::Uninitialized();
case CompareIC::SMIS:
return TypeInfo::Smi();
case CompareIC::HEAP_NUMBERS:
return TypeInfo::Number();
+ case CompareIC::SYMBOLS:
+ case CompareIC::STRINGS:
+ return TypeInfo::String();
case CompareIC::OBJECTS:
// TODO(kasperl): We really need a type for JS objects here.
return TypeInfo::NonPrimitive();
@@ -198,59 +257,74 @@ TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
}
+bool TypeFeedbackOracle::IsSymbolCompare(CompareOperation* expr) {
+ Handle<Object> object = GetInfo(expr->id());
+ if (!object->IsCode()) return false;
+ Handle<Code> code = Handle<Code>::cast(object);
+ if (!code->is_compare_ic_stub()) return false;
+ CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+ return state == CompareIC::SYMBOLS;
+}
+
+
+TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
+ Handle<Object> object = GetInfo(expr->id());
+ TypeInfo unknown = TypeInfo::Unknown();
+ if (!object->IsCode()) return unknown;
+ Handle<Code> code = Handle<Code>::cast(object);
+ ASSERT(code->is_unary_op_stub());
+ UnaryOpIC::TypeInfo type = static_cast<UnaryOpIC::TypeInfo>(
+ code->unary_op_type());
+ switch (type) {
+ case UnaryOpIC::SMI:
+ return TypeInfo::Smi();
+ case UnaryOpIC::HEAP_NUMBER:
+ return TypeInfo::Double();
+ default:
+ return unknown;
+ }
+}
+
+
TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
- Handle<Object> object = GetElement(map_, expr->position());
+ Handle<Object> object = GetInfo(expr->id());
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
if (code->is_binary_op_stub()) {
BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
code->binary_op_type());
- switch (type) {
- case BinaryOpIC::UNINIT_OR_SMI:
- return TypeInfo::Smi();
- case BinaryOpIC::DEFAULT:
- return (expr->op() == Token::DIV || expr->op() == Token::MUL)
- ? TypeInfo::Double()
- : TypeInfo::Integer32();
- case BinaryOpIC::HEAP_NUMBERS:
- return TypeInfo::Double();
- default:
- return unknown;
- }
- } else if (code->is_type_recording_binary_op_stub()) {
- TRBinaryOpIC::TypeInfo type = static_cast<TRBinaryOpIC::TypeInfo>(
- code->type_recording_binary_op_type());
- TRBinaryOpIC::TypeInfo result_type = static_cast<TRBinaryOpIC::TypeInfo>(
- code->type_recording_binary_op_result_type());
+ BinaryOpIC::TypeInfo result_type = static_cast<BinaryOpIC::TypeInfo>(
+ code->binary_op_result_type());
switch (type) {
- case TRBinaryOpIC::UNINITIALIZED:
+ case BinaryOpIC::UNINITIALIZED:
// Uninitialized means never executed.
- // TODO(fschneider): Introduce a separate value for never-executed ICs
- return unknown;
- case TRBinaryOpIC::SMI:
+ return TypeInfo::Uninitialized();
+ case BinaryOpIC::SMI:
switch (result_type) {
- case TRBinaryOpIC::UNINITIALIZED:
- case TRBinaryOpIC::SMI:
+ case BinaryOpIC::UNINITIALIZED:
+ case BinaryOpIC::SMI:
return TypeInfo::Smi();
- case TRBinaryOpIC::INT32:
+ case BinaryOpIC::INT32:
return TypeInfo::Integer32();
- case TRBinaryOpIC::HEAP_NUMBER:
+ case BinaryOpIC::HEAP_NUMBER:
return TypeInfo::Double();
default:
return unknown;
}
- case TRBinaryOpIC::INT32:
+ case BinaryOpIC::INT32:
if (expr->op() == Token::DIV ||
- result_type == TRBinaryOpIC::HEAP_NUMBER) {
+ result_type == BinaryOpIC::HEAP_NUMBER) {
return TypeInfo::Double();
}
return TypeInfo::Integer32();
- case TRBinaryOpIC::HEAP_NUMBER:
+ case BinaryOpIC::HEAP_NUMBER:
return TypeInfo::Double();
- case TRBinaryOpIC::STRING:
- case TRBinaryOpIC::GENERIC:
+ case BinaryOpIC::BOTH_STRING:
+ return TypeInfo::String();
+ case BinaryOpIC::STRING:
+ case BinaryOpIC::GENERIC:
return unknown;
default:
return unknown;
@@ -261,7 +335,7 @@ TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
- Handle<Object> object = GetElement(map_, clause->position());
+ Handle<Object> object = GetInfo(clause->CompareId());
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
@@ -287,13 +361,43 @@ TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
}
-ZoneMapList* TypeFeedbackOracle::CollectReceiverTypes(int position,
+TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
+ Handle<Object> object = GetInfo(expr->CountId());
+ TypeInfo unknown = TypeInfo::Unknown();
+ if (!object->IsCode()) return unknown;
+ Handle<Code> code = Handle<Code>::cast(object);
+ if (!code->is_binary_op_stub()) return unknown;
+
+ BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
+ code->binary_op_type());
+ switch (type) {
+ case BinaryOpIC::UNINITIALIZED:
+ case BinaryOpIC::SMI:
+ return TypeInfo::Smi();
+ case BinaryOpIC::INT32:
+ return TypeInfo::Integer32();
+ case BinaryOpIC::HEAP_NUMBER:
+ return TypeInfo::Double();
+ case BinaryOpIC::BOTH_STRING:
+ case BinaryOpIC::STRING:
+ case BinaryOpIC::GENERIC:
+ return unknown;
+ default:
+ return unknown;
+ }
+ UNREACHABLE();
+ return unknown;
+}
+
+
+ZoneMapList* TypeFeedbackOracle::CollectReceiverTypes(unsigned ast_id,
Handle<String> name,
Code::Flags flags) {
- Handle<Object> object = GetElement(map_, position);
+ Isolate* isolate = Isolate::Current();
+ Handle<Object> object = GetInfo(ast_id);
if (object->IsUndefined() || object->IsSmi()) return NULL;
- if (*object == Builtins::builtin(Builtins::StoreIC_GlobalProxy)) {
+ if (*object == isolate->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) {
// TODO(fschneider): We could collect the maps and signal that
// we need a generic store (or load) here.
ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC);
@@ -305,7 +409,7 @@ ZoneMapList* TypeFeedbackOracle::CollectReceiverTypes(int position,
} else if (Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
ZoneMapList* types = new ZoneMapList(4);
ASSERT(object->IsCode());
- StubCache::CollectMatchingMaps(types, *name, flags);
+ isolate->stub_cache()->CollectMatchingMaps(types, *name, flags);
return types->length() > 0 ? types : NULL;
} else {
return NULL;
@@ -313,94 +417,130 @@ ZoneMapList* TypeFeedbackOracle::CollectReceiverTypes(int position,
}
-void TypeFeedbackOracle::PopulateMap(Handle<Code> code) {
- HandleScope scope;
-
- const int kInitialCapacity = 16;
- List<int> code_positions(kInitialCapacity);
- List<int> source_positions(kInitialCapacity);
- CollectPositions(*code, &code_positions, &source_positions);
-
- int length = code_positions.length();
- ASSERT(source_positions.length() == length);
- for (int i = 0; i < length; i++) {
- RelocInfo info(code->instruction_start() + code_positions[i],
- RelocInfo::CODE_TARGET, 0);
- Handle<Code> target(Code::GetCodeFromTargetAddress(info.target_address()));
- int position = source_positions[i];
- InlineCacheState state = target->ic_state();
- Code::Kind kind = target->kind();
- if (kind == Code::BINARY_OP_IC ||
- kind == Code::TYPE_RECORDING_BINARY_OP_IC ||
- kind == Code::COMPARE_IC) {
- // TODO(kasperl): Avoid having multiple ICs with the same
- // position by making sure that we have position information
- // recorded for all binary ICs.
- if (GetElement(map_, position)->IsUndefined()) {
- SetElement(map_, position, target);
- }
- } else if (state == MONOMORPHIC) {
- if (target->kind() != Code::CALL_IC ||
- target->check_type() == RECEIVER_MAP_CHECK) {
- Handle<Map> map = Handle<Map>(target->FindFirstMap());
- if (*map == NULL) {
- SetElement(map_, position, target);
- } else {
- SetElement(map_, position, map);
- }
- } else {
- ASSERT(target->kind() == Code::CALL_IC);
- CheckType check = target->check_type();
- ASSERT(check != RECEIVER_MAP_CHECK);
- SetElement(map_, position, Handle<Object>(Smi::FromInt(check)));
- ASSERT(Smi::cast(*GetElement(map_, position))->value() == check);
+void TypeFeedbackOracle::CollectKeyedReceiverTypes(
+ unsigned ast_id,
+ ZoneMapList* types) {
+ Handle<Object> object = GetInfo(ast_id);
+ if (!object->IsCode()) return;
+ Handle<Code> code = Handle<Code>::cast(object);
+ if (code->kind() == Code::KEYED_LOAD_IC ||
+ code->kind() == Code::KEYED_STORE_IC) {
+ AssertNoAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(*code, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ Object* object = info->target_object();
+ if (object->IsMap()) {
+ types->Add(Handle<Map>(Map::cast(object)));
}
- } else if (state == MEGAMORPHIC) {
- SetElement(map_, position, target);
}
}
}
-void TypeFeedbackOracle::CollectPositions(Code* code,
- List<int>* code_positions,
- List<int>* source_positions) {
+// Things are a bit tricky here: The iterator for the RelocInfos and the infos
+// themselves are not GC-safe, so we first get all infos, then we create the
+// dictionary (possibly triggering GC), and finally we relocate the collected
+// infos before we process them.
+void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
AssertNoAllocation no_allocation;
- int position = 0;
- // Because the ICs we use for global variables access in the full
- // code generator do not have any meaningful positions, we avoid
- // collecting those by filtering out contextual code targets.
- int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::kPositionMask;
- for (RelocIterator it(code, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- RelocInfo::Mode mode = info->rmode();
- if (RelocInfo::IsCodeTarget(mode)) {
- Code* target = Code::GetCodeFromTargetAddress(info->target_address());
- if (target->is_inline_cache_stub()) {
- InlineCacheState state = target->ic_state();
- Code::Kind kind = target->kind();
- if (kind == Code::BINARY_OP_IC) {
- if (target->binary_op_type() == BinaryOpIC::GENERIC) continue;
- } else if (kind == Code::TYPE_RECORDING_BINARY_OP_IC) {
- if (target->type_recording_binary_op_type() ==
- TRBinaryOpIC::GENERIC) {
- continue;
- }
- } else if (kind == Code::COMPARE_IC) {
- if (target->compare_state() == CompareIC::GENERIC) continue;
+ ZoneList<RelocInfo> infos(16);
+ HandleScope scope;
+ GetRelocInfos(code, &infos);
+ CreateDictionary(code, &infos);
+ ProcessRelocInfos(&infos);
+ // Allocate handle in the parent scope.
+ dictionary_ = scope.CloseAndEscape(dictionary_);
+}
+
+
+void TypeFeedbackOracle::GetRelocInfos(Handle<Code> code,
+ ZoneList<RelocInfo>* infos) {
+ int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
+ for (RelocIterator it(*code, mask); !it.done(); it.next()) {
+ infos->Add(*it.rinfo());
+ }
+}
+
+
+void TypeFeedbackOracle::CreateDictionary(Handle<Code> code,
+ ZoneList<RelocInfo>* infos) {
+ DisableAssertNoAllocation allocation_allowed;
+ byte* old_start = code->instruction_start();
+ dictionary_ = FACTORY->NewNumberDictionary(infos->length());
+ byte* new_start = code->instruction_start();
+ RelocateRelocInfos(infos, old_start, new_start);
+}
+
+
+void TypeFeedbackOracle::RelocateRelocInfos(ZoneList<RelocInfo>* infos,
+ byte* old_start,
+ byte* new_start) {
+ for (int i = 0; i < infos->length(); i++) {
+ RelocInfo* info = &(*infos)[i];
+ info->set_pc(new_start + (info->pc() - old_start));
+ }
+}
+
+
+void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
+ for (int i = 0; i < infos->length(); i++) {
+ unsigned ast_id = static_cast<unsigned>((*infos)[i].data());
+ Code* target = Code::GetCodeFromTargetAddress((*infos)[i].target_address());
+ ProcessTarget(ast_id, target);
+ }
+}
+
+
+void TypeFeedbackOracle::ProcessTarget(unsigned ast_id, Code* target) {
+ switch (target->kind()) {
+ case Code::LOAD_IC:
+ case Code::STORE_IC:
+ case Code::CALL_IC:
+ case Code::KEYED_CALL_IC:
+ if (target->ic_state() == MONOMORPHIC) {
+ if (target->kind() == Code::CALL_IC &&
+ target->check_type() != RECEIVER_MAP_CHECK) {
+ SetInfo(ast_id, Smi::FromInt(target->check_type()));
} else {
- if (state != MONOMORPHIC && state != MEGAMORPHIC) continue;
+ Object* map = target->FindFirstMap();
+ SetInfo(ast_id, map == NULL ? static_cast<Object*>(target) : map);
}
- code_positions->Add(
- static_cast<int>(info->pc() - code->instruction_start()));
- source_positions->Add(position);
+ } else if (target->ic_state() == MEGAMORPHIC) {
+ SetInfo(ast_id, target);
}
- } else {
- ASSERT(RelocInfo::IsPosition(mode));
- position = static_cast<int>(info->data());
- }
+ break;
+
+ case Code::KEYED_LOAD_IC:
+ case Code::KEYED_STORE_IC:
+ if (target->ic_state() == MONOMORPHIC ||
+ target->ic_state() == MEGAMORPHIC) {
+ SetInfo(ast_id, target);
+ }
+ break;
+
+ case Code::UNARY_OP_IC:
+ case Code::BINARY_OP_IC:
+ case Code::COMPARE_IC:
+ SetInfo(ast_id, target);
+ break;
+
+ default:
+ break;
}
}
+
+void TypeFeedbackOracle::SetInfo(unsigned ast_id, Object* target) {
+ ASSERT(dictionary_->FindEntry(ast_id) == NumberDictionary::kNotFound);
+ MaybeObject* maybe_result = dictionary_->AtNumberPut(ast_id, target);
+ USE(maybe_result);
+#ifdef DEBUG
+ Object* result = NULL;
+ // Dictionary has been allocated with sufficient size for all elements.
+ ASSERT(maybe_result->ToObject(&result));
+ ASSERT(*dictionary_ == result);
+#endif
+}
+
} } // namespace v8::internal
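
The four-step BuildDictionary dance above is easy to get wrong, so here is the base-swap at its heart in isolation (stand-in types, not V8 code): record pointers into a buffer, let an allocation move the buffer, then re-base every recorded pointer by its stable offset:

    #include <cstdint>
    #include <vector>

    struct Info { std::uint8_t* pc; };  // a recorded position inside code

    void Rebase(std::vector<Info>* infos,
                std::uint8_t* old_start,
                std::uint8_t* new_start) {
      for (Info& info : *infos) {
        // The offset into the code object is stable; only the base moved.
        info.pc = new_start + (info.pc - old_start);
      }
    }
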
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 34ff58452..75aabe8c6 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,7 @@
#ifndef V8_TYPE_INFO_H_
#define V8_TYPE_INFO_H_
+#include "allocation.h"
#include "globals.h"
#include "zone.h"
#include "zone-inl.h"
@@ -35,19 +36,21 @@
namespace v8 {
namespace internal {
+const int kMaxKeyedPolymorphism = 4;
+
// Unknown
-//   |      |
-//   |      \--------------|
-//   Primitive       Non-primitive
-//   |      \--------|     |
-//   Number      String    |
-//   /    |         |      |
-// Double  Integer32 |     /
-//   |       |      /     /
-//   |      Smi    /     /
-//   |       |    /     /
-//   |       |   /     /
-//   Uninitialized.--/
+//   |  \____________
+//   |               |
+//   Primitive      Non-primitive
+//   |   \_______    |
+//   |           |   |
+//   Number    String |
+//   /    \      |    |
+// Double Integer32   |  /
+//   |      |        /  /
+//   |     Smi      /  /
+//   |      |      / __/
+//   Uninitialized.
class TypeInfo {
public:
@@ -71,32 +74,6 @@ class TypeInfo {
// We haven't started collecting info yet.
static TypeInfo Uninitialized() { return TypeInfo(kUninitialized); }
- // Return compact representation. Very sensitive to enum values below!
- // Compacting drops information about primitive types and strings types.
- // We use the compact representation when we only care about number types.
- int ThreeBitRepresentation() {
- ASSERT(type_ != kUninitialized);
- int answer = type_ & 0xf;
- answer = answer > 6 ? answer - 2 : answer;
- ASSERT(answer >= 0);
- ASSERT(answer <= 7);
- return answer;
- }
-
- // Decode compact representation. Very sensitive to enum values below!
- static TypeInfo ExpandedRepresentation(int three_bit_representation) {
- Type t = static_cast<Type>(three_bit_representation > 4 ?
- three_bit_representation + 2 :
- three_bit_representation);
- t = (t == kUnknown) ? t : static_cast<Type>(t | kPrimitive);
- ASSERT(t == kUnknown ||
- t == kNumber ||
- t == kInteger32 ||
- t == kSmi ||
- t == kDouble);
- return TypeInfo(t);
- }
-
int ToInt() {
return type_;
}
@@ -227,9 +204,11 @@ enum StringStubFeedback {
// Forward declarations.
class Assignment;
+class UnaryOperation;
class BinaryOperation;
class Call;
class CompareOperation;
+class CountOperation;
class CompilationInfo;
class Property;
class CaseClause;
@@ -238,16 +217,22 @@ class TypeFeedbackOracle BASE_EMBEDDED {
public:
TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);
- bool LoadIsMonomorphic(Property* expr);
- bool StoreIsMonomorphic(Assignment* expr);
+ bool LoadIsMonomorphicNormal(Property* expr);
+ bool LoadIsMegamorphicWithTypeInfo(Property* expr);
+ bool StoreIsMonomorphicNormal(Expression* expr);
+ bool StoreIsMegamorphicWithTypeInfo(Expression* expr);
bool CallIsMonomorphic(Call* expr);
Handle<Map> LoadMonomorphicReceiverType(Property* expr);
- Handle<Map> StoreMonomorphicReceiverType(Assignment* expr);
+ Handle<Map> StoreMonomorphicReceiverType(Expression* expr);
ZoneMapList* LoadReceiverTypes(Property* expr, Handle<String> name);
ZoneMapList* StoreReceiverTypes(Assignment* expr, Handle<String> name);
- ZoneMapList* CallReceiverTypes(Call* expr, Handle<String> name);
+ ZoneMapList* CallReceiverTypes(Call* expr,
+ Handle<String> name,
+ CallKind call_kind);
+ void CollectKeyedReceiverTypes(unsigned ast_id,
+ ZoneMapList* types);
CheckType GetCallCheckType(Call* expr);
Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
@@ -255,25 +240,35 @@ class TypeFeedbackOracle BASE_EMBEDDED {
bool LoadIsBuiltin(Property* expr, Builtins::Name id);
// Get type information for arithmetic operations and compares.
+ TypeInfo UnaryType(UnaryOperation* expr);
TypeInfo BinaryType(BinaryOperation* expr);
TypeInfo CompareType(CompareOperation* expr);
+ bool IsSymbolCompare(CompareOperation* expr);
TypeInfo SwitchType(CaseClause* clause);
+ TypeInfo IncrementType(CountOperation* expr);
private:
- void Initialize(Handle<Code> code);
-
- ZoneMapList* CollectReceiverTypes(int position,
+ ZoneMapList* CollectReceiverTypes(unsigned ast_id,
Handle<String> name,
Code::Flags flags);
- void PopulateMap(Handle<Code> code);
+ void SetInfo(unsigned ast_id, Object* target);
+
+ void BuildDictionary(Handle<Code> code);
+ void GetRelocInfos(Handle<Code> code, ZoneList<RelocInfo>* infos);
+ void CreateDictionary(Handle<Code> code, ZoneList<RelocInfo>* infos);
+ void RelocateRelocInfos(ZoneList<RelocInfo>* infos,
+ byte* old_start,
+ byte* new_start);
+ void ProcessRelocInfos(ZoneList<RelocInfo>* infos);
+ void ProcessTarget(unsigned ast_id, Code* target);
- void CollectPositions(Code* code,
- List<int>* code_positions,
- List<int>* source_positions);
+ // Returns an element from the backing store. Returns undefined if
+ // there is no information.
+ Handle<Object> GetInfo(unsigned ast_id);
Handle<Context> global_context_;
- Handle<JSObject> map_;
+ Handle<NumberDictionary> dictionary_;
DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
};
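
The TypeInfo lattice drawn earlier in this header is what keeps type queries cheap: encodings can be chosen so that a subtype carries all the bits of its ancestors, turning "is T at or below U" into a mask test. A sketch of that encoding trick (the bit values below are illustrative, not V8's actual constants):

    enum Type : unsigned {
      kUnknown   = 0x0,
      kPrimitive = 0x1,
      kNumber    = 0x3,   // kPrimitive | 0x2
      kInteger32 = 0x7,   // kNumber    | 0x4
      kSmi       = 0xF,   // kInteger32 | 0x8
      kDouble    = 0x13   // kNumber    | 0x10
    };

    // "sub is at or below super" iff sub carries every bit of super.
    inline bool IsAtOrBelow(Type sub, Type super) {
      return (sub & super) == super;
    }
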
diff --git a/deps/v8/src/unbound-queue.h b/deps/v8/src/unbound-queue.h
index 443d5ce6d..59a426b7f 100644
--- a/deps/v8/src/unbound-queue.h
+++ b/deps/v8/src/unbound-queue.h
@@ -28,6 +28,8 @@
#ifndef V8_UNBOUND_QUEUE_
#define V8_UNBOUND_QUEUE_
+#include "allocation.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index 346f673fd..6e0ac1a35 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -1572,7 +1572,7 @@ int CanonicalizationRange::Convert(uchar c,
}
-uchar UnicodeData::kMaxCodePoint = 65533;
+const uchar UnicodeData::kMaxCodePoint = 65533;
int UnicodeData::GetByteCount() {
return kUppercaseTable0Size * sizeof(int32_t) // NOLINT
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index 9d1d68323..39fc34968 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -97,7 +97,7 @@ class UnicodeData {
private:
friend class Test;
static int GetByteCount();
- static uchar kMaxCodePoint;
+ static const uchar kMaxCodePoint;
};
// --- U t f 8 ---
diff --git a/deps/v8/src/uri.js b/deps/v8/src/uri.js
index e94b3fe56..72ca6f156 100644
--- a/deps/v8/src/uri.js
+++ b/deps/v8/src/uri.js
@@ -166,7 +166,10 @@ function URIDecodeOctets(octets, result, index) {
// ECMA-262, section 15.1.3
function Encode(uri, unescape) {
var uriLength = uri.length;
- var result = new $Array(uriLength);
+ // We are going to pass result to %StringFromCharCodeArray
+ // which does not expect any getters/setters installed
+ // on the incoming array.
+ var result = new InternalArray(uriLength);
var index = 0;
for (var k = 0; k < uriLength; k++) {
var cc1 = uri.charCodeAt(k);
@@ -192,7 +195,10 @@ function Encode(uri, unescape) {
// ECMA-262, section 15.1.3
function Decode(uri, reserved) {
var uriLength = uri.length;
- var result = new $Array(uriLength);
+ // We are going to pass result to %StringFromCharCodeArray
+ // which does not expect any getters/setters installed
+ // on the incoming array.
+ var result = new InternalArray(uriLength);
var index = 0;
for (var k = 0; k < uriLength; k++) {
var ch = uri.charAt(k);
diff --git a/deps/v8/src/jump-target-inl.h b/deps/v8/src/utils-inl.h
index 4c9ee5bc4..76a3c104e 100644
--- a/deps/v8/src/jump-target-inl.h
+++ b/deps/v8/src/utils-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,24 +25,24 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_JUMP_TARGET_INL_H_
-#define V8_JUMP_TARGET_INL_H_
+#ifndef V8_UTILS_INL_H_
+#define V8_UTILS_INL_H_
-#include "virtual-frame-inl.h"
-
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
-#include "jump-target-heavy-inl.h"
-#else
-#include "jump-target-light-inl.h"
-#endif
+#include "list-inl.h"
namespace v8 {
namespace internal {
-CodeGenerator* JumpTarget::cgen() {
- return CodeGeneratorScope::Current();
+template<typename T, int growth_factor, int max_growth>
+void Collector<T, growth_factor, max_growth>::Reset() {
+ for (int i = chunks_.length() - 1; i >= 0; i--) {
+ chunks_.at(i).Dispose();
+ }
+ chunks_.Rewind(0);
+ index_ = 0;
+ size_ = 0;
}
} } // namespace v8::internal
-#endif // V8_JUMP_TARGET_INL_H_
+#endif // V8_UTILS_INL_H_
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 219343b7f..331c01add 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -52,11 +52,9 @@ static inline bool IsPowerOf2(T x) {
// X must be a power of 2. Returns the number of trailing zeros.
-template <typename T>
-static inline int WhichPowerOf2(T x) {
+static inline int WhichPowerOf2(uint32_t x) {
ASSERT(IsPowerOf2(x));
ASSERT(x != 0);
- if (x < 0) return 31;
int bits = 0;
#ifdef DEBUG
int original_x = x;
@@ -222,6 +220,11 @@ class BitField {
return static_cast<uint32_t>(value) << shift;
}
+ // Returns a uint32_t with the bit field value updated.
+ static uint32_t update(uint32_t previous, T value) {
+ return (previous & ~mask()) | encode(value);
+ }
+
// Extracts the bit field from the value.
static T decode(uint32_t value) {
return static_cast<T>((value & mask()) >> shift);
@@ -251,6 +254,12 @@ static inline uint32_t ComputeIntegerHash(uint32_t key) {
}
+static inline uint32_t ComputePointerHash(void* ptr) {
+ return ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)));
+}
+
+
// ----------------------------------------------------------------------------
// Miscellaneous
@@ -576,14 +585,7 @@ class Collector {
}
// Resets the collector to be empty.
- virtual void Reset() {
- for (int i = chunks_.length() - 1; i >= 0; i--) {
- chunks_.at(i).Dispose();
- }
- chunks_.Rewind(0);
- index_ = 0;
- size_ = 0;
- }
+ virtual void Reset();
// Total number of elements added to collector so far.
inline int size() { return size_; }
@@ -784,10 +786,42 @@ struct BitCastHelper<Dest, Source*> {
};
template <class Dest, class Source>
+INLINE(Dest BitCast(const Source& source));
+
+template <class Dest, class Source>
inline Dest BitCast(const Source& source) {
return BitCastHelper<Dest, Source>::cast(source);
}
+
+template<typename ElementType, int NumElements>
+class EmbeddedContainer {
+ public:
+ EmbeddedContainer() : elems_() { }
+
+ int length() { return NumElements; }
+ ElementType& operator[](int i) {
+ ASSERT(i < length());
+ return elems_[i];
+ }
+
+ private:
+ ElementType elems_[NumElements];
+};
+
+
+template<typename ElementType>
+class EmbeddedContainer<ElementType, 0> {
+ public:
+ int length() { return 0; }
+ ElementType& operator[](int i) {
+ UNREACHABLE();
+ static ElementType t = 0;
+ return t;
+ }
+};
+
+
} } // namespace v8::internal
#endif // V8_UTILS_H_
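
The new BitField::update above completes the encode/decode pair: it rewrites one field of a packed word without disturbing its neighbours. A self-contained usage sketch, with BitField reduced to its essentials and made-up field names:

    #include <cassert>
    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static std::uint32_t mask() { return ((1u << size) - 1) << shift; }
      static std::uint32_t encode(T value) {
        return static_cast<std::uint32_t>(value) << shift;
      }
      static std::uint32_t update(std::uint32_t previous, T value) {
        return (previous & ~mask()) | encode(value);
      }
      static T decode(std::uint32_t value) {
        return static_cast<T>((value & mask()) >> shift);
      }
    };

    using ArityField = BitField<int, 0, 5>;
    using FlagsField = BitField<int, 5, 3>;

    int main() {
      std::uint32_t word = ArityField::encode(3) | FlagsField::encode(2);
      word = ArityField::update(word, 4);     // rewrite one field in place
      assert(ArityField::decode(word) == 4);
      assert(FlagsField::decode(word) == 2);  // neighbouring field untouched
      return 0;
    }
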
diff --git a/deps/v8/src/v8-counters.cc b/deps/v8/src/v8-counters.cc
index de2ce6695..c6aa9cb7f 100644
--- a/deps/v8/src/v8-counters.cc
+++ b/deps/v8/src/v8-counters.cc
@@ -32,24 +32,31 @@
namespace v8 {
namespace internal {
+Counters::Counters() {
#define HT(name, caption) \
- HistogramTimer Counters::name = { #caption, NULL, false, 0, 0 }; \
-
- HISTOGRAM_TIMER_LIST(HT)
-#undef SR
+ HistogramTimer name = { #caption, NULL, false, 0, 0 }; \
+ name##_ = name;
+ HISTOGRAM_TIMER_LIST(HT)
+#undef HT
#define SC(name, caption) \
- StatsCounter Counters::name = { "c:" #caption, NULL, false };
+ StatsCounter name = { "c:" #caption, NULL, false };\
+ name##_ = name;
- STATS_COUNTER_LIST_1(SC)
- STATS_COUNTER_LIST_2(SC)
+ STATS_COUNTER_LIST_1(SC)
+ STATS_COUNTER_LIST_2(SC)
#undef SC
-StatsCounter Counters::state_counters[] = {
+ StatsCounter state_counters[] = {
#define COUNTER_NAME(name) \
- { "c:V8.State" #name, NULL, false },
- STATE_TAG_LIST(COUNTER_NAME)
+ { "c:V8.State" #name, NULL, false },
+ STATE_TAG_LIST(COUNTER_NAME)
#undef COUNTER_NAME
-};
+ };
+
+ for (int i = 0; i < kSlidingStateWindowCounterCount; ++i) {
+ state_counters_[i] = state_counters[i];
+ }
+}
} } // namespace v8::internal
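
Stripped of the macros, what this rewrite does is simple: counters stop being class-level statics and become members initialized in a constructor, so each isolate can own an independent set. A minimal before/after sketch with made-up counter names:

    struct StatsCounter {
      const char* name;
      int* ptr;          // lazily resolved storage
      bool lookup_done;
    };

    // Before: one process-wide counter, shared by every user of V8.
    //   struct Counters { static StatsCounter gc_count; };

    // After: one counter set per Counters object (and thus per isolate).
    class Counters {
     public:
      Counters() : gc_count_{"c:V8.GcCount", nullptr, false} {}
      StatsCounter* gc_count() { return &gc_count_; }
     private:
      StatsCounter gc_count_;
    };
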
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index 9b91acebc..17e67016c 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -126,14 +126,16 @@ namespace internal {
V8.GCCompactorCausedByWeakHandles) \
SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
- SC(map_slow_to_fast_elements, V8.MapSlowToFastElements) \
- SC(map_fast_to_slow_elements, V8.MapFastToSlowElements) \
- SC(map_to_pixel_array_elements, V8.MapToPixelArrayElements) \
+ SC(map_to_fast_elements, V8.MapToFastElements) \
+ SC(map_to_fast_double_elements, V8.MapToFastDoubleElements) \
+ SC(map_to_slow_elements, V8.MapToSlowElements) \
+ SC(map_to_external_array_elements, V8.MapToExternalArrayElements) \
/* How is the generic keyed-load stub used? */ \
SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \
SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \
+ SC(keyed_load_polymorphic_stubs, V8.KeyedLoadPolymorphicStubs) \
SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
/* How is the generic keyed-call stub used? */ \
SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast) \
@@ -179,6 +181,8 @@ namespace internal {
SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
+ SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs) \
+ SC(keyed_store_external_array_slow, V8.KeyedStoreExternalArraySlow) \
SC(store_normal_miss, V8.StoreNormalMiss) \
SC(store_normal_hit, V8.StoreNormalHit) \
SC(cow_arrays_created_stub, V8.COWArraysCreatedStub) \
@@ -202,13 +206,8 @@ namespace internal {
SC(array_function_runtime, V8.ArrayFunctionRuntime) \
SC(array_function_native, V8.ArrayFunctionNative) \
SC(for_in, V8.ForIn) \
- SC(memcopy_aligned, V8.MemCopyAligned) \
- SC(memcopy_unaligned, V8.MemCopyUnaligned) \
- SC(memcopy_noxmm, V8.MemCopyNoXMM) \
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
- SC(reloc_info_count, V8.RelocInfoCount) \
- SC(reloc_info_size, V8.RelocInfoSize) \
SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
SC(compute_entry_frame, V8.ComputeEntryFrame) \
SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
@@ -254,15 +253,15 @@ namespace internal {
// This file contains all the v8 counters that are in use.
-class Counters : AllStatic {
+class Counters {
public:
#define HT(name, caption) \
- static HistogramTimer name;
+ HistogramTimer* name() { return &name##_; }
HISTOGRAM_TIMER_LIST(HT)
#undef HT
#define SC(name, caption) \
- static StatsCounter name;
+ StatsCounter* name() { return &name##_; }
STATS_COUNTER_LIST_1(SC)
STATS_COUNTER_LIST_2(SC)
#undef SC
@@ -272,17 +271,43 @@ class Counters : AllStatic {
HISTOGRAM_TIMER_LIST(RATE_ID)
#undef RATE_ID
#define COUNTER_ID(name, caption) k_##name,
- STATS_COUNTER_LIST_1(COUNTER_ID)
- STATS_COUNTER_LIST_2(COUNTER_ID)
+ STATS_COUNTER_LIST_1(COUNTER_ID)
+ STATS_COUNTER_LIST_2(COUNTER_ID)
#undef COUNTER_ID
#define COUNTER_ID(name) k_##name,
- STATE_TAG_LIST(COUNTER_ID)
+ STATE_TAG_LIST(COUNTER_ID)
#undef COUNTER_ID
stats_counter_count
};
+ StatsCounter* state_counters(StateTag state) {
+ return &state_counters_[state];
+ }
+
+ private:
+#define HT(name, caption) \
+ HistogramTimer name##_;
+ HISTOGRAM_TIMER_LIST(HT)
+#undef HT
+
+#define SC(name, caption) \
+ StatsCounter name##_;
+ STATS_COUNTER_LIST_1(SC)
+ STATS_COUNTER_LIST_2(SC)
+#undef SC
+
+ enum {
+#define COUNTER_ID(name) __##name,
+ STATE_TAG_LIST(COUNTER_ID)
+#undef COUNTER_ID
+ kSlidingStateWindowCounterCount
+ };
+
// Sliding state window counters.
- static StatsCounter state_counters[];
+ StatsCounter state_counters_[kSlidingStateWindowCounterCount];
+ friend class Isolate;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
};
} } // namespace v8::internal
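The two counter files above are two sides of one refactoring: the per-counter statics of the old AllStatic Counters become instance members, and the same X-macro lists are expanded once for accessors, once for member declarations, and once in the constructor for initialization, so the three can never drift apart. A self-contained sketch of that X-macro technique; the two-entry list and field layout are invented for illustration, not the real V8 lists:

#include <cstdio>

// Illustrative two-entry list; the real HISTOGRAM_TIMER_LIST and
// STATS_COUNTER_LIST_* macros enumerate many more counters.
#define STATS_COUNTER_LIST(SC)          \
  SC(objects_created, ObjectsCreated)   \
  SC(gc_runs, GCRuns)

struct StatsCounter {
  const char* name;
  int* ptr;
  bool lookup_done;
};

class Counters {
 public:
  Counters() {
    // Expansion one: declare a local, brace-initialize it, copy it into
    // the member -- the same shape as the new Counters::Counters().
#define SC(name, caption)                               \
    StatsCounter name = { "c:" #caption, NULL, false }; \
    name##_ = name;
    STATS_COUNTER_LIST(SC)
#undef SC
  }

  // Expansion two: one accessor per counter, returning the member's address.
#define SC(name, caption) StatsCounter* name() { return &name##_; }
  STATS_COUNTER_LIST(SC)
#undef SC

 private:
  // Expansion three: the members themselves.
#define SC(name, caption) StatsCounter name##_;
  STATS_COUNTER_LIST(SC)
#undef SC
};

int main() {
  Counters counters;
  std::printf("%s\n", counters.gc_runs()->name);  // prints "c:GCRuns"
  return 0;
}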
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 945043da9..0b562fc28 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -27,6 +27,7 @@
#include "v8.h"
+#include "isolate.h"
#include "bootstrapper.h"
#include "debug.h"
#include "deoptimizer.h"
@@ -36,12 +37,13 @@
#include "log.h"
#include "runtime-profiler.h"
#include "serialize.h"
-#include "simulator.h"
-#include "stub-cache.h"
namespace v8 {
namespace internal {
+static Mutex* init_once_mutex = OS::CreateMutex();
+static bool init_once_called = false;
+
bool V8::is_running_ = false;
bool V8::has_been_setup_ = false;
bool V8::has_been_disposed_ = false;
@@ -50,102 +52,33 @@ bool V8::use_crankshaft_ = true;
bool V8::Initialize(Deserializer* des) {
- bool create_heap_objects = des == NULL;
- if (has_been_disposed_ || has_fatal_error_) return false;
- if (IsRunning()) return true;
+ InitializeOncePerProcess();
+
+  // The current thread may not yet have entered an isolate to run.
+  // Note that Isolate::Current() may be non-null because for various
+  // initialization purposes an initializing thread may be assigned an isolate
+  // but not actually enter it.
+ if (i::Isolate::CurrentPerIsolateThreadData() == NULL) {
+ i::Isolate::EnterDefaultIsolate();
+ }
-#if defined(V8_TARGET_ARCH_ARM) && !defined(USE_ARM_EABI)
- use_crankshaft_ = false;
-#else
- use_crankshaft_ = FLAG_crankshaft;
-#endif
+ ASSERT(i::Isolate::CurrentPerIsolateThreadData() != NULL);
+ ASSERT(i::Isolate::CurrentPerIsolateThreadData()->thread_id().Equals(
+ i::ThreadId::Current()));
+ ASSERT(i::Isolate::CurrentPerIsolateThreadData()->isolate() ==
+ i::Isolate::Current());
+
+ if (IsDead()) return false;
+
+ Isolate* isolate = Isolate::Current();
+ if (isolate->IsInitialized()) return true;
- // Peephole optimization might interfere with deoptimization.
- FLAG_peephole_optimization = !use_crankshaft_;
is_running_ = true;
has_been_setup_ = true;
has_fatal_error_ = false;
has_been_disposed_ = false;
-#ifdef DEBUG
- // The initialization process does not handle memory exhaustion.
- DisallowAllocationFailure disallow_allocation_failure;
-#endif
-
- // Enable logging before setting up the heap
- Logger::Setup();
-
- CpuProfiler::Setup();
- HeapProfiler::Setup();
-
- // Setup the platform OS support.
- OS::Setup();
-
- // Initialize other runtime facilities
-#if defined(USE_SIMULATOR)
-#if defined(V8_TARGET_ARCH_ARM)
- Simulator::Initialize();
-#elif defined(V8_TARGET_ARCH_MIPS)
- ::assembler::mips::Simulator::Initialize();
-#endif
-#endif
-
- { // NOLINT
- // Ensure that the thread has a valid stack guard. The v8::Locker object
- // will ensure this too, but we don't have to use lockers if we are only
- // using one thread.
- ExecutionAccess lock;
- StackGuard::InitThread(lock);
- }
-
- // Setup the object heap
- ASSERT(!Heap::HasBeenSetup());
- if (!Heap::Setup(create_heap_objects)) {
- SetFatalError();
- return false;
- }
-
- Bootstrapper::Initialize(create_heap_objects);
- Builtins::Setup(create_heap_objects);
- Top::Initialize();
-
- if (FLAG_preemption) {
- v8::Locker locker;
- v8::Locker::StartPreemption(100);
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug::Setup(create_heap_objects);
-#endif
- StubCache::Initialize(create_heap_objects);
-
- // If we are deserializing, read the state into the now-empty heap.
- if (des != NULL) {
- des->Deserialize();
- StubCache::Clear();
- }
-
- // Deserializing may put strange things in the root array's copy of the
- // stack guard.
- Heap::SetStackLimits();
-
- // Setup the CPU support. Must be done after heap setup and after
- // any deserialization because we have to have the initial heap
- // objects in place for creating the code object used for probing.
- CPU::Setup();
-
- Deoptimizer::Setup();
- LAllocator::Setup();
- RuntimeProfiler::Setup();
-
- // If we are deserializing, log non-function code objects and compiled
- // functions found in the snapshot.
- if (des != NULL && FLAG_log_code) {
- HandleScope scope;
- LOG(LogCodeObjects());
- LOG(LogCompiledFunctions());
- }
- return true;
+ return isolate->Init(des);
}
@@ -156,31 +89,11 @@ void V8::SetFatalError() {
void V8::TearDown() {
- if (!has_been_setup_ || has_been_disposed_) return;
-
- if (FLAG_time_hydrogen) HStatistics::Instance()->Print();
-
- // We must stop the logger before we tear down other components.
- Logger::EnsureTickerStopped();
-
- Deoptimizer::TearDown();
-
- if (FLAG_preemption) {
- v8::Locker locker;
- v8::Locker::StopPreemption();
- }
-
- Builtins::TearDown();
- Bootstrapper::TearDown();
+ Isolate* isolate = Isolate::Current();
+ ASSERT(isolate->IsDefaultIsolate());
- Top::TearDown();
-
- HeapProfiler::TearDown();
- CpuProfiler::TearDown();
- RuntimeProfiler::TearDown();
-
- Logger::TearDown();
- Heap::TearDown();
+ if (!has_been_setup_ || has_been_disposed_) return;
+ isolate->TearDown();
is_running_ = false;
has_been_disposed_ = true;
@@ -218,7 +131,9 @@ static uint32_t random_base(random_state *state) {
// Used by JavaScript APIs
-uint32_t V8::Random() {
+uint32_t V8::Random(Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ // TODO(isolates): move lo and hi to isolate
static random_state state = {0, 0};
return random_base(&state);
}
@@ -227,7 +142,9 @@ uint32_t V8::Random() {
// Used internally by the JIT and memory allocator for security
// purposes. So, we keep a different state to prevent information
// leaks that could be used in an exploit.
-uint32_t V8::RandomPrivate() {
+uint32_t V8::RandomPrivate(Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ // TODO(isolates): move lo and hi to isolate
static random_state state = {0, 0};
return random_base(&state);
}
@@ -239,7 +156,7 @@ bool V8::IdleNotification() {
if (!FLAG_use_idle_notification) return true;
// Tell the heap that it may want to adjust.
- return Heap::IdleNotification();
+ return HEAP->IdleNotification();
}
@@ -250,8 +167,8 @@ typedef union {
} double_int_union;
-Object* V8::FillHeapNumberWithRandom(Object* heap_number) {
- uint64_t random_bits = Random();
+Object* V8::FillHeapNumberWithRandom(Object* heap_number, Isolate* isolate) {
+ uint64_t random_bits = Random(isolate);
// Make a double* from address (heap_number + sizeof(double)).
double_int_union* r = reinterpret_cast<double_int_union*>(
reinterpret_cast<char*>(heap_number) +
@@ -267,4 +184,30 @@ Object* V8::FillHeapNumberWithRandom(Object* heap_number) {
return heap_number;
}
+
+void V8::InitializeOncePerProcess() {
+ ScopedLock lock(init_once_mutex);
+ if (init_once_called) return;
+ init_once_called = true;
+
+ // Setup the platform OS support.
+ OS::Setup();
+
+ use_crankshaft_ = FLAG_crankshaft;
+
+ if (Serializer::enabled()) {
+ use_crankshaft_ = false;
+ }
+
+ CPU::Setup();
+ if (!CPU::SupportsCrankshaft()) {
+ use_crankshaft_ = false;
+ }
+
+ RuntimeProfiler::GlobalSetup();
+
+ // Peephole optimization might interfere with deoptimization.
+ FLAG_peephole_optimization = !use_crankshaft_;
+}
+
} } // namespace v8::internal
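V8::InitializeOncePerProcess() above guards process-wide setup with a static mutex and a boolean flag, so any number of threads and isolates may race into V8::Initialize() and the setup still runs exactly once. A standalone sketch of that pattern, with std::mutex standing in for the Mutex returned by OS::CreateMutex():

#include <cstdio>
#include <mutex>

static std::mutex init_once_mutex;
static bool init_once_called = false;

static void InitializeOncePerProcess() {
  std::lock_guard<std::mutex> lock(init_once_mutex);
  if (init_once_called) return;  // every later call is a cheap no-op
  init_once_called = true;

  // Process-wide setup goes here exactly once, no matter how many
  // isolates are created or how many threads race into Initialize().
  std::printf("process-wide setup\n");
}

int main() {
  InitializeOncePerProcess();
  InitializeOncePerProcess();  // second call returns immediately
  return 0;
}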
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index cc1673e13..e74a60c2f 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -35,13 +35,10 @@
#if defined(GOOGLE3)
// Google3 special flag handling.
#if defined(DEBUG) && defined(NDEBUG)
-// If both are defined in Google3, then we are building an optimized v8 with
-// assertions enabled.
+// V8 only uses DEBUG, and whenever it is set we are building a debug
+// version of V8. We do not use NDEBUG and simply undef it here for
+// consistency.
#undef NDEBUG
-#elif !defined(DEBUG) && !defined(NDEBUG)
-// If neither is defined in Google3, then we are building a debug v8. Mark it
-// as such.
-#define DEBUG
#endif
#endif // defined(GOOGLE3)
@@ -66,6 +63,7 @@
#include "log-inl.h"
#include "cpu-profiler-inl.h"
#include "handles-inl.h"
+#include "isolate-inl.h"
namespace v8 {
namespace internal {
@@ -84,8 +82,8 @@ class V8 : public AllStatic {
static void TearDown();
static bool IsRunning() { return is_running_; }
static bool UseCrankshaft() { return use_crankshaft_; }
- static void DisableCrankshaft() { use_crankshaft_ = false; }
// To be dead you have to have lived
+ // TODO(isolates): move IsDead to Isolate.
static bool IsDead() { return has_fatal_error_ || has_been_disposed_; }
static void SetFatalError();
@@ -94,18 +92,21 @@ class V8 : public AllStatic {
bool take_snapshot = false);
// Random number generation support. Not cryptographically safe.
- static uint32_t Random();
+ static uint32_t Random(Isolate* isolate);
// We use random numbers internally in memory allocation and in the
// compilers for security. In order to prevent information leaks we
// use a separate random state for internal random number
// generation.
- static uint32_t RandomPrivate();
- static Object* FillHeapNumberWithRandom(Object* heap_number);
+ static uint32_t RandomPrivate(Isolate* isolate);
+ static Object* FillHeapNumberWithRandom(Object* heap_number,
+ Isolate* isolate);
// Idle notification directly from the API.
static bool IdleNotification();
private:
+ static void InitializeOncePerProcess();
+
// True if engine is currently running
static bool is_running_;
// True if V8 has ever been run
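FillHeapNumberWithRandom, declared above with its new Isolate parameter, pushes 64 random bits into a heap number through a double_int_union; the bit-mapping itself is elided from this hunk. The sketch below shows the usual technique such helpers use — install random mantissa bits under the exponent of 1.0, then subtract 1.0 — offered as a plausible reconstruction, not the verbatim V8 body:

#include <cstdint>
#include <cstdio>
#include <cstring>

static double RandomBitsToDouble(uint64_t random_bits) {
  // 0x3FF << 52 is the exponent of 1.0; the low 52 bits are the mantissa,
  // so the result lies in [1.0, 2.0) before the subtraction.
  uint64_t bits = (UINT64_C(0x3FF) << 52) | (random_bits >> 12);
  double d;
  std::memcpy(&d, &bits, sizeof d);  // same role as double_int_union
  return d - 1.0;                    // now uniform in [0.0, 1.0)
}

int main() {
  std::printf("%f\n", RandomBitsToDouble(UINT64_C(0x123456789abcdef0)));
  return 0;
}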
diff --git a/deps/v8/src/v8dll-main.cc b/deps/v8/src/v8dll-main.cc
index 3d4b3a379..49d868957 100644
--- a/deps/v8/src/v8dll-main.cc
+++ b/deps/v8/src/v8dll-main.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,10 +25,14 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <windows.h>
-
+// The GYP-based build ends up defining USING_V8_SHARED when compiling this
+// file.
+#undef USING_V8_SHARED
#include "../include/v8.h"
+#ifdef WIN32
+#include <windows.h> // NOLINT
+
extern "C" {
BOOL WINAPI DllMain(HANDLE hinstDLL,
DWORD dwReason,
@@ -37,3 +41,4 @@ BOOL WINAPI DllMain(HANDLE hinstDLL,
return TRUE;
}
}
+#endif
diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h
index d11bc3833..a23ca194a 100644
--- a/deps/v8/src/v8globals.h
+++ b/deps/v8/src/v8globals.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -154,7 +154,7 @@ class Object;
class MaybeObject;
class OldSpace;
class Property;
-class Proxy;
+class Foreign;
class RegExpNode;
struct RegExpCompileData;
class RegExpTree;
@@ -185,6 +185,8 @@ class Mutex;
typedef bool (*WeakSlotCallback)(Object** pointer);
+typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer);
+
// -----------------------------------------------------------------------------
// Miscellaneous
@@ -218,7 +220,12 @@ enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
-enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };
+enum VisitMode {
+ VISIT_ALL,
+ VISIT_ALL_IN_SCAVENGE,
+ VISIT_ALL_IN_SWEEP_NEWSPACE,
+ VISIT_ONLY_STRONG
+};
// Flag indicating whether code is built into the VM (one of the natives files).
enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
@@ -303,7 +310,9 @@ enum InLoopFlag {
enum CallFunctionFlags {
NO_CALL_FUNCTION_FLAGS = 0,
- RECEIVER_MIGHT_BE_VALUE = 1 << 0 // Receiver might not be a JSObject.
+  // Receiver might implicitly be the global object. If it is, the
+ // hole is passed to the call function stub.
+ RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0
};
@@ -318,14 +327,16 @@ enum InlineCacheHolderFlag {
// Must fit in the BitField PropertyDetails::TypeField.
// A copy of this is in mirror-debugger.js.
enum PropertyType {
- NORMAL = 0, // only in slow mode
- FIELD = 1, // only in fast mode
- CONSTANT_FUNCTION = 2, // only in fast mode
- CALLBACKS = 3,
- INTERCEPTOR = 4, // only in lookup results, not in descriptors.
- MAP_TRANSITION = 5, // only in fast mode
- CONSTANT_TRANSITION = 6, // only in fast mode
- NULL_DESCRIPTOR = 7, // only in fast mode
+ NORMAL = 0, // only in slow mode
+ FIELD = 1, // only in fast mode
+ CONSTANT_FUNCTION = 2, // only in fast mode
+ CALLBACKS = 3,
+ HANDLER = 4, // only in lookup results, not in descriptors
+ INTERCEPTOR = 5, // only in lookup results, not in descriptors
+ MAP_TRANSITION = 6, // only in fast mode
+ EXTERNAL_ARRAY_TRANSITION = 7,
+ CONSTANT_TRANSITION = 8, // only in fast mode
+ NULL_DESCRIPTOR = 9, // only in fast mode
// All properties before MAP_TRANSITION are real.
FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION,
// There are no IC stubs for NULL_DESCRIPTORS. Therefore,
@@ -443,11 +454,11 @@ enum StateTag {
#define TRACK_MEMORY(name) \
void* operator new(size_t size) { \
void* result = ::operator new(size); \
- Logger::NewEvent(name, result, size); \
+ Logger::NewEventStatic(name, result, size); \
return result; \
} \
void operator delete(void* object) { \
- Logger::DeleteEvent(name, object); \
+ Logger::DeleteEventStatic(name, object); \
::operator delete(object); \
}
#else
@@ -467,12 +478,33 @@ enum CpuFeature { SSE4_1 = 32 + 19, // x86
CPUID = 10, // x86
VFP3 = 1, // ARM
ARMv7 = 2, // ARM
- SAHF = 0}; // x86
+ SAHF = 0, // x86
+ FPU = 1}; // MIPS
// The Strict Mode (ECMA-262 5th edition, 4.2.2).
enum StrictModeFlag {
kNonStrictMode,
- kStrictMode
+ kStrictMode,
+ // This value is never used, but is needed to prevent GCC 4.5 from failing
+ // to compile when we assert that a flag is either kNonStrictMode or
+ // kStrictMode.
+ kInvalidStrictFlag
+};
+
+
+// Used to specify whether a macro instruction must perform a smi check on
+// tagged values.
+enum SmiCheckType {
+ DONT_DO_SMI_CHECK = 0,
+ DO_SMI_CHECK
+};
+
+
+// Used to specify whether a receiver is implicitly or explicitly
+// provided to a call.
+enum CallKind {
+ CALL_AS_METHOD = 0,
+ CALL_AS_FUNCTION
};
} } // namespace v8::internal
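The TRACK_MEMORY macro touched above injects class-scope operator new/delete overloads so that every allocation and deallocation of an annotated class is reported to the logger (now via the *Static entry points). A standalone sketch of the wrapper idea, with printf standing in for Logger::NewEventStatic and Logger::DeleteEventStatic:

#include <cstddef>
#include <cstdio>
#include <new>

#define TRACK_MEMORY(name)                                  \
  void* operator new(std::size_t size) {                    \
    void* result = ::operator new(size);                    \
    std::printf("new    %s %p %zu\n", name, result, size);  \
    return result;                                          \
  }                                                         \
  void operator delete(void* object) {                      \
    std::printf("delete %s %p\n", name, object);            \
    ::operator delete(object);                              \
  }

class TrackedNode {
 public:
  TRACK_MEMORY("TrackedNode")
  int payload;
};

int main() {
  TrackedNode* n = new TrackedNode();  // routed through the class operator new
  n->payload = 1;
  delete n;                            // routed through the class operator delete
  return 0;
}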
diff --git a/deps/v8/src/memory.h b/deps/v8/src/v8memory.h
index 901e78d29..901e78d29 100644
--- a/deps/v8/src/memory.h
+++ b/deps/v8/src/v8memory.h
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 823f8ee57..0afe231c8 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -56,6 +56,7 @@ function InstallFunctions(object, attributes, functions) {
%FunctionSetName(f, key);
%FunctionRemovePrototype(f);
%SetProperty(object, key, f, attributes);
+ %SetNativeFlag(f);
}
%ToFastProperties(object);
}
@@ -105,7 +106,7 @@ function GlobalParseInt(string, radix) {
// Truncate number.
return string | 0;
}
- if (IS_UNDEFINED(radix)) radix = 0;
+ radix = radix | 0;
} else {
radix = TO_INT32(radix);
if (!(radix == 0 || (2 <= radix && radix <= 36)))
@@ -131,10 +132,19 @@ function GlobalParseFloat(string) {
function GlobalEval(x) {
if (!IS_STRING(x)) return x;
+ var receiver = this;
var global_receiver = %GlobalReceiver(global);
- var this_is_global_receiver = (this === global_receiver);
+
+ if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+ receiver = global_receiver;
+ }
+
+ var this_is_global_receiver = (receiver === global_receiver);
var global_is_detached = (global === global_receiver);
+ // For consistency with JSC we require the global object passed to
+ // eval to be the global object from which 'eval' originated. This
+ // is not mandated by the spec.
if (!this_is_global_receiver || global_is_detached) {
throw new $EvalError('The "this" object passed to eval must ' +
'be the global object from which eval originated');
@@ -143,7 +153,7 @@ function GlobalEval(x) {
var f = %CompileString(x);
if (!IS_FUNCTION(f)) return f;
- return f.call(this);
+ return %_CallFunction(receiver, f);
}
@@ -196,12 +206,20 @@ $Object.prototype.constructor = $Object;
// ECMA-262 - 15.2.4.2
function ObjectToString() {
+ if (IS_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ return '[object Undefined]';
+ }
+ if (IS_NULL(this)) return '[object Null]';
return "[object " + %_ClassOf(ToObject(this)) + "]";
}
// ECMA-262 - 15.2.4.3
function ObjectToLocaleString() {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Object.prototype.toLocaleString"]);
+ }
return this.toString();
}
@@ -214,12 +232,16 @@ function ObjectValueOf() {
// ECMA-262 - 15.2.4.5
function ObjectHasOwnProperty(V) {
- return %HasLocalProperty(ToObject(this), ToString(V));
+ return %HasLocalProperty(TO_OBJECT_INLINE(this), TO_STRING_INLINE(V));
}
// ECMA-262 - 15.2.4.6
function ObjectIsPrototypeOf(V) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Object.prototype.isPrototypeOf"]);
+ }
if (!IS_SPEC_OBJECT(V)) return false;
return %IsInPrototypeChain(this, V);
}
@@ -233,41 +255,53 @@ function ObjectPropertyIsEnumerable(V) {
// Extensions for providing property getters and setters.
function ObjectDefineGetter(name, fun) {
- if (this == null && !IS_UNDETECTABLE(this)) {
- throw new $TypeError('Object.prototype.__defineGetter__: this is Null');
+ var receiver = this;
+ if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+ receiver = %GlobalReceiver(global);
}
if (!IS_FUNCTION(fun)) {
throw new $TypeError('Object.prototype.__defineGetter__: Expecting function');
}
- return %DefineAccessor(ToObject(this), ToString(name), GETTER, fun);
+ var desc = new PropertyDescriptor();
+ desc.setGet(fun);
+ desc.setEnumerable(true);
+ desc.setConfigurable(true);
+ DefineOwnProperty(ToObject(receiver), ToString(name), desc, false);
}
function ObjectLookupGetter(name) {
- if (this == null && !IS_UNDETECTABLE(this)) {
- throw new $TypeError('Object.prototype.__lookupGetter__: this is Null');
+ var receiver = this;
+ if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+ receiver = %GlobalReceiver(global);
}
- return %LookupAccessor(ToObject(this), ToString(name), GETTER);
+ return %LookupAccessor(ToObject(receiver), ToString(name), GETTER);
}
function ObjectDefineSetter(name, fun) {
- if (this == null && !IS_UNDETECTABLE(this)) {
- throw new $TypeError('Object.prototype.__defineSetter__: this is Null');
+ var receiver = this;
+ if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+ receiver = %GlobalReceiver(global);
}
if (!IS_FUNCTION(fun)) {
throw new $TypeError(
'Object.prototype.__defineSetter__: Expecting function');
}
- return %DefineAccessor(ToObject(this), ToString(name), SETTER, fun);
+ var desc = new PropertyDescriptor();
+ desc.setSet(fun);
+ desc.setEnumerable(true);
+ desc.setConfigurable(true);
+ DefineOwnProperty(ToObject(receiver), ToString(name), desc, false);
}
function ObjectLookupSetter(name) {
- if (this == null && !IS_UNDETECTABLE(this)) {
- throw new $TypeError('Object.prototype.__lookupSetter__: this is Null');
+ var receiver = this;
+ if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+ receiver = %GlobalReceiver(global);
}
- return %LookupAccessor(ToObject(this), ToString(name), SETTER);
+ return %LookupAccessor(ToObject(receiver), ToString(name), SETTER);
}
@@ -302,6 +336,7 @@ function IsInconsistentDescriptor(desc) {
return IsAccessorDescriptor(desc) && IsDataDescriptor(desc);
}
+
// ES5 8.10.4
function FromPropertyDescriptor(desc) {
if (IS_UNDEFINED(desc)) return desc;
@@ -365,6 +400,23 @@ function ToPropertyDescriptor(obj) {
}
+// For Harmony proxies.
+function ToCompletePropertyDescriptor(obj) {
+  var desc = ToPropertyDescriptor(obj);
+ if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) {
+ if (!("value" in desc)) desc.value = void 0;
+ if (!("writable" in desc)) desc.writable = false;
+ } else {
+ // Is accessor descriptor.
+ if (!("get" in desc)) desc.get = void 0;
+ if (!("set" in desc)) desc.set = void 0;
+ }
+ if (!("enumerable" in desc)) desc.enumerable = false;
+ if (!("configurable" in desc)) desc.configurable = false;
+ return desc;
+}
+
+
function PropertyDescriptor() {
// Initialize here so they are all in-object and have the same map.
// Default values from ES5 8.6.1.
@@ -382,6 +434,10 @@ function PropertyDescriptor() {
this.hasSetter_ = false;
}
+PropertyDescriptor.prototype.__proto__ = null;
+PropertyDescriptor.prototype.toString = function() {
+ return "[object PropertyDescriptor]";
+};
PropertyDescriptor.prototype.setValue = function(value) {
this.value_ = value;
@@ -483,7 +539,7 @@ PropertyDescriptor.prototype.hasSetter = function() {
// property descriptor. For a description of the array layout please
// see the runtime.cc file.
function ConvertDescriptorArrayToDescriptor(desc_array) {
- if (desc_array == false) {
+ if (desc_array === false) {
throw 'Internal error: invalid desc_array';
}
@@ -509,9 +565,25 @@ function ConvertDescriptorArrayToDescriptor(desc_array) {
// ES5 section 8.12.2.
function GetProperty(obj, p) {
+ if (%IsJSProxy(obj)) {
+ var handler = %GetHandler(obj);
+ var getProperty = handler.getPropertyDescriptor;
+ if (IS_UNDEFINED(getProperty)) {
+ throw MakeTypeError("handler_trap_missing",
+ [handler, "getPropertyDescriptor"]);
+ }
+ var descriptor = getProperty.call(handler, p);
+ if (IS_UNDEFINED(descriptor)) return descriptor;
+ var desc = ToCompletePropertyDescriptor(descriptor);
+ if (!desc.configurable) {
+ throw MakeTypeError("proxy_prop_not_configurable",
+ [handler, "getPropertyDescriptor", p, descriptor]);
+ }
+ return desc;
+ }
var prop = GetOwnProperty(obj);
if (!IS_UNDEFINED(prop)) return prop;
- var proto = obj.__proto__;
+ var proto = %GetPrototype(obj);
if (IS_NULL(proto)) return void 0;
return GetProperty(proto, p);
}
@@ -519,6 +591,12 @@ function GetProperty(obj, p) {
// ES5 section 8.12.6
function HasProperty(obj, p) {
+ if (%IsJSProxy(obj)) {
+ var handler = %GetHandler(obj);
+ var has = handler.has;
+ if (IS_UNDEFINED(has)) has = DerivedHasTrap;
+ return ToBoolean(has.call(handler, obj, p));
+ }
var desc = GetProperty(obj, p);
return IS_UNDEFINED(desc) ? false : true;
}
@@ -532,7 +610,7 @@ function GetOwnProperty(obj, p) {
var props = %GetOwnProperty(ToObject(obj), ToString(p));
// A false value here means that access checks failed.
- if (props == false) return void 0;
+ if (props === false) return void 0;
return ConvertDescriptorArrayToDescriptor(props);
}
@@ -542,15 +620,20 @@ function GetOwnProperty(obj, p) {
function DefineOwnProperty(obj, p, desc, should_throw) {
var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
// A false value here means that access checks failed.
- if (current_or_access == false) return void 0;
+ if (current_or_access === false) return void 0;
var current = ConvertDescriptorArrayToDescriptor(current_or_access);
var extensible = %IsExtensible(ToObject(obj));
// Error handling according to spec.
// Step 3
- if (IS_UNDEFINED(current) && !extensible)
- throw MakeTypeError("define_disallowed", ["defineProperty"]);
+ if (IS_UNDEFINED(current) && !extensible) {
+ if (should_throw) {
+ throw MakeTypeError("define_disallowed", [p]);
+ } else {
+ return;
+ }
+ }
if (!IS_UNDEFINED(current)) {
// Step 5 and 6
@@ -575,31 +658,55 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
if (desc.isConfigurable() ||
(desc.hasEnumerable() &&
desc.isEnumerable() != current.isEnumerable())) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ if (should_throw) {
+ throw MakeTypeError("redefine_disallowed", [p]);
+ } else {
+ return;
+ }
}
// Step 8
if (!IsGenericDescriptor(desc)) {
// Step 9a
if (IsDataDescriptor(current) != IsDataDescriptor(desc)) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ if (should_throw) {
+ throw MakeTypeError("redefine_disallowed", [p]);
+ } else {
+ return;
+ }
}
// Step 10a
if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
if (!current.isWritable() && desc.isWritable()) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ if (should_throw) {
+ throw MakeTypeError("redefine_disallowed", [p]);
+ } else {
+ return;
+ }
}
if (!current.isWritable() && desc.hasValue() &&
!SameValue(desc.getValue(), current.getValue())) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ if (should_throw) {
+ throw MakeTypeError("redefine_disallowed", [p]);
+ } else {
+ return;
+ }
}
}
// Step 11
if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ if (should_throw) {
+ throw MakeTypeError("redefine_disallowed", [p]);
+ } else {
+ return;
+ }
}
if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet())) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ if (should_throw) {
+ throw MakeTypeError("redefine_disallowed", [p]);
+ } else {
+ return;
+ }
}
}
}
@@ -678,7 +785,7 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
function ObjectGetPrototypeOf(obj) {
if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]);
- return obj.__proto__;
+ return %GetPrototype(obj);
}
@@ -691,11 +798,43 @@ function ObjectGetOwnPropertyDescriptor(obj, p) {
}
+// For Harmony proxies
+function ToStringArray(obj, trap) {
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError("proxy_non_object_prop_names", [obj, trap]);
+ }
+ var n = ToUint32(obj.length);
+ var array = new $Array(n);
+  var names = {};
+  for (var index = 0; index < n; index++) {
+    var s = ToString(obj[index]);
+    if (s in names) {
+      throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s]);
+    }
+    array[index] = s;
+    names[s] = 0;
+ }
+ return array;
+}
+
+
// ES5 section 15.2.3.4.
function ObjectGetOwnPropertyNames(obj) {
if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]);
+ // Special handling for proxies.
+ if (%IsJSProxy(obj)) {
+ var handler = %GetHandler(obj);
+ var getOwnPropertyNames = handler.getOwnPropertyNames;
+ if (IS_UNDEFINED(getOwnPropertyNames)) {
+ throw MakeTypeError("handler_trap_missing",
+ [handler, "getOwnPropertyNames"]);
+ }
+ var names = getOwnPropertyNames.call(handler);
+ return ToStringArray(names, "getOwnPropertyNames");
+ }
+
// Find all the indexed properties.
// Get the local element names.
@@ -799,8 +938,10 @@ function ObjectSeal(obj) {
for (var i = 0; i < names.length; i++) {
var name = names[i];
var desc = GetOwnProperty(obj, name);
- if (desc.isConfigurable()) desc.setConfigurable(false);
- DefineOwnProperty(obj, name, desc, true);
+ if (desc.isConfigurable()) {
+ desc.setConfigurable(false);
+ DefineOwnProperty(obj, name, desc, true);
+ }
}
return ObjectPreventExtension(obj);
}
@@ -815,9 +956,11 @@ function ObjectFreeze(obj) {
for (var i = 0; i < names.length; i++) {
var name = names[i];
var desc = GetOwnProperty(obj, name);
- if (IsDataDescriptor(desc)) desc.setWritable(false);
- if (desc.isConfigurable()) desc.setConfigurable(false);
- DefineOwnProperty(obj, name, desc, true);
+ if (desc.isWritable() || desc.isConfigurable()) {
+ if (IsDataDescriptor(desc)) desc.setWritable(false);
+ desc.setConfigurable(false);
+ DefineOwnProperty(obj, name, desc, true);
+ }
}
return ObjectPreventExtension(obj);
}
@@ -873,7 +1016,7 @@ function ObjectIsFrozen(obj) {
// ES5 section 15.2.3.13
function ObjectIsExtensible(obj) {
if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
+ throw MakeTypeError("obj_ctor_property_non_object", ["isExtensible"]);
}
return %IsExtensible(obj);
}
@@ -1009,6 +1152,10 @@ function NumberToString(radix) {
// ECMA-262 section 15.7.4.3
function NumberToLocaleString() {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Number.prototype.toLocaleString"]);
+ }
return this.toString();
}
@@ -1029,6 +1176,10 @@ function NumberToFixed(fractionDigits) {
if (f < 0 || f > 20) {
throw new $RangeError("toFixed() digits argument must be between 0 and 20");
}
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Number.prototype.toFixed"]);
+ }
var x = ToNumber(this);
return %NumberToFixed(x, f);
}
@@ -1043,6 +1194,10 @@ function NumberToExponential(fractionDigits) {
throw new $RangeError("toExponential() argument must be between 0 and 20");
}
}
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Number.prototype.toExponential"]);
+ }
var x = ToNumber(this);
return %NumberToExponential(x, f);
}
@@ -1050,6 +1205,10 @@ function NumberToExponential(fractionDigits) {
// ECMA-262 section 15.7.4.7
function NumberToPrecision(precision) {
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Number.prototype.toPrecision"]);
+ }
if (IS_UNDEFINED(precision)) return ToString(%_ValueOf(this));
var p = TO_INTEGER(precision);
if (p < 1 || p > 21) {
@@ -1158,7 +1317,7 @@ function FunctionBind(this_arg) { // Length is 1.
return fn.apply(this_arg, arguments);
};
} else {
- var bound_args = new $Array(argc_bound);
+ var bound_args = new InternalArray(argc_bound);
for(var i = 0; i < argc_bound; i++) {
bound_args[i] = %_Arguments(i+1);
}
@@ -1176,7 +1335,7 @@ function FunctionBind(this_arg) { // Length is 1.
// Combine the args we got from the bind call with the args
// given as argument to the invocation.
var argc = %_ArgumentsLength();
- var args = new $Array(argc + argc_bound);
+ var args = new InternalArray(argc + argc_bound);
// Add bound arguments.
for (var i = 0; i < argc_bound; i++) {
args[i] = bound_args[i];
@@ -1199,7 +1358,8 @@ function FunctionBind(this_arg) { // Length is 1.
// Set the correct length.
var length = (this.length - argc_bound) > 0 ? this.length - argc_bound : 0;
%FunctionSetLength(result, length);
-
+ %FunctionRemovePrototype(result);
+ %FunctionSetBound(result);
return result;
}
@@ -1208,7 +1368,7 @@ function NewFunction(arg1) { // length == 1
var n = %_ArgumentsLength();
var p = '';
if (n > 1) {
- p = new $Array(n - 1);
+ p = new InternalArray(n - 1);
for (var i = 0; i < n - 1; i++) p[i] = %_Arguments(i);
p = Join(p, n - 1, ',', NonStringToString);
// If the formal parameters string include ) - an illegal
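Among the v8natives.js changes above, ToCompletePropertyDescriptor fills in the ES5 defaults for whichever descriptor fields are absent. That file is JavaScript; the C++ sketch below merely restates the defaulting rules with explicit presence flags (echoing PropertyDescriptor's hasValue_/hasGetter_ bookkeeping) and is an illustration, not V8 code:

#include <cstdio>

// Illustrative descriptor with presence flags; an "undefined" value is
// represented only by its presence flag being set.
struct Descriptor {
  bool has_value, has_writable, has_get, has_set;
  bool has_enumerable, has_configurable;
  bool writable, enumerable, configurable;
};

static bool IsAccessor(const Descriptor& d) { return d.has_get || d.has_set; }

// Mirrors ToCompletePropertyDescriptor: generic/data descriptors default
// value (to undefined) and writable (to false); accessor descriptors
// default get/set (to undefined); both kinds default enumerable and
// configurable to false.
static void Complete(Descriptor* d) {
  if (!IsAccessor(*d)) {
    if (!d->has_value) d->has_value = true;
    if (!d->has_writable) { d->has_writable = true; d->writable = false; }
  } else {
    if (!d->has_get) d->has_get = true;
    if (!d->has_set) d->has_set = true;
  }
  if (!d->has_enumerable) { d->has_enumerable = true; d->enumerable = false; }
  if (!d->has_configurable) {
    d->has_configurable = true;
    d->configurable = false;
  }
}

int main() {
  Descriptor d = {};  // nothing present
  Complete(&d);
  std::printf("writable=%d enumerable=%d configurable=%d\n",
              d.writable, d.enumerable, d.configurable);  // all 0
  return 0;
}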
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index 8a5fe6902..978e2ddf3 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -36,11 +36,6 @@
namespace v8 {
-static internal::Thread::LocalStorageKey thread_state_key =
- internal::Thread::CreateThreadLocalKey();
-static internal::Thread::LocalStorageKey thread_id_key =
- internal::Thread::CreateThreadLocalKey();
-
// Track whether this V8 instance has ever called v8::Locker. This allows the
// API code to verify that the lock is always held when V8 is being entered.
@@ -48,66 +43,94 @@ bool Locker::active_ = false;
// Constructor for the Locker object. Once the Locker is constructed the
-// current thread will be guaranteed to have the big V8 lock.
-Locker::Locker() : has_lock_(false), top_level_(true) {
+// current thread will be guaranteed to have the lock for a given isolate.
+Locker::Locker(v8::Isolate* isolate)
+ : has_lock_(false),
+ top_level_(false),
+ isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
+ if (isolate_ == NULL) {
+ isolate_ = i::Isolate::GetDefaultIsolateForLocking();
+ }
// Record that the Locker has been used at least once.
active_ = true;
// Get the big lock if necessary.
- if (!internal::ThreadManager::IsLockedByCurrentThread()) {
- internal::ThreadManager::Lock();
+ if (!isolate_->thread_manager()->IsLockedByCurrentThread()) {
+ isolate_->thread_manager()->Lock();
has_lock_ = true;
+
// Make sure that V8 is initialized. Archiving of threads interferes
// with deserialization by adding additional root pointers, so we must
// initialize here, before anyone can call ~Locker() or Unlocker().
- if (!internal::V8::IsRunning()) {
+ if (!isolate_->IsInitialized()) {
+ isolate_->Enter();
V8::Initialize();
+ isolate_->Exit();
}
+
// This may be a locker within an unlocker in which case we have to
// get the saved state for this thread and restore it.
- if (internal::ThreadManager::RestoreThread()) {
+ if (isolate_->thread_manager()->RestoreThread()) {
top_level_ = false;
} else {
- internal::ExecutionAccess access;
- internal::StackGuard::ClearThread(access);
- internal::StackGuard::InitThread(access);
+ internal::ExecutionAccess access(isolate_);
+ isolate_->stack_guard()->ClearThread(access);
+ isolate_->stack_guard()->InitThread(access);
+ }
+ if (isolate_->IsDefaultIsolate()) {
+ // This only enters if not yet entered.
+ internal::Isolate::EnterDefaultIsolate();
}
}
- ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
-
- // Make sure this thread is assigned a thread id.
- internal::ThreadManager::AssignId();
+ ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
}
-bool Locker::IsLocked() {
- return internal::ThreadManager::IsLockedByCurrentThread();
+bool Locker::IsLocked(v8::Isolate* isolate) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ if (internal_isolate == NULL) {
+ internal_isolate = i::Isolate::GetDefaultIsolateForLocking();
+ }
+ return internal_isolate->thread_manager()->IsLockedByCurrentThread();
}
Locker::~Locker() {
- ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
+ ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
if (has_lock_) {
+ if (isolate_->IsDefaultIsolate()) {
+ isolate_->Exit();
+ }
if (top_level_) {
- internal::ThreadManager::FreeThreadResources();
+ isolate_->thread_manager()->FreeThreadResources();
} else {
- internal::ThreadManager::ArchiveThread();
+ isolate_->thread_manager()->ArchiveThread();
}
- internal::ThreadManager::Unlock();
+ isolate_->thread_manager()->Unlock();
}
}
-Unlocker::Unlocker() {
- ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
- internal::ThreadManager::ArchiveThread();
- internal::ThreadManager::Unlock();
+Unlocker::Unlocker(v8::Isolate* isolate)
+ : isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
+ if (isolate_ == NULL) {
+ isolate_ = i::Isolate::GetDefaultIsolateForLocking();
+ }
+ ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
+ if (isolate_->IsDefaultIsolate()) {
+ isolate_->Exit();
+ }
+ isolate_->thread_manager()->ArchiveThread();
+ isolate_->thread_manager()->Unlock();
}
Unlocker::~Unlocker() {
- ASSERT(!internal::ThreadManager::IsLockedByCurrentThread());
- internal::ThreadManager::Lock();
- internal::ThreadManager::RestoreThread();
+ ASSERT(!isolate_->thread_manager()->IsLockedByCurrentThread());
+ isolate_->thread_manager()->Lock();
+ isolate_->thread_manager()->RestoreThread();
+ if (isolate_->IsDefaultIsolate()) {
+ isolate_->Enter();
+ }
}
@@ -125,52 +148,56 @@ namespace internal {
bool ThreadManager::RestoreThread() {
+ ASSERT(IsLockedByCurrentThread());
// First check whether the current thread has been 'lazily archived', i.e.
// not archived at all. If that is the case, we put the state storage we
// had prepared back in the free list, since we didn't need it after all.
- if (lazily_archived_thread_.IsSelf()) {
- lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
- ASSERT(Thread::GetThreadLocal(thread_state_key) ==
- lazily_archived_thread_state_);
- lazily_archived_thread_state_->set_id(kInvalidId);
+ if (lazily_archived_thread_.Equals(ThreadId::Current())) {
+ lazily_archived_thread_ = ThreadId::Invalid();
+ Isolate::PerIsolateThreadData* per_thread =
+ isolate_->FindPerThreadDataForThisThread();
+ ASSERT(per_thread != NULL);
+ ASSERT(per_thread->thread_state() == lazily_archived_thread_state_);
+ lazily_archived_thread_state_->set_id(ThreadId::Invalid());
lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
lazily_archived_thread_state_ = NULL;
- Thread::SetThreadLocal(thread_state_key, NULL);
+ per_thread->set_thread_state(NULL);
return true;
}
// Make sure that the preemption thread cannot modify the thread state while
// it is being archived or restored.
- ExecutionAccess access;
+ ExecutionAccess access(isolate_);
// If there is another thread that was lazily archived then we have to really
// archive it now.
if (lazily_archived_thread_.IsValid()) {
EagerlyArchiveThread();
}
- ThreadState* state =
- reinterpret_cast<ThreadState*>(Thread::GetThreadLocal(thread_state_key));
- if (state == NULL) {
+ Isolate::PerIsolateThreadData* per_thread =
+ isolate_->FindPerThreadDataForThisThread();
+ if (per_thread == NULL || per_thread->thread_state() == NULL) {
// This is a new thread.
- StackGuard::InitThread(access);
+ isolate_->stack_guard()->InitThread(access);
return false;
}
+ ThreadState* state = per_thread->thread_state();
char* from = state->data();
- from = HandleScopeImplementer::RestoreThread(from);
- from = Top::RestoreThread(from);
- from = Relocatable::RestoreState(from);
+ from = isolate_->handle_scope_implementer()->RestoreThread(from);
+ from = isolate_->RestoreThread(from);
+ from = Relocatable::RestoreState(isolate_, from);
#ifdef ENABLE_DEBUGGER_SUPPORT
- from = Debug::RestoreDebug(from);
+ from = isolate_->debug()->RestoreDebug(from);
#endif
- from = StackGuard::RestoreStackGuard(from);
- from = RegExpStack::RestoreStack(from);
- from = Bootstrapper::RestoreState(from);
- Thread::SetThreadLocal(thread_state_key, NULL);
+ from = isolate_->stack_guard()->RestoreStackGuard(from);
+ from = isolate_->regexp_stack()->RestoreStack(from);
+ from = isolate_->bootstrapper()->RestoreState(from);
+ per_thread->set_thread_state(NULL);
if (state->terminate_on_restore()) {
- StackGuard::TerminateExecution();
+ isolate_->stack_guard()->TerminateExecution();
state->set_terminate_on_restore(false);
}
- state->set_id(kInvalidId);
+ state->set_id(ThreadId::Invalid());
state->Unlink();
state->LinkInto(ThreadState::FREE_LIST);
return true;
@@ -179,20 +206,20 @@ bool ThreadManager::RestoreThread() {
void ThreadManager::Lock() {
mutex_->Lock();
- mutex_owner_.Initialize(ThreadHandle::SELF);
+ mutex_owner_ = ThreadId::Current();
ASSERT(IsLockedByCurrentThread());
}
void ThreadManager::Unlock() {
- mutex_owner_.Initialize(ThreadHandle::INVALID);
+ mutex_owner_ = ThreadId::Invalid();
mutex_->Unlock();
}
static int ArchiveSpacePerThread() {
return HandleScopeImplementer::ArchiveSpacePerThread() +
- Top::ArchiveSpacePerThread() +
+ Isolate::ArchiveSpacePerThread() +
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug::ArchiveSpacePerThread() +
#endif
@@ -203,13 +230,12 @@ static int ArchiveSpacePerThread() {
}
-ThreadState* ThreadState::free_anchor_ = new ThreadState();
-ThreadState* ThreadState::in_use_anchor_ = new ThreadState();
-
-
-ThreadState::ThreadState() : id_(ThreadManager::kInvalidId),
- terminate_on_restore_(false),
- next_(this), previous_(this) {
+ThreadState::ThreadState(ThreadManager* thread_manager)
+ : id_(ThreadId::Invalid()),
+ terminate_on_restore_(false),
+ next_(this),
+ previous_(this),
+ thread_manager_(thread_manager) {
}
@@ -226,7 +252,8 @@ void ThreadState::Unlink() {
void ThreadState::LinkInto(List list) {
ThreadState* flying_anchor =
- list == FREE_LIST ? free_anchor_ : in_use_anchor_;
+ list == FREE_LIST ? thread_manager_->free_anchor_
+ : thread_manager_->in_use_anchor_;
next_ = flying_anchor->next_;
previous_ = flying_anchor;
flying_anchor->next_ = this;
@@ -234,10 +261,10 @@ void ThreadState::LinkInto(List list) {
}
-ThreadState* ThreadState::GetFree() {
+ThreadState* ThreadManager::GetFreeThreadState() {
ThreadState* gotten = free_anchor_->next_;
if (gotten == free_anchor_) {
- ThreadState* new_thread_state = new ThreadState();
+ ThreadState* new_thread_state = new ThreadState(this);
new_thread_state->AllocateSpace();
return new_thread_state;
}
@@ -246,13 +273,13 @@ ThreadState* ThreadState::GetFree() {
// Gets the first in the list of archived threads.
-ThreadState* ThreadState::FirstInUse() {
+ThreadState* ThreadManager::FirstThreadStateInUse() {
return in_use_anchor_->Next();
}
ThreadState* ThreadState::Next() {
- if (next_ == in_use_anchor_) return NULL;
+ if (next_ == thread_manager_->in_use_anchor_) return NULL;
return next_;
}
@@ -260,144 +287,140 @@ ThreadState* ThreadState::Next() {
// Thread ids must start with 1, because in TLS having thread id 0 can't
// be distinguished from not having a thread id at all (since NULL is
// defined as 0).
-int ThreadManager::last_id_ = 0;
-Mutex* ThreadManager::mutex_ = OS::CreateMutex();
-ThreadHandle ThreadManager::mutex_owner_(ThreadHandle::INVALID);
-ThreadHandle ThreadManager::lazily_archived_thread_(ThreadHandle::INVALID);
-ThreadState* ThreadManager::lazily_archived_thread_state_ = NULL;
+ThreadManager::ThreadManager()
+ : mutex_(OS::CreateMutex()),
+ mutex_owner_(ThreadId::Invalid()),
+ lazily_archived_thread_(ThreadId::Invalid()),
+ lazily_archived_thread_state_(NULL),
+ free_anchor_(NULL),
+ in_use_anchor_(NULL) {
+ free_anchor_ = new ThreadState(this);
+ in_use_anchor_ = new ThreadState(this);
+}
+
+
+ThreadManager::~ThreadManager() {
+ // TODO(isolates): Destroy mutexes.
+}
void ThreadManager::ArchiveThread() {
- ASSERT(!lazily_archived_thread_.IsValid());
+ ASSERT(lazily_archived_thread_.Equals(ThreadId::Invalid()));
ASSERT(!IsArchived());
- ThreadState* state = ThreadState::GetFree();
+ ASSERT(IsLockedByCurrentThread());
+ ThreadState* state = GetFreeThreadState();
state->Unlink();
- Thread::SetThreadLocal(thread_state_key, reinterpret_cast<void*>(state));
- lazily_archived_thread_.Initialize(ThreadHandle::SELF);
+ Isolate::PerIsolateThreadData* per_thread =
+ isolate_->FindOrAllocatePerThreadDataForThisThread();
+ per_thread->set_thread_state(state);
+ lazily_archived_thread_ = ThreadId::Current();
lazily_archived_thread_state_ = state;
- ASSERT(state->id() == kInvalidId);
+ ASSERT(state->id().Equals(ThreadId::Invalid()));
state->set_id(CurrentId());
- ASSERT(state->id() != kInvalidId);
+ ASSERT(!state->id().Equals(ThreadId::Invalid()));
}
void ThreadManager::EagerlyArchiveThread() {
+ ASSERT(IsLockedByCurrentThread());
ThreadState* state = lazily_archived_thread_state_;
state->LinkInto(ThreadState::IN_USE_LIST);
char* to = state->data();
// Ensure that data containing GC roots are archived first, and handle them
// in ThreadManager::Iterate(ObjectVisitor*).
- to = HandleScopeImplementer::ArchiveThread(to);
- to = Top::ArchiveThread(to);
- to = Relocatable::ArchiveState(to);
+ to = isolate_->handle_scope_implementer()->ArchiveThread(to);
+ to = isolate_->ArchiveThread(to);
+ to = Relocatable::ArchiveState(isolate_, to);
#ifdef ENABLE_DEBUGGER_SUPPORT
- to = Debug::ArchiveDebug(to);
+ to = isolate_->debug()->ArchiveDebug(to);
#endif
- to = StackGuard::ArchiveStackGuard(to);
- to = RegExpStack::ArchiveStack(to);
- to = Bootstrapper::ArchiveState(to);
- lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
+ to = isolate_->stack_guard()->ArchiveStackGuard(to);
+ to = isolate_->regexp_stack()->ArchiveStack(to);
+ to = isolate_->bootstrapper()->ArchiveState(to);
+ lazily_archived_thread_ = ThreadId::Invalid();
lazily_archived_thread_state_ = NULL;
}
void ThreadManager::FreeThreadResources() {
- HandleScopeImplementer::FreeThreadResources();
- Top::FreeThreadResources();
+ isolate_->handle_scope_implementer()->FreeThreadResources();
+ isolate_->FreeThreadResources();
#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug::FreeThreadResources();
+ isolate_->debug()->FreeThreadResources();
#endif
- StackGuard::FreeThreadResources();
- RegExpStack::FreeThreadResources();
- Bootstrapper::FreeThreadResources();
+ isolate_->stack_guard()->FreeThreadResources();
+ isolate_->regexp_stack()->FreeThreadResources();
+ isolate_->bootstrapper()->FreeThreadResources();
}
bool ThreadManager::IsArchived() {
- return Thread::HasThreadLocal(thread_state_key);
+ Isolate::PerIsolateThreadData* data =
+ isolate_->FindPerThreadDataForThisThread();
+ return data != NULL && data->thread_state() != NULL;
}
-
void ThreadManager::Iterate(ObjectVisitor* v) {
// Expecting no threads during serialization/deserialization
- for (ThreadState* state = ThreadState::FirstInUse();
+ for (ThreadState* state = FirstThreadStateInUse();
state != NULL;
state = state->Next()) {
char* data = state->data();
data = HandleScopeImplementer::Iterate(v, data);
- data = Top::Iterate(v, data);
+ data = isolate_->Iterate(v, data);
data = Relocatable::Iterate(v, data);
}
}
void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
- for (ThreadState* state = ThreadState::FirstInUse();
+ for (ThreadState* state = FirstThreadStateInUse();
state != NULL;
state = state->Next()) {
char* data = state->data();
data += HandleScopeImplementer::ArchiveSpacePerThread();
- Top::IterateThread(v, data);
- }
-}
-
-
-int ThreadManager::CurrentId() {
- return Thread::GetThreadLocalInt(thread_id_key);
-}
-
-
-void ThreadManager::AssignId() {
- if (!HasId()) {
- ASSERT(Locker::IsLocked());
- int thread_id = ++last_id_;
- ASSERT(thread_id > 0); // see the comment near last_id_ definition.
- Thread::SetThreadLocalInt(thread_id_key, thread_id);
- Top::set_thread_id(thread_id);
+ isolate_->IterateThread(v, data);
}
}
-bool ThreadManager::HasId() {
- return Thread::HasThreadLocal(thread_id_key);
+ThreadId ThreadManager::CurrentId() {
+ return ThreadId::Current();
}
-void ThreadManager::TerminateExecution(int thread_id) {
- for (ThreadState* state = ThreadState::FirstInUse();
+void ThreadManager::TerminateExecution(ThreadId thread_id) {
+ for (ThreadState* state = FirstThreadStateInUse();
state != NULL;
state = state->Next()) {
- if (thread_id == state->id()) {
+ if (thread_id.Equals(state->id())) {
state->set_terminate_on_restore(true);
}
}
}
-// This is the ContextSwitcher singleton. There is at most a single thread
-// running which delivers preemption events to V8 threads.
-ContextSwitcher* ContextSwitcher::singleton_ = NULL;
-
-
-ContextSwitcher::ContextSwitcher(int every_n_ms)
+ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
: Thread("v8:CtxtSwitcher"),
keep_going_(true),
- sleep_ms_(every_n_ms) {
+ sleep_ms_(every_n_ms),
+ isolate_(isolate) {
}
// Set the scheduling interval of V8 threads. This function starts the
// ContextSwitcher thread if needed.
void ContextSwitcher::StartPreemption(int every_n_ms) {
- ASSERT(Locker::IsLocked());
- if (singleton_ == NULL) {
+ Isolate* isolate = Isolate::Current();
+ ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
+ if (isolate->context_switcher() == NULL) {
// If the ContextSwitcher thread is not running at the moment start it now.
- singleton_ = new ContextSwitcher(every_n_ms);
- singleton_->Start();
+ isolate->set_context_switcher(new ContextSwitcher(isolate, every_n_ms));
+ isolate->context_switcher()->Start();
} else {
// ContextSwitcher thread is already running, so we just change the
// scheduling interval.
- singleton_->sleep_ms_ = every_n_ms;
+ isolate->context_switcher()->sleep_ms_ = every_n_ms;
}
}
@@ -405,15 +428,17 @@ void ContextSwitcher::StartPreemption(int every_n_ms) {
// Disable preemption of V8 threads. If multiple threads want to use V8 they
// must cooperatively schedule amongst them from this point on.
void ContextSwitcher::StopPreemption() {
- ASSERT(Locker::IsLocked());
- if (singleton_ != NULL) {
+ Isolate* isolate = Isolate::Current();
+ ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
+ if (isolate->context_switcher() != NULL) {
// The ContextSwitcher thread is running. We need to stop it and release
// its resources.
- singleton_->keep_going_ = false;
- singleton_->Join(); // Wait for the ContextSwitcher thread to exit.
+ isolate->context_switcher()->keep_going_ = false;
+ // Wait for the ContextSwitcher thread to exit.
+ isolate->context_switcher()->Join();
// Thread has exited, now we can delete it.
- delete(singleton_);
- singleton_ = NULL;
+ delete(isolate->context_switcher());
+ isolate->set_context_switcher(NULL);
}
}
@@ -423,7 +448,7 @@ void ContextSwitcher::StopPreemption() {
void ContextSwitcher::Run() {
while (keep_going_) {
OS::Sleep(sleep_ms_);
- StackGuard::Preempt();
+ isolate()->stack_guard()->Preempt();
}
}
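The Locker/Unlocker rework above swaps the single process-wide lock for a per-isolate thread manager, but the RAII shape is unchanged: Locker acquires on construction and releases on destruction, and Unlocker inverts that so a thread can drop the lock around blocking work. A stripped-down sketch of the pairing — Isolate is reduced to a bare mutex holder, and thread archiving, the default-isolate fallback, and the has_lock_/top_level_ bookkeeping are all omitted:

#include <cstdio>
#include <mutex>

struct Isolate {
  std::mutex big_lock;  // stands in for the per-isolate "big lock"
};

class Locker {
 public:
  explicit Locker(Isolate* isolate) : isolate_(isolate) {
    isolate_->big_lock.lock();
    std::printf("locked\n");
  }
  ~Locker() {
    std::printf("unlocking\n");
    isolate_->big_lock.unlock();
  }
 private:
  Isolate* isolate_;
};

// Temporarily gives the lock up inside a Locker scope, e.g. around a
// blocking operation, and reacquires it on destruction.
class Unlocker {
 public:
  explicit Unlocker(Isolate* isolate) : isolate_(isolate) {
    isolate_->big_lock.unlock();
    std::printf("temporarily released\n");
  }
  ~Unlocker() {
    isolate_->big_lock.lock();
    std::printf("reacquired\n");
  }
 private:
  Isolate* isolate_;
};

int main() {
  Isolate isolate;
  {
    Locker locker(&isolate);
    // ... run code that requires the lock ...
    {
      Unlocker unlocker(&isolate);
      // ... blocking work without the lock ...
    }
    // The lock is held again here.
  }
  return 0;
}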
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/v8threads.h
index da56d0525..3ba823a72 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/v8threads.h
@@ -34,8 +34,6 @@ namespace internal {
class ThreadState {
public:
- // Iterate over in-use states.
- static ThreadState* FirstInUse();
// Returns NULL after the last one.
ThreadState* Next();
@@ -44,11 +42,9 @@ class ThreadState {
void LinkInto(List list);
void Unlink();
- static ThreadState* GetFree();
-
// Id of thread.
- void set_id(int id) { id_ = id; }
- int id() { return id_; }
+ void set_id(ThreadId id) { id_ = id; }
+ ThreadId id() { return id_; }
// Should the thread be terminated when it is restored?
bool terminate_on_restore() { return terminate_on_restore_; }
@@ -59,23 +55,19 @@ class ThreadState {
// Get data area for archiving a thread.
char* data() { return data_; }
private:
- ThreadState();
+ explicit ThreadState(ThreadManager* thread_manager);
void AllocateSpace();
- int id_;
+ ThreadId id_;
bool terminate_on_restore_;
char* data_;
ThreadState* next_;
ThreadState* previous_;
- // In the following two lists there is always at least one object on the list.
- // The first object is a flying anchor that is only there to simplify linking
- // and unlinking.
- // Head of linked list of free states.
- static ThreadState* free_anchor_;
- // Head of linked list of states in use.
- static ThreadState* in_use_anchor_;
+ ThreadManager* thread_manager_;
+
+ friend class ThreadManager;
};
@@ -86,42 +78,60 @@ class ThreadLocalTop;
class ThreadVisitor {
public:
// ThreadLocalTop may be only available during this call.
- virtual void VisitThread(ThreadLocalTop* top) = 0;
+ virtual void VisitThread(Isolate* isolate, ThreadLocalTop* top) = 0;
protected:
virtual ~ThreadVisitor() {}
};
-class ThreadManager : public AllStatic {
+class ThreadManager {
public:
- static void Lock();
- static void Unlock();
-
- static void ArchiveThread();
- static bool RestoreThread();
- static void FreeThreadResources();
- static bool IsArchived();
+ void Lock();
+ void Unlock();
+
+ void ArchiveThread();
+ bool RestoreThread();
+ void FreeThreadResources();
+ bool IsArchived();
+
+ void Iterate(ObjectVisitor* v);
+ void IterateArchivedThreads(ThreadVisitor* v);
+ bool IsLockedByCurrentThread() {
+ return mutex_owner_.Equals(ThreadId::Current());
+ }
- static void Iterate(ObjectVisitor* v);
- static void IterateArchivedThreads(ThreadVisitor* v);
- static bool IsLockedByCurrentThread() { return mutex_owner_.IsSelf(); }
+ ThreadId CurrentId();
- static int CurrentId();
- static void AssignId();
- static bool HasId();
+ void TerminateExecution(ThreadId thread_id);
- static void TerminateExecution(int thread_id);
+ // Iterate over in-use states.
+ ThreadState* FirstThreadStateInUse();
+ ThreadState* GetFreeThreadState();
- static const int kInvalidId = -1;
private:
- static void EagerlyArchiveThread();
+ ThreadManager();
+ ~ThreadManager();
- static int last_id_; // V8 threads are identified through an integer.
- static Mutex* mutex_;
- static ThreadHandle mutex_owner_;
- static ThreadHandle lazily_archived_thread_;
- static ThreadState* lazily_archived_thread_state_;
+ void EagerlyArchiveThread();
+
+ Mutex* mutex_;
+ ThreadId mutex_owner_;
+ ThreadId lazily_archived_thread_;
+ ThreadState* lazily_archived_thread_state_;
+
+ // In the following two lists there is always at least one object on the list.
+ // The first object is a flying anchor that is only there to simplify linking
+ // and unlinking.
+ // Head of linked list of free states.
+ ThreadState* free_anchor_;
+ // Head of linked list of states in use.
+ ThreadState* in_use_anchor_;
+
+ Isolate* isolate_;
+
+ friend class Isolate;
+ friend class ThreadState;
};
@@ -142,14 +152,15 @@ class ContextSwitcher: public Thread {
static void PreemptionReceived();
private:
- explicit ContextSwitcher(int every_n_ms);
+ ContextSwitcher(Isolate* isolate, int every_n_ms);
+
+ Isolate* isolate() const { return isolate_; }
void Run();
bool keep_going_;
int sleep_ms_;
-
- static ContextSwitcher* singleton_;
+ Isolate* isolate_;
};
} } // namespace v8::internal
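free_anchor_ and in_use_anchor_, now per-ThreadManager members, are the 'flying anchors' described in the comment above: sentinel nodes of circular doubly-linked lists kept only so that LinkInto and Unlink never have to special-case an empty list or a NULL neighbour. A self-contained miniature of that list discipline:

#include <cstdio>

class Node {
 public:
  explicit Node(int id = -1) : id_(id), next_(this), previous_(this) {}

  // Insert this node right after the anchor; no NULL checks needed
  // because the ring always contains at least the anchor itself.
  void LinkInto(Node* anchor) {
    next_ = anchor->next_;
    previous_ = anchor;
    anchor->next_->previous_ = this;
    anchor->next_ = this;
  }

  void Unlink() {
    next_->previous_ = previous_;
    previous_->next_ = next_;
    next_ = this;
    previous_ = this;
  }

  Node* Next(Node* anchor) {  // returns NULL after the last element
    return next_ == anchor ? NULL : next_;
  }

  int id() const { return id_; }

 private:
  int id_;
  Node* next_;
  Node* previous_;
};

int main() {
  Node anchor;  // sentinel: always on its own ring
  Node a(1), b(2);
  a.LinkInto(&anchor);
  b.LinkInto(&anchor);
  for (Node* n = anchor.Next(&anchor); n != NULL; n = n->Next(&anchor)) {
    std::printf("node %d\n", n->id());  // prints 2 then 1
  }
  return 0;
}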
diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h
index 0aa53cac5..93fc1fda5 100644
--- a/deps/v8/src/v8utils.h
+++ b/deps/v8/src/v8utils.h
@@ -120,7 +120,9 @@ inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
// Memory
// Copies data from |src| to |dst|. The data spans MUST not overlap.
-inline void CopyWords(Object** dst, Object** src, int num_words) {
+template <typename T>
+inline void CopyWords(T* dst, T* src, int num_words) {
+ STATIC_ASSERT(sizeof(T) == kPointerSize);
ASSERT(Min(dst, src) + num_words <= Max(dst, src));
ASSERT(num_words > 0);
@@ -254,51 +256,14 @@ class StringBuilder {
};
-// Custom memcpy implementation for platforms where the standard version
-// may not be good enough.
-#if defined(V8_TARGET_ARCH_IA32)
-
-// The default memcpy on ia32 architectures is generally not as efficient
-// as possible. (If any further ia32 platforms are introduced where the
-// memcpy function is efficient, exclude them from this branch).
-
-typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
-
-// Implemented in codegen-<arch>.cc.
-MemCopyFunction CreateMemCopyFunction();
-
-// Copy memory area to disjoint memory area.
-static inline void MemCopy(void* dest, const void* src, size_t size) {
- static MemCopyFunction memcopy = CreateMemCopyFunction();
- (*memcopy)(dest, src, size);
-#ifdef DEBUG
- CHECK_EQ(0, memcmp(dest, src, size));
-#endif
-}
-
-// Limit below which the extra overhead of the MemCopy function is likely
-// to outweigh the benefits of faster copying.
-static const int kMinComplexMemCopy = 64;
-
-#else // V8_TARGET_ARCH_IA32
-
-static inline void MemCopy(void* dest, const void* src, size_t size) {
- memcpy(dest, src, size);
-}
-
-static const int kMinComplexMemCopy = 256;
-
-#endif // V8_TARGET_ARCH_IA32
-
-
// Copy from ASCII/16bit chars to ASCII/16bit chars.
template <typename sourcechar, typename sinkchar>
static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
sinkchar* limit = dest + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
if (sizeof(*dest) == sizeof(*src)) {
- if (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest))) {
- MemCopy(dest, src, chars * sizeof(*dest));
+ if (chars >= static_cast<int>(OS::kMinComplexMemCopy / sizeof(*dest))) {
+ OS::MemCopy(dest, src, chars * sizeof(*dest));
return;
}
// Number of characters in a uintptr_t.
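
The fast path above now goes through OS::MemCopy, with OS::kMinComplexMemCopy
as the cutoff below which a plain element loop beats a call into the copy
routine. The dispatch, reduced to a sketch (CopyElements is a hypothetical
name; only the two OS symbols are taken from the hunk):

    template <typename T>
    static void CopyElements(T* dest, const T* src, int count) {
      if (count >= static_cast<int>(OS::kMinComplexMemCopy / sizeof(T))) {
        OS::MemCopy(dest, src, count * sizeof(T));       // bulk copy pays off
        return;
      }
      for (int i = 0; i < count; i++) dest[i] = src[i];  // short runs stay inline
    }
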
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index fa7ce1b0c..67150ea13 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -35,26 +35,8 @@ namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
-// Implementation StaticType.
-
-
-const char* StaticType::Type2String(StaticType* type) {
- switch (type->kind_) {
- case UNKNOWN:
- return "UNKNOWN";
- case LIKELY_SMI:
- return "LIKELY_SMI";
- default:
- UNREACHABLE();
- }
- return "UNREACHABLE";
-}
-
-
-// ----------------------------------------------------------------------------
// Implementation Variable.
-
const char* Variable::Mode2String(Mode mode) {
switch (mode) {
case VAR: return "VAR";
@@ -75,32 +57,26 @@ Property* Variable::AsProperty() const {
}
-Slot* Variable::AsSlot() const {
- return rewrite_ == NULL ? NULL : rewrite_->AsSlot();
-}
+Slot* Variable::AsSlot() const { return rewrite_; }
bool Variable::IsStackAllocated() const {
- Slot* slot = AsSlot();
- return slot != NULL && slot->IsStackAllocated();
+ return rewrite_ != NULL && rewrite_->IsStackAllocated();
}
bool Variable::IsParameter() const {
- Slot* s = AsSlot();
- return s != NULL && s->type() == Slot::PARAMETER;
+ return rewrite_ != NULL && rewrite_->type() == Slot::PARAMETER;
}
bool Variable::IsStackLocal() const {
- Slot* s = AsSlot();
- return s != NULL && s->type() == Slot::LOCAL;
+ return rewrite_ != NULL && rewrite_->type() == Slot::LOCAL;
}
bool Variable::IsContextSlot() const {
- Slot* s = AsSlot();
- return s != NULL && s->type() == Slot::CONTEXT;
+ return rewrite_ != NULL && rewrite_->type() == Slot::CONTEXT;
}
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index 5d27a02d5..a9c06d1ee 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -33,46 +33,6 @@
namespace v8 {
namespace internal {
-// Variables and AST expression nodes can track their "type" to enable
-// optimizations and removal of redundant checks when generating code.
-
-class StaticType {
- public:
- enum Kind {
- UNKNOWN,
- LIKELY_SMI
- };
-
- StaticType() : kind_(UNKNOWN) {}
-
- bool Is(Kind kind) const { return kind_ == kind; }
-
- bool IsKnown() const { return !Is(UNKNOWN); }
- bool IsUnknown() const { return Is(UNKNOWN); }
- bool IsLikelySmi() const { return Is(LIKELY_SMI); }
-
- void CopyFrom(StaticType* other) {
- kind_ = other->kind_;
- }
-
- static const char* Type2String(StaticType* type);
-
- // LIKELY_SMI accessors
- void SetAsLikelySmi() {
- kind_ = LIKELY_SMI;
- }
-
- void SetAsLikelySmiIfUnknown() {
- if (IsUnknown()) {
- SetAsLikelySmi();
- }
- }
-
- private:
- Kind kind_;
-};
-
-
// The AST refers to variables via VariableProxies - placeholders for the actual
// variables. Variables themselves are never directly referred to from the AST,
// they are maintained by scopes, and referred to from VariableProxies and Slots
@@ -121,7 +81,7 @@ class Variable: public ZoneObject {
// Printing support
static const char* Mode2String(Mode mode);
- // Type testing & conversion
+ // Type testing & conversion. Global variables are not slots.
Property* AsProperty() const;
Slot* AsSlot() const;
@@ -165,7 +125,7 @@ class Variable: public ZoneObject {
// True if the variable is named eval and not known to be shadowed.
bool is_possibly_eval() const {
- return IsVariable(Factory::eval_symbol()) &&
+ return IsVariable(FACTORY->eval_symbol()) &&
(mode_ == DYNAMIC || mode_ == DYNAMIC_GLOBAL);
}
@@ -178,10 +138,8 @@ class Variable: public ZoneObject {
local_if_not_shadowed_ = local;
}
- Expression* rewrite() const { return rewrite_; }
- void set_rewrite(Expression* expr) { rewrite_ = expr; }
-
- StaticType* type() { return &type_; }
+ Slot* rewrite() const { return rewrite_; }
+ void set_rewrite(Slot* slot) { rewrite_ = slot; }
private:
Scope* scope_;
@@ -191,12 +149,8 @@ class Variable: public ZoneObject {
Variable* local_if_not_shadowed_;
- // Static type information
- StaticType type_;
-
// Code generation.
- // rewrite_ is usually a Slot or a Property, but may be any expression.
- Expression* rewrite_;
+ Slot* rewrite_;
// Valid as a LHS? (const and this are not valid LHS, for example)
bool is_valid_LHS_;
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index f70e40591..be8d453d4 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,16 +33,37 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 1
+#define MINOR_VERSION 4
#define BUILD_NUMBER 8
-#define PATCH_LEVEL 25
-#define CANDIDATE_VERSION false
+#define PATCH_LEVEL 0
+// Use 1 for candidates and 0 otherwise.
+// (Boolean macro values are not supported by all preprocessors.)
+#define IS_CANDIDATE_VERSION 0
// Define SONAME to have the SCons build put a specific SONAME into the
// shared library instead of the generic SONAME generated from the V8 version
// number. This define is mainly used by the SCons build script.
#define SONAME ""
+#if IS_CANDIDATE_VERSION
+#define CANDIDATE_STRING " (candidate)"
+#else
+#define CANDIDATE_STRING ""
+#endif
+
+#define SX(x) #x
+#define S(x) SX(x)
+
+#if PATCH_LEVEL > 0
+#define VERSION_STRING \
+ S(MAJOR_VERSION) "." S(MINOR_VERSION) "." S(BUILD_NUMBER) "." \
+ S(PATCH_LEVEL) CANDIDATE_STRING
+#else
+#define VERSION_STRING \
+ S(MAJOR_VERSION) "." S(MINOR_VERSION) "." S(BUILD_NUMBER) \
+ CANDIDATE_STRING
+#endif
+
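
For the values defined above the expansion is worth tracing once. PATCH_LEVEL
is 0, so the three-part form is selected, and IS_CANDIDATE_VERSION 0 makes
CANDIDATE_STRING empty:

    S(MINOR_VERSION)   // -> SX(4) -> "4"
    VERSION_STRING     // -> "3" "." "4" "." "8" ""  -> "3.4.8"

The SX indirection is what forces the argument to be macro-expanded before
stringization; a bare #x would yield "MINOR_VERSION" instead of "4".
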
namespace v8 {
namespace internal {
@@ -50,9 +71,9 @@ int Version::major_ = MAJOR_VERSION;
int Version::minor_ = MINOR_VERSION;
int Version::build_ = BUILD_NUMBER;
int Version::patch_ = PATCH_LEVEL;
-bool Version::candidate_ = CANDIDATE_VERSION;
+bool Version::candidate_ = (IS_CANDIDATE_VERSION != 0);
const char* Version::soname_ = SONAME;
-
+const char* Version::version_string_ = VERSION_STRING;
// Calculate the V8 version string.
void Version::GetString(Vector<char> str) {
diff --git a/deps/v8/src/version.h b/deps/v8/src/version.h
index c322a2fc0..4b3e7e2bd 100644
--- a/deps/v8/src/version.h
+++ b/deps/v8/src/version.h
@@ -46,13 +46,17 @@ class Version {
// Calculate the SONAME for the V8 shared library.
static void GetSONAME(Vector<char> str);
+ static const char* GetVersion() { return version_string_; }
+
private:
+ // NOTE: can't make these really const because of test-version.cc.
static int major_;
static int minor_;
static int build_;
static int patch_;
static bool candidate_;
static const char* soname_;
+ static const char* version_string_;
// In test-version.cc.
friend void SetVersion(int major, int minor, int build, int patch,
diff --git a/deps/v8/src/virtual-frame-heavy-inl.h b/deps/v8/src/virtual-frame-heavy-inl.h
deleted file mode 100644
index cf12eca62..000000000
--- a/deps/v8/src/virtual-frame-heavy-inl.h
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_HEAVY_INL_H_
-#define V8_VIRTUAL_FRAME_HEAVY_INL_H_
-
-#include "type-info.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "register-allocator-inl.h"
-#include "codegen-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// On entry to a function, the virtual frame already contains the receiver,
-// the parameters, and a return address. All frame elements are in memory.
-VirtualFrame::VirtualFrame()
- : elements_(parameter_count() + local_count() + kPreallocatedElements),
- stack_pointer_(parameter_count() + 1) { // 0-based index of TOS.
- for (int i = 0; i <= stack_pointer_; i++) {
- elements_.Add(FrameElement::MemoryElement(TypeInfo::Unknown()));
- }
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- register_locations_[i] = kIllegalIndex;
- }
-}
-
-
-// When cloned, a frame is a deep copy of the original.
-VirtualFrame::VirtualFrame(VirtualFrame* original)
- : elements_(original->element_count()),
- stack_pointer_(original->stack_pointer_) {
- elements_.AddAll(original->elements_);
- // Copy register locations from original.
- memcpy(&register_locations_,
- original->register_locations_,
- sizeof(register_locations_));
-}
-
-
-void VirtualFrame::PushFrameSlotAt(int index) {
- elements_.Add(CopyElementAt(index));
-}
-
-
-void VirtualFrame::Push(Register reg, TypeInfo info) {
- if (is_used(reg)) {
- int index = register_location(reg);
- FrameElement element = CopyElementAt(index, info);
- elements_.Add(element);
- } else {
- Use(reg, element_count());
- FrameElement element =
- FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED, info);
- elements_.Add(element);
- }
-}
-
-
-bool VirtualFrame::ConstantPoolOverflowed() {
- return FrameElement::ConstantPoolOverflowed();
-}
-
-
-bool VirtualFrame::Equals(VirtualFrame* other) {
-#ifdef DEBUG
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (register_location(i) != other->register_location(i)) {
- return false;
- }
- }
- if (element_count() != other->element_count()) return false;
-#endif
- if (stack_pointer_ != other->stack_pointer_) return false;
- for (int i = 0; i < element_count(); i++) {
- if (!elements_[i].Equals(other->elements_[i])) return false;
- }
-
- return true;
-}
-
-
-void VirtualFrame::SetTypeForLocalAt(int index, TypeInfo info) {
- elements_[local0_index() + index].set_type_info(info);
-}
-
-
-// Make the type of all elements be MEMORY.
-void VirtualFrame::SpillAll() {
- for (int i = 0; i < element_count(); i++) {
- SpillElementAt(i);
- }
-}
-
-
-void VirtualFrame::PrepareForReturn() {
- // Spill all locals. This is necessary to make sure all locals have
- // the right value when breaking at the return site in the debugger.
- for (int i = 0; i < expression_base_index(); i++) {
- SpillElementAt(i);
- }
-}
-
-
-void VirtualFrame::SetTypeForParamAt(int index, TypeInfo info) {
- elements_[param0_index() + index].set_type_info(info);
-}
-
-
-void VirtualFrame::Nip(int num_dropped) {
- ASSERT(num_dropped >= 0);
- if (num_dropped == 0) return;
- Result tos = Pop();
- if (num_dropped > 1) {
- Drop(num_dropped - 1);
- }
- SetElementAt(0, &tos);
-}
-
-
-void VirtualFrame::Push(Smi* value) {
- Push(Handle<Object> (value));
-}
-
-
-int VirtualFrame::register_location(Register reg) {
- return register_locations_[RegisterAllocator::ToNumber(reg)];
-}
-
-
-void VirtualFrame::set_register_location(Register reg, int index) {
- register_locations_[RegisterAllocator::ToNumber(reg)] = index;
-}
-
-
-bool VirtualFrame::is_used(Register reg) {
- return register_locations_[RegisterAllocator::ToNumber(reg)]
- != kIllegalIndex;
-}
-
-
-void VirtualFrame::SetElementAt(int index, Handle<Object> value) {
- Result temp(value);
- SetElementAt(index, &temp);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- return RawCallStub(stub);
-}
-
-
-int VirtualFrame::parameter_count() {
- return cgen()->scope()->num_parameters();
-}
-
-
-int VirtualFrame::local_count() {
- return cgen()->scope()->num_stack_slots();
-}
-
-} } // namespace v8::internal
-
-#endif // V8_VIRTUAL_FRAME_HEAVY_INL_H_
diff --git a/deps/v8/src/virtual-frame-heavy.cc b/deps/v8/src/virtual-frame-heavy.cc
deleted file mode 100644
index 727028005..000000000
--- a/deps/v8/src/virtual-frame-heavy.cc
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void VirtualFrame::SetElementAt(int index, Result* value) {
- int frame_index = element_count() - index - 1;
- ASSERT(frame_index >= 0);
- ASSERT(frame_index < element_count());
- ASSERT(value->is_valid());
- FrameElement original = elements_[frame_index];
-
- // Early exit if the element is the same as the one being set.
- bool same_register = original.is_register()
- && value->is_register()
- && original.reg().is(value->reg());
- bool same_constant = original.is_constant()
- && value->is_constant()
- && original.handle().is_identical_to(value->handle());
- if (same_register || same_constant) {
- value->Unuse();
- return;
- }
-
- InvalidateFrameSlotAt(frame_index);
-
- if (value->is_register()) {
- if (is_used(value->reg())) {
- // The register already appears on the frame. Either the existing
- // register element, or the new element at frame_index, must be made
- // a copy.
- int i = register_location(value->reg());
-
- if (i < frame_index) {
- // The register FrameElement is lower in the frame than the new copy.
- elements_[frame_index] = CopyElementAt(i);
- } else {
- // There was an early bailout for the case of setting a
- // register element to itself.
- ASSERT(i != frame_index);
- elements_[frame_index] = elements_[i];
- elements_[i] = CopyElementAt(frame_index);
- if (elements_[frame_index].is_synced()) {
- elements_[i].set_sync();
- }
- elements_[frame_index].clear_sync();
- set_register_location(value->reg(), frame_index);
- for (int j = i + 1; j < element_count(); j++) {
- if (elements_[j].is_copy() && elements_[j].index() == i) {
- elements_[j].set_index(frame_index);
- }
- }
- }
- } else {
- // The register value->reg() was not already used on the frame.
- Use(value->reg(), frame_index);
- elements_[frame_index] =
- FrameElement::RegisterElement(value->reg(),
- FrameElement::NOT_SYNCED,
- value->type_info());
- }
- } else {
- ASSERT(value->is_constant());
- elements_[frame_index] =
- FrameElement::ConstantElement(value->handle(),
- FrameElement::NOT_SYNCED);
- }
- value->Unuse();
-}
-
-
-// Create a duplicate of an existing valid frame element.
-// We can pass an optional number type information that will override the
-// existing information about the backing element. The new information must
-// not conflict with the existing type information and must be equally or
-// more precise. The default parameter value kUninitialized means that there
-// is no additional information.
-FrameElement VirtualFrame::CopyElementAt(int index, TypeInfo info) {
- ASSERT(index >= 0);
- ASSERT(index < element_count());
-
- FrameElement target = elements_[index];
- FrameElement result;
-
- switch (target.type()) {
- case FrameElement::CONSTANT:
- // We do not copy constants and instead return a fresh unsynced
- // constant.
- result = FrameElement::ConstantElement(target.handle(),
- FrameElement::NOT_SYNCED);
- break;
-
- case FrameElement::COPY:
- // We do not allow copies of copies, so we follow one link to
- // the actual backing store of a copy before making a copy.
- index = target.index();
- ASSERT(elements_[index].is_memory() || elements_[index].is_register());
- // Fall through.
-
- case FrameElement::MEMORY: // Fall through.
- case FrameElement::REGISTER: {
- // All copies are backed by memory or register locations.
- result.set_type(FrameElement::COPY);
- result.clear_copied();
- result.clear_sync();
- result.set_index(index);
- elements_[index].set_copied();
- // Update backing element's number information.
- TypeInfo existing = elements_[index].type_info();
- ASSERT(!existing.IsUninitialized());
- // Assert that the new type information (a) does not conflict with the
- // existing one and (b) is equally or more precise.
- ASSERT((info.ToInt() & existing.ToInt()) == existing.ToInt());
- ASSERT((info.ToInt() | existing.ToInt()) == info.ToInt());
-
- elements_[index].set_type_info(!info.IsUninitialized()
- ? info
- : existing);
- break;
- }
- case FrameElement::INVALID:
- // We should not try to copy invalid elements.
- UNREACHABLE();
- break;
- }
- return result;
-}
-
-
-// Modify the state of the virtual frame to match the actual frame by adding
-// extra in-memory elements to the top of the virtual frame. The extra
-// elements will be externally materialized on the actual frame (eg, by
-// pushing an exception handler). No code is emitted.
-void VirtualFrame::Adjust(int count) {
- ASSERT(count >= 0);
- ASSERT(stack_pointer_ == element_count() - 1);
-
- for (int i = 0; i < count; i++) {
- elements_.Add(FrameElement::MemoryElement(TypeInfo::Unknown()));
- }
- stack_pointer_ += count;
-}
-
-
-void VirtualFrame::ForgetElements(int count) {
- ASSERT(count >= 0);
- ASSERT(element_count() >= count);
-
- for (int i = 0; i < count; i++) {
- FrameElement last = elements_.RemoveLast();
- if (last.is_register()) {
- // A hack to properly count register references for the code
- // generator's current frame and also for other frames. The
- // same code appears in PrepareMergeTo.
- if (cgen()->frame() == this) {
- Unuse(last.reg());
- } else {
- set_register_location(last.reg(), kIllegalIndex);
- }
- }
- }
-}
-
-
-// Make the type of the element at a given index be MEMORY.
-void VirtualFrame::SpillElementAt(int index) {
- if (!elements_[index].is_valid()) return;
-
- SyncElementAt(index);
- // Number type information is preserved.
- // Copies get their number information from their backing element.
- TypeInfo info;
- if (!elements_[index].is_copy()) {
- info = elements_[index].type_info();
- } else {
- info = elements_[elements_[index].index()].type_info();
- }
- // The element is now in memory. Its copied flag is preserved.
- FrameElement new_element = FrameElement::MemoryElement(info);
- if (elements_[index].is_copied()) {
- new_element.set_copied();
- }
- if (elements_[index].is_untagged_int32()) {
- new_element.set_untagged_int32(true);
- }
- if (elements_[index].is_register()) {
- Unuse(elements_[index].reg());
- }
- elements_[index] = new_element;
-}
-
-
-// Clear the dirty bit for the element at a given index.
-void VirtualFrame::SyncElementAt(int index) {
- if (index <= stack_pointer_) {
- if (!elements_[index].is_synced()) SyncElementBelowStackPointer(index);
- } else if (index == stack_pointer_ + 1) {
- SyncElementByPushing(index);
- } else {
- SyncRange(stack_pointer_ + 1, index);
- }
-}
-
-
-void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) {
- // Perform state changes on this frame that will make merge to the
- // expected frame simpler or else increase the likelihood that his
- // frame will match another.
- for (int i = 0; i < element_count(); i++) {
- FrameElement source = elements_[i];
- FrameElement target = expected->elements_[i];
-
- if (!target.is_valid() ||
- (target.is_memory() && !source.is_memory() && source.is_synced())) {
- // No code needs to be generated to invalidate valid elements.
- // No code needs to be generated to move values to memory if
- // they are already synced. We perform those moves here, before
- // merging.
- if (source.is_register()) {
- // If the frame is the code generator's current frame, we have
- // to decrement both the frame-internal and global register
- // counts.
- if (cgen()->frame() == this) {
- Unuse(source.reg());
- } else {
- set_register_location(source.reg(), kIllegalIndex);
- }
- }
- elements_[i] = target;
- } else if (target.is_register() && !target.is_synced() &&
- !source.is_memory()) {
- // If an element's target is a register that doesn't need to be
- // synced, and the element is not in memory, then the sync state
- // of the element is irrelevant. We clear the sync bit.
- ASSERT(source.is_valid());
- elements_[i].clear_sync();
- }
- }
-}
-
-
-void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) {
- ASSERT(height() >= dropped_args);
- ASSERT(height() >= spilled_args);
- ASSERT(dropped_args <= spilled_args);
-
- SyncRange(0, element_count() - 1);
- // Spill registers.
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) {
- SpillElementAt(register_location(i));
- }
- }
-
- // Spill the arguments.
- for (int i = element_count() - spilled_args; i < element_count(); i++) {
- if (!elements_[i].is_memory()) {
- SpillElementAt(i);
- }
- }
-
- // Forget the frame elements that will be popped by the call.
- Forget(dropped_args);
-}
-
-
-// If there are any registers referenced only by the frame, spill one.
-Register VirtualFrame::SpillAnyRegister() {
- // Find the leftmost (ordered by register number) register whose only
- // reference is in the frame.
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i) && cgen()->allocator()->count(i) == 1) {
- SpillElementAt(register_location(i));
- ASSERT(!cgen()->allocator()->is_used(i));
- return RegisterAllocator::ToRegister(i);
- }
- }
- return no_reg;
-}
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/virtual-frame-light-inl.h b/deps/v8/src/virtual-frame-light-inl.h
deleted file mode 100644
index 19520a634..000000000
--- a/deps/v8/src/virtual-frame-light-inl.h
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_LIGHT_INL_H_
-#define V8_VIRTUAL_FRAME_LIGHT_INL_H_
-
-#include "codegen.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "type-info.h"
-
-#include "codegen-inl.h"
-#include "jump-target-light-inl.h"
-
-namespace v8 {
-namespace internal {
-
-VirtualFrame::VirtualFrame(InvalidVirtualFrameInitializer* dummy)
- : element_count_(0),
- top_of_stack_state_(NO_TOS_REGISTERS),
- register_allocation_map_(0),
- tos_known_smi_map_(0) { }
-
-
-// On entry to a function, the virtual frame already contains the receiver,
-// the parameters, and a return address. All frame elements are in memory.
-VirtualFrame::VirtualFrame()
- : element_count_(parameter_count() + 2),
- top_of_stack_state_(NO_TOS_REGISTERS),
- register_allocation_map_(0),
- tos_known_smi_map_(0) { }
-
-
-// When cloned, a frame is a deep copy of the original.
-VirtualFrame::VirtualFrame(VirtualFrame* original)
- : element_count_(original->element_count()),
- top_of_stack_state_(original->top_of_stack_state_),
- register_allocation_map_(original->register_allocation_map_),
- tos_known_smi_map_(0) { }
-
-
-bool VirtualFrame::Equals(const VirtualFrame* other) {
- ASSERT(element_count() == other->element_count());
- if (top_of_stack_state_ != other->top_of_stack_state_) return false;
- if (register_allocation_map_ != other->register_allocation_map_) return false;
- if (tos_known_smi_map_ != other->tos_known_smi_map_) return false;
-
- return true;
-}
-
-
-void VirtualFrame::PrepareForReturn() {
- // Don't bother flushing tos registers as returning does not require more
- // access to the expression stack.
- top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-VirtualFrame::RegisterAllocationScope::RegisterAllocationScope(
- CodeGenerator* cgen)
- : cgen_(cgen),
- old_is_spilled_(SpilledScope::is_spilled_) {
- SpilledScope::is_spilled_ = false;
- if (old_is_spilled_) {
- VirtualFrame* frame = cgen->frame();
- if (frame != NULL) {
- frame->AssertIsSpilled();
- }
- }
-}
-
-
-VirtualFrame::RegisterAllocationScope::~RegisterAllocationScope() {
- SpilledScope::is_spilled_ = old_is_spilled_;
- if (old_is_spilled_) {
- VirtualFrame* frame = cgen_->frame();
- if (frame != NULL) {
- frame->SpillAll();
- }
- }
-}
-
-
-CodeGenerator* VirtualFrame::cgen() const {
- return CodeGeneratorScope::Current();
-}
-
-
-MacroAssembler* VirtualFrame::masm() { return cgen()->masm(); }
-
-
-void VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
- if (arg_count != 0) Forget(arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- masm()->CallStub(stub);
-}
-
-
-int VirtualFrame::parameter_count() const {
- return cgen()->scope()->num_parameters();
-}
-
-
-int VirtualFrame::local_count() const {
- return cgen()->scope()->num_stack_slots();
-}
-
-
-int VirtualFrame::frame_pointer() const { return parameter_count() + 3; }
-
-
-int VirtualFrame::context_index() { return frame_pointer() - 1; }
-
-
-int VirtualFrame::function_index() { return frame_pointer() - 2; }
-
-
-int VirtualFrame::local0_index() const { return frame_pointer() + 2; }
-
-
-int VirtualFrame::fp_relative(int index) {
- ASSERT(index < element_count());
- ASSERT(frame_pointer() < element_count()); // FP is on the frame.
- return (frame_pointer() - index) * kPointerSize;
-}
-
-
-int VirtualFrame::expression_base_index() const {
- return local0_index() + local_count();
-}
-
-
-int VirtualFrame::height() const {
- return element_count() - expression_base_index();
-}
-
-
-MemOperand VirtualFrame::LocalAt(int index) {
- ASSERT(0 <= index);
- ASSERT(index < local_count());
- return MemOperand(fp, kLocal0Offset - index * kPointerSize);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_VIRTUAL_FRAME_LIGHT_INL_H_
diff --git a/deps/v8/src/virtual-frame-light.cc b/deps/v8/src/virtual-frame-light.cc
deleted file mode 100644
index bbaaaf5fa..000000000
--- a/deps/v8/src/virtual-frame-light.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void VirtualFrame::Adjust(int count) {
- ASSERT(count >= 0);
- RaiseHeight(count, 0);
-}
-
-
-// If there are any registers referenced only by the frame, spill one.
-Register VirtualFrame::SpillAnyRegister() {
- UNIMPLEMENTED();
- return no_reg;
-}
-
-
-InvalidVirtualFrameInitializer* kInvalidVirtualFrameInitializer = NULL;
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/virtual-frame.cc b/deps/v8/src/virtual-frame.cc
deleted file mode 100644
index 310ff5949..000000000
--- a/deps/v8/src/virtual-frame.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
-// Specialization of List::ResizeAdd to non-inlined version for FrameElements.
-// The function ResizeAdd becomes a real function, whose implementation is the
-// inlined ResizeAddInternal.
-template <>
-void List<FrameElement,
- FreeStoreAllocationPolicy>::ResizeAdd(const FrameElement& element) {
- ResizeAddInternal(element);
-}
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/virtual-frame.h b/deps/v8/src/virtual-frame.h
deleted file mode 100644
index 65d100989..000000000
--- a/deps/v8/src/virtual-frame.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_H_
-#define V8_VIRTUAL_FRAME_H_
-
-#include "frame-element.h"
-#include "macro-assembler.h"
-
-#include "list-inl.h"
-#include "utils.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/virtual-frame-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/virtual-frame-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/virtual-frame-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/virtual-frame-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-// Add() on List is inlined, ResizeAdd() called by Add() is inlined except for
-// Lists of FrameElements, and ResizeAddInternal() is inlined in ResizeAdd().
-template <>
-void List<FrameElement,
- FreeStoreAllocationPolicy>::ResizeAdd(const FrameElement& element);
-} } // namespace v8::internal
-
-#endif // V8_VIRTUAL_FRAME_H_
diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h
index da912b746..1f363de62 100644
--- a/deps/v8/src/vm-state-inl.h
+++ b/deps/v8/src/vm-state-inl.h
@@ -58,25 +58,27 @@ inline const char* StateToString(StateTag state) {
}
}
-VMState::VMState(StateTag tag) : previous_tag_(Top::current_vm_state()) {
+
+VMState::VMState(Isolate* isolate, StateTag tag)
+ : isolate_(isolate), previous_tag_(isolate->current_vm_state()) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_state_changes) {
- LOG(UncheckedStringEvent("Entering", StateToString(tag)));
- LOG(UncheckedStringEvent("From", StateToString(previous_tag_)));
+ LOG(isolate, UncheckedStringEvent("Entering", StateToString(tag)));
+ LOG(isolate, UncheckedStringEvent("From", StateToString(previous_tag_)));
}
#endif
- Top::SetCurrentVMState(tag);
+ isolate_->SetCurrentVMState(tag);
#ifdef ENABLE_HEAP_PROTECTION
if (FLAG_protect_heap) {
if (tag == EXTERNAL) {
// We are leaving V8.
ASSERT(previous_tag_ != EXTERNAL);
- Heap::Protect();
+ isolate_->heap()->Protect();
} else if (previous_tag_ == EXTERNAL) {
// We are entering V8.
- Heap::Unprotect();
+ isolate_->heap()->Unprotect();
}
}
#endif
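
VMState is a classic RAII scope: the constructor saves the current tag and
installs the new one, and the destructor (below) restores the saved tag. A
hypothetical use under the new signature, which now threads the isolate
explicitly:

    {
      VMState state(isolate, EXTERNAL);  // save previous tag, enter EXTERNAL
      CallIntoEmbedderCode();            // any early return still unwinds state
    }                                    // ~VMState() restores the saved tag
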
@@ -86,27 +88,29 @@ VMState::VMState(StateTag tag) : previous_tag_(Top::current_vm_state()) {
VMState::~VMState() {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_state_changes) {
- LOG(UncheckedStringEvent("Leaving",
- StateToString(Top::current_vm_state())));
- LOG(UncheckedStringEvent("To", StateToString(previous_tag_)));
+ LOG(isolate_,
+ UncheckedStringEvent("Leaving",
+ StateToString(isolate_->current_vm_state())));
+ LOG(isolate_,
+ UncheckedStringEvent("To", StateToString(previous_tag_)));
}
#endif // ENABLE_LOGGING_AND_PROFILING
#ifdef ENABLE_HEAP_PROTECTION
- StateTag tag = Top::current_vm_state();
+ StateTag tag = isolate_->current_vm_state();
#endif
- Top::SetCurrentVMState(previous_tag_);
+ isolate_->SetCurrentVMState(previous_tag_);
#ifdef ENABLE_HEAP_PROTECTION
if (FLAG_protect_heap) {
if (tag == EXTERNAL) {
// We are reentering V8.
ASSERT(previous_tag_ != EXTERNAL);
- Heap::Unprotect();
+ isolate_->heap()->Unprotect();
} else if (previous_tag_ == EXTERNAL) {
// We are leaving V8.
- Heap::Protect();
+ isolate_->heap()->Protect();
}
}
#endif // ENABLE_HEAP_PROTECTION
@@ -117,13 +121,13 @@ VMState::~VMState() {
#ifdef ENABLE_LOGGING_AND_PROFILING
-ExternalCallbackScope::ExternalCallbackScope(Address callback)
- : previous_callback_(Top::external_callback()) {
- Top::set_external_callback(callback);
+ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
+ : isolate_(isolate), previous_callback_(isolate->external_callback()) {
+ isolate_->set_external_callback(callback);
}
ExternalCallbackScope::~ExternalCallbackScope() {
- Top::set_external_callback(previous_callback_);
+ isolate_->set_external_callback(previous_callback_);
}
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/deps/v8/src/vm-state.h b/deps/v8/src/vm-state.h
index df7fb30ac..2062340f9 100644
--- a/deps/v8/src/vm-state.h
+++ b/deps/v8/src/vm-state.h
@@ -28,7 +28,8 @@
#ifndef V8_VM_STATE_H_
#define V8_VM_STATE_H_
-#include "top.h"
+#include "allocation.h"
+#include "isolate.h"
namespace v8 {
namespace internal {
@@ -36,15 +37,16 @@ namespace internal {
class VMState BASE_EMBEDDED {
#ifdef ENABLE_VMSTATE_TRACKING
public:
- inline explicit VMState(StateTag tag);
+ inline VMState(Isolate* isolate, StateTag tag);
inline ~VMState();
private:
+ Isolate* isolate_;
StateTag previous_tag_;
#else
public:
- explicit VMState(StateTag state) {}
+ VMState(Isolate* isolate, StateTag state) {}
#endif
};
@@ -52,13 +54,14 @@ class VMState BASE_EMBEDDED {
class ExternalCallbackScope BASE_EMBEDDED {
#ifdef ENABLE_LOGGING_AND_PROFILING
public:
- inline explicit ExternalCallbackScope(Address callback);
+ inline ExternalCallbackScope(Isolate* isolate, Address callback);
inline ~ExternalCallbackScope();
private:
+ Isolate* isolate_;
Address previous_callback_;
#else
public:
- explicit ExternalCallbackScope(Address callback) {}
+ ExternalCallbackScope(Isolate* isolate, Address callback) {}
#endif
};
diff --git a/deps/v8/src/win32-headers.h b/deps/v8/src/win32-headers.h
index b51a38a15..fca5c137e 100644
--- a/deps/v8/src/win32-headers.h
+++ b/deps/v8/src/win32-headers.h
@@ -66,6 +66,7 @@
#endif // __MINGW32__
#ifndef __MINGW32__
#include <dbghelp.h> // For SymLoadModule64 et al.
+#include <errno.h> // For STRUNCATE
#endif // __MINGW32__
#include <limits.h> // For INT_MAX et al.
#include <tlhelp32.h> // For Module32First et al.
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index b082624f4..8db54f075 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -30,7 +30,7 @@
#include "cpu.h"
#include "debug.h"
-#include "memory.h"
+#include "v8memory.h"
namespace v8 {
namespace internal {
@@ -61,9 +61,15 @@ void Assembler::emitw(uint16_t x) {
}
-void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode) {
+void Assembler::emit_code_target(Handle<Code> target,
+ RelocInfo::Mode rmode,
+ unsigned ast_id) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
- RecordRelocInfo(rmode);
+ if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+ RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id);
+ } else {
+ RecordRelocInfo(rmode);
+ }
int current = code_targets_.length();
if (current > 0 && code_targets_.last().is_identical_to(target)) {
// Optimization if we keep jumping to the same code target.
@@ -372,11 +378,12 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitExternalReference(target_reference_address());
CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (Debug::has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
+ // TODO(isolates): Get a cached isolate below.
+ } else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ IsPatchedDebugBreakSlotSequence())) &&
+ Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
@@ -386,25 +393,25 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
template<typename StaticVisitor>
-void RelocInfo::Visit() {
+void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(target_object_address());
+ StaticVisitor::VisitPointer(heap, target_object_address());
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(this);
+ StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(this);
+ StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (Debug::has_break_points() &&
+ } else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(this);
+ StaticVisitor::VisitDebugTarget(heap, this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
StaticVisitor::VisitRuntimeEntry(this);
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 35c05b3ac..745fdaeb8 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -38,21 +38,38 @@ namespace internal {
// -----------------------------------------------------------------------------
// Implementation of CpuFeatures
-// The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
-// fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
-uint64_t CpuFeatures::supported_ = kDefaultCpuFeatures;
-uint64_t CpuFeatures::enabled_ = 0;
+
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
-void CpuFeatures::Probe(bool portable) {
- ASSERT(Heap::HasBeenSetup());
+
+void CpuFeatures::Probe() {
+ ASSERT(!initialized_);
+#ifdef DEBUG
+ initialized_ = true;
+#endif
supported_ = kDefaultCpuFeatures;
- if (portable && Serializer::enabled()) {
+ if (Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
return; // No features if we might serialize.
}
- Assembler assm(NULL, 0);
+ const int kBufferSize = 4 * KB;
+ VirtualMemory* memory = new VirtualMemory(kBufferSize);
+ if (!memory->IsReserved()) {
+ delete memory;
+ return;
+ }
+ ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
+ if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
+ delete memory;
+ return;
+ }
+
+ Assembler assm(NULL, memory->address(), kBufferSize);
Label cpuid, done;
#define __ assm.
// Save old rsp, since we are going to modify the stack.
@@ -82,7 +99,7 @@ void CpuFeatures::Probe(bool portable) {
// ecx:edx. Temporarily enable CPUID support because we know it's
// safe here.
__ bind(&cpuid);
- __ movq(rax, Immediate(1));
+ __ movl(rax, Immediate(1));
supported_ = kDefaultCpuFeatures | (1 << CPUID);
{ Scope fscope(CPUID);
__ cpuid();
@@ -116,28 +133,20 @@ void CpuFeatures::Probe(bool portable) {
__ ret(0);
#undef __
- CodeDesc desc;
- assm.GetCode(&desc);
- MaybeObject* maybe_code = Heap::CreateCode(desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Object>());
- Object* code;
- if (!maybe_code->ToObject(&code)) return;
- if (!code->IsCode()) return;
- PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
- Code::cast(code), "CpuFeatures::Probe"));
typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+ F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
supported_ = probe();
found_by_runtime_probing_ = supported_;
found_by_runtime_probing_ &= ~kDefaultCpuFeatures;
uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
supported_ |= os_guarantees;
- found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
+ found_by_runtime_probing_ &= ~os_guarantees;
// SSE2 and CMOV must be available on an X64 CPU.
ASSERT(IsSupported(CPUID));
ASSERT(IsSupported(SSE2));
ASSERT(IsSupported(CMOV));
+
+ delete memory;
}
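
The probe now runs out of a scratch VirtualMemory block instead of a
heap-allocated Code object, so it works before the heap exists. The pattern,
stripped down to a sketch (error handling as in the hunk; the emitted probe
body is elided):

    VirtualMemory* memory = new VirtualMemory(4 * KB);
    if (memory->IsReserved() &&
        memory->Commit(memory->address(), 4 * KB, true /* executable */)) {
      Assembler assm(NULL, memory->address(), 4 * KB);
      // ... emit the cpuid probe into the committed buffer ...
      typedef uint64_t (*F0)();
      F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
      uint64_t features = probe();  // run the freshly assembled stub once;
                                    // the result would feed supported_
    }
    delete memory;                  // nothing outlives the probe call
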
@@ -191,12 +200,12 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Register constants.
const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
- // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r12
- 0, 3, 2, 1, 7, 8, 9, 11, 14, 12
+ // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
+ 0, 3, 2, 1, 7, 8, 9, 11, 14, 15
};
const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = {
- 0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, 9, -1, 8, -1
+ 0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, -1, -1, 8, 9
};
@@ -335,18 +344,19 @@ bool Operand::AddressUsesRegister(Register reg) const {
static void InitCoverageLog();
#endif
-byte* Assembler::spare_buffer_ = NULL;
-
-Assembler::Assembler(void* buffer, int buffer_size)
- : code_targets_(100), positions_recorder_(this) {
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+ : AssemblerBase(arg_isolate),
+ code_targets_(100),
+ positions_recorder_(this),
+ emit_debug_code_(FLAG_debug_code) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
- if (spare_buffer_ != NULL) {
- buffer = spare_buffer_;
- spare_buffer_ = NULL;
+ if (isolate() != NULL && isolate()->assembler_spare_buffer() != NULL) {
+ buffer = isolate()->assembler_spare_buffer();
+ isolate()->set_assembler_spare_buffer(NULL);
}
}
if (buffer == NULL) {
@@ -378,7 +388,6 @@ Assembler::Assembler(void* buffer, int buffer_size)
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
- last_pc_ = NULL;
#ifdef GENERATED_CODE_COVERAGE
InitCoverageLog();
@@ -388,8 +397,10 @@ Assembler::Assembler(void* buffer, int buffer_size)
Assembler::~Assembler() {
if (own_buffer_) {
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
+ if (isolate() != NULL &&
+ isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
@@ -409,8 +420,6 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->reloc_size =
static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
desc->origin = this;
-
- Counters::reloc_info_size.Increment(desc->reloc_size);
}
@@ -434,7 +443,6 @@ void Assembler::CodeTargetAlign() {
void Assembler::bind_to(Label* L, int pos) {
ASSERT(!L->is_bound()); // Label may only be bound once.
- last_pc_ = NULL;
ASSERT(0 <= pos && pos <= pc_offset()); // Position must be valid.
if (L->is_linked()) {
int current = L->pos();
@@ -450,6 +458,20 @@ void Assembler::bind_to(Label* L, int pos) {
int last_imm32 = pos - (current + sizeof(int32_t));
long_at_put(current, last_imm32);
}
+ while (L->is_near_linked()) {
+ int fixup_pos = L->near_link_pos();
+ int offset_to_next =
+ static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
+ ASSERT(offset_to_next <= 0);
+ int disp = pos - (fixup_pos + sizeof(int8_t));
+ ASSERT(is_int8(disp));
+ set_byte_at(fixup_pos, disp);
+ if (offset_to_next < 0) {
+ L->link_to(fixup_pos + offset_to_next, Label::kNear);
+ } else {
+ L->UnuseNear();
+ }
+ }
L->bind_to(pos);
}
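
The new loop resolves near (8-bit) jumps whose fixups are threaded through the
displacement bytes themselves: each unresolved byte holds the non-positive
offset to the previous fixup in the chain, and 0 terminates it. With
hypothetical positions:

    // pc:    10        25        40      <- L->near_link_pos() == 40
    // byte: [ 0]  <-- [-15] <-- [-15]
    //
    // bind_to(L, pos) walks 40 -> 25 -> 10, overwriting each byte with the
    // real is_int8 displacement to pos before binding the label.
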
@@ -459,20 +481,6 @@ void Assembler::bind(Label* L) {
}
-void Assembler::bind(NearLabel* L) {
- ASSERT(!L->is_bound());
- last_pc_ = NULL;
- while (L->unresolved_branches_ > 0) {
- int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
- int disp = pc_offset() - branch_pos;
- ASSERT(is_int8(disp));
- set_byte_at(branch_pos - sizeof(int8_t), disp);
- L->unresolved_branches_--;
- }
- L->bind_to(pc_offset());
-}
-
-
void Assembler::GrowBuffer() {
ASSERT(buffer_overflow());
if (!own_buffer_) FATAL("external code buffer is too small");
@@ -487,7 +495,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > Heap::MaxOldGenerationSize())) {
+ (desc.buffer_size > HEAP->MaxOldGenerationSize())) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -512,17 +520,16 @@ void Assembler::GrowBuffer() {
reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
+ if (isolate() != NULL &&
+ isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
buffer_ = desc.buffer;
buffer_size_ = desc.buffer_size;
pc_ += pc_delta;
- if (last_pc_ != NULL) {
- last_pc_ += pc_delta;
- }
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
@@ -560,7 +567,6 @@ void Assembler::emit_operand(int code, const Operand& adr) {
void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(reg, op);
emit(opcode);
emit_operand(reg, op);
@@ -569,7 +575,6 @@ void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT((opcode & 0xC6) == 2);
if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
@@ -586,7 +591,6 @@ void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT((opcode & 0xC6) == 2);
if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
@@ -607,7 +611,6 @@ void Assembler::arithmetic_op_16(byte opcode,
Register reg,
const Operand& rm_reg) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(reg, rm_reg);
emit(opcode);
@@ -617,7 +620,6 @@ void Assembler::arithmetic_op_16(byte opcode,
void Assembler::arithmetic_op_32(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT((opcode & 0xC6) == 2);
if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
@@ -636,7 +638,6 @@ void Assembler::arithmetic_op_32(byte opcode,
Register reg,
const Operand& rm_reg) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(reg, rm_reg);
emit(opcode);
emit_operand(reg, rm_reg);
@@ -647,7 +648,6 @@ void Assembler::immediate_arithmetic_op(byte subcode,
Register dst,
Immediate src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
if (is_int8(src.value_)) {
emit(0x83);
@@ -667,7 +667,6 @@ void Assembler::immediate_arithmetic_op(byte subcode,
const Operand& dst,
Immediate src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
if (is_int8(src.value_)) {
emit(0x83);
@@ -685,7 +684,6 @@ void Assembler::immediate_arithmetic_op_16(byte subcode,
Register dst,
Immediate src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66); // Operand size override prefix.
emit_optional_rex_32(dst);
if (is_int8(src.value_)) {
@@ -707,7 +705,6 @@ void Assembler::immediate_arithmetic_op_16(byte subcode,
const Operand& dst,
Immediate src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66); // Operand size override prefix.
emit_optional_rex_32(dst);
if (is_int8(src.value_)) {
@@ -726,7 +723,6 @@ void Assembler::immediate_arithmetic_op_32(byte subcode,
Register dst,
Immediate src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
if (is_int8(src.value_)) {
emit(0x83);
@@ -747,7 +743,6 @@ void Assembler::immediate_arithmetic_op_32(byte subcode,
const Operand& dst,
Immediate src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
if (is_int8(src.value_)) {
emit(0x83);
@@ -765,7 +760,6 @@ void Assembler::immediate_arithmetic_op_8(byte subcode,
const Operand& dst,
Immediate src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
ASSERT(is_int8(src.value_) || is_uint8(src.value_));
emit(0x80);
@@ -778,7 +772,6 @@ void Assembler::immediate_arithmetic_op_8(byte subcode,
Register dst,
Immediate src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (dst.code() > 3) {
// Use 64-bit mode byte registers.
emit_rex_64(dst);
@@ -792,7 +785,6 @@ void Assembler::immediate_arithmetic_op_8(byte subcode,
void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint6(shift_amount.value_)); // illegal shift count
if (shift_amount.value_ == 1) {
emit_rex_64(dst);
@@ -809,7 +801,6 @@ void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
void Assembler::shift(Register dst, int subcode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xD3);
emit_modrm(subcode, dst);
@@ -818,7 +809,6 @@ void Assembler::shift(Register dst, int subcode) {
void Assembler::shift_32(Register dst, int subcode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xD3);
emit_modrm(subcode, dst);
@@ -827,7 +817,6 @@ void Assembler::shift_32(Register dst, int subcode) {
void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint5(shift_amount.value_)); // illegal shift count
if (shift_amount.value_ == 1) {
emit_optional_rex_32(dst);
@@ -844,7 +833,6 @@ void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
void Assembler::bt(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(src, dst);
emit(0x0F);
emit(0xA3);
@@ -854,7 +842,6 @@ void Assembler::bt(const Operand& dst, Register src) {
void Assembler::bts(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(src, dst);
emit(0x0F);
emit(0xAB);
@@ -865,7 +852,6 @@ void Assembler::bts(const Operand& dst, Register src) {
void Assembler::call(Label* L) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// 1110 1000 #32-bit disp.
emit(0xE8);
if (L->is_bound()) {
@@ -884,20 +870,20 @@ void Assembler::call(Label* L) {
}
-void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
+void Assembler::call(Handle<Code> target,
+ RelocInfo::Mode rmode,
+ unsigned ast_id) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// 1110 1000 #32-bit disp.
emit(0xE8);
- emit_code_target(target, rmode);
+ emit_code_target(target, rmode, ast_id);
}
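The new ast_id argument travels to emit_code_target (declared below with a default of kNoASTId), which lets the relocation info tag a call site with the AST node it was compiled from. A hypothetical call site; ic and expr are illustrative names, not part of this patch:

  __ call(ic, RelocInfo::CODE_TARGET, expr->id());  // tagged call site
  __ call(ic, RelocInfo::CODE_TARGET);              // untagged, old behavior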
void Assembler::call(Register adr) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode: FF /2 r64.
emit_optional_rex_32(adr);
emit(0xFF);
@@ -908,7 +894,6 @@ void Assembler::call(Register adr) {
void Assembler::call(const Operand& op) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode: FF /2 m64.
emit_optional_rex_32(op);
emit(0xFF);
@@ -923,7 +908,6 @@ void Assembler::call(const Operand& op) {
void Assembler::call(Address target) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// 1110 1000 #32-bit disp.
emit(0xE8);
Address source = pc_ + 4;
@@ -935,13 +919,16 @@ void Assembler::call(Address target) {
void Assembler::clc() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF8);
}
+void Assembler::cld() {
+ EnsureSpace ensure_space(this);
+ emit(0xFC);
+}
+
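cld clears the direction flag, making subsequent string instructions walk memory upward. A minimal sketch using the rep-move helpers defined later in this file; it assumes rsi, rdi and rcx are already loaded:

  __ cld();       // DF = 0: rsi and rdi increment after each element
  __ repmovsq();  // copy rcx quadwords from [rsi] to [rdi] (F3 48 A5)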
void Assembler::cdq() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x99);
}
@@ -956,7 +943,6 @@ void Assembler::cmovq(Condition cc, Register dst, Register src) {
// 64-bit architecture.
ASSERT(cc >= 0); // Use mov for unconditional moves.
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode: REX.W 0f 40 + cc /r.
emit_rex_64(dst, src);
emit(0x0f);
@@ -973,7 +959,6 @@ void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
}
ASSERT(cc >= 0);
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode: REX.W 0f 40 + cc /r.
emit_rex_64(dst, src);
emit(0x0f);
@@ -990,7 +975,6 @@ void Assembler::cmovl(Condition cc, Register dst, Register src) {
}
ASSERT(cc >= 0);
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode: 0f 40 + cc /r.
emit_optional_rex_32(dst, src);
emit(0x0f);
@@ -1007,7 +991,6 @@ void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
}
ASSERT(cc >= 0);
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode: 0f 40 + cc /r.
emit_optional_rex_32(dst, src);
emit(0x0f);
@@ -1019,7 +1002,6 @@ void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
void Assembler::cmpb_al(Immediate imm8) {
ASSERT(is_int8(imm8.value_) || is_uint8(imm8.value_));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x3c);
emit(imm8.value_);
}
@@ -1028,7 +1010,6 @@ void Assembler::cmpb_al(Immediate imm8) {
void Assembler::cpuid() {
ASSERT(CpuFeatures::IsEnabled(CPUID));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x0F);
emit(0xA2);
}
@@ -1036,7 +1017,6 @@ void Assembler::cpuid() {
void Assembler::cqo() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64();
emit(0x99);
}
@@ -1044,7 +1024,6 @@ void Assembler::cqo() {
void Assembler::decq(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xFF);
emit_modrm(0x1, dst);
@@ -1053,7 +1032,6 @@ void Assembler::decq(Register dst) {
void Assembler::decq(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xFF);
emit_operand(1, dst);
@@ -1062,7 +1040,6 @@ void Assembler::decq(const Operand& dst) {
void Assembler::decl(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xFF);
emit_modrm(0x1, dst);
@@ -1071,7 +1048,6 @@ void Assembler::decl(Register dst) {
void Assembler::decl(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xFF);
emit_operand(1, dst);
@@ -1080,7 +1056,6 @@ void Assembler::decl(const Operand& dst) {
void Assembler::decb(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (dst.code() > 3) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
emit_rex_32(dst);
@@ -1092,7 +1067,6 @@ void Assembler::decb(Register dst) {
void Assembler::decb(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xFE);
emit_operand(1, dst);
@@ -1101,7 +1075,6 @@ void Assembler::decb(const Operand& dst) {
void Assembler::enter(Immediate size) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xC8);
emitw(size.value_); // 16 bit operand, always.
emit(0);
@@ -1110,14 +1083,12 @@ void Assembler::enter(Immediate size) {
void Assembler::hlt() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF4);
}
void Assembler::idivq(Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(src);
emit(0xF7);
emit_modrm(0x7, src);
@@ -1126,7 +1097,6 @@ void Assembler::idivq(Register src) {
void Assembler::idivl(Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(src);
emit(0xF7);
emit_modrm(0x7, src);
@@ -1135,7 +1105,6 @@ void Assembler::idivl(Register src) {
void Assembler::imul(Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(src);
emit(0xF7);
emit_modrm(0x5, src);
@@ -1144,7 +1113,6 @@ void Assembler::imul(Register src) {
void Assembler::imul(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x0F);
emit(0xAF);
@@ -1154,7 +1122,6 @@ void Assembler::imul(Register dst, Register src) {
void Assembler::imul(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x0F);
emit(0xAF);
@@ -1164,7 +1131,6 @@ void Assembler::imul(Register dst, const Operand& src) {
void Assembler::imul(Register dst, Register src, Immediate imm) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
if (is_int8(imm.value_)) {
emit(0x6B);
@@ -1180,7 +1146,6 @@ void Assembler::imul(Register dst, Register src, Immediate imm) {
void Assembler::imull(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xAF);
@@ -1190,7 +1155,6 @@ void Assembler::imull(Register dst, Register src) {
void Assembler::imull(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xAF);
@@ -1200,7 +1164,6 @@ void Assembler::imull(Register dst, const Operand& src) {
void Assembler::imull(Register dst, Register src, Immediate imm) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
if (is_int8(imm.value_)) {
emit(0x6B);
@@ -1216,7 +1179,6 @@ void Assembler::imull(Register dst, Register src, Immediate imm) {
void Assembler::incq(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xFF);
emit_modrm(0x0, dst);
@@ -1225,7 +1187,6 @@ void Assembler::incq(Register dst) {
void Assembler::incq(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xFF);
emit_operand(0, dst);
@@ -1234,7 +1195,6 @@ void Assembler::incq(const Operand& dst) {
void Assembler::incl(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xFF);
emit_operand(0, dst);
@@ -1243,7 +1203,6 @@ void Assembler::incl(const Operand& dst) {
void Assembler::incl(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xFF);
emit_modrm(0, dst);
@@ -1252,12 +1211,11 @@ void Assembler::incl(Register dst) {
void Assembler::int3() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xCC);
}
-void Assembler::j(Condition cc, Label* L) {
+void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
if (cc == always) {
jmp(L);
return;
@@ -1265,7 +1223,6 @@ void Assembler::j(Condition cc, Label* L) {
return;
}
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint4(cc));
if (L->is_bound()) {
const int short_size = 2;
@@ -1282,6 +1239,17 @@ void Assembler::j(Condition cc, Label* L) {
emit(0x80 | cc);
emitl(offs - long_size);
}
+ } else if (distance == Label::kNear) {
+ // 0111 tttn #8-bit disp
+ emit(0x70 | cc);
+ byte disp = 0x00;
+ if (L->is_near_linked()) {
+ int offset = L->near_link_pos() - pc_offset();
+ ASSERT(is_int8(offset));
+ disp = static_cast<byte>(offset & 0xFF);
+ }
+ L->link_to(pc_offset(), Label::kNear);
+ emit(disp);
} else if (L->is_linked()) {
// 0000 1111 1000 tttn #32-bit disp.
emit(0x0F);
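For a bound label in range, the short form is the familiar two-byte encoding; the interesting case is an unbound label passed with Label::kNear, where the displacement byte itself threads the chain of unresolved jumps that bind() later walks and patches. An encoding summary, assuming cc = zero:

  //   j(zero, &l, Label::kNear)  ->  74 rel8      (2 bytes)
  //   j(zero, &l)                ->  0F 84 rel32  (6 bytes)
  // While l is unbound, each short jump's rel8 byte holds the offset back
  // to the previous unresolved near jump (or 0 for the first one).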
@@ -1303,7 +1271,6 @@ void Assembler::j(Condition cc,
Handle<Code> target,
RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint4(cc));
// 0000 1111 1000 tttn #32-bit disp.
emit(0x0F);
@@ -1312,30 +1279,8 @@ void Assembler::j(Condition cc,
}
-void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
+void Assembler::jmp(Label* L, Label::Distance distance) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(0 <= cc && cc < 16);
- if (FLAG_emit_branch_hints && hint != no_hint) emit(hint);
- if (L->is_bound()) {
- const int short_size = 2;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- ASSERT(is_int8(offs - short_size));
- // 0111 tttn #8-bit disp
- emit(0x70 | cc);
- emit((offs - short_size) & 0xFF);
- } else {
- emit(0x70 | cc);
- emit(0x00); // The displacement will be resolved later.
- L->link_to(pc_offset());
- }
-}
-
-
-void Assembler::jmp(Label* L) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
const int short_size = sizeof(int8_t);
const int long_size = sizeof(int32_t);
if (L->is_bound()) {
@@ -1350,7 +1295,17 @@ void Assembler::jmp(Label* L) {
emit(0xE9);
emitl(offs - long_size);
}
- } else if (L->is_linked()) {
+ } else if (distance == Label::kNear) {
+ emit(0xEB);
+ byte disp = 0x00;
+ if (L->is_near_linked()) {
+ int offset = L->near_link_pos() - pc_offset();
+ ASSERT(is_int8(offset));
+ disp = static_cast<byte>(offset & 0xFF);
+ }
+ L->link_to(pc_offset(), Label::kNear);
+ emit(disp);
+ } else if (L->is_linked()) {
// 1110 1001 #32-bit disp.
emit(0xE9);
emitl(L->pos());
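The unconditional form follows the same pattern with one fewer opcode byte:

  //   jmp(&l, Label::kNear)  ->  EB rel8   (2 bytes)
  //   jmp(&l)                ->  E9 rel32  (5 bytes)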
@@ -1368,35 +1323,14 @@ void Assembler::jmp(Label* L) {
void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// 1110 1001 #32-bit disp.
emit(0xE9);
emit_code_target(target, rmode);
}
-void Assembler::jmp(NearLabel* L) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (L->is_bound()) {
- const int short_size = 2;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- ASSERT(is_int8(offs - short_size));
- // 1110 1011 #8-bit disp.
- emit(0xEB);
- emit((offs - short_size) & 0xFF);
- } else {
- emit(0xEB);
- emit(0x00); // The displacement will be resolved later.
- L->link_to(pc_offset());
- }
-}
-
-
void Assembler::jmp(Register target) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode FF/4 r64.
emit_optional_rex_32(target);
emit(0xFF);
@@ -1406,7 +1340,6 @@ void Assembler::jmp(Register target) {
void Assembler::jmp(const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode FF/4 m64.
emit_optional_rex_32(src);
emit(0xFF);
@@ -1416,7 +1349,6 @@ void Assembler::jmp(const Operand& src) {
void Assembler::lea(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x8D);
emit_operand(dst, src);
@@ -1425,7 +1357,6 @@ void Assembler::lea(Register dst, const Operand& src) {
void Assembler::leal(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x8D);
emit_operand(dst, src);
@@ -1434,7 +1365,6 @@ void Assembler::leal(Register dst, const Operand& src) {
void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x48); // REX.W
emit(0xA1);
emitq(reinterpret_cast<uintptr_t>(value), mode);
@@ -1448,15 +1378,18 @@ void Assembler::load_rax(ExternalReference ref) {
void Assembler::leave() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xC9);
}
void Assembler::movb(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_32(dst, src);
+ if (dst.code() > 3) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(dst, src);
+ } else {
+ emit_optional_rex_32(dst, src);
+ }
emit(0x8A);
emit_operand(dst, src);
}
@@ -1464,18 +1397,21 @@ void Assembler::movb(Register dst, const Operand& src) {
void Assembler::movb(Register dst, Immediate imm) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_32(dst);
- emit(0xC6);
- emit_modrm(0x0, dst);
+ if (dst.code() > 3) {
+ emit_rex_32(dst);
+ }
+ emit(0xB0 + dst.low_bits());
emit(imm.value_);
}
void Assembler::movb(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_32(src, dst);
+ if (src.code() > 3) {
+ emit_rex_32(src, dst);
+ } else {
+ emit_optional_rex_32(src, dst);
+ }
emit(0x88);
emit_operand(src, dst);
}
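All three movb rewrites apply the same rule: a byte operation needs a REX prefix only when it touches a register that lacks a legacy 8-bit encoding. A sketch of the rule and the resulting bytes, assuming standard x86-64 encodings:

  // Without a REX prefix, byte-register codes 4-7 mean AH/CH/DH/BH; with
  // any REX prefix (even the empty 0x40) they mean SPL/BPL/SIL/DIL.
  // Hence dst.code() > 3 forces emit_rex_32, while al..dl can skip it:
  //   movb(rax, Operand(rbx, 0));  // 8A 03     (AL, no REX needed)
  //   movb(rsi, Operand(rbx, 0));  // 40 8A 33  (SIL, REX required)
  // The immediate form also switches to the shorter B0+rb encoding:
  //   movb(rax, Immediate(0x12));  // B0 12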
@@ -1483,7 +1419,6 @@ void Assembler::movb(const Operand& dst, Register src) {
void Assembler::movw(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(src, dst);
emit(0x89);
@@ -1493,7 +1428,6 @@ void Assembler::movw(const Operand& dst, Register src) {
void Assembler::movl(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x8B);
emit_operand(dst, src);
@@ -1502,7 +1436,6 @@ void Assembler::movl(Register dst, const Operand& src) {
void Assembler::movl(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (src.low_bits() == 4) {
emit_optional_rex_32(src, dst);
emit(0x89);
@@ -1517,7 +1450,6 @@ void Assembler::movl(Register dst, Register src) {
void Assembler::movl(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(src, dst);
emit(0x89);
emit_operand(src, dst);
@@ -1526,27 +1458,23 @@ void Assembler::movl(const Operand& dst, Register src) {
void Assembler::movl(const Operand& dst, Immediate value) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xC7);
emit_operand(0x0, dst);
- emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
+ emit(value);
}
void Assembler::movl(Register dst, Immediate value) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
- emit(0xC7);
- emit_modrm(0x0, dst);
- emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
+ emit(0xB8 + dst.low_bits());
+ emit(value);
}
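Switching movl's register-immediate form from C7 /0 to B8+rd saves a byte per instruction:

  //   movl(rax, Immediate(1));  // B8 01 00 00 00      (new form, 5 bytes)
  //   old form, for comparison: // C7 C0 01 00 00 00   (6 bytes)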
void Assembler::movq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x8B);
emit_operand(dst, src);
@@ -1555,7 +1483,6 @@ void Assembler::movq(Register dst, const Operand& src) {
void Assembler::movq(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (src.low_bits() == 4) {
emit_rex_64(src, dst);
emit(0x89);
@@ -1570,7 +1497,6 @@ void Assembler::movq(Register dst, Register src) {
void Assembler::movq(Register dst, Immediate value) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xC7);
emit_modrm(0x0, dst);
@@ -1580,7 +1506,6 @@ void Assembler::movq(Register dst, Immediate value) {
void Assembler::movq(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(src, dst);
emit(0x89);
emit_operand(src, dst);
@@ -1592,7 +1517,6 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
// address is not GC safe. Use the handle version instead.
ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
emitq(reinterpret_cast<uintptr_t>(value), rmode);
@@ -1614,7 +1538,6 @@ void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
// value.
}
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
emitq(value, rmode);
@@ -1629,7 +1552,6 @@ void Assembler::movq(Register dst, ExternalReference ref) {
void Assembler::movq(const Operand& dst, Immediate value) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xC7);
emit_operand(0, dst);
@@ -1641,7 +1563,6 @@ void Assembler::movq(const Operand& dst, Immediate value) {
// (as a 32-bit offset sign extended to 64-bit).
void Assembler::movl(const Operand& dst, Label* src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xC7);
emit_operand(0, dst);
@@ -1671,9 +1592,8 @@ void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE);
} else {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(value->IsHeapObject());
- ASSERT(!Heap::InNewSpace(*value));
+ ASSERT(!HEAP->InNewSpace(*value));
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
@@ -1683,7 +1603,6 @@ void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
void Assembler::movsxbq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x0F);
emit(0xBE);
@@ -1693,7 +1612,6 @@ void Assembler::movsxbq(Register dst, const Operand& src) {
void Assembler::movsxwq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x0F);
emit(0xBF);
@@ -1703,7 +1621,6 @@ void Assembler::movsxwq(Register dst, const Operand& src) {
void Assembler::movsxlq(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x63);
emit_modrm(dst, src);
@@ -1712,7 +1629,6 @@ void Assembler::movsxlq(Register dst, Register src) {
void Assembler::movsxlq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x63);
emit_operand(dst, src);
@@ -1721,7 +1637,6 @@ void Assembler::movsxlq(Register dst, const Operand& src) {
void Assembler::movzxbq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB6);
@@ -1731,7 +1646,6 @@ void Assembler::movzxbq(Register dst, const Operand& src) {
void Assembler::movzxbl(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB6);
@@ -1741,7 +1655,6 @@ void Assembler::movzxbl(Register dst, const Operand& src) {
void Assembler::movzxwq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB7);
@@ -1751,7 +1664,6 @@ void Assembler::movzxwq(Register dst, const Operand& src) {
void Assembler::movzxwl(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB7);
@@ -1761,7 +1673,6 @@ void Assembler::movzxwl(Register dst, const Operand& src) {
void Assembler::repmovsb() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3);
emit(0xA4);
}
@@ -1769,7 +1680,6 @@ void Assembler::repmovsb() {
void Assembler::repmovsw() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66); // Operand size override.
emit(0xF3);
emit(0xA4);
@@ -1778,7 +1688,6 @@ void Assembler::repmovsw() {
void Assembler::repmovsl() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3);
emit(0xA5);
}
@@ -1786,7 +1695,6 @@ void Assembler::repmovsl() {
void Assembler::repmovsq() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3);
emit_rex_64();
emit(0xA5);
@@ -1795,7 +1703,6 @@ void Assembler::repmovsq() {
void Assembler::mul(Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(src);
emit(0xF7);
emit_modrm(0x4, src);
@@ -1804,7 +1711,6 @@ void Assembler::mul(Register src) {
void Assembler::neg(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xF7);
emit_modrm(0x3, dst);
@@ -1813,7 +1719,6 @@ void Assembler::neg(Register dst) {
void Assembler::negl(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xF7);
emit_modrm(0x3, dst);
@@ -1822,7 +1727,6 @@ void Assembler::negl(Register dst) {
void Assembler::neg(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xF7);
emit_operand(3, dst);
@@ -1831,14 +1735,12 @@ void Assembler::neg(const Operand& dst) {
void Assembler::nop() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x90);
}
void Assembler::not_(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xF7);
emit_modrm(0x2, dst);
@@ -1847,7 +1749,6 @@ void Assembler::not_(Register dst) {
void Assembler::not_(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xF7);
emit_operand(2, dst);
@@ -1856,7 +1757,6 @@ void Assembler::not_(const Operand& dst) {
void Assembler::notl(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xF7);
emit_modrm(0x2, dst);
@@ -1881,7 +1781,6 @@ void Assembler::nop(int n) {
ASSERT(1 <= n);
ASSERT(n <= 9);
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
switch (n) {
case 1:
emit(0x90);
@@ -1952,7 +1851,6 @@ void Assembler::nop(int n) {
void Assembler::pop(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0x58 | dst.low_bits());
}
@@ -1960,7 +1858,6 @@ void Assembler::pop(Register dst) {
void Assembler::pop(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0x8F);
emit_operand(0, dst);
@@ -1969,14 +1866,12 @@ void Assembler::pop(const Operand& dst) {
void Assembler::popfq() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x9D);
}
void Assembler::push(Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(src);
emit(0x50 | src.low_bits());
}
@@ -1984,7 +1879,6 @@ void Assembler::push(Register src) {
void Assembler::push(const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(src);
emit(0xFF);
emit_operand(6, src);
@@ -1993,7 +1887,6 @@ void Assembler::push(const Operand& src) {
void Assembler::push(Immediate value) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (is_int8(value.value_)) {
emit(0x6A);
emit(value.value_); // Emit low byte of value.
@@ -2006,7 +1899,6 @@ void Assembler::push(Immediate value) {
void Assembler::push_imm32(int32_t imm32) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x68);
emitl(imm32);
}
@@ -2014,14 +1906,12 @@ void Assembler::push_imm32(int32_t imm32) {
void Assembler::pushfq() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x9C);
}
void Assembler::rdtsc() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x0F);
emit(0x31);
}
@@ -2029,7 +1919,6 @@ void Assembler::rdtsc() {
void Assembler::ret(int imm16) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint16(imm16));
if (imm16 == 0) {
emit(0xC3);
@@ -2047,7 +1936,6 @@ void Assembler::setcc(Condition cc, Register reg) {
return;
}
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint4(cc));
if (reg.code() > 3) { // Use x64 byte registers, where different.
emit_rex_32(reg);
@@ -2060,7 +1948,6 @@ void Assembler::setcc(Condition cc, Register reg) {
void Assembler::shld(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(src, dst);
emit(0x0F);
emit(0xA5);
@@ -2070,7 +1957,6 @@ void Assembler::shld(Register dst, Register src) {
void Assembler::shrd(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(src, dst);
emit(0x0F);
emit(0xAD);
@@ -2080,7 +1966,6 @@ void Assembler::shrd(Register dst, Register src) {
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
Register other = src.is(rax) ? dst : src;
emit_rex_64(other);
@@ -2099,7 +1984,6 @@ void Assembler::xchg(Register dst, Register src) {
void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x48); // REX.W
emit(0xA3);
emitq(reinterpret_cast<uintptr_t>(dst), mode);
@@ -2113,7 +1997,6 @@ void Assembler::store_rax(ExternalReference ref) {
void Assembler::testb(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (src.low_bits() == 4) {
emit_rex_32(src, dst);
emit(0x84);
@@ -2132,7 +2015,6 @@ void Assembler::testb(Register dst, Register src) {
void Assembler::testb(Register reg, Immediate mask) {
ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (reg.is(rax)) {
emit(0xA8);
emit(mask.value_); // Low byte emitted.
@@ -2151,7 +2033,6 @@ void Assembler::testb(Register reg, Immediate mask) {
void Assembler::testb(const Operand& op, Immediate mask) {
ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(rax, op);
emit(0xF6);
emit_operand(rax, op); // Operation code 0
@@ -2161,7 +2042,6 @@ void Assembler::testb(const Operand& op, Immediate mask) {
void Assembler::testb(const Operand& op, Register reg) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (reg.code() > 3) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
emit_rex_32(reg, op);
@@ -2175,7 +2055,6 @@ void Assembler::testb(const Operand& op, Register reg) {
void Assembler::testl(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (src.low_bits() == 4) {
emit_optional_rex_32(src, dst);
emit(0x85);
@@ -2195,7 +2074,6 @@ void Assembler::testl(Register reg, Immediate mask) {
return;
}
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (reg.is(rax)) {
emit(0xA9);
emit(mask);
@@ -2215,7 +2093,6 @@ void Assembler::testl(const Operand& op, Immediate mask) {
return;
}
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(rax, op);
emit(0xF7);
emit_operand(rax, op); // Operation code 0
@@ -2225,7 +2102,6 @@ void Assembler::testl(const Operand& op, Immediate mask) {
void Assembler::testq(const Operand& op, Register reg) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(reg, op);
emit(0x85);
emit_operand(reg, op);
@@ -2234,7 +2110,6 @@ void Assembler::testq(const Operand& op, Register reg) {
void Assembler::testq(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (src.low_bits() == 4) {
emit_rex_64(src, dst);
emit(0x85);
@@ -2249,7 +2124,6 @@ void Assembler::testq(Register dst, Register src) {
void Assembler::testq(Register dst, Immediate mask) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (dst.is(rax)) {
emit_rex_64();
emit(0xA9);
@@ -2268,14 +2142,12 @@ void Assembler::testq(Register dst, Immediate mask) {
void Assembler::fld(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xD9, 0xC0, i);
}
void Assembler::fld1() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xE8);
}
@@ -2283,7 +2155,6 @@ void Assembler::fld1() {
void Assembler::fldz() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xEE);
}
@@ -2291,7 +2162,6 @@ void Assembler::fldz() {
void Assembler::fldpi() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xEB);
}
@@ -2299,7 +2169,6 @@ void Assembler::fldpi() {
void Assembler::fldln2() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xED);
}
@@ -2307,7 +2176,6 @@ void Assembler::fldln2() {
void Assembler::fld_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xD9);
emit_operand(0, adr);
@@ -2316,7 +2184,6 @@ void Assembler::fld_s(const Operand& adr) {
void Assembler::fld_d(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDD);
emit_operand(0, adr);
@@ -2325,7 +2192,6 @@ void Assembler::fld_d(const Operand& adr) {
void Assembler::fstp_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xD9);
emit_operand(3, adr);
@@ -2334,7 +2200,6 @@ void Assembler::fstp_s(const Operand& adr) {
void Assembler::fstp_d(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDD);
emit_operand(3, adr);
@@ -2344,14 +2209,12 @@ void Assembler::fstp_d(const Operand& adr) {
void Assembler::fstp(int index) {
ASSERT(is_uint3(index));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDD, 0xD8, index);
}
void Assembler::fild_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(0, adr);
@@ -2360,7 +2223,6 @@ void Assembler::fild_s(const Operand& adr) {
void Assembler::fild_d(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDF);
emit_operand(5, adr);
@@ -2369,7 +2231,6 @@ void Assembler::fild_d(const Operand& adr) {
void Assembler::fistp_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(3, adr);
@@ -2379,7 +2240,6 @@ void Assembler::fistp_s(const Operand& adr) {
void Assembler::fisttp_s(const Operand& adr) {
ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(1, adr);
@@ -2389,7 +2249,6 @@ void Assembler::fisttp_s(const Operand& adr) {
void Assembler::fisttp_d(const Operand& adr) {
ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDD);
emit_operand(1, adr);
@@ -2398,7 +2257,6 @@ void Assembler::fisttp_d(const Operand& adr) {
void Assembler::fist_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(2, adr);
@@ -2407,7 +2265,6 @@ void Assembler::fist_s(const Operand& adr) {
void Assembler::fistp_d(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDF);
emit_operand(7, adr);
@@ -2416,7 +2273,6 @@ void Assembler::fistp_d(const Operand& adr) {
void Assembler::fabs() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xE1);
}
@@ -2424,7 +2280,6 @@ void Assembler::fabs() {
void Assembler::fchs() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xE0);
}
@@ -2432,7 +2287,6 @@ void Assembler::fchs() {
void Assembler::fcos() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xFF);
}
@@ -2440,7 +2294,6 @@ void Assembler::fcos() {
void Assembler::fsin() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xFE);
}
@@ -2448,7 +2301,6 @@ void Assembler::fsin() {
void Assembler::fyl2x() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xF1);
}
@@ -2456,21 +2308,18 @@ void Assembler::fyl2x() {
void Assembler::fadd(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDC, 0xC0, i);
}
void Assembler::fsub(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDC, 0xE8, i);
}
void Assembler::fisub_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDA);
emit_operand(4, adr);
@@ -2479,56 +2328,48 @@ void Assembler::fisub_s(const Operand& adr) {
void Assembler::fmul(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDC, 0xC8, i);
}
void Assembler::fdiv(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDC, 0xF8, i);
}
void Assembler::faddp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDE, 0xC0, i);
}
void Assembler::fsubp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDE, 0xE8, i);
}
void Assembler::fsubrp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDE, 0xE0, i);
}
void Assembler::fmulp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDE, 0xC8, i);
}
void Assembler::fdivp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDE, 0xF8, i);
}
void Assembler::fprem() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xF8);
}
@@ -2536,7 +2377,6 @@ void Assembler::fprem() {
void Assembler::fprem1() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xF5);
}
@@ -2544,14 +2384,12 @@ void Assembler::fprem1() {
void Assembler::fxch(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xD9, 0xC8, i);
}
void Assembler::fincstp() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xF7);
}
@@ -2559,14 +2397,12 @@ void Assembler::fincstp() {
void Assembler::ffree(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDD, 0xC0, i);
}
void Assembler::ftst() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xE4);
}
@@ -2574,14 +2410,12 @@ void Assembler::ftst() {
void Assembler::fucomp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDD, 0xE8, i);
}
void Assembler::fucompp() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xDA);
emit(0xE9);
}
@@ -2589,7 +2423,6 @@ void Assembler::fucompp() {
void Assembler::fucomi(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xDB);
emit(0xE8 + i);
}
@@ -2597,7 +2430,6 @@ void Assembler::fucomi(int i) {
void Assembler::fucomip() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xDF);
emit(0xE9);
}
@@ -2605,7 +2437,6 @@ void Assembler::fucomip() {
void Assembler::fcompp() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xDE);
emit(0xD9);
}
@@ -2613,7 +2444,6 @@ void Assembler::fcompp() {
void Assembler::fnstsw_ax() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xDF);
emit(0xE0);
}
@@ -2621,14 +2451,12 @@ void Assembler::fnstsw_ax() {
void Assembler::fwait() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x9B);
}
void Assembler::frndint() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xFC);
}
@@ -2636,7 +2464,6 @@ void Assembler::frndint() {
void Assembler::fnclex() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xDB);
emit(0xE2);
}
@@ -2646,7 +2473,6 @@ void Assembler::sahf() {
// TODO(X64): Test for presence. Not all 64-bit Intel CPUs have sahf
// in 64-bit mode. Test CpuID.
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x9E);
}
@@ -2662,7 +2488,6 @@ void Assembler::emit_farith(int b1, int b2, int i) {
void Assembler::movd(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2673,7 +2498,6 @@ void Assembler::movd(XMMRegister dst, Register src) {
void Assembler::movd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(src, dst);
emit(0x0F);
@@ -2684,7 +2508,6 @@ void Assembler::movd(Register dst, XMMRegister src) {
void Assembler::movq(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_rex_64(dst, src);
emit(0x0F);
@@ -2695,7 +2518,6 @@ void Assembler::movq(XMMRegister dst, Register src) {
void Assembler::movq(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_rex_64(src, dst);
emit(0x0F);
@@ -2704,10 +2526,26 @@ void Assembler::movq(Register dst, XMMRegister src) {
}
+void Assembler::movq(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ if (dst.low_bits() == 4) {
+ // Avoid unnecessary SIB byte.
+ emit(0xf3);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x7e);
+ emit_sse_operand(dst, src);
+ } else {
+ emit(0x66);
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0xD6);
+ emit_sse_operand(src, dst);
+ }
+}
+
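MOVQ between XMM registers has two legal encodings, and the new overload chooses between them based on the destination register:

  //   F3 0F 7E /r   movq xmm1, xmm2/m64   (load form: dst in the reg field)
  //   66 0F D6 /r   movq xmm2/m64, xmm1   (store form: src in the reg field)
  // Either way the upper 64 bits of the destination register are zeroed.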
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_rex_64(src, dst);
emit(0x0F);
@@ -2717,9 +2555,7 @@ void Assembler::movdqa(const Operand& dst, XMMRegister src) {
void Assembler::movdqa(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_rex_64(dst, src);
emit(0x0F);
@@ -2731,7 +2567,6 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) {
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
ASSERT(is_uint2(imm8));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2744,7 +2579,6 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
void Assembler::movsd(const Operand& dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2); // double
emit_optional_rex_32(src, dst);
emit(0x0F);
@@ -2755,7 +2589,6 @@ void Assembler::movsd(const Operand& dst, XMMRegister src) {
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2); // double
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2766,7 +2599,6 @@ void Assembler::movsd(XMMRegister dst, XMMRegister src) {
void Assembler::movsd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2); // double
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2775,9 +2607,44 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
}
+void Assembler::movaps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ if (src.low_bits() == 4) {
+ // Try to avoid an unnecessary SIB byte.
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x29);
+ emit_sse_operand(src, dst);
+ } else {
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x28);
+ emit_sse_operand(dst, src);
+ }
+}
+
+
+void Assembler::movapd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ if (src.low_bits() == 4) {
+ // Try to avoid an unnecessary SIB byte.
+ emit(0x66);
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x29);
+ emit_sse_operand(src, dst);
+ } else {
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x28);
+ emit_sse_operand(dst, src);
+ }
+}
+
+
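movaps and movapd likewise come in a load form and a store form, which is what lets the code above route whichever register has low_bits() == 4 into the reg field:

  //   0F 28 /r   movaps xmm1, xmm2/m128   (load form: dst in the reg field)
  //   0F 29 /r   movaps xmm2/m128, xmm1   (store form: src in the reg field)
  // movapd is the same pair with a 66 prefix, hence one byte longer.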
void Assembler::movss(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3); // single
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2788,7 +2655,6 @@ void Assembler::movss(XMMRegister dst, const Operand& src) {
void Assembler::movss(const Operand& src, XMMRegister dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3); // single
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2799,7 +2665,6 @@ void Assembler::movss(const Operand& src, XMMRegister dst) {
void Assembler::cvttss2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2810,7 +2675,6 @@ void Assembler::cvttss2si(Register dst, const Operand& src) {
void Assembler::cvttss2si(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2821,7 +2685,6 @@ void Assembler::cvttss2si(Register dst, XMMRegister src) {
void Assembler::cvttsd2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2832,7 +2695,6 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) {
void Assembler::cvttsd2si(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2843,7 +2705,6 @@ void Assembler::cvttsd2si(Register dst, XMMRegister src) {
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_rex_64(dst, src);
emit(0x0F);
@@ -2854,7 +2715,6 @@ void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2865,7 +2725,6 @@ void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2876,7 +2735,6 @@ void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2887,7 +2745,6 @@ void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_rex_64(dst, src);
emit(0x0F);
@@ -2898,7 +2755,6 @@ void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2909,7 +2765,6 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2920,7 +2775,6 @@ void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2931,7 +2785,6 @@ void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
void Assembler::cvtsd2si(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2942,7 +2795,6 @@ void Assembler::cvtsd2si(Register dst, XMMRegister src) {
void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_rex_64(dst, src);
emit(0x0F);
@@ -2953,7 +2805,6 @@ void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2964,7 +2815,6 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2975,7 +2825,6 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2986,7 +2835,6 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) {
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2997,7 +2845,6 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
void Assembler::andpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -3008,7 +2855,6 @@ void Assembler::andpd(XMMRegister dst, XMMRegister src) {
void Assembler::orpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -3019,7 +2865,6 @@ void Assembler::orpd(XMMRegister dst, XMMRegister src) {
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -3028,9 +2873,17 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x57);
+ emit_sse_operand(dst, src);
+}
+
+
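xorps is the prefix-free sibling of xorpd, so it is one byte shorter while performing the same full-width bitwise XOR. Its usual application is zeroing a register:

  __ xorps(xmm0, xmm0);  // 0F 57 C0: xmm0 = 0, regardless of its old value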
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -3041,7 +2894,6 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0f);
@@ -3052,7 +2904,6 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0f);
@@ -3061,9 +2912,23 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
}
+void Assembler::roundsd(XMMRegister dst, XMMRegister src,
+ Assembler::RoundingMode mode) {
+ ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x3a);
+ emit(0x0b);
+ emit_sse_operand(dst, src);
+  // Mask the precision exception.
+ emit(static_cast<byte>(mode) | 0x8);
+}
+
+
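roundsd is the SSE4.1 ROUNDSD instruction (66 0F 3A 0B /r ib): the low two bits of the immediate select the rounding mode, and bit 3, the 0x8 ORed in above, masks the precision (inexact) exception. A usage sketch following the feature-check pattern used elsewhere in this file:

  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatures::Scope enable(SSE4_1);
    __ roundsd(xmm0, xmm1, Assembler::kRoundDown);  // xmm0 = floor(xmm1)
  }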
void Assembler::movmskpd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0f);
@@ -3114,7 +2979,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
Serializer::TooLateToEnableNow();
}
#endif
- if (!Serializer::enabled() && !FLAG_debug_code) {
+ if (!Serializer::enabled() && !emit_debug_code()) {
return;
}
}
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index f6cd57093..2971db845 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -93,8 +93,8 @@ struct Register {
// rbp - frame pointer
// rsi - context register
// r10 - fixed scratch register
+ // r12 - smi constant register
// r13 - root register
- // r15 - smi constant register
static const int kNumRegisters = 16;
static const int kNumAllocatableRegisters = 10;
@@ -120,7 +120,7 @@ struct Register {
"r9",
"r11",
"r14",
- "r12"
+ "r15"
};
return names[index];
}
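Trading r12 for r15 as the smi constant register is consistent with r12's encoding cost: like rsp, r12 used as a base register always drags in a SIB byte, making it the least attractive register to hand to the allocator. A sketch of the difference, assuming standard ModRM/SIB rules:

  //   movq(rax, Operand(r12, 0));  // 49 8B 04 24  (SIB byte forced)
  //   movq(rax, Operand(r15, 0));  // 49 8B 07     (no SIB byte)
  // Pinning r12 to the smi constant frees the cheaper r15 for allocation.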
@@ -327,22 +327,6 @@ inline Condition ReverseCondition(Condition cc) {
}
-enum Hint {
- no_hint = 0,
- not_taken = 0x2e,
- taken = 0x3e
-};
-
-// The result of negating a hint is as if the corresponding condition
-// were negated by NegateCondition. That is, no_hint is mapped to
-// itself and not_taken and taken are mapped to each other.
-inline Hint NegateHint(Hint hint) {
- return (hint == no_hint)
- ? no_hint
- : ((hint == not_taken) ? taken : not_taken);
-}
-
-
// -----------------------------------------------------------------------------
// Machine instruction Immediates
@@ -395,6 +379,13 @@ class Operand BASE_EMBEDDED {
// Does not check the "reg" part of the Operand.
bool AddressUsesRegister(Register reg) const;
+ // Queries related to the size of the generated instruction.
+ // Whether the generated instruction will have a REX prefix.
+ bool requires_rex() const { return rex_ != 0; }
+ // Size of the ModR/M, SIB and displacement parts of the generated
+ // instruction.
+ int operand_size() const { return len_; }
+
private:
byte rex_;
byte buf_[6];
@@ -431,9 +422,11 @@ class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
- static void Probe(bool portable);
+ static void Probe();
+
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
+ ASSERT(initialized_);
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
@@ -441,40 +434,71 @@ class CpuFeatures : public AllStatic {
if (f == SAHF && !FLAG_enable_sahf) return false;
return (supported_ & (V8_UINT64_C(1) << f)) != 0;
}
+
+#ifdef DEBUG
// Check whether a feature is currently enabled.
static bool IsEnabled(CpuFeature f) {
- return (enabled_ & (V8_UINT64_C(1) << f)) != 0;
+ ASSERT(initialized_);
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL) {
+ // When no isolate is available, work as if we're running in
+ // release mode.
+ return IsSupported(f);
+ }
+ uint64_t enabled = isolate->enabled_cpu_features();
+ return (enabled & (V8_UINT64_C(1) << f)) != 0;
}
+#endif
+
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
explicit Scope(CpuFeature f) {
- uint64_t mask = (V8_UINT64_C(1) << f);
+ uint64_t mask = V8_UINT64_C(1) << f;
ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() || (found_by_runtime_probing_ & mask) == 0);
- old_enabled_ = CpuFeatures::enabled_;
- CpuFeatures::enabled_ |= mask;
+ ASSERT(!Serializer::enabled() ||
+ (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+ isolate_ = Isolate::UncheckedCurrent();
+ old_enabled_ = 0;
+ if (isolate_ != NULL) {
+ old_enabled_ = isolate_->enabled_cpu_features();
+ isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+ }
+ }
+ ~Scope() {
+ ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+ if (isolate_ != NULL) {
+ isolate_->set_enabled_cpu_features(old_enabled_);
+ }
}
- ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
private:
+ Isolate* isolate_;
uint64_t old_enabled_;
#else
public:
explicit Scope(CpuFeature f) {}
#endif
};
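Scope now saves and restores the per-isolate enabled-feature mask instead of mutating a process-wide static, and in release builds it compiles away entirely. The intended usage pattern, sketched on the assumption that the CPU was already probed:

  {
    CpuFeatures::Scope fscope(SSE3);  // enable SSE3 for this block (debug only)
    __ fisttp_d(Operand(rsp, 0));     // satisfies the IsEnabled(SSE3) assert
  }  // destructor restores the previous per-isolate mask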
+
private:
// Safe defaults include SSE2 and CMOV for X64. They are always available
// if anyone checks, but no one should need to check.
+ // The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
+ // fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV);
+
+#ifdef DEBUG
+ static bool initialized_;
+#endif
static uint64_t supported_;
- static uint64_t enabled_;
static uint64_t found_by_runtime_probing_;
+
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
-class Assembler : public Malloced {
+class Assembler : public AssemblerBase {
private:
// We check before assembling an instruction that there is sufficient
// space to write an instruction and its relocation information.
@@ -501,9 +525,12 @@ class Assembler : public Malloced {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
- Assembler(void* buffer, int buffer_size);
+ Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
+ // Overrides the default provided by FLAG_debug_code.
+ void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -613,6 +640,7 @@ class Assembler : public Malloced {
void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
+ void push(Handle<Object> handle);
void pop(Register dst);
void pop(const Operand& dst);
@@ -649,7 +677,7 @@ class Assembler : public Malloced {
// Move sign extended immediate to memory location.
void movq(const Operand& dst, Immediate value);
- // New x64 instructions to load a 64-bit immediate into a register.
+ // Instructions to load a 64-bit immediate into a register.
// All 64-bit immediates must have a relocation mode.
void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
@@ -674,7 +702,7 @@ class Assembler : public Malloced {
void repmovsl();
void repmovsq();
- // New x64 instruction to load from an immediate 64-bit pointer into RAX.
+ // Instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode);
void load_rax(ExternalReference ext);
@@ -1109,6 +1137,7 @@ class Assembler : public Malloced {
// Miscellaneous
void clc();
+ void cld();
void cpuid();
void hlt();
void int3();
@@ -1134,12 +1163,13 @@ class Assembler : public Malloced {
// but it may be bound only once.
void bind(Label* L); // binds an unbound label L to the current code position
- void bind(NearLabel* L);
// Calls
// Call near relative 32-bit displacement, relative to next instruction.
void call(Label* L);
- void call(Handle<Code> target, RelocInfo::Mode rmode);
+ void call(Handle<Code> target,
+ RelocInfo::Mode rmode,
+ unsigned ast_id = kNoASTId);
// Calls directly to the given address using a relative offset.
// Should only ever be used in Code objects for calls within the
@@ -1156,7 +1186,8 @@ class Assembler : public Malloced {
// Jumps
// Jump short or near relative.
// Use a 32-bit signed displacement.
- void jmp(Label* L); // unconditional jump to L
+ // Unconditional jump to L
+ void jmp(Label* L, Label::Distance distance = Label::kFar);
void jmp(Handle<Code> target, RelocInfo::Mode rmode);
// Jump near absolute indirect (r64)
@@ -1165,16 +1196,12 @@ class Assembler : public Malloced {
// Jump near absolute indirect (m64)
void jmp(const Operand& src);
- // Short jump
- void jmp(NearLabel* L);
-
// Conditional jumps
- void j(Condition cc, Label* L);
+ void j(Condition cc,
+ Label* L,
+ Label::Distance distance = Label::kFar);
void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
- // Conditional short jump
- void j(Condition cc, NearLabel* L, Hint hint = no_hint);
-
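With NearLabel and branch hints removed, short jumps are requested through an ordinary Label plus an explicit distance. A minimal before/after sketch:

  // Old API: NearLabel done; __ j(zero, &done, taken);
  // New API:
  Label done;
  __ testq(rax, rax);
  __ j(zero, &done, Label::kNear);  // 2-byte 74 xx instead of 6-byte 0F 84
  __ neg(rax);
  __ bind(&done);

The caller is responsible for the target actually being within int8 range; the default Label::kFar keeps the old 32-bit displacement.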
// Floating-point operations
void fld(int i);
@@ -1247,15 +1274,24 @@ class Assembler : public Malloced {
void movd(Register dst, XMMRegister src);
void movq(XMMRegister dst, Register src);
void movq(Register dst, XMMRegister src);
+ void movq(XMMRegister dst, XMMRegister src);
void extractps(Register dst, XMMRegister src, byte imm8);
- void movsd(const Operand& dst, XMMRegister src);
+ // Don't use this unless it's important to keep the
+ // top half of the destination register unchanged.
+  // Use movaps when moving double values and movq for integer
+  // values in xmm registers.
void movsd(XMMRegister dst, XMMRegister src);
+
+ void movsd(const Operand& dst, XMMRegister src);
void movsd(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
void movdqa(XMMRegister dst, const Operand& src);
+ void movapd(XMMRegister dst, XMMRegister src);
+ void movaps(XMMRegister dst, XMMRegister src);
+
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
@@ -1287,11 +1323,21 @@ class Assembler : public Malloced {
void andpd(XMMRegister dst, XMMRegister src);
void orpd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
+ void xorps(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
+ enum RoundingMode {
+ kRoundToNearest = 0x0,
+ kRoundDown = 0x1,
+ kRoundUp = 0x2,
+ kRoundToZero = 0x3
+ };
+
+ void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
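roundsd is SSE4.1-only; a plausible emitter body, assuming the standard 66 0F 3A 0B /r ib encoding and the helper names used elsewhere in this assembler:

    void Assembler::roundsd(XMMRegister dst, XMMRegister src,
                            Assembler::RoundingMode mode) {
      ASSERT(CpuFeatures::IsEnabled(SSE4_1));
      EnsureSpace ensure_space(this);
      emit(0x66);
      emit_optional_rex_32(dst, src);
      emit(0x0F); emit(0x3A); emit(0x0B);
      emit_sse_operand(dst, src);
      // Low two bits select the RoundingMode; bit 3 masks precision exceptions.
      emit(static_cast<byte>(mode) | 0x8);
    }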
void movmskpd(Register dst, XMMRegister src);
// The first argument is the reg field, the second argument is the r/m field.
@@ -1343,6 +1389,9 @@ class Assembler : public Malloced {
static const int kMaximalBufferSize = 512*MB;
static const int kMinimalBufferSize = 4*KB;
+ protected:
+ bool emit_debug_code() const { return emit_debug_code_; }
+
private:
byte* addr_at(int pos) { return buffer_ + pos; }
byte byte_at(int pos) { return buffer_[pos]; }
@@ -1361,7 +1410,9 @@ class Assembler : public Malloced {
inline void emitl(uint32_t x);
inline void emitq(uint64_t x, RelocInfo::Mode rmode);
inline void emitw(uint16_t x);
- inline void emit_code_target(Handle<Code> target, RelocInfo::Mode rmode);
+ inline void emit_code_target(Handle<Code> target,
+ RelocInfo::Mode rmode,
+ unsigned ast_id = kNoASTId);
void emit(Immediate x) { emitl(x.value_); }
// Emits a REX prefix that encodes a 64-bit operand size and
@@ -1536,18 +1587,17 @@ class Assembler : public Malloced {
int buffer_size_;
// True if the assembler owns the buffer, false if buffer is external.
bool own_buffer_;
- // A previously allocated buffer of kMinimalBufferSize bytes, or NULL.
- static byte* spare_buffer_;
// code generation
byte* pc_; // the program counter; moves forward
RelocInfoWriter reloc_info_writer;
List< Handle<Code> > code_targets_;
- // push-pop elimination
- byte* last_pc_;
PositionsRecorder positions_recorder_;
+
+ bool emit_debug_code_;
+
friend class PositionsRecorder;
};
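The new protected emit_debug_code() accessor lets generated checks key off per-assembler state instead of reading the global FLAG_debug_code directly. A hypothetical guard (illustrative names):

    void MacroAssembler::AssertSmi(Register object) {
      if (!emit_debug_code()) return;  // checks vanish unless debug code is on
      Condition is_smi = CheckSmi(object);
      Check(is_smi, "Operand is not a smi");
    }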
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index a2dd6cd42..076398906 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -29,7 +29,7 @@
#if defined(V8_TARGET_ARCH_X64)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
@@ -69,7 +69,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// JumpToExternalReference expects rax to contain the number of arguments
// including the receiver and the extra arguments.
__ addq(rax, Immediate(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id), 1);
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()), 1);
}
@@ -96,9 +96,10 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// rax: number of arguments
__ bind(&non_function_call);
// Set expected number of arguments to zero (not changing rax).
- __ movq(rbx, Immediate(0));
+ __ Set(rbx, 0);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ __ SetCallKind(rcx, CALL_AS_METHOD);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
@@ -127,7 +128,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address();
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
__ movq(kScratchRegister, debug_step_in_fp);
__ cmpq(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
@@ -339,14 +340,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Call the function.
if (is_api_function) {
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- Handle<Code> code = Handle<Code>(
- Builtins::builtin(Builtins::HandleApiCallConstruct));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION);
+ __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+ CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
} else {
ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
// Restore context from the frame.
@@ -360,8 +362,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ JumpIfSmi(rax, &use_receiver);
// If the type of the result (stored in its map) is less than
- // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &exit);
// Throw away the result of the constructor invocation and use the
@@ -379,7 +382,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
__ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ push(rcx);
- __ IncrementCounter(&Counters::constructed_objects, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->constructed_objects(), 1);
__ ret(0);
}
@@ -492,12 +496,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code.
if (is_construct) {
// Expects rdi to hold function pointer.
- __ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+ __ Call(masm->isolate()->builtins()->JSConstructCall(),
RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(rax);
// Function must be in rdi.
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
// Exit the JS frame. Notice that this also removes the empty
@@ -525,17 +530,23 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Push a copy of the function onto the stack.
__ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
__ push(rdi); // Function is also the parameter to the runtime call.
__ CallRuntime(Runtime::kLazyCompile, 1);
+
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore receiver.
__ pop(rdi);
// Tear down temporary frame.
__ LeaveInternalFrame();
// Do a tail-call of the compiled function.
- __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rcx);
+ __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+ __ jmp(rax);
}
@@ -545,17 +556,23 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Push a copy of the function onto the stack.
__ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
__ push(rdi); // Function is also the parameter to the runtime call.
__ CallRuntime(Runtime::kLazyRecompile, 1);
- // Restore function and tear down temporary frame.
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore function.
__ pop(rdi);
+
+ // Tear down temporary frame.
__ LeaveInternalFrame();
// Do a tail-call of the compiled function.
- __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rcx);
+ __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+ __ jmp(rax);
}
@@ -575,15 +592,15 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
// Switch on the state.
- NearLabel not_no_registers, not_tos_rax;
+ Label not_no_registers, not_tos_rax;
__ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
- __ j(not_equal, &not_no_registers);
+ __ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
__ movq(rax, Operand(rsp, 2 * kPointerSize));
__ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
- __ j(not_equal, &not_tos_rax);
+ __ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
__ bind(&not_tos_rax);
@@ -630,7 +647,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ testq(rax, rax);
__ j(not_zero, &done);
__ pop(rbx);
- __ Push(Factory::undefined_value());
+ __ Push(FACTORY->undefined_value());
__ push(rbx);
__ incq(rax);
__ bind(&done);
@@ -657,19 +674,24 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &shift_arguments);
+ // Do not transform the receiver for natives.
+ // SharedFunctionInfo is already loaded into rbx.
+ __ testb(FieldOperand(rbx, SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_zero, &shift_arguments);
+
// Compute the receiver in non-strict mode.
__ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
- __ JumpIfSmi(rbx, &convert_to_object);
+ __ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(equal, &use_global_receiver);
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &convert_to_object);
- __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
- __ j(below_equal, &shift_arguments);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &shift_arguments);
__ bind(&convert_to_object);
__ EnterInternalFrame(); // In order to preserve argument count.
@@ -685,7 +707,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ LeaveInternalFrame();
// Restore the function to rdi.
__ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ jmp(&patch_receiver);
+ __ jmp(&patch_receiver, Label::kNear);
// Use the global receiver object from the called function as the
// receiver.
@@ -733,7 +755,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ j(not_zero, &function);
__ Set(rbx, 0);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ __ SetCallKind(rcx, CALL_AS_METHOD);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&function);
}
@@ -746,13 +769,15 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
__ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ SetCallKind(rcx, CALL_AS_METHOD);
__ cmpq(rax, rbx);
__ j(not_equal,
- Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
ParameterCount expected(0);
- __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION);
+ __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
@@ -821,8 +846,13 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &push_receiver);
+ // Do not transform the receiver for natives.
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, &push_receiver);
+
// Compute the receiver in non-strict mode.
- __ JumpIfSmi(rbx, &call_to_object);
+ __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
@@ -830,17 +860,16 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// If given receiver is already a JavaScript object then there's no
// reason for converting it.
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &call_to_object);
- __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
- __ j(below_equal, &push_receiver);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &push_receiver);
// Convert the receiver to an object.
__ bind(&call_to_object);
__ push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ movq(rbx, rax);
- __ jmp(&push_receiver);
+ __ jmp(&push_receiver, Label::kNear);
// Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
@@ -863,7 +892,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Initialize();
__ Call(ic, RelocInfo::CODE_TARGET);
// It is important that we do not have a test instruction after the
// call. A test instruction after the call is used to indicate that
@@ -886,7 +916,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
ParameterCount actual(rax);
__ SmiToInteger32(rax, rax);
__ movq(rdi, Operand(rbp, kFunctionOffset));
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
__ LeaveInternalFrame();
__ ret(3 * kPointerSize); // remove function, receiver, and arguments
@@ -935,7 +966,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// scratch2: start of next object
__ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
__ Move(FieldOperand(result, JSArray::kPropertiesOffset),
- Factory::empty_fixed_array());
+ FACTORY->empty_fixed_array());
// Field JSArray::kElementsOffset is initialized later.
__ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
@@ -943,7 +974,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// fixed array.
if (initial_capacity == 0) {
__ Move(FieldOperand(result, JSArray::kElementsOffset),
- Factory::empty_fixed_array());
+ FACTORY->empty_fixed_array());
return;
}
@@ -960,7 +991,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// scratch1: elements array
// scratch2: start of next object
__ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
- Factory::fixed_array_map());
+ FACTORY->fixed_array_map());
__ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
Smi::FromInt(initial_capacity));
@@ -968,7 +999,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
static const int kLoopUnfoldLimit = 4;
ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
- __ Move(scratch3, Factory::the_hole_value());
+ __ Move(scratch3, FACTORY->the_hole_value());
if (initial_capacity <= kLoopUnfoldLimit) {
// Use a scratch register here to have only one reloc info when unfolding
// the loop.
@@ -1052,7 +1083,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// array_size: size of array (smi)
__ bind(&allocated);
__ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
- __ Move(elements_array, Factory::empty_fixed_array());
+ __ Move(elements_array, FACTORY->empty_fixed_array());
__ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
// Field JSArray::kElementsOffset is initialized later.
__ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
@@ -1071,7 +1102,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// elements_array_end: start of next object
// array_size: size of array (smi)
__ Move(FieldOperand(elements_array, JSObject::kMapOffset),
- Factory::fixed_array_map());
+ FACTORY->fixed_array_map());
Label not_empty_2, fill_array;
__ SmiTest(array_size);
__ j(not_zero, &not_empty_2);
@@ -1092,7 +1123,7 @@ static void AllocateJSArray(MacroAssembler* masm,
__ bind(&fill_array);
if (fill_with_hole) {
Label loop, entry;
- __ Move(scratch, Factory::the_hole_value());
+ __ Move(scratch, FACTORY->the_hole_value());
__ lea(elements_array, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
__ jmp(&entry);
@@ -1137,7 +1168,8 @@ static void ArrayNativeCode(MacroAssembler* masm,
r8,
kPreallocatedArrayElements,
call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->array_function_native(), 1);
__ movq(rax, rbx);
__ ret(kPointerSize);
@@ -1168,7 +1200,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
r9,
true,
call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1);
+ __ IncrementCounter(counters->array_function_native(), 1);
__ movq(rax, rbx);
__ ret(2 * kPointerSize);
@@ -1190,7 +1222,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
r9,
false,
call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1);
+ __ IncrementCounter(counters->array_function_native(), 1);
// rax: argc
// rbx: JSArray
@@ -1248,7 +1280,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rdi);
if (FLAG_debug_code) {
- // Initial map for the builtin Array function shoud be a map.
+ // Initial map for the builtin Array functions should be maps.
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
ASSERT(kSmiTag == 0);
@@ -1264,8 +1296,8 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Jump to the generic array code in case the specialized code cannot handle
// the construction.
__ bind(&generic_array_code);
- Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
- Handle<Code> array_code(code);
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->ArrayCodeGeneric();
__ Jump(array_code, RelocInfo::CODE_TARGET);
}
@@ -1280,11 +1312,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
Label generic_constructor;
if (FLAG_debug_code) {
- // The array construct code is only set for the builtin Array function which
- // does always have a map.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rbx);
- __ cmpq(rdi, rbx);
- __ Check(equal, "Unexpected Array function");
+ // The array construct code is only set for the builtin and internal
+ // Array functions which always have a map.
// Initial map for the builtin Array function should be a map.
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
@@ -1301,8 +1330,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
- Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
- Handle<Code> generic_construct_stub(code);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
}
@@ -1324,11 +1353,11 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// Push the function on the stack.
__ push(rdi);
- // Preserve the number of arguments on the stack. Must preserve both
- // rax and rbx because these registers are used when copying the
+ // Preserve the number of arguments on the stack. Must preserve rax,
+ // rbx and rcx because these registers are used when copying the
// arguments and the receiver.
- __ Integer32ToSmi(rcx, rax);
- __ push(rcx);
+ __ Integer32ToSmi(r8, rax);
+ __ push(r8);
}
@@ -1352,11 +1381,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : actual number of arguments
// -- rbx : expected number of arguments
+ // -- rcx : call kind information
// -- rdx : code entry to call
// -----------------------------------
Label invoke, dont_adapt_arguments;
- __ IncrementCounter(&Counters::arguments_adaptors, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->arguments_adaptors(), 1);
Label enough, too_few;
__ cmpq(rax, rbx);
@@ -1371,14 +1402,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
- __ movq(rcx, Immediate(-1)); // account for receiver
+ __ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ incq(rcx);
+ __ incq(r8);
__ push(Operand(rax, 0));
__ subq(rax, Immediate(kPointerSize));
- __ cmpq(rcx, rbx);
+ __ cmpq(r8, rbx);
__ j(less, &copy);
__ jmp(&invoke);
}
@@ -1390,23 +1421,23 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
- __ movq(rcx, Immediate(-1)); // account for receiver
+ __ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ incq(rcx);
+ __ incq(r8);
__ push(Operand(rdi, 0));
__ subq(rdi, Immediate(kPointerSize));
- __ cmpq(rcx, rax);
+ __ cmpq(r8, rax);
__ j(less, &copy);
// Fill remaining expected arguments with undefined values.
Label fill;
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ bind(&fill);
- __ incq(rcx);
+ __ incq(r8);
__ push(kScratchRegister);
- __ cmpq(rcx, rbx);
+ __ cmpq(r8, rbx);
__ j(less, &fill);
// Restore function pointer.
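Taken together, the copy and fill loops implement argument adaptation; in rough C++ terms (a sketch, with actual in rax and expected in rbx):

    int n = (actual >= expected) ? expected : actual;
    for (int i = -1; i < n; i++)        // i == -1 is the receiver slot
      push(caller_argument(i));
    for (int i = n; i < expected; i++)  // only reached when actual < expected
      push(undefined);
    // Exactly expected + 1 slots are now on the stack for the callee.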
@@ -1455,17 +1486,17 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
- NearLabel skip;
+ Label skip;
__ SmiCompare(rax, Smi::FromInt(-1));
- __ j(not_equal, &skip);
+ __ j(not_equal, &skip, Label::kNear);
__ ret(0);
// If we decide not to perform on-stack replacement we perform a
// stack guard check to enable interrupts.
__ bind(&stack_check);
- NearLabel ok;
+ Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok);
+ __ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ TailCallStub(&stub);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 9da22bd67..81514d1e9 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -40,15 +40,15 @@ namespace internal {
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in eax.
- NearLabel check_heap_number, call_builtin;
+ Label check_heap_number, call_builtin;
__ SmiTest(rax);
- __ j(not_zero, &check_heap_number);
+ __ j(not_zero, &check_heap_number, Label::kNear);
__ Ret();
__ bind(&check_heap_number);
- __ Move(rbx, Factory::heap_number_map());
- __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, &call_builtin);
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_builtin, Label::kNear);
__ Ret();
__ bind(&call_builtin);
@@ -68,11 +68,15 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Get the function info from the stack.
__ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ int map_index = strict_mode_ == kStrictMode
+ ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
+ : Context::FUNCTION_MAP_INDEX;
+
// Compute the function map in the current global context and set that
// as the map of the allocated object.
__ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
- __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ movq(rcx, Operand(rcx, Context::SlotOffset(map_index)));
__ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
// Initialize the rest of the function. We don't have to update the
@@ -104,7 +108,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ pop(rdx);
__ push(rsi);
__ push(rdx);
- __ Push(Factory::false_value());
+ __ PushRoot(Heap::kFalseValueRootIndex);
__ push(rcx); // Restore return address.
__ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}
@@ -121,18 +125,17 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ movq(rcx, Operand(rsp, 1 * kPointerSize));
// Setup the object header.
- __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
+ __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
// Setup the fixed slots.
__ Set(rbx, 0); // Set to NULL.
__ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
- __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
- __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
+ __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
__ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
- // Copy the global object from the surrounding context.
+ // Copy the global object from the previous context.
__ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
@@ -148,7 +151,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Need to collect. Call into runtime system.
__ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+ __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}
@@ -227,212 +230,71 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
}
+// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
- NearLabel false_result, true_result, not_string;
+ Label false_result, true_result, not_string;
+ const Register map = rdx;
+
__ movq(rax, Operand(rsp, 1 * kPointerSize));
- // 'null' => false.
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ // undefined -> false
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, &false_result);
- // Get the map and type of the heap object.
- // We don't use CmpObjectType because we manipulate the type field.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
+ // Boolean -> its value
+ __ CompareRoot(rax, Heap::kFalseValueRootIndex);
+ __ j(equal, &false_result);
+ __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+ __ j(equal, &true_result);
- // Undetectable => false.
- __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
- __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &false_result);
+ // Smis: 0 -> false, all others -> true
+ __ Cmp(rax, Smi::FromInt(0));
+ __ j(equal, &false_result);
+ __ JumpIfSmi(rax, &true_result);
+
+ // 'null' -> false.
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ j(equal, &false_result, Label::kNear);
- // JavaScript object => true.
- __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
- __ j(above_equal, &true_result);
+ // Get the map of the heap object.
+ __ movq(map, FieldOperand(rax, HeapObject::kMapOffset));
- // String value => false iff empty.
- __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
- __ j(above_equal, &not_string);
- __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
- __ SmiTest(rdx);
- __ j(zero, &false_result);
- __ jmp(&true_result);
+ // Undetectable -> false.
+ __ testb(FieldOperand(map, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, &false_result, Label::kNear);
+
+ // JavaScript object -> true.
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ j(above_equal, &true_result, Label::kNear);
+
+ // String value -> false iff empty.
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &not_string, Label::kNear);
+ __ cmpq(FieldOperand(rax, String::kLengthOffset), Immediate(0));
+ __ j(zero, &false_result, Label::kNear);
+ __ jmp(&true_result, Label::kNear);
__ bind(&not_string);
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &true_result);
- // HeapNumber => false iff +0, -0, or NaN.
+ // HeapNumber -> false iff +0, -0, or NaN.
// These three cases set the zero flag when compared to zero using ucomisd.
- __ xorpd(xmm0, xmm0);
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &true_result, Label::kNear);
+ __ xorps(xmm0, xmm0);
__ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ j(zero, &false_result);
+ __ j(zero, &false_result, Label::kNear);
// Fall through to |true_result|.
- // Return 1/0 for true/false in rax.
+ // Return 1/0 for true/false in tos_.
__ bind(&true_result);
- __ movq(rax, Immediate(1));
+ __ Set(tos_, 1);
__ ret(1 * kPointerSize);
__ bind(&false_result);
- __ Set(rax, 0);
+ __ Set(tos_, 0);
__ ret(1 * kPointerSize);
}
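The branch ladder above is ES5 ToBoolean plus V8's undetectable-object rule; roughly, using V8-ish accessors loosely (a sketch, not the actual runtime code):

    bool ToBooleanSketch(Object* v, Heap* heap) {
      if (v->IsUndefined() || v->IsFalse()) return false;
      if (v->IsTrue()) return true;
      if (v->IsSmi()) return Smi::cast(v)->value() != 0;
      if (v->IsNull()) return false;
      Map* map = HeapObject::cast(v)->map();
      if (map->is_undetectable()) return false;            // e.g. document.all
      if (map->instance_type() >= FIRST_SPEC_OBJECT_TYPE) return true;
      if (map->instance_type() < FIRST_NONSTRING_TYPE)     // strings
        return String::cast(v)->length() != 0;
      if (map == heap->heap_number_map()) {
        double d = HeapNumber::cast(v)->value();
        return d != 0.0 && !isnan(d);                      // +/-0 and NaN -> false
      }
      return true;
    }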
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
- op_name,
- overwrite_name,
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
- args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "",
- static_operands_type_.ToString(),
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (!(left.is(left_arg) && right.is(right_arg))) {
- if (left.is(right_arg) && right.is(left_arg)) {
- if (IsOperationCommutative()) {
- SetArgsReversed();
- } else {
- __ xchg(left, right);
- }
- } else if (left.is(left_arg)) {
- __ movq(right_arg, right);
- } else if (right.is(right_arg)) {
- __ movq(left_arg, left);
- } else if (left.is(right_arg)) {
- if (IsOperationCommutative()) {
- __ movq(left_arg, right);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying left argument.
- __ movq(left_arg, left);
- __ movq(right_arg, right);
- }
- } else if (right.is(left_arg)) {
- if (IsOperationCommutative()) {
- __ movq(right_arg, left);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying right argument.
- __ movq(right_arg, right);
- __ movq(left_arg, left);
- }
- } else {
- // Order of moves is not important.
- __ movq(left_arg, left);
- __ movq(right_arg, right);
- }
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Smi* right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ Push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (left.is(left_arg)) {
- __ Move(right_arg, right);
- } else if (left.is(right_arg) && IsOperationCommutative()) {
- __ Move(left_arg, right);
- SetArgsReversed();
- } else {
- // For non-commutative operations, left and right_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite left before moving
- // it to left_arg.
- __ movq(left_arg, left);
- __ Move(right_arg, right);
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Smi* left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ Push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (right.is(right_arg)) {
- __ Move(left_arg, left);
- } else if (right.is(left_arg) && IsOperationCommutative()) {
- __ Move(right_arg, left);
- SetArgsReversed();
- } else {
- // For non-commutative operations, right and left_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite right before moving
- // it to right_arg.
- __ movq(right_arg, right);
- __ Move(left_arg, left);
- }
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
class FloatingPointHelper : public AllStatic {
public:
// Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
@@ -452,486 +314,198 @@ class FloatingPointHelper : public AllStatic {
// As above, but we know the operands to be numbers. In that case,
// conversion can't fail.
static void LoadNumbersAsIntegers(MacroAssembler* masm);
+
+ // Tries to convert two values to smis losslessly.
+ // This fails if either argument is neither a Smi nor a HeapNumber,
+ // or if it's a HeapNumber with a value that can't be converted
+ // losslessly to a Smi. In that case, control transitions to the
+ // on_not_smis label.
+ // On success, either control goes to the on_success label (if one is
+ // provided), or it falls through at the end of the code (if on_success
+ // is NULL).
+ // On success, both first and second hold Smi-tagged values.
+ // One of first or second must be non-Smi when entering.
+ static void NumbersToSmis(MacroAssembler* masm,
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* on_success,
+ Label* on_not_smis);
};
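A hypothetical call site for the new helper (register choices are illustrative):

    Label not_smis;
    FloatingPointHelper::NumbersToSmis(masm,
                                       rdx, rax,      // first, second
                                       rbx, rcx, r8,  // scratches
                                       NULL,          // fall through on success
                                       &not_smis);
    // Here both rdx and rax hold smis; &not_smis handles everything else.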
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
- // dividend in rax and rdx free for the division. Use rax, rbx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = rdx;
- Register right = rax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
- left = rax;
- right = rbx;
- if (HasArgsInRegisters()) {
- __ movq(rbx, rax);
- __ movq(rax, rdx);
- }
- }
- if (!HasArgsInRegisters()) {
- __ movq(right, Operand(rsp, 1 * kPointerSize));
- __ movq(left, Operand(rsp, 2 * kPointerSize));
- }
+// Get the integer part of a heap number.
+// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
+void IntegerConvert(MacroAssembler* masm,
+ Register result,
+ Register source) {
+ // Result may be rcx. If result and source are the same register, source will
+ // be overwritten.
+ ASSERT(!result.is(rdi) && !result.is(rbx));
+ // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
+ // cvttsd2si (32-bit version) directly.
+ Register double_exponent = rbx;
+ Register double_value = rdi;
+ Label done, exponent_63_plus;
+ // Get double and extract exponent.
+ __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
+ // Clear result preemptively, in case we need to return zero.
+ __ xorl(result, result);
+ __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
+ // Double the value to shift the sign bit out, shift the exponent down to
+ // the least significant bits, and subtract the bias to get the unbiased exponent.
+ __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
+ __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
+ __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
+ // Check whether the exponent is too big for a 63 bit unsigned integer.
+ __ cmpl(double_exponent, Immediate(63));
+ __ j(above_equal, &exponent_63_plus, Label::kNear);
+ // Handle exponent range 0..62.
+ __ cvttsd2siq(result, xmm0);
+ __ jmp(&done, Label::kNear);
- Label not_smis;
- // 2. Smi check both operands.
- if (static_operands_type_.IsSmi()) {
- // Skip smi check if we know that both arguments are smis.
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
- if (op_ == Token::BIT_OR) {
- // Handle OR here, since we do extra smi-checking in the or code below.
- __ SmiOr(right, right, left);
- GenerateReturn(masm);
- return;
- }
+ __ bind(&exponent_63_plus);
+ // Exponent negative or 63+.
+ __ cmpl(double_exponent, Immediate(83));
+ // If the exponent is negative or above 83, the number contains no
+ // significant bits in the range 0..2^31, so the result is zero; result was
+ // already cleared above.
+ __ j(above, &done, Label::kNear);
+
+ // Exponent in range 63..83.
+ // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
+ // the least significant exponent-52 bits.
+
+ // Negate low bits of mantissa if value is negative.
+ __ addq(double_value, double_value); // Move sign bit to carry.
+ __ sbbl(result, result); // And convert carry to -1 in result register.
+ // If the value was negative, compute (double_value - 1) ^ -1 (two's
+ // complement negation); otherwise (double_value - 0) ^ 0 (identity).
+ __ addl(double_value, result);
+ // Do xor in opposite directions depending on where we want the result
+ // (depending on whether result is rcx or not).
+
+ if (result.is(rcx)) {
+ __ xorl(double_value, result);
+ // Left shift mantissa by (exponent - mantissabits - 1) to save the
+ // bits that have positional values below 2^32 (the extra -1 comes from the
+ // doubling done above to move the sign bit into the carry flag).
+ __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+ __ shll_cl(double_value);
+ __ movl(result, double_value);
} else {
- if (op_ != Token::BIT_OR) {
- // Skip the check for OR as it is better combined with the
- // actual operation.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- __ JumpIfNotBothSmi(left, right, &not_smis);
- }
+ // As the then-branch, but move double-value to result before shifting.
+ __ xorl(result, double_value);
+ __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+ __ shll_cl(result);
}
- // 3. Operands are both smis (except for OR), perform the operation leaving
- // the result in rax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op_) {
- case Token::ADD: {
- ASSERT(right.is(rax));
- __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
- break;
- }
+ __ bind(&done);
+}
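IntegerConvert computes (approximately) the ECMAScript ToInt32 of a heap number: a cvttsd2siq fast path for exponents 0..62 and the mantissa-shifting path for 63..83. The same semantics in portable C++, assuming IEEE-754 doubles and two's-complement wraparound:

    #include <cmath>
    #include <cstdint>

    int32_t ToInt32Sketch(double d) {
      if (!std::isfinite(d)) return 0;  // NaN and +/-Inf map to 0
      // Truncate toward zero, then reduce modulo 2^32.
      double m = std::fmod(std::trunc(d), 4294967296.0);
      return static_cast<int32_t>(
          static_cast<uint32_t>(static_cast<int64_t>(m)));
    }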
- case Token::SUB: {
- __ SmiSub(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
- }
- case Token::MUL:
- ASSERT(right.is(rax));
- __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
+void UnaryOpStub::Generate(MacroAssembler* masm) {
+ switch (operand_type_) {
+ case UnaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
break;
-
- case Token::DIV:
- ASSERT(left.is(rax));
- __ SmiDiv(left, left, right, &use_fp_on_smis);
+ case UnaryOpIC::SMI:
+ GenerateSmiStub(masm);
break;
-
- case Token::MOD:
- ASSERT(left.is(rax));
- __ SmiMod(left, left, right, slow);
+ case UnaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
break;
-
- case Token::BIT_OR:
- ASSERT(right.is(rax));
- __ movq(rcx, right); // Save the right operand.
- __ SmiOr(right, right, left); // BIT_OR is commutative.
- __ testb(right, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smis);
+ case UnaryOpIC::GENERIC:
+ GenerateGenericStub(masm);
break;
+ }
+}
- case Token::BIT_AND:
- ASSERT(right.is(rax));
- __ SmiAnd(right, right, left); // BIT_AND is commutative.
- break;
- case Token::BIT_XOR:
- ASSERT(right.is(rax));
- __ SmiXor(right, right, left); // BIT_XOR is commutative.
- break;
+void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ __ pop(rcx); // Save return address.
+ __ push(rax);
+ // The operand is now on top of the stack.
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
+ __ Push(Smi::FromInt(MinorKey()));
+ __ Push(Smi::FromInt(op_));
+ __ Push(Smi::FromInt(operand_type_));
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- switch (op_) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(left, left, right);
- break;
- case Token::SHR:
- __ SmiShiftLogicalRight(left, left, right, slow);
- break;
- case Token::SHL:
- __ SmiShiftLeft(left, left, right);
- break;
- default:
- UNREACHABLE();
- }
- __ movq(rax, left);
- break;
+ __ push(rcx); // Push return address.
- default:
- UNREACHABLE();
- break;
- }
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
+ masm->isolate()),
+ 4,
+ 1);
+}
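The pushes above leave the following frame for the IC patch routine, which explains the argument count of 4 (a sketch of the layout):

    // [rsp + 0 * kPointerSize]  return address (rcx, pushed last)
    // [rsp + 1 * kPointerSize]  Smi(operand_type_)
    // [rsp + 2 * kPointerSize]  Smi(op_)
    // [rsp + 3 * kPointerSize]  Smi(MinorKey())
    // [rsp + 4 * kPointerSize]  rax, the operand itself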
- // 4. Emit return of result in rax.
- GenerateReturn(masm);
- // 5. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
switch (op_) {
- case Token::ADD:
case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- ASSERT(use_fp_on_smis.is_linked());
- __ bind(&use_fp_on_smis);
- if (op_ == Token::DIV) {
- __ movq(rdx, rax);
- __ movq(rax, rbx);
- }
- // left is rdx, right is rax.
- __ AllocateHeapNumber(rbx, rcx, slow);
- FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rbx);
- GenerateReturn(masm);
- }
- default:
- break;
- }
-
- // 6. Non-smi operands, fall out to the non-smi code with the operands in
- // rdx and rax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
-
- switch (op_) {
- case Token::DIV:
- case Token::MOD:
- // Operands are in rax, rbx at this point.
- __ movq(rdx, rax);
- __ movq(rax, rbx);
+ GenerateSmiStubSub(masm);
break;
-
- case Token::BIT_OR:
- // Right operand is saved in rcx and rax was destroyed by the smi
- // operation.
- __ movq(rax, rcx);
+ case Token::BIT_NOT:
+ GenerateSmiStubBitNot(masm);
break;
-
default:
- break;
+ UNREACHABLE();
}
}
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
+void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+ Label slow;
+ GenerateSmiCodeSub(masm, &slow, &slow, Label::kNear, Label::kNear);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+}
- if (ShouldGenerateSmiCode()) {
- GenerateSmiCode(masm, &call_runtime);
- } else if (op_ != Token::MOD) {
- if (!HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
- }
- // Floating point case.
- if (ShouldGenerateFPCode()) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-smi argument occurs
- // (and only if smi code is generated). This is the right moment to
- // patch to HEAP_NUMBERS state. The transition is attempted only for
- // the four basic operations. The stub stays in the DEFAULT state
- // forever for all other operations (also if smi code is skipped).
- GenerateTypeTransition(masm);
- break;
- }
- Label not_floats;
- // rax: y
- // rdx: x
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx);
- __ AbortIfNotNumber(rax);
- }
- FloatingPointHelper::LoadSSE2NumberOperands(masm);
- } else {
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
- }
+void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+ Label non_smi;
+ GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+ __ bind(&non_smi);
+ GenerateTypeTransition(masm);
+}
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Allocate a heap number, if needed.
- Label skip_allocation;
- OverwriteMode mode = mode_;
- if (HasArgsReversed()) {
- if (mode == OVERWRITE_RIGHT) {
- mode = OVERWRITE_LEFT;
- } else if (mode == OVERWRITE_LEFT) {
- mode = OVERWRITE_RIGHT;
- }
- }
- switch (mode) {
- case OVERWRITE_LEFT:
- __ JumpIfNotSmi(rdx, &skip_allocation);
- __ AllocateHeapNumber(rbx, rcx, &call_runtime);
- __ movq(rdx, rbx);
- __ bind(&skip_allocation);
- __ movq(rax, rdx);
- break;
- case OVERWRITE_RIGHT:
- // If the argument in rax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, &call_runtime);
- __ movq(rax, rbx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- __ bind(&not_floats);
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- !HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-number argument
- // occurs (and only if smi code is skipped from the stub, otherwise
- // the patching has already been done earlier in this case branch).
- // A perfect moment to try patching to STRINGS for ADD operation.
- if (op_ == Token::ADD) {
- GenerateTypeTransition(masm);
- }
- }
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label skip_allocation, non_smi_shr_result;
- Register heap_number_map = r9;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx);
- __ AbortIfNotNumber(rax);
- }
- FloatingPointHelper::LoadNumbersAsIntegers(masm);
- } else {
- FloatingPointHelper::LoadAsIntegers(masm,
- &call_runtime,
- heap_number_map);
- }
- switch (op_) {
- case Token::BIT_OR: __ orl(rax, rcx); break;
- case Token::BIT_AND: __ andl(rax, rcx); break;
- case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl_cl(rax); break;
- case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: {
- __ shrl_cl(rax);
- // Check if result is negative. This can only happen for a shift
- // by zero.
- __ testl(rax, rax);
- __ j(negative, &non_smi_shr_result);
- break;
- }
- default: UNREACHABLE();
- }
- STATIC_ASSERT(kSmiValueSize == 32);
- // Tag smi result and return.
- __ Integer32ToSmi(rax, rax);
- GenerateReturn(masm);
-
- // All bit-ops except SHR return a signed int32 that can be
- // returned immediately as a smi.
- // We might need to allocate a HeapNumber if we shift a negative
- // number right by zero (i.e., convert to UInt32).
- if (op_ == Token::SHR) {
- ASSERT(non_smi_shr_result.is_linked());
- __ bind(&non_smi_shr_result);
- // Allocate a heap number if needed.
- __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate heap number in new space.
- // Not using AllocateHeapNumber macro in order to reuse
- // already loaded heap_number_map.
- __ AllocateInNewSpace(HeapNumber::kSize,
- rax,
- rcx,
- no_reg,
- &call_runtime,
- TAG_OBJECT);
- // Set the map.
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
- __ movq(FieldOperand(rax, HeapObject::kMapOffset),
- heap_number_map);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- __ cvtqsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- }
+void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+ Label* non_smi,
+ Label* slow,
+ Label::Distance non_smi_near,
+ Label::Distance slow_near) {
+ Label done;
+ __ JumpIfNotSmi(rax, non_smi, non_smi_near);
+ __ SmiNeg(rax, rax, &done, Label::kNear);
+ __ jmp(slow, slow_near);
+ __ bind(&done);
+ __ ret(0);
+}
- break;
- }
- default: UNREACHABLE(); break;
- }
- }
- // If all else fails, use the runtime system to get the correct
- // result. If arguments was passed in registers now place them on the
- // stack in the correct order below the return address.
- __ bind(&call_runtime);
+void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+ Label* non_smi,
+ Label::Distance non_smi_near) {
+ __ JumpIfNotSmi(rax, non_smi, non_smi_near);
+ __ SmiNot(rax, rax);
+ __ ret(0);
+}
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
switch (op_) {
- case Token::ADD: {
- // Registers containing left and right operands respectively.
- Register lhs, rhs;
-
- if (HasArgsReversed()) {
- lhs = rax;
- rhs = rdx;
- } else {
- lhs = rdx;
- rhs = rax;
- }
-
- // Test for string arguments before calling runtime.
- Label not_strings, both_strings, not_string1, string1, string1_smi2;
-
- // If this stub has already generated FP-specific code then the arguments
- // are already in rdx and rax.
- if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
-
- Condition is_smi;
- is_smi = masm->CheckSmi(lhs);
- __ j(is_smi, &not_string1);
- __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &not_string1);
-
- // First argument is a a string, test second.
- is_smi = masm->CheckSmi(rhs);
- __ j(is_smi, &string1_smi2);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &string1);
-
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, rhs, rbx, rcx, r8, true, &string1);
-
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ movq(Operand(rsp, 1 * kPointerSize), rbx);
- __ TailCallStub(&string_add_stub);
-
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
-
- // First argument was not a string, test second.
- __ bind(&not_string1);
- is_smi = masm->CheckSmi(rhs);
- __ j(is_smi, &not_strings);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
- __ j(above_equal, &not_strings);
-
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
-
- __ bind(&not_strings);
- // Neither argument is a string.
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ GenerateHeapNumberStubSub(masm);
break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ case Token::BIT_NOT:
+ GenerateHeapNumberStubBitNot(masm);
break;
default:
UNREACHABLE();
@@ -939,83 +513,163 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
- ASSERT(!HasArgsInRegisters());
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+ Label non_smi, slow, call_builtin;
+ GenerateSmiCodeSub(masm, &non_smi, &call_builtin, Label::kNear);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+ __ bind(&call_builtin);
+ GenerateGenericCodeFallback(masm);
}
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
- // If arguments are not passed in registers remove them from the stack before
- // returning.
- if (!HasArgsInRegisters()) {
- __ ret(2 * kPointerSize); // Remove both operands
- } else {
- __ ret(0);
- }
+void UnaryOpStub::GenerateHeapNumberStubBitNot(
+ MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
}
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- ASSERT(HasArgsInRegisters());
- __ pop(rcx);
- if (HasArgsReversed()) {
- __ push(rax);
- __ push(rdx);
+void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+ Label* slow) {
+ // Check if the operand is a heap number.
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, slow);
+
+ // Operand is a float, negate its value by flipping the sign bit.
+ if (mode_ == UNARY_OVERWRITE) {
+ __ Set(kScratchRegister, 0x01);
+ __ shl(kScratchRegister, Immediate(63));
+ __ xor_(FieldOperand(rax, HeapNumber::kValueOffset), kScratchRegister);
} else {
- __ push(rdx);
+ // Allocate a heap number before calculating the answer,
+ // so we don't have an untagged double around during GC.
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ __ EnterInternalFrame();
__ push(rax);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ movq(rcx, rax);
+ __ pop(rax);
+ __ LeaveInternalFrame();
+ __ bind(&heapnumber_allocated);
+ // rcx: allocated 'empty' number
+
+ // Copy the double value to the new heap number, flipping the sign.
+ __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ Set(kScratchRegister, 0x01);
+ __ shl(kScratchRegister, Immediate(63));
+ __ xor_(rdx, kScratchRegister); // Flip sign.
+ __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+ __ movq(rax, rcx);
}
- __ push(rcx);
+ __ ret(0);
}
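Flipping bit 63 is the entire negation; the same operation in portable C++ (a sketch, assuming IEEE-754 layout):

    #include <cstdint>
    #include <cstring>

    double NegateViaSignBit(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);     // safe type pun
      bits ^= static_cast<uint64_t>(1) << 63;  // flip only the sign bit
      std::memcpy(&d, &bits, sizeof bits);
      return d;  // handles +/-0 and NaN payloads uniformly
    }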
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
+void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
+ Label* slow) {
+ // Check if the operand is a heap number.
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, slow);
- // Ensure the operands are on the stack.
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
+ // Convert the heap number in rax to an untagged integer in rcx.
+ IntegerConvert(masm, rax, rax);
- // Left and right arguments are already on stack.
- __ pop(rcx); // Save the return address.
+ // Do the bitwise operation and smi tag the result.
+ __ notl(rax);
+ __ Integer32ToSmi(rax, rax);
+ __ ret(0);
+}
- // Push this stub's key.
- __ Push(Smi::FromInt(MinorKey()));
- // Although the operation and the type info are encoded into the key,
- // the encoding is opaque, so push them too.
- __ Push(Smi::FromInt(op_));
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateGenericStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateGenericStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
- __ Push(Smi::FromInt(runtime_operands_type_));
+void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow, Label::kNear);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
+}
- __ push(rcx); // The return address.
- // Perform patching to an appropriate fast case and return the result.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
- 5,
- 1);
+void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
}
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
+void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ pop(rcx); // pop return address
+ __ push(rax);
+ __ push(rcx); // push return address
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
- TRBinaryOpIC::TypeInfo type_info,
- TRBinaryOpIC::TypeInfo result_type_info) {
- TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
- return stub.GetCode();
+const char* UnaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name = NULL; // Make g++ happy.
+ switch (mode_) {
+ case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+ }
+
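+  // Produces names like "UnaryOpStub_SUB_Alloc_Smi"; the last component
+  // comes from UnaryOpIC::GetName, so its exact spelling follows that table.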
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "UnaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ UnaryOpIC::GetName(operand_type_));
+ return name_;
}
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(rcx); // Save return address.
__ push(rdx);
__ push(rax);
@@ -1031,35 +685,39 @@ void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
+ masm->isolate()),
5,
1);
}
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+void BinaryOpStub::Generate(MacroAssembler* masm) {
switch (operands_type_) {
- case TRBinaryOpIC::UNINITIALIZED:
+ case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
break;
- case TRBinaryOpIC::SMI:
+ case BinaryOpIC::SMI:
GenerateSmiStub(masm);
break;
- case TRBinaryOpIC::INT32:
+ case BinaryOpIC::INT32:
UNREACHABLE();
// The int32 case is identical to the Smi case. We avoid creating this
// ic state on x64.
break;
- case TRBinaryOpIC::HEAP_NUMBER:
+ case BinaryOpIC::HEAP_NUMBER:
GenerateHeapNumberStub(masm);
break;
- case TRBinaryOpIC::ODDBALL:
+ case BinaryOpIC::ODDBALL:
GenerateOddballStub(masm);
break;
- case TRBinaryOpIC::STRING:
+ case BinaryOpIC::BOTH_STRING:
+ GenerateBothStringStub(masm);
+ break;
+ case BinaryOpIC::STRING:
GenerateStringStub(masm);
break;
- case TRBinaryOpIC::GENERIC:
+ case BinaryOpIC::GENERIC:
GenerateGeneric(masm);
break;
default:
@@ -1068,10 +726,11 @@ void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
}
-const char* TypeRecordingBinaryOpStub::GetName() {
+const char* BinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* op_name = Token::Name(op_);
const char* overwrite_name;
@@ -1083,41 +742,43 @@ const char* TypeRecordingBinaryOpStub::GetName() {
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "TypeRecordingBinaryOpStub_%s_%s_%s",
+ "BinaryOpStub_%s_%s_%s",
op_name,
overwrite_name,
- TRBinaryOpIC::GetName(operands_type_));
+ BinaryOpIC::GetName(operands_type_));
return name_;
}
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+void BinaryOpStub::GenerateSmiCode(
+ MacroAssembler* masm,
Label* slow,
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ // Arguments to BinaryOpStub are in rdx and rax.
+ Register left = rdx;
+ Register right = rax;
+
// We only generate heapnumber answers for overflowing calculations
- // for the four basic arithmetic operations.
+ // for the four basic arithmetic operations and logical right shift by 0.
bool generate_inline_heapnumber_results =
(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
(op_ == Token::ADD || op_ == Token::SUB ||
- op_ == Token::MUL || op_ == Token::DIV);
-
- // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
- Register left = rdx;
- Register right = rax;
-
+ op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);
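+  // SHR is unsigned, so e.g. -1 >>> 0 yields 4294967295, which exceeds the
+  // 32-bit smi payload on x64 and must be boxed as a heap number.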
// Smi check of both operands. If op is BIT_OR, the check is delayed
// until after the OR operation.
Label not_smis;
Label use_fp_on_smis;
- Label restore_MOD_registers; // Only used if op_ == Token::MOD.
+ Label fail;
if (op_ != Token::BIT_OR) {
Comment smi_check_comment(masm, "-- Smi check arguments");
__ JumpIfNotBothSmi(left, right, &not_smis);
}
+ Label smi_values;
+ __ bind(&smi_values);
// Perform the operation.
Comment perform_smi(masm, "-- Perform smi operation");
switch (op_) {
@@ -1156,9 +817,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
case Token::BIT_OR: {
ASSERT(right.is(rax));
- __ movq(rcx, right); // Save the right operand.
- __ SmiOr(right, right, left); // BIT_OR is commutative.
- __ JumpIfNotSmi(right, &not_smis); // Test delayed until after BIT_OR.
+ __ SmiOrIfSmis(right, right, left, &not_smis); // BIT_OR is commutative.
break;
}
case Token::BIT_XOR:
@@ -1182,7 +841,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
break;
case Token::SHR:
- __ SmiShiftLogicalRight(left, left, right, &not_smis);
+ __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
__ movq(rax, left);
break;
@@ -1193,48 +852,58 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
// 5. Emit return of result in rax. Some operations have registers pushed.
__ ret(0);
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- __ bind(&use_fp_on_smis);
- if (op_ == Token::DIV || op_ == Token::MOD) {
- // Restore left and right to rdx and rax.
- __ movq(rdx, rcx);
- __ movq(rax, rbx);
- }
-
+ if (use_fp_on_smis.is_linked()) {
+ // 6. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ __ bind(&use_fp_on_smis);
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ // Restore left and right to rdx and rax.
+ __ movq(rdx, rcx);
+ __ movq(rax, rbx);
+ }
- if (generate_inline_heapnumber_results) {
- __ AllocateHeapNumber(rcx, rbx, slow);
- Comment perform_float(masm, "-- Perform float operation on smis");
- FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
+ if (generate_inline_heapnumber_results) {
+ __ AllocateHeapNumber(rcx, rbx, slow);
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ if (op_ == Token::SHR) {
+ __ SmiToInteger32(left, left);
+ __ cvtqsi2sd(xmm0, left);
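+        // The unsigned 32-bit shift result was zero-extended to 64 bits, so
+        // the signed 64-bit convert still yields the correct double.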
+ } else {
+ FloatingPointHelper::LoadSSE2SmiOperands(masm);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ }
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rcx);
+ __ ret(0);
+ } else {
+ __ jmp(&fail);
}
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
}
// 7. Non-smi operands reach the end of the code generated by
// GenerateSmiCode, and fall through to subsequent code,
// with the operands in rdx and rax.
- Comment done_comment(masm, "-- Enter non-smi code");
+ // But first we check if non-smi values are HeapNumbers holding
+ // values that could be smi.
__ bind(&not_smis);
- if (op_ == Token::BIT_OR) {
- __ movq(right, rcx);
- }
+ Comment done_comment(masm, "-- Enter non-smi code");
+ FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
+ &smi_values, &fail);
+ __ jmp(&smi_values);
+ __ bind(&fail);
}
-void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
- MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure) {
+void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
+ Label* allocation_failure,
+ Label* non_numeric_failure) {
switch (op_) {
case Token::ADD:
case Token::SUB:
@@ -1303,7 +972,7 @@ void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
// already loaded heap_number_map.
__ AllocateInNewSpace(HeapNumber::kSize,
rax,
- rcx,
+ rdx,
no_reg,
&allocation_failed,
TAG_OBJECT);
@@ -1323,7 +992,7 @@ void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
// We need tagged values in rdx and rax for the following code,
// not int32 in rax and rcx.
__ Integer32ToSmi(rax, rcx);
- __ Integer32ToSmi(rdx, rax);
+ __ Integer32ToSmi(rdx, rbx);
__ jmp(allocation_failure);
}
break;
@@ -1333,32 +1002,32 @@ void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
// No fall-through from this generated code.
if (FLAG_debug_code) {
__ Abort("Unexpected fall-through in "
- "TypeRecordingBinaryStub::GenerateFloatingPointCode.");
+             "BinaryOpStub::GenerateFloatingPointCode.");
}
}
-void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
+void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
- NearLabel left_not_string, call_runtime;
+ Label left_not_string, call_runtime;
// Registers containing left and right operands respectively.
Register left = rdx;
Register right = rax;
// Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string);
+ __ JumpIfSmi(left, &left_not_string, Label::kNear);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &left_not_string);
+ __ j(above_equal, &left_not_string, Label::kNear);
StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_left_stub);
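+  // E.g. "a" + 1 takes this path; 1 + "a" falls through to the
+  // right-operand check below.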
// Left operand is not a string, test right.
__ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
+ __ JumpIfSmi(right, &call_runtime, Label::kNear);
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime);
+ __ j(above_equal, &call_runtime, Label::kNear);
StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
GenerateRegisterArgsPush(masm);
@@ -1369,7 +1038,7 @@ void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
+void BinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
GenerateRegisterArgsPush(masm);
switch (op_) {
case Token::ADD:
@@ -1411,27 +1080,70 @@ void TypeRecordingBinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label not_smi;
-
- GenerateSmiCode(masm, &not_smi, NO_HEAPNUMBER_RESULTS);
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ Label call_runtime;
+ if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+ result_type_ == BinaryOpIC::SMI) {
+ // Only allow smi results.
+ GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
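+    // Any non-smi outcome falls through to GenerateTypeTransition below,
+    // which re-patches the IC to a wider state.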
+ } else {
+ // Allow heap number result and don't make a transition if a heap number
+ // cannot be allocated.
+ GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ }
- __ bind(&not_smi);
+ // Code falls through if the result is not returned as either a smi or heap
+ // number.
GenerateTypeTransition(masm);
+
+ if (call_runtime.is_linked()) {
+ __ bind(&call_runtime);
+ GenerateCallRuntimeCode(masm);
+ }
}
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
GenerateStringAddCode(masm);
// Try to add arguments as strings, otherwise, transition to the generic
- // TRBinaryOpIC type.
+ // BinaryOpIC type.
+ GenerateTypeTransition(masm);
+}
+
+
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+ Label call_runtime;
+ ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(op_ == Token::ADD);
+ // If both arguments are strings, call the string add stub.
+ // Otherwise, do a transition.
+
+ // Registers containing left and right operands respectively.
+ Register left = rdx;
+ Register right = rax;
+
+ // Test if left operand is a string.
+ __ JumpIfSmi(left, &call_runtime);
+ __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
+ __ j(above_equal, &call_runtime);
+
+ // Test if right operand is a string.
+ __ JumpIfSmi(right, &call_runtime);
+ __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
+ __ j(above_equal, &call_runtime);
+
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&call_runtime);
GenerateTypeTransition(masm);
}
-void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
Label call_runtime;
if (op_ == Token::ADD) {
@@ -1441,18 +1153,18 @@ void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
}
// Convert oddball arguments to numbers.
- NearLabel check, done;
+ Label check, done;
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check);
+ __ j(not_equal, &check, Label::kNear);
if (Token::IsBitOp(op_)) {
__ xor_(rdx, rdx);
} else {
__ LoadRoot(rdx, Heap::kNanValueRootIndex);
}
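+  // undefined converts to 0 for bit ops (ToInt32) and to NaN otherwise
+  // (ToNumber), matching the ECMA conversion rules.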
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&check);
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &done);
+ __ j(not_equal, &done, Label::kNear);
if (Token::IsBitOp(op_)) {
__ xor_(rax, rax);
} else {
@@ -1464,7 +1176,7 @@ void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
Label gc_required, not_number;
GenerateFloatingPointCode(masm, &gc_required, &not_number);
@@ -1476,7 +1188,7 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
Label call_runtime, call_string_add_or_runtime;
GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
@@ -1493,9 +1205,8 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
- MacroAssembler* masm,
- Label* alloc_failure) {
+void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure) {
Label skip_allocation;
OverwriteMode mode = mode_;
switch (mode) {
@@ -1533,7 +1244,7 @@ void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
}
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
__ pop(rcx);
__ push(rdx);
__ push(rax);
@@ -1560,11 +1271,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
Label skip_cache;
const bool tagged = (argument_type_ == TAGGED);
if (tagged) {
- NearLabel input_not_smi;
- NearLabel loaded;
+ Label input_not_smi, loaded;
// Test that rax is a number.
__ movq(rax, Operand(rsp, kPointerSize));
- __ JumpIfNotSmi(rax, &input_not_smi);
+ __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
// Input is a smi. Untag and load it onto the FPU stack.
// Then load the bits of the double into rbx.
__ SmiToInteger32(rax, rax);
@@ -1575,7 +1285,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ movq(rdx, xmm1);
__ fld_d(Operand(rsp, 0));
__ addq(rsp, Immediate(kDoubleSize));
- __ jmp(&loaded);
+ __ jmp(&loaded, Label::kNear);
__ bind(&input_not_smi);
// Check if input is a HeapNumber.
@@ -1614,15 +1324,18 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ xorl(rcx, rdx);
__ xorl(rax, rdi);
__ xorl(rcx, rax);
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
+ ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+ __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
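+  // The xors above fold the double's 64-bit pattern into a 32-bit hash;
+  // masking with kCacheSize - 1 works because the size is a power of two.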
// ST[0] == double value.
// rbx = bits of double value.
// rcx = TranscendentalCache::hash(double value).
- __ movq(rax, ExternalReference::transcendental_cache_array_address());
- // rax points to cache array.
- __ movq(rax, Operand(rax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ ExternalReference cache_array =
+ ExternalReference::transcendental_cache_array_address(masm->isolate());
+ __ movq(rax, cache_array);
+ int cache_array_index =
+ type_ * sizeof(Isolate::Current()->transcendental_cache()->caches_[0]);
+ __ movq(rax, Operand(rax, cache_array_index));
// rax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
__ testq(rax, rax);
@@ -1630,7 +1343,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
{ // NOLINT - doesn't like a single brace on a line.
- TranscendentalCache::Element test_elem[2];
+ TranscendentalCache::SubCache::Element test_elem[2];
char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
@@ -1647,9 +1360,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ addl(rcx, rcx);
__ lea(rcx, Operand(rax, rcx, times_8, 0));
// Check if cache matches: Double value is stored in uint32_t[2] array.
- NearLabel cache_miss;
+ Label cache_miss;
__ cmpq(rbx, Operand(rcx, 0));
- __ j(not_equal, &cache_miss);
+ __ j(not_equal, &cache_miss, Label::kNear);
// Cache hit!
__ movq(rax, Operand(rcx, 2 * kIntSize));
if (tagged) {
@@ -1703,7 +1416,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&runtime_call_clear_stack);
__ fstp(0);
__ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+ __ TailCallExternalReference(
+ ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
} else { // UNTAGGED.
__ bind(&runtime_call_clear_stack);
__ bind(&runtime_call);
@@ -1756,8 +1470,8 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
__ j(below, &in_range);
// Check for infinity and NaN. Both return NaN for sin.
__ cmpl(rdi, Immediate(0x7ff));
- NearLabel non_nan_result;
- __ j(not_equal, &non_nan_result);
+ Label non_nan_result;
+ __ j(not_equal, &non_nan_result, Label::kNear);
// Input is +/-Infinity or NaN. Result is NaN.
__ fstp(0);
__ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex);
@@ -1785,7 +1499,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// Compute st(0) % st(1)
{
- NearLabel partial_remainder_loop;
+ Label partial_remainder_loop;
__ bind(&partial_remainder_loop);
__ fprem1();
__ fwait();
@@ -1822,74 +1536,6 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
}
-// Get the integer part of a heap number.
-// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
-void IntegerConvert(MacroAssembler* masm,
- Register result,
- Register source) {
- // Result may be rcx. If result and source are the same register, source will
- // be overwritten.
- ASSERT(!result.is(rdi) && !result.is(rbx));
- // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
- // cvttsd2si (32-bit version) directly.
- Register double_exponent = rbx;
- Register double_value = rdi;
- NearLabel done, exponent_63_plus;
- // Get double and extract exponent.
- __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
- // Clear result preemptively, in case we need to return zero.
- __ xorl(result, result);
- __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
-  // Double to remove sign bit, shift exponent down to least significant bits,
- // and subtract bias to get the unshifted, unbiased exponent.
- __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
- __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
- __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
- // Check whether the exponent is too big for a 63 bit unsigned integer.
- __ cmpl(double_exponent, Immediate(63));
- __ j(above_equal, &exponent_63_plus);
- // Handle exponent range 0..62.
- __ cvttsd2siq(result, xmm0);
- __ jmp(&done);
-
- __ bind(&exponent_63_plus);
- // Exponent negative or 63+.
- __ cmpl(double_exponent, Immediate(83));
- // If exponent negative or above 83, number contains no significant bits in
- // the range 0..2^31, so result is zero, and rcx already holds zero.
- __ j(above, &done);
-
-  // Exponent in range 63..83.
- // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
- // the least significant exponent-52 bits.
-
- // Negate low bits of mantissa if value is negative.
- __ addq(double_value, double_value); // Move sign bit to carry.
- __ sbbl(result, result); // And convert carry to -1 in result register.
- // if scratch2 is negative, do (scratch2-1)^-1, otherwise (scratch2-0)^0.
- __ addl(double_value, result);
- // Do xor in opposite directions depending on where we want the result
- // (depending on whether result is rcx or not).
-
- if (result.is(rcx)) {
- __ xorl(double_value, result);
- // Left shift mantissa by (exponent - mantissabits - 1) to save the
- // bits that have positional values below 2^32 (the extra -1 comes from the
- // doubling done above to move the sign bit into the carry flag).
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(double_value);
- __ movl(result, double_value);
- } else {
- // As the then-branch, but move double-value to result before shifting.
- __ xorl(result, double_value);
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(result);
- }
-
- __ bind(&done);
-}
-
-
// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
@@ -1937,7 +1583,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ bind(&check_undefined_arg1);
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, conversion_failure);
- __ movl(r8, Immediate(0));
+ __ Set(r8, 0);
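+  // (Set emits the shorter xorl encoding when the immediate is zero.)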
__ jmp(&load_arg2);
__ bind(&arg1_is_object);
@@ -1957,7 +1603,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ bind(&check_undefined_arg2);
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(not_equal, conversion_failure);
- __ movl(rcx, Immediate(0));
+ __ Set(rcx, 0);
__ jmp(&done);
__ bind(&arg2_is_object);
@@ -2032,87 +1678,57 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
}
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
-
- if (op_ == Token::SUB) {
- if (include_smi_code_) {
- // Check whether the value is a smi.
- Label try_float;
- __ JumpIfNotSmi(rax, &try_float);
- if (negative_zero_ == kIgnoreNegativeZero) {
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(equal, &done);
- }
- __ SmiNeg(rax, rax, &done);
- __ jmp(&slow); // zero, if not handled above, and Smi::kMinValue.
-
- // Try floating point case.
- __ bind(&try_float);
- } else if (FLAG_debug_code) {
- __ AbortIfSmi(rax);
- }
-
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &slow);
- // Operand is a float, negate its value by flipping sign bit.
- __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(kScratchRegister, Immediate(0x01));
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(rdx, kScratchRegister); // Flip sign.
- // rdx is value to store.
- if (overwrite_ == UNARY_OVERWRITE) {
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
- } else {
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // rcx: allocated 'empty' number
- __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
- __ movq(rax, rcx);
- }
- } else if (op_ == Token::BIT_NOT) {
- if (include_smi_code_) {
- Label try_float;
- __ JumpIfNotSmi(rax, &try_float);
- __ SmiNot(rax, rax);
- __ jmp(&done);
- // Try floating point case.
- __ bind(&try_float);
- } else if (FLAG_debug_code) {
- __ AbortIfSmi(rax);
- }
-
- // Check if the operand is a heap number.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &slow);
-
- // Convert the heap number in rax to an untagged integer in rcx.
- IntegerConvert(masm, rax, rax);
+void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* on_success,
+ Label* on_not_smis) {
+ Register heap_number_map = scratch3;
+ Register smi_result = scratch1;
+ Label done;
- // Do the bitwise operation and smi tag the result.
- __ notl(rax);
- __ Integer32ToSmi(rax, rax);
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ Label first_smi;
+ __ JumpIfSmi(first, &first_smi, Label::kNear);
+ __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, on_not_smis);
+ // Convert HeapNumber to smi if possible.
+ __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
+ __ movq(scratch2, xmm0);
+ __ cvttsd2siq(smi_result, xmm0);
+ // Check if conversion was successful by converting back and
+ // comparing to the original double's bits.
+ __ cvtlsi2sd(xmm1, smi_result);
+ __ movq(kScratchRegister, xmm1);
+ __ cmpq(scratch2, kScratchRegister);
+ __ j(not_equal, on_not_smis);
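+  // The bit-exact round trip also rejects -0.0 and fractional values, since
+  // neither survives double -> int32 -> double unchanged.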
+ __ Integer32ToSmi(first, smi_result);
+
+ __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
+ __ bind(&first_smi);
+ if (FLAG_debug_code) {
+ // Second should be non-smi if we get here.
+ __ AbortIfSmi(second);
}
-
- // Return from the stub.
- __ bind(&done);
- __ StubReturn(1);
-
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ pop(rcx); // pop return address
- __ push(rax);
- __ push(rcx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
+ __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, on_not_smis);
+ // Convert second to smi, if possible.
+ __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
+ __ movq(scratch2, xmm0);
+ __ cvttsd2siq(smi_result, xmm0);
+ __ cvtlsi2sd(xmm1, smi_result);
+ __ movq(kScratchRegister, xmm1);
+ __ cmpq(scratch2, kScratchRegister);
+ __ j(not_equal, on_not_smis);
+ __ Integer32ToSmi(second, smi_result);
+ if (on_success != NULL) {
+ __ jmp(on_success);
+ } else {
+ __ bind(&done);
}
}
@@ -2130,7 +1746,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ movq(rax, Operand(rsp, 1 * kPointerSize));
// Save 1 in xmm3 - we need this several times later on.
- __ movl(rcx, Immediate(1));
+ __ Set(rcx, 1);
__ cvtlsi2sd(xmm3, rcx);
Label exponent_nonsmi;
@@ -2162,20 +1778,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ movq(rdx, rax);
// Get absolute value of exponent.
- NearLabel no_neg;
+ Label no_neg;
__ cmpl(rax, Immediate(0));
- __ j(greater_equal, &no_neg);
+ __ j(greater_equal, &no_neg, Label::kNear);
__ negl(rax);
__ bind(&no_neg);
// Load xmm1 with 1.
- __ movsd(xmm1, xmm3);
- NearLabel while_true;
- NearLabel no_multiply;
+ __ movaps(xmm1, xmm3);
+ Label while_true;
+ Label no_multiply;
__ bind(&while_true);
__ shrl(rax, Immediate(1));
- __ j(not_carry, &no_multiply);
+ __ j(not_carry, &no_multiply, Label::kNear);
__ mulsd(xmm1, xmm0);
__ bind(&no_multiply);
__ mulsd(xmm0, xmm0);
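+  // Classic binary exponentiation: square the base each iteration and
+  // multiply it into xmm1 only for set exponent bits (e.g. bits 0 and 2
+  // for x^5).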
@@ -2187,8 +1803,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ j(positive, &allocate_return);
// Special case if xmm1 has reached infinity.
__ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ xorpd(xmm0, xmm0);
+ __ movaps(xmm1, xmm3);
+ __ xorps(xmm0, xmm0);
__ ucomisd(xmm0, xmm1);
__ j(equal, &call_runtime);
@@ -2205,12 +1821,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ ucomisd(xmm1, xmm1);
__ j(parity_even, &call_runtime);
- NearLabel base_not_smi;
- NearLabel handle_special_cases;
- __ JumpIfNotSmi(rdx, &base_not_smi);
+ Label base_not_smi, handle_special_cases;
+ __ JumpIfNotSmi(rdx, &base_not_smi, Label::kNear);
__ SmiToInteger32(rdx, rdx);
__ cvtlsi2sd(xmm0, rdx);
- __ jmp(&handle_special_cases);
+ __ jmp(&handle_special_cases, Label::kNear);
__ bind(&base_not_smi);
__ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
@@ -2225,22 +1840,22 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// base is in xmm0 and exponent is in xmm1.
__ bind(&handle_special_cases);
- NearLabel not_minus_half;
+ Label not_minus_half;
// Test for -0.5.
// Load xmm2 with -0.5.
__ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
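+  // (0xBFE0000000000000 is the raw IEEE 754 bit pattern of -0.5.)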
__ movq(xmm2, rcx);
// xmm2 now has -0.5.
__ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half);
+ __ j(not_equal, &not_minus_half, Label::kNear);
// Calculates reciprocal of square root.
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
+ __ xorps(xmm1, xmm1);
__ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
__ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
+ __ movaps(xmm1, xmm3);
__ jmp(&allocate_return);
// Test for 0.5.
@@ -2253,8 +1868,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &call_runtime);
// Calculates square root.
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
+ __ xorps(xmm1, xmm1);
+ __ addsd(xmm1, xmm0); // Convert -0 to 0.
__ sqrtsd(xmm1, xmm1);
__ bind(&allocate_return);
@@ -2280,11 +1895,14 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
Label slow;
__ JumpIfNotSmi(rdx, &slow);
- // Check if the calling frame is an arguments adaptor frame.
+ // Check if the calling frame is an arguments adaptor frame. We look at the
+ // context offset, and if the frame is not a regular one, then we find a
+ // Smi instead of the context. We can't use SmiCompare here, because that
+ // only works for comparing two smis.
Label adaptor;
__ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor);
// Check index against formal parameters count limit passed in
@@ -2325,103 +1943,340 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ // Stack layout:
+ // rsp[0] : return address
+ // rsp[8] : number of parameters (tagged)
+ // rsp[16] : receiver displacement
+ // rsp[24] : function
+ // Registers used over the whole function:
+ // rbx: the mapped parameter count (untagged)
+ // rax: the allocated object (tagged).
+
+ Factory* factory = masm->isolate()->factory();
+
+ __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
+ // rbx = parameter count (untagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+ __ movq(rcx, rbx);
+ __ jmp(&try_allocate, Label::kNear);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ SmiToInteger64(rcx,
+ Operand(rdx,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+ __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+
+ // rbx = parameter count (untagged)
+ // rcx = argument count (untagged)
+ // Compute the mapped parameter count = min(rbx, rcx) in rbx.
+ __ cmpq(rbx, rcx);
+ __ j(less_equal, &try_allocate, Label::kNear);
+ __ movq(rbx, rcx);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+  // 1. Parameter map: 2 extra words hold the context and the backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ Label no_parameter_map;
+ __ testq(rbx, rbx);
+ __ j(zero, &no_parameter_map, Label::kNear);
+ __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ bind(&no_parameter_map);
+
+ // 2. Backing store.
+ __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ addq(r8, Immediate(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of all three objects in one go.
+ __ AllocateInNewSpace(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
+
+ // rax = address of new object(s) (tagged)
+ // rcx = argument count (untagged)
+ // Get the arguments boilerplate from the current (global) context into rdi.
+ Label has_mapped_parameters, copy;
+ __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ __ testq(rbx, rbx);
+ __ j(not_zero, &has_mapped_parameters, Label::kNear);
+
+ const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
+ __ movq(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
+ __ jmp(&copy, Label::kNear);
+
+ const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
+ __ bind(&has_mapped_parameters);
+ __ movq(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
+ __ bind(&copy);
+
+ // rax = address of new object (tagged)
+ // rbx = mapped parameter count (untagged)
+ // rcx = argument count (untagged)
+ // rdi = address of boilerplate object (tagged)
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ movq(rdx, FieldOperand(rdi, i));
+ __ movq(FieldOperand(rax, i), rdx);
+ }
+
+  // Set up the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ movq(rdx, Operand(rsp, 3 * kPointerSize));
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize),
+ rdx);
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ // Note: rcx is tagged from here on.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ __ Integer32ToSmi(rcx, rcx);
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize),
+ rcx);
+
+  // Set up the elements pointer in the allocated arguments object.
+  // If we allocated a parameter map, rdi will point there, otherwise to the
+  // backing store.
+ __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
+ __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+
+ // rax = address of new object (tagged)
+ // rbx = mapped parameter count (untagged)
+ // rcx = argument count (tagged)
+ // rdi = address of parameter map or backing store (tagged)
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ testq(rbx, rbx);
+ __ j(zero, &skip_parameter_map);
+
+ __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
+  // rbx contains the untagged mapped parameter count; add 2, tag it, and
+  // write it as the fixed array length.
+ __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ Integer64PlusConstantToSmi(r9, rbx, 2);
+ __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
+ __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
+ __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameter thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
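+  // These mappings implement non-strict argument aliasing: e.g. assigning
+  // to arguments[0] is observable through the first formal parameter.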
+ Label parameters_loop, parameters_test;
+
+ // Load tagged parameter count into r9.
+ __ movq(r9, Operand(rsp, 1 * kPointerSize));
+ __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
+ __ addq(r8, Operand(rsp, 3 * kPointerSize));
+ __ subq(r8, r9);
+ __ Move(r11, factory->the_hole_value());
+ __ movq(rdx, rdi);
+ __ SmiToInteger64(kScratchRegister, r9);
+ __ lea(rdi, Operand(rdi, kScratchRegister,
+ times_pointer_size,
+ kParameterMapHeaderSize));
+ // r9 = loop variable (tagged)
+ // r8 = mapping index (tagged)
+ // r11 = the hole value
+ // rdx = address of parameter map (tagged)
+ // rdi = address of backing store (tagged)
+ __ jmp(&parameters_test, Label::kNear);
+
+ __ bind(&parameters_loop);
+ __ SmiSubConstant(r9, r9, Smi::FromInt(1));
+ __ SmiToInteger64(kScratchRegister, r9);
+ __ movq(FieldOperand(rdx, kScratchRegister,
+ times_pointer_size,
+ kParameterMapHeaderSize),
+ r8);
+ __ movq(FieldOperand(rdi, kScratchRegister,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ r11);
+ __ SmiAddConstant(r8, r8, Smi::FromInt(1));
+ __ bind(&parameters_test);
+ __ SmiTest(r9);
+ __ j(not_zero, &parameters_loop, Label::kNear);
+
+ __ bind(&skip_parameter_map);
+
+ // rcx = argument count (tagged)
+ // rdi = address of backing store (tagged)
+ // Copy arguments header and remaining slots (if there are any).
+ __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
+ factory->fixed_array_map());
+ __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+
+ Label arguments_loop, arguments_test;
+ __ movq(r8, rbx);
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ // Untag rcx and r8 for the loop below.
+ __ SmiToInteger64(rcx, rcx);
+ __ SmiToInteger64(r8, r8);
+ __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
+ __ subq(rdx, kScratchRegister);
+ __ jmp(&arguments_test, Label::kNear);
+
+ __ bind(&arguments_loop);
+ __ subq(rdx, Immediate(kPointerSize));
+ __ movq(r9, Operand(rdx, 0));
+ __ movq(FieldOperand(rdi, r8,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ r9);
+ __ addq(r8, Immediate(1));
+
+ __ bind(&arguments_test);
+ __ cmpq(r8, rcx);
+ __ j(less, &arguments_loop, Label::kNear);
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ // rcx = argument count (untagged)
+ __ bind(&runtime);
+ __ Integer32ToSmi(rcx, rcx);
+ __ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
+  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+  // rsp[0] : return address
+  // rsp[8] : number of parameters
+  // rsp[16] : receiver displacement
+  // rsp[24] : function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ SmiToInteger64(rcx, rcx);
+ __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+ __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// rsp[0] : return address
// rsp[8] : number of parameters
// rsp[16] : receiver displacement
// rsp[24] : function
- // The displacement is used for skipping the return address and the
- // frame pointer on the stack. It is the offset of the last
- // parameter (if any) relative to the frame pointer.
- static const int kDisplacement = 2 * kPointerSize;
-
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
// Get the length from the frame.
- __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __ SmiToInteger64(rcx, rcx);
__ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
- __ SmiToInteger32(rcx,
- Operand(rdx,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- // Space on stack must already hold a smi.
- __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
- // Do not clobber the length index for the indexing operation since
-  // it is used to compute the size for allocation later.
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
+ __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ SmiToInteger64(rcx, rcx);
+ __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
__ movq(Operand(rsp, 2 * kPointerSize), rdx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ testl(rcx, rcx);
- __ j(zero, &add_arguments_object);
- __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ testq(rcx, rcx);
+ __ j(zero, &add_arguments_object, Label::kNear);
+ __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
+ __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
// Get the arguments boilerplate from the current (global) context.
- int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
__ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ const int offset =
+ Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
__ movq(rdi, Operand(rdi, offset));
// Copy the JS object part.
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
- __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
- __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
- __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
- __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
- __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
-
- // Setup the callee in-object property.
- ASSERT(Heap::arguments_callee_index == 0);
- __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rdi, i));
+ __ movq(FieldOperand(rax, i), rbx);
+ }
// Get the length (smi tagged) and set that as an in-object property too.
- ASSERT(Heap::arguments_length_index == 1);
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ movq(rcx, Operand(rsp, 1 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize),
+ rcx);
// If there are no actual arguments, we're done.
Label done;
- __ SmiTest(rcx);
+ __ testq(rcx, rcx);
__ j(zero, &done);
- // Get the parameters pointer from the stack and untag the length.
+ // Get the parameters pointer from the stack.
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
  // Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
+ __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
__ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+
__ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
- __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
+ // Untag the length for the loop below.
+ __ SmiToInteger64(rcx, rcx);
// Copy the fixed array slots.
Label loop;
__ bind(&loop);
- __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
+ __ movq(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
+ __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
__ addq(rdi, Immediate(kPointerSize));
__ subq(rdx, Immediate(kPointerSize));
- __ decl(rcx);
+ __ decq(rcx);
__ j(not_zero, &loop);
// Return and remove the on-stack parameters.
@@ -2430,7 +2285,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
@@ -2459,14 +2314,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static const int kJSRegExpOffset = 4 * kPointerSize;
Label runtime;
-
// Ensure that a RegExp stack is allocated.
+ Isolate* isolate = masm->isolate();
ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference::address_of_regexp_stack_memory_address(isolate);
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
- __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
- __ movq(kScratchRegister, Operand(kScratchRegister, 0));
+ ExternalReference::address_of_regexp_stack_memory_size(isolate);
+ __ Load(kScratchRegister, address_of_regexp_stack_memory_size);
__ testq(kScratchRegister, kScratchRegister);
__ j(zero, &runtime);
@@ -2477,32 +2331,32 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
// Check that the RegExp has been compiled (data contains a fixed array).
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ __ movq(rax, FieldOperand(rax, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
- Condition is_smi = masm->CheckSmi(rcx);
+ Condition is_smi = masm->CheckSmi(rax);
__ Check(NegateCondition(is_smi),
"Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister);
+ __ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
__ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
}
- // rcx: RegExp data (FixedArray)
+ // rax: RegExp data (FixedArray)
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
+ __ SmiToInteger32(rbx, FieldOperand(rax, JSRegExp::kDataTagOffset));
__ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
__ j(not_equal, &runtime);
- // rcx: RegExp data (FixedArray)
+ // rax: RegExp data (FixedArray)
// Check that the number of captures fit in the static offsets vector buffer.
__ SmiToInteger32(rdx,
- FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ FieldOperand(rax, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
__ leal(rdx, Operand(rdx, rdx, times_1, 2));
// Check that the static offsets vector buffer is large enough.
__ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
__ j(above, &runtime);
- // rcx: RegExp data (FixedArray)
+ // rax: RegExp data (FixedArray)
// rdx: Number of capture registers
// Check that the second argument is a string.
__ movq(rdi, Operand(rsp, kSubjectOffset));
@@ -2530,7 +2384,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the JSArray is in fast case.
__ movq(rbx, FieldOperand(rdi, JSArray::kElementsOffset));
__ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ Cmp(rdi, Factory::fixed_array_map());
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information. Ensure no overflow in add.
@@ -2542,7 +2397,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rax: RegExp data (FixedArray)
// Check the representation and encoding of the subject string.
- NearLabel seq_ascii_string, seq_two_byte_string, check_code;
+ Label seq_ascii_string, seq_two_byte_string, check_code;
__ movq(rdi, Operand(rsp, kSubjectOffset));
__ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
@@ -2550,10 +2405,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ andb(rbx, Immediate(
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
+ __ j(zero, &seq_two_byte_string, Label::kNear);
// Any other flat string must be a flat ascii string.
__ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
- __ j(zero, &seq_ascii_string);
+ __ j(zero, &seq_ascii_string, Label::kNear);
// Check for flat cons string.
// A flat cons string is a cons string where the second part is the empty
@@ -2565,8 +2420,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
__ j(not_zero, &runtime);
// String is a cons string.
- __ movq(rdx, FieldOperand(rdi, ConsString::kSecondOffset));
- __ Cmp(rdx, Factory::empty_string());
+ __ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
+ Heap::kEmptyStringRootIndex);
__ j(not_equal, &runtime);
__ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
__ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
@@ -2577,7 +2432,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
Immediate(kStringRepresentationMask | kStringEncodingMask));
STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
+ __ j(zero, &seq_two_byte_string, Label::kNear);
// Any other flat string must be ascii.
__ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
Immediate(kStringRepresentationMask));
@@ -2588,7 +2443,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rax: RegExp data (FixedArray)
__ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
__ Set(rcx, 1); // Type is ascii.
- __ jmp(&check_code);
+ __ jmp(&check_code, Label::kNear);
__ bind(&seq_two_byte_string);
// rdi: subject string (flat two-byte)
@@ -2615,15 +2470,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rcx: encoding of subject string (1 if ascii 0 if two_byte);
// r11: code
// All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(&Counters::regexp_entry_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->regexp_entry_native(), 1);
- static const int kRegExpExecuteArguments = 7;
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ static const int kRegExpExecuteArguments = 8;
int argument_slots_on_stack =
masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
- __ EnterApiExitFrame(argument_slots_on_stack); // Clobbers rax!
+ __ EnterApiExitFrame(argument_slots_on_stack);
- // Argument 7: Indicate that this is a direct call from JavaScript.
+ // Argument 8: Pass current isolate address.
+ // __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
+ // Immediate(ExternalReference::isolate_address()));
+ __ LoadAddress(kScratchRegister, ExternalReference::isolate_address());
__ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
+ kScratchRegister);
+
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
Immediate(1));
// Argument 6: Start (high end) of backtracking stack memory area.
@@ -2633,14 +2497,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ addq(r9, Operand(kScratchRegister, 0));
// Argument 6 passed in r9 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
+ __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
#endif
// Argument 5: static offsets vector buffer.
- __ movq(r8, ExternalReference::address_of_static_offsets_vector());
+ __ LoadAddress(r8,
+ ExternalReference::address_of_static_offsets_vector(isolate));
// Argument 5 passed in r8 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
+ __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize), r8);
#endif
// First four arguments are passed in registers on both Linux and Windows.
@@ -2664,13 +2529,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 4: End of string data
// Argument 3: Start of string data
- NearLabel setup_two_byte, setup_rest;
+ Label setup_two_byte, setup_rest;
__ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
- __ j(zero, &setup_two_byte);
+ __ j(zero, &setup_two_byte, Label::kNear);
__ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
__ lea(arg4, FieldOperand(rdi, rcx, times_1, SeqAsciiString::kHeaderSize));
__ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
- __ jmp(&setup_rest);
+ __ jmp(&setup_rest, Label::kNear);
__ bind(&setup_two_byte);
__ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
__ lea(arg4, FieldOperand(rdi, rcx, times_2, SeqTwoByteString::kHeaderSize));
@@ -2681,7 +2546,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movq(arg2, rbx);
// Argument 1: Subject string.
-#ifdef WIN64_
+#ifdef _WIN64
__ movq(arg1, rdi);
#else
// Already there in AMD64 calling convention.
@@ -2695,10 +2560,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ LeaveApiExitFrame();
// Check the result.
- NearLabel success;
+ Label success;
Label exception;
__ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
- __ j(equal, &success);
+ __ j(equal, &success, Label::kNear);
__ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
__ j(equal, &exception);
__ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
@@ -2741,17 +2606,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
// Get the static offsets vector filled by the native regexp code.
- __ movq(rcx, ExternalReference::address_of_static_offsets_vector());
+ __ LoadAddress(rcx,
+ ExternalReference::address_of_static_offsets_vector(isolate));
// rbx: last_match_info backing store (FixedArray)
// rcx: offsets vector
// rdx: number of capture registers
- NearLabel next_capture, done;
+ Label next_capture, done;
// Capture register counter starts from the number of capture registers and
// counts down until wrapping after zero.
__ bind(&next_capture);
__ subq(rdx, Immediate(1));
- __ j(negative, &done);
+ __ j(negative, &done, Label::kNear);
// Read the value from the static offsets vector buffer and make it a smi.
__ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
__ Integer32ToSmi(rdi, rdi);
@@ -2773,17 +2639,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ movq(rbx, pending_exception_address);
- __ movq(rax, Operand(rbx, 0));
+ ExternalReference pending_exception_address(
+ Isolate::k_pending_exception_address, isolate);
+ Operand pending_exception_operand =
+ masm->ExternalOperand(pending_exception_address, rbx);
+ __ movq(rax, pending_exception_operand);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
__ cmpq(rax, rdx);
__ j(equal, &runtime);
- __ movq(Operand(rbx, 0), rdx);
+ __ movq(pending_exception_operand, rdx);
__ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- NearLabel termination_exception;
- __ j(equal, &termination_exception);
+ Label termination_exception;
+ __ j(equal, &termination_exception, Label::kNear);
__ Throw(rax);
__ bind(&termination_exception);
@@ -2830,8 +2698,8 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
// Set empty properties FixedArray.
- __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
- Factory::empty_fixed_array());
+ __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
+ __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
// Set elements to point to FixedArray allocated right after the JSArray.
__ lea(rcx, Operand(rax, JSRegExpResult::kSize));
@@ -2851,13 +2719,13 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// rbx: Number of elements in array as int32.
// Set map.
- __ Move(FieldOperand(rcx, HeapObject::kMapOffset),
- Factory::fixed_array_map());
+ __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
+ __ movq(FieldOperand(rcx, HeapObject::kMapOffset), kScratchRegister);
// Set length.
__ Integer32ToSmi(rdx, rbx);
__ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
// Fill contents of fixed-array with the-hole.
- __ Move(rdx, Factory::the_hole_value());
+ __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
__ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
// Fill fixed array elements with hole.
// rax: JSArray.
@@ -2908,9 +2776,13 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Heap::GetNumberStringCache.
Label is_smi;
Label load_result_from_cache;
+ Factory* factory = masm->isolate()->factory();
if (!object_is_smi) {
__ JumpIfSmi(object, &is_smi);
- __ CheckMap(object, Factory::heap_number_map(), not_found, true);
+ __ CheckMap(object,
+ factory->heap_number_map(),
+ not_found,
+ DONT_DO_SMI_CHECK);
STATIC_ASSERT(8 == kDoubleSize);
__ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
@@ -2925,8 +2797,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
times_1,
FixedArray::kHeaderSize));
__ JumpIfSmi(probe, not_found);
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatures::Scope fscope(SSE2);
__ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
__ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
__ ucomisd(xmm0, xmm1);
@@ -2955,7 +2825,8 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
index,
times_1,
FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(&Counters::number_to_string_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->number_to_string_native(), 1);
}
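
(Editorial sketch.) The cache probed above pairs each number with its cached string; a hit returns the entry one pointer past the matching key, hence the `FixedArray::kHeaderSize + kPointerSize` load. A minimal sketch of that layout; the bit-mix hash below is an assumption for illustration, and note that `==`, like the `ucomisd` above, never matches a NaN key:

  #include <cstdint>
  #include <cstring>
  #include <string>
  #include <vector>

  struct Entry { bool used; double key; std::string value; };

  // Probe a power-of-two sized cache of (key, value) pairs.
  const std::string* Lookup(const std::vector<Entry>& cache, double number) {
    if (number != number) return nullptr;      // NaN never hits the cache
    uint64_t bits;
    std::memcpy(&bits, &number, sizeof bits);  // hash the raw double bits
    size_t index =
        static_cast<size_t>(bits ^ (bits >> 32)) & (cache.size() - 1);
    const Entry& e = cache[index];
    if (e.used && e.key == number) return &e.value;  // cache hit
    return nullptr;                                  // miss: go slow path
  }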
@@ -2998,6 +2869,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
Label check_unequal_objects, done;
+ Factory* factory = masm->isolate()->factory();
// Compare two smis if required.
if (include_smi_compare_) {
@@ -3025,40 +2897,39 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Two identical objects are equal unless they are both NaN or undefined.
{
- NearLabel not_identical;
+ Label not_identical;
__ cmpq(rax, rdx);
- __ j(not_equal, &not_identical);
+ __ j(not_equal, &not_identical, Label::kNear);
if (cc_ != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
- NearLabel check_for_nan;
+ Label check_for_nan;
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check_for_nan);
+ __ j(not_equal, &check_for_nan, Label::kNear);
__ Set(rax, NegativeComparisonResult(cc_));
__ ret(0);
__ bind(&check_for_nan);
}
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
// so we do the second best thing - test it ourselves.
// Note: if cc_ != equal, never_nan_nan_ is not used.
// We cannot set rax to EQUAL until just before return because
// rax must be unchanged on jump to not_identical.
-
if (never_nan_nan_ && (cc_ == equal)) {
__ Set(rax, EQUAL);
__ ret(0);
} else {
- NearLabel heap_number;
+ Label heap_number;
// If it's not a heap number, then return equal for (in)equality operator.
__ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(equal, &heap_number);
+ factory->heap_number_map());
+ __ j(equal, &heap_number, Label::kNear);
if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(above_equal, &not_identical);
+ // Call runtime on identical objects. Otherwise return equal.
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &not_identical, Label::kNear);
}
__ Set(rax, EQUAL);
__ ret(0);
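
(Editorial sketch.) The fast path above hinges on one JS subtlety: a value compared against itself is equal unless it is NaN, and for ordering operators undefined must also answer "unordered". A compact sketch of that decision, with illustrative inputs:

  #include <cmath>

  enum Result { LESS = -1, EQUAL = 0, GREATER = 1, UNORDERED = 2 };

  Result CompareToSelf(bool is_undefined, bool is_heap_number, double value,
                       bool is_equality_op) {
    if (!is_equality_op && is_undefined) return UNORDERED;  // undefined < undefined is false
    if (is_heap_number && std::isnan(value)) return UNORDERED;  // NaN != NaN
    return EQUAL;  // identical references otherwise compare equal
  }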
@@ -3098,7 +2969,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Check if the non-smi operand is a heap number.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ factory->heap_number_map());
// If heap number, handle it in the slow case.
__ j(equal, &slow);
// Return non-equal. ebx (the lower half of rbx) is not zero.
@@ -3113,10 +2984,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
// There is no test for undetectability in strict equality.
// If the first object is a JS object, we have done pointer comparison.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- NearLabel first_non_object;
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &first_non_object);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ Label first_non_object;
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(below, &first_non_object, Label::kNear);
// Return non-zero (eax (not rax) is not zero)
Label return_not_equal;
STATIC_ASSERT(kHeapObjectTag != 0);
@@ -3128,7 +2999,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ CmpInstanceType(rcx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
- __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -3143,14 +3014,14 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Generate the number comparison code.
if (include_number_compare_) {
Label non_number_comparison;
- NearLabel unordered;
+ Label unordered;
FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
__ xorl(rax, rax);
__ xorl(rcx, rcx);
__ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered);
+ __ j(parity_even, &unordered, Label::kNear);
// Return a result of -1, 0, or 1, based on EFLAGS.
__ setcc(above, rax);
__ setcc(below, rcx);
@@ -3190,13 +3061,21 @@ void CompareStub::Generate(MacroAssembler* masm) {
rdx, rax, rcx, rbx, &check_unequal_objects);
// Inline comparison of ascii strings.
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ if (cc_ == equal) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(masm,
rdx,
rax,
rcx,
- rbx,
- rdi,
- r8);
+ rbx);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ rdx,
+ rax,
+ rcx,
+ rbx,
+ rdi,
+ r8);
+ }
#ifdef DEBUG
__ Abort("Unexpected fall-through from string comparison");
@@ -3207,7 +3086,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Not strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
- NearLabel not_both_objects, return_unequal;
+ Label not_both_objects, return_unequal;
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
@@ -3215,17 +3094,17 @@ void CompareStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagMask == 1);
__ lea(rcx, Operand(rax, rdx, times_1, 0));
__ testb(rcx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
- __ j(below, &not_both_objects);
- __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &not_both_objects);
+ __ j(not_zero, &not_both_objects, Label::kNear);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
+ __ j(below, &not_both_objects, Label::kNear);
+ __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(below, &not_both_objects, Label::kNear);
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal);
+ __ j(zero, &return_unequal, Label::kNear);
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal);
+ __ j(zero, &return_unequal, Label::kNear);
// The objects are both undetectable, so they both compare as the value
// undefined, and are equal.
__ Set(rax, EQUAL);
@@ -3283,30 +3162,22 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow;
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
+ // The receiver might implicitly be the global object. This is
+ // indicated by passing the hole as the receiver to the call
+ // function stub.
+ if (ReceiverMightBeImplicit()) {
+ Label call;
// Get the receiver from the stack.
// +1 ~ return address
- Label receiver_is_value, receiver_is_js_object;
__ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ JumpIfSmi(rax, &receiver_is_value);
-
- // Check if the receiver is a valid JS object.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
- __ j(above_equal, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ LeaveInternalFrame();
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
-
- __ bind(&receiver_is_js_object);
+ // Call as function is indicated with the hole.
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &call, Label::kNear);
+ // Patch the receiver on the stack with the global receiver object.
+ __ movq(rbx, GlobalObjectOperand());
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rbx);
+ __ bind(&call);
}
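
(Editorial sketch.) In plain terms, the new protocol is: a caller with no explicit receiver pushes the hole, and the stub swaps in the global receiver just before the call. A sketch with illustrative stand-in types:

  struct Object;
  extern Object* the_hole_value;    // sentinel: "receiver was implicit"
  extern Object* global_receiver;   // the global object's receiver proxy

  // Patch the receiver slot the way the stub patches the stack slot.
  void PatchImplicitReceiver(Object** receiver_slot) {
    if (*receiver_slot == the_hole_value) {
      *receiver_slot = global_receiver;
    }
  }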
// Get the function to call from the stack.
@@ -3321,7 +3192,23 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Fast-case: Just invoke the function.
ParameterCount actual(argc_);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+
+ if (ReceiverMightBeImplicit()) {
+ Label call_as_function;
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &call_as_function);
+ __ InvokeFunction(rdi,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ CALL_AS_METHOD);
+ __ bind(&call_as_function);
+ }
+ __ InvokeFunction(rdi,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ CALL_AS_FUNCTION);
// Slow-case: Non-function called.
__ bind(&slow);
@@ -3331,11 +3218,17 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ Set(rax, argc_);
__ Set(rbx, 0);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ Handle<Code> adaptor =
+ Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
__ Jump(adaptor, RelocInfo::CODE_TARGET);
}
+bool CEntryStub::NeedsImmovableCode() {
+ return false;
+}
+
+
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// Throw exception in eax.
__ Throw(rax);
@@ -3353,7 +3246,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// rbp: frame pointer (restored after C call).
// rsp: stack pointer (restored after C call).
// r14: number of arguments including receiver (C callee-saved).
- // r12: pointer to the first argument (C callee-saved).
+ // r15: pointer to the first argument (C callee-saved).
// This pointer is reused in LeaveExitFrame(), so it is stored in a
// callee-saved register.
@@ -3383,10 +3276,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
}
ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth();
+ ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
if (always_allocate_scope) {
- __ movq(kScratchRegister, scope_depth);
- __ incl(Operand(kScratchRegister, 0));
+ Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
+ __ incl(scope_depth_operand);
}
// Call C function.
@@ -3394,30 +3287,33 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
// Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
__ movq(StackSpaceOperand(0), r14); // argc.
- __ movq(StackSpaceOperand(1), r12); // argv.
+ __ movq(StackSpaceOperand(1), r15); // argv.
if (result_size_ < 2) {
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax).
__ lea(rcx, StackSpaceOperand(0));
+ __ LoadAddress(rdx, ExternalReference::isolate_address());
} else {
ASSERT_EQ(2, result_size_);
// Pass a pointer to the result location as the first argument.
__ lea(rcx, StackSpaceOperand(2));
// Pass a pointer to the Arguments object as the second argument.
__ lea(rdx, StackSpaceOperand(0));
+ __ LoadAddress(r8, ExternalReference::isolate_address());
}
#else // _WIN64
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
__ movq(rdi, r14); // argc.
- __ movq(rsi, r12); // argv.
+ __ movq(rsi, r15); // argv.
+ __ movq(rdx, ExternalReference::isolate_address());
#endif
__ call(rbx);
// Result is in rax - do not destroy this register!
if (always_allocate_scope) {
- __ movq(kScratchRegister, scope_depth);
- __ decl(Operand(kScratchRegister, 0));
+ Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
+ __ decl(scope_depth_operand);
}
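
(Editorial sketch.) The paired incl/decl around the C call is the generated-code equivalent of a counted scope. A minimal RAII sketch of the same idea, assuming a plain int counter:

  class AlwaysAllocateScope {
   public:
    explicit AlwaysAllocateScope(int* depth) : depth_(depth) { ++*depth_; }
    ~AlwaysAllocateScope() { --*depth_; }
   private:
    int* depth_;  // e.g. the heap's always-allocate depth counter
  };

  // Usage: the counter is non-zero exactly for the duration of the call.
  // { AlwaysAllocateScope scope(&depth); CallCFunction(); }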
// Check for failure result.
@@ -3446,11 +3342,11 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Handling of failure.
__ bind(&failure_returned);
- NearLabel retry;
+ Label retry;
// If the returned exception is RETRY_AFTER_GC continue at retry label
STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
__ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry);
+ __ j(zero, &retry, Label::kNear);
// Special handling of out of memory exceptions.
__ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
@@ -3458,12 +3354,13 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(equal, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ movq(kScratchRegister, pending_exception_address);
- __ movq(rax, Operand(kScratchRegister, 0));
- __ movq(rdx, ExternalReference::the_hole_value_location());
- __ movq(rdx, Operand(rdx, 0));
- __ movq(Operand(kScratchRegister, 0), rdx);
+ ExternalReference pending_exception_address(
+ Isolate::k_pending_exception_address, masm->isolate());
+ Operand pending_exception_operand =
+ masm->ExternalOperand(pending_exception_address);
+ __ movq(rax, pending_exception_operand);
+ __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ movq(pending_exception_operand, rdx);
// Special handling of termination exceptions which are uncatchable
// by javascript code.
@@ -3514,7 +3411,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// rbp: frame pointer of exit frame (restored after C call).
// rsp: stack pointer (restored after C call).
// r14: number of arguments including receiver (C callee-saved).
- // r12: argv pointer (C callee-saved).
+ // r15: argv pointer (C callee-saved).
Label throw_normal_exception;
Label throw_termination_exception;
@@ -3562,54 +3459,64 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
#ifdef ENABLE_LOGGING_AND_PROFILING
Label not_outermost_js, not_outermost_js_2;
#endif
-
- // Setup frame.
- __ push(rbp);
- __ movq(rbp, rsp);
-
- // Push the stack frame type marker twice.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- // Scratch register is neither callee-save, nor an argument register on any
- // platform. It's free to use at this point.
- // Cannot use smi-register for loading yet.
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
- RelocInfo::NONE);
- __ push(kScratchRegister); // context slot
- __ push(kScratchRegister); // function slot
- // Save callee-saved registers (X64/Win64 calling conventions).
- __ push(r12);
- __ push(r13);
- __ push(r14);
- __ push(r15);
+ { // NOLINT. Scope block confuses linter.
+ MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
+ // Setup frame.
+ __ push(rbp);
+ __ movq(rbp, rsp);
+
+ // Push the stack frame type marker twice.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ // Scratch register is neither callee-saved nor an argument register on any
+ // platform. It's free to use at this point.
+ // Cannot use smi-register for loading yet.
+ __ movq(kScratchRegister,
+ reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
+ RelocInfo::NONE);
+ __ push(kScratchRegister); // context slot
+ __ push(kScratchRegister); // function slot
+ // Save callee-saved registers (X64/Win64 calling conventions).
+ __ push(r12);
+ __ push(r13);
+ __ push(r14);
+ __ push(r15);
#ifdef _WIN64
- __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
- __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
#endif
- __ push(rbx);
- // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low low 64 bits are
- // callee save as well.
+ __ push(rbx);
+ // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
+ // callee-saved as well.
+
+ // Set up the roots and smi constant registers.
+ // Needs to be done before any further smi loads.
+ __ InitializeSmiConstantRegister();
+ __ InitializeRootRegister();
+ }
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
- __ load_rax(c_entry_fp);
- __ push(rax);
+ Isolate* isolate = masm->isolate();
- // Set up the roots and smi constant registers.
- // Needs to be done before any further smi loads.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ movq(kRootRegister, roots_address);
- __ InitializeSmiConstantRegister();
+ // Save copies of the top frame descriptor on the stack.
+ ExternalReference c_entry_fp(Isolate::k_c_entry_fp_address, isolate);
+ {
+ Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
+ __ push(c_entry_fp_operand);
+ }
#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
- __ load_rax(js_entry_sp);
+ ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate);
+ __ Load(rax, js_entry_sp);
__ testq(rax, rax);
__ j(not_zero, &not_outermost_js);
+ __ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ movq(rax, rbp);
- __ store_rax(js_entry_sp);
+ __ Store(js_entry_sp, rax);
+ Label cont;
+ __ jmp(&cont);
__ bind(&not_outermost_js);
+ __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
+ __ bind(&cont);
#endif
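
(Editorial sketch.) The new bookkeeping above works as follows: the first JS entry records its frame pointer in js_entry_sp and pushes an OUTERMOST marker; nested re-entries push an INNER marker; on exit, popping an OUTERMOST marker clears js_entry_sp again. A sketch:

  #include <cstdint>

  enum Marker { INNER_JSENTRY_FRAME, OUTERMOST_JSENTRY_FRAME };

  uintptr_t js_entry_sp = 0;  // 0 while no JS frame is on the stack

  Marker OnEntry(uintptr_t frame_pointer) {
    if (js_entry_sp == 0) {          // outermost call into JS
      js_entry_sp = frame_pointer;
      return OUTERMOST_JSENTRY_FRAME;
    }
    return INNER_JSENTRY_FRAME;      // re-entry from a callback
  }

  void OnExit(Marker m) {
    if (m == OUTERMOST_JSENTRY_FRAME) js_entry_sp = 0;
  }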
// Call a faked try-block that does the invoke.
@@ -3617,8 +3524,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Caught exception: Store result (exception) in the pending
// exception field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ store_rax(pending_exception);
+ ExternalReference pending_exception(Isolate::k_pending_exception_address,
+ isolate);
+ __ Store(pending_exception, rax);
__ movq(rax, Failure::Exception(), RelocInfo::NONE);
__ jmp(&exit);
@@ -3627,8 +3535,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
// Clear any pending exceptions.
- __ load_rax(ExternalReference::the_hole_value_location());
- __ store_rax(pending_exception);
+ __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ Store(pending_exception, rax);
// Fake a receiver (NULL).
__ push(Immediate(0)); // receiver
@@ -3639,35 +3547,34 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// directly in the code, because the builtin stubs may not have been
// generated yet at the time this code is generated.
if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
- __ load_rax(construct_entry);
+ ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+ isolate);
+ __ Load(rax, construct_entry);
} else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
- __ load_rax(entry);
+ ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
+ __ Load(rax, entry);
}
__ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
__ call(kScratchRegister);
// Unlink this frame from the handler chain.
- __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
- __ pop(Operand(kScratchRegister, 0));
- // Pop next_sp.
- __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+ __ PopTryHandler();
+ __ bind(&exit);
#ifdef ENABLE_LOGGING_AND_PROFILING
- // If current RBP value is the same as js_entry_sp value, it means that
- // the current function is the outermost.
- __ movq(kScratchRegister, js_entry_sp);
- __ cmpq(rbp, Operand(kScratchRegister, 0));
+ // Check if the current stack frame is marked as the outermost JS frame.
+ __ pop(rbx);
+ __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ j(not_equal, &not_outermost_js_2);
+ __ movq(kScratchRegister, js_entry_sp);
__ movq(Operand(kScratchRegister, 0), Immediate(0));
__ bind(&not_outermost_js_2);
#endif
// Restore the top frame descriptor from the stack.
- __ bind(&exit);
- __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
- __ pop(Operand(kScratchRegister, 0));
+ { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
+ __ pop(c_entry_fp_operand);
+ }
// Restore callee-saved registers (X64 conventions).
__ pop(rbx);
@@ -3690,88 +3597,159 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
void InstanceofStub::Generate(MacroAssembler* masm) {
// Implements "value instanceof function" operator.
- // Expected input state:
+ // Expected input state with no inline cache:
// rsp[0] : return address
// rsp[1] : function pointer
// rsp[2] : value
+ // Expected input state with an inline one-element cache:
+ // rsp[0] : return address
+ // rsp[1] : offset from return address to location of inline cache
+ // rsp[2] : function pointer
+ // rsp[3] : value
// Returns a bitwise zero to indicate that the value
// is an instance of the function and anything else to
// indicate that the value is not an instance.
- // None of the flags are supported on X64.
- ASSERT(flags_ == kNoFlags);
+ static const int kOffsetToMapCheckValue = 2;
+ static const int kOffsetToResultValue = 18;
+ // The last 4 bytes of the instruction sequence
+ // movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
+ // Move(kScratchRegister, FACTORY->the_hole_value())
+ // in front of the hole value address.
+ static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
+ // The last 4 bytes of the instruction sequence
+ // __ j(not_equal, &cache_miss);
+ // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
+ // before the offset of the hole value in the root array.
+ static const unsigned int kWordBeforeResultValue = 0x458B4909;
+ // Only the inline check flag is supported on X64.
+ ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
+ int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0;
// Get the object - go slow case if it's a smi.
Label slow;
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
+
+ __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space));
__ JumpIfSmi(rax, &slow);
// Check that the left hand is a JS object. Leave its map in rax.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
__ j(below, &slow);
- __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
__ j(above, &slow);
// Get the prototype of the function.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
// rdx is function, rax is map.
- // Look up the function and the map in the instanceof cache.
- NearLabel miss;
- __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &miss);
- __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &miss);
- __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
+ // If there is a call site cache, don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck()) {
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+ __ j(not_equal, &miss, Label::kNear);
+ __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+ __ j(not_equal, &miss, Label::kNear);
+ __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
+ __ bind(&miss);
+ }
- __ bind(&miss);
__ TryGetFunctionPrototype(rdx, rbx, &slow);
// Check that the function prototype is a JS object.
__ JumpIfSmi(rbx, &slow);
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
__ j(below, &slow);
- __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
__ j(above, &slow);
// Register mapping:
// rax is object map.
// rdx is function.
// rbx is function prototype.
- __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+ if (!HasCallSiteInlineCheck()) {
+ __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+ } else {
+ __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
+ __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movq(Operand(kScratchRegister, kOffsetToMapCheckValue), rax);
+ if (FLAG_debug_code) {
+ __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
+ __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
+ __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
+ }
+ }
__ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
// Loop through the prototype chain looking for the function prototype.
- NearLabel loop, is_instance, is_not_instance;
+ Label loop, is_instance, is_not_instance;
__ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
__ bind(&loop);
__ cmpq(rcx, rbx);
- __ j(equal, &is_instance);
+ __ j(equal, &is_instance, Label::kNear);
__ cmpq(rcx, kScratchRegister);
// The code at is_not_instance assumes that kScratchRegister contains a
// non-zero GCable value (the null object in this case).
- __ j(equal, &is_not_instance);
+ __ j(equal, &is_not_instance, Label::kNear);
__ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
__ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
__ jmp(&loop);
__ bind(&is_instance);
- __ xorl(rax, rax);
- // Store bitwise zero in the cache. This is a Smi in GC terms.
- STATIC_ASSERT(kSmiTag == 0);
- __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
+ if (!HasCallSiteInlineCheck()) {
+ __ xorl(rax, rax);
+ // Store bitwise zero in the cache. This is a Smi in GC terms.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ // Store offset of true in the root array at the inline check site.
+ ASSERT((Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
+ == 0xB0 - 0x100);
+ __ movl(rax, Immediate(0xB0)); // TrueValue is at -10 * kPointerSize.
+ __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
+ __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
+ if (FLAG_debug_code) {
+ __ movl(rax, Immediate(kWordBeforeResultValue));
+ __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
+ __ Assert(equal, "InstanceofStub unexpected call site cache (mov).");
+ }
+ __ Set(rax, 0);
+ }
+ __ ret(2 * kPointerSize + extra_stack_space);
__ bind(&is_not_instance);
- // We have to store a non-zero value in the cache.
- __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
+ if (!HasCallSiteInlineCheck()) {
+ // We have to store a non-zero value in the cache.
+ __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ // Store offset of false in the root array at the inline check site.
+ ASSERT((Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
+ == 0xB8 - 0x100);
+ __ movl(rax, Immediate(0xB8)); // FalseValue is at -9 * kPointerSize.
+ __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
+ __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
+ if (FLAG_debug_code) {
+ __ movl(rax, Immediate(kWordBeforeResultValue));
+ __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
+ __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
+ }
+ }
+ __ ret(2 * kPointerSize + extra_stack_space);
// Slow-case: Go through the JavaScript implementation.
__ bind(&slow);
+ if (HasCallSiteInlineCheck()) {
+ // Remove extra value from the stack.
+ __ pop(rcx);
+ __ pop(rax);
+ __ push(rcx);
+ }
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
}
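
(Editorial sketch.) Stripped of caching and call-site patching, the loop at the heart of the stub is an ordinary prototype-chain walk. A sketch with illustrative stand-in types:

  struct Map;
  struct Object { Map* map; };
  struct Map { Object* prototype; };

  extern Object* null_object;  // JS null terminates every prototype chain

  bool IsInstanceOf(Object* value, Object* function_prototype) {
    for (Object* p = value->map->prototype; p != null_object;
         p = p->map->prototype) {
      if (p == function_prototype) return true;  // found: is an instance
    }
    return false;                                // hit null: not an instance
  }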
@@ -3805,7 +3783,8 @@ const char* CompareStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* cc_name;
@@ -3936,10 +3915,14 @@ void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharCodeAt slow case");
+ Factory* factory = masm->isolate()->factory();
// Index is not a smi.
__ bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
- __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
+ __ CheckMap(index_,
+ factory->heap_number_map(),
+ index_not_number_,
+ DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
__ push(object_);
__ push(index_);
@@ -4048,15 +4031,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Make sure that both arguments are strings if not known in advance.
if (flags_ == NO_STRING_ADD_FLAGS) {
- Condition is_smi;
- is_smi = masm->CheckSmi(rax);
- __ j(is_smi, &string_add_runtime);
+ __ JumpIfSmi(rax, &string_add_runtime);
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
__ j(above_equal, &string_add_runtime);
// First argument is a string, test second.
- is_smi = masm->CheckSmi(rdx);
- __ j(is_smi, &string_add_runtime);
+ __ JumpIfSmi(rdx, &string_add_runtime);
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
__ j(above_equal, &string_add_runtime);
} else {
@@ -4079,20 +4059,21 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rax: first string
// rdx: second string
// Check if either of the strings are empty. In that case return the other.
- NearLabel second_not_zero_length, both_not_zero_length;
+ Label second_not_zero_length, both_not_zero_length;
__ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
__ SmiTest(rcx);
- __ j(not_zero, &second_not_zero_length);
+ __ j(not_zero, &second_not_zero_length, Label::kNear);
// Second string is empty, result is first string which is already in rax.
- __ IncrementCounter(&Counters::string_add_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&second_not_zero_length);
__ movq(rbx, FieldOperand(rax, String::kLengthOffset));
__ SmiTest(rbx);
- __ j(not_zero, &both_not_zero_length);
+ __ j(not_zero, &both_not_zero_length, Label::kNear);
// First string is empty, result is second string which is in rdx.
__ movq(rax, rdx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Both strings are non-empty.
@@ -4118,8 +4099,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Look at the length of the result of adding the two strings.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
__ SmiAdd(rbx, rbx, rcx);
- // Use the runtime system when adding two one character strings, as it
- // contains optimizations for this specific case using the symbol table.
+ // Use the symbol table when adding two one-character strings, as it
+ // helps later optimizations to return a symbol here.
__ SmiCompare(rbx, Smi::FromInt(2));
__ j(not_equal, &longer_than_two);
@@ -4135,8 +4116,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// just allocate a new one.
Label make_two_character_string, make_flat_ascii_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, rbx, rcx, r14, r11, rdi, r12, &make_two_character_string);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&make_two_character_string);
@@ -4176,7 +4157,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
__ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
__ movq(rax, rcx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
@@ -4250,7 +4231,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rdi: length of second argument
StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
__ movq(rax, rbx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Handle creating a flat two byte result.
@@ -4287,7 +4268,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rdi: length of second argument
StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
__ movq(rax, rbx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Just jump to runtime to add the two strings.
@@ -4383,9 +4364,9 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
ASSERT(count.is(rcx)); // rep movs count
// Nothing to do for zero characters.
- NearLabel done;
+ Label done;
__ testl(count, count);
- __ j(zero, &done);
+ __ j(zero, &done, Label::kNear);
// Make count the number of bytes to copy.
if (!ascii) {
@@ -4394,9 +4375,9 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
}
// Don't enter the rep movs if there are less than 4 bytes to copy.
- NearLabel last_bytes;
+ Label last_bytes;
__ testl(count, Immediate(~7));
- __ j(zero, &last_bytes);
+ __ j(zero, &last_bytes, Label::kNear);
// Copy from edi to esi using rep movs instruction.
__ movl(kScratchRegister, count);
@@ -4410,7 +4391,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Check if there are more bytes to copy.
__ bind(&last_bytes);
__ testl(count, count);
- __ j(zero, &done);
+ __ j(zero, &done, Label::kNear);
// Copy remaining characters.
Label loop;
@@ -4438,10 +4419,10 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Make sure that both characters are not digits as such strings have a
// different hash algorithm. Don't try to look for these in the symbol table.
- NearLabel not_array_index;
+ Label not_array_index;
__ leal(scratch, Operand(c1, -'0'));
__ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index);
+ __ j(above, &not_array_index, Label::kNear);
__ leal(scratch, Operand(c2, -'0'));
__ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
__ j(below_equal, not_found);
@@ -4471,15 +4452,14 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
__ decl(mask);
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+ Register map = scratch4;
// Registers
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string (32-bit int)
// symbol_table: symbol table
// mask: capacity mask (32-bit int)
- // undefined: undefined value
+ // map: -
// scratch: -
// Perform a number of probes in the symbol table.
@@ -4494,7 +4474,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
}
__ andl(scratch, mask);
- // Load the entry from the symble table.
+ // Load the entry from the symbol table.
Register candidate = scratch; // Scratch register contains candidate.
STATIC_ASSERT(SymbolTable::kEntrySize == 1);
__ movq(candidate,
@@ -4504,8 +4484,16 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
SymbolTable::kElementsStartOffset));
// If entry is undefined no string with this hash can be found.
- __ cmpq(candidate, undefined);
+ Label is_string;
+ __ CmpObjectType(candidate, ODDBALL_TYPE, map);
+ __ j(not_equal, &is_string, Label::kNear);
+
+ __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
__ j(equal, not_found);
+ // Must be null (deleted entry).
+ __ jmp(&next_probe[i]);
+
+ __ bind(&is_string);
// If length is not 2 the string is not a candidate.
__ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
@@ -4517,8 +4505,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register temp = kScratchRegister;
// Check that the candidate is a non-external ascii string.
- __ movq(temp, FieldOperand(candidate, HeapObject::kMapOffset));
- __ movzxbl(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(
temp, temp, &next_probe[i]);
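
(Editorial sketch.) Each probe above classifies the slot it lands on; only undefined ends the search, because deletions leave null behind and must not break the probe chain. A sketch of one probe step, with assumed helper predicates:

  #include <cstdint>

  struct Object;
  extern const Object* undefined_value;
  extern const Object* null_value;
  extern int StringLength(const Object* s);        // assumed helpers
  extern uint32_t FirstTwoChars(const Object* s);

  enum ProbeResult { FOUND, NOT_FOUND, KEEP_PROBING };

  ProbeResult InspectSlot(const Object* candidate, uint32_t two_chars) {
    if (candidate == undefined_value) return NOT_FOUND;  // chain ends here
    if (candidate == null_value) return KEEP_PROBING;    // deleted entry
    if (StringLength(candidate) != 2) return KEEP_PROBING;
    if (FirstTwoChars(candidate) == two_chars) return FOUND;  // reuse symbol
    return KEEP_PROBING;
  }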
@@ -4591,7 +4578,7 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
// if (hash == 0) hash = 27;
Label hash_not_zero;
__ j(not_zero, &hash_not_zero);
- __ movl(hash, Immediate(27));
+ __ Set(hash, 27);
__ bind(&hash_not_zero);
}
@@ -4696,7 +4683,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rsi: character of sub string start
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
__ movq(rsi, rdx); // Restore rsi.
- __ IncrementCounter(&Counters::sub_string_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1);
__ ret(kArgumentsSize);
__ bind(&non_ascii_flat);
@@ -4733,7 +4721,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movq(rsi, rdx); // Restore esi.
__ bind(&return_rax);
- __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ IncrementCounter(counters->sub_string_native(), 1);
__ ret(kArgumentsSize);
// Just jump to runtime to create the sub string.
@@ -4742,6 +4730,47 @@ void SubStringStub::Generate(MacroAssembler* masm) {
}
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2) {
+ Register length = scratch1;
+
+ // Compare lengths.
+ Label check_zero_length;
+ __ movq(length, FieldOperand(left, String::kLengthOffset));
+ __ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
+ __ j(equal, &check_zero_length, Label::kNear);
+ __ Move(rax, Smi::FromInt(NOT_EQUAL));
+ __ ret(0);
+
+ // Check if the length is zero.
+ Label compare_chars;
+ __ bind(&check_zero_length);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ SmiTest(length);
+ __ j(not_zero, &compare_chars, Label::kNear);
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ ret(0);
+
+ // Compare characters.
+ __ bind(&compare_chars);
+ Label strings_not_equal;
+ GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
+ &strings_not_equal, Label::kNear);
+
+ // Characters are equal.
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ ret(0);
+
+ // Characters are not equal.
+ __ bind(&strings_not_equal);
+ __ Move(rax, Smi::FromInt(NOT_EQUAL));
+ __ ret(0);
+}
+
+
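
(Editorial sketch.) The new equality helper reduces to three steps once lifted out of assembly: unequal lengths are never equal, length zero is trivially equal, otherwise compare the bytes. A sketch over raw ASCII buffers:

  #include <cstring>

  bool FlatAsciiEquals(const char* left, int left_length,
                       const char* right, int right_length) {
    if (left_length != right_length) return false;  // lengths must match
    if (left_length == 0) return true;              // both empty
    return std::memcmp(left, right, left_length) == 0;
  }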
void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
@@ -4761,8 +4790,8 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
FieldOperand(right, String::kLengthOffset));
// Register scratch4 now holds left.length - right.length.
const Register length_difference = scratch4;
- NearLabel left_shorter;
- __ j(less, &left_shorter);
+ Label left_shorter;
+ __ j(less, &left_shorter, Label::kNear);
// The right string isn't longer than the left one.
// Get the right string's length by subtracting the (non-negative) difference
// from the left string's length.
@@ -4771,54 +4800,30 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Register scratch1 now holds Min(left.length, right.length).
const Register min_length = scratch1;
- NearLabel compare_lengths;
+ Label compare_lengths;
// If min-length is zero, go directly to comparing lengths.
__ SmiTest(min_length);
- __ j(zero, &compare_lengths);
+ __ j(zero, &compare_lengths, Label::kNear);
- __ SmiToInteger32(min_length, min_length);
+ // Compare loop.
+ Label result_not_equal;
+ GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
+ &result_not_equal, Label::kNear);
- // Registers scratch2 and scratch3 are free.
- NearLabel result_not_equal;
- Label loop;
- {
- // Check characters 0 .. min_length - 1 in a loop.
- // Use scratch3 as loop index, min_length as limit and scratch2
- // for computation.
- const Register index = scratch3;
- __ movl(index, Immediate(0)); // Index into strings.
- __ bind(&loop);
- // Compare characters.
- // TODO(lrn): Could we load more than one character at a time?
- __ movb(scratch2, FieldOperand(left,
- index,
- times_1,
- SeqAsciiString::kHeaderSize));
- // Increment index and use -1 modifier on next load to give
- // the previous load extra time to complete.
- __ addl(index, Immediate(1));
- __ cmpb(scratch2, FieldOperand(right,
- index,
- times_1,
- SeqAsciiString::kHeaderSize - 1));
- __ j(not_equal, &result_not_equal);
- __ cmpl(index, min_length);
- __ j(not_equal, &loop);
- }
// Completed loop without finding different characters.
// Compare lengths (precomputed).
__ bind(&compare_lengths);
__ SmiTest(length_difference);
- __ j(not_zero, &result_not_equal);
+ __ j(not_zero, &result_not_equal, Label::kNear);
// Result is EQUAL.
__ Move(rax, Smi::FromInt(EQUAL));
__ ret(0);
- NearLabel result_greater;
+ Label result_greater;
__ bind(&result_not_equal);
// Unequal comparison of left to right, either character or length.
- __ j(greater, &result_greater);
+ __ j(greater, &result_greater, Label::kNear);
// Result is LESS.
__ Move(rax, Smi::FromInt(LESS));
@@ -4831,6 +4836,36 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
}
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch,
+ Label* chars_not_equal,
+ Label::Distance near_jump) {
+ // Change index to run from -length to -1 by adding length to string
+ // start. This means that loop ends when index reaches zero, which
+ // doesn't need an additional compare.
+ __ SmiToInteger32(length, length);
+ __ lea(left,
+ FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(right,
+ FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
+ __ neg(length);
+ Register index = length; // index = -length;
+
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ __ movb(scratch, Operand(left, index, times_1, 0));
+ __ cmpb(scratch, Operand(right, index, times_1, 0));
+ __ j(not_equal, chars_not_equal, near_jump);
+ __ addq(index, Immediate(1));
+ __ j(not_zero, &loop);
+}
+
+
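
(Editorial sketch.) The index trick above deserves a plain restatement: by biasing both string pointers by `length` and counting the index from `-length` up to zero, the loop's increment doubles as its termination test, so no separate limit compare is needed:

  #include <cstddef>

  bool AsciiCharsEqual(const char* left, const char* right, ptrdiff_t length) {
    left += length;    // both pointers now point one past the data
    right += length;
    for (ptrdiff_t i = -length; i != 0; ++i) {  // i runs -length .. -1
      if (left[i] != right[i]) return false;    // first mismatch decides
    }
    return true;       // ran off the end: all characters equal
  }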
void StringCompareStub::Generate(MacroAssembler* masm) {
Label runtime;
@@ -4843,11 +4878,12 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
// Check for identity.
- NearLabel not_same;
+ Label not_same;
__ cmpq(rdx, rax);
- __ j(not_equal, &not_same);
+ __ j(not_equal, &not_same, Label::kNear);
__ Move(rax, Smi::FromInt(EQUAL));
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_compare_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&not_same);
@@ -4856,7 +4892,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
// Inline comparison of ascii strings.
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ IncrementCounter(counters->string_compare_native(), 1);
// Drop arguments from the stack
__ pop(rcx);
__ addq(rsp, Immediate(2 * kPointerSize));
@@ -4870,72 +4906,18 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
-void StringCharAtStub::Generate(MacroAssembler* masm) {
- // Expects two arguments (object, index) on the stack:
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: index
- // rsp[16]: object
-
- Register object = rbx;
- Register index = rax;
- Register scratch1 = rcx;
- Register scratch2 = rdx;
- Register result = rax;
-
- __ pop(scratch1); // Return address.
- __ pop(index);
- __ pop(object);
- __ push(scratch1);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ Move(result, Factory::empty_string());
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Move(result, Smi::FromInt(0));
- __ jmp(&done);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm, call_helper);
-
- __ bind(&done);
- __ ret(0);
-}
-
-
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMIS);
- NearLabel miss;
- __ JumpIfNotBothSmi(rdx, rax, &miss);
+ Label miss;
+ __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
__ subq(rax, rdx);
} else {
- NearLabel done;
+ Label done;
__ subq(rdx, rax);
- __ j(no_overflow, &done);
+ __ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
__ SmiNot(rdx, rdx);
__ bind(&done);
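
(Editorial sketch.) The `SmiNot` in the overflow arm works because a signed subtraction that overflows produces a result with exactly the wrong sign, and only the sign matters to the caller; bitwise NOT flips it back. A sketch using the GCC/Clang overflow builtin:

  #include <cstdint>

  // Returns a value whose sign encodes left <=> right.
  int64_t CompareByDifference(int64_t left, int64_t right) {
    int64_t result;
    if (__builtin_sub_overflow(left, right, &result)) {
      result = ~result;  // overflowed: sign is wrong, NOT corrects it
    }
    return result;  // negative: less, zero: equal, positive: greater
  }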
@@ -4951,16 +4933,16 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::HEAP_NUMBERS);
- NearLabel generic_stub;
- NearLabel unordered;
- NearLabel miss;
+ Label generic_stub;
+ Label unordered;
+ Label miss;
Condition either_smi = masm->CheckEitherSmi(rax, rdx);
- __ j(either_smi, &generic_stub);
+ __ j(either_smi, &generic_stub, Label::kNear);
__ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
- __ j(not_equal, &miss);
+ __ j(not_equal, &miss, Label::kNear);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
- __ j(not_equal, &miss);
+ __ j(not_equal, &miss, Label::kNear);
// Load left and right operand
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
@@ -4970,7 +4952,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered);
+ __ j(parity_even, &unordered, Label::kNear);
// Return a result of -1, 0, or 1, based on EFLAGS.
// Performing mov, because xor would destroy the flag register.
@@ -4991,16 +4973,133 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
+void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::SYMBOLS);
+ ASSERT(GetCondition() == equal);
+
+ // Registers containing left and right operands respectively.
+ Register left = rdx;
+ Register right = rax;
+ Register tmp1 = rcx;
+ Register tmp2 = rbx;
+
+ // Check that both operands are heap objects.
+ Label miss;
+ Condition cond = masm->CheckEitherSmi(left, right, tmp1);
+ __ j(cond, &miss, Label::kNear);
+
+ // Check that both operands are symbols.
+ __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ and_(tmp1, tmp2);
+ __ testb(tmp1, Immediate(kIsSymbolMask));
+ __ j(zero, &miss, Label::kNear);
+
+ // Symbols are compared by identity.
+ Label done;
+ __ cmpq(left, right);
+ // Make sure rax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(rax));
+ __ j(not_equal, &done, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ bind(&done);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
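
(Editorial sketch.) Because symbols are interned, the whole comparison above collapses to pointer identity once both operands are known to be symbols. A sketch with an assumed predicate:

  struct Object;
  extern bool IsSymbol(const Object* o);  // assumed predicate

  // Mirrors the stub: 0 (EQUAL) for identity, nonzero otherwise;
  // *handled is false when the operands don't fit this fast case.
  int CompareSymbols(const Object* left, const Object* right, bool* handled) {
    *handled = IsSymbol(left) && IsSymbol(right);
    if (!*handled) return 0;       // caller falls back to the miss path
    return left == right ? 0 : 1;  // identity decides equality
  }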
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(GetCondition() == equal);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = rdx;
+ Register right = rax;
+ Register tmp1 = rcx;
+ Register tmp2 = rbx;
+ Register tmp3 = rdi;
+
+ // Check that both operands are heap objects.
+ Condition cond = masm->CheckEitherSmi(left, right, tmp1);
+ __ j(cond, &miss);
+
+ // Check that both operands are strings. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movq(tmp3, tmp1);
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ or_(tmp3, tmp2);
+ __ testb(tmp3, Immediate(kIsNotStringMask));
+ __ j(not_zero, &miss);
+
+ // Fast check for identical strings.
+ Label not_same;
+ __ cmpq(left, right);
+ __ j(not_equal, &not_same, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ ret(0);
+
+ // Handle not identical strings.
+ __ bind(&not_same);
+
+ // Check that both strings are symbols. If they are, we're done
+ // because we already know they are not identical.
+ Label do_compare;
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ and_(tmp1, tmp2);
+ __ testb(tmp1, Immediate(kIsSymbolMask));
+ __ j(zero, &do_compare, Label::kNear);
+ // Make sure rax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(rax));
+ __ ret(0);
+
+ // Check that both strings are sequential ASCII.
+ Label runtime;
+ __ bind(&do_compare);
+ __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
+
+ // Compare flat ASCII strings. Returns when done.
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, left, right, tmp1, tmp2);
+
+ // Handle more complex cases in runtime.
+ __ bind(&runtime);
+ __ pop(tmp1); // Return address.
+ __ push(left);
+ __ push(right);
+ __ push(tmp1);
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::OBJECTS);
- NearLabel miss;
+ Label miss;
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
- __ j(either_smi, &miss);
+ __ j(either_smi, &miss, Label::kNear);
__ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss, Label::kNear);
__ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss, Label::kNear);
ASSERT(GetCondition() == equal);
__ subq(rax, rdx);
@@ -5019,7 +5118,8 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ push(rcx);
// Call the runtime system in a fresh internal frame.
- ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
__ EnterInternalFrame();
__ push(rdx);
__ push(rax);
@@ -5041,144 +5141,206 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
}
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register untagged_key,
- Register result,
- Label* not_pixel_array,
- Label* key_not_smi,
- Label* out_of_range) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // key - holds the key and is unchanged (must be a smi).
- // elements - is set to the the receiver's element if
- // the receiver doesn't have a pixel array or the
- // key is not a smi, otherwise it's the elements'
- // external pointer.
- // untagged_key - is set to the untagged key
-
- // Some callers already have verified that the key is a smi. key_not_smi is
- // set to NULL as a sentinel for that case. Otherwise, add an explicit check
- // to ensure the key is a smi must be added.
- if (key_not_smi != NULL) {
- __ JumpIfNotSmi(key, key_not_smi);
- } else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(key);
- }
- }
- __ SmiToInteger32(untagged_key, key);
-
- __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- // By passing NULL as not_pixel_array, callers signal that they have already
- // verified that the receiver has pixel array elements.
- if (not_pixel_array != NULL) {
- __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
- } else {
- if (FLAG_debug_code) {
- // Map check should have already made sure that elements is a pixel array.
- __ Cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Factory::pixel_array_map());
- __ Assert(equal, "Elements isn't a pixel array");
- }
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ String* name,
+ Register r0) {
+  // If the names of the slots in the range from 1 to kProbes - 1 for the
+  // hash value are not equal to the name, and the kProbes-th slot is not
+  // used (its name is the undefined value), the hash table is guaranteed
+  // not to contain the property. This holds even if some slots represent
+  // deleted properties (their names are the null value).
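+  // For example, with the formula above, a capacity of 8 (mask 7) and a
+  // hash of 5, the first probes would visit slots (5+0)&7 = 5,
+  // (5+2)&7 = 7 and (5+6)&7 = 3.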
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // r0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = r0;
+    // The capacity is a smi whose value is a power of two (2^n).
+ __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
+ __ decl(index);
+ __ and_(index,
+ Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
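+    // (The lea computes index + index * 2 in a single instruction, which
+    // avoids an explicit multiply.)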
+
+ Register entity_name = r0;
+    // An undefined value in this slot means the name is not in the table.
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ movq(entity_name, Operand(properties,
+ index,
+ times_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
+ __ j(equal, done);
+
+    // Stop if we found the property.
+ __ Cmp(entity_name, Handle<String>(name));
+ __ j(equal, miss);
+
+ // Check if the entry name is not a symbol.
+ __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+ __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+ Immediate(kIsSymbolMask));
+ __ j(zero, miss);
}
- // Check that the smi is in range.
- __ cmpl(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
- __ j(above_equal, out_of_range); // unsigned check handles negative keys.
-
- // Load and tag the element as a smi.
- __ movq(elements, FieldOperand(elements, PixelArray::kExternalPointerOffset));
- __ movzxbq(result, Operand(elements, untagged_key, times_1, 0));
- __ Integer32ToSmi(result, result);
- __ ret(0);
+ StringDictionaryLookupStub stub(properties,
+ r0,
+ r0,
+ StringDictionaryLookupStub::NEGATIVE_LOOKUP);
+ __ Push(Handle<Object>(name));
+ __ push(Immediate(name->Hash()));
+ MaybeObject* result = masm->TryCallStub(&stub);
+ if (result->IsFailure()) return result;
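+  // The stub leaves a non-zero value in r0 if the name may be in the
+  // dictionary; for a negative lookup that is a miss.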
+ __ testq(r0, r0);
+ __ j(not_zero, miss);
+ __ jmp(done);
+ return result;
}
-// Stores an indexed element into a pixel array, clamping the stored value.
-void GenerateFastPixelArrayStore(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register elements,
- Register scratch1,
- bool load_elements_from_receiver,
- bool key_is_untagged,
- Label* key_not_smi,
- Label* value_not_smi,
- Label* not_pixel_array,
- Label* out_of_range) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // key - holds the key (must be a smi) and is unchanged.
- // value - holds the value (must be a smi) and is unchanged.
- // elements - holds the element object of the receiver on entry if
- // load_elements_from_receiver is false, otherwise used
- // internally to store the pixel arrays elements and
- // external array pointer.
- //
- Register external_pointer = elements;
- Register untagged_key = scratch1;
- Register untagged_value = receiver; // Only set once success guaranteed.
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found, leaving the
+// index into the dictionary in |r1|. Jump to the |miss| label
+// otherwise.
+void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1) {
+ // Assert that name contains a string.
+ if (FLAG_debug_code) __ AbortIfNotString(name);
+
+ __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
+ __ decl(r0);
+
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
+ __ shrl(r1, Immediate(String::kHashShift));
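+    // The hash proper is stored in the upper bits of the hash field; the
+    // shift discards the low flag bits.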
+ if (i > 0) {
+ __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
+ }
+ __ and_(r1, r0);
- // Fetch the receiver's elements if the caller hasn't already done so.
- if (load_elements_from_receiver) {
- __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- }
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
- // By passing NULL as not_pixel_array, callers signal that they have already
- // verified that the receiver has pixel array elements.
- if (not_pixel_array != NULL) {
- __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
- } else {
- if (FLAG_debug_code) {
- // Map check should have already made sure that elements is a pixel array.
- __ Cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Factory::pixel_array_map());
- __ Assert(equal, "Elements isn't a pixel array");
- }
+ // Check if the key is identical to the name.
+ __ cmpq(name, Operand(elements, r1, times_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ __ j(equal, done);
}
- // Key must be a smi and it must be in range.
- if (key_is_untagged) {
- untagged_key = key;
- } else {
- // Some callers already have verified that the key is a smi. key_not_smi is
- // set to NULL as a sentinel for that case. Otherwise, add an explicit
- // check to ensure the key is a smi.
- if (key_not_smi != NULL) {
- __ JumpIfNotSmi(key, key_not_smi);
- } else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(key);
- }
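+  // If none of the inlined probes hit, fall back to the full lookup stub,
+  // which leaves a non-zero value in r0 when the name is found.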
+ StringDictionaryLookupStub stub(elements,
+ r0,
+ r1,
+ POSITIVE_LOOKUP);
+ __ push(name);
+ __ movl(r0, FieldOperand(name, String::kHashFieldOffset));
+ __ shrl(r0, Immediate(String::kHashShift));
+ __ push(r0);
+ __ CallStub(&stub);
+
+ __ testq(r0, r0);
+ __ j(zero, miss);
+ __ jmp(done);
+}
+
+
+void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // Stack frame on entry:
+  //  rsp[0 * kPointerSize]: return address.
+  //  rsp[1 * kPointerSize]: key's hash.
+  //  rsp[2 * kPointerSize]: key.
+ // Registers:
+ // dictionary_: StringDictionary to probe.
+ // result_: used as scratch.
+  //  index_: will hold the index of the entry if the lookup is successful;
+  //          may alias result_.
+  // Returns:
+  //  result_ is zero if the lookup failed, non-zero otherwise.
+
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+ Register scratch = result_;
+
+ __ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
+ __ decl(scratch);
+ __ push(scratch);
+
+  // If the names of the slots in the range from 1 to kProbes - 1 for the
+  // hash value are not equal to the name, and the kProbes-th slot is not
+  // used (its name is the undefined value), the hash table is guaranteed
+  // not to contain the property. This holds even if some slots represent
+  // deleted properties (their names are the null value).
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ movq(scratch, Operand(rsp, 2 * kPointerSize));
+ if (i > 0) {
+ __ addl(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
+ }
+ __ and_(scratch, Operand(rsp, 0));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
+
+    // An undefined value in this slot means the name is not in the table.
+ __ movq(scratch, Operand(dictionary_,
+ index_,
+ times_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+
+ __ Cmp(scratch, masm->isolate()->factory()->undefined_value());
+ __ j(equal, &not_in_dictionary);
+
+    // Stop if we found the property.
+ __ cmpq(scratch, Operand(rsp, 3 * kPointerSize));
+ __ j(equal, &in_dictionary);
+
+ if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+      // If we hit a non-symbol key during a negative lookup,
+      // we have to bail out, as this key might be equal to the
+      // key we are looking for.
+
+ // Check if the entry name is not a symbol.
+ __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+ __ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ Immediate(kIsSymbolMask));
+ __ j(zero, &maybe_in_dictionary);
}
- __ SmiToInteger32(untagged_key, key);
}
- __ cmpl(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
- __ j(above_equal, out_of_range); // unsigned check handles negative keys.
-
- // Value must be a smi.
- __ JumpIfNotSmi(value, value_not_smi);
- __ SmiToInteger32(untagged_value, value);
-
- { // Clamp the value to [0..255].
- NearLabel done;
- __ testl(untagged_value, Immediate(0xFFFFFF00));
- __ j(zero, &done);
- __ setcc(negative, untagged_value); // 1 if negative, 0 if positive.
- __ decb(untagged_value); // 0 if negative, 255 if positive.
- __ bind(&done);
+
+ __ bind(&maybe_in_dictionary);
+  // If we are doing a negative lookup, then a probing failure should be
+  // treated as a lookup success. For a positive lookup, a probing failure
+  // should be treated as a lookup failure.
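+  // Each exit below sets the result register, drops the capacity mask
+  // pushed at the start, and returns, popping the hash and key arguments.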
+ if (mode_ == POSITIVE_LOOKUP) {
+ __ movq(scratch, Immediate(0));
+ __ Drop(1);
+ __ ret(2 * kPointerSize);
}
- __ movq(external_pointer,
- FieldOperand(elements, PixelArray::kExternalPointerOffset));
- __ movb(Operand(external_pointer, untagged_key, times_1, 0), untagged_value);
- __ ret(0); // Return value in eax.
+ __ bind(&in_dictionary);
+ __ movq(scratch, Immediate(1));
+ __ Drop(1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&not_in_dictionary);
+ __ movq(scratch, Immediate(0));
+ __ Drop(1);
+ __ ret(2 * kPointerSize);
}
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index feb4de818..a7ed91c50 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -59,78 +59,30 @@ class TranscendentalCacheStub: public CodeStub {
};
-class ToBooleanStub: public CodeStub {
+class UnaryOpStub: public CodeStub {
public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags,
- TypeInfo operands_type = TypeInfo::Unknown())
+ UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
: op_(op),
mode_(mode),
- flags_(flags),
- args_in_registers_(false),
- args_reversed_(false),
- static_operands_type_(operands_type),
- runtime_operands_type_(BinaryOpIC::DEFAULT),
+ operand_type_(UnaryOpIC::UNINITIALIZED),
name_(NULL) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
+ UnaryOpStub(
+ int key,
+ UnaryOpIC::TypeInfo operand_type)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
- flags_(FlagBits::decode(key)),
- args_in_registers_(ArgsInRegistersBits::decode(key)),
- args_reversed_(ArgsReversedBits::decode(key)),
- static_operands_type_(TypeInfo::ExpandedRepresentation(
- StaticTypeInfoBits::decode(key))),
- runtime_operands_type_(runtime_operands_type),
+ operand_type_(operand_type),
name_(NULL) {
}
- // Generate code to call the stub with the supplied arguments. This will add
- // code at the call site to prepare arguments either in registers or on the
- // stack together with the actual call.
- void GenerateCall(MacroAssembler* masm, Register left, Register right);
- void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
- void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
- bool ArgsInRegistersSupported() {
- return (op_ == Token::ADD) || (op_ == Token::SUB)
- || (op_ == Token::MUL) || (op_ == Token::DIV);
- }
-
private:
Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
- bool args_in_registers_; // Arguments passed in registers not on the stack.
- bool args_reversed_; // Left and right argument are swapped.
-
- // Number type information of operands, determined by code generator.
- TypeInfo static_operands_type_;
+ UnaryOverwriteMode mode_;
// Operand type information determined at runtime.
- BinaryOpIC::TypeInfo runtime_operands_type_;
+ UnaryOpIC::TypeInfo operand_type_;
char* name_;
@@ -138,93 +90,82 @@ class GenericBinaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
- PrintF("GenericBinaryOpStub %d (op %s), "
- "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
+ PrintF("UnaryOpStub %d (op %s), "
+ "(mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
- static_cast<int>(flags_),
- static_cast<int>(args_in_registers_),
- static_cast<int>(args_reversed_),
- static_operands_type_.ToString());
+ UnaryOpIC::GetName(operand_type_));
}
#endif
-  // Minor key encoding in 18 bits TTTNNNFRAOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class ArgsInRegistersBits: public BitField<bool, 9, 1> {};
- class ArgsReversedBits: public BitField<bool, 10, 1> {};
- class FlagBits: public BitField<GenericBinaryFlags, 11, 1> {};
- class StaticTypeInfoBits: public BitField<int, 12, 3> {};
- class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 3> {};
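+  // Minor key encoding in 11 bits TTTOOOOOOOM (type, op, mode).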
+ class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+ class OpBits: public BitField<Token::Value, 1, 7> {};
+ class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
- Major MajorKey() { return GenericBinaryOp; }
+ Major MajorKey() { return UnaryOp; }
int MinorKey() {
- // Encode the parameters in a unique 18 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_)
- | ArgsInRegistersBits::encode(args_in_registers_)
- | ArgsReversedBits::encode(args_reversed_)
- | StaticTypeInfoBits::encode(
- static_operands_type_.ThreeBitRepresentation())
- | RuntimeTypeInfoBits::encode(runtime_operands_type_);
+ return ModeBits::encode(mode_)
+ | OpBits::encode(op_)
+ | OperandTypeInfoBits::encode(operand_type_);
}
+  // Note: A lot of the helper functions below will vanish once we use
+  // virtual functions instead of switches more often.
void Generate(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- bool IsOperationCommutative() {
- return (op_ == Token::ADD) || (op_ == Token::MUL);
- }
+ void GenerateTypeTransition(MacroAssembler* masm);
- void SetArgsInRegisters() { args_in_registers_ = true; }
- void SetArgsReversed() { args_reversed_ = true; }
- bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgsInRegisters() { return args_in_registers_; }
- bool HasArgsReversed() { return args_reversed_; }
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateSmiStubSub(MacroAssembler* masm);
+ void GenerateSmiStubBitNot(MacroAssembler* masm);
+ void GenerateSmiCodeSub(MacroAssembler* masm,
+ Label* non_smi,
+ Label* slow,
+ Label::Distance non_smi_near = Label::kFar,
+ Label::Distance slow_near = Label::kFar);
+ void GenerateSmiCodeBitNot(MacroAssembler* masm,
+ Label* non_smi,
+ Label::Distance non_smi_near);
- bool ShouldGenerateSmiCode() {
- return HasSmiCodeInStub() &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateHeapNumberStubSub(MacroAssembler* masm);
+ void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+ void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+ void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateGenericStubSub(MacroAssembler* masm);
+ void GenerateGenericStubBitNot(MacroAssembler* masm);
+ void GenerateGenericCodeFallback(MacroAssembler* masm);
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+ virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
+ return UnaryOpIC::ToState(operand_type_);
}
- friend class CodeGenerator;
- friend class LCodeGen;
+ virtual void FinishCode(Code* code) {
+ code->set_unary_op_type(operand_type_);
+ }
};
-class TypeRecordingBinaryOpStub: public CodeStub {
+class BinaryOpStub: public CodeStub {
public:
- TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+ BinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
mode_(mode),
- operands_type_(TRBinaryOpIC::UNINITIALIZED),
- result_type_(TRBinaryOpIC::UNINITIALIZED),
+ operands_type_(BinaryOpIC::UNINITIALIZED),
+ result_type_(BinaryOpIC::UNINITIALIZED),
name_(NULL) {
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
- TypeRecordingBinaryOpStub(
+ BinaryOpStub(
int key,
- TRBinaryOpIC::TypeInfo operands_type,
- TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+ BinaryOpIC::TypeInfo operands_type,
+ BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
operands_type_(operands_type),
@@ -241,8 +182,8 @@ class TypeRecordingBinaryOpStub: public CodeStub {
OverwriteMode mode_;
// Operand type information determined at runtime.
- TRBinaryOpIC::TypeInfo operands_type_;
- TRBinaryOpIC::TypeInfo result_type_;
+ BinaryOpIC::TypeInfo operands_type_;
+ BinaryOpIC::TypeInfo result_type_;
char* name_;
@@ -250,22 +191,22 @@ class TypeRecordingBinaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
- PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+ PrintF("BinaryOpStub %d (op %s), "
"(mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
- TRBinaryOpIC::GetName(operands_type_));
+ BinaryOpIC::GetName(operands_type_));
}
#endif
// Minor key encoding in 15 bits RRRTTTOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
- class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 9, 3> {};
- class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 12, 3> {};
+ class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 9, 3> {};
+ class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 12, 3> {};
- Major MajorKey() { return TypeRecordingBinaryOp; }
+ Major MajorKey() { return BinaryOp; }
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
@@ -291,6 +232,7 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateOddballStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
@@ -298,15 +240,15 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
- virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
- return TRBinaryOpIC::ToState(operands_type_);
+ return BinaryOpIC::ToState(operands_type_);
}
virtual void FinishCode(Code* code) {
- code->set_type_recording_binary_op_type(operands_type_);
- code->set_type_recording_binary_op_result_type(result_type_);
+ code->set_binary_op_type(operands_type_);
+ code->set_binary_op_result_type(result_type_);
}
friend class CodeGenerator;
@@ -415,10 +357,9 @@ class SubStringStub: public CodeStub {
class StringCompareStub: public CodeStub {
public:
- explicit StringCompareStub() {}
+ StringCompareStub() {}
- // Compare two flat ascii strings and returns result in rax after popping two
- // arguments from the stack.
+  // Compares two flat ASCII strings and returns the result in rax.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
@@ -427,11 +368,27 @@ class StringCompareStub: public CodeStub {
Register scratch3,
Register scratch4);
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
+  // Compares two flat ASCII strings for equality and returns the result
+  // in rax.
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2);
- void Generate(MacroAssembler* masm);
+ private:
+ virtual Major MajorKey() { return StringCompare; }
+ virtual int MinorKey() { return 0; }
+ virtual void Generate(MacroAssembler* masm);
+
+ static void GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch,
+ Label* chars_not_equal,
+ Label::Distance near_jump = Label::kFar);
};
@@ -472,48 +429,73 @@ class NumberToStringStub: public CodeStub {
};
-// Generate code to load an element from a pixel array. The receiver is assumed
-// to not be a smi and to have elements, the caller must guarantee this
-// precondition. If key is not a smi, then the generated code branches to
-// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
-// check has already been performed on key so that the smi check is not
-// generated. If key is not a valid index within the bounds of the pixel array,
-// the generated code jumps to out_of_range. receiver, key and elements are
-// unchanged throughout the generated code sequence.
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register untagged_key,
- Register result,
- Label* not_pixel_array,
- Label* key_not_smi,
- Label* out_of_range);
-
-// Generate code to store an element into a pixel array, clamping values between
-// [0..255]. The receiver is assumed to not be a smi and to have elements, the
-// caller must guarantee this precondition. If key is not a smi, then the
-// generated code branches to key_not_smi. Callers can specify NULL for
-// key_not_smi to signal that a smi check has already been performed on key so
-// that the smi check is not generated. If the value is not a smi, the
-// generated code will branch to value_not_smi. If the receiver
-// doesn't have pixel array elements, the generated code will branch to
-// not_pixel_array, unless not_pixel_array is NULL, in which case the caller
-// must ensure that the receiver has pixel array elements. If key is not a
-// valid index within the bounds of the pixel array, the generated code jumps to
-// out_of_range.
-void GenerateFastPixelArrayStore(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register elements,
- Register scratch1,
- bool load_elements_from_receiver,
- bool key_is_untagged,
- Label* key_not_smi,
- Label* value_not_smi,
- Label* not_pixel_array,
- Label* out_of_range);
+class StringDictionaryLookupStub: public CodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ StringDictionaryLookupStub(Register dictionary,
+ Register result,
+ Register index,
+ LookupMode mode)
+ : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
+
+ void Generate(MacroAssembler* masm);
+
+ MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ String* name,
+ Register r0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1);
+
+ private:
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ static const int kCapacityOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("StringDictionaryLookupStub\n");
+ }
+#endif
+
+ Major MajorKey() { return StringDictionaryNegativeLookup; }
+
+ int MinorKey() {
+ return DictionaryBits::encode(dictionary_.code()) |
+ ResultBits::encode(result_.code()) |
+ IndexBits::encode(index_.code()) |
+ LookupModeBits::encode(mode_);
+ }
+
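+  // Minor key encoding in 13 bits MIIIIRRRRDDDD (mode, index, result,
+  // dictionary).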
+ class DictionaryBits: public BitField<int, 0, 4> {};
+ class ResultBits: public BitField<int, 4, 4> {};
+ class IndexBits: public BitField<int, 8, 4> {};
+ class LookupModeBits: public BitField<LookupMode, 12, 1> {};
+
+ Register dictionary_;
+ Register result_;
+ Register index_;
+ LookupMode mode_;
+};
+
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index ad114c243..f8f2d6e68 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -29,81 +29,14 @@
#if defined(V8_TARGET_ARCH_X64)
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
+#include "codegen.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
-
-// -------------------------------------------------------------------------
-// Platform-specific FrameRegisterState functions.
-
-void FrameRegisterState::Save(MacroAssembler* masm) const {
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- int action = registers_[i];
- if (action == kPush) {
- __ push(RegisterAllocator::ToRegister(i));
- } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
- __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
- }
- }
-}
-
-
-void FrameRegisterState::Restore(MacroAssembler* masm) const {
- // Restore registers in reverse order due to the stack.
- for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
- int action = registers_[i];
- if (action == kPush) {
- __ pop(RegisterAllocator::ToRegister(i));
- } else if (action != kIgnore) {
- action &= ~kSyncedFlag;
- __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
- }
- }
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
- frame_state_.Save(masm_);
-}
-
-
-void DeferredCode::RestoreRegisters() {
- frame_state_.Restore(masm_);
-}
-
-
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- frame_state_->Save(masm);
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- frame_state_->Restore(masm);
-}
-
-
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterInternalFrame();
}
@@ -114,8628 +47,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
}
-// -------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
- : owner_(owner),
- destination_(NULL),
- previous_(NULL) {
- owner_->set_state(this);
-}
-
-
-CodeGenState::CodeGenState(CodeGenerator* owner,
- ControlDestination* destination)
- : owner_(owner),
- destination_(destination),
- previous_(owner->state()) {
- owner_->set_state(this);
-}
-
-
-CodeGenState::~CodeGenState() {
- ASSERT(owner_->state() == this);
- owner_->set_state(previous_);
-}
-
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation.
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
- : deferred_(8),
- masm_(masm),
- info_(NULL),
- frame_(NULL),
- allocator_(NULL),
- state_(NULL),
- loop_nesting_(0),
- function_return_is_shadowed_(false),
- in_spilled_code_(false) {
-}
-
-
-// Calling conventions:
-// rbp: caller's frame pointer
-// rsp: stack pointer
-// rdi: called JS function
-// rsi: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
- // Record the position for debugging purposes.
- CodeForFunctionPosition(info->function());
- Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
-
- // Initialize state.
- info_ = info;
- ASSERT(allocator_ == NULL);
- RegisterAllocator register_allocator(this);
- allocator_ = &register_allocator;
- ASSERT(frame_ == NULL);
- frame_ = new VirtualFrame();
- set_in_spilled_code(false);
-
- // Adjust for function-level loop nesting.
- ASSERT_EQ(0, loop_nesting_);
- loop_nesting_ = info->is_in_loop() ? 1 : 0;
-
- JumpTarget::set_compiling_deferred_code(false);
-
- {
- CodeGenState state(this);
- // Entry:
- // Stack: receiver, arguments, return address.
- // rbp: caller's frame pointer
- // rsp: stack pointer
- // rdi: called JS function
- // rsi: callee's context
- allocator_->Initialize();
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- frame_->SpillAll();
- __ int3();
- }
-#endif
-
- frame_->Enter();
-
- // Allocate space for locals and initialize them.
- frame_->AllocateStackSlots();
-
- // Allocate the local context if needed.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ allocate local context");
- // Allocate local context.
- // Get outer context and create a new context based on it.
- frame_->PushFunction();
- Result context;
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- context = frame_->CallStub(&stub, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kNewContext, 1);
- }
-
- // Update context local.
- frame_->SaveContextRegister();
-
- // Verify that the runtime call result and rsi agree.
- if (FLAG_debug_code) {
- __ cmpq(context.reg(), rsi);
- __ Assert(equal, "Runtime::NewContext should end up in rsi");
- }
- }
-
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope()->num_parameters(); i++) {
- Variable* par = scope()->parameter(i);
- Slot* slot = par->AsSlot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- // The use of SlotOperand below is safe in unspilled code
- // because the slot is guaranteed to be a context slot.
- //
- // There are no parameters in the global scope.
- ASSERT(!scope()->is_global_scope());
- frame_->PushParameterAt(i);
- Result value = frame_->Pop();
- value.ToRegister();
-
- // SlotOperand loads context.reg() with the context object
- // stored to, used below in RecordWrite.
- Result context = allocator_->Allocate();
- ASSERT(context.is_valid());
- __ movq(SlotOperand(slot, context.reg()), value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- frame_->Spill(context.reg());
- frame_->Spill(value.reg());
- __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
- }
- }
- }
-
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
-
- // Initialize ThisFunction reference if present.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->Push(Factory::the_hole_value());
- StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
- }
-
- // Initialize the function return target after the locals are set
- // up, because it needs the expected frame height from the frame.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
- function_return_is_shadowed_ = false;
-
- // Generate code to 'execute' declarations and initialize functions
- // (source elements). In case of an illegal redeclaration we need to
- // handle that instead of processing the declarations.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ illegal redeclarations");
- scope()->VisitIllegalRedeclaration(this);
- } else {
- Comment cmnt(masm_, "[ declarations");
- ProcessDeclarations(scope()->declarations());
- // Bail out if a stack-overflow exception occurred when processing
- // declarations.
- if (HasStackOverflow()) return;
- }
-
- if (FLAG_trace) {
- frame_->CallRuntime(Runtime::kTraceEnter, 0);
- // Ignore the return value.
- }
- CheckStack();
-
- // Compile the body of the function in a vanilla state. Don't
- // bother compiling all the code if the scope has an illegal
- // redeclaration.
- if (!scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
- bool is_builtin = Bootstrapper::IsActive();
- bool should_trace =
- is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
- if (should_trace) {
- frame_->CallRuntime(Runtime::kDebugTrace, 0);
- // Ignore the return value.
- }
-#endif
- VisitStatements(info->function()->body());
-
- // Handle the return from the function.
- if (has_valid_frame()) {
- // If there is a valid frame, control flow can fall off the end of
- // the body. In that case there is an implicit return statement.
- ASSERT(!function_return_is_shadowed_);
- CodeForReturnPosition(info->function());
- frame_->PrepareForReturn();
- Result undefined(Factory::undefined_value());
- if (function_return_.is_bound()) {
- function_return_.Jump(&undefined);
- } else {
- function_return_.Bind(&undefined);
- GenerateReturnSequence(&undefined);
- }
- } else if (function_return_.is_linked()) {
- // If the return target has dangling jumps to it, then we have not
- // yet generated the return sequence. This can happen when (a)
- // control does not flow off the end of the body so we did not
- // compile an artificial return statement just above, and (b) there
- // are return statements in the body but (c) they are all shadowed.
- Result return_value;
- function_return_.Bind(&return_value);
- GenerateReturnSequence(&return_value);
- }
- }
- }
-
- // Adjust for function-level loop nesting.
- ASSERT_EQ(loop_nesting_, info->is_in_loop() ? 1 : 0);
- loop_nesting_ = 0;
-
- // Code generation state must be reset.
- ASSERT(state_ == NULL);
- ASSERT(!function_return_is_shadowed_);
- function_return_.Unuse();
- DeleteFrame();
-
- // Process any deferred code using the register allocator.
- if (!HasStackOverflow()) {
- JumpTarget::set_compiling_deferred_code(true);
- ProcessDeferred();
- JumpTarget::set_compiling_deferred_code(false);
- }
-
- // There is no need to delete the register allocator, it is a
- // stack-allocated local.
- allocator_ = NULL;
-}
-
-
-Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER:
- return frame_->ParameterAt(index);
-
- case Slot::LOCAL:
- return frame_->LocalAt(index);
-
- case Slot::CONTEXT: {
- // Follow the context chain if necessary.
- ASSERT(!tmp.is(rsi)); // do not overwrite context register
- Register context = rsi;
- int chain_length = scope()->ContextChainLength(slot->var()->scope());
- for (int i = 0; i < chain_length; i++) {
- // Load the closure.
- // (All contexts, even 'with' contexts, have a closure,
- // and it is the same for all contexts inside a function.
- // There is no need to go to the function context first.)
- __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- // We may have a 'with' context now. Get the function context.
-      // (In fact this mov may never be needed, since the scope analysis
- // may not permit a direct context access in this case and thus we are
- // always at a function context. However it is safe to dereference be-
- // cause the function context of a function context is itself. Before
- // deleting this mov we should try to create a counter-example first,
- // though...)
- __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
- }
-
- default:
- UNREACHABLE();
- return Operand(rsp, 0);
- }
-}
-
-
-Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
- Result tmp,
- JumpTarget* slow) {
- ASSERT(slot->type() == Slot::CONTEXT);
- ASSERT(tmp.is_register());
- Register context = rsi;
-
- for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
- }
- __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
- }
- }
- // Check that last extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal, not_taken);
- __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp.reg(), slot->index());
-}
-
-
-// Emit code to load the value of an expression to the top of the
-// frame. If the expression is boolean-valued it may be compiled (or
-// partially compiled) into control flow to the control destination.
-// If force_control is true, control flow is forced.
-void CodeGenerator::LoadCondition(Expression* expr,
- ControlDestination* dest,
- bool force_control) {
- ASSERT(!in_spilled_code());
- int original_height = frame_->height();
-
- { CodeGenState new_state(this, dest);
- Visit(expr);
-
- // If we hit a stack overflow, we may not have actually visited
- // the expression. In that case, we ensure that we have a
- // valid-looking frame state because we will continue to generate
- // code as we unwind the C++ stack.
- //
- // It's possible to have both a stack overflow and a valid frame
- // state (eg, a subexpression overflowed, visiting it returned
- // with a dummied frame state, and visiting this expression
- // returned with a normal-looking state).
- if (HasStackOverflow() &&
- !dest->is_used() &&
- frame_->height() == original_height) {
- dest->Goto(true);
- }
- }
-
- if (force_control && !dest->is_used()) {
- // Convert the TOS value into flow to the control destination.
- ToBoolean(dest);
- }
-
- ASSERT(!(force_control && !dest->is_used()));
- ASSERT(dest->is_used() || frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Load(expression);
- frame_->SpillAll();
- set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::Load(Expression* expr) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(!in_spilled_code());
- JumpTarget true_target;
- JumpTarget false_target;
- ControlDestination dest(&true_target, &false_target, true);
- LoadCondition(expr, &dest, false);
-
- if (dest.false_was_fall_through()) {
- // The false target was just bound.
- JumpTarget loaded;
- frame_->Push(Factory::false_value());
- // There may be dangling jumps to the true target.
- if (true_target.is_linked()) {
- loaded.Jump();
- true_target.Bind();
- frame_->Push(Factory::true_value());
- loaded.Bind();
- }
-
- } else if (dest.is_used()) {
- // There is true, and possibly false, control flow (with true as
- // the fall through).
- JumpTarget loaded;
- frame_->Push(Factory::true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- false_target.Bind();
- frame_->Push(Factory::false_value());
- loaded.Bind();
- }
-
- } else {
- // We have a valid value on top of the frame, but we still may
- // have dangling jumps to the true and false targets from nested
- // subexpressions (eg, the left subexpressions of the
- // short-circuited boolean operators).
- ASSERT(has_valid_frame());
- if (true_target.is_linked() || false_target.is_linked()) {
- JumpTarget loaded;
- loaded.Jump(); // Don't lose the current TOS.
- if (true_target.is_linked()) {
- true_target.Bind();
- frame_->Push(Factory::true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- }
- }
- if (false_target.is_linked()) {
- false_target.Bind();
- frame_->Push(Factory::false_value());
- }
- loaded.Bind();
- }
- }
-
- ASSERT(has_valid_frame());
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadGlobal() {
- if (in_spilled_code()) {
- frame_->EmitPush(GlobalObjectOperand());
- } else {
- Result temp = allocator_->Allocate();
- __ movq(temp.reg(), GlobalObjectOperand());
- frame_->Push(&temp);
- }
-}
-
-
-void CodeGenerator::LoadGlobalReceiver() {
- Result temp = allocator_->Allocate();
- Register reg = temp.reg();
- __ movq(reg, GlobalObjectOperand());
- __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
- frame_->Push(&temp);
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
- // Special handling of identifiers as subexpressions of typeof.
- Variable* variable = expr->AsVariableProxy()->AsVariable();
- if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // For a global variable we build the property reference
- // <global>.<variable> and perform a (regular non-contextual) property
- // load to make sure we do not get reference errors.
- Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
- Literal key(variable->name());
- Property property(&global, &key, RelocInfo::kNoPosition);
- Reference ref(this, &property);
- ref.GetValue();
- } else if (variable != NULL && variable->AsSlot() != NULL) {
- // For a variable that rewrites to a slot, we signal it is the immediate
- // subexpression of a typeof.
- LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
- } else {
- // Anything else can be handled normally.
- Load(expr);
- }
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
- if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
- ASSERT(scope()->arguments_shadow() != NULL);
- // We don't want to do lazy arguments allocation for functions that
-  // have heap-allocated contexts, because it interferes with the
- // uninitialized const tracking in the context objects.
- return (scope()->num_heap_slots() > 0)
- ? EAGER_ARGUMENTS_ALLOCATION
- : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-Result CodeGenerator::StoreArgumentsObject(bool initial) {
- ArgumentsAllocationMode mode = ArgumentsMode();
- ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
- Comment cmnt(masm_, "[ store arguments object");
- if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
- // When using lazy arguments allocation, we store the arguments marker value
- // as a sentinel indicating that the arguments object hasn't been
- // allocated yet.
- frame_->Push(Factory::arguments_marker());
- } else {
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- frame_->PushFunction();
- frame_->PushReceiverSlotAddress();
- frame_->Push(Smi::FromInt(scope()->num_parameters()));
- Result result = frame_->CallStub(&stub, 3);
- frame_->Push(&result);
- }
-
- Variable* arguments = scope()->arguments();
- Variable* shadow = scope()->arguments_shadow();
- ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
- ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
- JumpTarget done;
- bool skip_arguments = false;
- if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has
-    // already been written to. This can happen if a function
- // has a local variable named 'arguments'.
- LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
- Result probe = frame_->Pop();
- if (probe.is_constant()) {
- // We have to skip updating the arguments object if it has
- // been assigned a proper value.
- skip_arguments = !probe.handle()->IsArgumentsMarker();
- } else {
- __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
- probe.Unuse();
- done.Branch(not_equal);
- }
- }
- if (!skip_arguments) {
- StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
- if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
- }
- StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
- return frame_->Pop();
-}
-
-//------------------------------------------------------------------------------
-// CodeGenerator implementation of variables, lookups, and stores.
-
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
- // References are loaded from both spilled and unspilled code. Set the
- // state to unspilled to allow that (and explicitly spill after
- // construction at the construction sites).
- bool was_in_spilled_code = in_spilled_code_;
- in_spilled_code_ = false;
-
- Comment cmnt(masm_, "[ LoadReference");
- Expression* e = ref->expression();
- Property* property = e->AsProperty();
- Variable* var = e->AsVariableProxy()->AsVariable();
-
- if (property != NULL) {
- // The expression is either a property or a variable proxy that rewrites
- // to a property.
- Load(property->obj());
- if (property->key()->IsPropertyName()) {
- ref->set_type(Reference::NAMED);
- } else {
- Load(property->key());
- ref->set_type(Reference::KEYED);
- }
- } else if (var != NULL) {
- // The expression is a variable proxy that does not rewrite to a
- // property. Global variables are treated as named property references.
- if (var->is_global()) {
- // If rax is free, the register allocator prefers it. Thus the code
- // generator will load the global object into rax, which is where
- // LoadIC wants it. Most uses of Reference call LoadIC directly
- // after the reference is created.
- frame_->Spill(rax);
- LoadGlobal();
- ref->set_type(Reference::NAMED);
- } else {
- ASSERT(var->AsSlot() != NULL);
- ref->set_type(Reference::SLOT);
- }
- } else {
- // Anything else is a runtime error.
- Load(e);
- frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
- }
-
- in_spilled_code_ = was_in_spilled_code;
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
- // Pop a reference from the stack while preserving TOS.
- Comment cmnt(masm_, "[ UnloadReference");
- frame_->Nip(ref->size());
- ref->set_unloaded();
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
-// convert it to a boolean in the condition code register or jump to
-// 'false_target'/'true_target' as appropriate.
-void CodeGenerator::ToBoolean(ControlDestination* dest) {
- Comment cmnt(masm_, "[ ToBoolean");
-
- // The value to convert should be popped from the frame.
- Result value = frame_->Pop();
- value.ToRegister();
-
- if (value.is_number()) {
- // Fast case if TypeInfo indicates only numbers.
- if (FLAG_debug_code) {
- __ AbortIfNotNumber(value.reg());
- }
- // Smi => false iff zero.
- __ SmiCompare(value.reg(), Smi::FromInt(0));
- if (value.is_smi()) {
- value.Unuse();
- dest->Split(not_zero);
- } else {
- dest->false_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(value.reg());
- dest->true_target()->Branch(is_smi);
- __ xorpd(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
- value.Unuse();
- dest->Split(not_zero);
- }
- } else {
- // Fast case checks.
- // 'false' => false.
- __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
- dest->false_target()->Branch(equal);
-
- // 'true' => true.
- __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
- dest->true_target()->Branch(equal);
-
- // 'undefined' => false.
- __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
- dest->false_target()->Branch(equal);
-
- // Smi => false iff zero.
- __ SmiCompare(value.reg(), Smi::FromInt(0));
- dest->false_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(value.reg());
- dest->true_target()->Branch(is_smi);
-
- // Call the stub for all other cases.
- frame_->Push(&value); // Undo the Pop() from above.
- ToBooleanStub stub;
- Result temp = frame_->CallStub(&stub, 1);
- // Convert the result to a condition code.
- __ testq(temp.reg(), temp.reg());
- temp.Unuse();
- dest->Split(not_equal);
- }
-}
-
-
-// Call the specialized stub for a binary operation.
-class DeferredInlineBinaryOperation: public DeferredCode {
- public:
- DeferredInlineBinaryOperation(Token::Value op,
- Register dst,
- Register left,
- Register right,
- OverwriteMode mode)
- : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
- set_comment("[ DeferredInlineBinaryOperation");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Register left_;
- Register right_;
- OverwriteMode mode_;
-};
-
-
-void DeferredInlineBinaryOperation::Generate() {
- Label done;
- if ((op_ == Token::ADD)
- || (op_ == Token::SUB)
- || (op_ == Token::MUL)
- || (op_ == Token::DIV)) {
- Label call_runtime;
- Label left_smi, right_smi, load_right, do_op;
- __ JumpIfSmi(left_, &left_smi);
- __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_LEFT) {
- __ movq(dst_, left_);
- }
- __ jmp(&load_right);
-
- __ bind(&left_smi);
- __ SmiToInteger32(left_, left_);
- __ cvtlsi2sd(xmm0, left_);
- __ Integer32ToSmi(left_, left_);
- if (mode_ == OVERWRITE_LEFT) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
-
- __ bind(&load_right);
- __ JumpIfSmi(right_, &right_smi);
- __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_RIGHT) {
- __ movq(dst_, right_);
- } else if (mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
- __ jmp(&do_op);
-
- __ bind(&right_smi);
- __ SmiToInteger32(right_, right_);
- __ cvtlsi2sd(xmm1, right_);
- __ Integer32ToSmi(right_, right_);
- if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
-
- __ bind(&do_op);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
- __ jmp(&done);
-
- __ bind(&call_runtime);
- }
- GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, left_, right_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
- __ bind(&done);
-}
-
-
-static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
- Token::Value op,
- const Result& right,
- const Result& left) {
- // Set TypeInfo of result according to the operation performed.
- // We rely on the fact that smis have a 32 bit payload on x64.
- STATIC_ASSERT(kSmiValueSize == 32);
- switch (op) {
- case Token::COMMA:
- return right.type_info();
- case Token::OR:
- case Token::AND:
- // Result type can be either of the two input types.
- return operands_type;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- // Result is always a smi.
- return TypeInfo::Smi();
- case Token::SAR:
- case Token::SHL:
- // Result is always a smi.
- return TypeInfo::Smi();
- case Token::SHR:
- // Result of x >>> y is always a smi if masked y >= 1, otherwise a number.
- return (right.is_constant() && right.handle()->IsSmi()
- && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
- ? TypeInfo::Smi()
- : TypeInfo::Number();
- case Token::ADD:
- if (operands_type.IsNumber()) {
- return TypeInfo::Number();
- } else if (left.type_info().IsString() || right.type_info().IsString()) {
- return TypeInfo::String();
- } else {
- return TypeInfo::Unknown();
- }
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- // Result is always a number.
- return TypeInfo::Number();
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return TypeInfo::Unknown();
-}
-
-
-void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
- OverwriteMode overwrite_mode) {
- Comment cmnt(masm_, "[ BinaryOperation");
- Token::Value op = expr->op();
- Comment cmnt_token(masm_, Token::String(op));
-
- if (op == Token::COMMA) {
- // Simply discard left value.
- frame_->Nip(1);
- return;
- }
-
- Result right = frame_->Pop();
- Result left = frame_->Pop();
-
- if (op == Token::ADD) {
- const bool left_is_string = left.type_info().IsString();
- const bool right_is_string = right.type_info().IsString();
- // Make sure constant strings have string type info.
- ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
- left_is_string);
- ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
- right_is_string);
- if (left_is_string || right_is_string) {
- frame_->Push(&left);
- frame_->Push(&right);
- Result answer;
- if (left_is_string) {
- if (right_is_string) {
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
- } else {
- answer =
- frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
- }
- } else if (right_is_string) {
- answer =
- frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
- }
- answer.set_type_info(TypeInfo::String());
- frame_->Push(&answer);
- return;
- }
- // Neither operand is known to be a string.
- }
-
- bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
- bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
- bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
- bool right_is_non_smi_constant =
- right.is_constant() && !right.handle()->IsSmi();
-
- if (left_is_smi_constant && right_is_smi_constant) {
- // Compute the constant result at compile time, and leave it on the frame.
- int left_int = Smi::cast(*left.handle())->value();
- int right_int = Smi::cast(*right.handle())->value();
- if (FoldConstantSmis(op, left_int, right_int)) return;
- }
-
- // Get number type of left and right sub-expressions.
- TypeInfo operands_type =
- TypeInfo::Combine(left.type_info(), right.type_info());
-
- TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
-
- Result answer;
- if (left_is_non_smi_constant || right_is_non_smi_constant) {
- // Go straight to the slow case, with no smi code.
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_SMI_CODE_IN_STUB,
- operands_type);
- answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
- } else if (right_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
- false, overwrite_mode);
- } else if (left_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
- true, overwrite_mode);
- } else {
- // Set the flags based on the operation, type and loop nesting level.
- // Bit operations always assume they likely operate on Smis. Still only
- // generate the inline Smi check code if this operation is part of a loop.
- // For all other operations only inline the Smi check code for likely smis
- // if the operation is part of a loop.
- if (loop_nesting() > 0 &&
- (Token::IsBitOp(op) ||
- operands_type.IsInteger32() ||
- expr->type()->IsLikelySmi())) {
- answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
- } else {
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_GENERIC_BINARY_FLAGS,
- operands_type);
- answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
- }
- }
-
- answer.set_type_info(result_type);
- frame_->Push(&answer);
-}
-
-
-bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
- Object* answer_object = Heap::undefined_value();
- switch (op) {
- case Token::ADD:
- // Use intptr_t to detect overflow of 32-bit int.
- if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
- answer_object = Smi::FromInt(left + right);
- }
- break;
- case Token::SUB:
- // Use intptr_t to detect overflow of 32-bit int.
- if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
- answer_object = Smi::FromInt(left - right);
- }
- break;
- case Token::MUL: {
- double answer = static_cast<double>(left) * right;
- if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
- // If the product is zero and the non-zero factor is negative,
- // the spec requires us to return floating point negative zero.
- if (answer != 0 || (left >= 0 && right >= 0)) {
- answer_object = Smi::FromInt(static_cast<int>(answer));
- }
- }
- }
- break;
- case Token::DIV:
- case Token::MOD:
- break;
- case Token::BIT_OR:
- answer_object = Smi::FromInt(left | right);
- break;
- case Token::BIT_AND:
- answer_object = Smi::FromInt(left & right);
- break;
- case Token::BIT_XOR:
- answer_object = Smi::FromInt(left ^ right);
- break;
-
- case Token::SHL: {
- int shift_amount = right & 0x1F;
- if (Smi::IsValid(left << shift_amount)) {
- answer_object = Smi::FromInt(left << shift_amount);
- }
- break;
- }
- case Token::SHR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- unsigned_left >>= shift_amount;
- if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
- answer_object = Smi::FromInt(unsigned_left);
- }
- break;
- }
- case Token::SAR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- if (left < 0) {
- // Perform an arithmetic shift of a negative number by
- // complementing the number, logical-shifting, and complementing again.
- unsigned_left = ~unsigned_left;
- unsigned_left >>= shift_amount;
- unsigned_left = ~unsigned_left;
- } else {
- unsigned_left >>= shift_amount;
- }
- ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
- answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- if (answer_object == Heap::undefined_value()) {
- return false;
- }
- frame_->Push(Handle<Object>(answer_object));
- return true;
-}
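
FoldConstantSmis leans on two tricks worth spelling out: widening to a 64-bit
type so that 32-bit overflow of ADD and SUB is detected before a smi result is
committed, and emulating an arithmetic right shift of a negative value with
complements around a logical shift. A minimal standalone sketch of both, with
hypothetical helper names (an illustration of the arithmetic, not V8 code):

#include <cassert>
#include <cstdint>

// Overflow-safe fold for ADD: do the math in 64 bits and keep the result
// only if it still fits (the smi payload is approximated as int32_t here).
static bool FoldAdd(int32_t left, int32_t right, int32_t* out) {
  int64_t wide = static_cast<int64_t>(left) + right;
  if (wide < INT32_MIN || wide > INT32_MAX) return false;
  *out = static_cast<int32_t>(wide);
  return true;
}

// Arithmetic shift right of a negative value via complement / logical shift /
// complement, mirroring the SAR case above.
static int32_t Sar(int32_t value, int shift) {
  uint32_t bits = static_cast<uint32_t>(value);
  if (value < 0) {
    bits = ~bits;
    bits >>= shift;
    bits = ~bits;
  } else {
    bits >>= shift;
  }
  return static_cast<int32_t>(bits);
}

int main() {
  int32_t sum;
  assert(FoldAdd(1, 2, &sum) && sum == 3);
  assert(!FoldAdd(INT32_MAX, 1, &sum));  // would overflow: no folding
  assert(Sar(-8, 1) == -4);              // sign is preserved
  assert(Sar(-1, 5) == -1);
  return 0;
}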
-
-
-void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
- Result* right,
- JumpTarget* both_smi) {
- TypeInfo left_info = left->type_info();
- TypeInfo right_info = right->type_info();
- if (left_info.IsDouble() || left_info.IsString() ||
- right_info.IsDouble() || right_info.IsString()) {
- // We know that left and right are not both smis. Don't do any tests.
- return;
- }
-
- if (left->reg().is(right->reg())) {
- if (!left_info.IsSmi()) {
- Condition is_smi = masm()->CheckSmi(left->reg());
- both_smi->Branch(is_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
- left->Unuse();
- right->Unuse();
- both_smi->Jump();
- }
- } else if (!left_info.IsSmi()) {
- if (!right_info.IsSmi()) {
- Condition is_smi = masm()->CheckBothSmi(left->reg(), right->reg());
- both_smi->Branch(is_smi);
- } else {
- Condition is_smi = masm()->CheckSmi(left->reg());
- both_smi->Branch(is_smi);
- }
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
- if (!right_info.IsSmi()) {
- Condition is_smi = masm()->CheckSmi(right->reg());
- both_smi->Branch(is_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
- left->Unuse();
- right->Unuse();
- both_smi->Jump();
- }
- }
-}
-
-
-void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
- TypeInfo type,
- DeferredCode* deferred) {
- if (!type.IsSmi()) {
- __ JumpIfNotSmi(reg, deferred->entry_label());
- }
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(reg);
- }
-}
-
-
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- TypeInfo left_info,
- TypeInfo right_info,
- DeferredCode* deferred) {
- if (!left_info.IsSmi() && !right_info.IsSmi()) {
- __ JumpIfNotBothSmi(left, right, deferred->entry_label());
- } else if (!left_info.IsSmi()) {
- __ JumpIfNotSmi(left, deferred->entry_label());
- } else if (!right_info.IsSmi()) {
- __ JumpIfNotSmi(right, deferred->entry_label());
- }
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
-}
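
The fast paths in these helpers exist because of how smis are tagged: the low
bit of a smi word is zero, so a single OR of two words followed by one tag-bit
test classifies both at once. A rough sketch of the principle in plain C++
(the tagging layout is simplified; this is not the MacroAssembler code):

#include <cassert>
#include <cstdint>

const uintptr_t kTagMask = 1;  // low tag bit: 0 means smi, 1 means heap object

static bool IsSmi(uintptr_t word) { return (word & kTagMask) == 0; }

// One test instead of two: if either word has the tag bit set, so does the OR.
static bool BothSmi(uintptr_t a, uintptr_t b) {
  return ((a | b) & kTagMask) == 0;
}

int main() {
  uintptr_t smi_a = 4, smi_b = 8, heap_ptr = 9;  // odd word: tagged pointer
  assert(IsSmi(smi_a) && BothSmi(smi_a, smi_b));
  assert(!BothSmi(smi_a, heap_ptr));
  return 0;
}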
-
-
-// Implements a binary operation using a deferred code object and some
-// inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
- // Copy the type info because left and right may be overwritten.
- TypeInfo left_type_info = left->type_info();
- TypeInfo right_type_info = right->type_info();
- Token::Value op = expr->op();
- Result answer;
- // Special handling of div and mod because they use fixed registers.
- if (op == Token::DIV || op == Token::MOD) {
- // We need rax as the quotient register, rdx as the remainder
- // register, neither left nor right in rax or rdx, and left copied
- // to rax.
- Result quotient;
- Result remainder;
- bool left_is_in_rax = false;
- // Step 1: get rax for quotient.
- if ((left->is_register() && left->reg().is(rax)) ||
- (right->is_register() && right->reg().is(rax))) {
- // One or both are in rax. Use a fresh non-rdx register for them.
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (fresh.reg().is(rdx)) {
- remainder = fresh;
- fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- }
- if (left->is_register() && left->reg().is(rax)) {
- quotient = *left;
- *left = fresh;
- left_is_in_rax = true;
- }
- if (right->is_register() && right->reg().is(rax)) {
- quotient = *right;
- *right = fresh;
- }
- __ movq(fresh.reg(), rax);
- } else {
- // Neither left nor right is in rax.
- quotient = allocator_->Allocate(rax);
- }
- ASSERT(quotient.is_register() && quotient.reg().is(rax));
- ASSERT(!(left->is_register() && left->reg().is(rax)));
- ASSERT(!(right->is_register() && right->reg().is(rax)));
-
- // Step 2: get rdx for remainder if necessary.
- if (!remainder.is_valid()) {
- if ((left->is_register() && left->reg().is(rdx)) ||
- (right->is_register() && right->reg().is(rdx))) {
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (left->is_register() && left->reg().is(rdx)) {
- remainder = *left;
- *left = fresh;
- }
- if (right->is_register() && right->reg().is(rdx)) {
- remainder = *right;
- *right = fresh;
- }
- __ movq(fresh.reg(), rdx);
- } else {
- // Neither left nor right is in rdx.
- remainder = allocator_->Allocate(rdx);
- }
- }
- ASSERT(remainder.is_register() && remainder.reg().is(rdx));
- ASSERT(!(left->is_register() && left->reg().is(rdx)));
- ASSERT(!(right->is_register() && right->reg().is(rdx)));
-
- left->ToRegister();
- right->ToRegister();
- frame_->Spill(rax);
- frame_->Spill(rdx);
-
- // Check that left and right are smi tagged.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- (op == Token::DIV) ? rax : rdx,
- left->reg(),
- right->reg(),
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
- left_type_info, right_type_info, deferred);
-
- if (op == Token::DIV) {
- __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = quotient;
- } else {
- ASSERT(op == Token::MOD);
- __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = remainder;
- }
- ASSERT(answer.is_valid());
- return answer;
- }
-
- // Special handling of shift operations because they use fixed
- // registers.
- if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
- // Move left out of rcx if necessary.
- if (left->is_register() && left->reg().is(rcx)) {
- *left = allocator_->Allocate();
- ASSERT(left->is_valid());
- __ movq(left->reg(), rcx);
- }
- right->ToRegister(rcx);
- left->ToRegister();
- ASSERT(left->is_register() && !left->reg().is(rcx));
- ASSERT(right->is_register() && right->reg().is(rcx));
-
- // We will modify right, it must be spilled.
- frame_->Spill(rcx);
-
- // Use a fresh answer register to avoid spilling the left operand.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
- // Check that both operands are smis using the answer register as a
- // temporary.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- rcx,
- overwrite_mode);
-
- Label do_op;
- // Left operand must be unchanged in left->reg() for deferred code.
- // Left operand is in answer.reg(), possibly converted to int32, for
- // inline code.
- __ movq(answer.reg(), left->reg());
- if (right_type_info.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(right->reg());
- }
- // If left is not known to be a smi, check if it is.
- // If left is not known to be a number, and it isn't a smi, check if
- // it is a HeapNumber.
- if (!left_type_info.IsSmi()) {
- __ JumpIfSmi(answer.reg(), &do_op);
- if (!left_type_info.IsNumber()) {
- // Branch if not a heapnumber.
- __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
- deferred->Branch(not_equal);
- }
- // Load integer value into answer register using truncation.
- __ cvttsd2si(answer.reg(),
- FieldOperand(answer.reg(), HeapNumber::kValueOffset));
- // Branch if we might have overflowed.
- // (False negative for Smi::kMinValue)
- __ cmpl(answer.reg(), Immediate(0x80000000));
- deferred->Branch(equal);
- // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
- __ Integer32ToSmi(answer.reg(), answer.reg());
- } else {
- // Fast case - both are actually smis.
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left->reg());
- }
- }
- } else {
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
- left_type_info, right_type_info, deferred);
- }
- __ bind(&do_op);
-
- // Perform the operation.
- switch (op) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(answer.reg(), answer.reg(), rcx);
- break;
- case Token::SHR: {
- __ SmiShiftLogicalRight(answer.reg(),
- answer.reg(),
- rcx,
- deferred->entry_label());
- break;
- }
- case Token::SHL: {
- __ SmiShiftLeft(answer.reg(),
- answer.reg(),
- rcx);
- break;
- }
- default:
- UNREACHABLE();
- }
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
- }
-
- // Handle the other binary operations.
- left->ToRegister();
- right->ToRegister();
- // A newly allocated register answer is used to hold the answer. The
- // registers containing left and right are not modified so they don't
- // need to be spilled in the fast case.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
-
- // Perform the smi tag check.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- right->reg(),
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
- left_type_info, right_type_info, deferred);
-
- switch (op) {
- case Token::ADD:
- __ SmiAdd(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
-
- case Token::SUB:
- __ SmiSub(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
-
- case Token::MUL: {
- __ SmiMul(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
- }
-
- case Token::BIT_OR:
- __ SmiOr(answer.reg(), left->reg(), right->reg());
- break;
-
- case Token::BIT_AND:
- __ SmiAnd(answer.reg(), left->reg(), right->reg());
- break;
-
- case Token::BIT_XOR:
- __ SmiXor(answer.reg(), left->reg(), right->reg());
- break;
-
- default:
- UNREACHABLE();
- break;
- }
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
-}
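
The fixed-register shuffling for DIV, MOD, and the shifts above comes straight
from the x86-64 ISA: idiv always takes its dividend in rdx:rax and leaves the
quotient in rax and the remainder in rdx, and variable shift counts must live
in cl. A small GCC/Clang inline-asm illustration of the idiv convention
(x86-64 only, not V8 code):

#include <cassert>
#include <cstdint>

static void DivMod(int64_t a, int64_t b, int64_t* q, int64_t* r) {
  int64_t quot, rem;
  asm("cqo\n\t"           // sign-extend rax into rdx:rax
      "idivq %[divisor]"  // quotient -> rax, remainder -> rdx
      : "=a"(quot), "=d"(rem)
      : "a"(a), [divisor] "r"(b)
      : "cc");
  *q = quot;
  *r = rem;
}

int main() {
  int64_t q, r;
  DivMod(7, -2, &q, &r);
  assert(q == -3 && r == 1);  // idiv truncates toward zero
  return 0;
}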
-
-
-// Call the appropriate binary operation stub to compute src op value
-// and leave the result in dst.
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
- DeferredInlineSmiOperation(Token::Value op,
- Register dst,
- Register src,
- Smi* value,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- src_(src),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiOperation");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Register src_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperation::Generate() {
- // For mod we don't generate all the Smi code inline.
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, src_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// Call the appropriate binary operation stub to compute value op src
-// and leave the result in dst.
-class DeferredInlineSmiOperationReversed: public DeferredCode {
- public:
- DeferredInlineSmiOperationReversed(Token::Value op,
- Register dst,
- Smi* value,
- Register src,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- value_(value),
- src_(src),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiOperationReversed");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Smi* value_;
- Register src_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperationReversed::Generate() {
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, value_, src_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredInlineSmiAdd: public DeferredCode {
- public:
- DeferredInlineSmiAdd(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAdd");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAdd::Generate() {
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// The result of value + src is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative addition and call the appropriate
-// specialized stub for add. The result is left in dst.
-class DeferredInlineSmiAddReversed: public DeferredCode {
- public:
- DeferredInlineSmiAddReversed(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAddReversed");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAddReversed::Generate() {
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, value_, dst_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredInlineSmiSub: public DeferredCode {
- public:
- DeferredInlineSmiSub(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiSub");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiSub::Generate() {
- GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
- Result* operand,
- Handle<Object> value,
- bool reversed,
- OverwriteMode overwrite_mode) {
- // Generate inline code for a binary operation when one of the
- // operands is a constant smi. Consumes the argument "operand".
- if (IsUnsafeSmi(value)) {
- Result unsafe_operand(value);
- if (reversed) {
- return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
- overwrite_mode);
- } else {
- return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
- overwrite_mode);
- }
- }
-
- // Get the literal value.
- Smi* smi_value = Smi::cast(*value);
- int int_value = smi_value->value();
-
- Token::Value op = expr->op();
- Result answer;
- switch (op) {
- case Token::ADD: {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred = NULL;
- if (reversed) {
- deferred = new DeferredInlineSmiAddReversed(operand->reg(),
- smi_value,
- overwrite_mode);
- } else {
- deferred = new DeferredInlineSmiAdd(operand->reg(),
- smi_value,
- overwrite_mode);
- }
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiAddConstant(operand->reg(),
- operand->reg(),
- smi_value,
- deferred->entry_label());
- deferred->BindExit();
- answer = *operand;
- break;
- }
-
- case Token::SUB: {
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- answer = *operand;
- DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- // A smi currently fits in a 32-bit Immediate.
- __ SmiSubConstant(operand->reg(),
- operand->reg(),
- smi_value,
- deferred->entry_label());
- deferred->BindExit();
- operand->Unuse();
- }
- break;
- }
-
- case Token::SAR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftArithmeticRightConstant(operand->reg(),
- operand->reg(),
- shift_value);
- deferred->BindExit();
- answer = *operand;
- }
- break;
-
- case Token::SHR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftLogicalRightConstant(answer.reg(),
- operand->reg(),
- shift_value,
- deferred->entry_label());
- deferred->BindExit();
- operand->Unuse();
- }
- break;
-
- case Token::SHL:
- if (reversed) {
- operand->ToRegister();
-
- // We need rcx to be available to hold operand, and to be spilled.
- // SmiShiftLeft implicitly modifies rcx.
- if (operand->reg().is(rcx)) {
- frame_->Spill(operand->reg());
- answer = allocator()->Allocate();
- } else {
- Result rcx_reg = allocator()->Allocate(rcx);
- // answer must not be rcx.
- answer = allocator()->Allocate();
- // rcx_reg goes out of scope.
- }
-
- DeferredInlineSmiOperationReversed* deferred =
- new DeferredInlineSmiOperationReversed(op,
- answer.reg(),
- smi_value,
- operand->reg(),
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
-
- __ Move(answer.reg(), smi_value);
- __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
- operand->Unuse();
-
- deferred->BindExit();
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- if (shift_value == 0) {
- // Spill operand so it can be overwritten in the slow case.
- frame_->Spill(operand->reg());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- deferred->BindExit();
- answer = *operand;
- } else {
- // Use a fresh temporary for nonzero shift values.
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftLeftConstant(answer.reg(),
- operand->reg(),
- shift_value);
- deferred->BindExit();
- operand->Unuse();
- }
- }
- break;
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND: {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- if (reversed) {
- // Bit operations with a constant smi are commutative.
- // We can swap left and right operands with no problem.
- // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
- overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
- }
- DeferredCode* deferred = new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- if (op == Token::BIT_AND) {
- __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
- } else if (op == Token::BIT_XOR) {
- if (int_value != 0) {
- __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
- }
- } else {
- ASSERT(op == Token::BIT_OR);
- if (int_value != 0) {
- __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
- }
- }
- deferred->BindExit();
- answer = *operand;
- break;
- }
-
- // Generate inline code for mod of powers of 2 and negative powers of 2.
- case Token::MOD:
- if (!reversed &&
- int_value != 0 &&
- (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- __ JumpUnlessNonNegativeSmi(operand->reg(), deferred->entry_label());
- if (int_value < 0) int_value = -int_value;
- if (int_value == 1) {
- __ Move(operand->reg(), Smi::FromInt(0));
- } else {
- __ SmiAndConstant(operand->reg(),
- operand->reg(),
- Smi::FromInt(int_value - 1));
- }
- deferred->BindExit();
- answer = *operand;
- break; // This break only applies if we generated code for MOD.
- }
- // Fall through if we did not find a power of 2 on the right hand side!
- // The next case must be the default.
-
- default: {
- Result constant_operand(value);
- if (reversed) {
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
- overwrite_mode);
- }
- break;
- }
- }
- ASSERT(answer.is_valid());
- return answer;
-}
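
Two of the shortcuts above deserve a worked check. The MOD case relies on the
identity x % 2^k == x & (2^k - 1), which only holds for non-negative x (hence
the JumpUnlessNonNegativeSmi guard), and the commutative bit-op case swaps the
overwrite modes with (2 * mode) % 3, mapping 0->0, 1->2, 2->1. A quick
self-contained verification in plain C++:

#include <cassert>

int main() {
  // Power-of-two modulus as a mask, valid for non-negative operands only;
  // a negative operand is exactly the case that falls back to deferred code.
  for (int x = 0; x < 1000; x++) {
    assert(x % 8 == (x & 7));
    assert(x % 16 == (x & 15));
  }
  assert((-5 % 8) != (-5 & 7));  // -5 % 8 == -5, but -5 & 7 == 3

  // Overwrite-mode swap: mode 0 (no overwrite) stays put; modes 1 and 2
  // (overwrite left / overwrite right) trade places.
  for (int mode = 0; mode <= 2; mode++) {
    int swapped = (2 * mode) % 3;
    assert(swapped == (mode == 0 ? 0 : 3 - mode));
  }
  return 0;
}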
-
-
-static bool CouldBeNaN(const Result& result) {
- if (result.type_info().IsSmi()) return false;
- if (result.type_info().IsInteger32()) return false;
- if (!result.is_constant()) return true;
- if (!result.handle()->IsHeapNumber()) return false;
- return isnan(HeapNumber::cast(*result.handle())->value());
-}
-
-
-// Convert from signed to unsigned comparison to match the way EFLAGS are set
-// by FPU and XMM compare instructions.
-static Condition DoubleCondition(Condition cc) {
- switch (cc) {
- case less: return below;
- case equal: return equal;
- case less_equal: return below_equal;
- case greater: return above;
- case greater_equal: return above_equal;
- default: UNREACHABLE();
- }
- UNREACHABLE();
- return equal;
-}
-
-
-static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
- bool inline_number_compare) {
- CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
- if (nan_info == kCantBothBeNaN) {
- flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
- }
- if (inline_number_compare) {
- flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
- }
- return flags;
-}
-
-
-void CodeGenerator::Comparison(AstNode* node,
- Condition cc,
- bool strict,
- ControlDestination* dest) {
- // Strict only makes sense for equality comparisons.
- ASSERT(!strict || cc == equal);
-
- Result left_side;
- Result right_side;
- // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
- if (cc == greater || cc == less_equal) {
- cc = ReverseCondition(cc);
- left_side = frame_->Pop();
- right_side = frame_->Pop();
- } else {
- right_side = frame_->Pop();
- left_side = frame_->Pop();
- }
- ASSERT(cc == less || cc == equal || cc == greater_equal);
-
- // If either side is a constant smi, optimize the comparison.
- bool left_side_constant_smi = false;
- bool left_side_constant_null = false;
- bool left_side_constant_1_char_string = false;
- if (left_side.is_constant()) {
- left_side_constant_smi = left_side.handle()->IsSmi();
- left_side_constant_null = left_side.handle()->IsNull();
- left_side_constant_1_char_string =
- (left_side.handle()->IsString() &&
- String::cast(*left_side.handle())->length() == 1 &&
- String::cast(*left_side.handle())->IsAsciiRepresentation());
- }
- bool right_side_constant_smi = false;
- bool right_side_constant_null = false;
- bool right_side_constant_1_char_string = false;
- if (right_side.is_constant()) {
- right_side_constant_smi = right_side.handle()->IsSmi();
- right_side_constant_null = right_side.handle()->IsNull();
- right_side_constant_1_char_string =
- (right_side.handle()->IsString() &&
- String::cast(*right_side.handle())->length() == 1 &&
- String::cast(*right_side.handle())->IsAsciiRepresentation());
- }
-
- if (left_side_constant_smi || right_side_constant_smi) {
- bool is_loop_condition = (node->AsExpression() != NULL) &&
- node->AsExpression()->is_loop_condition();
- ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
- left_side_constant_smi, right_side_constant_smi,
- is_loop_condition);
- } else if (left_side_constant_1_char_string ||
- right_side_constant_1_char_string) {
- if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
- // Trivial case, comparing two constants.
- int left_value = String::cast(*left_side.handle())->Get(0);
- int right_value = String::cast(*right_side.handle())->Get(0);
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant one-character string.
- // If the left side is a constant one-character string, reverse the operands.
- // Since one side is a constant string, conversion order does not matter.
- if (left_side_constant_1_char_string) {
- Result temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may reintroduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant string, inlining the case
- // where both sides are strings.
- left_side.ToRegister();
-
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_not_string, is_string;
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
- ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
- Condition is_smi = masm()->CheckSmi(left_reg);
- is_not_string.Branch(is_smi, &left_side);
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(),
- FieldOperand(left_reg, HeapObject::kMapOffset));
- __ movzxbl(temp.reg(),
- FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
- // If we are testing for equality then make use of the symbol shortcut.
- // Check if the left hand side has the same type as the right hand
- // side (which is always a symbol).
- if (cc == equal) {
- Label not_a_symbol;
- STATIC_ASSERT(kSymbolTag != 0);
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
- __ j(zero, &not_a_symbol);
- // They are symbols, so do identity compare.
- __ Cmp(left_reg, right_side.handle());
- dest->true_target()->Branch(equal);
- dest->false_target()->Branch(not_equal);
- __ bind(&not_a_symbol);
- }
- // Call the compare stub if the left side is not a flat ASCII string.
- __ andb(temp.reg(),
- Immediate(kIsNotStringMask |
- kStringRepresentationMask |
- kStringEncodingMask));
- __ cmpb(temp.reg(),
- Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
- temp.Unuse();
- is_string.Branch(equal, &left_side);
-
- // Set up and call the compare stub.
- is_not_string.Bind(&left_side);
- CompareFlags flags =
- static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
- CompareStub stub(cc, strict, flags);
- Result result = frame_->CallStub(&stub, &left_side, &right_side);
- result.ToRegister();
- __ testq(result.reg(), result.reg());
- result.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- is_string.Bind(&left_side);
- // left_side is a sequential ASCII string.
- ASSERT(left_side.reg().is(left_reg));
- right_side = Result(right_val);
- Result temp2 = allocator_->Allocate();
- ASSERT(temp2.is_valid());
- // Test string equality and comparison.
- if (cc == equal) {
- Label comparison_done;
- __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
- Smi::FromInt(1));
- __ j(not_equal, &comparison_done);
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_val)->Get(0));
- __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
- Immediate(char_value));
- __ bind(&comparison_done);
- } else {
- __ movq(temp2.reg(),
- FieldOperand(left_side.reg(), String::kLengthOffset));
- __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
- Label comparison;
- // If the length is 0 then the subtraction gave -1 which compares less
- // than any character.
- __ j(negative, &comparison);
- // Otherwise load the first character.
- __ movzxbl(temp2.reg(),
- FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
- __ bind(&comparison);
- // Compare the first character of the string with the
- // constant 1-character string.
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
- __ cmpb(temp2.reg(), Immediate(char_value));
- Label characters_were_different;
- __ j(not_equal, &characters_were_different);
- // If the first character is the same then the long string sorts after
- // the short one.
- __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
- Smi::FromInt(1));
- __ bind(&characters_were_different);
- }
- temp2.Unuse();
- left_side.Unuse();
- right_side.Unuse();
- dest->Split(cc);
- }
- } else {
- // Neither side is a constant Smi, constant 1-char string, or constant null.
- // If either side is a non-smi constant, or known to be a heap number,
- // skip the smi check.
- bool known_non_smi =
- (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
- (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
- left_side.type_info().IsDouble() ||
- right_side.type_info().IsDouble();
-
- NaNInformation nan_info =
- (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
- kBothCouldBeNaN :
- kCantBothBeNaN;
-
- // Inline a number comparison handling any combination of smis and heap
- // numbers if:
- //   the code is in a loop,
- //   the compare operation is different from equal, and
- //   the compare is not a for-loop condition.
- // Equal is excluded because it will most likely be done on smis (not heap
- // numbers), and the code for comparing smis is inlined separately. The
- // same reasoning applies to for-loop conditions, which will also most
- // likely be smi comparisons.
- bool is_loop_condition = (node->AsExpression() != NULL)
- && node->AsExpression()->is_loop_condition();
- bool inline_number_compare =
- loop_nesting() > 0 && cc != equal && !is_loop_condition;
-
- // Left and right needed in registers for the following code.
- left_side.ToRegister();
- right_side.ToRegister();
-
- if (known_non_smi) {
- // Inlined equality check:
- // If at least one of the objects is not NaN, then if the objects
- // are identical, they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmpq(left_side.reg(), right_side.reg());
- dest->true_target()->Branch(equal);
- }
-
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
-
- // End of in-line compare, call out to the compare stub. Don't include
- // number comparison in the stub if it was inlined.
- CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
- CompareStub stub(cc, strict, flags);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flag.
- answer.Unuse();
- dest->Split(cc);
- } else {
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_smi;
- Register left_reg = left_side.reg();
- Register right_reg = right_side.reg();
-
- // In-line check for comparing two smis.
- JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
-
- if (has_valid_frame()) {
- // Inline the equality check if both operands can't be a NaN. If both
- // objects are the same they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmpq(left_side.reg(), right_side.reg());
- dest->true_target()->Branch(equal);
- }
-
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
-
- // End of in-line compare, call out to the compare stub. Don't include
- // number comparison in the stub if it was inlined.
- CompareFlags flags =
- ComputeCompareFlags(nan_info, inline_number_compare);
- CompareStub stub(cc, strict, flags);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
- answer.Unuse();
- if (is_smi.is_linked()) {
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
- } else {
- dest->Split(cc);
- }
- }
-
- if (is_smi.is_linked()) {
- is_smi.Bind();
- left_side = Result(left_reg);
- right_side = Result(right_reg);
- __ SmiCompare(left_side.reg(), right_side.reg());
- right_side.Unuse();
- left_side.Unuse();
- dest->Split(cc);
- }
- }
- }
-}
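
The operand reversal at the top of Comparison rests on a simple identity:
a > b is the same as b < a, and a <= b the same as b >= a, so popping the
operands in swapped order while reversing the condition preserves the result
and keeps the ECMA-262 left-then-right conversion order. A small illustrative
check:

#include <cassert>

int main() {
  double samples[] = {-2.5, 0.0, 0.0, 3.0, 7.125};
  const int n = sizeof(samples) / sizeof(samples[0]);
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) {
      double a = samples[i], b = samples[j];
      assert((a > b) == (b < a));    // greater <-> less
      assert((a <= b) == (b >= a));  // less_equal <-> greater_equal
    }
  }
  return 0;
}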
-
-
-void CodeGenerator::ConstantSmiComparison(Condition cc,
- bool strict,
- ControlDestination* dest,
- Result* left_side,
- Result* right_side,
- bool left_side_constant_smi,
- bool right_side_constant_smi,
- bool is_loop_condition) {
- if (left_side_constant_smi && right_side_constant_smi) {
- // Trivial case, comparing two constants.
- int left_value = Smi::cast(*left_side->handle())->value();
- int right_value = Smi::cast(*right_side->handle())->value();
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant Smi.
- // If left side is a constant Smi, reverse the operands.
- // Since one side is a constant Smi, conversion order does not matter.
- if (left_side_constant_smi) {
- Result* temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may re-introduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant Smi, inlining the case
- // where both sides are Smis.
- left_side->ToRegister();
- Register left_reg = left_side->reg();
- Smi* constant_smi = Smi::cast(*right_side->handle());
-
- if (left_side->is_smi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left_reg);
- }
- // Test smi equality and comparison by signed int comparison.
- // Both sides are smis, so we can use an Immediate.
- __ SmiCompare(left_reg, constant_smi);
- left_side->Unuse();
- right_side->Unuse();
- dest->Split(cc);
- } else {
- // Only the case where the left side could possibly be a non-smi is left.
- JumpTarget is_smi;
- if (cc == equal) {
- // We can do the equality comparison before the smi check.
- __ SmiCompare(left_reg, constant_smi);
- dest->true_target()->Branch(equal);
- Condition left_is_smi = masm_->CheckSmi(left_reg);
- dest->false_target()->Branch(left_is_smi);
- } else {
- // Do the smi check, then the comparison.
- Condition left_is_smi = masm_->CheckSmi(left_reg);
- is_smi.Branch(left_is_smi, left_side, right_side);
- }
-
- // Jump or fall through to here if we are comparing a non-smi to a
- // constant smi. If the non-smi is a heap number and this is not
- // a loop condition, inline the floating point code.
- if (!is_loop_condition) {
- // Right side is a constant smi and left side has been checked
- // not to be a smi.
- JumpTarget not_number;
- __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
- not_number.Branch(not_equal, left_side);
- __ movsd(xmm1,
- FieldOperand(left_reg, HeapNumber::kValueOffset));
- int value = constant_smi->value();
- if (value == 0) {
- __ xorpd(xmm0, xmm0);
- } else {
- Result temp = allocator()->Allocate();
- __ movl(temp.reg(), Immediate(value));
- __ cvtlsi2sd(xmm0, temp.reg());
- temp.Unuse();
- }
- __ ucomisd(xmm1, xmm0);
- // Jump to builtin for NaN.
- not_number.Branch(parity_even, left_side);
- left_side->Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
- not_number.Bind(left_side);
- }
-
- // Set up and call the compare stub.
- CompareFlags flags =
- static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
- CompareStub stub(cc, strict, flags);
- Result result = frame_->CallStub(&stub, left_side, right_side);
- result.ToRegister();
- __ testq(result.reg(), result.reg());
- result.Unuse();
- if (cc == equal) {
- dest->Split(cc);
- } else {
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- // It is important for performance for this case to be at the end.
- is_smi.Bind(left_side, right_side);
- __ SmiCompare(left_reg, constant_smi);
- left_side->Unuse();
- right_side->Unuse();
- dest->Split(cc);
- }
- }
- }
-}
-
-
- // Load a comparison operand into an XMM register. Jump to the not_numbers
- // jump target, passing the left and right result, if the operand is not a
- // number.
-static void LoadComparisonOperand(MacroAssembler* masm_,
- Result* operand,
- XMMRegister xmm_reg,
- Result* left_side,
- Result* right_side,
- JumpTarget* not_numbers) {
- Label done;
- if (operand->type_info().IsDouble()) {
- // Operand is known to be a heap number, just load it.
- __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- } else if (operand->type_info().IsSmi()) {
- // Operand is known to be a smi. Convert it to double and keep the original
- // smi.
- __ SmiToInteger32(kScratchRegister, operand->reg());
- __ cvtlsi2sd(xmm_reg, kScratchRegister);
- } else {
- // Operand type not known, check for smi or heap number.
- Label smi;
- __ JumpIfSmi(operand->reg(), &smi);
- if (!operand->type_info().IsNumber()) {
- __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
- kScratchRegister);
- not_numbers->Branch(not_equal, left_side, right_side, taken);
- }
- __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&smi);
- // Convert smi to double and keep the original smi.
- __ SmiToInteger32(kScratchRegister, operand->reg());
- __ cvtlsi2sd(xmm_reg, kScratchRegister);
- __ jmp(&done);
- }
- __ bind(&done);
-}
-
-
-void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
- Result* right_side,
- Condition cc,
- ControlDestination* dest) {
- ASSERT(left_side->is_register());
- ASSERT(right_side->is_register());
-
- JumpTarget not_numbers;
- // Load the left and right operands into xmm0 and xmm1 and compare.
- LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
- &not_numbers);
- LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
- &not_numbers);
- __ ucomisd(xmm0, xmm1);
- // Bail out if a NaN is involved.
- not_numbers.Branch(parity_even, left_side, right_side);
-
- // Split to destination targets based on comparison.
- left_side->Unuse();
- right_side->Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
-
- not_numbers.Bind(left_side, right_side);
-}
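
The parity_even bailouts above exist because ucomisd reports an unordered
result (at least one NaN operand) through the parity flag, and IEEE 754 makes
every ordered comparison involving NaN false, so no flag setting from the
inline compare can be trusted. The same semantics at the C++ level, as a
reminder (not V8 code):

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  // Every ordered comparison with NaN is false...
  assert(!(nan < 1.0) && !(nan > 1.0) && !(nan == nan) && !(nan <= nan));
  // ...and inequality is the only comparison that is true.
  assert(nan != nan);
  assert(std::isnan(nan));
  return 0;
}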
-
-
-// Call the function just below TOS on the stack with the given
-// arguments. The receiver is the TOS.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- CallFunctionFlags flags,
- int position) {
- // Push the arguments ("left-to-right") on the stack.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Record the position for debugging purposes.
- CodeForSourcePosition(position);
-
- // Use the shared code stub to call the function.
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, flags);
- Result answer = frame_->CallStub(&call_function, arg_count + 1);
- // Restore context and replace function on the stack with the
- // result of the stub invocation.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &answer);
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position) {
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments).
- // If the arguments object of the scope has not been allocated,
- // and x.apply is Function.prototype.apply, this optimization
- // just copies y and the arguments of the current function on the
- // stack, as receiver and arguments, and calls x.
- // In the implementation comments, we call x the applicand
- // and y the receiver.
- ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
- ASSERT(arguments->IsArguments());
-
- // Load applicand.apply onto the stack. This will usually
- // give us a megamorphic load site. Not super, but it works.
- Load(applicand);
- frame()->Dup();
- Handle<String> name = Factory::LookupAsciiSymbol("apply");
- frame()->Push(name);
- Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
- __ nop();
- frame()->Push(&answer);
-
- // Load the receiver and the existing arguments object onto the
- // expression stack. Avoid allocating the arguments object here.
- Load(receiver);
- LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-
- // Emit the source position information after having loaded the
- // receiver and the arguments.
- CodeForSourcePosition(position);
- // Contents of frame at this point:
- // Frame[0]: arguments object of the current function or the hole.
- // Frame[1]: receiver
- // Frame[2]: applicand.apply
- // Frame[3]: applicand.
-
- // Check if the arguments object has been lazily allocated
- // already. If so, just use that instead of copying the arguments
- // from the stack. This also deals with cases where a local variable
- // named 'arguments' has been introduced.
- frame_->Dup();
- Result probe = frame_->Pop();
- { VirtualFrame::SpilledScope spilled_scope;
- Label slow, done;
- bool try_lazy = true;
- if (probe.is_constant()) {
- try_lazy = probe.handle()->IsArgumentsMarker();
- } else {
- __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
- probe.Unuse();
- __ j(not_equal, &slow);
- }
-
- if (try_lazy) {
- Label build_args;
- // Get rid of the arguments object probe.
- frame_->Drop(); // Can be called on a spilled frame.
- // Stack now has 3 elements on it.
- // Contents of stack at this point:
- // rsp[0]: receiver
- // rsp[1]: applicand.apply
- // rsp[2]: applicand.
-
- // Check that the receiver really is a JavaScript object.
- __ movq(rax, Operand(rsp, 0));
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &build_args);
- // We allow all JSObjects including JSFunctions. As long as
- // JS_FUNCTION_TYPE is the last instance type and it is right
- // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
- // bound.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &build_args);
-
- // Check that applicand.apply is Function.prototype.apply.
- __ movq(rax, Operand(rsp, kPointerSize));
- is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &build_args);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &build_args);
- __ movq(rcx, FieldOperand(rax, JSFunction::kCodeEntryOffset));
- __ subq(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ Cmp(rcx, apply_code);
- __ j(not_equal, &build_args);
-
- // Check that applicand is a function.
- __ movq(rdi, Operand(rsp, 2 * kPointerSize));
- is_smi = masm_->CheckSmi(rdi);
- __ j(is_smi, &build_args);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &build_args);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ Set(rax, scope()->num_parameters());
- for (int i = 0; i < scope()->num_parameters(); i++) {
- __ push(frame_->ParameterAt(i));
- }
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but
- // limit the number of arguments copied to avoid stack overflow.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ SmiToInteger32(rax,
- Operand(rdx,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movl(rcx, rax);
- __ cmpl(rax, Immediate(kArgumentsLimit));
- __ j(above, &build_args);
-
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- // rcx is a small non-negative integer, due to the test above.
- __ testl(rcx, rcx);
- __ j(zero, &invoke);
- __ bind(&loop);
- __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
- __ decl(rcx);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
- // Drop applicand.apply and applicand from the stack, and push
- // the result of the function call, but leave the spilled frame
- // unchanged, with 3 elements, so it is correct when we compile the
- // slow-case code.
- __ addq(rsp, Immediate(2 * kPointerSize));
- __ push(rax);
- // Stack now has 1 element:
- // rsp[0]: result
- __ jmp(&done);
-
- // Slow case: Allocate the arguments object since we know it isn't
- // there, and fall through to the slow case where we call
- // applicand.apply.
- __ bind(&build_args);
- // Stack now has 3 elements, because we jumped here from the point where:
- // rsp[0]: receiver
- // rsp[1]: applicand.apply
- // rsp[2]: applicand.
-
- // StoreArgumentsObject requires a correct frame, and may modify it.
- Result arguments_object = StoreArgumentsObject(false);
- frame_->SpillAll();
- arguments_object.ToRegister();
- frame_->EmitPush(arguments_object.reg());
- arguments_object.Unuse();
- // Stack and frame now have 4 elements.
- __ bind(&slow);
- }
-
- // Generic computation of x.apply(y, args) with no special optimization.
- // Flip applicand.apply and applicand on the stack, so
- // applicand looks like the receiver of the applicand.apply call.
- // Then process it as a normal function call.
- __ movq(rax, Operand(rsp, 3 * kPointerSize));
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
- __ movq(Operand(rsp, 3 * kPointerSize), rbx);
-
- CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
- Result res = frame_->CallStub(&call_function, 3);
- // The function and its two arguments have been dropped.
- frame_->Drop(1); // Drop the receiver as well.
- res.ToRegister();
- frame_->EmitPush(res.reg());
- // Stack now has 1 element:
- // rsp[0]: result
- if (try_lazy) __ bind(&done);
- } // End of spilled scope.
- // Restore the context register after a call.
- frame_->RestoreContextRegister();
-}
-
-
-class DeferredStackCheck: public DeferredCode {
- public:
- DeferredStackCheck() {
- set_comment("[ DeferredStackCheck");
- }
-
- virtual void Generate();
-};
-
-
-void DeferredStackCheck::Generate() {
- StackCheckStub stub;
- __ CallStub(&stub);
-}
-
-
-void CodeGenerator::CheckStack() {
- DeferredStackCheck* deferred = new DeferredStackCheck;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- deferred->Branch(below);
- deferred->BindExit();
-}
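
CheckStack is the smallest instance of the deferred-code pattern used all
through this file: emit a cheap inline test, branch out of line for the rare
slow case, and bind an exit label so the fast path falls straight through. A
conceptual sketch with hypothetical names (the real DeferredCode machinery
also records and restores the virtual frame):

#include <iostream>

// The guard mirrors the inline CompareRoot test; the slow callback mirrors
// deferred->Branch(...) jumping to out-of-line code that calls a stub.
template <typename Guard, typename Slow>
void WithDeferred(Guard guard, Slow slow_case) {
  if (!guard()) {
    slow_case();  // e.g. StackCheckStub
  }
  // deferred->BindExit(): both paths continue here.
}

int main() {
  int stack_headroom = 64;
  WithDeferred([&] { return stack_headroom > 0; },
               [] { std::cout << "interrupt / grow stack\n"; });
  std::cout << "fast path continues\n";
  return 0;
}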
-
-
-void CodeGenerator::VisitAndSpill(Statement* statement) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Visit(statement);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- VisitStatements(statements);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(!in_spilled_code());
- for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
- Visit(statements->at(i));
- }
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ Block");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- VisitStatements(node->statements());
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals. The inevitable call
- // will sync frame elements to memory anyway, so we do it eagerly to
- // allow us to push the arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(rsi); // The context is the first argument.
- frame_->EmitPush(kScratchRegister);
- frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
- frame_->EmitPush(Smi::FromInt(strict_mode_flag()));
- Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
- // Return value is ignored.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
- Comment cmnt(masm_, "[ Declaration");
- Variable* var = node->proxy()->var();
- ASSERT(var != NULL); // must have been resolved
- Slot* slot = var->AsSlot();
-
- // If it was not possible to allocate the variable at compile time,
- // we need to "declare" it at runtime to make sure it actually
- // exists in the local context.
- if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Variables with a "LOOKUP" slot were introduced as non-locals
- // during variable resolution and must have mode DYNAMIC.
- ASSERT(var->is_dynamic());
- // For now, just do a runtime call. Sync the virtual frame eagerly
- // so we can simply push the arguments into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(kScratchRegister);
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
- PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
- frame_->EmitPush(Smi::FromInt(attr));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (node->mode() == Variable::CONST) {
- frame_->EmitPush(Heap::kTheHoleValueRootIndex);
- } else if (node->fun() != NULL) {
- Load(node->fun());
- } else {
- frame_->EmitPush(Smi::FromInt(0)); // no initial value!
- }
- Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
- // Ignore the return value (declarations are statements).
- return;
- }
-
- ASSERT(!var->is_global());
-
- // If we have a function or a constant, we need to initialize the variable.
- Expression* val = NULL;
- if (node->mode() == Variable::CONST) {
- val = new Literal(Factory::the_hole_value());
- } else {
- val = node->fun(); // NULL if we don't have a function
- }
-
- if (val != NULL) {
- {
- // Set the initial value.
- Reference target(this, node->proxy());
- Load(val);
- target.SetValue(NOT_CONST_INIT);
- // The reference is removed from the stack (preserving TOS) when
- // it goes out of scope.
- }
- // Get rid of the assigned value (declarations are statements).
- frame_->Drop();
- }
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ExpressionStatement");
- CodeForStatementPosition(node);
- Expression* expression = node->expression();
- expression->MarkAsStatement();
- Load(expression);
- // Remove the lingering expression result from the top of stack.
- frame_->Drop();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "// EmptyStatement");
- CodeForStatementPosition(node);
- // nothing to do
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ IfStatement");
- // Generate different code depending on which parts of the if statement
- // are present or not.
- bool has_then_stm = node->HasThenStatement();
- bool has_else_stm = node->HasElseStatement();
-
- CodeForStatementPosition(node);
- JumpTarget exit;
- if (has_then_stm && has_else_stm) {
- JumpTarget then;
- JumpTarget else_;
- ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The else target was bound, so we compile the else part first.
- Visit(node->else_statement());
-
- // We may have dangling jumps to the then part.
- if (then.is_linked()) {
- if (has_valid_frame()) exit.Jump();
- then.Bind();
- Visit(node->then_statement());
- }
- } else {
- // The then target was bound, so we compile the then part first.
- Visit(node->then_statement());
-
- if (else_.is_linked()) {
- if (has_valid_frame()) exit.Jump();
- else_.Bind();
- Visit(node->else_statement());
- }
- }
-
- } else if (has_then_stm) {
- ASSERT(!has_else_stm);
- JumpTarget then;
- ControlDestination dest(&then, &exit, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The exit label was bound. We may have dangling jumps to the
- // then part.
- if (then.is_linked()) {
- exit.Unuse();
- exit.Jump();
- then.Bind();
- Visit(node->then_statement());
- }
- } else {
- // The then label was bound.
- Visit(node->then_statement());
- }
-
- } else if (has_else_stm) {
- ASSERT(!has_then_stm);
- JumpTarget else_;
- ControlDestination dest(&exit, &else_, false);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.true_was_fall_through()) {
- // The exit label was bound. We may have dangling jumps to the
- // else part.
- if (else_.is_linked()) {
- exit.Unuse();
- exit.Jump();
- else_.Bind();
- Visit(node->else_statement());
- }
- } else {
- // The else label was bound.
- Visit(node->else_statement());
- }
-
- } else {
- ASSERT(!has_then_stm && !has_else_stm);
- // We only care about the condition's side effects (not its value
- // or control flow effect). LoadCondition is called without
- // forcing control flow.
- ControlDestination dest(&exit, &exit, true);
- LoadCondition(node->condition(), &dest, false);
- if (!dest.is_used()) {
- // We got a value on the frame rather than (or in addition to)
- // control flow.
- frame_->Drop();
- }
- }
-
- if (exit.is_linked()) {
- exit.Bind();
- }
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ContinueStatement");
- CodeForStatementPosition(node);
- node->target()->continue_target()->Jump();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ BreakStatement");
- CodeForStatementPosition(node);
- node->target()->break_target()->Jump();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ReturnStatement");
-
- CodeForStatementPosition(node);
- Load(node->expression());
- Result return_value = frame_->Pop();
- masm()->positions_recorder()->WriteRecordedPositions();
- if (function_return_is_shadowed_) {
- function_return_.Jump(&return_value);
- } else {
- frame_->PrepareForReturn();
- if (function_return_.is_bound()) {
- // If the function return label is already bound we reuse the
- // code by jumping to the return site.
- function_return_.Jump(&return_value);
- } else {
- function_return_.Bind(&return_value);
- GenerateReturnSequence(&return_value);
- }
- }
-}
-
-
-void CodeGenerator::GenerateReturnSequence(Result* return_value) {
- // The return value is a live (but not currently reference counted)
- // reference to rax. This is safe because the current frame does not
- // contain a reference to rax (it is prepared for the return by spilling
- // all registers).
- if (FLAG_trace) {
- frame_->Push(return_value);
- *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
- }
- return_value->ToRegister(rax);
-
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
-
- // Leave the frame and return popping the arguments and the
- // receiver.
- frame_->Exit();
- int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
- __ Ret(arguments_bytes, rcx);
- DeleteFrame();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Add padding that will be overwritten by a debugger breakpoint.
- // The shortest return sequence generated is "movq rsp, rbp; pop rbp; ret k"
- // with length 7 (3 + 1 + 3).
- const int kPadding = Assembler::kJSReturnSequenceLength - 7;
- for (int i = 0; i < kPadding; ++i) {
- masm_->int3();
- }
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceLength <=
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WithEnterStatement");
- CodeForStatementPosition(node);
- Load(node->expression());
- Result context;
- if (node->is_catch_block()) {
- context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kPushContext, 1);
- }
-
- // Update context local.
- frame_->SaveContextRegister();
-
- // Verify that the runtime call result and rsi agree.
- if (FLAG_debug_code) {
- __ cmpq(context.reg(), rsi);
- __ Assert(equal, "Runtime::NewContext should end up in rsi");
- }
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WithExitStatement");
- CodeForStatementPosition(node);
- // Pop context.
- __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
- // Update context local.
- frame_->SaveContextRegister();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ SwitchStatement");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
- // Compile the switch value.
- Load(node->tag());
-
- ZoneList<CaseClause*>* cases = node->cases();
- int length = cases->length();
- CaseClause* default_clause = NULL;
-
- JumpTarget next_test;
- // Compile the case label expressions and comparisons. Exit early
- // if a comparison is unconditionally true. The target next_test is
- // bound before the loop in order to indicate control flow to the
- // first comparison.
- next_test.Bind();
- for (int i = 0; i < length && !next_test.is_unused(); i++) {
- CaseClause* clause = cases->at(i);
- // The default is not a test, but remember it for later.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- // We recycle the same target next_test for each test. Bind it if
- // the previous test has not done so and then unuse it for the
- // loop.
- if (next_test.is_linked()) {
- next_test.Bind();
- }
- next_test.Unuse();
-
- // Duplicate the switch value.
- frame_->Dup();
-
- // Compile the label expression.
- Load(clause->label());
-
- // Compare and branch to the body if true or the next test if
- // false. Prefer the next test as a fall through.
- ControlDestination dest(clause->body_target(), &next_test, false);
- Comparison(node, equal, true, &dest);
-
- // If the comparison fell through to the true target, jump to the
- // actual body.
- if (dest.true_was_fall_through()) {
- clause->body_target()->Unuse();
- clause->body_target()->Jump();
- }
- }
-
- // If there was control flow to a next test from the last one
- // compiled, compile a jump to the default or break target.
- if (!next_test.is_unused()) {
- if (next_test.is_linked()) {
- next_test.Bind();
- }
- // Drop the switch value.
- frame_->Drop();
- if (default_clause != NULL) {
- default_clause->body_target()->Jump();
- } else {
- node->break_target()->Jump();
- }
- }
-
- // The last instruction emitted was a jump, either to the default
- // clause or the break target, or else to a case body from the loop
- // that compiles the tests.
- ASSERT(!has_valid_frame());
- // Compile case bodies as needed.
- for (int i = 0; i < length; i++) {
- CaseClause* clause = cases->at(i);
-
- // There are two ways to reach the body: from the corresponding
- // test or as the fall through of the previous body.
- if (clause->body_target()->is_linked() || has_valid_frame()) {
- if (clause->body_target()->is_linked()) {
- if (has_valid_frame()) {
- // If we have both a jump to the test and a fall through, put
- // a jump on the fall through path to avoid the dropping of
- // the switch value on the test path. The exception is the
- // default which has already had the switch value dropped.
- if (clause->is_default()) {
- clause->body_target()->Bind();
- } else {
- JumpTarget body;
- body.Jump();
- clause->body_target()->Bind();
- frame_->Drop();
- body.Bind();
- }
- } else {
- // No fall through to worry about.
- clause->body_target()->Bind();
- if (!clause->is_default()) {
- frame_->Drop();
- }
- }
- } else {
- // Otherwise, we have only fall through.
- ASSERT(has_valid_frame());
- }
-
- // We are now prepared to compile the body.
- Comment cmnt(masm_, "[ Case body");
- VisitStatements(clause->statements());
- }
- clause->body_target()->Unuse();
- }
-
- // We may not have a valid frame here so bind the break target only
- // if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ DoWhileStatement");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- JumpTarget body(JumpTarget::BIDIRECTIONAL);
- IncrementLoopNesting();
-
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- // Label the top of the loop for the backward jump if necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // Use the continue target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- break;
- case ALWAYS_FALSE:
- // No need to label it.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- break;
- case DONT_KNOW:
- // Continue is the test, so use the backward body target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- body.Bind();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Compile the test.
- switch (info) {
- case ALWAYS_TRUE:
- // If control flow can fall off the end of the body, jump back
- // to the top and bind the break target at the exit.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- case ALWAYS_FALSE:
- // We may have had continues or breaks in the body.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- case DONT_KNOW:
- // We have to compile the test expression if it can be reached by
- // control flow falling out of the body or via continue.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- Comment cmnt(masm_, "[ DoWhileCondition");
- CodeForDoWhileConditionPosition(node);
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- }
-
- DecrementLoopNesting();
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WhileStatement");
- CodeForStatementPosition(node);
-
- // If the condition is always false and has no side effects, we do not
- // need to compile anything.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- // Do not duplicate conditions that may have function literal
- // subexpressions. This can cause us to compile the function literal
- // twice.
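- // E.g. (illustrative): a condition such as x < (function () { return y; })()
- // would otherwise have its function literal compiled for both copies
- // of the test.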
- bool test_at_bottom = !node->may_have_function_literal();
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- IncrementLoopNesting();
- JumpTarget body;
- if (test_at_bottom) {
- body.set_direction(JumpTarget::BIDIRECTIONAL);
- }
-
- // Based on the condition analysis, compile the test as necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // We will not compile the test expression. Label the top of the
- // loop with the continue target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- break;
- case DONT_KNOW: {
- if (test_at_bottom) {
- // Continue is the test at the bottom, no need to label the test
- // at the top. The body is a backward target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else {
- // Label the test at the top as the continue target. The body
- // is a forward-only target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- }
- // Compile the test with the body as the true target and preferred
- // fall-through and with the break target as the false target.
- ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // If we got the break target as fall-through, the test may have
- // been unconditionally false (if there are no jumps to the
- // body).
- if (!body.is_linked()) {
- DecrementLoopNesting();
- return;
- }
-
- // Otherwise, jump around the body on the fall through and then
- // bind the body target.
- node->break_target()->Unuse();
- node->break_target()->Jump();
- body.Bind();
- }
- break;
- }
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Based on the condition analysis, compile the backward jump as
- // necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // The loop body has been labeled with the continue target.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- break;
- case DONT_KNOW:
- if (test_at_bottom) {
- // If we have chosen to recompile the test at the bottom,
- // then it is the continue target.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- // The break target is the fall-through (body is a backward
- // jump from here and thus an invalid fall-through).
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- } else {
- // If we have chosen not to recompile the test at the bottom,
- // jump back to the one at the top.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- }
- break;
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- // The break target may be already bound (by the condition), or there
- // may not be a valid frame. Bind it only if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
-
-
-void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
- ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
- if (slot->type() == Slot::LOCAL) {
- frame_->SetTypeForLocalAt(slot->index(), info);
- } else {
- frame_->SetTypeForParamAt(slot->index(), info);
- }
- if (FLAG_debug_code && info.IsSmi()) {
- if (slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(slot->index());
- } else {
- frame_->PushParameterAt(slot->index());
- }
- Result var = frame_->Pop();
- var.ToRegister();
- __ AbortIfNotSmi(var.reg());
- }
-}
-
-
-void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) {
- // A fast smi loop is a for loop with an initializer
- // that is a simple assignment of a smi to a stack variable,
- // a test that is a simple test of that variable against a smi constant,
- // and a step that is an increment/decrement of the variable, and
- // where the variable isn't modified in the loop body.
- // This guarantees that the variable is always a smi.
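- // An illustrative example of a loop matching this pattern (the body
- // here is hypothetical): for (var i = 0; i < 100; i++) { f(i); }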
-
- Variable* loop_var = node->loop_variable();
- Smi* initial_value = *Handle<Smi>::cast(node->init()
- ->StatementAsSimpleAssignment()->value()->AsLiteral()->handle());
- Smi* limit_value = *Handle<Smi>::cast(
- node->cond()->AsCompareOperation()->right()->AsLiteral()->handle());
- Token::Value compare_op =
- node->cond()->AsCompareOperation()->op();
- bool increments =
- node->next()->StatementAsCountOperation()->op() == Token::INC;
-
- // Check that the condition isn't initially false.
- bool initially_false = false;
- int initial_int_value = initial_value->value();
- int limit_int_value = limit_value->value();
- switch (compare_op) {
- case Token::LT:
- initially_false = initial_int_value >= limit_int_value;
- break;
- case Token::LTE:
- initially_false = initial_int_value > limit_int_value;
- break;
- case Token::GT:
- initially_false = initial_int_value <= limit_int_value;
- break;
- case Token::GTE:
- initially_false = initial_int_value < limit_int_value;
- break;
- default:
- UNREACHABLE();
- }
- if (initially_false) return;
-
- // Only check loop condition at the end.
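- // The initially_false check above guarantees that the first iteration
- // runs, so testing the condition only at the bottom is safe.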
-
- Visit(node->init());
-
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- // Set type and stack height of BreakTargets.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
- IncrementLoopNesting();
- loop.Bind();
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
-
- // Set the number type of the loop variable to smi.
- SetTypeForStackSlot(loop_var->AsSlot(), TypeInfo::Smi());
- Visit(node->body());
-
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
-
- if (has_valid_frame()) {
- CodeForStatementPosition(node);
- Slot* loop_var_slot = loop_var->AsSlot();
- if (loop_var_slot->type() == Slot::LOCAL) {
- frame_->TakeLocalAt(loop_var_slot->index());
- } else {
- ASSERT(loop_var_slot->type() == Slot::PARAMETER);
- frame_->TakeParameterAt(loop_var_slot->index());
- }
- Result loop_var_result = frame_->Pop();
- if (!loop_var_result.is_register()) {
- loop_var_result.ToRegister();
- }
- Register loop_var_reg = loop_var_result.reg();
- frame_->Spill(loop_var_reg);
- if (increments) {
- __ SmiAddConstant(loop_var_reg,
- loop_var_reg,
- Smi::FromInt(1));
- } else {
- __ SmiSubConstant(loop_var_reg,
- loop_var_reg,
- Smi::FromInt(1));
- }
-
- frame_->Push(&loop_var_result);
- if (loop_var_slot->type() == Slot::LOCAL) {
- frame_->StoreToLocalAt(loop_var_slot->index());
- } else {
- ASSERT(loop_var_slot->type() == Slot::PARAMETER);
- frame_->StoreToParameterAt(loop_var_slot->index());
- }
- frame_->Drop();
-
- __ SmiCompare(loop_var_reg, limit_value);
- Condition condition;
- switch (compare_op) {
- case Token::LT:
- condition = less;
- break;
- case Token::LTE:
- condition = less_equal;
- break;
- case Token::GT:
- condition = greater;
- break;
- case Token::GTE:
- condition = greater_equal;
- break;
- default:
- condition = never;
- UNREACHABLE();
- }
- loop.Branch(condition);
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ForStatement");
- CodeForStatementPosition(node);
-
- if (node->is_fast_smi_loop()) {
- GenerateFastSmiLoop(node);
- return;
- }
-
- // Compile the init expression if present.
- if (node->init() != NULL) {
- Visit(node->init());
- }
-
- // If the condition is always false and has no side effects, we do not
- // need to compile anything else.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- // Do not duplicate conditions that may have function literal
- // subexpressions. This can cause us to compile the function literal
- // twice.
- bool test_at_bottom = !node->may_have_function_literal();
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- IncrementLoopNesting();
-
- // Target for backward edge if no test at the bottom, otherwise
- // unused.
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
- // Target for backward edge if there is a test at the bottom,
- // otherwise used as target for test at the top.
- JumpTarget body;
- if (test_at_bottom) {
- body.set_direction(JumpTarget::BIDIRECTIONAL);
- }
-
- // Based on the condition analysis, compile the test as necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // We will not compile the test expression. Label the top of the
- // loop.
- if (node->next() == NULL) {
- // Use the continue target if there is no update expression.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- // Otherwise use the backward loop target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- loop.Bind();
- }
- break;
- case DONT_KNOW: {
- if (test_at_bottom) {
- // Continue is either the update expression or the test at the
- // bottom, no need to label the test at the top.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else if (node->next() == NULL) {
- // We are not recompiling the test at the bottom and there is no
- // update expression.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- // We are not recompiling the test at the bottom and there is an
- // update expression.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- loop.Bind();
- }
-
- // Compile the test with the body as the true target and preferred
- // fall-through and with the break target as the false target.
- ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // If we got the break target as fall-through, the test may have
- // been unconditionally false (if there are no jumps to the
- // body).
- if (!body.is_linked()) {
- DecrementLoopNesting();
- return;
- }
-
- // Otherwise, jump around the body on the fall through and then
- // bind the body target.
- node->break_target()->Unuse();
- node->break_target()->Jump();
- body.Bind();
- }
- break;
- }
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
-
- Visit(node->body());
-
- // If there is an update expression, compile it if necessary.
- if (node->next() != NULL) {
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
-
- // Control can reach the update by falling out of the body or by a
- // continue.
- if (has_valid_frame()) {
- // Record the source position of the statement as this code which
- // is after the code for the body actually belongs to the loop
- // statement and not the body.
- CodeForStatementPosition(node);
- Visit(node->next());
- }
- }
-
- // Based on the condition analysis, compile the backward jump as
- // necessary.
- switch (info) {
- case ALWAYS_TRUE:
- if (has_valid_frame()) {
- if (node->next() == NULL) {
- node->continue_target()->Jump();
- } else {
- loop.Jump();
- }
- }
- break;
- case DONT_KNOW:
- if (test_at_bottom) {
- if (node->continue_target()->is_linked()) {
- // We can have dangling jumps to the continue target if there
- // was no update expression.
- node->continue_target()->Bind();
- }
- // Control can reach the test at the bottom by falling out of
- // the body, by a continue in the body, or from the update
- // expression.
- if (has_valid_frame()) {
- // The break target is the fall-through (body is a backward
- // jump from here).
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- } else {
- // Otherwise, jump back to the test at the top.
- if (has_valid_frame()) {
- if (node->next() == NULL) {
- node->continue_target()->Jump();
- } else {
- loop.Jump();
- }
- }
- }
- break;
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- // The break target may be already bound (by the condition), or there
- // may not be a valid frame. Bind it only if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ ForInStatement");
- CodeForStatementPosition(node);
-
- JumpTarget primitive;
- JumpTarget jsobject;
- JumpTarget fixed_array;
- JumpTarget entry(JumpTarget::BIDIRECTIONAL);
- JumpTarget end_del_check;
- JumpTarget exit;
-
- // Get the object to enumerate over (converted to JSObject).
- LoadAndSpill(node->enumerable());
-
- // Both SpiderMonkey and kjs ignore null and undefined, in contrast
- // to the specification; section 12.6.4 mandates a call to ToObject.
- frame_->EmitPop(rax);
-
- // rax: value to be iterated over
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- exit.Branch(equal);
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- exit.Branch(equal);
-
- // Stack layout in body:
- // [iteration counter (smi)] <- slot 0
- // [length of array] <- slot 1
- // [FixedArray] <- slot 2
- // [Map or 0] <- slot 3
- // [Object] <- slot 4
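- // These slots are addressed below as frame_->ElementAt(0) through
- // frame_->ElementAt(4).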
-
- // Check if enumerable is already a JSObject
- // rax: value to be iterated over
- Condition is_smi = masm_->CheckSmi(rax);
- primitive.Branch(is_smi);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- jsobject.Branch(above_equal);
-
- primitive.Bind();
- frame_->EmitPush(rax);
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
- // function call returns the value in rax, which is where we want it below
-
- jsobject.Bind();
- // Get the set of properties (as a FixedArray or Map).
- // rax: value to be iterated over
- frame_->EmitPush(rax); // Push the object being iterated over.
-
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- JumpTarget call_runtime;
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- JumpTarget check_prototype;
- JumpTarget use_cache;
- __ movq(rcx, rax);
- loop.Bind();
- // Check that there are no elements.
- __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
- __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
- call_runtime.Branch(not_equal);
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in rbx for the subsequent
- // prototype load.
- __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
- __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
- call_runtime.Branch(equal);
- // Check that there is an enum cache in the non-empty instance
- // descriptors. This is the case if the next enumeration index
- // field does not contain a smi.
- __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
- is_smi = masm_->CheckSmi(rdx);
- call_runtime.Branch(is_smi);
- // For all objects but the receiver, check that the cache is empty.
- __ cmpq(rcx, rax);
- check_prototype.Branch(equal);
- __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
- call_runtime.Branch(not_equal);
- check_prototype.Bind();
- // Load the prototype from the map and loop if non-null.
- __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- __ CompareRoot(rcx, Heap::kNullValueRootIndex);
- loop.Branch(not_equal);
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
- use_cache.Jump();
-
- call_runtime.Bind();
- // Call the runtime to get the property names for the object.
- frame_->EmitPush(rax); // push the Object (slot 4) for the runtime call
- frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a Map, we can do a fast modification check.
- // Otherwise, we got a FixedArray, and we have to do a slow check.
- // rax: map or fixed array (result from call to
- // Runtime::kGetPropertyNamesFast)
- __ movq(rdx, rax);
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
- fixed_array.Branch(not_equal);
-
- use_cache.Bind();
- // Get enum cache
- // rax: map (either the result from a call to
- // Runtime::kGetPropertyNamesFast or has been fetched directly from
- // the object)
- __ movq(rcx, rax);
- __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
- // Get the bridge array held in the enumeration index field.
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
- // Get the cache from the bridge array.
- __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- frame_->EmitPush(rax); // <- slot 3
- frame_->EmitPush(rdx); // <- slot 2
- __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
- frame_->EmitPush(rax); // <- slot 1
- frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
- entry.Jump();
-
- fixed_array.Bind();
- // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
- frame_->EmitPush(Smi::FromInt(0)); // <- slot 3
- frame_->EmitPush(rax); // <- slot 2
-
- // Push the length of the array and the initial index onto the stack.
- __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- frame_->EmitPush(rax); // <- slot 1
- frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
-
- // Condition.
- entry.Bind();
- // Grab the current frame's height for the break and continue
- // targets only after all the state is pushed on the frame.
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
- __ movq(rax, frame_->ElementAt(0)); // load the current count
- __ SmiCompare(frame_->ElementAt(1), rax); // compare to the array length
- node->break_target()->Branch(below_equal);
-
- // Get the i'th entry of the array.
- __ movq(rdx, frame_->ElementAt(2));
- SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
- __ movq(rbx,
- FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
-
- // Get the expected map from the stack, or a zero map in the
- // permanent slow case.
- // rax: current iteration count
- // rbx: i'th entry of the enum cache
- __ movq(rdx, frame_->ElementAt(3));
- // Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
- // rax: current iteration count
- // rbx: i'th entry of the enum cache
- // rdx: expected map value
- __ movq(rcx, frame_->ElementAt(4));
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ cmpq(rcx, rdx);
- end_del_check.Branch(equal);
-
- // Convert the entry to a string (or null if it isn't a property anymore).
- frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
- frame_->EmitPush(rbx); // push entry
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
- __ movq(rbx, rax);
-
- // If the property has been removed while iterating, we just skip it.
- __ SmiCompare(rbx, Smi::FromInt(0));
- node->continue_target()->Branch(equal);
-
- end_del_check.Bind();
- // Store the entry in the 'each' expression and take another spin in
- // the loop.
- // rbx: i'th entry of the enum cache (or string thereof)
- frame_->EmitPush(rbx);
- { Reference each(this, node->each());
- // Loading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
- if (!each.is_illegal()) {
- if (each.size() > 0) {
- frame_->EmitPush(frame_->ElementAt(each.size()));
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop(2); // Drop the original and the copy of the element.
- } else {
- // If the reference has size zero then we can use the value below
- // the reference as if it were above the reference, instead of pushing
- // a new copy of it above the reference.
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop(); // Drop the original of the element.
- }
- }
- }
- // Unloading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
-
- // Body.
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- VisitAndSpill(node->body());
-
- // Next. Reestablish a spilled frame in case we are coming here via
- // a continue in the body.
- node->continue_target()->Bind();
- frame_->SpillAll();
- frame_->EmitPop(rax);
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- frame_->EmitPush(rax);
- entry.Jump();
-
- // Cleanup. No need to spill because VirtualFrame::Drop is safe for
- // any frame.
- node->break_target()->Bind();
- frame_->Drop(5);
-
- // Exit.
- exit.Bind();
-
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ TryCatchStatement");
- CodeForStatementPosition(node);
-
- JumpTarget try_block;
- JumpTarget exit;
-
- try_block.Call();
- // --- Catch block ---
- frame_->EmitPush(rax);
-
- // Store the caught exception in the catch variable.
- Variable* catch_var = node->catch_var()->var();
- ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
- StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
-
- // Remove the exception from the stack.
- frame_->Drop();
-
- VisitStatementsAndSpill(node->catch_block()->statements());
- if (has_valid_frame()) {
- exit.Jump();
- }
-
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_CATCH_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the jump targets for all escapes from the try block, including
- // returns. During shadowing, the original target is hidden as the
- // ShadowTarget and operations on the original actually affect the
- // shadowing target.
- //
- // We should probably try to unify the escaping targets and the return
- // target.
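- // For example, a 'return' inside the try block jumps to the return
- // shadow target, so control passes through the unlink code below
- // before the actual return sequence is reached.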
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original targets are unshadowed and the
- // ShadowTargets represent the formerly shadowing targets.
- bool has_unlinks = false;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- has_unlinks = has_unlinks || shadows[i]->is_linked();
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
-
- // Make sure that there's nothing left on the stack above the
- // handler structure.
- if (FLAG_debug_code) {
- __ movq(kScratchRegister, handler_address);
- __ cmpq(rsp, Operand(kScratchRegister, 0));
- __ Assert(equal, "stack pointer should point to top handler");
- }
-
- // If we can fall off the end of the try block, unlink from try chain.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame. Unlink from
- // the handler list and drop the rest of this handler from the
- // frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ movq(kScratchRegister, handler_address);
- frame_->EmitPop(Operand(kScratchRegister, 0));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- if (has_unlinks) {
- exit.Jump();
- }
- }
-
- // Generate unlink code for the (formerly) shadowing targets that
- // have been jumped to. Deallocate each shadow target.
- Result return_value;
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // Unlink from try chain; be careful not to destroy the TOS if
- // there is one.
- if (i == kReturnShadowIndex) {
- shadows[i]->Bind(&return_value);
- return_value.ToRegister(rax);
- } else {
- shadows[i]->Bind();
- }
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that we
- // break from (e.g., for...in) may have left stuff on the stack.
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
- frame_->Forget(frame_->height() - handler_height);
-
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ movq(kScratchRegister, handler_address);
- frame_->EmitPop(Operand(kScratchRegister, 0));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- if (!function_return_is_shadowed_) frame_->PrepareForReturn();
- shadows[i]->other_target()->Jump(&return_value);
- } else {
- shadows[i]->other_target()->Jump();
- }
- }
- }
-
- exit.Bind();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ TryFinallyStatement");
- CodeForStatementPosition(node);
-
- // State: Used to keep track of reason for entering the finally
- // block. Should probably be extended to hold information for
- // break/continue from within the try block.
- enum { FALLING, THROWING, JUMPING };
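- // A state of JUMPING + i (see below) means the finally block was
- // entered from a jump to the i'th shadow target.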
-
- JumpTarget try_block;
- JumpTarget finally_block;
-
- try_block.Call();
-
- frame_->EmitPush(rax);
- // In case of thrown exceptions, this is where we continue.
- __ Move(rcx, Smi::FromInt(THROWING));
- finally_block.Jump();
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_FINALLY_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the jump targets for all escapes from the try block, including
- // returns. During shadowing, the original target is hidden as the
- // ShadowTarget and operations on the original actually affect the
- // shadowing target.
- //
- // We should probably try to unify the escaping targets and the return
- // target.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original targets are unshadowed and the
- // ShadowTargets represent the formerly shadowing targets.
- int nof_unlinks = 0;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- if (shadows[i]->is_linked()) nof_unlinks++;
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
-
- // If we can fall off the end of the try block, unlink from the try
- // chain and set the state on the frame to FALLING.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ movq(kScratchRegister, handler_address);
- frame_->EmitPop(Operand(kScratchRegister, 0));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- // Fake a top of stack value (unneeded when FALLING) and set the
- // state in rcx, then jump around the unlink blocks if any.
- frame_->EmitPush(Heap::kUndefinedValueRootIndex);
- __ Move(rcx, Smi::FromInt(FALLING));
- if (nof_unlinks > 0) {
- finally_block.Jump();
- }
- }
-
- // Generate code to unlink and set the state for the (formerly)
- // shadowing targets that have been jumped to.
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // If we have come from the shadowed return, the return value is
- // on the virtual frame. We must preserve it until it is
- // pushed.
- if (i == kReturnShadowIndex) {
- Result return_value;
- shadows[i]->Bind(&return_value);
- return_value.ToRegister(rax);
- } else {
- shadows[i]->Bind();
- }
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that
- // we break from (e.g., for...in) may have left stuff on the
- // stack.
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
- frame_->Forget(frame_->height() - handler_height);
-
- // Unlink this handler and drop it from the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ movq(kScratchRegister, handler_address);
- frame_->EmitPop(Operand(kScratchRegister, 0));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- // If this target shadowed the function return, materialize
- // the return value on the stack.
- frame_->EmitPush(rax);
- } else {
- // Fake TOS for targets that shadowed breaks and continues.
- frame_->EmitPush(Heap::kUndefinedValueRootIndex);
- }
- __ Move(rcx, Smi::FromInt(JUMPING + i));
- if (--nof_unlinks > 0) {
- // If this is not the last unlink block, jump around the next.
- finally_block.Jump();
- }
- }
- }
-
- // --- Finally block ---
- finally_block.Bind();
-
- // Push the state on the stack.
- frame_->EmitPush(rcx);
-
- // We keep two elements on the stack - the (possibly faked) result
- // and the state - while evaluating the finally block.
- //
- // Generate code for the statements in the finally block.
- VisitStatementsAndSpill(node->finally_block()->statements());
-
- if (has_valid_frame()) {
- // Restore state and return value or faked TOS.
- frame_->EmitPop(rcx);
- frame_->EmitPop(rax);
- }
-
- // Generate code to jump to the right destination for all used
- // formerly shadowing targets. Deallocate each shadow target.
- for (int i = 0; i < shadows.length(); i++) {
- if (has_valid_frame() && shadows[i]->is_bound()) {
- BreakTarget* original = shadows[i]->other_target();
- __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
- if (i == kReturnShadowIndex) {
- // The return value is (already) in rax.
- Result return_value = allocator_->Allocate(rax);
- ASSERT(return_value.is_valid());
- if (function_return_is_shadowed_) {
- original->Branch(equal, &return_value);
- } else {
- // Branch around the preparation for return which may emit
- // code.
- JumpTarget skip;
- skip.Branch(not_equal);
- frame_->PrepareForReturn();
- original->Jump(&return_value);
- skip.Bind();
- }
- } else {
- original->Branch(equal);
- }
- }
- }
-
- if (has_valid_frame()) {
- // Check if we need to rethrow the exception.
- JumpTarget exit;
- __ SmiCompare(rcx, Smi::FromInt(THROWING));
- exit.Branch(not_equal);
-
- // Rethrow exception.
- frame_->EmitPush(rax); // undo pop from above
- frame_->CallRuntime(Runtime::kReThrow, 1);
-
- // Done.
- exit.Bind();
- }
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ DebuggerStatement");
- CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Spill everything, even constants, to the frame.
- frame_->SpillAll();
-
- frame_->DebugBreak();
- // Ignore the return value.
-#endif
-}
-
-
-void CodeGenerator::InstantiateFunction(
- Handle<SharedFunctionInfo> function_info,
- bool pretenure) {
- // The inevitable call will sync frame elements to memory anyway, so
- // we do it eagerly to allow us to push the arguments directly into
- // place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literal cloning.
- if (scope()->is_function_scope() &&
- function_info->num_literals() == 0 &&
- !pretenure) {
- FastNewClosureStub stub;
- frame_->Push(function_info);
- Result answer = frame_->CallStub(&stub, 1);
- frame_->Push(&answer);
- } else {
- // Call the runtime to instantiate the function based on the
- // shared function info.
- frame_->EmitPush(rsi);
- frame_->EmitPush(function_info);
- frame_->EmitPush(pretenure
- ? Factory::true_value()
- : Factory::false_value());
- Result result = frame_->CallRuntime(Runtime::kNewClosure, 3);
- frame_->Push(&result);
- }
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
- Comment cmnt(masm_, "[ FunctionLiteral");
-
- // Build the function info and instantiate it.
- Handle<SharedFunctionInfo> function_info =
- Compiler::BuildFunctionInfo(node, script());
- // Check for stack-overflow exception.
- if (function_info.is_null()) {
- SetStackOverflow();
- return;
- }
- InstantiateFunction(function_info, node->pretenure());
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
- InstantiateFunction(node->shared_function_info(), false);
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
- Comment cmnt(masm_, "[ Conditional");
- JumpTarget then;
- JumpTarget else_;
- JumpTarget exit;
- ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The else target was bound, so we compile the else part first.
- Load(node->else_expression());
-
- if (then.is_linked()) {
- exit.Jump();
- then.Bind();
- Load(node->then_expression());
- }
- } else {
- // The then target was bound, so we compile the then part first.
- Load(node->then_expression());
-
- if (else_.is_linked()) {
- exit.Jump();
- else_.Bind();
- Load(node->else_expression());
- }
- }
-
- exit.Bind();
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- JumpTarget slow;
- JumpTarget done;
- Result value;
-
- // Generate fast case for loading from slots that correspond to
- // local/global variables or arguments unless they are shadowed by
- // eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(slot,
- typeof_state,
- &value,
- &slow,
- &done);
-
- slow.Bind();
- // A runtime call is inevitable. We eagerly sync frame elements
- // to memory so that we can push the arguments directly into place
- // on top of the frame.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(kScratchRegister);
- if (typeof_state == INSIDE_TYPEOF) {
- value =
- frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- } else {
- value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- }
-
- done.Bind(&value);
- frame_->Push(&value);
-
- } else if (slot->var()->mode() == Variable::CONST) {
- // Const slots may contain 'the hole' value (the constant hasn't been
- // initialized yet) which needs to be converted into the 'undefined'
- // value.
- //
- // We currently spill the virtual frame because constants use the
- // potentially unsafe direct-frame access of SlotOperand.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Load const");
- JumpTarget exit;
- __ movq(rcx, SlotOperand(slot, rcx));
- __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
- exit.Branch(not_equal);
- __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
- exit.Bind();
- frame_->EmitPush(rcx);
-
- } else if (slot->type() == Slot::PARAMETER) {
- frame_->PushParameterAt(slot->index());
-
- } else if (slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(slot->index());
-
- } else {
- // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
- // here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because it will always be a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
- frame_->Push(&temp);
- }
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
- TypeofState state) {
- LoadFromSlot(slot, state);
-
- // Bail out quickly if we're not using lazy arguments allocation.
- if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
- // ... or if the slot isn't a non-parameter arguments slot.
- if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
- // Pop the loaded value from the stack.
- Result value = frame_->Pop();
-
- // If the loaded value is a constant, we know whether the arguments
- // object has been lazily allocated yet.
- if (value.is_constant()) {
- if (value.handle()->IsArgumentsMarker()) {
- Result arguments = StoreArgumentsObject(false);
- frame_->Push(&arguments);
- } else {
- frame_->Push(&value);
- }
- return;
- }
-
- // The loaded value is in a register. If it is the sentinel that
- // indicates that we haven't loaded the arguments object yet, we
- // need to do it now.
- JumpTarget exit;
- __ CompareRoot(value.reg(), Heap::kArgumentsMarkerRootIndex);
- frame_->Push(&value);
- exit.Branch(not_equal);
- Result arguments = StoreArgumentsObject(false);
- frame_->SetElementAt(0, &arguments);
- exit.Bind();
-}
-
-
-Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
- Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow) {
- // Check that no extension objects have been created by calls to
- // eval from the current scope to the global scope.
- Register context = rsi;
- Result tmp = allocator_->Allocate();
- ASSERT(tmp.is_valid()); // All non-reserved registers were available.
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
- }
- // Load next context in chain.
- __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- Label next, fast;
- if (!context.is(tmp.reg())) {
- __ movq(tmp.reg(), context);
- }
- // Load map for comparison into register, outside loop.
- __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
- __ bind(&next);
- // Terminate at global context.
- __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
- __ j(equal, &fast);
- // Check that extension is NULL.
- __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal);
- // Load next context in chain.
- __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- __ jmp(&next);
- __ bind(&fast);
- }
- tmp.Unuse();
-
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- LoadGlobal();
- frame_->Push(slot->var()->name());
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Result answer = frame_->CallLoadIC(mode);
- // A test rax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test rax
- // instruction here.
- masm_->nop();
- return answer;
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- Result* result,
- JumpTarget* slow,
- JumpTarget* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
- done->Jump(result);
-
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
- Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
- if (potential_slot != NULL) {
- // Generate fast case for locals that rewrite to slots.
- // Allocate a fresh register to use as a temp in
- // ContextSlotOperandCheckExtensions and to hold the result
- // value.
- *result = allocator_->Allocate();
- ASSERT(result->is_valid());
- __ movq(result->reg(),
- ContextSlotOperandCheckExtensions(potential_slot,
- *result,
- slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
- done->Branch(not_equal, result);
- __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
- }
- done->Jump(result);
- } else if (rewrite != NULL) {
- // Generate fast case for argument loads.
- Property* property = rewrite->AsProperty();
- if (property != NULL) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- Literal* key_literal = property->key()->AsLiteral();
- if (obj_proxy != NULL &&
- key_literal != NULL &&
- obj_proxy->IsArguments() &&
- key_literal->handle()->IsSmi()) {
- // Load arguments object if there are no eval-introduced
- // variables. Then load the argument from the arguments
- // object using keyed load.
- Result arguments = allocator()->Allocate();
- ASSERT(arguments.is_valid());
- __ movq(arguments.reg(),
- ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
- arguments,
- slow));
- frame_->Push(&arguments);
- frame_->Push(key_literal->handle());
- *result = EmitKeyedLoad();
- done->Jump(result);
- }
- }
- }
- }
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- // For now, just do a runtime call. Since the call is inevitable,
- // we eagerly sync the virtual frame so we can directly push the
- // arguments into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- frame_->EmitPush(rsi);
- frame_->EmitPush(slot->var()->name());
-
- Result value;
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores attribute
- // (e.g. READ_ONLY) of context slot so that we can initialize const
- // properties (introduced via eval("const foo = (some expr);")). Also,
- // uses the current function context instead of the top context.
- //
- // Note that we must declare the foo upon entry of eval(), via a
- // context slot declaration, but we cannot initialize it at the same
- // time, because the const declaration may be at the end of the eval
- // code (sigh...) and the const variable may have been used before
- // (where its value is 'undefined'). Thus, we can only do the
- // initialization when we actually encounter the expression and when
- // the expression operands are defined and valid, and thus we need the
- // split into 2 operations: declaration of the context slot followed
- // by initialization.
- value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- frame_->Push(Smi::FromInt(strict_mode_flag()));
- value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling chained assignment
- // expressions.
- frame_->Push(&value);
- } else {
- ASSERT(!slot->var()->is_dynamic());
-
- JumpTarget exit;
- if (init_state == CONST_INIT) {
- ASSERT(slot->var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is executed,
- // the code is identical to a normal store (see below).
- //
- // We spill the frame in the code below because the direct-frame
- // access of SlotOperand is potentially unsafe with an unspilled
- // frame.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Init const");
- __ movq(rcx, SlotOperand(slot, rcx));
- __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
- exit.Branch(not_equal);
- }
-
- // We must execute the store. Storing a variable must keep the (new)
- // value on the stack. This is necessary for compiling assignment
- // expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST because of const declarations which will initialize
- // consts to 'the hole' value and by doing so, end up calling this code.
- if (slot->type() == Slot::PARAMETER) {
- frame_->StoreToParameterAt(slot->index());
- } else if (slot->type() == Slot::LOCAL) {
- frame_->StoreToLocalAt(slot->index());
- } else {
- // The other slot types (LOOKUP and GLOBAL) cannot reach here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because the slot is a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- frame_->Dup();
- Result value = frame_->Pop();
- value.ToRegister();
- Result start = allocator_->Allocate();
- ASSERT(start.is_valid());
- __ movq(SlotOperand(slot, start.reg()), value.reg());
- // RecordWrite may destroy the value registers.
- //
- // TODO(204): Avoid actually spilling when the value is not
- // needed (probably the common case).
- frame_->Spill(value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
- // The results start, value, and temp are released when they go out
- // of scope.
- }
-
- exit.Bind();
- }
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
- Comment cmnt(masm_, "[ Slot");
- LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
- Comment cmnt(masm_, "[ VariableProxy");
- Variable* var = node->var();
- Expression* expr = var->rewrite();
- if (expr != NULL) {
- Visit(expr);
- } else {
- ASSERT(var->is_global());
- Reference ref(this, node);
- ref.GetValue();
- }
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
- Comment cmnt(masm_, "[ Literal");
- frame_->Push(node->handle());
-}
-
-
-void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
- UNIMPLEMENTED();
- // TODO(X64): Implement security policy for loads of smis.
-}
-
-
-bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
- return false;
-}
-
-
-// Materialize the regexp literal 'node' in the literals array
-// 'literals' of the function. Leave the regexp boilerplate in
-// 'boilerplate'.
-class DeferredRegExpLiteral: public DeferredCode {
- public:
- DeferredRegExpLiteral(Register boilerplate,
- Register literals,
- RegExpLiteral* node)
- : boilerplate_(boilerplate), literals_(literals), node_(node) {
- set_comment("[ DeferredRegExpLiteral");
- }
-
- void Generate();
-
- private:
- Register boilerplate_;
- Register literals_;
- RegExpLiteral* node_;
-};
-
-
-void DeferredRegExpLiteral::Generate() {
- // Since the entry is undefined we call the runtime system to
- // compute the literal.
- // Literal array (0).
- __ push(literals_);
- // Literal index (1).
- __ Push(Smi::FromInt(node_->literal_index()));
- // RegExp pattern (2).
- __ Push(node_->pattern());
- // RegExp flags (3).
- __ Push(node_->flags());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
-}
-
-
-class DeferredAllocateInNewSpace: public DeferredCode {
- public:
- DeferredAllocateInNewSpace(int size,
- Register target,
- int registers_to_save = 0)
- : size_(size), target_(target), registers_to_save_(registers_to_save) {
- ASSERT(size >= kPointerSize && size <= Heap::MaxObjectSizeInNewSpace());
- set_comment("[ DeferredAllocateInNewSpace");
- }
- void Generate();
-
- private:
- int size_;
- Register target_;
- int registers_to_save_;
-};
-
-
-void DeferredAllocateInNewSpace::Generate() {
- for (int i = 0; i < kNumRegs; i++) {
- if (registers_to_save_ & (1 << i)) {
- Register save_register = { i };
- __ push(save_register);
- }
- }
- __ Push(Smi::FromInt(size_));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- if (!target_.is(rax)) {
- __ movq(target_, rax);
- }
- for (int i = kNumRegs - 1; i >= 0; i--) {
- if (registers_to_save_ & (1 << i)) {
- Register save_register = { i };
- __ pop(save_register);
- }
- }
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
- Comment cmnt(masm_, "[ RegExp Literal");
-
- // Retrieve the literals array and check the allocated entry. Begin
- // with a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ movq(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- // Load the literal at the AST-saved index.
- Result boilerplate = allocator_->Allocate();
- ASSERT(boilerplate.is_valid());
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
- // Check whether we need to materialize the RegExp object. If so,
- // jump to the deferred code passing the literals array.
- DeferredRegExpLiteral* deferred =
- new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
- __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
- deferred->Branch(equal);
- deferred->BindExit();
-
- // The boilerplate register now contains the RegExp object.
-
- Result tmp = allocator()->Allocate();
- ASSERT(tmp.is_valid());
-
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-
- DeferredAllocateInNewSpace* allocate_fallback =
- new DeferredAllocateInNewSpace(size, literals.reg());
- frame_->Push(&boilerplate);
- frame_->SpillTop();
- __ AllocateInNewSpace(size,
- literals.reg(),
- tmp.reg(),
- no_reg,
- allocate_fallback->entry_label(),
- TAG_OBJECT);
- allocate_fallback->BindExit();
- boilerplate = frame_->Pop();
- // Copy from boilerplate to clone and return clone.
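- //
- // Each evaluation of the literal yields a fresh copy, e.g. (sketch):
- //   function f() { return /re/; }
- //   f() !== f();  // true: every call clones the boilerplate.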
-
- for (int i = 0; i < size; i += kPointerSize) {
- __ movq(tmp.reg(), FieldOperand(boilerplate.reg(), i));
- __ movq(FieldOperand(literals.reg(), i), tmp.reg());
- }
- frame_->Push(&literals);
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
- Comment cmnt(masm_, "[ ObjectLiteral");
-
- // Load a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ movq(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
- // Literal array.
- frame_->Push(&literals);
- // Literal index.
- frame_->Push(Smi::FromInt(node->literal_index()));
- // Constant properties.
- frame_->Push(node->constant_properties());
- // Should the object literal have fast elements?
- frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
- Result clone;
- if (node->depth() > 1) {
- clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
- clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- }
- frame_->Push(&clone);
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- node->CalculateEmitStore();
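-
- // For example (sketch): in { a: g(), a: 2 } the call g() is still
- // evaluated for its side effects, but only the store of 2 is emitted.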
-
- for (int i = 0; i < node->properties()->length(); i++) {
- ObjectLiteral::Property* property = node->properties()->at(i);
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- break;
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
- // else fall through.
- case ObjectLiteral::Property::COMPUTED: {
- Handle<Object> key(property->key()->handle());
- if (key->IsSymbol()) {
- // Duplicate the object as the IC receiver.
- frame_->Dup();
- Load(property->value());
- if (property->emit_store()) {
- Result ignored =
- frame_->CallStoreIC(Handle<String>::cast(key), false,
- strict_mode_flag());
- // A test rax instruction following the store IC call would
- // indicate the presence of an inlined version of the
- // store. Add a nop to indicate that there is no such
- // inlined version.
- __ nop();
- } else {
- frame_->Drop(2);
- }
- break;
- }
- // Fall through
- }
- case ObjectLiteral::Property::PROTOTYPE: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- Load(property->value());
- if (property->emit_store()) {
- frame_->Push(Smi::FromInt(NONE)); // PropertyAttributes
- // Ignore the result.
- Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
- } else {
- frame_->Drop(3);
- }
- break;
- }
- case ObjectLiteral::Property::SETTER: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- frame_->Push(Smi::FromInt(1));
- Load(property->value());
- Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- // Ignore the result.
- break;
- }
- case ObjectLiteral::Property::GETTER: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- frame_->Push(Smi::FromInt(0));
- Load(property->value());
- Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- // Ignore the result.
- break;
- }
- default: UNREACHABLE();
- }
- }
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- // Load a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ movq(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- frame_->Push(&literals);
- frame_->Push(Smi::FromInt(node->literal_index()));
- frame_->Push(node->constant_elements());
- int length = node->values()->length();
- Result clone;
- if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
- clone = frame_->CallStub(&stub, 3);
- __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
- } else if (node->depth() > 1) {
- clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
- clone = frame_->CallStub(&stub, 3);
- }
- frame_->Push(&clone);
-
- // Generate code to set the elements in the array that are not
- // literals.
- for (int i = 0; i < length; i++) {
- Expression* value = node->values()->at(i);
-
- if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) {
- continue;
- }
-
- // The property must be set by generated code.
- Load(value);
-
- // Get the property value off the stack.
- Result prop_value = frame_->Pop();
- prop_value.ToRegister();
-
- // Fetch the array literal while leaving a copy on the stack and
- // use it to get the elements array.
- frame_->Dup();
- Result elements = frame_->Pop();
- elements.ToRegister();
- frame_->Spill(elements.reg());
- // Get the elements FixedArray.
- __ movq(elements.reg(),
- FieldOperand(elements.reg(), JSObject::kElementsOffset));
-
- // Write to the indexed properties array.
- int offset = i * kPointerSize + FixedArray::kHeaderSize;
- __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
-
- // Update the write barrier for the array address.
- frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
- }
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
- ASSERT(!in_spilled_code());
- // Call runtime routine to allocate the catch extension object and
- // assign the exception value to the catch variable.
- Comment cmnt(masm_, "[ CatchExtensionObject");
- Load(node->key());
- Load(node->value());
- Result result =
- frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm(), "[ Variable Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL);
- Slot* slot = var->AsSlot();
- ASSERT(slot != NULL);
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
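- // (Sketch: for 'x += e' the current value of x and the value of e are
- // loaded, and the implicit '+' is applied by the code below.)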
- LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- Load(node->value());
-
- // Perform the binary operation.
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- // Construct the implicit binary operation.
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Perform the assignment.
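- // (Assignments to a const variable are skipped here: in a sketch like
- // 'const c = 1; c = 2;' the 2 is left as the expression value while
- // the slot keeps 1.)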
- if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
- CodeForSourcePosition(node->position());
- StoreToSlot(slot,
- node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
- }
- ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm(), "[ Named Property Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
- ASSERT(var == NULL || (prop == NULL && var->is_global()));
-
- // Initialize name and evaluate the receiver sub-expression if necessary. If
- // the receiver is trivial it is not placed on the stack at this point, but
- // loaded whenever actually needed.
- Handle<String> name;
- bool is_trivial_receiver = false;
- if (var != NULL) {
- name = var->name();
- } else {
- Literal* lit = prop->key()->AsLiteral();
- ASSERT_NOT_NULL(lit);
- name = Handle<String>::cast(lit->handle());
- // Do not materialize the receiver on the frame if it is trivial.
- is_trivial_receiver = prop->obj()->IsTrivial();
- if (!is_trivial_receiver) Load(prop->obj());
- }
-
- // Change to slow case at the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- // An initialization block consists of assignments of the form
- // expr.x = ..., so this will never be an assignment to a variable and
- // there must be a receiver object.
- ASSERT_EQ(NULL, var);
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else {
- frame()->Dup();
- }
- Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block() && !is_trivial_receiver) {
- frame()->Dup();
- }
-
- // Stack layout:
- // [tos] : receiver (only materialized if non-trivial)
- // [tos+1] : receiver if at the end of an initialization block
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else if (var != NULL) {
- // The LoadIC stub expects the object in rax.
- // Freeing rax causes the code generator to load the global into it.
- frame_->Spill(rax);
- LoadGlobal();
- } else {
- frame()->Dup();
- }
- Result value = EmitNamedLoad(name, var != NULL);
- frame()->Push(&value);
- Load(node->value());
-
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- // Construct the implicit binary operation.
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : receiver (only materialized if non-trivial)
- // [tos+2] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(var == NULL || var->mode() != Variable::CONST);
- ASSERT_NE(Token::INIT_CONST, node->op());
- if (is_trivial_receiver) {
- Result value = frame()->Pop();
- frame()->Push(prop->obj());
- frame()->Push(&value);
- }
- CodeForSourcePosition(node->position());
- bool is_contextual = (var != NULL);
- Result answer = EmitNamedStore(name, is_contextual);
- frame()->Push(&answer);
-
- // Stack layout:
- // [tos] : result
- // [tos+1] : receiver if at the end of an initialization block
-
- if (node->ends_initialization_block()) {
- ASSERT_EQ(NULL, var);
- // The argument to the runtime call is the receiver.
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else {
- // A copy of the receiver is below the value of the assignment. Swap
- // the receiver and the value of the assignment expression.
- Result result = frame()->Pop();
- Result receiver = frame()->Pop();
- frame()->Push(&result);
- frame()->Push(&receiver);
- }
- Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT_EQ(frame()->height(), original_height + 1);
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm_, "[ Keyed Property Assignment");
- Property* prop = node->target()->AsProperty();
- ASSERT_NOT_NULL(prop);
-
- // Evaluate the receiver subexpression.
- Load(prop->obj());
-
- // Change to slow case at the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- frame_->Dup();
- Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block()) {
- frame_->Dup();
- }
-
- // Evaluate the key subexpression.
- Load(prop->key());
-
- // Stack layout:
- // [tos] : key
- // [tos+1] : receiver
- // [tos+2] : receiver if at the end of an initialization block
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- // Duplicate receiver and key for loading the current property value.
- frame()->PushElementAt(1);
- frame()->PushElementAt(1);
- Result value = EmitKeyedLoad();
- frame()->Push(&value);
- Load(node->value());
-
- // Perform the binary operation.
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : key
- // [tos+2] : receiver
- // [tos+3] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(node->op() != Token::INIT_CONST);
- CodeForSourcePosition(node->position());
- Result answer = EmitKeyedStore(prop->key()->type());
- frame()->Push(&answer);
-
- // Stack layout:
- // [tos] : result
- // [tos+1] : receiver if at the end of an initialization block
-
- // Change to fast case at the end of an initialization block.
- if (node->ends_initialization_block()) {
- // The argument to the runtime call is the extra copy of the receiver,
- // which is below the value of the assignment. Swap the receiver and
- // the value of the assignment expression.
- Result result = frame()->Pop();
- Result receiver = frame()->Pop();
- frame()->Push(&result);
- frame()->Push(&receiver);
- Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
-
- if (var != NULL && !var->is_global()) {
- EmitSlotAssignment(node);
-
- } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
- (var != NULL && var->is_global())) {
- // Properties whose keys are property names and global variables are
- // treated as named property references. We do not need to consider
- // global 'this' because it is not a valid left-hand side.
- EmitNamedPropertyAssignment(node);
-
- } else if (prop != NULL) {
- // Other properties (including rewritten parameters for a function that
- // uses arguments) are keyed property assignments.
- EmitKeyedPropertyAssignment(node);
-
- } else {
- // Invalid left-hand side.
- Load(node->target());
- Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
- // The runtime call doesn't actually return, but the code generator
- // still generates code after it and expects a certain frame height.
- frame()->Push(&result);
- }
-
- ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
- Comment cmnt(masm_, "[ Throw");
- Load(node->exception());
- Result result = frame_->CallRuntime(Runtime::kThrow, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
- Comment cmnt(masm_, "[ Property");
- Reference property(this, node);
- property.GetValue();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
- Comment cmnt(masm_, "[ Call");
-
- ZoneList<Expression*>* args = node->arguments();
-
- // Check if the function is a variable or a property.
- Expression* function = node->expression();
- Variable* var = function->AsVariableProxy()->AsVariable();
- Property* property = function->AsProperty();
-
- // ------------------------------------------------------------------------
- // Fast-case: Use inline caching.
- // ---
- // According to ECMA-262, section 11.2.3, page 44, the function to call
- // must be resolved after the arguments have been evaluated. The IC code
- // automatically handles this by loading the arguments before the function
- // is resolved in cache misses (this also holds for megamorphic calls).
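- //
- // For example (sketch): in 'o.f(g())' the call g() is evaluated before
- // the value of o.f is fetched, so g() may still affect which function
- // is called.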
- // ------------------------------------------------------------------------
-
- if (var != NULL && var->is_possibly_eval()) {
- // ----------------------------------
- // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
- // ----------------------------------
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
-
- // Prepare the stack for the call to the resolved function.
- Load(function);
-
- // Allocate a frame slot for the receiver.
- frame_->Push(Factory::undefined_value());
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Result to hold the result of the function resolution and the
- // final result of the eval call.
- Result result;
-
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- JumpTarget done;
- if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
- ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
- JumpTarget slow;
- // Prepare the stack for the call to
- // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
- // function, the first argument to the eval call and the
- // receiver.
- Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow);
- frame_->Push(&fun);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(Factory::undefined_value());
- }
- frame_->PushParameterAt(-1);
-
- // Push the strict mode flag.
- frame_->Push(Smi::FromInt(strict_mode_flag()));
-
- // Resolve the call.
- result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
-
- done.Jump(&result);
- slow.Bind();
- }
-
- // Prepare the stack for the call to ResolvePossiblyDirectEval by
- // pushing the loaded function, the first argument to the eval
- // call and the receiver.
- frame_->PushElementAt(arg_count + 1);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(Factory::undefined_value());
- }
- frame_->PushParameterAt(-1);
-
- // Push the strict mode flag.
- frame_->Push(Smi::FromInt(strict_mode_flag()));
-
- // Resolve the call.
- result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
-
- // If we generated fast-case code bind the jump-target where fast
- // and slow case merge.
- if (done.is_linked()) done.Bind(&result);
-
- // The runtime call returns a pair of values in rax (function) and
- // rdx (receiver). Touch up the stack with the right values.
- Result receiver = allocator_->Allocate(rdx);
- frame_->SetElementAt(arg_count + 1, &result);
- frame_->SetElementAt(arg_count, &receiver);
- receiver.Unuse();
-
- // Call the function.
- CodeForSourcePosition(node->position());
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
- result = frame_->CallStub(&call_function, arg_count + 1);
-
- // Restore the context and overwrite the function on the stack with
- // the result.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &result);
-
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is global
- // ----------------------------------
-
- // Pass the global object as the receiver and let the IC stub
- // patch the stack to use the global proxy as 'this' in the
- // invoked function.
- LoadGlobal();
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Push the name of the function on the frame.
- frame_->Push(var->name());
-
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
- arg_count,
- loop_nesting());
- frame_->RestoreContextRegister();
- // Replace the function on the stack with the result.
- frame_->Push(&result);
-
- } else if (var != NULL && var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::LOOKUP) {
- // ----------------------------------
- // JavaScript examples:
- //
- // with (obj) foo(1, 2, 3) // foo may be in obj.
- //
- // function f() {};
- // function g() {
- // eval(...);
- // f(); // f could be in extension object.
- // }
- // ----------------------------------
-
- JumpTarget slow, done;
- Result function;
-
- // Generate fast case for loading functions from slots that
- // correspond to local/global variables or arguments unless they
- // are shadowed by eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &function,
- &slow,
- &done);
-
- slow.Bind();
- // Load the function from the context. Sync the frame so we can
- // push the arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- frame_->EmitPush(var->name());
- frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- // The runtime call returns a pair of values in rax and rdx. The
- // looked-up function is in rax and the receiver is in rdx. These
- // register references are not ref counted here. We spill them
- // eagerly since they are arguments to an inevitable call (and are
- // not sharable by the arguments).
- ASSERT(!allocator()->is_used(rax));
- frame_->EmitPush(rax);
-
- // Load the receiver.
- ASSERT(!allocator()->is_used(rdx));
- frame_->EmitPush(rdx);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- JumpTarget call;
- call.Jump();
- done.Bind(&function);
- frame_->Push(&function);
- LoadGlobalReceiver();
- call.Bind();
- }
-
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-
- } else if (property != NULL) {
- // Check if the key is a literal string.
- Literal* literal = property->key()->AsLiteral();
-
- if (literal != NULL && literal->handle()->IsSymbol()) {
- // ------------------------------------------------------------------
- // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
- // ------------------------------------------------------------------
-
- Handle<String> name = Handle<String>::cast(literal->handle());
-
- if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
- name->IsEqualTo(CStrVector("apply")) &&
- args->length() == 2 &&
- args->at(1)->AsVariableProxy() != NULL &&
- args->at(1)->AsVariableProxy()->IsArguments()) {
- // Use the optimized Function.prototype.apply that avoids
- // allocating lazily allocated arguments objects.
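- // (This path matches the common 'f.apply(receiver, arguments)'
- // delegation pattern checked for above.)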
- CallApplyLazy(property->obj(),
- args->at(0),
- args->at(1)->AsVariableProxy(),
- node->position());
-
- } else {
- // Push the receiver onto the frame.
- Load(property->obj());
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Push the name of the function onto the frame.
- frame_->Push(name);
-
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting());
- frame_->RestoreContextRegister();
- frame_->Push(&result);
- }
-
- } else {
- // -------------------------------------------
- // JavaScript example: 'array[index](1, 2, 3)'
- // -------------------------------------------
-
- // Load the function to call from the property through a reference.
- if (property->is_synthetic()) {
- Reference ref(this, property, false);
- ref.GetValue();
- // Use global object as receiver.
- LoadGlobalReceiver();
- // Call the function.
- CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
- } else {
- // Push the receiver onto the frame.
- Load(property->obj());
-
- // Load the name of the function.
- Load(property->key());
-
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- Result key = frame_->Pop();
- Result receiver = frame_->Pop();
- frame_->Push(&key);
- frame_->Push(&receiver);
- key.Unuse();
- receiver.Unuse();
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Place the key on top of stack and call the IC initialization code.
- frame_->PushElementAt(arg_count + 1);
- CodeForSourcePosition(node->position());
- Result result = frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting());
- frame_->Drop(); // Drop the key still on the stack.
- frame_->RestoreContextRegister();
- frame_->Push(&result);
- }
- }
- } else {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is not global
- // ----------------------------------
-
- // Load the function.
- Load(function);
-
- // Pass the global proxy as the receiver.
- LoadGlobalReceiver();
-
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
- }
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
- Comment cmnt(masm_, "[ CallNew");
-
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments. This is different from ordinary calls, where the
- // actual function to call is resolved after the arguments have been
- // evaluated.
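- //
- // For example (sketch): in 'new C(g())' the value of C is fetched
- // before g() runs, while in a plain call 'C(g())' it is fetched after.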
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- Load(node->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = node->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallConstructor(arg_count);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- value.Unuse();
- destination()->Split(is_smi);
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (ShouldGenerateLog(args->at(0))) {
- Load(args->at(1));
- Load(args->at(2));
- frame_->CallRuntime(Runtime::kLog, 2);
- }
-#endif
- // Finally, we're expected to leave a value on the top of the stack.
- frame_->Push(Factory::undefined_value());
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition non_negative_smi = masm_->CheckNonNegativeSmi(value.reg());
- value.Unuse();
- destination()->Split(non_negative_smi);
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
- DeferredStringCharCodeAt(Register object,
- Register index,
- Register scratch,
- Register result)
- : result_(result),
- char_code_at_generator_(object,
- index,
- scratch,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharCodeAtGenerator* fast_case_generator() {
- return &char_code_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ LoadRoot(result_, Heap::kNanValueRootIndex);
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
- // This generates code that performs a String.prototype.charCodeAt() call
- // or returns undefined in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharCodeAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
- Result index = frame_->Pop();
- Result object = frame_->Pop();
- object.ToRegister();
- index.ToRegister();
- // We might mutate the object register.
- frame_->Spill(object.reg());
-
- // We need two extra registers.
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(object.reg(),
- index.reg(),
- scratch.reg(),
- result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
- DeferredStringCharFromCode(Register code,
- Register result)
- : char_from_code_generator_(code, result) {}
-
- StringCharFromCodeGenerator* fast_case_generator() {
- return &char_from_code_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_from_code_generator_.GenerateSlow(masm(), call_helper);
- }
-
- private:
- StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharFromCode");
- ASSERT(args->length() == 1);
-
- Load(args->at(0));
-
- Result code = frame_->Pop();
- code.ToRegister();
- ASSERT(code.is_valid());
-
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
-
- DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
- code.reg(), result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
- DeferredStringCharAt(Register object,
- Register index,
- Register scratch1,
- Register scratch2,
- Register result)
- : result_(result),
- char_at_generator_(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharAtGenerator* fast_case_generator() {
- return &char_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Move(result_, Smi::FromInt(0));
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
- Result index = frame_->Pop();
- Result object = frame_->Pop();
- object.ToRegister();
- index.ToRegister();
- // We might mutate the object register.
- frame_->Spill(object.reg());
-
- // We need three extra registers.
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- Result scratch1 = allocator()->Allocate();
- ASSERT(scratch1.is_valid());
- Result scratch2 = allocator()->Allocate();
- ASSERT(scratch2.is_valid());
-
- DeferredStringCharAt* deferred =
- new DeferredStringCharAt(object.reg(),
- index.reg(),
- scratch1.reg(),
- scratch2.reg(),
- result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- destination()->false_target()->Branch(is_smi);
- // It is a heap object - get map.
- // Check if the object is a JS array or not.
- __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
- value.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- destination()->false_target()->Branch(is_smi);
- // It is a heap object - get map.
- // Check if the object is a regexp.
- __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
- value.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
-
- __ Move(kScratchRegister, Factory::null_value());
- __ cmpq(obj.reg(), kScratchRegister);
- destination()->true_target()->Branch(equal);
-
- __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- destination()->false_target()->Branch(not_zero);
- __ movzxbq(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
- destination()->false_target()->Branch(below);
- __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
- obj.Unuse();
- destination()->Split(below_equal);
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
- // typeof(arg) === 'function').
- // It includes undetectable objects (as opposed to IsObject).
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- destination()->false_target()->Branch(is_smi);
- // Check that this is an object.
- __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
- value.Unuse();
- destination()->Split(above_equal);
-}
-
-
- // Deferred code to check whether a String wrapper object is safe to use
- // the default valueOf on. This code is called after the bit that caches
- // this information in the map has been checked, with the object's map in
- // the map_result_ register. On return the register map_result_ contains
- // 1 for true and 0 for false.
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
- DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
- Register map_result,
- Register scratch1,
- Register scratch2)
- : object_(object),
- map_result_(map_result),
- scratch1_(scratch1),
- scratch2_(scratch2) { }
-
- virtual void Generate() {
- Label false_result;
-
- // Check that map is loaded as expected.
- if (FLAG_debug_code) {
- __ cmpq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ Assert(equal, "Map not in expected register");
- }
-
- // Check for fast case object. Generate false result for slow case object.
- __ movq(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
- __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
- __ CompareRoot(scratch1_, Heap::kHashTableMapRootIndex);
- __ j(equal, &false_result);
-
- // Look for the valueOf symbol in the descriptor array, and indicate false
- // if it is found. The descriptor type is not checked, so a transition with
- // that key yields a false negative.
- __ movq(map_result_,
- FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
- __ movq(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
- // map_result_: descriptor array
- // scratch1_: length of descriptor array
- // Calculate the end of the descriptor array.
- SmiIndex index = masm_->SmiToIndex(scratch2_, scratch1_, kPointerSizeLog2);
- __ lea(scratch1_,
- Operand(
- map_result_, index.reg, index.scale, FixedArray::kHeaderSize));
- // Calculate location of the first key name.
- __ addq(map_result_,
- Immediate(FixedArray::kHeaderSize +
- DescriptorArray::kFirstIndex * kPointerSize));
- // Loop through all the keys in the descriptor array. If one of them is
- // the symbol valueOf, the result is false.
- Label entry, loop;
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(scratch2_, FieldOperand(map_result_, 0));
- __ Cmp(scratch2_, Factory::value_of_symbol());
- __ j(equal, &false_result);
- __ addq(map_result_, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(map_result_, scratch1_);
- __ j(not_equal, &loop);
-
- // Reload the map, as register map_result_ was used as a temporary above.
- __ movq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
-
- // If a valueOf property is not found on the object, check that its
- // prototype is the unmodified String prototype. If not, the result is false.
- __ movq(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
- __ testq(scratch1_, Immediate(kSmiTagMask));
- __ j(zero, &false_result);
- __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
- __ movq(scratch2_,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(scratch2_,
- FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
- __ cmpq(scratch1_,
- ContextOperand(
- scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, &false_result);
- // Set the bit in the map to indicate that the object has been checked as
- // safe for the default valueOf, and set a true result.
- __ or_(FieldOperand(map_result_, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ Set(map_result_, 1);
- __ jmp(exit_label());
- __ bind(&false_result);
- // Set false result.
- __ Set(map_result_, 0);
- }
-
- private:
- Register object_;
- Register map_result_;
- Register scratch1_;
- Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop(); // Pop the string wrapper.
- obj.ToRegister();
- ASSERT(obj.is_valid());
- if (FLAG_debug_code) {
- __ AbortIfSmi(obj.reg());
- }
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- Result map_result = allocator()->Allocate();
- ASSERT(map_result.is_valid());
- __ movq(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(map_result.reg(), Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- destination()->true_target()->Branch(not_zero);
-
- // We need an additional two scratch registers for the deferred code.
- Result temp1 = allocator()->Allocate();
- ASSERT(temp1.is_valid());
- Result temp2 = allocator()->Allocate();
- ASSERT(temp2.is_valid());
-
- DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
- new DeferredIsStringWrapperSafeForDefaultValueOf(
- obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
- deferred->Branch(zero);
- deferred->BindExit();
- __ testq(map_result.reg(), map_result.reg());
- obj.Unuse();
- map_result.Unuse();
- temp1.Unuse();
- temp2.Unuse();
- destination()->Split(not_equal);
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (%_ClassOf(arg) === 'Function')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
- obj.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
- __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ movzxbl(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kBitFieldOffset));
- __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
- obj.Unuse();
- destination()->Split(not_zero);
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- // Get the frame pointer for the calling frame.
- Result fp = allocator()->Allocate();
- __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker);
- __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
- fp.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Result fp = allocator_->Allocate();
- Result result = allocator_->Allocate();
- ASSERT(fp.is_valid() && result.is_valid());
-
- Label exit;
-
- // Get the number of formal parameters.
- __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ movq(result.reg(),
- Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- result.set_type_info(TypeInfo::Smi());
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(result.reg());
- }
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave, null, function, non_function_constructor;
- Load(args->at(0)); // Load the object.
- Result obj = frame_->Pop();
- obj.ToRegister();
- frame_->Spill(obj.reg());
-
- // If the object is a smi, we return null.
- Condition is_smi = masm_->CheckSmi(obj.reg());
- null.Branch(is_smi);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
-
- __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
- null.Branch(below);
-
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
- function.Branch(equal);
-
- // Check if the constructor in the map is a function.
- __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
- non_function_constructor.Branch(not_equal);
-
- // The obj register now contains the constructor function. Grab the
- // instance class name from there.
- __ movq(obj.reg(),
- FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
- __ movq(obj.reg(),
- FieldOperand(obj.reg(),
- SharedFunctionInfo::kInstanceClassNameOffset));
- frame_->Push(&obj);
- leave.Jump();
-
- // Functions have class 'Function'.
- function.Bind();
- frame_->Push(Factory::function_class_symbol());
- leave.Jump();
-
- // Objects with a non-function constructor have class 'Object'.
- non_function_constructor.Bind();
- frame_->Push(Factory::Object_symbol());
- leave.Jump();
-
- // Non-JS objects have class null.
- null.Bind();
- frame_->Push(Factory::null_value());
-
- // All done.
- leave.Bind();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- frame_->Dup();
- Result object = frame_->Pop();
- object.ToRegister();
- ASSERT(object.is_valid());
- // if (object->IsSmi()) return object.
- Condition is_smi = masm_->CheckSmi(object.reg());
- leave.Branch(is_smi);
- // It is a heap object - get map.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- // if (!object->IsJSValue()) return object.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
- leave.Branch(not_equal);
- __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
- object.Unuse();
- frame_->SetElementAt(0, &temp);
- leave.Bind();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- Load(args->at(1)); // Load the value.
- Result value = frame_->Pop();
- Result object = frame_->Pop();
- value.ToRegister();
- object.ToRegister();
-
- // if (object->IsSmi()) return value.
- Condition is_smi = masm_->CheckSmi(object.reg());
- leave.Branch(is_smi, &value);
-
- // It is a heap object - get its map.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- // if (!object->IsJSValue()) return value.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
- leave.Branch(not_equal, &value);
-
- // Store the value.
- __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- Result duplicate_value = allocator_->Allocate();
- ASSERT(duplicate_value.is_valid());
- __ movq(duplicate_value.reg(), value.reg());
- // The object register is also overwritten by the write barrier and
- // possibly aliased in the frame.
- frame_->Spill(object.reg());
- __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
- scratch.reg());
- object.Unuse();
- scratch.Unuse();
- duplicate_value.Unuse();
-
- // Leave.
- leave.Bind(&value);
- frame_->Push(&value);
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in rdx and the formal
- // parameter count in rax.
- Load(args->at(0));
- Result key = frame_->Pop();
- // Explicitly create a constant result.
- Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
- // Call the shared stub to get to arguments[key].
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- Result result = frame_->CallStub(&stub, &key, &count);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- Load(args->at(0));
- Load(args->at(1));
- Result right = frame_->Pop();
- Result left = frame_->Pop();
- right.ToRegister();
- left.ToRegister();
- __ cmpq(right.reg(), left.reg());
- right.Unuse();
- left.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
- // The RBP value is aligned, so it carries a smi tag (without necessarily
- // being padded as a smi, so it must not be treated as a smi).
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- Result rbp_as_smi = allocator_->Allocate();
- ASSERT(rbp_as_smi.is_valid());
- __ movq(rbp_as_smi.reg(), rbp);
- frame_->Push(&rbp_as_smi);
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
- frame_->SpillAll();
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
- __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ movq(rbx, rax);
-
- __ bind(&heapnumber_allocated);
-
- // Return a random uint32 number in rax.
- // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
- __ PrepareCallCFunction(0);
- __ CallCFunction(ExternalReference::random_uint32_function(), 0);
-
- // Convert 32 random bits in rax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
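- // Worked out: the double 1.0 x 2^20 has zeros in its low 32 mantissa bits,
- // so the xorpd below drops the 32 random bits r straight into the mantissa,
- // giving 2^20 * (1 + r * 2^-52) = 2^20 + r * 2^-32; subtracting 2^20 then
- // leaves r * 2^-32 in [0, 1).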
- __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, rcx);
- __ movd(xmm0, rax);
- __ cvtss2sd(xmm1, xmm1);
- __ xorpd(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
-
- __ movq(rax, rbx);
- Result result = allocator_->Allocate(rax);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- SubStringStub stub;
- Result answer = frame_->CallStub(&stub, 3);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringCompareStub stub;
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 4);
-
- // Load the arguments on the stack and call the runtime system.
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
- Load(args->at(3));
- RegExpExecStub stub;
- Result result = frame_->CallStub(&stub, 4);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
- Load(args->at(0)); // Size of array, smi.
- Load(args->at(1)); // "index" property value.
- Load(args->at(2)); // "input" property value.
- RegExpConstructResultStub stub;
- Result result = frame_->CallStub(&stub, 3);
- frame_->Push(&result);
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
- DeferredSearchCache(Register dst,
- Register cache,
- Register key,
- Register scratch)
- : dst_(dst), cache_(cache), key_(key), scratch_(scratch) {
- set_comment("[ DeferredSearchCache");
- }
-
- virtual void Generate();
-
- private:
- Register dst_; // On invocation holds the finger index (as int32); on exit
- // holds the value that was looked up.
- Register cache_; // instance of JSFunctionResultCache.
- Register key_; // key being looked up.
- Register scratch_;
-};
-
-
-// Returns an operand for the element at |index| + |additional_offset|
-// in the FixedArray whose pointer is held in |array|. |index| is int32.
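-// For example, ArrayElement(rcx, rdx, 1) addresses the slot just past the
-// element at index rdx - handy for the value half of a (key, value) pair.
-// (FieldOperand folds the -kHeapObjectTag adjustment in for us.)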
-static Operand ArrayElement(Register array,
- Register index,
- int additional_offset = 0) {
- int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
- return FieldOperand(array, index, times_pointer_size, offset);
-}
-
-
-void DeferredSearchCache::Generate() {
- Label first_loop, search_further, second_loop, cache_miss;
-
- Immediate kEntriesIndexImm = Immediate(JSFunctionResultCache::kEntriesIndex);
- Immediate kEntrySizeImm = Immediate(JSFunctionResultCache::kEntrySize);
-
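- // Cache layout, roughly: a FixedArray holding a factory function plus smi
- // bookkeeping fields (the finger and the cache size), followed by
- // (key, value) entry pairs. The finger marks the most recent hit, so we
- // scan from the finger down to the first entry, then from the end down to
- // the finger.
-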
- // Check the cache from finger to start of the cache.
- __ bind(&first_loop);
- __ subl(dst_, kEntrySizeImm);
- __ cmpl(dst_, kEntriesIndexImm);
- __ j(less, &search_further);
-
- __ cmpq(ArrayElement(cache_, dst_), key_);
- __ j(not_equal, &first_loop);
-
- __ Integer32ToSmiField(
- FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ movq(dst_, ArrayElement(cache_, dst_, 1));
- __ jmp(exit_label());
-
- __ bind(&search_further);
-
- // Check the cache from end of cache up to finger.
- __ SmiToInteger32(dst_,
- FieldOperand(cache_,
- JSFunctionResultCache::kCacheSizeOffset));
- __ SmiToInteger32(scratch_,
- FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
-
- __ bind(&second_loop);
- __ subl(dst_, kEntrySizeImm);
- __ cmpl(dst_, scratch_);
- __ j(less_equal, &cache_miss);
-
- __ cmpq(ArrayElement(cache_, dst_), key_);
- __ j(not_equal, &second_loop);
-
- __ Integer32ToSmiField(
- FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ movq(dst_, ArrayElement(cache_, dst_, 1));
- __ jmp(exit_label());
-
- __ bind(&cache_miss);
- __ push(cache_); // store a reference to cache
- __ push(key_); // store a key
- __ push(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ push(key_);
- // On x64 the function must be in rdi.
- __ movq(rdi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
- ParameterCount expected(1);
- __ InvokeFunction(rdi, expected, CALL_FUNCTION);
-
- // Find a place to put new cached value into.
- Label add_new_entry, update_cache;
- __ movq(rcx, Operand(rsp, kPointerSize)); // restore the cache
- // Possible optimization: the cache size is constant for a given cache,
- // so technically we could use a constant here. However, on a cache miss
- // this optimization would hardly matter.
-
- // Check whether we can add a new entry to the cache.
- __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ SmiToInteger32(r9,
- FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
- __ cmpl(rbx, r9);
- __ j(greater, &add_new_entry);
-
- // Check whether we can evict the entry after the finger.
- __ SmiToInteger32(rdx,
- FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
- __ addl(rdx, kEntrySizeImm);
- Label forward;
- __ cmpl(rbx, rdx);
- __ j(greater, &forward);
- // Need to wrap over the cache.
- __ movl(rdx, kEntriesIndexImm);
- __ bind(&forward);
- __ movl(r9, rdx);
- __ jmp(&update_cache);
-
- __ bind(&add_new_entry);
- // r9 holds cache size as int32.
- __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
- __ Integer32ToSmiField(
- FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
-
- // Update the cache itself.
- // r9 holds the index as int32.
- __ bind(&update_cache);
- __ pop(rbx); // restore the key
- __ Integer32ToSmiField(
- FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
- // Store key.
- __ movq(ArrayElement(rcx, r9), rbx);
- __ RecordWrite(rcx, 0, rbx, r9);
-
- // Store value.
- __ pop(rcx); // restore the cache.
- __ SmiToInteger32(rdx,
- FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
- __ incl(rdx);
- // Back up rax, because the RecordWrite macro clobbers its arguments.
- __ movq(rbx, rax);
- __ movq(ArrayElement(rcx, rdx), rax);
- __ RecordWrite(rcx, 0, rbx, rdx);
-
- if (!dst_.is(rax)) {
- __ movq(dst_, rax);
- }
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- frame_->Push(Factory::undefined_value());
- return;
- }
-
- Load(args->at(1));
- Result key = frame_->Pop();
- key.ToRegister();
-
- Result cache = allocator()->Allocate();
- ASSERT(cache.is_valid());
- __ movq(cache.reg(), ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(cache.reg(),
- FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
- __ movq(cache.reg(),
- ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ movq(cache.reg(),
- FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
-
- Result tmp = allocator()->Allocate();
- ASSERT(tmp.is_valid());
-
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
- cache.reg(),
- key.reg(),
- scratch.reg());
-
- const int kFingerOffset =
- FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
- // Fast path: probe only the entry under the finger; any mismatch falls
- // through to the deferred full cache search.
- // Load the finger (a smi) from the cache into tmp.reg() as an int32.
- __ SmiToInteger32(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
- __ cmpq(key.reg(), FieldOperand(cache.reg(),
- tmp.reg(), times_pointer_size,
- FixedArray::kHeaderSize));
- deferred->Branch(not_equal);
- __ movq(tmp.reg(), FieldOperand(cache.reg(),
- tmp.reg(), times_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
-
- deferred->BindExit();
- frame_->Push(&tmp);
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and jump to the runtime.
- Load(args->at(0));
-
- NumberToStringStub stub;
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
- DeferredSwapElements(Register object, Register index1, Register index2)
- : object_(object), index1_(index1), index2_(index2) {
- set_comment("[ DeferredSwapElements");
- }
-
- virtual void Generate();
-
- private:
- Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
- __ push(object_);
- __ push(index1_);
- __ push(index2_);
- __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateSwapElements");
-
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- Result index2 = frame_->Pop();
- index2.ToRegister();
-
- Result index1 = frame_->Pop();
- index1.ToRegister();
-
- Result object = frame_->Pop();
- object.ToRegister();
-
- Result tmp1 = allocator()->Allocate();
- tmp1.ToRegister();
- Result tmp2 = allocator()->Allocate();
- tmp2.ToRegister();
-
- frame_->Spill(object.reg());
- frame_->Spill(index1.reg());
- frame_->Spill(index2.reg());
-
- DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
- index1.reg(),
- index2.reg());
-
- // Fetch the map and check whether the array is in the fast case.
- // Check that the object doesn't require security checks and
- // has no indexed interceptor.
- __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
- deferred->Branch(below);
- __ testb(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
- Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
- deferred->Branch(not_zero);
-
- // Check the object's elements are in fast case and writable.
- __ movq(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- deferred->Branch(not_equal);
-
- // Check that both indices are smis.
- Condition both_smi = masm()->CheckBothSmi(index1.reg(), index2.reg());
- deferred->Branch(NegateCondition(both_smi));
-
- // Check that both indices are valid.
- __ movq(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
- __ SmiCompare(tmp2.reg(), index1.reg());
- deferred->Branch(below_equal);
- __ SmiCompare(tmp2.reg(), index2.reg());
- deferred->Branch(below_equal);
-
- // Bring addresses into index1 and index2.
- __ SmiToInteger32(index1.reg(), index1.reg());
- __ lea(index1.reg(), FieldOperand(tmp1.reg(),
- index1.reg(),
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ SmiToInteger32(index2.reg(), index2.reg());
- __ lea(index2.reg(), FieldOperand(tmp1.reg(),
- index2.reg(),
- times_pointer_size,
- FixedArray::kHeaderSize));
-
- // Swap elements.
- __ movq(object.reg(), Operand(index1.reg(), 0));
- __ movq(tmp2.reg(), Operand(index2.reg(), 0));
- __ movq(Operand(index2.reg(), 0), object.reg());
- __ movq(Operand(index1.reg(), 0), tmp2.reg());
-
- Label done;
- __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
- // Possible optimization: check that both values are smis
- // (OR them together and test the result against the smi mask).
-
- __ movq(tmp2.reg(), tmp1.reg());
- __ RecordWriteHelper(tmp1.reg(), index1.reg(), object.reg());
- __ RecordWriteHelper(tmp2.reg(), index2.reg(), object.reg());
- __ bind(&done);
-
- deferred->BindExit();
- frame_->Push(Factory::undefined_value());
-}
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateCallFunction");
-
- ASSERT(args->length() >= 2);
-
- int n_args = args->length() - 2; // for receiver and function.
- Load(args->at(0)); // receiver
- for (int i = 0; i < n_args; i++) {
- Load(args->at(i + 1));
- }
- Load(args->at(n_args + 1)); // function
- Result result = frame_->CallJSFunction(n_args);
- frame_->Push(&result);
-}
-
-
-// Generates the Math.pow method. Only handles special cases and
-// branches to the runtime system for everything else. Please note
-// that this function assumes that the callsite has executed ToNumber
-// on both arguments.
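-// The cases handled inline are smi (integer) exponents, via
-// square-and-multiply, and exponents equal to 0.5 or -0.5, via sqrtsd;
-// everything else falls through to Runtime::kMath_pow_cfunction.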
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- Load(args->at(0));
- Load(args->at(1));
-
- Label allocate_return;
- // Load the two operands while leaving the values on the frame.
- frame()->Dup();
- Result exponent = frame()->Pop();
- exponent.ToRegister();
- frame()->Spill(exponent.reg());
- frame()->PushElementAt(1);
- Result base = frame()->Pop();
- base.ToRegister();
- frame()->Spill(base.reg());
-
- Result answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- ASSERT(!exponent.reg().is(base.reg()));
- JumpTarget call_runtime;
-
- // Save 1 in xmm3 - we need this several times later on.
- __ movl(answer.reg(), Immediate(1));
- __ cvtlsi2sd(xmm3, answer.reg());
-
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
- __ JumpIfNotSmi(base.reg(), &base_nonsmi);
-
- // Optimized version when y is an integer.
- Label powi;
- __ SmiToInteger32(base.reg(), base.reg());
- __ cvtlsi2sd(xmm0, base.reg());
- __ jmp(&powi);
- // The exponent is a smi and the base is a heap number.
- __ bind(&base_nonsmi);
- __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
-
- __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
- // Optimized version of pow if y is an integer.
- __ bind(&powi);
- __ SmiToInteger32(exponent.reg(), exponent.reg());
-
- // Save exponent in base as we need to check if exponent is negative later.
- // We know that base and exponent are in different registers.
- __ movl(base.reg(), exponent.reg());
-
- // Get absolute value of exponent.
- Label no_neg;
- __ cmpl(exponent.reg(), Immediate(0));
- __ j(greater_equal, &no_neg);
- __ negl(exponent.reg());
- __ bind(&no_neg);
-
- // Load xmm1 with 1.
- __ movsd(xmm1, xmm3);
- Label while_true;
- Label no_multiply;
-
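- // Square-and-multiply: each pass shifts the exponent right one bit; if
- // the bit shifted out was set, fold the current power (xmm0) into the
- // result (xmm1); then square xmm0 for the next bit.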
- __ bind(&while_true);
- __ shrl(exponent.reg(), Immediate(1));
- __ j(not_carry, &no_multiply);
- __ mulsd(xmm1, xmm0);
- __ bind(&no_multiply);
- __ testl(exponent.reg(), exponent.reg());
- __ mulsd(xmm0, xmm0);
- __ j(not_zero, &while_true);
-
- // base.reg() holds the original exponent - if it was negative, return 1/result.
- __ testl(base.reg(), base.reg());
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ movl(answer.reg(), Immediate(0x7FB00000));
- __ movd(xmm0, answer.reg());
- __ cvtss2sd(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- call_runtime.Branch(equal);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // The exponent (or both operands) is a heap number - either way, from here
- // on we work on doubles.
- __ bind(&exponent_nonsmi);
- __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
- __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
- // Test whether the exponent is NaN.
- __ ucomisd(xmm1, xmm1);
- call_runtime.Branch(parity_even);
-
- Label base_not_smi;
- Label handle_special_cases;
- __ JumpIfNotSmi(base.reg(), &base_not_smi);
- __ SmiToInteger32(base.reg(), base.reg());
- __ cvtlsi2sd(xmm0, base.reg());
- __ jmp(&handle_special_cases);
- __ bind(&base_not_smi);
- __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
- __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
- __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
- __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
- // base is NaN or +/-Infinity
- call_runtime.Branch(greater_equal);
- __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- Label not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ movl(answer.reg(), Immediate(0xBF000000));
- __ movd(xmm2, answer.reg());
- __ cvtss2sd(xmm2, xmm2);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half);
-
- // Calculates reciprocal of square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
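- // (xorpd zeroes xmm1, and adding the base to +0 canonicalizes a -0 base
- // to +0 before the square root, since IEEE 754 gives +0 + -0 = +0.)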
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // Test for 0.5.
- __ bind(&not_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- call_runtime.Branch(not_equal);
-
- // Calculates square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
-
- JumpTarget done;
- Label failure, success;
- __ bind(&allocate_return);
- // Make a copy of the frame to enable us to handle allocation
- // failure after the JumpTarget jump.
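- // (The jump below merges and invalidates the current frame state, so the
- // runtime path needs its own pristine copy to start from.)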
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
- __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
- // Remove the two original values from the frame - we only need those
- // in the case where we branch to runtime.
- frame()->Drop(2);
- exponent.Unuse();
- base.Unuse();
- done.Jump(&answer);
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- // If we experience an allocation failure we branch to runtime.
- __ bind(&failure);
- call_runtime.Bind();
- answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
-
- done.Bind(&answer);
- frame()->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-// Generates the Math.sqrt method. Please note - this function assumes that
-// the callsite has executed ToNumber on the argument.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
-
- // Leave original value on the frame if we need to call runtime.
- frame()->Dup();
- Result result = frame()->Pop();
- result.ToRegister();
- frame()->Spill(result.reg());
- Label runtime;
- Label non_smi;
- Label load_done;
- JumpTarget end;
-
- __ JumpIfNotSmi(result.reg(), &non_smi);
- __ SmiToInteger32(result.reg(), result.reg());
- __ cvtlsi2sd(xmm0, result.reg());
- __ jmp(&load_done);
- __ bind(&non_smi);
- __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &runtime);
- __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
-
- __ bind(&load_done);
- __ sqrtsd(xmm0, xmm0);
- // A copy of the virtual frame to allow us to go to runtime after the
- // JumpTarget jump.
- Result scratch = allocator()->Allocate();
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
-
- __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
- frame()->Drop(1);
- scratch.Unuse();
- end.Jump(&result);
- // We only branch to runtime if we have an allocation error.
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- __ bind(&runtime);
- result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
-
- end.Bind(&result);
- frame()->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
- Load(args->at(0));
- Load(args->at(1));
- Result right_res = frame_->Pop();
- Result left_res = frame_->Pop();
- right_res.ToRegister();
- left_res.ToRegister();
- Result tmp_res = allocator()->Allocate();
- ASSERT(tmp_res.is_valid());
- Register right = right_res.reg();
- Register left = left_res.reg();
- Register tmp = tmp_res.reg();
- right_res.Unuse();
- left_res.Unuse();
- tmp_res.Unuse();
- __ cmpq(left, right);
- destination()->true_target()->Branch(equal);
- // Fail if either is a non-HeapObject.
- Condition either_smi =
- masm()->CheckEitherSmi(left, right, tmp);
- destination()->false_target()->Branch(either_smi);
- __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
- __ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
- Immediate(JS_REGEXP_TYPE));
- destination()->false_target()->Branch(not_equal);
- __ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
- destination()->false_target()->Branch(not_equal);
- __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ testl(FieldOperand(value.reg(), String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- value.Unuse();
- destination()->Split(zero);
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result string = frame_->Pop();
- string.ToRegister();
-
- Result number = allocator()->Allocate();
- ASSERT(number.is_valid());
- __ movl(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
- __ IndexFromHash(number.reg(), number.reg());
- string.Unuse();
- frame_->Push(&number);
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
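- // Not inlined in this codegen: pushing undefined tells the JavaScript
- // caller to fall back to its generic join path.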
- frame_->Push(Factory::undefined_value());
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
- if (CheckForInlineRuntimeCall(node)) {
- return;
- }
-
- ZoneList<Expression*>* args = node->arguments();
- Comment cmnt(masm_, "[ CallRuntime");
- Runtime::Function* function = node->function();
-
- if (function == NULL) {
- // Push the builtins object found in the current global object.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(), GlobalObjectOperand());
- __ movq(temp.reg(),
- FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
- frame_->Push(&temp);
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- if (function == NULL) {
- // Call the JS runtime function.
- frame_->Push(node->name());
- Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting_);
- frame_->RestoreContextRegister();
- frame_->Push(&answer);
- } else {
- // Call the C runtime function.
- Result answer = frame_->CallRuntime(function, arg_count);
- frame_->Push(&answer);
- }
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
- Comment cmnt(masm_, "[ UnaryOperation");
-
- Token::Value op = node->op();
-
- if (op == Token::NOT) {
- // Swap the true and false targets but keep the same actual label
- // as the fall through.
- destination()->Invert();
- LoadCondition(node->expression(), destination(), true);
- // Swap the labels back.
- destination()->Invert();
-
- } else if (op == Token::DELETE) {
- Property* property = node->expression()->AsProperty();
- if (property != NULL) {
- Load(property->obj());
- Load(property->key());
- frame_->Push(Smi::FromInt(strict_mode_flag()));
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
- frame_->Push(&answer);
- return;
- }
-
- Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
- if (variable != NULL) {
- // Deleting an unqualified identifier is disallowed in strict mode,
- // but "delete this" is allowed.
- ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
- Slot* slot = variable->AsSlot();
- if (variable->is_global()) {
- LoadGlobal();
- frame_->Push(variable->name());
- frame_->Push(Smi::FromInt(kNonStrictMode));
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 3);
- frame_->Push(&answer);
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Call the runtime to delete from the context holding the named
- // variable. Sync the virtual frame eagerly so we can push the
- // arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- frame_->EmitPush(variable->name());
- Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
- frame_->Push(&answer);
- } else {
- // Default: Result of deleting non-global, not dynamically
- // introduced variables is false.
- frame_->Push(Factory::false_value());
- }
- } else {
- // Default: Result of deleting expressions is true.
- Load(node->expression()); // may have side-effects
- frame_->SetElementAt(0, Factory::true_value());
- }
-
- } else if (op == Token::TYPEOF) {
- // Special case for loading the typeof expression; see comment on
- // LoadTypeofExpression().
- LoadTypeofExpression(node->expression());
- Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
- frame_->Push(&answer);
-
- } else if (op == Token::VOID) {
- Expression* expression = node->expression();
- if (expression && expression->AsLiteral() && (
- expression->AsLiteral()->IsTrue() ||
- expression->AsLiteral()->IsFalse() ||
- expression->AsLiteral()->handle()->IsNumber() ||
- expression->AsLiteral()->handle()->IsString() ||
- expression->AsLiteral()->handle()->IsJSRegExp() ||
- expression->AsLiteral()->IsNull())) {
- // Omit evaluating the value of the primitive literal.
- // It will be discarded anyway, and can have no side effect.
- frame_->Push(Factory::undefined_value());
- } else {
- Load(node->expression());
- frame_->SetElementAt(0, Factory::undefined_value());
- }
-
- } else {
- bool can_overwrite = node->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- bool no_negative_zero = node->expression()->no_negative_zero();
- Load(node->expression());
- switch (op) {
- case Token::NOT:
- case Token::DELETE:
- case Token::TYPEOF:
- UNREACHABLE(); // handled above
- break;
-
- case Token::SUB: {
- GenericUnaryOpStub stub(
- Token::SUB,
- overwrite,
- NO_UNARY_FLAGS,
- no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
- Result operand = frame_->Pop();
- Result answer = frame_->CallStub(&stub, &operand);
- answer.set_type_info(TypeInfo::Number());
- frame_->Push(&answer);
- break;
- }
-
- case Token::BIT_NOT: {
- // Smi check.
- JumpTarget smi_label;
- JumpTarget continue_label;
- Result operand = frame_->Pop();
- operand.ToRegister();
-
- Condition is_smi = masm_->CheckSmi(operand.reg());
- smi_label.Branch(is_smi, &operand);
-
- GenericUnaryOpStub stub(Token::BIT_NOT,
- overwrite,
- NO_UNARY_SMI_CODE_IN_STUB);
- Result answer = frame_->CallStub(&stub, &operand);
- continue_label.Jump(&answer);
-
- smi_label.Bind(&answer);
- answer.ToRegister();
- frame_->Spill(answer.reg());
- __ SmiNot(answer.reg(), answer.reg());
- continue_label.Bind(&answer);
- answer.set_type_info(TypeInfo::Smi());
- frame_->Push(&answer);
- break;
- }
-
- case Token::ADD: {
- // Smi check.
- JumpTarget continue_label;
- Result operand = frame_->Pop();
- TypeInfo operand_info = operand.type_info();
- operand.ToRegister();
- Condition is_smi = masm_->CheckSmi(operand.reg());
- continue_label.Branch(is_smi, &operand);
- frame_->Push(&operand);
- Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
- CALL_FUNCTION, 1);
-
- continue_label.Bind(&answer);
- if (operand_info.IsSmi()) {
- answer.set_type_info(TypeInfo::Smi());
- } else if (operand_info.IsInteger32()) {
- answer.set_type_info(TypeInfo::Integer32());
- } else {
- answer.set_type_info(TypeInfo::Number());
- }
- frame_->Push(&answer);
- break;
- }
- default:
- UNREACHABLE();
- }
- }
-}
-
-
-// The value in dst was optimistically incremented or decremented.
-// The result overflowed or was not smi tagged. Call into the runtime
-// to convert the argument to a number, and call the specialized add
-// or subtract stub. The result is left in dst.
-class DeferredPrefixCountOperation: public DeferredCode {
- public:
- DeferredPrefixCountOperation(Register dst,
- bool is_increment,
- TypeInfo input_type)
- : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
- set_comment("[ DeferredCountOperation");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- bool is_increment_;
- TypeInfo input_type_;
-};
-
-
-void DeferredPrefixCountOperation::Generate() {
- Register left;
- if (input_type_.IsNumber()) {
- left = dst_;
- } else {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- left = rax;
- }
-
- GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Number());
- stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// The value in dst was optimistically incremented or decremented.
-// The result overflowed or was not smi tagged. Call into the runtime
-// to convert the argument to a number. Update the original value in
-// old. Call the specialized add or subtract stub. The result is
-// left in dst.
-class DeferredPostfixCountOperation: public DeferredCode {
- public:
- DeferredPostfixCountOperation(Register dst,
- Register old,
- bool is_increment,
- TypeInfo input_type)
- : dst_(dst),
- old_(old),
- is_increment_(is_increment),
- input_type_(input_type) {
- set_comment("[ DeferredCountOperation");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Register old_;
- bool is_increment_;
- TypeInfo input_type_;
-};
-
-
-void DeferredPostfixCountOperation::Generate() {
- Register left;
- if (input_type_.IsNumber()) {
- __ push(dst_); // Save the input to use as the old value.
- left = dst_;
- } else {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- __ push(rax); // Save the result of ToNumber to use as the old value.
- left = rax;
- }
-
- GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Number());
- stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
- if (!dst_.is(rax)) __ movq(dst_, rax);
- __ pop(old_);
-}
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
- Comment cmnt(masm_, "[ CountOperation");
-
- bool is_postfix = node->is_postfix();
- bool is_increment = node->op() == Token::INC;
-
- Variable* var = node->expression()->AsVariableProxy()->AsVariable();
- bool is_const = (var != NULL && var->mode() == Variable::CONST);
-
- // Postfix operations need a stack slot under the reference to hold
- // the old value while the new value is being stored. This way, if
- // storing the new value requires a call, the old value is already in
- // the frame ready to be spilled.
- if (is_postfix) frame_->Push(Smi::FromInt(0));
-
- // A constant reference is never stored to, so it does not need to be
- // a compound assignment (read/write) reference.
- { Reference target(this, node->expression(), !is_const);
- if (target.is_illegal()) {
- // Spoof the virtual frame to have the expected height (one higher
- // than on entry).
- if (!is_postfix) frame_->Push(Smi::FromInt(0));
- return;
- }
- target.TakeValue();
-
- Result new_value = frame_->Pop();
- new_value.ToRegister();
-
- Result old_value; // Only allocated in the postfix case.
- if (is_postfix) {
- // Allocate a temporary to preserve the old value.
- old_value = allocator_->Allocate();
- ASSERT(old_value.is_valid());
- __ movq(old_value.reg(), new_value.reg());
-
- // The return value for postfix operations is ToNumber(input).
- // Keep more precise type info if the input is some kind of
- // number already. If the input is not a number we have to wait
- // for the deferred code to convert it.
- if (new_value.type_info().IsNumber()) {
- old_value.set_type_info(new_value.type_info());
- }
- }
- // Ensure the new value is writable.
- frame_->Spill(new_value.reg());
-
- DeferredCode* deferred = NULL;
- if (is_postfix) {
- deferred = new DeferredPostfixCountOperation(new_value.reg(),
- old_value.reg(),
- is_increment,
- new_value.type_info());
- } else {
- deferred = new DeferredPrefixCountOperation(new_value.reg(),
- is_increment,
- new_value.type_info());
- }
-
- if (new_value.is_smi()) {
- if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); }
- } else {
- __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
- }
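- // The optimistic smi add/sub below bails out to the deferred code on
- // overflow, which redoes the operation via ToNumber and the generic
- // binary op stub.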
- if (is_increment) {
- __ SmiAddConstant(new_value.reg(),
- new_value.reg(),
- Smi::FromInt(1),
- deferred->entry_label());
- } else {
- __ SmiSubConstant(new_value.reg(),
- new_value.reg(),
- Smi::FromInt(1),
- deferred->entry_label());
- }
- deferred->BindExit();
-
- // Postfix count operations return their input converted to
- // number. The case when the input is already a number is covered
- // above in the allocation code for old_value.
- if (is_postfix && !new_value.type_info().IsNumber()) {
- old_value.set_type_info(TypeInfo::Number());
- }
-
- new_value.set_type_info(TypeInfo::Number());
-
- // Postfix: store the old value in the allocated slot under the
- // reference.
- if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
-
- frame_->Push(&new_value);
- // Non-constant: update the reference.
- if (!is_const) target.SetValue(NOT_CONST_INIT);
- }
-
- // Postfix: drop the new value and use the old.
- if (is_postfix) frame_->Drop();
-}
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
- // According to ECMA-262 section 11.11, page 58, the binary logical
- // operators must yield the result of one of the two expressions
- // before any ToBoolean() conversions. This means that the value
- // produced by a && or || operator is not necessarily a boolean.
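- // For example, ('' || 'x') evaluates to the string 'x' and (0 && f())
- // evaluates to the number 0 - neither result is a boolean.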
-
- // NOTE: If the left hand side produces a materialized value (not
- // control flow), we force the right hand side to do the same. This
- // is necessary because we assume that if we get control flow on the
- // last path out of an expression we got it on all paths.
- if (node->op() == Token::AND) {
- JumpTarget is_true;
- ControlDestination dest(&is_true, destination()->false_target(), true);
- LoadCondition(node->left(), &dest, false);
-
- if (dest.false_was_fall_through()) {
- // The current false target was used as the fall-through. If
- // there are no dangling jumps to is_true then the left
- // subexpression was unconditionally false. Otherwise we have
- // paths where we do have to evaluate the right subexpression.
- if (is_true.is_linked()) {
- // We need to compile the right subexpression. If the jump to
- // the current false target was a forward jump then we have a
- // valid frame, we have just bound the false target, and we
- // have to jump around the code for the right subexpression.
- if (has_valid_frame()) {
- destination()->false_target()->Unuse();
- destination()->false_target()->Jump();
- }
- is_true.Bind();
- // The left subexpression compiled to control flow, so the
- // right one is free to do so as well.
- LoadCondition(node->right(), destination(), false);
- } else {
- // We have actually just jumped to or bound the current false
- // target but the current control destination is not marked as
- // used.
- destination()->Use(false);
- }
-
- } else if (dest.is_used()) {
- // The left subexpression compiled to control flow (and is_true
- // was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), destination(), false);
-
- } else {
- // We have a materialized value on the frame, so we exit with
- // one on all paths. There are possibly also jumps to is_true
- // from nested subexpressions.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- // Avoid popping the result if it converts to 'false' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- //
- // Duplicate the TOS value. The duplicate will be popped by
- // ToBoolean.
- frame_->Dup();
- ControlDestination dest(&pop_and_continue, &exit, true);
- ToBoolean(&dest);
-
- // Pop the result of evaluating the first part.
- frame_->Drop();
-
- // Compile right side expression.
- is_true.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- }
-
- } else {
- ASSERT(node->op() == Token::OR);
- JumpTarget is_false;
- ControlDestination dest(destination()->true_target(), &is_false, false);
- LoadCondition(node->left(), &dest, false);
-
- if (dest.true_was_fall_through()) {
- // The current true target was used as the fall-through. If
- // there are no dangling jumps to is_false then the left
- // subexpression was unconditionally true. Otherwise we have
- // paths where we do have to evaluate the right subexpression.
- if (is_false.is_linked()) {
- // We need to compile the right subexpression. If the jump to
- // the current true target was a forward jump then we have a
- // valid frame, we have just bound the true target, and we
- // have to jump around the code for the right subexpression.
- if (has_valid_frame()) {
- destination()->true_target()->Unuse();
- destination()->true_target()->Jump();
- }
- is_false.Bind();
- // The left subexpression compiled to control flow, so the
- // right one is free to do so as well.
- LoadCondition(node->right(), destination(), false);
- } else {
- // We have just jumped to or bound the current true target but
- // the current control destination is not marked as used.
- destination()->Use(true);
- }
-
- } else if (dest.is_used()) {
- // The left subexpression compiled to control flow (and is_false
- // was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), destination(), false);
-
- } else {
- // We have a materialized value on the frame, so we exit with
- // one on all paths. There are possibly also jumps to is_false
- // from nested subexpressions.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- // Avoid popping the result if it converts to 'true' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- //
- // Duplicate the TOS value. The duplicate will be popped by
- // ToBoolean.
- frame_->Dup();
- ControlDestination dest(&exit, &pop_and_continue, false);
- ToBoolean(&dest);
-
- // Pop the result of evaluating the first part.
- frame_->Drop();
-
- // Compile right side expression.
- is_false.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- }
- }
-}
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
- Comment cmnt(masm_, "[ BinaryOperation");
-
- if (node->op() == Token::AND || node->op() == Token::OR) {
- GenerateLogicalBooleanOperation(node);
- } else {
- // NOTE: The code below assumes that the slow cases (calls to runtime)
- // never return a constant/immutable object.
- OverwriteMode overwrite_mode = NO_OVERWRITE;
- if (node->left()->ResultOverwriteAllowed()) {
- overwrite_mode = OVERWRITE_LEFT;
- } else if (node->right()->ResultOverwriteAllowed()) {
- overwrite_mode = OVERWRITE_RIGHT;
- }
-
- if (node->left()->IsTrivial()) {
- Load(node->right());
- Result right = frame_->Pop();
- frame_->Push(node->left());
- frame_->Push(&right);
- } else {
- Load(node->left());
- Load(node->right());
- }
- GenericBinaryOperation(node, overwrite_mode);
- }
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
- frame_->PushFunction();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
- Comment cmnt(masm_, "[ CompareOperation");
-
- // Get the expressions from the node.
- Expression* left = node->left();
- Expression* right = node->right();
- Token::Value op = node->op();
- // To make typeof testing for natives implemented in JavaScript really
- // efficient, we generate special code for expressions of the form:
- // 'typeof <expression> == <string>'.
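- // For example, (typeof x == 'number') compiles down to direct smi and
- // heap-number map checks instead of materializing the type-name string.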
- UnaryOperation* operation = left->AsUnaryOperation();
- if ((op == Token::EQ || op == Token::EQ_STRICT) &&
- (operation != NULL && operation->op() == Token::TYPEOF) &&
- (right->AsLiteral() != NULL &&
- right->AsLiteral()->handle()->IsString())) {
- Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
-
- // Load the operand and move it to a register.
- LoadTypeofExpression(operation->expression());
- Result answer = frame_->Pop();
- answer.ToRegister();
-
- if (check->Equals(Heap::number_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->true_target()->Branch(is_smi);
- frame_->Spill(answer.reg());
- __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(Heap::string_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
-
- // It can be an undetectable string object.
- __ movq(kScratchRegister,
- FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- destination()->false_target()->Branch(not_zero);
- __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
- answer.Unuse();
- destination()->Split(below); // Unsigned byte comparison needed.
-
- } else if (check->Equals(Heap::boolean_symbol())) {
- __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
- destination()->true_target()->Branch(equal);
- __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(Heap::undefined_symbol())) {
- __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
- destination()->true_target()->Branch(equal);
-
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
-
- // It can be an undetectable object.
- __ movq(kScratchRegister,
- FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- answer.Unuse();
- destination()->Split(not_zero);
-
- } else if (check->Equals(Heap::function_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
- frame_->Spill(answer.reg());
- __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
- destination()->true_target()->Branch(equal);
- // Regular expressions are callable so typeof == 'function'.
- __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(Heap::object_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
- __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
- destination()->true_target()->Branch(equal);
-
- // Regular expressions are typeof == 'function', not 'object'.
- __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
- destination()->false_target()->Branch(equal);
-
- // It can be an undetectable object.
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- destination()->false_target()->Branch(not_zero);
- __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
- destination()->false_target()->Branch(below);
- __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
- answer.Unuse();
- destination()->Split(below_equal);
- } else {
- // Uncommon case: typeof testing against a string literal that is
- // never returned from the typeof operator.
- answer.Unuse();
- destination()->Goto(false);
- }
- return;
- }
-
- Condition cc = no_condition;
- bool strict = false;
- switch (op) {
- case Token::EQ_STRICT:
- strict = true;
- // Fall through
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
- break;
- case Token::GTE:
- cc = greater_equal;
- break;
- case Token::IN: {
- Load(left);
- Load(right);
- Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
- frame_->Push(&answer); // push the result
- return;
- }
- case Token::INSTANCEOF: {
- Load(left);
- Load(right);
- InstanceofStub stub(InstanceofStub::kNoFlags);
- Result answer = frame_->CallStub(&stub, 2);
- answer.ToRegister();
- __ testq(answer.reg(), answer.reg());
- answer.Unuse();
- destination()->Split(zero);
- return;
- }
- default:
- UNREACHABLE();
- }
-
- if (left->IsTrivial()) {
- Load(right);
- Result right_result = frame_->Pop();
- frame_->Push(left);
- frame_->Push(&right_result);
- } else {
- Load(left);
- Load(right);
- }
-
- Comparison(node, cc, strict, destination());
-}
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
- Comment cmnt(masm_, "[ CompareToNull");
-
- Load(node->expression());
- Result operand = frame_->Pop();
- operand.ToRegister();
- __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
- if (node->is_strict()) {
- operand.Unuse();
- destination()->Split(equal);
- } else {
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- destination()->true_target()->Branch(equal);
- __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
- destination()->true_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(operand.reg());
- destination()->false_target()->Branch(is_smi);
-
- // It can be an undetectable object.
- // Use a scratch register in preference to spilling operand.reg().
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(),
- FieldOperand(operand.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- temp.Unuse();
- operand.Unuse();
- destination()->Split(not_zero);
- }
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
- return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
- && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
- && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
- && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
- && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
- && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
- && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
- && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
- && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
- && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
-}
-#endif
-
-
-// Emit a LoadIC call to get the value from receiver and leave it in
-// dst. The receiver register is restored after the call.
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
- DeferredReferenceGetNamedValue(Register dst,
- Register receiver,
- Handle<String> name)
- : dst_(dst), receiver_(receiver), name_(name) {
- set_comment("[ DeferredReferenceGetNamedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- private:
- Label patch_site_;
- Register dst_;
- Register receiver_;
- Handle<String> name_;
-};
-
-
-void DeferredReferenceGetNamedValue::Generate() {
- if (!receiver_.is(rax)) {
- __ movq(rax, receiver_);
- }
- __ Move(rcx, name_);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The call must be followed by a test rax instruction to indicate
- // that the inobject property case was inlined.
- //
- // Store the delta to the map check instruction here in the test
- // instruction. Use masm_-> instead of the __ macro since the
- // latter can't return a value.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- masm_->testl(rax, Immediate(-delta_to_patch_site));
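- // Roughly: the IC miss handler negates this immediate to walk back from
- // the call site to the inlined map-check movq, then patches in the real
- // map and field offset so subsequent loads stay inline.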
- __ IncrementCounter(&Counters::named_load_inline_miss, 1);
-
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceGetKeyedValue(Register dst,
- Register receiver,
- Register key)
- : dst_(dst), receiver_(receiver), key_(key) {
- set_comment("[ DeferredReferenceGetKeyedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- private:
- Label patch_site_;
- Register dst_;
- Register receiver_;
- Register key_;
-};
-
-
-void DeferredReferenceGetKeyedValue::Generate() {
- if (receiver_.is(rdx)) {
- if (!key_.is(rax)) {
- __ movq(rax, key_);
- } // else do nothing.
- } else if (receiver_.is(rax)) {
- if (key_.is(rdx)) {
- __ xchg(rax, rdx);
- } else if (key_.is(rax)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rdx, receiver_);
- __ movq(rax, key_);
- }
- } else if (key_.is(rax)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rax, key_);
- __ movq(rdx, receiver_);
- }
- // Calculate the delta from the IC call instruction to the map check
- // movq instruction in the inlined version. This delta is stored in
- // a test(rax, delta) instruction after the call so that we can find
- // it in the IC initialization code and patch the movq instruction.
- // This means that we cannot allow test instructions after calls to
- // KeyedLoadIC stubs in other places.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instruction to the
- // test instruction. We use masm_-> directly here instead of the __
- // macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- // TODO(X64): Consider whether it's worth switching the test to a
- // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
- // be generated normally.
- masm_->testl(rax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
-
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceSetKeyedValue(Register value,
- Register key,
- Register receiver,
- StrictModeFlag strict_mode)
- : value_(value),
- key_(key),
- receiver_(receiver),
- strict_mode_(strict_mode) {
- set_comment("[ DeferredReferenceSetKeyedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- private:
- Register value_;
- Register key_;
- Register receiver_;
- Label patch_site_;
- StrictModeFlag strict_mode_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
- __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
- // Move value, receiver, and key to registers rax, rdx, and rcx, as
- // the IC stub expects.
- // Move value to rax, using xchg if the receiver or key is in rax.
- if (!value_.is(rax)) {
- if (!receiver_.is(rax) && !key_.is(rax)) {
- __ movq(rax, value_);
- } else {
- __ xchg(rax, value_);
- // Update receiver_ and key_ if they are affected by the swap.
- if (receiver_.is(rax)) {
- receiver_ = value_;
- } else if (receiver_.is(value_)) {
- receiver_ = rax;
- }
- if (key_.is(rax)) {
- key_ = value_;
- } else if (key_.is(value_)) {
- key_ = rax;
- }
- }
- }
- // Value is now in rax. Its original location is remembered in value_,
- // and the value is restored to value_ before returning.
- // The variables receiver_ and key_ are not preserved.
- // Move receiver and key to rdx and rcx, swapping if necessary.
- if (receiver_.is(rdx)) {
- if (!key_.is(rcx)) {
- __ movq(rcx, key_);
- } // Else everything is already in the right place.
- } else if (receiver_.is(rcx)) {
- if (key_.is(rdx)) {
- __ xchg(rcx, rdx);
- } else if (key_.is(rcx)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rdx, receiver_);
- __ movq(rcx, key_);
- }
- } else if (key_.is(rcx)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rcx, key_);
- __ movq(rdx, receiver_);
- }
-
- // Call the IC stub.
- Handle<Code> ic(Builtins::builtin(
- (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instructions (initial movq)
- // to the test instruction. We use masm_-> directly here instead of the
- // __ macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- masm_->testl(rax, Immediate(-delta_to_patch_site));
- // Restore value (returned from store IC).
- if (!value_.is(rax)) __ movq(value_, rax);
-}
-
-
-Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Do not inline the inobject property case for loads from the global
- // object. Also do not inline for unoptimized code. This saves time
- // in the code generator. Unoptimized code is toplevel code or code
- // that is not in a loop.
- if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- Comment cmnt(masm(), "[ Load from named Property");
- frame()->Push(name);
-
- RelocInfo::Mode mode = is_contextual
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- result = frame()->CallLoadIC(mode);
- // A test rax instruction following the call signals that the
- // inobject property case was inlined. Ensure that there is not
- // a test rax instruction here.
- __ nop();
- } else {
- // Inline the inobject property case.
- Comment cmnt(masm(), "[ Inlined named property load");
- Result receiver = frame()->Pop();
- receiver.ToRegister();
- result = allocator()->Allocate();
- ASSERT(result.is_valid());
-
- // Cannot use r12 for receiver, because that changes
- // the distance between a call and a fixup location,
- // due to a special encoding of r12 as r/m in a ModR/M byte.
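- // (Like rsp, using r12 as a base register forces an extra SIB byte into
- // the instruction encoding, which would change the code size.)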
- if (receiver.reg().is(r12)) {
- frame()->Spill(receiver.reg()); // It will be overwritten with result.
- // Swap receiver and value.
- __ movq(result.reg(), receiver.reg());
- Result temp = receiver;
- receiver = result;
- result = temp;
- }
-
- DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
-
- // Check that the receiver is a heap object.
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
- __ bind(deferred->patch_site());
- // This is the map check instruction that will be patched (so we can't
- // use the double underscore macro that may insert instructions).
- // Initially use an invalid map to force a failure.
- masm()->Move(kScratchRegister, Factory::null_value());
- masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- // This branch is always a forwards branch so it's always a fixed
- // size which allows the assert below to succeed and patching to work.
- // Don't use deferred->Branch(...), since that might add coverage code.
- masm()->j(not_equal, deferred->entry_label());
-
- // The delta from the patch label to the load offset must be
- // statically known.
- ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
- LoadIC::kOffsetToLoadInstruction);
- // The initial (invalid) offset has to be large enough to force
- // a 32-bit instruction encoding to allow patching with an
- // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
- int offset = kMaxInt;
- masm()->movq(result.reg(), FieldOperand(receiver.reg(), offset));
-
- __ IncrementCounter(&Counters::named_load_inline, 1);
- deferred->BindExit();
- }
- ASSERT(frame()->height() == original_height - 1);
- return result;
-}
-
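In other words, the inlined sequence implements the following check-then-load fast path; both the map and the field offset start out deliberately invalid and are patched once the IC has seen a real receiver. A C-level sketch with stand-in types (the real code works on tagged pointers and jumps to deferred code on failure):

#include <cstdint>

struct Map;
struct ReceiverSketch {
  Map* map;
  intptr_t fields[4];  // stand-in for in-object property storage
};

// Returns false when the deferred (slow) path must run instead.
static bool NamedLoadFastPath(ReceiverSketch* receiver, Map* expected_map,
                              int field_index, intptr_t* result) {
  if (receiver == nullptr) return false;            // stands in for the smi check
  if (receiver->map != expected_map) return false;  // the patched map compare
  *result = receiver->fields[field_index];          // the patched offset load
  return true;
}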
-
-Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
- int expected_height = frame()->height() - (is_contextual ? 1 : 2);
-#endif
-
- Result result;
- if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
- // A test rax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test rax
- // instruction here.
- __ nop();
- } else {
- // Inline the in-object property case.
- JumpTarget slow, done;
- Label patch_site;
-
- // Get the value and receiver from the stack.
- Result value = frame()->Pop();
- value.ToRegister();
- Result receiver = frame()->Pop();
- receiver.ToRegister();
-
- // Allocate result register.
- result = allocator()->Allocate();
- ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
-
- // Cannot use r12 for receiver, because that changes
- // the distance between a call and a fixup location,
- // due to a special encoding of r12 as r/m in a ModR/M byte.
- if (receiver.reg().is(r12)) {
- frame()->Spill(receiver.reg()); // It will be overwritten with result.
- // Swap receiver and value.
- __ movq(result.reg(), receiver.reg());
- Result temp = receiver;
- receiver = result;
- result = temp;
- }
-
- // Check that the receiver is a heap object.
- Condition is_smi = masm()->CheckSmi(receiver.reg());
- slow.Branch(is_smi, &value, &receiver);
-
- // This is the map check instruction that will be patched.
- // Initially use an invalid map to force a failure. The exact
- // instruction sequence is important because we use the
- // kOffsetToStoreInstruction constant for patching. We avoid using
- // the __ macro for the following two instructions because it
- // might introduce extra instructions.
- __ bind(&patch_site);
- masm()->Move(kScratchRegister, Factory::null_value());
- masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- // This branch is always a forwards branch so it's always a fixed size
- // which allows the assert below to succeed and patching to work.
- slow.Branch(not_equal, &value, &receiver);
-
- // The delta from the patch label to the store offset must be
- // statically known.
- ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
- StoreIC::kOffsetToStoreInstruction);
-
- // The initial (invalid) offset has to be large enough to force a 32-bit
- // instruction encoding to allow patching with an arbitrary offset. Use
- // kMaxInt (minus kHeapObjectTag).
- int offset = kMaxInt;
- __ movq(FieldOperand(receiver.reg(), offset), value.reg());
- __ movq(result.reg(), value.reg());
-
- // Allocate scratch register for write barrier.
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- // The write barrier clobbers all input registers, so spill the
- // receiver and the value.
- frame_->Spill(receiver.reg());
- frame_->Spill(value.reg());
-
- // If the receiver and the value share a register allocate a new
- // register for the receiver.
- if (receiver.reg().is(value.reg())) {
- receiver = allocator()->Allocate();
- ASSERT(receiver.is_valid());
- __ movq(receiver.reg(), value.reg());
- }
-
- // Update the write barrier. To save instructions in the inlined
- // version we do not filter smis.
- Label skip_write_barrier;
- __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
- int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
- __ lea(scratch.reg(), Operand(receiver.reg(), offset));
- __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
- if (FLAG_debug_code) {
- __ movq(receiver.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- __ movq(value.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- __ movq(scratch.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- }
- __ bind(&skip_write_barrier);
- value.Unuse();
- scratch.Unuse();
- receiver.Unuse();
- done.Jump(&result);
-
- slow.Bind(&value, &receiver);
- frame()->Push(&receiver);
- frame()->Push(&value);
- result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
- // Encode the offset to the map check instruction and the offset
- // to the write barrier store address computation in a test rax
- // instruction.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
- __ testl(rax,
- Immediate((delta_to_record_write << 16) | delta_to_patch_site));
- done.Bind(&result);
- }
-
- ASSERT_EQ(expected_height, frame()->height());
- return result;
-}
-
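The packed immediate hands the patching side both offsets in a single marker instruction; a sketch of the matching unpacking step (the shift and mask simply mirror the encoding above):

#include <cstdint>

static void UnpackStoreDeltas(uint32_t test_immediate,
                              int* delta_to_record_write,
                              int* delta_to_patch_site) {
  // High half: map-check site to the write-barrier address computation.
  // Low half: map-check site to the marker instruction itself.
  *delta_to_record_write = static_cast<int>(test_immediate >> 16);
  *delta_to_patch_site = static_cast<int>(test_immediate & 0xFFFF);
}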
-
-Result CodeGenerator::EmitKeyedLoad() {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Inline array load code if inside of a loop. We do not know
- // the receiver map yet, so we initially generate the code with
- // a check against an invalid map. In the inline cache code, we
- // patch the map check if appropriate.
- if (loop_nesting() > 0) {
- Comment cmnt(masm_, "[ Inlined load from keyed Property");
-
- // Use a fresh temporary to load the elements without destroying
- // the receiver which is needed for the deferred slow case.
- // Allocate the temporary early so that we use rax if it is free.
- Result elements = allocator()->Allocate();
- ASSERT(elements.is_valid());
-
- Result key = frame_->Pop();
- Result receiver = frame_->Pop();
- key.ToRegister();
- receiver.ToRegister();
-
- // If key and receiver are shared registers on the frame, their values will
- // be automatically saved and restored when going to deferred code.
- // The result is returned in elements, which is not shared.
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(elements.reg(),
- receiver.reg(),
- key.reg());
-
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
- // Check that the receiver has the expected map.
- // Initially, use an invalid map. The map is patched in the IC
- // initialization code.
- __ bind(deferred->patch_site());
- // Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching. Do not use a load
- // from the root array to load null_value, since the load must be patched
- // with the expected receiver map, which is not in the root array.
- masm_->movq(kScratchRegister, Factory::null_value(),
- RelocInfo::EMBEDDED_OBJECT);
- masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- deferred->Branch(not_equal);
-
- __ JumpUnlessNonNegativeSmi(key.reg(), deferred->entry_label());
-
- // Get the elements array from the receiver.
- __ movq(elements.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ AssertFastElements(elements.reg());
-
- // Check that key is within bounds.
- __ SmiCompare(key.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // Load and check that the result is not the hole. We could
- // reuse the index or elements register for the value.
- //
- // TODO(206): Consider whether it makes sense to try some
- // heuristic about which register to reuse. For example, if
- // one is rax, then we can reuse that one because the value
- // coming from the deferred code will be in rax.
- SmiIndex index =
- masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
- __ movq(elements.reg(),
- FieldOperand(elements.reg(),
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- result = elements;
- __ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
- deferred->Branch(equal);
- __ IncrementCounter(&Counters::keyed_load_inline, 1);
-
- deferred->BindExit();
- } else {
- Comment cmnt(masm_, "[ Load from keyed Property");
- result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
- }
- ASSERT(frame()->height() == original_height - 2);
- return result;
-}
-
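Stripped of register allocation, the inlined fast path performs exactly these checks; a self-contained sketch with plain C++ types standing in for tagged values (any failure falls back to the deferred IC call):

#include <cstdint>
#include <vector>

static const intptr_t kTheHoleSketch = -1;  // stand-in for the hole sentinel

static bool KeyedLoadFastPath(const std::vector<intptr_t>& elements,
                              intptr_t key, intptr_t* result) {
  if (key < 0 ||
      key >= static_cast<intptr_t>(elements.size())) return false;  // bounds
  intptr_t value = elements[static_cast<size_t>(key)];
  if (value == kTheHoleSketch) return false;  // hole: take the slow path
  *result = value;
  return true;
}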
-
-Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Generate inlined version of the keyed store if the code is in a loop
- // and the key is likely to be a smi.
- if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
- Comment cmnt(masm(), "[ Inlined store to keyed Property");
-
- // Get the receiver, key and value into registers.
- result = frame()->Pop();
- Result key = frame()->Pop();
- Result receiver = frame()->Pop();
-
- Result tmp = allocator_->Allocate();
- ASSERT(tmp.is_valid());
- Result tmp2 = allocator_->Allocate();
- ASSERT(tmp2.is_valid());
-
- // Determine whether the value is a constant before putting it in a
- // register.
- bool value_is_constant = result.is_constant();
-
- // Make sure that value, key and receiver are in registers.
- result.ToRegister();
- key.ToRegister();
- receiver.ToRegister();
-
- DeferredReferenceSetKeyedValue* deferred =
- new DeferredReferenceSetKeyedValue(result.reg(),
- key.reg(),
- receiver.reg(),
- strict_mode_flag());
-
- // Check that the receiver is not a smi.
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
- // Check that the key is a smi.
- if (!key.is_smi()) {
- __ JumpIfNotSmi(key.reg(), deferred->entry_label());
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(key.reg());
- }
-
- // Check that the receiver is a JSArray.
- __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
- deferred->Branch(not_equal);
-
- // Check that the key is within bounds. Both the key and the length of
- // the JSArray are smis. Use unsigned comparison to handle negative keys.
- __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
- key.reg());
- deferred->Branch(below_equal);
-
- // Get the elements array from the receiver and check that it is not a
- // dictionary.
- __ movq(tmp.reg(),
- FieldOperand(receiver.reg(), JSArray::kElementsOffset));
-
- // Check whether it is possible to omit the write barrier. If the elements
- // array is in new space or the value written is a smi we can safely update
- // the elements array without write barrier.
- Label in_new_space;
- __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
- if (!value_is_constant) {
- __ JumpIfNotSmi(result.reg(), deferred->entry_label());
- }
-
- __ bind(&in_new_space);
- // Bind the deferred code patch site to be able to locate the fixed
- // array map comparison. When debugging, we patch this comparison to
- // always fail so that we will hit the IC call in the deferred code
- // which will allow the debugger to break for fast case stores.
- __ bind(deferred->patch_site());
- // Avoid using __ to ensure the distance from patch_site
- // to the map address is always the same.
- masm()->movq(kScratchRegister, Factory::fixed_array_map(),
- RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- deferred->Branch(not_equal);
-
- // Store the value.
- SmiIndex index =
- masm()->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
- __ movq(FieldOperand(tmp.reg(),
- index.reg,
- index.scale,
- FixedArray::kHeaderSize),
- result.reg());
- __ IncrementCounter(&Counters::keyed_store_inline, 1);
-
- deferred->BindExit();
- } else {
- result = frame()->CallKeyedStoreIC(strict_mode_flag());
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed store.
- __ nop();
- }
- ASSERT(frame()->height() == original_height - 3);
- return result;
-}
-
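The barrier-omission rule above reduces to one predicate: the write barrier records old-to-new pointers, so there is nothing to record if the elements array itself lives in new space, or if the stored value is a smi rather than a heap pointer. As a sketch:

static bool CanOmitWriteBarrier(bool elements_in_new_space, bool value_is_smi) {
  return elements_in_new_space || value_is_smi;
}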
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-Handle<String> Reference::GetName() {
- ASSERT(type_ == NAMED);
- Property* property = expression_->AsProperty();
- if (property == NULL) {
- // Global variable reference treated as a named property reference.
- VariableProxy* proxy = expression_->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- return proxy->name();
- } else {
- Literal* raw_name = property->key()->AsLiteral();
- ASSERT(raw_name != NULL);
- return Handle<String>(String::cast(*raw_name->handle()));
- }
-}
-
-
-void Reference::GetValue() {
- ASSERT(!cgen_->in_spilled_code());
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- MacroAssembler* masm = cgen_->masm();
-
- // Record the source position for the property load.
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Load from Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- break;
- }
-
- case NAMED: {
- Variable* var = expression_->AsVariableProxy()->AsVariable();
- bool is_global = var != NULL;
- ASSERT(!is_global || var->is_global());
- if (persist_after_get_) {
- cgen_->frame()->Dup();
- }
- Result result = cgen_->EmitNamedLoad(GetName(), is_global);
- cgen_->frame()->Push(&result);
- break;
- }
-
- case KEYED: {
- // A load of a bare identifier (load from global) cannot be keyed.
- ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL);
- if (persist_after_get_) {
- cgen_->frame()->PushElementAt(1);
- cgen_->frame()->PushElementAt(1);
- }
- Result value = cgen_->EmitKeyedLoad();
- cgen_->frame()->Push(&value);
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- if (!persist_after_get_) {
- set_unloaded();
- }
-}
-
-
-void Reference::TakeValue() {
- // TODO(X64): This function is completely architecture independent. Move
- // it somewhere shared.
-
- // For non-constant frame-allocated slots, we invalidate the value in the
- // slot. For all others, we fall back on GetValue.
- ASSERT(!cgen_->in_spilled_code());
- ASSERT(!is_illegal());
- if (type_ != SLOT) {
- GetValue();
- return;
- }
-
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- if (slot->type() == Slot::LOOKUP ||
- slot->type() == Slot::CONTEXT ||
- slot->var()->mode() == Variable::CONST ||
- slot->is_arguments()) {
- GetValue();
- return;
- }
-
- // Only non-constant, frame-allocated parameters and locals can reach
- // here. Be careful not to use the optimizations for arguments
- // object access since it may not have been initialized yet.
- ASSERT(!slot->is_arguments());
- if (slot->type() == Slot::PARAMETER) {
- cgen_->frame()->TakeParameterAt(slot->index());
- } else {
- ASSERT(slot->type() == Slot::LOCAL);
- cgen_->frame()->TakeLocalAt(slot->index());
- }
-
- ASSERT(persist_after_get_);
- // Do not unload the reference, because it is used in SetValue.
-}
-
-
-void Reference::SetValue(InitState init_state) {
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- MacroAssembler* masm = cgen_->masm();
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Store to Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- cgen_->StoreToSlot(slot, init_state);
- set_unloaded();
- break;
- }
-
- case NAMED: {
- Comment cmnt(masm, "[ Store to named Property");
- Result answer = cgen_->EmitNamedStore(GetName(), false);
- cgen_->frame()->Push(&answer);
- set_unloaded();
- break;
- }
-
- case KEYED: {
- Comment cmnt(masm, "[ Store to keyed Property");
- Property* property = expression()->AsProperty();
- ASSERT(property != NULL);
-
- Result answer = cgen_->EmitKeyedStore(property->key()->type());
- cgen_->frame()->Push(&answer);
- set_unloaded();
- break;
- }
-
- case UNLOADED:
- case ILLEGAL:
- UNREACHABLE();
- }
-}
-
-
-Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
- Result* left,
- Result* right) {
- if (stub->ArgsInRegistersSupported()) {
- stub->SetArgsInRegisters();
- return frame_->CallStub(stub, left, right);
- } else {
- frame_->Push(left);
- frame_->Push(right);
- return frame_->CallStub(stub, 2);
- }
-}
-
-#undef __
-
#define __ masm.
#ifdef _WIN64
@@ -8747,7 +58,7 @@ ModuloFunction CreateModuloFunction() {
&actual_size,
true));
CHECK(buffer);
- Assembler masm(buffer, static_cast<int>(actual_size));
+ Assembler masm(NULL, buffer, static_cast<int>(actual_size));
// Generated code is put into a fixed, unmovable, buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
// (e.g. the JavaScript nan-object).
@@ -8821,7 +132,7 @@ ModuloFunction CreateModuloFunction() {
CodeDesc desc;
masm.GetCode(&desc);
- // Call the function from C++.
+ // Call the function from C++ through this pointer.
return FUNCTION_CAST<ModuloFunction>(buffer);
}
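After FUNCTION_CAST the buffer is called like any C++ function. A usage sketch, assuming the typedef double (*ModuloFunction)(double, double) from earlier in this file:

ModuloFunction do_mod = CreateModuloFunction();
double remainder = do_mod(5.5, 2.0);  // fmod-style result; expected 1.5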
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 439282919..94c785028 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -30,270 +30,17 @@
#include "ast.h"
#include "ic-inl.h"
-#include "jump-target-heavy.h"
namespace v8 {
namespace internal {
// Forward declarations
class CompilationInfo;
-class DeferredCode;
-class RegisterAllocator;
-class RegisterFile;
-enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame. The reference may be consumed
-// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
-class Reference BASE_EMBEDDED {
- public:
- // The values of the types are important; see size().
- enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-
- Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get = false);
- ~Reference();
-
- Expression* expression() const { return expression_; }
- Type type() const { return type_; }
- void set_type(Type value) {
- ASSERT_EQ(ILLEGAL, type_);
- type_ = value;
- }
-
- void set_unloaded() {
- ASSERT_NE(ILLEGAL, type_);
- ASSERT_NE(UNLOADED, type_);
- type_ = UNLOADED;
- }
- // The size the reference takes up on the stack.
- int size() const {
- return (type_ < SLOT) ? 0 : type_;
- }
-
- bool is_illegal() const { return type_ == ILLEGAL; }
- bool is_slot() const { return type_ == SLOT; }
- bool is_property() const { return type_ == NAMED || type_ == KEYED; }
- bool is_unloaded() const { return type_ == UNLOADED; }
-
- // Return the name. Only valid for named property references.
- Handle<String> GetName();
-
- // Generate code to push the value of the reference on top of the
- // expression stack. The reference is expected to be already on top of
- // the expression stack, and it is consumed by the call unless the
- // reference is for a compound assignment.
- // If the reference is not consumed, it is left in place under its value.
- void GetValue();
-
- // Like GetValue except that the slot is expected to be written to before
- // being read from again. The value of the reference may be invalidated,
- // causing subsequent attempts to read it to fail.
- void TakeValue();
-
- // Generate code to store the value on top of the expression stack in the
- // reference. The reference is expected to be immediately below the value
- // on the expression stack. The value is stored in the location specified
- // by the reference, and is left on top of the stack, after the reference
- // is popped from beneath it (unloaded).
- void SetValue(InitState init_state);
-
- private:
- CodeGenerator* cgen_;
- Expression* expression_;
- Type type_;
- bool persist_after_get_;
-};
-
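The class comment implies the following usage shape; a sketch with cgen and expr assumed to be in scope, mirroring how the code generator consumes references:

{
  Reference ref(cgen, expr);   // constructor loads the reference parts
  if (!ref.is_illegal()) {
    ref.GetValue();            // consumes the reference, pushes its value
  }
}                              // must be UNLOADED (or ILLEGAL) at scope end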
-
-// -------------------------------------------------------------------------
-// Control destinations.
-
-// A control destination encapsulates a pair of jump targets and a
-// flag indicating which one is the preferred fall-through. The
-// preferred fall-through must be unbound, the other may be already
-// bound (ie, a backward target).
-//
-// The true and false targets may be jumped to unconditionally or
-// control may split conditionally. Unconditional jumping and
-// splitting should be emitted in tail position (as the last thing
-// when compiling an expression) because they can cause either label
-// to be bound or the non-fall through to be jumped to leaving an
-// invalid virtual frame.
-//
-// The labels in the control destination can be extracted and
-// manipulated normally without affecting the state of the
-// destination.
-
-class ControlDestination BASE_EMBEDDED {
- public:
- ControlDestination(JumpTarget* true_target,
- JumpTarget* false_target,
- bool true_is_fall_through)
- : true_target_(true_target),
- false_target_(false_target),
- true_is_fall_through_(true_is_fall_through),
- is_used_(false) {
- ASSERT(true_is_fall_through ? !true_target->is_bound()
- : !false_target->is_bound());
- }
-
- // Accessors for the jump targets. Directly jumping or branching to,
- // or binding, the targets will not update the destination's state.
- JumpTarget* true_target() const { return true_target_; }
- JumpTarget* false_target() const { return false_target_; }
-
- // True if the destination has been jumped to unconditionally or
- // control has been split to both targets. This predicate does not
- // test whether the targets have been extracted and manipulated as
- // raw jump targets.
- bool is_used() const { return is_used_; }
-
- // True if the destination is used and the true target (respectively
- // false target) was the fall through. If the target is backward,
- // "fall through" included jumping unconditionally to it.
- bool true_was_fall_through() const {
- return is_used_ && true_is_fall_through_;
- }
-
- bool false_was_fall_through() const {
- return is_used_ && !true_is_fall_through_;
- }
-
- // Emit a branch to one of the true or false targets, and bind the
- // other target. Because this binds the fall-through target, it
- // should be emitted in tail position (as the last thing when
- // compiling an expression).
- void Split(Condition cc) {
- ASSERT(!is_used_);
- if (true_is_fall_through_) {
- false_target_->Branch(NegateCondition(cc));
- true_target_->Bind();
- } else {
- true_target_->Branch(cc);
- false_target_->Bind();
- }
- is_used_ = true;
- }
-
- // Emit an unconditional jump in tail position, to the true target
- // (if the argument is true) or the false target. The "jump" will
- // actually bind the jump target if it is forward, jump to it if it
- // is backward.
- void Goto(bool where) {
- ASSERT(!is_used_);
- JumpTarget* target = where ? true_target_ : false_target_;
- if (target->is_bound()) {
- target->Jump();
- } else {
- target->Bind();
- }
- is_used_ = true;
- true_is_fall_through_ = where;
- }
-
- // Mark this jump target as used as if Goto had been called, but
- // without generating a jump or binding a label (the control effect
- // should have already happened). This is used when the left
- // subexpression of a short-circuit boolean operator is
- // compiled.
- void Use(bool where) {
- ASSERT(!is_used_);
- ASSERT((where ? true_target_ : false_target_)->is_bound());
- is_used_ = true;
- true_is_fall_through_ = where;
- }
-
- // Swap the true and false targets but keep the same actual label as
- // the fall through. This is used when compiling negated
- // expressions, where we want to swap the targets but preserve the
- // state.
- void Invert() {
- JumpTarget* temp_target = true_target_;
- true_target_ = false_target_;
- false_target_ = temp_target;
-
- true_is_fall_through_ = !true_is_fall_through_;
- }
-
- private:
- // True and false jump targets.
- JumpTarget* true_target_;
- JumpTarget* false_target_;
-
- // Before using the destination: true if the true target is the
- // preferred fall through, false if the false target is. After
- // using the destination: true if the true target was actually used
- // as the fall through, false if the false target was.
- bool true_is_fall_through_;
-
- // True if the Split or Goto functions have been called.
- bool is_used_;
-};
-
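A sketch of the intended Split usage when compiling a condition such as "a < b"; names and the surrounding comparison code are assumed, and the fall-through target is left unbound as the assertion in the constructor requires:

JumpTarget then_target;
JumpTarget else_target;
ControlDestination dest(&then_target, &else_target, true);
// ... emit the comparison, leaving the result in the condition flags ...
dest.Split(less);  // branches to else_target on not-less, binds then_target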
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the jump target pair). It is threaded through
-// the call stack. Constructing a state implicitly pushes it on the owning
-// code generator's stack of states, and destroying one implicitly pops it.
-//
-// The code generator state is only used for expressions, so statements have
-// the initial state.
-
-class CodeGenState BASE_EMBEDDED {
- public:
- // Create an initial code generator state. Destroying the initial state
- // leaves the code generator with a NULL state.
- explicit CodeGenState(CodeGenerator* owner);
-
- // Create a code generator state based on a code generator's current
- // state. The new state has its own control destination.
- CodeGenState(CodeGenerator* owner, ControlDestination* destination);
-
- // Destroy a code generator state and restore the owning code generator's
- // previous state.
- ~CodeGenState();
-
- // Accessors for the state.
- ControlDestination* destination() const { return destination_; }
-
- private:
- // The owning code generator.
- CodeGenerator* owner_;
-
- // A control destination in case the expression has a control-flow
- // effect.
- ControlDestination* destination_;
-
- // The previous state of the owning code generator, restored when
- // this state is destroyed.
- CodeGenState* previous_;
-};
-
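The constructor/destructor pair gives the usual RAII push/pop shape. A sketch of the implied implementation (the actual definitions lived in the removed .cc file):

CodeGenState::CodeGenState(CodeGenerator* owner,
                           ControlDestination* destination)
    : owner_(owner), destination_(destination), previous_(owner->state()) {
  owner->set_state(this);  // push: this becomes the current state
}

CodeGenState::~CodeGenState() {
  owner_->set_state(previous_);  // pop: restore the enclosing state
}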
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode
-
-enum ArgumentsAllocationMode {
- NO_ARGUMENTS_ALLOCATION,
- EAGER_ARGUMENTS_ALLOCATION,
- LAZY_ARGUMENTS_ALLOCATION
-};
-
-
-// -------------------------------------------------------------------------
// CodeGenerator
class CodeGenerator: public AstVisitor {
@@ -319,428 +66,7 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
- // Accessors
- MacroAssembler* masm() { return masm_; }
- VirtualFrame* frame() const { return frame_; }
- inline Handle<Script> script();
-
- bool has_valid_frame() const { return frame_ != NULL; }
-
- // Set the virtual frame to be new_frame, with non-frame register
- // reference counts given by non_frame_registers. The non-frame
- // register reference counts of the old frame are returned in
- // non_frame_registers.
- void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
- void DeleteFrame();
-
- RegisterAllocator* allocator() const { return allocator_; }
-
- CodeGenState* state() { return state_; }
- void set_state(CodeGenState* state) { state_ = state; }
-
- void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
- bool in_spilled_code() const { return in_spilled_code_; }
- void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
-
private:
- // Type of a member function that generates inline code for a native function.
- typedef void (CodeGenerator::*InlineFunctionGenerator)
- (ZoneList<Expression*>*);
-
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
- // Construction/Destruction
- explicit CodeGenerator(MacroAssembler* masm);
-
- // Accessors
- inline bool is_eval();
- inline Scope* scope();
- inline StrictModeFlag strict_mode_flag();
-
- // Generating deferred code.
- void ProcessDeferred();
-
- // State
- ControlDestination* destination() const { return state_->destination(); }
-
- // Track loop nesting level.
- int loop_nesting() const { return loop_nesting_; }
- void IncrementLoopNesting() { loop_nesting_++; }
- void DecrementLoopNesting() { loop_nesting_--; }
-
-
- // Node visitors.
- void VisitStatements(ZoneList<Statement*>* statements);
-
- virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- // Visit a statement and then spill the virtual frame if control flow can
- // reach the end of the statement (ie, it does not exit via break,
- // continue, return, or throw). This function is used temporarily while
- // the code generator is being transformed.
- void VisitAndSpill(Statement* statement);
-
- // Visit a list of statements and then spill the virtual frame if control
- // flow can reach the end of the list.
- void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
- // Main code generation function
- void Generate(CompilationInfo* info);
-
- // Generate the return sequence code. Should be called no more than
- // once per compiled function, immediately after binding the return
- // target (which cannot be done more than once).
- void GenerateReturnSequence(Result* return_value);
-
- // Generate code for a fast smi loop.
- void GenerateFastSmiLoop(ForStatement* node);
-
- // Returns the arguments allocation mode.
- ArgumentsAllocationMode ArgumentsMode();
-
- // Store the arguments object and allocate it if necessary.
- Result StoreArgumentsObject(bool initial);
-
- // The following are used by class Reference.
- void LoadReference(Reference* ref);
- void UnloadReference(Reference* ref);
-
- Operand SlotOperand(Slot* slot, Register tmp);
-
- Operand ContextSlotOperandCheckExtensions(Slot* slot,
- Result tmp,
- JumpTarget* slow);
-
- // Expressions
- void LoadCondition(Expression* x,
- ControlDestination* destination,
- bool force_control);
- void Load(Expression* expr);
- void LoadGlobal();
- void LoadGlobalReceiver();
-
- // Generate code to push the value of an expression on top of the frame
- // and then spill the frame fully to memory. This function is used
- // temporarily while the code generator is being transformed.
- void LoadAndSpill(Expression* expression);
-
- // Read a value from a slot and leave it on top of the expression stack.
- void LoadFromSlot(Slot* slot, TypeofState typeof_state);
- void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
- Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow);
-
- // Support for loading from local/global variables and arguments
- // whose location is known unless they are shadowed by
- // eval-introduced bindings. Generates no code for unsupported slot
- // types and therefore expects to fall through to the slow jump target.
- void EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- Result* result,
- JumpTarget* slow,
- JumpTarget* done);
-
- // Store the value on top of the expression stack into a slot, leaving the
- // value in place.
- void StoreToSlot(Slot* slot, InitState init_state);
-
- // Support for compiling assignment expressions.
- void EmitSlotAssignment(Assignment* node);
- void EmitNamedPropertyAssignment(Assignment* node);
- void EmitKeyedPropertyAssignment(Assignment* node);
-
- // Receiver is passed on the frame and not consumed.
- Result EmitNamedLoad(Handle<String> name, bool is_contextual);
-
- // If the store is contextual, value is passed on the frame and consumed.
- // Otherwise, receiver and value are passed on the frame and consumed.
- Result EmitNamedStore(Handle<String> name, bool is_contextual);
-
- // Load a property of an object, returning it in a Result.
- // The object and the property name are passed on the stack, and
- // not changed.
- Result EmitKeyedLoad();
-
- // Receiver, key, and value are passed on the frame and consumed.
- Result EmitKeyedStore(StaticType* key_type);
-
- // Special code for typeof expressions: Unfortunately, we must
- // be careful when loading the expression in 'typeof'
- // expressions. We are not allowed to throw reference errors for
- // non-existing properties of the global object, so we must make it
- // look like an explicit property access, instead of an access
- // through the context chain.
- void LoadTypeofExpression(Expression* x);
-
- // Translate the value on top of the frame into control flow to the
- // control destination.
- void ToBoolean(ControlDestination* destination);
-
- // Generate code that computes a shortcutting logical operation.
- void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
- void GenericBinaryOperation(BinaryOperation* expr,
- OverwriteMode overwrite_mode);
-
- // Generate a stub call from the virtual frame.
- Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
- Result* left,
- Result* right);
-
- // Emits a code sequence that jumps to a JumpTarget if the inputs
- // are both smis. Cannot be in MacroAssembler because it takes
- // advantage of TypeInfo to skip unneeded checks.
- void JumpIfBothSmiUsingTypeInfo(Result* left,
- Result* right,
- JumpTarget* both_smi);
-
- // Emits a code sequence that jumps to deferred code if the input
- // is not a smi. Cannot be in MacroAssembler because it takes
- // advantage of TypeInfo to skip unneeded checks.
- void JumpIfNotSmiUsingTypeInfo(Register reg,
- TypeInfo type,
- DeferredCode* deferred);
-
- // Emits a code sequence that jumps to deferred code if the inputs
- // are not both smis. Cannot be in MacroAssembler because it takes
- // advantage of TypeInfo to skip unneeded checks.
- void JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- TypeInfo left_info,
- TypeInfo right_info,
- DeferredCode* deferred);
-
- // If possible, combine two constant smi values using op to produce
- // a smi result, and push it on the virtual frame, all at compile time.
- // Returns true if it succeeds. Otherwise it has no effect.
- bool FoldConstantSmis(Token::Value op, int left, int right);
-
- // Emit code to perform a binary operation on a constant
- // smi and a likely smi. Consumes the Result *operand.
- Result ConstantSmiBinaryOperation(BinaryOperation* expr,
- Result* operand,
- Handle<Object> constant_operand,
- bool reversed,
- OverwriteMode overwrite_mode);
-
- // Emit code to perform a binary operation on two likely smis.
- // The code to handle smi arguments is produced inline.
- // Consumes the Results *left and *right.
- Result LikelySmiBinaryOperation(BinaryOperation* expr,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode);
-
- void Comparison(AstNode* node,
- Condition cc,
- bool strict,
- ControlDestination* destination);
-
- // If at least one of the sides is a constant smi, generate optimized code.
- void ConstantSmiComparison(Condition cc,
- bool strict,
- ControlDestination* destination,
- Result* left_side,
- Result* right_side,
- bool left_side_constant_smi,
- bool right_side_constant_smi,
- bool is_loop_condition);
-
- void GenerateInlineNumberComparison(Result* left_side,
- Result* right_side,
- Condition cc,
- ControlDestination* dest);
-
- // To prevent long attacker-controlled byte sequences, integer constants
- // from the JavaScript source are loaded in two parts if they are larger
- // than 16 bits.
- static const int kMaxSmiInlinedBits = 16;
- bool IsUnsafeSmi(Handle<Object> value);
- // Load an integer constant x into a register target using
- // at most 16 bits of user-controlled data per assembly operation; see
- // the sketch following this class declaration.
- void LoadUnsafeSmi(Register target, Handle<Object> value);
-
- void CallWithArguments(ZoneList<Expression*>* arguments,
- CallFunctionFlags flags,
- int position);
-
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments). We call x the applicand and y the receiver.
- // The optimization avoids allocating an arguments object if possible.
- void CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position);
-
- void CheckStack();
-
- bool CheckForInlineRuntimeCall(CallRuntime* node);
-
- void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
- // Declare global variables and functions in the given array of
- // name/value pairs.
- void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Instantiate the function based on the shared function info.
- void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
- bool pretenure);
-
- // Support for type checks.
- void GenerateIsSmi(ZoneList<Expression*>* args);
- void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
- void GenerateIsArray(ZoneList<Expression*>* args);
- void GenerateIsRegExp(ZoneList<Expression*>* args);
- void GenerateIsObject(ZoneList<Expression*>* args);
- void GenerateIsSpecObject(ZoneList<Expression*>* args);
- void GenerateIsFunction(ZoneList<Expression*>* args);
- void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
- void GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args);
-
- // Support for construct call checks.
- void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
- // Support for arguments.length and arguments[?].
- void GenerateArgumentsLength(ZoneList<Expression*>* args);
- void GenerateArguments(ZoneList<Expression*>* args);
-
- // Support for accessing the class and value fields of an object.
- void GenerateClassOf(ZoneList<Expression*>* args);
- void GenerateValueOf(ZoneList<Expression*>* args);
- void GenerateSetValueOf(ZoneList<Expression*>* args);
-
- // Fast support for charCodeAt(n).
- void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharAt(ZoneList<Expression*>* args);
-
- // Fast support for object equality testing.
- void GenerateObjectEquals(ZoneList<Expression*>* args);
-
- void GenerateLog(ZoneList<Expression*>* args);
-
- void GenerateGetFramePointer(ZoneList<Expression*>* args);
-
- // Fast support for Math.random().
- void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
- // Fast support for StringAdd.
- void GenerateStringAdd(ZoneList<Expression*>* args);
-
- // Fast support for SubString.
- void GenerateSubString(ZoneList<Expression*>* args);
-
- // Fast support for StringCompare.
- void GenerateStringCompare(ZoneList<Expression*>* args);
-
- // Support for direct calls from JavaScript to native RegExp code.
- void GenerateRegExpExec(ZoneList<Expression*>* args);
-
- void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
- // Support for fast native caches.
- void GenerateGetFromCache(ZoneList<Expression*>* args);
-
- // Fast support for number to string.
- void GenerateNumberToString(ZoneList<Expression*>* args);
-
- // Fast swapping of elements. Takes three expressions, the object and two
- // indices. This should only be used if the indices are known to be
- // non-negative and within bounds of the elements array at the call site.
- void GenerateSwapElements(ZoneList<Expression*>* args);
-
- // Fast call for custom callbacks.
- void GenerateCallFunction(ZoneList<Expression*>* args);
-
- // Fast call to math functions.
- void GenerateMathPow(ZoneList<Expression*>* args);
- void GenerateMathSin(ZoneList<Expression*>* args);
- void GenerateMathCos(ZoneList<Expression*>* args);
- void GenerateMathSqrt(ZoneList<Expression*>* args);
- void GenerateMathLog(ZoneList<Expression*>* args);
-
- // Check whether two RegExps are equivalent.
- void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
- void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
- // Simple condition analysis.
- enum ConditionAnalysis {
- ALWAYS_TRUE,
- ALWAYS_FALSE,
- DONT_KNOW
- };
- ConditionAnalysis AnalyzeCondition(Expression* cond);
-
- // Methods used to indicate which source position code is being generated
- // for. Source positions are collected by the assembler and emitted with
- // the relocation information.
- void CodeForFunctionPosition(FunctionLiteral* fun);
- void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Statement* node);
- void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
- void CodeForSourcePosition(int pos);
-
- void SetTypeForStackSlot(Slot* slot, TypeInfo info);
-
-#ifdef DEBUG
- // True if the registers are valid for entry to a block. There should
- // be no frame-external references to (non-reserved) registers.
- bool HasValidEntryRegisters();
-#endif
-
- ZoneList<DeferredCode*> deferred_;
-
- // Assembler
- MacroAssembler* masm_; // to generate code
-
- CompilationInfo* info_;
-
- // Code generation state
- VirtualFrame* frame_;
- RegisterAllocator* allocator_;
- CodeGenState* state_;
- int loop_nesting_;
-
- // Jump targets.
- // The target of the return from the function.
- BreakTarget function_return_;
-
- // True if the function return is shadowed (ie, jumping to the target
- // function_return_ does not jump to the true function return, but rather
- // to some unlinking code).
- bool function_return_is_shadowed_;
-
- // True when we are in code that expects the virtual frame to be fully
- // spilled. Some virtual frame functions are disabled in DEBUG builds when
- // called from spilled code, because they do not leave the virtual frame
- // in a spilled state.
- bool in_spilled_code_;
-
- friend class VirtualFrame;
- friend class JumpTarget;
- friend class Reference;
- friend class Result;
- friend class FastCodeGenerator;
- friend class FullCodeGenerator;
- friend class FullCodeGenSyntaxChecker;
-
- friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
-
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
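For the LoadUnsafeSmi contract referenced in the class above, the essential property is that no single instruction immediate carries more than 16 attacker-chosen bits. A self-contained sketch of the split and recombination; the exact instruction sequence the removed code emitted is not reproduced here:

#include <cstdint>

static int32_t SplitAndRecombine(int32_t value) {
  uint32_t bits = static_cast<uint32_t>(value);
  uint32_t high = bits >> 16;     // immediate of the first instruction
  uint32_t low = bits & 0xFFFFu;  // immediate of the second instruction
  return static_cast<int32_t>((high << 16) | low);  // register ends up == value
}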
diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/x64/cpu-x64.cc
index 3ff292e82..e637ba124 100644
--- a/deps/v8/src/x64/cpu-x64.cc
+++ b/deps/v8/src/x64/cpu-x64.cc
@@ -42,10 +42,12 @@ namespace v8 {
namespace internal {
void CPU::Setup() {
- CpuFeatures::Probe(true);
- if (Serializer::enabled()) {
- V8::DisableCrankshaft();
- }
+ CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+ return true; // Yay!
}
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 2c50ddd14..423e6f244 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -29,7 +29,8 @@
#if defined(V8_TARGET_ARCH_X64)
-#include "codegen-inl.h"
+#include "assembler.h"
+#include "codegen.h"
#include "debug.h"
@@ -49,7 +50,8 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Assembler::kJSReturnSequenceLength >=
Assembler::kCallInstructionLength);
- rinfo()->PatchCodeWithCall(Debug::debug_break_return()->entry(),
+ rinfo()->PatchCodeWithCall(
+ Isolate::Current()->debug()->debug_break_return()->entry(),
Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}
@@ -79,7 +81,7 @@ bool BreakLocationIterator::IsDebugBreakAtSlot() {
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
rinfo()->PatchCodeWithCall(
- Debug::debug_break_slot()->entry(),
+ Isolate::Current()->debug()->debug_break_slot()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
}
@@ -128,7 +130,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ Set(rax, 0); // No arguments (argc == 0).
- __ movq(rbx, ExternalReference::debug_break());
+ __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
CEntryStub ceb(1);
__ CallStub(&ceb);
@@ -167,7 +169,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget());
+ ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
__ movq(kScratchRegister, after_break_target);
__ jmp(Operand(kScratchRegister, 0));
}
@@ -283,7 +285,8 @@ void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
ExternalReference restarter_frame_function_slot =
- ExternalReference(Debug_Address::RestarterFrameFunctionPointer());
+ ExternalReference(Debug_Address::RestarterFrameFunctionPointer(),
+ masm->isolate());
__ movq(rax, restarter_frame_function_slot);
__ movq(Operand(rax, 0), Immediate(0));
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 61f438110..abac2b6b3 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -107,6 +107,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ HandleScope scope;
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
@@ -191,8 +192,9 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- node->set_next(deoptimizing_code_list_);
- deoptimizing_code_list_ = node;
+ DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
+ node->set_next(data->deoptimizing_code_list_);
+ data->deoptimizing_code_list_ = node;
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@@ -201,6 +203,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+#ifdef DEBUG
+ if (FLAG_print_code) {
+ code->PrintLn();
+ }
+#endif
}
}
@@ -354,13 +361,32 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Set them up explicitly.
- for (int i = 0; ok && i < 4; i++) {
+ for (int i = StandardFrameConstants::kCallerPCOffset;
+ ok && i >= StandardFrameConstants::kMarkerOffset;
+ i -= kPointerSize) {
intptr_t input_value = input_->GetFrameSlot(input_offset);
if (FLAG_trace_osr) {
- PrintF(" [esp + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] (fixed part)\n",
+ const char* name = "UNKNOWN";
+ switch (i) {
+ case StandardFrameConstants::kCallerPCOffset:
+ name = "caller's pc";
+ break;
+ case StandardFrameConstants::kCallerFPOffset:
+ name = "fp";
+ break;
+ case StandardFrameConstants::kContextOffset:
+ name = "context";
+ break;
+ case StandardFrameConstants::kMarkerOffset:
+ name = "function";
+ break;
+ }
+ PrintF(" [rsp + %d] <- 0x%08" V8PRIxPTR " ; [rsp + %d] "
+ "(fixed part - %s)\n",
output_offset,
input_value,
- input_offset);
+ input_offset,
+ name);
}
output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
input_offset -= kPointerSize;
@@ -387,7 +413,8 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
optimized_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
- Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
+ Code* continuation =
+ function->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
@@ -559,8 +586,8 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// Set the continuation for the topmost frame.
if (is_topmost) {
Code* continuation = (bailout_type_ == EAGER)
- ? Builtins::builtin(Builtins::NotifyDeoptimized)
- : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
+ ? isolate_->builtins()->builtin(Builtins::kNotifyDeoptimized)
+ : isolate_->builtins()->builtin(Builtins::kNotifyLazyDeoptimized);
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
}
@@ -573,7 +600,6 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
- CpuFeatures::Scope scope(SSE2);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -636,21 +662,26 @@ void Deoptimizer::EntryGenerator::Generate() {
__ neg(arg5);
// Allocate a new deoptimizer object.
- __ PrepareCallCFunction(5);
+ __ PrepareCallCFunction(6);
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movq(arg1, rax);
- __ movq(arg2, Immediate(type()));
+ __ Set(arg2, type());
// Args 3 and 4 are already in the right registers.
- // On windows put the argument on the stack (PrepareCallCFunction have
- // created space for this). On linux pass the argument in r8.
+ // On windows put the arguments on the stack (PrepareCallCFunction
+ // has created space for this). On linux pass the arguments in r8 and r9.
#ifdef _WIN64
- __ movq(Operand(rsp, 0 * kPointerSize), arg5);
+ __ movq(Operand(rsp, 4 * kPointerSize), arg5);
+ __ LoadAddress(arg5, ExternalReference::isolate_address());
+ __ movq(Operand(rsp, 5 * kPointerSize), arg5);
#else
__ movq(r8, arg5);
+ __ LoadAddress(r9, ExternalReference::isolate_address());
#endif
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
+ Isolate* isolate = masm()->isolate();
+
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
// Preserve deoptimizer object in register rax and get the input
// frame descriptor pointer.
__ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
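The #ifdef _WIN64 split follows the two x64 C calling conventions; for reference (standard ABI facts, not something this patch defines):

// System V AMD64 (Linux/Mac): integer args 1-6 in rdi, rsi, rdx, rcx, r8, r9.
// Microsoft x64: integer args 1-4 in rcx, rdx, r8, r9; args 5 and 6 go in
// stack slots above the 32-byte shadow space, hence the stores at
// rsp + 4 * kPointerSize and rsp + 5 * kPointerSize.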
@@ -693,9 +724,11 @@ void Deoptimizer::EntryGenerator::Generate() {
// Compute the output frame in the deoptimizer.
__ push(rax);
- __ PrepareCallCFunction(1);
+ __ PrepareCallCFunction(2);
__ movq(arg1, rax);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ __ LoadAddress(arg2, ExternalReference::isolate_address());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate), 2);
__ pop(rax);
// Replace the current frame with the output frames.
@@ -753,12 +786,8 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Set up the roots register.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ movq(r13, roots_address);
-
- __ movq(kSmiConstantRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE);
+ __ InitializeRootRegister();
+ __ InitializeSmiConstantRegister();
// Return to the continuation point.
__ ret(0);
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 21a100f59..14c95bc5a 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -269,6 +269,7 @@ void InstructionTable::AddJumpConditionalShort() {
static InstructionTable instruction_table;
+
static InstructionDesc cmov_instructions[16] = {
{"cmovo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
{"cmovno", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
@@ -451,9 +452,11 @@ void DisassemblerX64::AppendToBuffer(const char* format, ...) {
int DisassemblerX64::PrintRightOperandHelper(
byte* modrmp,
- RegisterNameMapping register_name) {
+ RegisterNameMapping direct_register_name) {
int mod, regop, rm;
get_modrm(*modrmp, &mod, &regop, &rm);
+ RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
+ &DisassemblerX64::NameOfCPURegister;
switch (mod) {
case 0:
if ((rm & 7) == 5) {
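Here mod == 3 selects the register-direct form, the only case where the instruction-specific register namer applies; memory forms always name ordinary CPU registers. The field split get_modrm relies on is the standard one (sketch; REX extension bits are handled separately):

#include <cstdint>

static void ModRMFields(uint8_t modrm, int* mod, int* regop, int* rm) {
  *mod = (modrm >> 6) & 0x3;    // 0-2: memory forms, 3: register-direct
  *regop = (modrm >> 3) & 0x7;  // register, or an opcode extension
  *rm = modrm & 0x7;            // register or memory operand
}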
@@ -649,6 +652,9 @@ int DisassemblerX64::PrintImmediateOp(byte* data) {
case 2:
mnem = "adc";
break;
+ case 3:
+ mnem = "sbb";
+ break;
case 4:
mnem = "and";
break;
@@ -1015,12 +1021,26 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightOperand(current);
AppendToBuffer(", %s, %d", NameOfCPURegister(regop), (*current) & 3);
current += 1;
+ } else if (third_byte == 0x0b) {
+ get_modrm(*current, &mod, &regop, &rm);
+ // roundsd xmm, xmm/m64, imm8
+ AppendToBuffer("roundsd %s, ", NameOfCPURegister(regop));
+ current += PrintRightOperand(current);
+ AppendToBuffer(", %d", (*current) & 3);
+ current += 1;
} else {
UnimplementedInstruction();
}
} else {
get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x6E) {
+ if (opcode == 0x28) {
+ AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x29) {
+ AppendToBuffer("movapd ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ } else if (opcode == 0x6E) {
AppendToBuffer("mov%c %s,",
rex_w() ? 'q' : 'd',
NameOfXMMRegister(regop));
@@ -1028,7 +1048,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x6F) {
AppendToBuffer("movdqa %s,",
NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
+ current += PrintRightXMMOperand(current);
} else if (opcode == 0x7E) {
AppendToBuffer("mov%c ",
rex_w() ? 'q' : 'd');
@@ -1036,13 +1056,18 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer(", %s", NameOfXMMRegister(regop));
} else if (opcode == 0x7F) {
AppendToBuffer("movdqa ");
- current += PrintRightOperand(current);
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ } else if (opcode == 0xD6) {
+ AppendToBuffer("movq ");
+ current += PrintRightXMMOperand(current);
AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ } else if (opcode == 0x50) {
+ AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
} else {
const char* mnemonic = "?";
- if (opcode == 0x50) {
- mnemonic = "movmskpd";
- } else if (opcode == 0x54) {
+ if (opcode == 0x54) {
mnemonic = "andpd";
} else if (opcode == 0x56) {
mnemonic = "orpd";
@@ -1068,11 +1093,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
if (opcode == 0x11) {
- current += PrintRightOperand(current);
+ current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else {
AppendToBuffer("%s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
+ current += PrintRightXMMOperand(current);
}
} else if (opcode == 0x2A) {
// CVTSI2SD: integer to XMM double conversion.
@@ -1139,6 +1164,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x7E) {
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("movq %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
} else {
UnimplementedInstruction();
}
@@ -1156,6 +1186,22 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += 4;
} // else no immediate displacement.
AppendToBuffer("nop");
+
+ } else if (opcode == 0x28) {
+ // movaps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("movaps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x29) {
+ // movaps xmm/m128, xmm
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("movaps ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %s", NameOfXMMRegister(regop));
+
} else if (opcode == 0xA2 || opcode == 0x31) {
// RDTSC or CPUID
AppendToBuffer("%s", mnemonic);
@@ -1167,6 +1213,13 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
byte_size_operand_ = idesc.byte_size_operation;
current += PrintOperands(idesc.mnem, idesc.op_order_, current);
+ } else if (opcode == 0x57) {
+ // xorps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("xorps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
} else if ((opcode & 0xF0) == 0x80) {
// Jcc: Conditional jump (branch).
current = data + JumpConditional(data);
@@ -1435,19 +1488,26 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
{
bool is_byte = *data == 0xC6;
data++;
-
- AppendToBuffer("mov%c ", is_byte ? 'b' : operand_size_code());
- data += PrintRightOperand(data);
- int32_t imm = is_byte ? *data : *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += is_byte ? 1 : 4;
+ if (is_byte) {
+ AppendToBuffer("movb ");
+ data += PrintRightByteOperand(data);
+ int32_t imm = *data;
+ AppendToBuffer(",0x%x", imm);
+ data++;
+ } else {
+ AppendToBuffer("mov%c ", operand_size_code());
+ data += PrintRightOperand(data);
+ int32_t imm = *reinterpret_cast<int32_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 4;
+ }
}
break;
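// A sketch of what the split above decodes, with hypothetical example bytes
// (not taken from this diff): 0xC6 /0 carries an imm8 and 0xC7 /0 an imm32,
// so each arm advances `data` by a different amount.
//   C6 45 F8 2A           -> movb [rbp-0x8],0x2a   (imm8:  data += 1)
//   C7 45 F8 2A 00 00 00  -> mov  [rbp-0x8],0x2a   (imm32: data += 4)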
case 0x80: {
data++;
AppendToBuffer("cmpb ");
- data += PrintRightOperand(data);
+ data += PrintRightByteOperand(data);
int32_t imm = *data;
AppendToBuffer(",0x%x", imm);
data++;
@@ -1461,9 +1521,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
data++;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("mov%c ", is_byte ? 'b' : operand_size_code());
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
+ if (is_byte) {
+ AppendToBuffer("movb ");
+ data += PrintRightByteOperand(data);
+ AppendToBuffer(",%s", NameOfByteCPURegister(regop));
+ } else {
+ AppendToBuffer("mov%c ", operand_size_code());
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ }
}
break;
@@ -1486,20 +1552,51 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
}
break;
-
+ case 0xB0:
+ case 0xB1:
+ case 0xB2:
+ case 0xB3:
+ case 0xB4:
+ case 0xB5:
+ case 0xB6:
+ case 0xB7:
+ case 0xB8:
+ case 0xB9:
+ case 0xBA:
+ case 0xBB:
+ case 0xBC:
+ case 0xBD:
+ case 0xBE:
+ case 0xBF: {
+ // mov reg8,imm8 or mov reg32,imm32
+ byte opcode = *data;
+ data++;
+ bool is_32bit = (opcode >= 0xB8);
+ int reg = (opcode & 0x7) | (rex_b() ? 8 : 0);
+ if (is_32bit) {
+ AppendToBuffer("mov%c %s, ",
+ operand_size_code(),
+ NameOfCPURegister(reg));
+ data += PrintImmediate(data, DOUBLEWORD_SIZE);
+ } else {
+ AppendToBuffer("movb %s, ",
+ NameOfByteCPURegister(reg));
+ data += PrintImmediate(data, BYTE_SIZE);
+ }
+ break;
+ }
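// Decoding sketch for this opcode range: the low three bits of the opcode
// select the register, rex_b() adds 8 to reach r8-r15, and from 0xB8 upward
// this decoder reads a doubleword immediate instead of a single byte. For
// example (illustrative bytes only), 0xB3 0x07 is "movb bl,0x7" (reg = 3),
// while 0xBB followed by four immediate bytes is the 32-bit form for the
// same register number.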
case 0xFE: {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
if (regop == 1) {
AppendToBuffer("decb ");
- data += PrintRightOperand(data);
+ data += PrintRightByteOperand(data);
} else {
UnimplementedInstruction();
}
- }
break;
-
+ }
case 0x68:
AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data + 1));
data += 5;
@@ -1652,9 +1749,8 @@ static const char* xmm_regs[16] = {
const char* NameConverter::NameOfAddress(byte* addr) const {
- static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
- v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
- return tmp_buffer.start();
+ v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
}
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 81be81919..b14267c82 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -99,7 +99,7 @@ class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kSavedRegistersOffset = +2 * kPointerSize;
+ static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// Caller SP-relative.
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 60b77b5bf..4283f1be6 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_X64)
#include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
@@ -44,6 +44,12 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+static unsigned GetPropertyId(Property* property) {
+ if (property->is_synthetic()) return AstNode::kNoNumber;
+ return property->id();
+}
+
+
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm)
@@ -57,14 +63,18 @@ class JumpPatchSite BASE_EMBEDDED {
ASSERT(patch_site_.is_bound() == info_emitted_);
}
- void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+ void EmitJumpIfNotSmi(Register reg,
+ Label* target,
+ Label::Distance near_jump = Label::kFar) {
__ testb(reg, Immediate(kSmiTagMask));
- EmitJump(not_carry, target); // Always taken before patched.
+ EmitJump(not_carry, target, near_jump); // Always taken before patched.
}
- void EmitJumpIfSmi(Register reg, NearLabel* target) {
+ void EmitJumpIfSmi(Register reg,
+ Label* target,
+ Label::Distance near_jump = Label::kFar) {
__ testb(reg, Immediate(kSmiTagMask));
- EmitJump(carry, target); // Never taken before patched.
+ EmitJump(carry, target, near_jump); // Never taken before patched.
}
void EmitPatchInfo() {
@@ -80,11 +90,11 @@ class JumpPatchSite BASE_EMBEDDED {
private:
// jc will be patched with jz, jnc will become jnz.
- void EmitJump(Condition cc, NearLabel* target) {
+ void EmitJump(Condition cc, Label* target, Label::Distance near_jump) {
ASSERT(!patch_site_.is_bound() && !info_emitted_);
ASSERT(cc == carry || cc == not_carry);
__ bind(&patch_site_);
- __ j(cc, target);
+ __ j(cc, target, near_jump);
}
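// The single-byte patch works because the short jcc opcodes sit one step
// apart: jc is 0x72 and jz is 0x74, jnc is 0x73 and jnz is 0x75 (values from
// the x86 manual, not from this diff), so the IC machinery can flip the
// recorded jump by rewriting the opcode byte bound at patch_site_.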
MacroAssembler* masm_;
@@ -120,6 +130,22 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ int3();
}
#endif
+
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). rcx is zero for method calls and non-zero for
+ // function calls.
+ if (info->is_strict_mode() || info->is_native()) {
+ Label ok;
+ __ testq(rcx, rcx);
+ __ j(zero, &ok, Label::kNear);
+ // +1 for return address.
+ int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+ __ bind(&ok);
+ }
+
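// A minimal sketch of the behavior this prologue implements: given
//   "use strict"; function f() { return this; }
// a plain call f() must observe this === undefined, so when rcx is non-zero
// (a function-style call) the receiver slot on the stack is overwritten with
// the undefined root before the frame is set up.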
__ push(rbp); // Caller's frame pointer.
__ movq(rbp, rsp);
__ push(rsi); // Callee's context.
@@ -149,7 +175,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in both rax and rsi. It replaces the context
@@ -198,13 +224,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ ArgumentsAccessStub stub(
+ is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
+ : ArgumentsAccessStub::NEW_NON_STRICT_SLOW);
__ CallStub(&stub);
- // Store new arguments object in both "arguments" and ".arguments" slots.
- __ movq(rcx, rax);
+
Move(arguments->AsSlot(), rax, rbx, rdx);
- Slot* dot_arguments_slot = scope()->arguments_shadow()->AsSlot();
- Move(dot_arguments_slot, rcx, rbx, rdx);
}
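// With the shadow ".arguments" slot gone, the fresh arguments object only
// needs a single store; the stub variant now also encodes strictness, since
// strict-mode arguments objects do not alias their parameters (semantics
// assumed from the enum names; the stub body is not part of this hunk).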
if (FLAG_trace) {
@@ -227,10 +252,10 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
{ Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
- NearLabel ok;
+ PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+ Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok);
+ __ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
@@ -259,9 +284,9 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
Comment cmnt(masm_, "[ Stack check");
- NearLabel ok;
+ Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok);
+ __ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
@@ -346,7 +371,7 @@ void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
codegen()->Move(result_register(), slot);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -379,7 +404,7 @@ void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
if (true_label_ != fall_through_) __ jmp(true_label_);
} else {
__ LoadRoot(result_register(), index);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -424,7 +449,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
} else {
// For simplicity we always test the accumulator register.
__ Move(result_register(), lit);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -460,7 +485,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
__ Drop(count);
__ Move(result_register(), reg);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -474,12 +499,12 @@ void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
void FullCodeGenerator::AccumulatorValueContext::Plug(
Label* materialize_true,
Label* materialize_false) const {
- NearLabel done;
+ Label done;
__ bind(materialize_true);
- __ Move(result_register(), Factory::true_value());
- __ jmp(&done);
+ __ Move(result_register(), isolate()->factory()->true_value());
+ __ jmp(&done, Label::kNear);
__ bind(materialize_false);
- __ Move(result_register(), Factory::false_value());
+ __ Move(result_register(), isolate()->factory()->false_value());
__ bind(&done);
}
@@ -487,12 +512,12 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(
Label* materialize_true,
Label* materialize_false) const {
- NearLabel done;
+ Label done;
__ bind(materialize_true);
- __ Push(Factory::true_value());
- __ jmp(&done);
+ __ Push(isolate()->factory()->true_value());
+ __ jmp(&done, Label::kNear);
__ bind(materialize_false);
- __ Push(Factory::false_value());
+ __ Push(isolate()->factory()->false_value());
__ bind(&done);
}
@@ -535,28 +560,14 @@ void FullCodeGenerator::TestContext::Plug(bool flag) const {
}
-void FullCodeGenerator::DoTest(Label* if_true,
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
Label* if_false,
Label* fall_through) {
- // Emit the inlined tests assumed by the stub.
- __ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
- __ j(equal, if_false);
- __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
- __ j(equal, if_true);
- __ CompareRoot(result_register(), Heap::kFalseValueRootIndex);
- __ j(equal, if_false);
- STATIC_ASSERT(kSmiTag == 0);
- __ SmiCompare(result_register(), Smi::FromInt(0));
- __ j(equal, if_false);
- Condition is_smi = masm_->CheckSmi(result_register());
- __ j(is_smi, if_true);
-
- // Call the ToBoolean stub for all other cases.
- ToBooleanStub stub;
+ ToBooleanStub stub(result_register());
__ push(result_register());
__ CallStub(&stub);
- __ testq(rax, rax);
-
+ __ testq(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
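// The undefined/true/false/smi fast paths that used to be emitted inline
// here now live in ToBooleanStub, which is constructed with the register
// holding the value under test; the contract visible in this hunk is
// unchanged in one respect: the stub reports truth as a nonzero value in
// result_register().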
@@ -627,8 +638,8 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
// preparation to avoid preparing with the same AST id twice.
if (!context()->IsTest() || !info_->IsOptimizable()) return;
- NearLabel skip;
- if (should_normalize) __ jmp(&skip);
+ Label skip;
+ if (should_normalize) __ jmp(&skip, Label::kNear);
ForwardBailoutStack* current = forward_bailout_stack_;
while (current != NULL) {
@@ -669,13 +680,16 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
// We bypass the general EmitSlotSearch because we know more about
// this specific context.
- // The variable in the decl always resides in the current context.
+ // The variable in the decl always resides in the current function
+ // context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check if we have the correct context pointer.
- __ movq(rbx, ContextOperand(rsi, Context::FCONTEXT_INDEX));
- __ cmpq(rbx, rsi);
- __ Check(equal, "Unexpected declaration in current context.");
+ // Check that we're not inside a with or catch context.
+ __ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
+ __ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
+ __ Check(not_equal, "Declaration in with context.");
+ __ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
+ __ Check(not_equal, "Declaration in catch context.");
}
if (mode == Variable::CONST) {
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
@@ -714,31 +728,28 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
}
} else if (prop != NULL) {
- if (function != NULL || mode == Variable::CONST) {
- // We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value. We
- // cannot visit the rewrite because it's shared and we risk
- // recording duplicate AST IDs for bailouts from optimized code.
+ // A const declaration aliasing a parameter is an illegal redeclaration.
+ ASSERT(mode != Variable::CONST);
+ if (function != NULL) {
+ // We are declaring a function that rewrites to a property.
+ // Use (keyed) IC to set the initial value. We cannot visit the
+ // rewrite because it's shared and we risk recording duplicate AST
+ // IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
- if (function != NULL) {
- __ push(rax);
- VisitForAccumulatorValue(function);
- __ pop(rdx);
- } else {
- __ movq(rdx, rax);
- __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
- }
+ __ push(rax);
+ VisitForAccumulatorValue(function);
+ __ pop(rdx);
ASSERT(prop->key()->AsLiteral() != NULL &&
prop->key()->AsLiteral()->handle()->IsSmi());
__ Move(rcx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(is_strict()
- ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
}
}
}
@@ -776,7 +787,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Compile all the tests with branches to their bodies.
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
- clause->body_target()->entry_label()->Unuse();
+ clause->body_target()->Unuse();
// The default is not a test, but remember it as final fall through.
if (clause->is_default()) {
@@ -796,27 +807,27 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
- NearLabel slow_case;
+ Label slow_case;
__ movq(rcx, rdx);
__ or_(rcx, rax);
- patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+ patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
__ cmpq(rdx, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
+ __ jmp(clause->body_target());
__ bind(&slow_case);
}
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site);
+ EmitCallIC(ic, &patch_site, clause->CompareId());
__ testq(rax, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
+ __ jmp(clause->body_target());
}
// Discard the test value and jump to the default if present, otherwise to
@@ -826,14 +837,15 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
if (default_clause == NULL) {
__ jmp(nested_statement.break_target());
} else {
- __ jmp(default_clause->body_target()->entry_label());
+ __ jmp(default_clause->body_target());
}
// Compile all the case bodies.
for (int i = 0; i < clauses->length(); i++) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target()->entry_label());
+ __ bind(clause->body_target());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
VisitStatements(clause->statements());
}
@@ -864,7 +876,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(rax, &convert);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &done_convert);
__ bind(&convert);
__ push(rax);
@@ -895,9 +907,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// check for an enum cache. Leave the map in rbx for the subsequent
// prototype load.
__ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
- __ cmpq(rdx, empty_descriptor_array_value);
- __ j(equal, &call_runtime);
+ __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset));
+ __ JumpIfSmi(rdx, &call_runtime);
// Check that there is an enum cache in the non-empty instance
// descriptors (rdx). This is the case if the next enumeration
@@ -906,9 +917,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ JumpIfSmi(rdx, &call_runtime);
// For all objects but the receiver, check that the cache is empty.
- NearLabel check_prototype;
+ Label check_prototype;
__ cmpq(rcx, rax);
- __ j(equal, &check_prototype);
+ __ j(equal, &check_prototype, Label::kNear);
__ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
__ cmpq(rdx, empty_fixed_array_value);
__ j(not_equal, &call_runtime);
@@ -921,9 +932,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
- NearLabel use_cache;
+ Label use_cache;
__ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
- __ jmp(&use_cache);
+ __ jmp(&use_cache, Label::kNear);
// Get the set of properties to enumerate.
__ bind(&call_runtime);
@@ -933,14 +944,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
- NearLabel fixed_array;
+ Label fixed_array;
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kMetaMapRootIndex);
- __ j(not_equal, &fixed_array);
+ __ j(not_equal, &fixed_array, Label::kNear);
// We got a map in register rax. Get the enumeration cache from it.
__ bind(&use_cache);
- __ movq(rcx, FieldOperand(rax, Map::kInstanceDescriptorsOffset));
+ __ LoadInstanceDescriptors(rax, rcx);
__ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
__ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
@@ -980,10 +991,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check if the expected map still matches that of the enumerable.
// If not, we have to filter the key.
- NearLabel update_each;
+ Label update_each;
__ movq(rcx, Operand(rsp, 4 * kPointerSize));
__ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ j(equal, &update_each);
+ __ j(equal, &update_each, Label::kNear);
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
@@ -991,7 +1002,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(rcx); // Enumerable.
__ push(rbx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ SmiCompare(rax, Smi::FromInt(0));
+ __ Cmp(rax, Smi::FromInt(0));
__ j(equal, loop_statement.continue_target());
__ movq(rbx, rax);
@@ -1035,16 +1046,18 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
// doesn't just get a copy of the existing unoptimized code.
if (!FLAG_always_opt &&
!FLAG_prepare_always_opt &&
+ !pretenure &&
scope()->is_function_scope() &&
- info->num_literals() == 0 &&
- !pretenure) {
- FastNewClosureStub stub;
+ info->num_literals() == 0) {
+ FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
__ Push(info);
__ CallStub(&stub);
} else {
__ push(rsi);
__ Push(info);
- __ Push(pretenure ? Factory::true_value() : Factory::false_value());
+ __ Push(pretenure
+ ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value());
__ CallRuntime(Runtime::kNewClosure, 3);
}
context()->Plug(rax);
@@ -1074,8 +1087,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
__ j(not_equal, slow);
}
// Load next context in chain.
- __ movq(temp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering rsi.
context = temp;
}
@@ -1089,7 +1101,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
if (s != NULL && s->is_eval_scope()) {
// Loop up the context chain. There is no frame effect so it is
// safe to use raw labels here.
- NearLabel next, fast;
+ Label next, fast;
if (!context.is(temp)) {
__ movq(temp, context);
}
@@ -1098,13 +1110,12 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
__ bind(&next);
// Terminate at global context.
__ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
- __ j(equal, &fast);
+ __ j(equal, &fast, Label::kNear);
// Check that extension is NULL.
__ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// Load next context in chain.
- __ movq(temp, ContextOperand(temp, Context::CLOSURE_INDEX));
- __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ movq(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
__ jmp(&next);
__ bind(&fast);
}
@@ -1113,11 +1124,11 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
// load IC call.
__ movq(rax, GlobalObjectOperand());
__ Move(rcx, slot->var()->name());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- EmitCallIC(ic, mode);
+ EmitCallIC(ic, mode, AstNode::kNoNumber);
}
@@ -1136,8 +1147,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
Immediate(0));
__ j(not_equal, slow);
}
- __ movq(temp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering rsi.
context = temp;
}
@@ -1196,8 +1206,9 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
slow));
__ Move(rax, key_literal->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic =
+ isolate()->builtins()->KeyedLoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ jmp(done);
}
}
@@ -1207,20 +1218,19 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
void FullCodeGenerator::EmitVariableLoad(Variable* var) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
+ // Three cases: non-this global variables, lookup slots, and all other
+ // types of slots.
Slot* slot = var->AsSlot();
- Property* property = var->AsProperty();
+ ASSERT((var->is_global() && !var->is_this()) == (slot == NULL));
- if (var->is_global() && !var->is_this()) {
+ if (slot == NULL) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
__ Move(rcx, var->name());
__ movq(rax, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
context()->Plug(rax);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -1239,52 +1249,24 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) {
context()->Plug(rax);
- } else if (slot != NULL) {
+ } else {
Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
? "Context slot"
: "Stack slot");
if (var->mode() == Variable::CONST) {
// Constants may be the hole value if they have not been initialized.
// Unhole them.
- NearLabel done;
+ Label done;
MemOperand slot_operand = EmitSlotSearch(slot, rax);
__ movq(rax, slot_operand);
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &done);
+ __ j(not_equal, &done, Label::kNear);
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
__ bind(&done);
context()->Plug(rax);
} else {
context()->Plug(slot);
}
-
- } else {
- Comment cmnt(masm_, "Rewritten parameter");
- ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
-
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->AsSlot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- MemOperand object_loc = EmitSlotSearch(object_slot, rax);
- __ movq(rdx, object_loc);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ Move(rax, key_literal->handle());
-
- // Do a keyed property load.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- context()->Plug(rax);
}
}
@@ -1349,7 +1331,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->constant_properties());
- __ Push(Smi::FromInt(expr->fast_elements() ? 1 : 0));
+ int flags = expr->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ Push(Smi::FromInt(flags));
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
@@ -1387,10 +1375,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ Move(rcx, key->handle());
__ movq(rdx, Operand(rsp, 0));
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1422,6 +1410,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
+ if (expr->has_function()) {
+ ASSERT(result_saved);
+ __ push(Operand(rsp, 0));
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1440,11 +1434,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->constant_elements());
- if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ if (expr->constant_elements()->map() ==
+ isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
__ CallStub(&stub);
- __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
+ __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
} else if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
@@ -1504,7 +1499,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
@@ -1530,55 +1525,38 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case KEYED_PROPERTY: {
if (expr->is_compound()) {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
- __ push(slot_operand);
- __ Move(rax, property->key()->AsLiteral()->handle());
- } else {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
__ movq(rdx, Operand(rsp, 0));
__ push(rax);
} else {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
- __ push(slot_operand);
- __ Push(property->key()->AsLiteral()->handle());
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
}
break;
}
}
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
if (expr->is_compound()) {
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
break;
}
}
- // For property compound assignments we need another deoptimization
- // point after the property load.
- if (property != NULL) {
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
- }
-
Token::Value op = expr->binary_op();
__ push(rax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
@@ -1589,13 +1567,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr,
+ EmitInlineSmiBinaryOp(expr->binary_operation(),
op,
mode,
expr->target(),
expr->value());
} else {
- EmitBinaryOp(op, mode);
+ EmitBinaryOp(expr->binary_operation(), op, mode);
}
// Deoptimization point in case the binary operation may have side effects.
PrepareForBailout(expr->binary_operation(), TOS_REG);
@@ -1628,19 +1606,19 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode,
Expression* left,
@@ -1648,18 +1626,18 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
// Do combined smi check of the operands. Left operand is on the
// stack (popped into rdx). Right operand is in rax but moved into
// rcx to make the shifts easier.
- NearLabel done, stub_call, smi_case;
+ Label done, stub_call, smi_case;
__ pop(rdx);
__ movq(rcx, rax);
__ or_(rax, rdx);
JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(rax, &smi_case);
+ patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
__ bind(&stub_call);
__ movq(rax, rcx);
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site);
- __ jmp(&done);
+ BinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+ __ jmp(&done, Label::kNear);
__ bind(&smi_case);
switch (op) {
@@ -1700,11 +1678,13 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
}
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+ Token::Value op,
OverwriteMode mode) {
__ pop(rdx);
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), NULL); // NULL signals no inlined smi code.
+ BinaryOpStub stub(op, mode);
+ // NULL signals no inlined smi code.
+ EmitCallIC(stub.GetCode(), NULL, expr->id());
context()->Plug(rax);
}
@@ -1718,7 +1698,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
@@ -1741,33 +1721,23 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ movq(rdx, rax);
__ pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
break;
}
case KEYED_PROPERTY: {
__ push(rax); // Preserve value.
- if (prop->is_synthetic()) {
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
- { AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
- }
- __ movq(rdx, rax);
- __ Move(rcx, prop->key()->AsLiteral()->handle());
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ movq(rcx, rax);
- __ pop(rdx);
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ movq(rcx, rax);
+ __ pop(rdx);
__ pop(rax); // Restore value.
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
break;
}
}
@@ -1778,8 +1748,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
- // Left-hand sides that rewrite to explicit property accesses do not reach
- // here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->AsSlot() != NULL);
@@ -1790,10 +1758,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// rcx, and the global object on the stack.
__ Move(rcx, var->name());
__ movq(rdx, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(is_strict()
- ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -1813,17 +1781,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ j(not_equal, &skip);
__ movq(Operand(rbp, SlotOffset(slot)), rax);
break;
- case Slot::CONTEXT: {
- __ movq(rcx, ContextOperand(rsi, Context::FCONTEXT_INDEX));
- __ movq(rdx, ContextOperand(rcx, slot->index()));
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &skip);
- __ movq(ContextOperand(rcx, slot->index()), rax);
- int offset = Context::SlotOffset(slot->index());
- __ movq(rdx, rax); // Preserve the stored value in eax.
- __ RecordWrite(rcx, offset, rdx, rbx);
- break;
- }
+ case Slot::CONTEXT:
case Slot::LOOKUP:
__ push(rax);
__ push(rsi);
@@ -1893,10 +1851,10 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
} else {
__ pop(rdx);
}
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1933,10 +1891,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1986,8 +1944,9 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
SetSourcePosition(expr->position());
// Call the IC initialization code.
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
- EmitCallIC(ic, mode);
+ Handle<Code> ic =
+ ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
+ EmitCallIC(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -1996,8 +1955,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key,
- RelocInfo::Mode mode) {
+ Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
@@ -2019,9 +1977,10 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
SetSourcePosition(expr->position());
// Call the IC initialization code.
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
+ Handle<Code> ic =
+ ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, mode);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2029,7 +1988,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2041,7 +2000,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Record source position for debugger.
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+ CallFunctionStub stub(arg_count, in_loop, flags);
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2132,7 +2091,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source position for debugger.
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+ CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_IMPLICIT);
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2170,18 +2129,21 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// function and receiver and have the slow path jump around this
// code.
if (done.is_linked()) {
- NearLabel call;
- __ jmp(&call);
+ Label call;
+ __ jmp(&call, Label::kNear);
__ bind(&done);
// Push function.
__ push(rax);
- // Push global receiver.
- __ movq(rbx, GlobalObjectOperand());
- __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
- __ bind(&call);
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing the hole to the call function stub.
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ bind(&call);
}
- EmitCallWithStub(expr);
+ // The receiver is either the global receiver or an object found
+ // by LoadContextSlot. That object could be the hole if the
+ // receiver is implicitly the global object.
+ EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
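// Passing the hole rather than eagerly loading the global receiver defers
// the choice to the stub: presumably CallFunctionStub, seeing the hole
// together with RECEIVER_MIGHT_BE_IMPLICIT, substitutes the global receiver
// itself (inferred from the flag name; the stub body is not in this hunk).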
} else if (fun->AsProperty() != NULL) {
// Call to an object property.
Property* prop = fun->AsProperty();
@@ -2212,31 +2174,22 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source code position for IC call.
SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
// Push result (function).
__ push(rax);
// Push Global receiver.
__ movq(rcx, GlobalObjectOperand());
__ push(FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
- EmitCallWithStub(expr);
+ EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
} else {
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(prop->obj());
}
- EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
+ EmitKeyedCallWithIC(expr, prop->key());
}
}
} else {
- // Call to some other expression. If the expression is an anonymous
- // function literal not called in a loop, mark it as one that should
- // also use the full code generator.
- FunctionLiteral* lit = fun->AsFunctionLiteral();
- if (lit != NULL &&
- lit->name()->Equals(Heap::empty_string()) &&
- loop_depth() == 0) {
- lit->set_try_full_codegen(true);
- }
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(fun);
}
@@ -2244,7 +2197,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ movq(rbx, GlobalObjectOperand());
__ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
// Emit function call.
- EmitCallWithStub(expr);
+ EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
}
#ifdef DEBUG
@@ -2280,7 +2233,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ Set(rax, arg_count);
__ movq(rdi, Operand(rsp, arg_count * kPointerSize));
- Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> construct_builtin =
+ isolate()->builtins()->JSConstructCall();
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
context()->Plug(rax);
}
@@ -2347,9 +2301,9 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, if_false);
__ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ cmpq(rbx, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, if_false);
- __ cmpq(rbx, Immediate(LAST_JS_OBJECT_TYPE));
+ __ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
@@ -2370,7 +2324,7 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
@@ -2414,11 +2368,71 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
- // used in a few functions in runtime.js which should not normally be hit by
- // this compiler.
+ if (FLAG_debug_code) __ AbortIfSmi(rax);
+
+ // Check whether this map has already been checked to be safe for default
+ // valueOf.
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rbx, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ j(not_zero, if_true);
+
+ // Check for fast case object. Generate false result for slow case object.
+ __ movq(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
+ __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ CompareRoot(rcx, Heap::kHashTableMapRootIndex);
+ __ j(equal, if_false);
+
+ // Look for valueOf symbol in the descriptor array, and indicate false if
+ // found. The type is not checked, so if it is a transition it is a false
+ // negative.
+ __ LoadInstanceDescriptors(rbx, rbx);
+ __ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
+ // rbx: descriptor array
+ // rcx: length of descriptor array
+ // Calculate the end of the descriptor array.
+ SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
+ __ lea(rcx,
+ Operand(
+ rbx, index.reg, index.scale, FixedArray::kHeaderSize));
+ // Calculate location of the first key name.
+ __ addq(rbx,
+ Immediate(FixedArray::kHeaderSize +
+ DescriptorArray::kFirstIndex * kPointerSize));
+  // Loop through all the keys in the descriptor array. If one of these is
+  // the symbol valueOf, the result is false.
+ Label entry, loop;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(rdx, FieldOperand(rbx, 0));
+ __ Cmp(rdx, FACTORY->value_of_symbol());
+ __ j(equal, if_false);
+ __ addq(rbx, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(rbx, rcx);
+ __ j(not_equal, &loop);
+
+ // Reload map as register rbx was used as temporary above.
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+
+  // If a valueOf property is not found on the object, check that its
+  // prototype is the unmodified String prototype. If it is not, the result
+  // is false.
+ __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ __ testq(rcx, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+ __ cmpq(rcx,
+ ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ j(not_equal, if_false);
+ // Set the bit in the map to indicate that it has been checked safe for
+ // default valueOf and set true result.
+ __ or_(FieldOperand(rbx, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ jmp(if_true);
+
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ jmp(if_false);
context()->Plug(if_true, if_false);
}
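// The map bit acts as a memoized answer: the first successful walk of the
// descriptor array plus the prototype check sets
// Map::kStringWrapperSafeForDefaultValueOf, so later queries on objects
// sharing that map take only the single testb at the top and jump straight
// to if_true, never rescanning the descriptors.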
@@ -2502,15 +2516,15 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
- __ SmiCompare(Operand(rax, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Cmp(Operand(rax, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &check_frame_marker);
__ movq(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
- __ SmiCompare(Operand(rax, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
+ __ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
+ Smi::FromInt(StackFrame::CONSTRUCT));
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -2558,15 +2572,15 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
- NearLabel exit;
+ Label exit;
// Get the number of formal parameters.
__ Move(rax, Smi::FromInt(scope()->num_parameters()));
// Check if the calling frame is an arguments adaptor frame.
__ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &exit);
+ __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &exit, Label::kNear);
// Arguments adaptor case: Read the arguments length from the
// adaptor frame.
@@ -2589,16 +2603,18 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax); // Map is now in rax.
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
+ // Map is now in rax.
__ j(below, &null);
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
- __ j(equal, &function);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+ __ CmpInstanceType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above_equal, &function);
// Check if the constructor in the map is a function.
__ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
@@ -2613,12 +2629,12 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Functions have class 'Function'.
__ bind(&function);
- __ Move(rax, Factory::function_class_symbol());
+ __ Move(rax, isolate()->factory()->function_class_symbol());
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ Move(rax, Factory::Object_symbol());
+ __ Move(rax, isolate()->factory()->Object_symbol());
__ jmp(&done);
// Non-JS objects have class null.
@@ -2672,8 +2688,13 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
// Return a random uint32 number in rax.
// The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
- __ PrepareCallCFunction(0);
- __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+ __ PrepareCallCFunction(1);
+#ifdef _WIN64
+ __ LoadAddress(rcx, ExternalReference::isolate_address());
+#else
+ __ LoadAddress(rdi, ExternalReference::isolate_address());
+#endif
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
// Convert 32 random bits in rax to 0.(32 random bits) in a double
// by computing:
@@ -2682,7 +2703,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
__ movd(xmm1, rcx);
__ movd(xmm0, rax);
__ cvtss2sd(xmm1, xmm1);
- __ xorpd(xmm0, xmm1);
+ __ xorps(xmm0, xmm1);
__ subsd(xmm0, xmm1);
__ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
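// Bit-level sketch of the conversion above: rcx holds the single-precision
// pattern of a power-of-two constant (loaded just before this hunk, value
// not shown), cvtss2sd widens it so its low 32 mantissa bits are zero, and
// xorps merges the random bits into that mantissa (xorps and xorpd are
// bit-identical here; xorps just encodes one byte shorter, presumably the
// reason for the switch). Subtracting the constant again leaves only the
// fractional contribution of the random bits, i.e. a double in [0,1).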
@@ -2967,17 +2988,17 @@ void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() >= 2);
- int arg_count = args->length() - 2; // For receiver and function.
- VisitForStackValue(args->at(0)); // Receiver.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i + 1));
+ int arg_count = args->length() - 2; // 2 ~ receiver and function.
+ for (int i = 0; i < arg_count + 1; i++) {
+ VisitForStackValue(args->at(i));
}
- VisitForAccumulatorValue(args->at(arg_count + 1)); // Function.
+ VisitForAccumulatorValue(args->last()); // Function.
- // InvokeFunction requires function in rdi. Move it in there.
- if (!result_register().is(rdi)) __ movq(rdi, result_register());
+ // InvokeFunction requires the function in rdi. Move it in there.
+ __ movq(rdi, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(rdi, count, CALL_FUNCTION);
+ __ InvokeFunction(rdi, count, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->Plug(rax);
}
@@ -3010,8 +3031,8 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
// Fetch the map and check if array is in fast case.
// Check that object doesn't require security checks and
// has no indexed interceptor.
- __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, temp);
- __ j(below, &slow_case);
+ __ CmpObjectType(object, JS_ARRAY_TYPE, temp);
+ __ j(not_equal, &slow_case);
__ testb(FieldOperand(temp, Map::kBitFieldOffset),
Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
__ j(not_zero, &slow_case);
@@ -3077,7 +3098,7 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
+ isolate()->global_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@@ -3098,7 +3119,7 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
__ movq(cache,
FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
- NearLabel done, not_found;
+ Label done, not_found;
// tmp now holds finger offset as a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ movq(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
@@ -3108,12 +3129,12 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
index.reg,
index.scale,
FixedArray::kHeaderSize));
- __ j(not_equal, &not_found);
+ __ j(not_equal, &not_found, Label::kNear);
__ movq(rax, FieldOperand(cache,
index.reg,
index.scale,
FixedArray::kHeaderSize + kPointerSize));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&not_found);
// Call runtime to perform the lookup.
@@ -3137,27 +3158,27 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
VisitForAccumulatorValue(args->at(1));
__ pop(left);
- NearLabel done, fail, ok;
+ Label done, fail, ok;
__ cmpq(left, right);
- __ j(equal, &ok);
+ __ j(equal, &ok, Label::kNear);
// Fail if either is a non-HeapObject.
Condition either_smi = masm()->CheckEitherSmi(left, right, tmp);
- __ j(either_smi, &fail);
- __ j(zero, &fail);
+ __ j(either_smi, &fail, Label::kNear);
+ __ j(zero, &fail, Label::kNear);
__ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
__ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
Immediate(JS_REGEXP_TYPE));
- __ j(not_equal, &fail);
+ __ j(not_equal, &fail, Label::kNear);
__ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
- __ j(not_equal, &fail);
+ __ j(not_equal, &fail, Label::kNear);
__ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
__ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- __ j(equal, &ok);
+ __ j(equal, &ok, Label::kNear);
__ bind(&fail);
- __ Move(rax, Factory::false_value());
- __ jmp(&done);
+ __ Move(rax, isolate()->factory()->false_value());
+ __ jmp(&done, Label::kNear);
__ bind(&ok);
- __ Move(rax, Factory::true_value());
+ __ Move(rax, isolate()->factory()->true_value());
__ bind(&done);
context()->Plug(rax);
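
The sequence above boils down to a short equivalence predicate, sketched here in plain C++ (instance_type and map_of are illustrative stand-ins for the field loads above):

    // Two values are regexp-equivalent iff they are the same object, or
    // both are JSRegExps with the same map sharing one data array.
    bool IsRegExpEquivalent(Object* left, Object* right) {
      if (left == right) return true;
      if (left->IsSmi() || right->IsSmi()) return false;
      if (instance_type(left) != JS_REGEXP_TYPE) return false;
      if (map_of(left) != map_of(right)) return false;
      return JSRegExp::cast(left)->data() == JSRegExp::cast(right)->data();
    }
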
@@ -3203,7 +3224,286 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- context()->Plug(Heap::kUndefinedValueRootIndex);
+ Label bailout, return_result, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop,
+ loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
+ ASSERT(args->length() == 2);
+ // We will leave the separator on the stack until the end of the function.
+ VisitForStackValue(args->at(1));
+ // Load the array into rax (aliased as "array" below).
+ VisitForAccumulatorValue(args->at(0));
+ // All aliases of the same register have disjoint lifetimes.
+ Register array = rax;
+ Register elements = no_reg; // Will be rax.
+
+ Register index = rdx;
+
+ Register string_length = rcx;
+
+ Register string = rsi;
+
+ Register scratch = rbx;
+
+ Register array_length = rdi;
+ Register result_pos = no_reg; // Will be rdi.
+
+ Operand separator_operand = Operand(rsp, 2 * kPointerSize);
+ Operand result_operand = Operand(rsp, 1 * kPointerSize);
+ Operand array_length_operand = Operand(rsp, 0 * kPointerSize);
+ // Separator operand is already pushed. Make room for the two
+ // other stack fields, and clear the direction flag in anticipation
+ // of calling CopyBytes.
+ __ subq(rsp, Immediate(2 * kPointerSize));
+ __ cld();
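
From this point the function keeps three values in known stack slots; the Operand definitions above correspond to the following layout (a sketch inferred from those definitions):

    // rsp[0x10] : separator, pushed by VisitForStackValue above
    // rsp[0x08] : result string, stored once it has been allocated
    // rsp[0x00] : array length, saved as an untagged int32
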
+ // Check that the array is a JSArray.
+ __ JumpIfSmi(array, &bailout);
+ __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
+ __ j(not_equal, &bailout);
+
+ // Check that the array has fast elements.
+ __ CheckFastElements(scratch, &bailout);
+
+ // Array has fast elements, so its length must be a smi.
+ // If the array has length zero, return the empty string.
+ __ movq(array_length, FieldOperand(array, JSArray::kLengthOffset));
+ __ SmiCompare(array_length, Smi::FromInt(0));
+ __ j(not_zero, &non_trivial_array);
+ __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
+ __ jmp(&return_result);
+
+ // Save the array length on the stack.
+ __ bind(&non_trivial_array);
+ __ SmiToInteger32(array_length, array_length);
+ __ movl(array_length_operand, array_length);
+
+ // Save the FixedArray containing array's elements.
+ // End of array's live range.
+ elements = array;
+ __ movq(elements, FieldOperand(array, JSArray::kElementsOffset));
+ array = no_reg;
+
+
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths, as a smi-encoded value.
+ __ Set(index, 0);
+ __ Set(string_length, 0);
+ // Loop condition: while (index < array_length).
+ // Live loop registers: index(int32), array_length(int32), string(String*),
+ // scratch, string_length(int32), elements(FixedArray*).
+ if (FLAG_debug_code) {
+ __ cmpq(index, array_length);
+ __ Assert(below, "No empty arrays here in EmitFastAsciiArrayJoin");
+ }
+ __ bind(&loop);
+ __ movq(string, FieldOperand(elements,
+ index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ JumpIfSmi(string, &bailout);
+ __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ andb(scratch, Immediate(
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
+ __ cmpb(scratch, Immediate(kStringTag | kAsciiStringTag | kSeqStringTag));
+ __ j(not_equal, &bailout);
+ __ AddSmiField(string_length,
+ FieldOperand(string, SeqAsciiString::kLengthOffset));
+ __ j(overflow, &bailout);
+ __ incl(index);
+ __ cmpl(index, array_length);
+ __ j(less, &loop);
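
In C++ terms the validation loop just emitted computes roughly the following, where IsSeqAsciiString stands in for the instance-type mask test above (a sketch, not V8 API):

    // Sum the lengths of all elements; any non-flat, non-ASCII element
    // (or a length overflow) aborts the whole fast path.
    int SumOfSeqAsciiLengths(FixedArray* elements, int array_length) {
      int total = 0;
      for (int i = 0; i < array_length; i++) {
        Object* element = elements->get(i);
        if (element->IsSmi() || !IsSeqAsciiString(element)) return -1;
        total += SeqAsciiString::cast(element)->length();  // overflow-checked
      }
      return total;  // -1 signals bailout
    }
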
+
+ // Live registers:
+ // string_length: Sum of string lengths.
+ // elements: FixedArray of strings.
+ // index: Array length.
+ // array_length: Array length.
+
+ // If array_length is 1, return elements[0], a string.
+ __ cmpl(array_length, Immediate(1));
+ __ j(not_equal, &not_size_one_array);
+ __ movq(rax, FieldOperand(elements, FixedArray::kHeaderSize));
+ __ jmp(&return_result);
+
+ __ bind(&not_size_one_array);
+
+ // End of array_length live range.
+ result_pos = array_length;
+ array_length = no_reg;
+
+ // Live registers:
+ // string_length: Sum of string lengths.
+ // elements: FixedArray of strings.
+ // index: Array length.
+
+ // Check that the separator is a sequential ASCII string.
+ __ movq(string, separator_operand);
+ __ JumpIfSmi(string, &bailout);
+ __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ andb(scratch, Immediate(
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
+ __ cmpb(scratch, Immediate(kStringTag | kAsciiStringTag | kSeqStringTag));
+ __ j(not_equal, &bailout);
+
+ // Live registers:
+ // string_length: Sum of string lengths.
+ // elements: FixedArray of strings.
+ // index: Array length.
+ // string: Separator string.
+
+ // Add (separator length times (array_length - 1)) to string_length.
+ __ SmiToInteger32(scratch,
+ FieldOperand(string, SeqAsciiString::kLengthOffset));
+ __ decl(index);
+ __ imull(scratch, index);
+ __ j(overflow, &bailout);
+ __ addl(string_length, scratch);
+ __ j(overflow, &bailout);
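
The total length of the result is therefore sum_of_lengths + separator_length * (array_length - 1): for example, joining three strings of lengths 2, 4 and 1 with a two-character separator yields 7 + 2 * 2 = 11 characters. Both the multiply and the add are overflow-checked because the sum must remain a valid smi-sized string length.
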
+
+ // Live registers and stack values:
+ // string_length: Total length of result string.
+ // elements: FixedArray of strings.
+ __ AllocateAsciiString(result_pos, string_length, scratch,
+ index, string, &bailout);
+ __ movq(result_operand, result_pos);
+ __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
+
+ __ movq(string, separator_operand);
+ __ SmiCompare(FieldOperand(string, SeqAsciiString::kLengthOffset),
+ Smi::FromInt(1));
+ __ j(equal, &one_char_separator);
+ __ j(greater, &long_separator);
+
+
+ // Empty separator case:
+ __ Set(index, 0);
+ __ movl(scratch, array_length_operand);
+ __ jmp(&loop_1_condition);
+ // Loop condition: while (index < array_length).
+ __ bind(&loop_1);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+ // elements: the FixedArray of strings we are joining.
+ // scratch: array length.
+
+ // Get string = array[index].
+ __ movq(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ SmiToInteger32(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(result_pos, string, string_length);
+ __ incl(index);
+ __ bind(&loop_1_condition);
+ __ cmpl(index, scratch);
+ __ j(less, &loop_1); // Loop while (index < array_length).
+ __ jmp(&done);
+
+ // Generic bailout code used from several places.
+ __ bind(&bailout);
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ jmp(&return_result);
+
+
+ // One-character separator case
+ __ bind(&one_char_separator);
+ // Get the separator ASCII character value.
+ // Register "string" holds the separator.
+ __ movzxbl(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ Set(index, 0);
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator.
+ __ jmp(&loop_2_entry);
+ // Loop condition: while (index < length).
+ __ bind(&loop_2);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // elements: The FixedArray of strings we are joining.
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+ // scratch: Separator character.
+
+ // Copy the separator character to the result.
+ __ movb(Operand(result_pos, 0), scratch);
+ __ incq(result_pos);
+
+ __ bind(&loop_2_entry);
+ // Get string = array[index].
+ __ movq(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ SmiToInteger32(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(result_pos, string, string_length);
+ __ incl(index);
+ __ cmpl(index, array_length_operand);
+ __ j(less, &loop_2); // End while (index < length).
+ __ jmp(&done);
+
+
+ // Long separator case (separator is more than one character).
+ __ bind(&long_separator);
+
+ // Make elements point to end of elements array, and index
+ // count from -array_length to zero, so we don't need to maintain
+ // a loop limit.
+ __ movl(index, array_length_operand);
+ __ lea(elements, FieldOperand(elements, index, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ neg(index);
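
Running the index from -array_length up to zero lets the increment itself terminate the loop: incq sets the zero flag exactly when the index reaches zero, so no limit register has to stay live. The same idiom in plain C++ (illustrative):

    // base points one past the last element; negative indices walk the
    // array, and the loop ends when the index wraps around to zero.
    Object** base = elements_end;  // elements_start + n
    for (intptr_t index = -n; index != 0; index++) {
      Object* element = base[index];
      // ... copy element into the result ...
    }
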
+
+ // Replace separator string with pointer to its first character, and
+ // make scratch be its length.
+ __ movq(string, separator_operand);
+ __ SmiToInteger32(scratch,
+ FieldOperand(string, String::kLengthOffset));
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ movq(separator_operand, string);
+
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator.
+ __ jmp(&loop_3_entry);
+ // Loop condition: while (index < length).
+ __ bind(&loop_3);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+ // scratch: Separator length.
+ // separator_operand (rsp[0x10]): Address of first char of separator.
+
+ // Copy the separator to the result.
+ __ movq(string, separator_operand);
+ __ movl(string_length, scratch);
+ __ CopyBytes(result_pos, string, string_length, 2);
+
+ __ bind(&loop_3_entry);
+ // Get string = array[index].
+ __ movq(string, Operand(elements, index, times_pointer_size, 0));
+ __ SmiToInteger32(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(result_pos, string, string_length);
+ __ incq(index);
+ __ j(not_equal, &loop_3); // Loop while (index < 0).
+
+ __ bind(&done);
+ __ movq(rax, result_operand);
+
+ __ bind(&return_result);
+ // Drop temp values from the stack, and restore context register.
+ __ addq(rsp, Immediate(3 * kPointerSize));
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ context()->Plug(rax);
}
@@ -3234,8 +3534,10 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Call the JS runtime function using a call IC.
__ Move(rcx, expr->name());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+ Handle<Code> ic =
+ ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
+ EmitCallIC(ic, mode, expr->id());
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
@@ -3338,8 +3640,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Comment cmt(masm_, "[ UnaryOperation (ADD)");
VisitForAccumulatorValue(expr->expression());
Label no_conversion;
- Condition is_smi = masm_->CheckSmi(result_register());
- __ j(is_smi, &no_conversion);
+ __ JumpIfSmi(result_register(), &no_conversion);
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);
@@ -3347,46 +3648,13 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
- case Token::SUB: {
- Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register rax.
- VisitForAccumulatorValue(expr->expression());
- __ CallStub(&stub);
- context()->Plug(rax);
+ case Token::SUB:
+ EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
break;
- }
- case Token::BIT_NOT: {
- Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- // The generic unary operation stub expects the argument to be
- // in the accumulator register rax.
- VisitForAccumulatorValue(expr->expression());
- Label done;
- bool inline_smi_case = ShouldInlineSmiCase(expr->op());
- if (inline_smi_case) {
- Label call_stub;
- __ JumpIfNotSmi(rax, &call_stub);
- __ SmiNot(rax, rax);
- __ jmp(&done);
- __ bind(&call_stub);
- }
- bool overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode mode =
- overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpFlags flags = inline_smi_case
- ? NO_UNARY_SMI_CODE_IN_STUB
- : NO_UNARY_FLAGS;
- GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
- __ CallStub(&stub);
- __ bind(&done);
- context()->Plug(rax);
+ case Token::BIT_NOT:
+ EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
break;
- }
default:
UNREACHABLE();
@@ -3394,6 +3662,23 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+ const char* comment) {
+ // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+ Comment cmt(masm_, comment);
+ bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOverwriteMode overwrite =
+ can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ UnaryOpStub stub(expr->op(), overwrite);
+ // UnaryOpStub expects the argument to be in the
+ // accumulator register rax.
+ VisitForAccumulatorValue(expr->expression());
+ SetSourcePosition(expr->position());
+ EmitCallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ context()->Plug(rax);
+}
+
+
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
@@ -3406,7 +3691,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->expression()->AsProperty();
@@ -3432,16 +3717,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ push(rax); // Copy of receiver, needed for later store.
EmitNamedPropertyLoad(prop);
} else {
- if (prop->is_arguments_access()) {
- VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
- __ push(slot_operand);
- __ Move(rax, prop->key()->AsLiteral()->handle());
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
__ movq(rdx, Operand(rsp, 0)); // Leave receiver on stack
__ push(rax); // Copy of key, needed for later store.
EmitKeyedPropertyLoad(prop);
@@ -3450,13 +3727,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// We need a second deoptimization point after loading the value
// in case evaluating the property load may have a side effect.
- PrepareForBailout(expr->increment(), TOS_REG);
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ }
// Call ToNumber only if operand is not a smi.
- NearLabel no_conversion;
- Condition is_smi;
- is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &no_conversion);
+ Label no_conversion;
+ __ JumpIfSmi(rax, &no_conversion, Label::kNear);
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);
@@ -3482,7 +3761,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Inline smi case if we are in a loop.
- NearLabel stub_call, done;
+ Label done, stub_call;
JumpPatchSite patch_site(masm_);
if (ShouldInlineSmiCase(expr->op())) {
@@ -3491,10 +3770,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
__ SmiSubConstant(rax, rax, Smi::FromInt(1));
}
- __ j(overflow, &stub_call);
+ __ j(overflow, &stub_call, Label::kNear);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(rax, &done);
+ patch_site.EmitJumpIfSmi(rax, &done, Label::kNear);
__ bind(&stub_call);
// Call stub. Undo operation first.
@@ -3509,14 +3788,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
// Call stub for +1/-1.
- TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
+ BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
if (expr->op() == Token::INC) {
__ Move(rdx, Smi::FromInt(1));
} else {
__ movq(rdx, rax);
__ Move(rax, Smi::FromInt(1));
}
- EmitCallIC(stub.GetCode(), &patch_site);
+ EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
__ bind(&done);
// Store the value returned in rax.
@@ -3546,10 +3825,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ Move(rcx, prop->key()->AsLiteral()->handle());
__ pop(rdx);
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3563,10 +3842,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(rcx);
__ pop(rdx);
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3590,10 +3869,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Comment cmnt(masm_, "Global variable");
__ Move(rcx, proxy->name());
__ movq(rax, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
PrepareForBailout(expr, TOS_REG);
context()->Plug(rax);
} else if (proxy != NULL &&
@@ -3616,94 +3895,80 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
context()->Plug(rax);
} else {
// This expression cannot throw a reference error at the top level.
- context()->HandleExpression(expr);
+ VisitInCurrentContext(expr);
}
}
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (op != Token::EQ && op != Token::EQ_STRICT) return false;
-
- // Check for the pattern: typeof <expression> == <string literal>.
- Literal* right_literal = right->AsLiteral();
- if (right_literal == NULL) return false;
- Handle<Object> right_literal_value = right_literal->handle();
- if (!right_literal_value->IsString()) return false;
- UnaryOperation* left_unary = left->AsUnaryOperation();
- if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
- Handle<String> check = Handle<String>::cast(right_literal_value);
-
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(left_unary->expression());
+ VisitForTypeofValue(expr);
}
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- if (check->Equals(Heap::number_symbol())) {
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_true);
+ if (check->Equals(isolate()->heap()->number_symbol())) {
+ __ JumpIfSmi(rax, if_true);
__ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kHeapNumberMapRootIndex);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::string_symbol())) {
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_false);
+ } else if (check->Equals(isolate()->heap()->string_symbol())) {
+ __ JumpIfSmi(rax, if_false);
// Check for undetectable objects => false.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
+ __ j(above_equal, if_false);
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
- __ CmpInstanceType(rdx, FIRST_NONSTRING_TYPE);
- Split(below, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::boolean_symbol())) {
+ Split(zero, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
__ j(equal, if_true);
__ CompareRoot(rax, Heap::kFalseValueRootIndex);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::undefined_symbol())) {
+ } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, if_true);
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_false);
+ __ JumpIfSmi(rax, if_false);
// Check for undetectable objects => true.
__ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::function_symbol())) {
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_false);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
- __ j(equal, if_true);
- // Regular expressions => 'function' (they are callable).
- __ CmpInstanceType(rdx, JS_REGEXP_TYPE);
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::object_symbol())) {
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_false);
+ } else if (check->Equals(isolate()->heap()->function_symbol())) {
+ __ JumpIfSmi(rax, if_false);
+ STATIC_ASSERT(LAST_CALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, rdx);
+ Split(above_equal, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->object_symbol())) {
+ __ JumpIfSmi(rax, if_false);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, if_true);
- // Regular expressions => 'function', not 'object'.
- __ CmpObjectType(rax, JS_REGEXP_TYPE, rdx);
- __ j(equal, if_false);
+ __ CmpObjectType(rax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, rdx);
+ __ j(below, if_false);
+ __ CmpInstanceType(rdx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above, if_false);
// Check for undetectable objects => false.
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
- // Check for JS objects => true.
- __ CmpInstanceType(rdx, FIRST_JS_OBJECT_TYPE);
- __ j(below, if_false);
- __ CmpInstanceType(rdx, LAST_JS_OBJECT_TYPE);
- Split(below_equal, if_true, if_false, fall_through);
+ Split(zero, if_true, if_false, fall_through);
} else {
if (if_false != fall_through) __ jmp(if_false);
}
+}
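
All of these branches serve comparisons of the form typeof x == "<literal>": since the literal is known at compile time, the generator never materializes the typeof string and instead emits just the one type test. A condensed map of the dispatch above (sketch):

    // typeof x == "number"    -> smi, or map is the heap-number map
    // typeof x == "string"    -> instance type below FIRST_NONSTRING_TYPE
    //                            and not undetectable
    // typeof x == "boolean"   -> identical to the true or false root
    // typeof x == "undefined" -> undefined root, or an undetectable object
    // typeof x == "function"  -> instance type >= FIRST_CALLABLE_SPEC_OBJECT_TYPE
    // typeof x == "object"    -> null, or a non-callable spec object that
    //                            is not undetectable
    // any other literal       -> statically false
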
+
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ VisitForAccumulatorValue(expr);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- return true;
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ Split(equal, if_true, if_false, fall_through);
}
@@ -3722,14 +3987,12 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
- if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
context()->Plug(if_true, if_false);
return;
}
+ Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
case Token::IN:
@@ -3792,10 +4055,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
- NearLabel slow_case;
+ Label slow_case;
__ movq(rcx, rdx);
__ or_(rcx, rax);
- patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+ patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
__ cmpq(rdx, rax);
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
@@ -3804,7 +4067,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site);
+ EmitCallIC(ic, &patch_site, expr->id());
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ testq(rax, rax);
@@ -3836,8 +4099,7 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
__ j(equal, if_true);
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, if_true);
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_false);
+ __ JumpIfSmi(rax, if_false);
// It can be an undetectable object.
__ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
@@ -3864,69 +4126,51 @@ Register FullCodeGenerator::context_register() {
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+ RelocInfo::Mode mode,
+ unsigned ast_id) {
ASSERT(mode == RelocInfo::CODE_TARGET ||
mode == RelocInfo::CODE_TARGET_CONTEXT);
+ Counters* counters = isolate()->counters();
switch (ic->kind()) {
case Code::LOAD_IC:
- __ IncrementCounter(&Counters::named_load_full, 1);
+ __ IncrementCounter(counters->named_load_full(), 1);
break;
case Code::KEYED_LOAD_IC:
- __ IncrementCounter(&Counters::keyed_load_full, 1);
+ __ IncrementCounter(counters->keyed_load_full(), 1);
break;
case Code::STORE_IC:
- __ IncrementCounter(&Counters::named_store_full, 1);
+ __ IncrementCounter(counters->named_store_full(), 1);
break;
case Code::KEYED_STORE_IC:
- __ IncrementCounter(&Counters::keyed_store_full, 1);
+ __ IncrementCounter(counters->keyed_store_full(), 1);
default:
break;
}
-
- __ call(ic, mode);
-
- // Crankshaft doesn't need patching of inlined loads and stores.
- // When compiling the snapshot we need to produce code that works
- // with and without Crankshaft.
- if (V8::UseCrankshaft() && !Serializer::enabled()) {
- return;
- }
-
- // If we're calling a (keyed) load or store stub, we have to mark
- // the call as containing no inlined code so we will not attempt to
- // patch it.
- switch (ic->kind()) {
- case Code::LOAD_IC:
- case Code::KEYED_LOAD_IC:
- case Code::STORE_IC:
- case Code::KEYED_STORE_IC:
- __ nop(); // Signals no inlined code.
- break;
- default:
- // Do nothing.
- break;
- }
+ __ call(ic, mode, ast_id);
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+ JumpPatchSite* patch_site,
+ unsigned ast_id) {
+ Counters* counters = isolate()->counters();
switch (ic->kind()) {
case Code::LOAD_IC:
- __ IncrementCounter(&Counters::named_load_full, 1);
+ __ IncrementCounter(counters->named_load_full(), 1);
break;
case Code::KEYED_LOAD_IC:
- __ IncrementCounter(&Counters::keyed_load_full, 1);
+ __ IncrementCounter(counters->keyed_load_full(), 1);
break;
case Code::STORE_IC:
- __ IncrementCounter(&Counters::named_store_full, 1);
+ __ IncrementCounter(counters->named_store_full(), 1);
break;
case Code::KEYED_STORE_IC:
- __ IncrementCounter(&Counters::keyed_store_full, 1);
+ __ IncrementCounter(counters->keyed_store_full(), 1);
default:
break;
}
-
- __ call(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET, ast_id);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
} else {
@@ -3946,6 +4190,25 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
}
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ if (scope()->is_global_scope()) {
+ // Contexts nested in the global context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ __ Push(Smi::FromInt(0));
+ } else if (scope()->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ push(ContextOperand(rsi, Context::CLOSURE_INDEX));
+ } else {
+ ASSERT(scope()->is_function_scope());
+ __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+}
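
The three cases pick the closure that a context allocated by the runtime should report as its owner; as a decision table (sketch):

    // global scope   -> push Smi(0); the runtime substitutes the canonical
    //                   empty function for this sentinel
    // eval scope     -> push the calling context's closure, read from the
    //                   CLOSURE_INDEX slot of the current context
    // function scope -> push the JSFunction of the current stack frame
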
+
+
// ----------------------------------------------------------------------------
// Non-local control flow support.
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index b3243cf48..8919765cb 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -29,7 +29,7 @@
#if defined(V8_TARGET_ARCH_X64)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
@@ -76,11 +76,11 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
// Check that the receiver is a valid JS object.
__ movq(r1, FieldOperand(receiver, HeapObject::kMapOffset));
__ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
- __ cmpb(r0, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmpb(r0, Immediate(FIRST_SPEC_OBJECT_TYPE));
__ j(below, miss);
// If this assert fails, we have to check the upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
GenerateGlobalInstanceTypeCheck(masm, r0, miss);
@@ -97,58 +97,6 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
}
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found leaving the
-// index into the dictionary in |r1|. Jump to the |miss| label
-// otherwise.
-static void GenerateStringDictionaryProbes(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1) {
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
-
- // Compute the capacity mask.
- const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
- __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
- __ decl(r0);
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- static const int kProbes = 4;
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- for (int i = 0; i < kProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
- __ shrl(r1, Immediate(String::kHashShift));
- if (i > 0) {
- __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
- }
- __ and_(r1, r0);
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
-
- // Check if the key is identical to the name.
- __ cmpq(name, Operand(elements, r1, times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- if (i != kProbes - 1) {
- __ j(equal, done);
- } else {
- __ j(not_equal, miss);
- }
- }
-}
-
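
The probing scheme being factored out into StringDictionaryLookupStub::GeneratePositiveLookup is quadratic probing over a power-of-two table, unrolled four times. A C++ sketch following the masked-index comment above (names are illustrative):

    // Probe slots (hash + i + i*i) & mask; each entry is a
    // (key, value, details) triple, hence the scaling by 3.
    bool Probe(Object** elements, uint32_t capacity, uint32_t hash,
               Object* name, uint32_t* entry_out) {
      uint32_t mask = capacity - 1;  // capacity is a power of two
      for (int i = 0; i < 4; i++) {  // kProbes == 4
        uint32_t index = (hash + i + i * i) & mask;
        if (elements[index * 3] == name) {  // kEntrySize == 3
          *entry_out = index * 3;
          return true;  // found: caller continues at |done|
        }
      }
      return false;     // miss after kProbes attempts
    }
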
// Helper function used to load a property from a dictionary backing storage.
// This function may return false negatives, so miss_label
@@ -179,13 +127,13 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- GenerateStringDictionaryProbes(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
// If probing finds an entry in the dictionary, r0 contains the
// index into the dictionary. Check that the value is a normal
@@ -237,13 +185,13 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- GenerateStringDictionaryProbes(masm,
- miss_label,
- &done,
- elements,
- name,
- scratch0,
- scratch1);
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ scratch0,
+ scratch1);
// If probing finds an entry in the dictionary, scratch0 contains the
// index into the dictionary. Check that the value is a normal
@@ -381,11 +329,6 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
}
-// The offset from the inlined patch site to the start of the inlined
-// load instruction.
-const int LoadIC::kOffsetToLoadInstruction = 20;
-
-
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
@@ -554,7 +497,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- rsp[0] : return address
// -----------------------------------
Label slow, check_string, index_smi, index_string, property_array_property;
- Label check_pixel_array, probe_dictionary, check_number_dictionary;
+ Label probe_dictionary, check_number_dictionary;
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &check_string);
@@ -565,11 +508,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyedLoadReceiverCheck(
masm, rdx, rcx, Map::kHasIndexedInterceptor, &slow);
- // Check the "has fast elements" bit in the receiver's map which is
- // now in rcx.
- __ testb(FieldOperand(rcx, Map::kBitField2Offset),
- Immediate(1 << Map::kHasFastElements));
- __ j(zero, &check_pixel_array);
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(rcx, &check_number_dictionary);
GenerateFastArrayLoad(masm,
rdx,
@@ -579,21 +519,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
rax,
NULL,
&slow);
- __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
__ ret(0);
- __ bind(&check_pixel_array);
- GenerateFastPixelArrayLoad(masm,
- rdx,
- rax,
- rcx,
- rbx,
- rax,
- &check_number_dictionary,
- NULL,
- &slow);
-
__ bind(&check_number_dictionary);
+ __ SmiToInteger32(rbx, rax);
+ __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+
// Check whether the elements is a number dictionary.
// rdx: receiver
// rax: key
@@ -609,7 +542,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Slow case: Jump to runtime.
// rdx: receiver
// rax: key
- __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
+ __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
@@ -638,10 +571,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load the key (consisting of map and symbol) from the cache and
// check for match.
ExternalReference cache_keys
- = ExternalReference::keyed_lookup_cache_keys();
+ = ExternalReference::keyed_lookup_cache_keys(masm->isolate());
__ movq(rdi, rcx);
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
- __ movq(kScratchRegister, cache_keys);
+ __ LoadAddress(kScratchRegister, cache_keys);
__ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
__ j(not_equal, &slow);
__ cmpq(rax, Operand(kScratchRegister, rdi, times_1, kPointerSize));
@@ -649,8 +582,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Get field offset, which is a 32-bit integer.
ExternalReference cache_field_offsets
- = ExternalReference::keyed_lookup_cache_field_offsets();
- __ movq(kScratchRegister, cache_field_offsets);
+ = ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
+ __ LoadAddress(kScratchRegister, cache_field_offsets);
__ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
__ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
__ subq(rdi, rcx);
@@ -660,7 +593,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ addq(rcx, rdi);
__ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
- __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
+ __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
// Load property array property.
@@ -668,7 +601,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ movq(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
__ movq(rax, FieldOperand(rax, rdi, times_pointer_size,
FixedArray::kHeaderSize));
- __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
+ __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
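
Offsets stored in the lookup cache cover both property storage locations: after subtracting the map's in-object property count, a non-negative result indexes the external properties array, and a negative one indexes backwards from the end of the object itself. A sketch of the decode (instance_size is in words here; the accessors are illustrative):

    int offset = cache_field_offsets[index] - map->inobject_properties();
    if (offset >= 0) {
      result = properties->get(offset);  // external backing store
    } else {
      // Negative offsets address in-object fields from the object's end.
      result = object->word_at(map->instance_size() + offset);
    }
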
// Do a quick inline probe of the receiver's dictionary, if it
@@ -683,7 +616,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);
GenerateDictionaryLoad(masm, &slow, rbx, rax, rcx, rdi, rax);
- __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
+ __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
__ ret(0);
__ bind(&index_string);
@@ -722,7 +655,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm);
+ GenerateMiss(masm, false);
}
@@ -758,11 +691,14 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ push(rcx); // return address
// Perform tail call to the entry.
- __ TailCallExternalReference(ExternalReference(
- IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
+ masm->isolate()),
+ 2,
+ 1);
__ bind(&slow);
- GenerateMiss(masm);
+ GenerateMiss(masm, false);
}
@@ -774,7 +710,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow, slow_with_tagged_index, fast, array, extra, check_pixel_array;
+ Label slow, slow_with_tagged_index, fast, array, extra;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow_with_tagged_index);
@@ -791,9 +727,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ CmpInstanceType(rbx, JS_ARRAY_TYPE);
__ j(equal, &array);
- // Check that the object is some kind of JS object.
- __ CmpInstanceType(rbx, FIRST_JS_OBJECT_TYPE);
+ // Check that the object is some kind of JSObject.
+ __ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE);
__ j(below, &slow);
+ __ CmpInstanceType(rbx, JS_PROXY_TYPE);
+ __ j(equal, &slow);
+ __ CmpInstanceType(rbx, JS_FUNCTION_PROXY_TYPE);
+ __ j(equal, &slow);
// Object case: Check key against length in the elements array.
// rax: value
@@ -803,7 +743,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check that the object is in fast mode and writable.
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &check_pixel_array);
+ __ j(not_equal, &slow);
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
// rax: value
// rbx: FixedArray
@@ -817,25 +757,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
GenerateRuntimeSetProperty(masm, strict_mode);
// Never returns to here.
- // Check whether the elements is a pixel array.
- // rax: value
- // rdx: receiver
- // rbx: receiver's elements array
- // rcx: index, zero-extended.
- __ bind(&check_pixel_array);
- GenerateFastPixelArrayStore(masm,
- rdx,
- rcx,
- rax,
- rbx,
- rdi,
- false,
- true,
- NULL,
- &slow,
- &slow,
- &slow);
-
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
// element to the array by writing to array[array.length].
@@ -875,10 +796,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rax: value
// rbx: receiver's elements array (a FixedArray)
// rcx: index
- NearLabel non_smi_value;
+ Label non_smi_value;
__ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
- __ JumpIfNotSmi(rax, &non_smi_value);
+ __ JumpIfNotSmi(rax, &non_smi_value, Label::kNear);
__ ret(0);
__ bind(&non_smi_value);
// Slow case that needs to retain rcx for use by RecordWrite.
@@ -893,7 +814,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
- Code::Kind kind) {
+ Code::Kind kind,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// rcx : function name
// rdx : receiver
@@ -904,10 +826,11 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(kind,
NOT_IN_LOOP,
MONOMORPHIC,
- Code::kNoExtraICState,
+ extra_ic_state,
NORMAL,
argc);
- StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, rax);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
+ rax);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@@ -943,7 +866,8 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
- StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
+ no_reg);
__ bind(&miss);
}
@@ -969,7 +893,8 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
// Invoke the function.
ParameterCount actual(argc);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
@@ -1001,7 +926,10 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
}
-static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
+static void GenerateCallMiss(MacroAssembler* masm,
+ int argc,
+ IC::UtilityId id,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -1012,10 +940,11 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
+ Counters* counters = masm->isolate()->counters();
if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(&Counters::call_miss, 1);
+ __ IncrementCounter(counters->call_miss(), 1);
} else {
- __ IncrementCounter(&Counters::keyed_call_miss, 1);
+ __ IncrementCounter(counters->keyed_call_miss(), 1);
}
// Get the receiver of the function from the stack; 1 ~ return address.
@@ -1030,8 +959,8 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// Call the entry.
CEntryStub stub(1);
- __ movq(rax, Immediate(2));
- __ movq(rbx, ExternalReference(IC_Utility(id)));
+ __ Set(rax, 2);
+ __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
__ CallStub(&stub);
// Move result to rdi and exit the internal frame.
@@ -1057,12 +986,21 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
}
// Invoke the function.
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
ParameterCount actual(argc);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+ __ InvokeFunction(rdi,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ call_kind);
}
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMegamorphic(MacroAssembler* masm,
+ int argc,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -1075,8 +1013,8 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack; 1 ~ return address.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
- GenerateMiss(masm, argc);
+ GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+ GenerateMiss(masm, argc, extra_ic_state);
}
@@ -1092,11 +1030,13 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -----------------------------------
GenerateCallNormal(masm, argc);
- GenerateMiss(masm, argc);
+ GenerateMiss(masm, argc, Code::kNoExtraICState);
}
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMiss(MacroAssembler* masm,
+ int argc,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -1107,7 +1047,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
+ GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
}
@@ -1141,7 +1081,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateFastArrayLoad(
masm, rdx, rcx, rax, rbx, rdi, &check_number_dictionary, &slow_load);
- __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1);
__ bind(&do_call);
// receiver in rdx is not used after this point.
@@ -1159,13 +1100,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ SmiToInteger32(rbx, rcx);
// rbx: untagged index
GenerateNumberDictionaryLoad(masm, &slow_load, rax, rcx, rbx, r9, rdi, rdi);
- __ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1);
+ __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
__ jmp(&do_call);
__ bind(&slow_load);
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
- __ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1);
+ __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
__ EnterInternalFrame();
__ push(rcx); // save the key
__ push(rdx); // pass the receiver
@@ -1192,12 +1133,15 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ j(not_equal, &lookup_monomorphic_cache);
GenerateDictionaryLoad(masm, &slow_load, rbx, rcx, rax, rdi, rdi);
- __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
+ __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1);
__ jmp(&do_call);
__ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1);
- GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
+ __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
+ GenerateMonomorphicCacheProbe(masm,
+ argc,
+ Code::KEYED_CALL_IC,
+ Code::kNoExtraICState);
// Fall through on miss.
__ bind(&slow_call);
@@ -1207,7 +1151,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
- __ IncrementCounter(&Counters::keyed_call_generic_slow, 1);
+ __ IncrementCounter(counters->keyed_call_generic_slow(), 1);
GenerateMiss(masm, argc);
__ bind(&index_string);
@@ -1250,7 +1194,170 @@ void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
+ GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
+}
+
+
+static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the elements
+ // map check later, we do not need to check for interceptors or
+ // whether it requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
+ __ j(below, slow_case);
+
+ // Check that the key is a non-negative smi.
+ Condition check = masm->CheckNonNegativeSmi(key);
+ __ j(NegateCondition(check), slow_case);
+
+ // Load the elements into scratch1 and check that its map is the
+ // non-strict arguments elements map; otherwise bail out to the slow case.
+ Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ __ movq(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments.
+ __ movq(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+ __ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
+ __ cmpq(key, scratch2);
+ __ j(greater_equal, unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
+ __ SmiToInteger64(scratch3, key);
+ __ movq(scratch2, FieldOperand(scratch1,
+ scratch3,
+ times_pointer_size,
+ kHeaderSize));
+ __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
+ __ j(equal, unmapped_case);
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ __ movq(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
+ __ SmiToInteger64(scratch3, scratch2);
+ return FieldOperand(scratch1,
+ scratch3,
+ times_pointer_size,
+ Context::kHeaderSize);
+}
+
+
+static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map. The parameter_map register
+ // must be loaded with the parameter map of the arguments object and is
+ // overwritten.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ movq(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ __ movq(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmpq(key, scratch);
+ __ j(greater_equal, slow_case);
+ __ SmiToInteger64(scratch, key);
+ return FieldOperand(backing_store,
+ scratch,
+ times_pointer_size,
+ FixedArray::kHeaderSize);
+}
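
Together the two lookup helpers implement the aliasing scheme of non-strict arguments objects: a parameter map whose slots either name a context slot (for arguments that alias a formal parameter) or hold the hole, backed by an ordinary FixedArray for everything else. A C++ sketch of the combined lookup (accessors are illustrative):

    // parameter_map layout: [ context, backing_store, slot_0, slot_1, ... ]
    Object* ArgumentsLookup(JSObject* arguments, int key) {
      FixedArray* parameter_map = FixedArray::cast(arguments->elements());
      if (key < parameter_map->length() - 2) {
        Object* probe = parameter_map->get(key + 2);
        if (!probe->IsTheHole()) {
          // Mapped: read the aliased formal out of the context.
          Context* context = Context::cast(parameter_map->get(0));
          return context->get(Smi::cast(probe)->value());
        }
      }
      // Unmapped: fall back to the plain backing store; out-of-range keys
      // and hole values are handled by the callers' slow paths.
      FixedArray* backing_store = FixedArray::cast(parameter_map->get(1));
      return backing_store->get(key);
    }
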
+
+
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label slow, notin;
+ Operand mapped_location =
+ GenerateMappedArgumentsLookup(
+ masm, rdx, rax, rbx, rcx, rdi, &notin, &slow);
+ __ movq(rax, mapped_location);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in rbx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, rax, rbx, rcx, &slow);
+ __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &slow);
+ __ movq(rax, unmapped_location);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label slow, notin;
+ Operand mapped_location = GenerateMappedArgumentsLookup(
+ masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
+ __ movq(mapped_location, rax);
+ __ lea(r9, mapped_location);
+ __ movq(r8, rax);
+ __ RecordWrite(rbx, r9, r8);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in rbx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
+ __ movq(unmapped_location, rax);
+ __ lea(r9, unmapped_location);
+ __ movq(r8, rax);
+ __ RecordWrite(rbx, r9, r8);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+ int argc) {
+ // ----------- S t a t e -------------
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
+ // ...
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // -----------------------------------
+ Label slow, notin;
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ Operand mapped_location = GenerateMappedArgumentsLookup(
+ masm, rdx, rcx, rbx, rax, r8, &notin, &slow);
+ __ movq(rdi, mapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow);
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in rbx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rax, &slow);
+ __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &slow);
+ __ movq(rdi, unmapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow);
+ __ bind(&slow);
+ GenerateMiss(masm, argc);
}
@@ -1265,7 +1372,8 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
MONOMORPHIC);
- StubCache::GenerateProbe(masm, flags, rax, rcx, rbx, rdx);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rax, rcx, rbx,
+ rdx);
// Cache miss: Jump to runtime.
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -1300,7 +1408,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// -- rsp[0] : return address
// -----------------------------------
- __ IncrementCounter(&Counters::load_miss, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->load_miss(), 1);
__ pop(rbx);
__ push(rax); // receiver
@@ -1308,143 +1417,21 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ push(rbx); // return address
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
}
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
- // If the instruction following the call is not a test rax, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- Address delta_address = test_instruction_address + 1;
- // The delta to the start of the map check instruction.
- int delta = *reinterpret_cast<int*>(delta_address);
-
- // The map address is the last 8 bytes of the 10-byte
- // immediate move instruction, so we add 2 to get the
- // offset to the last 8 bytes.
- Address map_address = test_instruction_address + delta + 2;
- *(reinterpret_cast<Object**>(map_address)) = map;
-
- // The offset is in the 32-bit displacement of a seven byte
- // memory-to-register move instruction (REX.W 0x88 ModR/M disp32),
- // so we add 3 to get the offset of the displacement.
- Address offset_address =
- test_instruction_address + delta + kOffsetToLoadInstruction + 3;
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
- return true;
-}
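
The removed deltas assume a rigid instruction layout at every inlined load site; sketched from the byte-offset comments above:

    // call <ic>                     ; IC call, kCallTargetAddressOffset long
    // test eax, <imm32>             ; kTestEaxByte marker; imm32 = delta to
    //                               ;   the start of the map check
    // ...
    // movq <reg>, <map>             ; 10 bytes: 2 of opcode/REX + 8 of map
    //                               ;   immediate (patched at +2)
    // ...kOffsetToLoadInstruction bytes later...
    // movq rax, [<reg> + disp32]    ; 7 bytes: 3 of prefix/opcode/ModRM +
    //                               ;   4 of displacement (patched at +3)
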
-
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
- Object* map,
- Object* cell,
- bool is_dont_delete) {
- // TODO(<bug#>): implement this.
- return false;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test rax, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- // Extract the encoded deltas from the test rax instruction.
- Address encoded_offsets_address = test_instruction_address + 1;
- int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
- int delta_to_map_check = -(encoded_offsets & 0xFFFF);
- int delta_to_record_write = encoded_offsets >> 16;
-
- // Patch the map to check. The map address is the last 8 bytes of
- // the 10-byte immediate move instruction.
- Address map_check_address = test_instruction_address + delta_to_map_check;
- Address map_address = map_check_address + 2;
- *(reinterpret_cast<Object**>(map_address)) = map;
-
- // Patch the offset in the store instruction. The offset is in the
- // last 4 bytes of a 7 byte register-to-memory move instruction.
- Address offset_address =
- map_check_address + StoreIC::kOffsetToStoreInstruction + 3;
- // The offset should have initial value (kMaxInt - 1), cleared value
- // (-1) or we should be clearing the inlined version.
- ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
- *reinterpret_cast<int*>(offset_address) == -1 ||
- (offset == 0 && map == Heap::null_value()));
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
- // Patch the offset in the write-barrier code. The offset is the
- // last 4 bytes of a 7 byte lea instruction.
- offset_address = map_check_address + delta_to_record_write + 3;
- // The offset should have initial value (kMaxInt), cleared value
- // (-1) or we should be clearing the inlined version.
- ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
- *reinterpret_cast<int*>(offset_address) == -1 ||
- (offset == 0 && map == Heap::null_value()));
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
- return true;
-}
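
The removed patcher packs two small code-offset deltas into the 32-bit immediate of the test instruction. A standalone sketch of that encoding; the decode mirrors the deleted lines above, while the encode side is my inverse of it (the emitting code is not in this hunk):

#include <cassert>
#include <cstdint>

// Low 16 bits: magnitude of the (negative) delta to the map check.
// High 16 bits: delta to the record-write code.
uint32_t EncodeOffsets(int delta_to_map_check, int delta_to_record_write) {
  assert(delta_to_map_check <= 0 && delta_to_map_check > -0x10000);
  assert(delta_to_record_write >= 0 && delta_to_record_write < 0x8000);
  return (static_cast<uint32_t>(delta_to_record_write) << 16) |
         static_cast<uint32_t>(-delta_to_map_check);
}

// Mirrors the two decode lines removed above.
void DecodeOffsets(int encoded, int* delta_to_map_check,
                   int* delta_to_record_write) {
  *delta_to_map_check = -(encoded & 0xFFFF);
  *delta_to_record_write = encoded >> 16;
}
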
-
-
-static bool PatchInlinedMapCheck(Address address, Object* map) {
- if (V8::UseCrankshaft()) return false;
-
- // The argument is the address of the start of the call sequence
- // that called the IC.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
- // The keyed load has a fast inlined case if the IC call instruction
- // is immediately followed by a test instruction.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- // Fetch the offset from the test instruction to the map compare
- // instructions (starting with the 64-bit immediate mov of the map
- // address). This offset is stored in the last 4 bytes of the 5
- // byte test instruction.
- Address delta_address = test_instruction_address + 1;
- int delta = *reinterpret_cast<int*>(delta_address);
- // Compute the map address. The map address is in the last 8 bytes
- // of the 10-byte immediate mov instruction (incl. REX prefix), so we add 2
- // to the offset to get the map address.
- Address map_address = test_instruction_address + delta + 2;
- // Patch the map check.
- *(reinterpret_cast<Object**>(map_address)) = map;
- return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
- return PatchInlinedMapCheck(address, map);
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
- return PatchInlinedMapCheck(address, map);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- __ IncrementCounter(&Counters::keyed_load_miss, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_miss(), 1);
__ pop(rbx);
__ push(rdx); // receiver
@@ -1452,7 +1439,10 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ push(rbx); // return address
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
+ ExternalReference ref = force_generic
+ ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
+ masm->isolate())
+ : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
}
@@ -1488,7 +1478,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
NOT_IN_LOOP,
MONOMORPHIC,
strict_mode);
- StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
+ no_reg);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1510,16 +1501,12 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
__ push(rbx); // return address
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss));
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
-// The offset from the inlined patch site to the start of the inlined
-// store instruction.
-const int StoreIC::kOffsetToStoreInstruction = 20;
-
-
void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
@@ -1563,7 +1550,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
__ push(value);
__ push(scratch); // return address
- ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
__ bind(&miss);
@@ -1585,11 +1573,12 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
GenerateStringDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
GenerateDictionaryStore(masm, &miss, rbx, rcx, rax, r8, r9);
- __ IncrementCounter(&Counters::store_normal_hit, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1);
__ ret(0);
__ bind(&miss);
- __ IncrementCounter(&Counters::store_normal_miss, 1);
+ __ IncrementCounter(counters->store_normal_miss(), 1);
GenerateMiss(masm);
}
@@ -1637,7 +1626,27 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ __ pop(rbx);
+ __ push(rdx); // receiver
+ __ push(rcx); // key
+ __ push(rax); // value
+ __ push(rbx); // return address
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -1652,7 +1661,10 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ push(rbx); // return address
// Do tail-call to runtime routine.
- ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
+ ExternalReference ref = force_generic
+ ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
+ masm->isolate())
+ : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
diff --git a/deps/v8/src/x64/jump-target-x64.cc b/deps/v8/src/x64/jump-target-x64.cc
deleted file mode 100644
index e71560463..000000000
--- a/deps/v8/src/x64/jump-target-x64.cc
+++ /dev/null
@@ -1,437 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
- ASSERT(cgen()->has_valid_frame());
- // Live non-frame registers are not allowed at unconditional jumps
- // because we have no way of invalidating the corresponding results
- // which are still live in the C++ code.
- ASSERT(cgen()->HasValidEntryRegisters());
-
- if (is_bound()) {
- // Backward jump. There is an expected frame to merge to.
- ASSERT(direction_ == BIDIRECTIONAL);
- cgen()->frame()->PrepareMergeTo(entry_frame_);
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- } else if (entry_frame_ != NULL) {
- // Forward jump with a preconfigured entry frame. Assert the
- // current frame matches the expected one and jump to the block.
- ASSERT(cgen()->frame()->Equals(entry_frame_));
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- } else {
- // Forward jump. Remember the current frame and emit a jump to
- // its merge code.
- AddReachingFrame(cgen()->frame());
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- __ jmp(&merge_labels_.last());
- }
-}
-
-
-void JumpTarget::DoBranch(Condition cc, Hint b) {
- ASSERT(cgen() != NULL);
- ASSERT(cgen()->has_valid_frame());
-
- if (is_bound()) {
- ASSERT(direction_ == BIDIRECTIONAL);
- // Backward branch. We have an expected frame to merge to on the
- // backward edge.
-
- // Swap the current frame for a copy (we do the swapping to get
- // the off-frame registers off the fall through) to use for the
- // branch.
- VirtualFrame* fall_through_frame = cgen()->frame();
- VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
- RegisterFile non_frame_registers;
- cgen()->SetFrame(branch_frame, &non_frame_registers);
-
- // Check if we can avoid merge code.
- cgen()->frame()->PrepareMergeTo(entry_frame_);
- if (cgen()->frame()->Equals(entry_frame_)) {
- // Branch right in to the block.
- cgen()->DeleteFrame();
- __ j(cc, &entry_label_);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- return;
- }
-
- // Check if we can reuse existing merge code.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (reaching_frames_[i] != NULL &&
- cgen()->frame()->Equals(reaching_frames_[i])) {
- // Branch to the merge code.
- cgen()->DeleteFrame();
- __ j(cc, &merge_labels_[i]);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- return;
- }
- }
-
- // To emit the merge code here, we negate the condition and branch
- // around the merge code on the fall through path.
- Label original_fall_through;
- __ j(NegateCondition(cc), &original_fall_through);
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- __ bind(&original_fall_through);
-
- } else if (entry_frame_ != NULL) {
- // Forward branch with a preconfigured entry frame. Assert the
- // current frame matches the expected one and branch to the block.
- ASSERT(cgen()->frame()->Equals(entry_frame_));
- // Explicitly use the macro assembler instead of __ as forward
- // branches are expected to be a fixed size (no inserted
- // coverage-checking instructions please). This is used in
- // Reference::GetValue.
- cgen()->masm()->j(cc, &entry_label_);
-
- } else {
- // Forward branch. A copy of the current frame is remembered and
- // a branch to the merge code is emitted. Explicitly use the
- // macro assembler instead of __ as forward branches are expected
- // to be a fixed size (no inserted coverage-checking instructions
- // please). This is used in Reference::GetValue.
- AddReachingFrame(new VirtualFrame(cgen()->frame()));
- cgen()->masm()->j(cc, &merge_labels_.last());
- }
-}
-
-
-void JumpTarget::Call() {
- // Call is used to push the address of the catch block on the stack as
- // a return address when compiling try/catch and try/finally. We
- // fully spill the frame before making the call. The expected frame
- // at the label (which should be the only one) is the spilled current
- // frame plus an in-memory return address. The "fall-through" frame
- // at the return site is the spilled current frame.
- ASSERT(cgen() != NULL);
- ASSERT(cgen()->has_valid_frame());
- // There are no non-frame references across the call.
- ASSERT(cgen()->HasValidEntryRegisters());
- ASSERT(!is_linked());
-
- cgen()->frame()->SpillAll();
- VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
- target_frame->Adjust(1);
- // We do not expect a call with a preconfigured entry frame.
- ASSERT(entry_frame_ == NULL);
- AddReachingFrame(target_frame);
- __ call(&merge_labels_.last());
-}
-
-
-void JumpTarget::DoBind() {
- ASSERT(cgen() != NULL);
- ASSERT(!is_bound());
-
- // Live non-frame registers are not allowed at the start of a basic
- // block.
- ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
- // Fast case: the jump target was manually configured with an entry
- // frame to use.
- if (entry_frame_ != NULL) {
- // Assert no reaching frames to deal with.
- ASSERT(reaching_frames_.is_empty());
- ASSERT(!cgen()->has_valid_frame());
-
- RegisterFile empty;
- if (direction_ == BIDIRECTIONAL) {
- // Copy the entry frame so the original can be used for a
- // possible backward jump.
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- } else {
- // Take ownership of the entry frame.
- cgen()->SetFrame(entry_frame_, &empty);
- entry_frame_ = NULL;
- }
- __ bind(&entry_label_);
- return;
- }
-
- if (!is_linked()) {
- ASSERT(cgen()->has_valid_frame());
- if (direction_ == FORWARD_ONLY) {
- // Fast case: no forward jumps and no possible backward jumps.
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- VirtualFrame* frame = cgen()->frame();
- int difference = frame->stack_pointer_ - (frame->element_count() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ addq(rsp, Immediate(difference * kPointerSize));
- }
- } else {
- ASSERT(direction_ == BIDIRECTIONAL);
- // Fast case: no forward jumps, possible backward ones. Remove
- // constants and copies above the watermark on the fall-through
- // frame and use it as the entry frame.
- cgen()->frame()->MakeMergable();
- entry_frame_ = new VirtualFrame(cgen()->frame());
- }
- __ bind(&entry_label_);
- return;
- }
-
- if (direction_ == FORWARD_ONLY &&
- !cgen()->has_valid_frame() &&
- reaching_frames_.length() == 1) {
- // Fast case: no fall-through, a single forward jump, and no
- // possible backward jumps. Pick up the only reaching frame, take
- // ownership of it, and use it for the block about to be emitted.
- VirtualFrame* frame = reaching_frames_[0];
- RegisterFile empty;
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[0] = NULL;
- __ bind(&merge_labels_[0]);
-
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- int difference = frame->stack_pointer_ - (frame->element_count() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ addq(rsp, Immediate(difference * kPointerSize));
- }
-
- __ bind(&entry_label_);
- return;
- }
-
- // If there is a current frame, record it as the fall-through. It
- // is owned by the reaching frames for now.
- bool had_fall_through = false;
- if (cgen()->has_valid_frame()) {
- had_fall_through = true;
- AddReachingFrame(cgen()->frame()); // Return value ignored.
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- }
-
- // Compute the frame to use for entry to the block.
- ComputeEntryFrame();
-
- // Some moves required to merge to an expected frame require purely
- // frame state changes, and do not require any code generation.
- // Perform those first to increase the possibility of finding equal
- // frames below.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (reaching_frames_[i] != NULL) {
- reaching_frames_[i]->PrepareMergeTo(entry_frame_);
- }
- }
-
- if (is_linked()) {
- // There were forward jumps. Handle merging the reaching frames
- // to the entry frame.
-
- // Loop over the (non-null) reaching frames and process any that
- // need merge code. Iterate backwards through the list to handle
- // the fall-through frame first. Set frames that will be
- // processed after 'i' to NULL if we want to avoid processing
- // them.
- for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
- VirtualFrame* frame = reaching_frames_[i];
-
- if (frame != NULL) {
- // Does the frame (probably) need merge code?
- if (!frame->Equals(entry_frame_)) {
- // We could have a valid frame as the fall through to the
- // binding site or as the fall through from a previous merge
- // code block. Jump around the code we are about to
- // generate.
- if (cgen()->has_valid_frame()) {
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- }
- // Pick up the frame for this block. Assume ownership if
- // there cannot be backward jumps.
- RegisterFile empty;
- if (direction_ == BIDIRECTIONAL) {
- cgen()->SetFrame(new VirtualFrame(frame), &empty);
- } else {
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[i] = NULL;
- }
- __ bind(&merge_labels_[i]);
-
- // Loop over the remaining (non-null) reaching frames,
- // looking for any that can share merge code with this one.
- for (int j = 0; j < i; j++) {
- VirtualFrame* other = reaching_frames_[j];
- if (other != NULL && other->Equals(cgen()->frame())) {
- // Set the reaching frame element to null to avoid
- // processing it later, and then bind its entry label.
- reaching_frames_[j] = NULL;
- __ bind(&merge_labels_[j]);
- }
- }
-
- // Emit the merge code.
- cgen()->frame()->MergeTo(entry_frame_);
- } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
- // If this is the fall through frame, and it didn't need
- // merge code, we need to pick up the frame so we can jump
- // around subsequent merge blocks if necessary.
- RegisterFile empty;
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[i] = NULL;
- }
- }
- }
-
- // The code generator may not have a current frame if there was no
- // fall through and none of the reaching frames needed merging.
- // In that case, clone the entry frame as the current frame.
- if (!cgen()->has_valid_frame()) {
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- }
-
- // There may be unprocessed reaching frames that did not need
- // merge code. They will have unbound merge labels. Bind their
- // merge labels to be the same as the entry label and deallocate
- // them.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (!merge_labels_[i].is_bound()) {
- reaching_frames_[i] = NULL;
- __ bind(&merge_labels_[i]);
- }
- }
-
- // There are non-NULL reaching frames with bound labels for each
- // merge block, but only on backward targets.
- } else {
- // There were no forward jumps. There must be a current frame and
- // this must be a bidirectional target.
- ASSERT(reaching_frames_.length() == 1);
- ASSERT(reaching_frames_[0] != NULL);
- ASSERT(direction_ == BIDIRECTIONAL);
-
- // Use a copy of the reaching frame so the original can be saved
- // for possible reuse as a backward merge block.
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
- __ bind(&merge_labels_[0]);
- cgen()->frame()->MergeTo(entry_frame_);
- }
-
- __ bind(&entry_label_);
-}
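
For orientation on the file being deleted: the heart of DoBind is the loop that emits merge code once per distinct reaching-frame shape and lets equal frames share it. A toy model of that sharing, with frames reduced to plain value vectors (illustration only, not the removed class):

#include <vector>

struct Frame {
  std::vector<int> slots;
  bool operator==(const Frame& other) const { return slots == other.slots; }
};

int CountMergeBlocks(const std::vector<Frame>& reaching, const Frame& entry) {
  std::vector<bool> shared(reaching.size(), false);
  int merge_blocks = 0;
  // Walk backwards so the fall-through frame is handled first, as above.
  for (int i = static_cast<int>(reaching.size()) - 1; i >= 0; --i) {
    if (shared[i] || reaching[i] == entry) continue;  // no merge code needed
    ++merge_blocks;  // one merge block for this frame shape
    for (int j = 0; j < i; ++j) {  // equal frames bind to the same block
      if (!shared[j] && reaching[j] == reaching[i]) shared[j] = true;
    }
  }
  return merge_blocks;
}
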
-
-
-void BreakTarget::Jump() {
- // Drop leftover statement state from the frame before merging, without
- // emitting code.
- ASSERT(cgen()->has_valid_frame());
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
- // Drop leftover statement state from the frame before merging, without
- // emitting code.
- ASSERT(cgen()->has_valid_frame());
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- cgen()->frame()->Push(arg);
- DoJump();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even on
- // the fall through. This is so we can bind the return target with state
- // on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- }
- DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_ + 1);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even on
- // the fall through. This is so we can bind the return target with state
- // on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- cgen()->frame()->Push(arg);
- }
- DoBind();
- *arg = cgen()->frame()->Pop();
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 48844a5bf..92c889126 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -39,7 +39,7 @@ namespace internal {
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public PostCallGenerator {
+class SafepointGenerator : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -49,14 +49,26 @@ class SafepointGenerator : public PostCallGenerator {
deoptimization_index_(deoptimization_index) { }
virtual ~SafepointGenerator() { }
- virtual void Generate() {
- // Ensure that we have enough space in the reloc info to patch
- // this with calls when doing deoptimization.
- codegen_->masm()->RecordComment(RelocInfo::kFillerCommentString, true);
+ virtual void BeforeCall(int call_size) const {
+ ASSERT(call_size >= 0);
+ // Ensure that we have enough space after the previous safepoint position
+ // for the jump generated there.
+ int call_end = codegen_->masm()->pc_offset() + call_size;
+ int prev_jump_end = codegen_->LastSafepointEnd() + kMinSafepointSize;
+ if (call_end < prev_jump_end) {
+ int padding_size = prev_jump_end - call_end;
+ STATIC_ASSERT(kMinSafepointSize <= 9); // One multibyte nop is enough.
+ codegen_->masm()->nop(padding_size);
+ }
+ }
+
+ virtual void AfterCall() const {
codegen_->RecordSafepoint(pointers_, deoptimization_index_);
}
private:
+ static const int kMinSafepointSize =
+ MacroAssembler::kShortCallInstructionLength;
LCodeGen* codegen_;
LPointerMap* pointers_;
int deoptimization_index_;
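
The BeforeCall hook pads with a multibyte nop so the patch region of one safepoint cannot overlap the jump patched at the previous one. The arithmetic as a standalone check; the 5-byte value stands in for MacroAssembler::kShortCallInstructionLength (E8 + rel32 on x64, an assumption stated here rather than taken from this hunk):

#include <cassert>

int PaddingBeforeCall(int pc_offset, int call_size, int last_safepoint_end) {
  const int kMinSafepointSize = 5;
  assert(call_size >= 0);
  int call_end = pc_offset + call_size;
  int prev_jump_end = last_safepoint_end + kMinSafepointSize;
  // Pad so this call cannot end inside the previous patchable region.
  return call_end < prev_jump_end ? prev_jump_end - call_end : 0;
}
// Example: pc_offset 100, call_size 2, last safepoint end 99 -> 2 nop bytes.
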
@@ -79,7 +91,7 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
- code->set_stack_slots(StackSlotCount());
+ code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -88,8 +100,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
void LCodeGen::Abort(const char* format, ...) {
if (FLAG_trace_bailout) {
- SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
- PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
+ SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+ PrintF("Aborting LCodeGen in @\"%s\": ", *name);
va_list arguments;
va_start(arguments, format);
OS::VPrint(format, arguments);
@@ -128,16 +140,31 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
+ // Strict mode functions need to replace the receiver with undefined
+ // when called as functions (without an explicit receiver
+ // object). rcx is zero for method calls and non-zero for function
+ // calls.
+ if (info_->is_strict_mode() || info_->is_native()) {
+ Label ok;
+ __ testq(rcx, rcx);
+ __ j(zero, &ok, Label::kNear);
+ // +1 for return address.
+ int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+ __ bind(&ok);
+ }
+
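
A worked example of the receiver_offset computation above, assuming kPointerSize == 8 on x64. At this point in the prologue rsp[0] holds the return address, the parameters sit above it, and the receiver sits above them, hence the +1:

// For a function with two declared parameters:
//   rsp[0]  : return address
//   rsp[8]  : parameter 2
//   rsp[16] : parameter 1
//   rsp[24] : receiver          <- (2 + 1) * kPointerSize
int ReceiverOffset(int num_parameters) {
  const int kPointerSize = 8;
  return (num_parameters + 1) * kPointerSize;  // +1 skips the return address
}
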
__ push(rbp); // Caller's frame pointer.
__ movq(rbp, rsp);
__ push(rsi); // Callee's context.
__ push(rdi); // Callee's JS function.
// Reserve space for the stack slots needed by the code.
- int slots = StackSlotCount();
+ int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
- __ movl(rax, Immediate(slots));
+ __ Set(rax, slots);
__ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
Label loop;
__ bind(&loop);
@@ -170,7 +197,7 @@ bool LCodeGen::GeneratePrologue() {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
// Context is returned in both rax and rsi. It replaces the context
@@ -239,9 +266,8 @@ LInstruction* LCodeGen::GetNextInstruction() {
bool LCodeGen::GenerateJumpTable() {
for (int i = 0; i < jump_table_.length(); i++) {
- JumpTableEntry* info = jump_table_[i];
- __ bind(&(info->label_));
- __ Jump(info->address_, RelocInfo::RUNTIME_ENTRY);
+ __ bind(&jump_table_[i].label);
+ __ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY);
}
return !is_aborted();
}
@@ -279,7 +305,7 @@ bool LCodeGen::GenerateSafepointTable() {
while (byte_count-- > 0) {
__ int3();
}
- safepoints_.Emit(masm(), StackSlotCount());
+ safepoints_.Emit(masm(), GetStackSlotCount());
return !is_aborted();
}
@@ -407,7 +433,7 @@ void LCodeGen::AddToTranslation(Translation* translation,
translation->StoreDoubleStackSlot(op->index());
} else if (op->IsArgument()) {
ASSERT(is_tagged);
- int src_index = StackSlotCount() + op->index();
+ int src_index = GetStackSlotCount() + op->index();
translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
@@ -442,7 +468,7 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
- if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
+ if (code->kind() == Code::BINARY_OP_IC ||
code->kind() == Code::COMPARE_IC) {
__ nop();
}
@@ -456,7 +482,7 @@ void LCodeGen::CallCode(Handle<Code> code,
}
-void LCodeGen::CallRuntime(Runtime::Function* function,
+void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr) {
ASSERT(instr != NULL);
@@ -549,17 +575,13 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
if (cc == no_condition) {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- JumpTableEntry* jump_info = NULL;
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (jump_table_.length() > 0 &&
- jump_table_[jump_table_.length() - 1]->address_ == entry) {
- jump_info = jump_table_[jump_table_.length() - 1];
- } else {
- jump_info = new JumpTableEntry(entry);
- jump_table_.Add(jump_info);
+ if (jump_table_.is_empty() ||
+ jump_table_.last().address != entry) {
+ jump_table_.Add(JumpTableEntry(entry));
}
- __ j(cc, &jump_info->label_);
+ __ j(cc, &jump_table_.last().label);
}
}
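
A minimal model of the jump-table reuse above, with toy types rather than the LCodeGen ones: a new row is appended only when the target address changes, so consecutive deopts to the same entry share one jump.

#include <cstdint>
#include <vector>

struct Entry { uintptr_t address; };

size_t RowForDeopt(std::vector<Entry>* table, uintptr_t entry) {
  if (table->empty() || table->back().address != entry) {
    table->push_back(Entry{entry});  // new row only on an address change
  }
  return table->size() - 1;  // the conditional jump targets this row's label
}
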
@@ -569,14 +591,14 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
if (length == 0) return;
ASSERT(FLAG_deopt);
Handle<DeoptimizationInputData> data =
- Factory::NewDeoptimizationInputData(length, TENURED);
+ factory()->NewDeoptimizationInputData(length, TENURED);
Handle<ByteArray> translations = translations_.CreateByteArray();
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals =
- Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
@@ -670,7 +692,7 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
void LCodeGen::RecordPosition(int position) {
- if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+ if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
}
@@ -683,7 +705,7 @@ void LCodeGen::DoLabel(LLabel* label) {
}
__ bind(label->label());
current_block_ = label->block_id();
- LCodeGen::DoGap(label);
+ DoGap(label);
}
@@ -709,6 +731,11 @@ void LCodeGen::DoGap(LGap* gap) {
}
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+ DoGap(instr);
+}
+
+
void LCodeGen::DoParameter(LParameter* instr) {
// Nothing to do.
}
@@ -732,16 +759,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringCharAt: {
- StringCharAtStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::MathPow: {
- MathPowStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::NumberToString: {
NumberToStringStub stub;
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -775,41 +792,114 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
- LOperand* right = instr->InputAt(1);
- ASSERT(ToRegister(instr->result()).is(rdx));
- ASSERT(ToRegister(instr->InputAt(0)).is(rax));
- ASSERT(!ToRegister(instr->InputAt(1)).is(rax));
- ASSERT(!ToRegister(instr->InputAt(1)).is(rdx));
- Register right_reg = ToRegister(right);
+ if (instr->hydrogen()->HasPowerOf2Divisor()) {
+ Register dividend = ToRegister(instr->InputAt(0));
+ int32_t divisor =
+ HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+
+ if (divisor < 0) divisor = -divisor;
+
+ Label positive_dividend, done;
+ __ testl(dividend, dividend);
+ __ j(not_sign, &positive_dividend, Label::kNear);
+ __ negl(dividend);
+ __ andl(dividend, Immediate(divisor - 1));
+ __ negl(dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ j(not_zero, &done, Label::kNear);
+ DeoptimizeIf(no_condition, instr->environment());
+ } else {
+ __ jmp(&done, Label::kNear);
+ }
+ __ bind(&positive_dividend);
+ __ andl(dividend, Immediate(divisor - 1));
+ __ bind(&done);
+ } else {
+ Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
+ Register left_reg = ToRegister(instr->InputAt(0));
+ Register right_reg = ToRegister(instr->InputAt(1));
+ Register result_reg = ToRegister(instr->result());
+
+ ASSERT(left_reg.is(rax));
+ ASSERT(result_reg.is(rdx));
+ ASSERT(!right_reg.is(rax));
+ ASSERT(!right_reg.is(rdx));
+
+ // Check for x % 0.
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(right_reg, right_reg);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ __ testl(left_reg, left_reg);
+ __ j(zero, &remainder_eq_dividend, Label::kNear);
+ __ j(sign, &slow, Label::kNear);
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr->environment());
- }
+ __ j(not_sign, &both_positive, Label::kNear);
+ // The sign of the divisor doesn't matter.
+ __ neg(right_reg);
- // Sign extend eax to edx. (We are using only the low 32 bits of the values.)
- __ cdq();
+ __ bind(&both_positive);
+ // If the dividend is smaller than the nonnegative
+ // divisor, the dividend is the result.
+ __ cmpl(left_reg, right_reg);
+ __ j(less, &remainder_eq_dividend, Label::kNear);
+
+ // Check if the divisor is a PowerOfTwo integer.
+ Register scratch = ToRegister(instr->TempAt(0));
+ __ movl(scratch, right_reg);
+ __ subl(scratch, Immediate(1));
+ __ testl(scratch, right_reg);
+ __ j(not_zero, &do_subtraction, Label::kNear);
+ __ andl(left_reg, scratch);
+ __ jmp(&remainder_eq_dividend, Label::kNear);
+
+ __ bind(&do_subtraction);
+ const int kUnfolds = 3;
+ // Try a few subtractions of the dividend.
+ __ movl(scratch, left_reg);
+ for (int i = 0; i < kUnfolds; i++) {
+ // Reduce the dividend by the divisor.
+ __ subl(left_reg, right_reg);
+ // Check if the dividend is less than the divisor.
+ __ cmpl(left_reg, right_reg);
+ __ j(less, &remainder_eq_dividend, Label::kNear);
+ }
+ __ movl(left_reg, scratch);
+
+ // Slow case, using idiv instruction.
+ __ bind(&slow);
+ // Sign extend eax to edx.
+ // (We are using only the low 32 bits of the values.)
+ __ cdq();
+
+ // Check for (0 % -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label positive_left;
+ Label done;
+ __ testl(left_reg, left_reg);
+ __ j(not_sign, &positive_left, Label::kNear);
+ __ idivl(right_reg);
+
+ // Test the remainder for 0, because then the result would be -0.
+ __ testl(result_reg, result_reg);
+ __ j(not_zero, &done, Label::kNear);
+
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&positive_left);
+ __ idivl(right_reg);
+ __ bind(&done);
+ } else {
+ __ idivl(right_reg);
+ }
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&remainder_eq_dividend);
+ __ movl(result_reg, left_reg);
- // Check for (0 % -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- NearLabel positive_left;
- NearLabel done;
- __ testl(rax, rax);
- __ j(not_sign, &positive_left);
- __ idivl(right_reg);
-
- // Test the remainder for 0, because then the result would be -0.
- __ testl(rdx, rdx);
- __ j(not_zero, &done);
-
- DeoptimizeIf(no_condition, instr->environment());
- __ bind(&positive_left);
- __ idivl(right_reg);
__ bind(&done);
- } else {
- __ idivl(right_reg);
}
}
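
A C++ model of the HasPowerOf2Divisor fast path above: a single mask for nonnegative dividends, negate-mask-negate for negative ones. This matches truncated (C/JS-style) remainder semantics; INT_MIN is ignored for brevity.

#include <cassert>

int ModPowerOfTwo(int dividend, int divisor) {
  assert(divisor > 0 && (divisor & (divisor - 1)) == 0);
  if (dividend >= 0) return dividend & (divisor - 1);
  return -((-dividend) & (divisor - 1));  // negl; andl divisor-1; negl
}
// ModPowerOfTwo(-7, 4) == -3, agreeing with -7 % 4 in C++.
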
@@ -832,9 +922,9 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for (0 / -x) that will produce negative zero.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- NearLabel left_not_zero;
+ Label left_not_zero;
__ testl(left_reg, left_reg);
- __ j(not_zero, &left_not_zero);
+ __ j(not_zero, &left_not_zero, Label::kNear);
__ testl(right_reg, right_reg);
DeoptimizeIf(sign, instr->environment());
__ bind(&left_not_zero);
@@ -842,9 +932,9 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for (-kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- NearLabel left_not_min_int;
+ Label left_not_min_int;
__ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &left_not_min_int);
+ __ j(not_zero, &left_not_min_int, Label::kNear);
__ cmpl(right_reg, Immediate(-1));
DeoptimizeIf(zero, instr->environment());
__ bind(&left_not_min_int);
@@ -868,24 +958,64 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ movl(kScratchRegister, left);
}
+ bool can_overflow =
+ instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
if (right->IsConstantOperand()) {
int right_value = ToInteger32(LConstantOperand::cast(right));
- __ imull(left, left, Immediate(right_value));
+ if (right_value == -1) {
+ __ negl(left);
+ } else if (right_value == 0) {
+ __ xorl(left, left);
+ } else if (right_value == 2) {
+ __ addl(left, left);
+ } else if (!can_overflow) {
+ // If the multiplication is known to not overflow, we
+ // can use operations that don't set the overflow flag
+ // correctly.
+ switch (right_value) {
+ case 1:
+ // Do nothing.
+ break;
+ case 3:
+ __ leal(left, Operand(left, left, times_2, 0));
+ break;
+ case 4:
+ __ shll(left, Immediate(2));
+ break;
+ case 5:
+ __ leal(left, Operand(left, left, times_4, 0));
+ break;
+ case 8:
+ __ shll(left, Immediate(3));
+ break;
+ case 9:
+ __ leal(left, Operand(left, left, times_8, 0));
+ break;
+ case 16:
+ __ shll(left, Immediate(4));
+ break;
+ default:
+ __ imull(left, left, Immediate(right_value));
+ break;
+ }
+ } else {
+ __ imull(left, left, Immediate(right_value));
+ }
} else if (right->IsStackSlot()) {
__ imull(left, ToOperand(right));
} else {
__ imull(left, ToRegister(right));
}
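
The identities behind the constant-multiplier cases above, as a sketch: lea computes base + index * scale in one instruction, so x*3, x*5 and x*9 need no multiply, and shifts cover powers of two. These forms are used only when overflow cannot occur, because unlike imull they do not set the overflow flag.

int MulByConstant(int x, int m) {
  switch (m) {
    case 3:  return x + (x << 1);  // leal (x, x, times_2)
    case 5:  return x + (x << 2);  // leal (x, x, times_4)
    case 9:  return x + (x << 3);  // leal (x, x, times_8)
    case 4:  return x << 2;        // shll 2
    case 8:  return x << 3;        // shll 3
    case 16: return x << 4;        // shll 4
    default: return x * m;         // imull
  }
}
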
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ if (can_overflow) {
DeoptimizeIf(overflow, instr->environment());
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Bail out if the result is supposed to be negative zero.
- NearLabel done;
+ Label done;
__ testl(left, left);
- __ j(not_zero, &done);
+ __ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
DeoptimizeIf(no_condition, instr->environment());
@@ -1038,7 +1168,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
ASSERT(instr->result()->IsRegister());
- __ movl(ToRegister(instr->result()), Immediate(instr->value()));
+ __ Set(ToRegister(instr->result()), instr->value());
}
@@ -1050,7 +1180,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
// Use xor to produce +0.0 in a fast and compact way, but avoid to
// do so if the constant is -0.0.
if (int_val == 0) {
- __ xorpd(res, res);
+ __ xorps(res, res);
} else {
Register tmp = ToRegister(instr->TempAt(0));
__ Set(tmp, int_val);
@@ -1079,10 +1209,24 @@ void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
}
-void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
+void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->InputAt(0));
- __ movq(result, FieldOperand(array, PixelArray::kLengthOffset));
+ __ movl(result, FieldOperand(array, ExternalPixelArray::kLengthOffset));
+}
+
+
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+
+ // Load map into |result|.
+ __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
+ // Load the map's "bit field 2" into |result|. We only need the first byte.
+ __ movzxbq(result, FieldOperand(result, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(result, Immediate(Map::kElementsKindMask));
+ __ shr(result, Immediate(Map::kElementsKindShift));
}
@@ -1090,13 +1234,13 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(input.is(result));
- NearLabel done;
+ Label done;
// If the object is a smi return the object.
- __ JumpIfSmi(input, &done);
+ __ JumpIfSmi(input, &done, Label::kNear);
// If the object is not a value type, return the object.
__ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
- __ j(not_equal, &done);
+ __ j(not_equal, &done, Label::kNear);
__ movq(result, FieldOperand(input, JSValue::kValueOffset));
__ bind(&done);
@@ -1142,25 +1286,32 @@ void LCodeGen::DoAddI(LAddI* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ XMMRegister left = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister right = ToDoubleRegister(instr->InputAt(1));
+ XMMRegister result = ToDoubleRegister(instr->result());
// All operations except MOD are computed in-place.
- ASSERT(instr->op() == Token::MOD || left->Equals(instr->result()));
+ ASSERT(instr->op() == Token::MOD || left.is(result));
switch (instr->op()) {
case Token::ADD:
- __ addsd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ addsd(left, right);
break;
case Token::SUB:
- __ subsd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ subsd(left, right);
break;
case Token::MUL:
- __ mulsd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ mulsd(left, right);
break;
case Token::DIV:
- __ divsd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ divsd(left, right);
break;
case Token::MOD:
- Abort("Unimplemented: %s", "DoArithmeticD MOD");
+ __ PrepareCallCFunction(2);
+ __ movaps(xmm0, left);
+ ASSERT(right.is(xmm1));
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movaps(result, xmm0);
break;
default:
UNREACHABLE();
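
The Token::MOD case now calls out through ExternalReference::double_fp_operation with the operands in xmm0/xmm1 instead of aborting. Presumably the helper has fmod semantics, the result taking the dividend's sign; that is an assumption here, since the helper itself is not in this hunk.

#include <cmath>
#include <cstdio>

int main() {
  std::printf("%g\n", std::fmod(5.5, 2.0));   // 1.5
  std::printf("%g\n", std::fmod(-5.5, 2.0));  // -1.5, sign of the dividend
  return 0;
}
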
@@ -1174,7 +1325,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->InputAt(1)).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
- TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ BinaryOpStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -1219,7 +1370,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(true_block, false_block, not_zero);
} else if (r.IsDouble()) {
XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
- __ xorpd(xmm0, xmm0);
+ __ xorps(xmm0, xmm0);
__ ucomisd(reg, xmm0);
EmitBranch(true_block, false_block, not_equal);
} else {
@@ -1227,7 +1378,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
HType type = instr->hydrogen()->type();
if (type.IsBoolean()) {
- __ Cmp(reg, Factory::true_value());
+ __ CompareRoot(reg, Heap::kTrueValueRootIndex);
EmitBranch(true_block, false_block, equal);
} else if (type.IsSmi()) {
__ SmiCompare(reg, Smi::FromInt(0));
@@ -1242,19 +1393,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ j(equal, true_label);
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
__ j(equal, false_label);
- __ SmiCompare(reg, Smi::FromInt(0));
+ __ Cmp(reg, Smi::FromInt(0));
__ j(equal, false_label);
__ JumpIfSmi(reg, true_label);
// Test for double values. Plus/minus zero and NaN are false.
- NearLabel call_stub;
+ Label call_stub;
__ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_stub);
+ __ j(not_equal, &call_stub, Label::kNear);
// HeapNumber => false iff +0, -0, or NaN. These three cases set the
// zero flag when compared to zero using ucomisd.
- __ xorpd(xmm0, xmm0);
+ __ xorps(xmm0, xmm0);
__ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
__ j(zero, false_label);
__ jmp(true_label);
@@ -1262,7 +1413,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
// The conversion stub doesn't cause garbage collections so it's
// safe to not record a safepoint after the call.
__ bind(&call_stub);
- ToBooleanStub stub;
+ ToBooleanStub stub(rax);
__ Pushad();
__ push(reg);
__ CallStub(&stub);
@@ -1274,44 +1425,17 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
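
A reference model of the ToBoolean cases DoBranch now decides inline, under standard JavaScript semantics; everything not covered (strings, objects, undefined, ...) is delegated to ToBooleanStub:

#include <cmath>

enum class Outcome { kTrue, kFalse, kNeedStub };

Outcome InlineToBoolean(bool is_true_obj, bool is_false_obj, bool is_smi,
                        int smi_value, bool is_heap_number, double value) {
  if (is_true_obj) return Outcome::kTrue;
  if (is_false_obj) return Outcome::kFalse;
  if (is_smi) return smi_value == 0 ? Outcome::kFalse : Outcome::kTrue;
  if (is_heap_number) {
    // ucomisd sets the zero flag for +0, -0 and NaN: all of them are falsy.
    bool falsy = value == 0.0 || std::isnan(value);
    return falsy ? Outcome::kFalse : Outcome::kTrue;
  }
  return Outcome::kNeedStub;
}
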
-void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+void LCodeGen::EmitGoto(int block) {
block = chunk_->LookupDestination(block);
int next_block = GetNextEmittedBlock(current_block_);
if (block != next_block) {
- // Perform stack overflow check if this goto needs it before jumping.
- if (deferred_stack_check != NULL) {
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, chunk_->GetAssemblyLabel(block));
- __ jmp(deferred_stack_check->entry());
- deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
- } else {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
+ __ jmp(chunk_->GetAssemblyLabel(block));
}
}
-void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- PushSafepointRegistersScope scope(this);
- CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
-}
-
-
void LCodeGen::DoGoto(LGoto* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- private:
- LGoto* instr_;
- };
-
- DeferredStackCheck* deferred = NULL;
- if (instr->include_stack_check()) {
- deferred = new DeferredStackCheck(this, instr);
- }
- EmitGoto(instr->block_id(), deferred);
+ EmitGoto(instr->block_id());
}
@@ -1364,20 +1488,20 @@ void LCodeGen::DoCmpID(LCmpID* instr) {
LOperand* right = instr->InputAt(1);
LOperand* result = instr->result();
- NearLabel unordered;
+ Label unordered;
if (instr->is_double()) {
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the unordered case, which produces a false value.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, &unordered);
+ __ j(parity_even, &unordered, Label::kNear);
} else {
EmitCmpI(left, right);
}
- NearLabel done;
+ Label done;
Condition cc = TokenToCondition(instr->op(), instr->is_double());
__ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
- __ j(cc, &done);
+ __ j(cc, &done, Label::kNear);
__ bind(&unordered);
__ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
@@ -1405,23 +1529,23 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
}
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+void LCodeGen::DoCmpObjectEq(LCmpObjectEq* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
- NearLabel different, done;
+ Label different, done;
__ cmpq(left, right);
- __ j(not_equal, &different);
+ __ j(not_equal, &different, Label::kNear);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&different);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1432,6 +1556,29 @@ void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
}
+void LCodeGen::DoCmpConstantEq(LCmpConstantEq* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ Label done;
+ __ cmpq(left, Immediate(instr->hydrogen()->right()));
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ j(equal, &done, Label::kNear);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ cmpq(left, Immediate(instr->hydrogen()->right()));
+ EmitBranch(true_block, false_block, equal);
+}
+
+
void LCodeGen::DoIsNull(LIsNull* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -1446,28 +1593,29 @@ void LCodeGen::DoIsNull(LIsNull* instr) {
__ CompareRoot(reg, Heap::kNullValueRootIndex);
if (instr->is_strict()) {
+ ASSERT(Heap::kTrueValueRootIndex >= 0);
__ movl(result, Immediate(Heap::kTrueValueRootIndex));
- NearLabel load;
- __ j(equal, &load);
- __ movl(result, Immediate(Heap::kFalseValueRootIndex));
+ Label load;
+ __ j(equal, &load, Label::kNear);
+ __ Set(result, Heap::kFalseValueRootIndex);
__ bind(&load);
- __ movq(result, Operand(kRootRegister, result, times_pointer_size, 0));
+ __ LoadRootIndexed(result, result, 0);
} else {
- NearLabel true_value, false_value, done;
- __ j(equal, &true_value);
+ Label false_value, true_value, done;
+ __ j(equal, &true_value, Label::kNear);
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ j(equal, &true_value);
- __ JumpIfSmi(reg, &false_value);
+ __ j(equal, &true_value, Label::kNear);
+ __ JumpIfSmi(reg, &false_value, Label::kNear);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
Register scratch = result;
__ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
__ testb(FieldOperand(scratch, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &true_value);
+ __ j(not_zero, &true_value, Label::kNear);
__ bind(&false_value);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&true_value);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
@@ -1491,14 +1639,14 @@ void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
- __ Cmp(reg, Factory::null_value());
+ __ CompareRoot(reg, Heap::kNullValueRootIndex);
if (instr->is_strict()) {
EmitBranch(true_block, false_block, equal);
} else {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ j(equal, true_label);
- __ Cmp(reg, Factory::undefined_value());
+ __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ j(equal, true_label);
__ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
@@ -1530,9 +1678,9 @@ Condition LCodeGen::EmitIsObject(Register input,
__ movzxbl(kScratchRegister,
FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ cmpb(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, is_not_object);
- __ cmpb(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
+ __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
return below_equal;
}
@@ -1582,8 +1730,7 @@ void LCodeGen::DoIsSmi(LIsSmi* instr) {
}
// result is zero if input is a smi, and one otherwise.
ASSERT(Heap::kFalseValueRootIndex == Heap::kTrueValueRootIndex + 1);
- __ movq(result, Operand(kRootRegister, result, times_pointer_size,
- Heap::kTrueValueRootIndex * kPointerSize));
+ __ LoadRootIndexed(result, result, Heap::kTrueValueRootIndex);
}
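
A toy model of the LoadRootIndexed trick above: because false_value sits immediately after true_value in the root list (the ASSERT), a 0/1 flag selects the heap value with one indexed load and no branch. Indices here are illustrative, not V8's.

#include <cassert>

const char* kRootList[] = {"true_value", "false_value"};
const int kTrueValueRootIndex = 0;
const int kFalseValueRootIndex = 1;

const char* IsSmiResult(int flag_zero_if_smi) {
  assert(kFalseValueRootIndex == kTrueValueRootIndex + 1);
  // flag is 0 for a smi (answer true) and 1 otherwise (answer false).
  return kRootList[kTrueValueRootIndex + flag_zero_if_smi];
}
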
@@ -1603,6 +1750,40 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
}
+void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+ Label false_label, done;
+ __ JumpIfSmi(input, &false_label);
+ __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
+ __ testb(FieldOperand(result, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &false_label);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&false_label);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ testb(FieldOperand(temp, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ EmitBranch(true_block, false_block, not_zero);
+}
+
+
static InstanceType TestType(HHasInstanceType* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
@@ -1629,12 +1810,13 @@ void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
__ testl(input, Immediate(kSmiTagMask));
- NearLabel done, is_false;
+ Label done, is_false;
__ j(zero, &is_false);
__ CmpObjectType(input, TestType(instr->hydrogen()), result);
- __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
+ __ j(NegateCondition(BranchCondition(instr->hydrogen())),
+ &is_false, Label::kNear);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&is_false);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
@@ -1656,6 +1838,20 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
}
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(input);
+ }
+
+ __ movl(result, FieldOperand(input, String::kHashFieldOffset));
+ ASSERT(String::kHashShift >= kSmiTagSize);
+ __ IndexFromHash(result, result);
+}
+
+
void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -1664,8 +1860,8 @@ void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ testl(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
- NearLabel done;
- __ j(not_zero, &done);
+ Label done;
+ __ j(zero, &done, Label::kNear);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
@@ -1680,7 +1876,7 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch(
__ testl(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, not_equal);
+ EmitBranch(true_block, false_block, equal);
}
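
The branch polarity fixed above encodes this predicate: a string's hash field contains a cached array index exactly when the masked bits are all zero, so the testl must take the true block on equal (zero flag set).

#include <cstdint>

bool ContainsCachedArrayIndex(uint32_t hash_field, uint32_t mask) {
  return (hash_field & mask) == 0;
}
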
@@ -1692,26 +1888,27 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
Register input,
Register temp) {
__ JumpIfSmi(input, is_false);
- __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
+ __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
__ j(below, is_false);
// Map is now in temp.
// Functions have class 'Function'.
- __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
+ __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ j(equal, is_true);
+ __ j(above_equal, is_true);
} else {
- __ j(equal, is_false);
+ __ j(above_equal, is_false);
}
// Check if the constructor in the map is a function.
__ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last type and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
@@ -1744,7 +1941,7 @@ void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
ASSERT(input.is(result));
Register temp = ToRegister(instr->TempAt(0));
Handle<String> class_name = instr->hydrogen()->class_name();
- NearLabel done;
+ Label done;
Label is_true, is_false;
EmitClassOfTest(&is_true, &is_false, class_name, input, temp);
@@ -1753,7 +1950,7 @@ void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
__ bind(&is_true);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&is_false);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
@@ -1793,30 +1990,17 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
__ push(ToRegister(instr->InputAt(0)));
__ push(ToRegister(instr->InputAt(1)));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- NearLabel true_value, done;
+ Label true_value, done;
__ testq(rax, rax);
- __ j(zero, &true_value);
+ __ j(zero, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&true_value);
__ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
__ bind(&done);
}
-void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ push(ToRegister(instr->InputAt(0)));
- __ push(ToRegister(instr->InputAt(1)));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ testq(rax, rax);
- EmitBranch(true_block, false_block, zero);
-}
-
-
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal: public LDeferredCode {
public:
@@ -1824,26 +2008,53 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
- codegen()->DoDeferredLInstanceOfKnownGlobal(instr_);
+ codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
}
+ Label* map_check() { return &map_check_; }
+
private:
LInstanceOfKnownGlobal* instr_;
+ Label map_check_;
};
DeferredInstanceOfKnownGlobal* deferred;
deferred = new DeferredInstanceOfKnownGlobal(this, instr);
- Label false_result;
+ Label done, false_result;
Register object = ToRegister(instr->InputAt(0));
// A Smi is not an instance of anything.
__ JumpIfSmi(object, &false_result);
- // Null is not an instance of anything.
+ // This is the inlined call site instanceof cache. The two occurrences of the
+ // hole value will be patched to the last map/result pair generated by the
+ // instanceof stub.
+ Label cache_miss;
+ // Use a temp register to avoid memory operands with variable lengths.
+ Register map = ToRegister(instr->TempAt(0));
+ __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
+ __ bind(deferred->map_check()); // Label for calculating code patching.
+ __ movq(kScratchRegister, factory()->the_hole_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ cmpq(map, kScratchRegister); // Patched to cached map.
+ __ j(not_equal, &cache_miss, Label::kNear);
+ // Patched to load either true or false.
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
+#ifdef DEBUG
+ // Check that the code size between the patch label and the patch sites is invariant.
+ Label end_of_patched_code;
+ __ bind(&end_of_patched_code);
+ ASSERT(true);
+#endif
+ __ jmp(&done);
+
+ // The inlined call site cache did not match. Check for null and string
+ // before calling the deferred code.
+ __ bind(&cache_miss); // Null is not an instance of anything.
__ CompareRoot(object, Heap::kNullValueRootIndex);
- __ j(equal, &false_result);
+ __ j(equal, &false_result, Label::kNear);
// String values are not instances of anything.
__ JumpIfNotString(object, kScratchRegister, deferred->entry());
@@ -1852,23 +2063,40 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ bind(deferred->exit());
+ __ bind(&done);
}
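
The hole-value compare and the patched LoadRoot above form a one-entry cache that the instanceof stub later rewrites in place with the last map/result pair it saw. As a mental model only (ordinary C++ state, nothing like the actual patched machine code), each call site behaves roughly like this:

// One-entry instanceof cache per call site; "patching" is modeled as
// mutating cached_map/cached_result instead of rewriting instructions.
struct InstanceofSite {
  const void* cached_map = nullptr;  // plays the role of the hole value
  bool cached_result = false;

  bool Check(const void* object_map, bool (*slow_path)(const void*)) {
    if (object_map == cached_map) return cached_result;  // inline cache hit
    cached_result = slow_path(object_map);               // miss: full check
    cached_map = object_map;                             // remember the pair
    return cached_result;
  }
};

static bool FullInstanceofCheck(const void*) { return true; }  // stand-in

int main() {
  InstanceofSite site;
  int fake_map = 0;                         // any address works as a map here
  site.Check(&fake_map, FullInstanceofCheck);         // miss, runs slow path
  return site.Check(&fake_map, FullInstanceofCheck) ? 0 : 1;  // cache hit
}
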
-void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check) {
{
PushSafepointRegistersScope scope(this);
-
- InstanceofStub stub(InstanceofStub::kNoFlags);
+ InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
+ InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
+ InstanceofStub stub(flags);
__ push(ToRegister(instr->InputAt(0)));
__ Push(instr->function());
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+ Register temp = ToRegister(instr->TempAt(0));
+ static const int kAdditionalDelta = 10;
+ int delta =
+ masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
+ ASSERT(delta >= 0);
+ __ push_imm32(delta);
+
+ // We are pushing three values on the stack but recording a
+ // safepoint with two arguments because the stub is going to
+ // remove the third argument from the stack before jumping
+ // to the instanceof builtin on the slow path.
CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS,
2);
+ ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
+ // Move result to a register that survives the end of the
+ // PushSafepointRegisterScope.
__ movq(kScratchRegister, rax);
}
__ testq(kScratchRegister, kScratchRegister);
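
The pushed delta above tells the stub how far behind its return address the patchable cmp/LoadRoot site sits; kAdditionalDelta covers the fixed-size code emitted between the push and the call, and the ASSERT after the call re-checks that assumption. The pointer arithmetic on the stub side, sketched with illustrative numbers:

#include <cstdint>

// The stub takes its return address and the delta the call site pushed,
// and walks backwards to the instructions it needs to patch.
uint8_t* PatchSiteFromReturnAddress(uint8_t* return_address,
                                    int32_t pushed_delta) {
  return return_address - pushed_delta;
}

int main() {
  uint8_t code[64] = {};                  // pretend code buffer
  uint8_t* return_address = code + 42;
  return PatchSiteFromReturnAddress(return_address, 10) == code + 32 ? 0 : 1;
}
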
@@ -1893,36 +2121,17 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
if (op == Token::GT || op == Token::LTE) {
condition = ReverseCondition(condition);
}
- NearLabel true_value, done;
+ Label true_value, done;
__ testq(rax, rax);
- __ j(condition, &true_value);
+ __ j(condition, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&true_value);
__ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
__ bind(&done);
}
-void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- // The compare stub expects compare condition and the input operands
- // reversed for GT and LTE.
- Condition condition = TokenToCondition(op, false);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
- __ testq(rax, rax);
- EmitBranch(true_block, false_block, condition);
-}
-
-
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace) {
// Preserve the return value on the stack and rely on the runtime
@@ -1932,11 +2141,11 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ movq(rsp, rbp);
__ pop(rbp);
- __ Ret((ParameterCount() + 1) * kPointerSize, rcx);
+ __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
}
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
if (result.is(rax)) {
__ load_rax(instr->hydrogen()->cell().location(),
@@ -1952,7 +2161,19 @@ void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
}
-void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->global_object()).is(rax));
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ __ Move(rcx, instr->name());
+ RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
+ RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, mode, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
ASSERT(!value.is(temp));
@@ -1975,6 +2196,18 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
}
+void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->global_object()).is(rdx));
+ ASSERT(ToRegister(instr->value()).is(rax));
+
+ __ Move(rcx, instr->name());
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -1988,7 +2221,8 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ movq(ContextOperand(context, instr->slot_index()), value);
if (instr->needs_write_barrier()) {
int offset = Context::SlotOffset(instr->slot_index());
- __ RecordWrite(context, offset, value, kScratchRegister);
+ Register scratch = ToRegister(instr->TempAt(0));
+ __ RecordWrite(context, offset, value, scratch);
}
}
@@ -2005,12 +2239,82 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
+void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name) {
+ LookupResult lookup;
+ type->LookupInDescriptors(NULL, *name, &lookup);
+ ASSERT(lookup.IsProperty() &&
+ (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
+ if (lookup.type() == FIELD) {
+ int index = lookup.GetLocalFieldIndexFromMap(*type);
+ int offset = index * kPointerSize;
+ if (index < 0) {
+ // Negative property indices are in-object properties, indexed
+ // from the end of the fixed part of the object.
+ __ movq(result, FieldOperand(object, offset + type->instance_size()));
+ } else {
+ // Non-negative property indices are in the properties array.
+ __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
+ }
+ } else {
+ Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
+ LoadHeapObject(result, Handle<HeapObject>::cast(function));
+ }
+}
+
+
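
EmitLoadFieldOrConstantFunction relies on V8's split between in-object fields (negative indices, stored inside the object itself) and overflow fields (non-negative indices, stored in a separate properties array). The offset arithmetic in isolation, with a made-up instance size:

constexpr int kPointerSize = 8;

// Negative indices count back from the end of the object itself;
// non-negative indices land in the external properties array.
struct FieldLocation {
  bool in_object;
  int byte_offset;  // relative to the object or to the properties array
};

FieldLocation LocateField(int index, int instance_size) {
  if (index < 0) {
    return { true, index * kPointerSize + instance_size };
  }
  return { false, index * kPointerSize };  // plus the array header in practice
}

int main() {
  FieldLocation loc = LocateField(-1, 64);  // last in-object slot
  return (loc.in_object && loc.byte_offset == 56) ? 0 : 1;
}
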
+void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
+ Register object = ToRegister(instr->object());
+ Register result = ToRegister(instr->result());
+
+ int map_count = instr->hydrogen()->types()->length();
+ Handle<String> name = instr->hydrogen()->name();
+
+ if (map_count == 0) {
+ ASSERT(instr->hydrogen()->need_generic());
+ __ Move(rcx, instr->hydrogen()->name());
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ Label done;
+ for (int i = 0; i < map_count - 1; ++i) {
+ Handle<Map> map = instr->hydrogen()->types()->at(i);
+ Label next;
+ __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+ __ j(not_equal, &next, Label::kNear);
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ jmp(&done, Label::kNear);
+ __ bind(&next);
+ }
+ Handle<Map> map = instr->hydrogen()->types()->last();
+ __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+ if (instr->hydrogen()->need_generic()) {
+ Label generic;
+ __ j(not_equal, &generic, Label::kNear);
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ jmp(&done, Label::kNear);
+ __ bind(&generic);
+ __ Move(rcx, instr->hydrogen()->name());
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ DeoptimizeIf(not_equal, instr->environment());
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ }
+ __ bind(&done);
+ }
+}
+
+
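
The compare chain above is the classic polymorphic inline-cache shape: try each known map with a near jump, then either fall back to the generic IC or deoptimize on the final map. The same control flow in ordinary C++ (the types are stand-ins, and at least one map is assumed, as in the emitted code):

#include <cstddef>
#include <vector>

struct Map {};
struct Object { const Map* map; };

// Compare against each expected map in turn; the last case either falls
// back to a generic lookup or "deoptimizes".
template <typename LoadFn, typename GenericFn>
int PolymorphicLoad(const Object& obj, const std::vector<const Map*>& maps,
                    LoadFn load_for_map, GenericFn generic, bool need_generic) {
  for (std::size_t i = 0; i + 1 < maps.size(); ++i) {
    if (obj.map == maps[i]) return load_for_map(i);
  }
  if (obj.map == maps.back()) return load_for_map(maps.size() - 1);
  if (need_generic) return generic();
  throw "deoptimize";  // stand-in for DeoptimizeIf
}

int main() {
  Map m1, m2;
  Object o{&m1};
  std::vector<const Map*> maps{&m1, &m2};
  return PolymorphicLoad(o, maps, [](std::size_t i) { return int(i); },
                         [] { return -1; }, true);
}
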
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
__ Move(rcx, instr->name());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2024,10 +2328,10 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
DeoptimizeIf(not_equal, instr->environment());
// Check whether the function has an instance prototype.
- NearLabel non_instance;
+ Label non_instance;
__ testb(FieldOperand(result, Map::kBitFieldOffset),
Immediate(1 << Map::kHasNonInstancePrototype));
- __ j(not_zero, &non_instance);
+ __ j(not_zero, &non_instance, Label::kNear);
// Get the prototype or initial map from the function.
__ movq(result,
@@ -2038,13 +2342,13 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
DeoptimizeIf(equal, instr->environment());
// If the function does not have an initial map, we're done.
- NearLabel done;
+ Label done;
__ CmpObjectType(result, MAP_TYPE, kScratchRegister);
- __ j(not_equal, &done);
+ __ j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
__ movq(result, FieldOperand(result, Map::kPrototypeOffset));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// Non-instance prototype: Fetch prototype from constructor field
// in the function's map.
@@ -2061,26 +2365,40 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
Register input = ToRegister(instr->InputAt(0));
__ movq(result, FieldOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
- NearLabel done;
- __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
- Factory::fixed_array_map());
- __ j(equal, &done);
- __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
- Factory::pixel_array_map());
- __ j(equal, &done);
- __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
- Factory::fixed_cow_array_map());
- __ Check(equal, "Check for fast elements failed.");
+ Label done, ok, fail;
+ __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(equal, &done, Label::kNear);
+ __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
+ Heap::kFixedCOWArrayMapRootIndex);
+ __ j(equal, &done, Label::kNear);
+ Register temp((result.is(rax)) ? rbx : rax);
+ __ push(temp);
+ __ movq(temp, FieldOperand(result, HeapObject::kMapOffset));
+ __ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset));
+ __ and_(temp, Immediate(Map::kElementsKindMask));
+ __ shr(temp, Immediate(Map::kElementsKindShift));
+ __ cmpl(temp, Immediate(JSObject::FAST_ELEMENTS));
+ __ j(equal, &ok, Label::kNear);
+ __ cmpl(temp, Immediate(JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+ __ j(less, &fail, Label::kNear);
+ __ cmpl(temp, Immediate(JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+ __ j(less_equal, &ok, Label::kNear);
+ __ bind(&fail);
+ __ Abort("Check for fast or external elements failed");
+ __ bind(&ok);
+ __ pop(temp);
__ bind(&done);
}
}
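
The debug check above decodes the elements kind from the map's packed bit_field2 byte with a mask and shift, then accepts either FAST_ELEMENTS or anything inside the external-array window. The decode step on its own, with illustrative constants:

#include <cstdint>

// Illustrative packing; the real mask and shift are defined on Map.
constexpr uint8_t kElementsKindMask = 0x3C;
constexpr int kElementsKindShift = 2;

int ElementsKindOf(uint8_t bit_field2) {
  return (bit_field2 & kElementsKindMask) >> kElementsKindShift;
}

bool InExternalArrayWindow(int kind, int first_external, int last_external) {
  return kind >= first_external && kind <= last_external;
}

int main() { return ElementsKindOf(0x0C) == 3 ? 0 : 1; }
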
-void LCodeGen::DoLoadPixelArrayExternalPointer(
- LLoadPixelArrayExternalPointer* instr) {
+void LCodeGen::DoLoadExternalArrayPointer(
+ LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->InputAt(0));
- __ movq(result, FieldOperand(input, PixelArray::kExternalPointerOffset));
+ __ movq(result, FieldOperand(input,
+ ExternalPixelArray::kExternalPointerOffset));
}
@@ -2115,19 +2433,80 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
FixedArray::kHeaderSize));
// Check for the hole value.
- __ Cmp(result, Factory::the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
}
-void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
- Register external_elements = ToRegister(instr->external_pointer());
- Register key = ToRegister(instr->key());
- Register result = ToRegister(instr->result());
- ASSERT(result.is(external_elements));
+Operand LCodeGen::BuildExternalArrayOperand(
+ LOperand* external_pointer,
+ LOperand* key,
+ JSObject::ElementsKind elements_kind) {
+ Register external_pointer_reg = ToRegister(external_pointer);
+ int shift_size = ElementsKindToShiftSize(elements_kind);
+ if (key->IsConstantOperand()) {
+ int constant_value = ToInteger32(LConstantOperand::cast(key));
+ if (constant_value & 0xF0000000) {
+ Abort("array index constant value too big");
+ }
+ return Operand(external_pointer_reg, constant_value * (1 << shift_size));
+ } else {
+ ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
+ return Operand(external_pointer_reg, ToRegister(key), scale_factor, 0);
+ }
+}
- // Load the result.
- __ movzxbq(result, Operand(external_elements, key, times_1, 0));
+
+void LCodeGen::DoLoadKeyedSpecializedArrayElement(
+ LLoadKeyedSpecializedArrayElement* instr) {
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
+ instr->key(), elements_kind));
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ XMMRegister result(ToDoubleRegister(instr->result()));
+ __ movss(result, operand);
+ __ cvtss2sd(result, result);
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ __ movsd(ToDoubleRegister(instr->result()), operand);
+ } else {
+ Register result(ToRegister(instr->result()));
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ __ movsxbq(result, operand);
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ __ movzxbq(result, operand);
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ __ movsxwq(result, operand);
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ movzxwq(result, operand);
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ __ movsxlq(result, operand);
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ movl(result, operand);
+ __ testl(result, result);
+ // TODO(danno): we could be more clever here, perhaps having a special
+ // version of the stub that detects if the overflow case actually
+ // happens, and generates code that returns a double rather than an int.
+ DeoptimizeIf(negative, instr->environment());
+ break;
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
}
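
BuildExternalArrayOperand scales the key by the element size, 1 << shift, and the load itself then sign- or zero-extends according to the kind. The EXTERNAL_UNSIGNED_INT case deoptimizes when the top bit is set because a uint32 above INT32_MAX has no int32 representation and would need a heap number instead. A sketch of both ideas:

#include <cstdint>
#include <cstring>
#include <optional>

// Element size in bytes is 1 << shift: bytes 0, shorts 1, ints/floats 2, ...
inline const uint8_t* ElementAddress(const uint8_t* base, int key, int shift) {
  return base + (static_cast<intptr_t>(key) << shift);
}

// A uint32 load only fits the int32 fast path when its sign bit is clear;
// otherwise the compiled code bails out (V8 deoptimizes at that point).
std::optional<int32_t> LoadUint32AsInt32(const uint8_t* addr) {
  uint32_t raw;
  std::memcpy(&raw, addr, sizeof raw);
  if (raw > 0x7FFFFFFFu) return std::nullopt;  // matches DeoptimizeIf(negative)
  return static_cast<int32_t>(raw);
}

int main() {
  uint8_t buf[8] = {0xFF, 0xFF, 0xFF, 0xFF};   // 0xFFFFFFFF, little-endian
  return LoadUint32AsInt32(ElementAddress(buf, 0, 2)) ? 1 : 0;  // no int32 fit
}
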
@@ -2135,7 +2514,7 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->key()).is(rax));
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2144,15 +2523,15 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
// Check for arguments adapter frame.
- NearLabel done, adapted;
+ Label done, adapted;
__ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(result, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adapted);
+ __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adapted, Label::kNear);
// No arguments adaptor frame.
__ movq(result, rbp);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// Arguments adaptor frame present.
__ bind(&adapted);
@@ -2167,7 +2546,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
Register result = ToRegister(instr->result());
- NearLabel done;
+ Label done;
// If no arguments adaptor frame the number of arguments is fixed.
if (instr->InputAt(0)->IsRegister()) {
@@ -2175,14 +2554,14 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
} else {
__ cmpq(rbp, ToOperand(instr->InputAt(0)));
}
- __ movq(result, Immediate(scope()->num_parameters()));
- __ j(equal, &done);
+ __ movl(result, Immediate(scope()->num_parameters()));
+ __ j(equal, &done, Label::kNear);
// Arguments adaptor frame present. Get argument length from there.
__ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(result, Operand(result,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToInteger32(result, result);
+ __ SmiToInteger32(result,
+ Operand(result,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
// Argument length is in result register.
__ bind(&done);
@@ -2198,27 +2577,46 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
ASSERT(function.is(rdi)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(rax));
- // If the receiver is null or undefined, we have to pass the global object
- // as a receiver.
- NearLabel global_object, receiver_ok;
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
+ Label global_object, receiver_ok;
+
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ movq(kScratchRegister,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(kScratchRegister,
+ SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, &receiver_ok, Label::kNear);
+
+ // Do not transform the receiver to object for builtins.
+ __ testb(FieldOperand(kScratchRegister,
+ SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, &receiver_ok, Label::kNear);
+
+ // Normal function. Replace undefined or null with global receiver.
__ CompareRoot(receiver, Heap::kNullValueRootIndex);
- __ j(equal, &global_object);
+ __ j(equal, &global_object, Label::kNear);
__ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
- __ j(equal, &global_object);
+ __ j(equal, &global_object, Label::kNear);
// The receiver should be a JS object.
Condition is_smi = __ CheckSmi(receiver);
DeoptimizeIf(is_smi, instr->environment());
- __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
DeoptimizeIf(below, instr->environment());
- __ jmp(&receiver_ok);
+ __ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
// TODO(kmillikin): We have a hydrogen value for the global object. See
// if it's better to use it than to explicitly fetch it from the context
// here.
- __ movq(receiver, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movq(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
+ __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_INDEX));
+ __ movq(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
// Copy the arguments to this function possibly from the
@@ -2232,10 +2630,10 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Loop through the arguments pushing them onto the execution
// stack.
- NearLabel invoke, loop;
+ Label invoke, loop;
// length is a small non-negative integer, due to the test above.
__ testl(length, length);
- __ j(zero, &invoke);
+ __ j(zero, &invoke, Label::kNear);
__ bind(&loop);
__ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
__ decl(length);
@@ -2252,26 +2650,27 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
pointers,
env->deoptimization_index());
v8::internal::ParameterCount actual(rax);
- __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
+ __ InvokeFunction(function, actual, CALL_FUNCTION,
+ safepoint_generator, CALL_AS_METHOD);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
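
The receiver fix-up above encodes the rule this diff introduces for Function.prototype.apply: strict-mode and native functions see the receiver value unchanged, while for ordinary functions null and undefined are replaced by the global receiver and other primitives are rejected (by deopt here, rather than a ToObject call). The decision logic as plain C++:

enum class ReceiverKind { kNullOrUndefined, kSmiOrPrimitive, kSpecObject };

struct FunctionInfo { bool strict_mode; bool native; };

// Returns true when the receiver must be swapped for the global receiver;
// throws (standing in for deoptimization) when a primitive reaches a
// classic-mode, non-native function.
bool NeedsGlobalReceiver(const FunctionInfo& f, ReceiverKind r) {
  if (f.strict_mode || f.native) return false;  // pass value through untouched
  if (r == ReceiverKind::kNullOrUndefined) return true;
  if (r != ReceiverKind::kSpecObject) throw "deoptimize";
  return false;
}

int main() {
  FunctionInfo f{false, false};
  return NeedsGlobalReceiver(f, ReceiverKind::kNullOrUndefined) ? 0 : 1;
}
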
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->InputAt(0);
- if (argument->IsConstantOperand()) {
- EmitPushConstantOperand(argument);
- } else if (argument->IsRegister()) {
- __ push(ToRegister(argument));
- } else {
- ASSERT(!argument->IsDoubleRegister());
- __ push(ToOperand(argument));
- }
+ EmitPushTaggedOperand(argument);
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
- __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movq(result, rsi);
}
@@ -2279,8 +2678,7 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ movq(result,
- Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ movq(result, FieldOperand(result, JSFunction::kContextOffset));
+ Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
@@ -2291,18 +2689,19 @@ void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+ Register global = ToRegister(instr->global());
Register result = ToRegister(instr->result());
- __ movq(result, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
+ __ movq(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
}
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int arity,
- LInstruction* instr) {
+ LInstruction* instr,
+ CallKind call_kind) {
// Change context if needed.
bool change_context =
- (graph()->info()->closure()->context() != function->context()) ||
+ (info()->closure()->context() != function->context()) ||
scope()->contains_with() ||
(scope()->num_heap_slots() > 0);
if (change_context) {
@@ -2319,7 +2718,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
RecordPosition(pointers->position());
// Invoke function.
- if (*function == *graph()->info()->closure()) {
+ __ SetCallKind(rcx, call_kind);
+ if (*function == *info()->closure()) {
__ CallSelf();
} else {
__ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
@@ -2336,7 +2736,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
__ Move(rdi, instr->function());
- CallKnownFunction(instr->function(), instr->arity(), instr);
+ CallKnownFunction(instr->function(),
+ instr->arity(),
+ instr,
+ CALL_AS_METHOD);
}
@@ -2423,7 +2826,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
if (r.IsDouble()) {
XMMRegister scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- __ xorpd(scratch, scratch);
+ __ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
__ andpd(input_reg, scratch);
} else if (r.IsInteger32()) {
@@ -2434,7 +2837,9 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
Register input_reg = ToRegister(instr->InputAt(0));
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiToInteger32(input_reg, input_reg);
EmitIntegerMathAbs(instr);
+ __ Integer32ToSmi(input_reg, input_reg);
__ bind(deferred->exit());
}
}
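
The added SmiToInteger32/Integer32ToSmi pair makes the 32-bit abs path operate on the raw integer instead of the tagged smi bits. The integer path itself must refuse INT32_MIN, whose absolute value does not fit in 32 bits; a standalone equivalent of that guard:

#include <cstdint>
#include <limits>

// Mirrors EmitIntegerMathAbs: negate when negative, but treat the
// non-representable abs(INT32_MIN) as a failure (V8 deoptimizes there).
bool IntAbs(int32_t v, int32_t* out) {
  if (v == std::numeric_limits<int32_t>::min()) return false;
  *out = v < 0 ? -v : v;
  return true;
}

int main() {
  int32_t out = 0;
  return IntAbs(-7, &out) && out == 7 ? 0 : 1;
}
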
@@ -2444,21 +2849,36 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(below_equal, instr->environment());
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatures::Scope scope(SSE4_1);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Deoptimize if minus zero.
+ __ movq(output_reg, input_reg);
+ __ subq(output_reg, Immediate(1));
+ DeoptimizeIf(overflow, instr->environment());
+ }
+ __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
+ __ cvttsd2si(output_reg, xmm_scratch);
+ __ cmpl(output_reg, Immediate(0x80000000));
+ DeoptimizeIf(equal, instr->environment());
} else {
- DeoptimizeIf(below, instr->environment());
- }
+ __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
+ __ ucomisd(input_reg, xmm_scratch);
- // Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, input_reg);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(below_equal, instr->environment());
+ } else {
+ DeoptimizeIf(below, instr->environment());
+ }
- // Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
+ // Use truncating instruction (OK because input is positive).
+ __ cvttsd2si(output_reg, input_reg);
+
+ // Overflow is signalled with minint.
+ __ cmpl(output_reg, Immediate(0x80000000));
+ DeoptimizeIf(equal, instr->environment());
+ }
}
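
The SSE4.1 branch detects -0 without a floating-point compare: it moves the double's raw bits into a general-purpose register and subtracts 1, which overflows exactly when the bits are 0x8000000000000000, the IEEE 754 encoding of -0.0. The later 0x80000000 compare catches cvttsd2si's out-of-range sentinel. The bit-pattern fact is easy to check in plain C++:

#include <cassert>
#include <cstdint>
#include <cstring>

bool IsMinusZero(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);   // same idea as the movq xmm -> gpr
  return bits == 0x8000000000000000ull;  // only -0.0 has this encoding
}

int main() {
  assert(IsMinusZero(-0.0));
  assert(!IsMinusZero(0.0));
  // Subtracting 1 from INT64_MIN is the signed-overflow probe the
  // generated code uses; checked here on the unsigned pattern instead.
  assert(0x8000000000000000ull - 1 == 0x7FFFFFFFFFFFFFFFull);
}
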
@@ -2467,33 +2887,45 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ Label done;
// xmm_scratch = 0.5
__ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
__ movq(xmm_scratch, kScratchRegister);
-
+ Label below_half;
+ __ ucomisd(xmm_scratch, input_reg);
+ // If input_reg is NaN, this doesn't jump.
+ __ j(above, &below_half, Label::kNear);
// input = input + 0.5
+ // This addition might give a result that isn't correct for
+ // rounding, due to loss of precision, but only for a number that's
+ // so big that the conversion below will overflow anyway.
__ addsd(input_reg, xmm_scratch);
+ // Compute Math.floor(input).
+ // Use truncating instruction (OK because input is positive).
+ __ cvttsd2si(output_reg, input_reg);
+ // Overflow is signalled with minint.
+ __ cmpl(output_reg, Immediate(0x80000000));
+ DeoptimizeIf(equal, instr->environment());
+ __ jmp(&done);
- // We need to return -0 for the input range [-0.5, 0[, otherwise
- // compute Math.floor(value + 0.5).
+ __ bind(&below_half);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(below_equal, instr->environment());
+ // Bail out if negative (including -0).
+ __ movq(output_reg, input_reg);
+ __ testq(output_reg, output_reg);
+ DeoptimizeIf(negative, instr->environment());
} else {
- // If we don't need to bailout on -0, we check only bailout
- // on negative inputs.
- __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
+ // Bail out if below -0.5, otherwise round to (positive) zero, even
+ // if negative.
+ // xmm_scratch = -0.5
+ __ movq(kScratchRegister, V8_INT64_C(0xBFE0000000000000), RelocInfo::NONE);
+ __ movq(xmm_scratch, kScratchRegister);
__ ucomisd(input_reg, xmm_scratch);
DeoptimizeIf(below, instr->environment());
}
+ __ xorl(output_reg, output_reg);
- // Compute Math.floor(value + 0.5).
- // Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, input_reg);
-
- // Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
+ __ bind(&done);
}
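
The rewritten DoMathRound splits on 0.5: at or above it the result is truncate(input + 0.5), with the precision caveat noted in the comment; below it the result is always zero, bailing out for -0 or anything under -0.5 depending on the flags. A scalar model of that behavior, reporting failure where the compiled code would deoptimize:

#include <cmath>
#include <cstdint>
#include <limits>

// Round-half-up for x >= 0.5, zero for [-0.5, 0.5); NaN, larger negatives,
// and out-of-range values fail the way the compiled code deopts.
bool JsRoundToInt32(double x, int32_t* out) {
  if (std::isnan(x)) return false;
  if (x >= 0.5) {
    double y = std::trunc(x + 0.5);          // cvttsd2si truncates
    if (y > static_cast<double>(std::numeric_limits<int32_t>::max()))
      return false;                          // the 0x80000000 sentinel case
    *out = static_cast<int32_t>(y);
    return true;
  }
  if (x >= -0.5) { *out = 0; return true; }  // includes -0 when allowed
  return false;
}

int main() {
  int32_t out = 0;
  return JsRoundToInt32(2.5, &out) && out == 3 ? 0 : 1;
}
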
@@ -2508,7 +2940,7 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
- __ xorpd(xmm_scratch, xmm_scratch);
+ __ xorps(xmm_scratch, xmm_scratch);
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
__ sqrtsd(input_reg, input_reg);
}
@@ -2524,23 +2956,24 @@ void LCodeGen::DoPower(LPower* instr) {
if (exponent_type.IsDouble()) {
__ PrepareCallCFunction(2);
// Move arguments to correct registers
- __ movsd(xmm0, left_reg);
+ __ movaps(xmm0, left_reg);
ASSERT(ToDoubleRegister(right).is(xmm1));
- __ CallCFunction(ExternalReference::power_double_double_function(), 2);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()), 2);
} else if (exponent_type.IsInteger32()) {
__ PrepareCallCFunction(2);
// Move arguments to correct registers: xmm0 and edi (not rdi).
// On Windows, the registers are xmm0 and edx.
- __ movsd(xmm0, left_reg);
+ __ movaps(xmm0, left_reg);
#ifdef _WIN64
ASSERT(ToRegister(right).is(rdx));
#else
ASSERT(ToRegister(right).is(rdi));
#endif
- __ CallCFunction(ExternalReference::power_double_int_function(), 2);
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(isolate()), 2);
} else {
ASSERT(exponent_type.IsTagged());
- CpuFeatures::Scope scope(SSE2);
Register right_reg = ToRegister(right);
Label non_smi, call;
@@ -2557,12 +2990,15 @@ void LCodeGen::DoPower(LPower* instr) {
__ bind(&call);
__ PrepareCallCFunction(2);
// Move arguments to correct registers xmm0 and xmm1.
- __ movsd(xmm0, left_reg);
+ __ movaps(xmm0, left_reg);
// Right argument is already in xmm1.
- __ CallCFunction(ExternalReference::power_double_double_function(), 2);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()), 2);
}
// Return value is in xmm0.
- __ movsd(result_reg, xmm0);
+ __ movaps(result_reg, xmm0);
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -2576,7 +3012,7 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -2584,7 +3020,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -2623,12 +3059,28 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
}
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(rdi));
+ ASSERT(instr->HasPointerMap());
+ ASSERT(instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ SafepointGenerator generator(this, pointers, env->deoptimization_index());
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+}
+
+
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
ASSERT(ToRegister(instr->key()).is(rcx));
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(
+ arity, NOT_IN_LOOP);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -2638,9 +3090,11 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
__ Move(rcx, instr->name());
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, mode, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -2649,7 +3103,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+ CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_IMPLICIT);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ Drop(1);
@@ -2659,9 +3113,11 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
__ Move(rcx, instr->name());
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+ CallCode(ic, mode, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -2669,7 +3125,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
__ Move(rdi, instr->target());
- CallKnownFunction(instr->target(), instr->arity(), instr);
+ CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@@ -2677,7 +3133,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
- Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
__ Set(rax, instr->arity());
CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -2723,28 +3179,50 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(rax));
__ Move(rcx, instr->hydrogen()->name());
- Handle<Code> ic(Builtins::builtin(
- info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoStorePixelArrayElement(LStorePixelArrayElement* instr) {
- Register external_pointer = ToRegister(instr->external_pointer());
- Register key = ToRegister(instr->key());
- Register value = ToRegister(instr->value());
-
- { // Clamp the value to [0..255].
- NearLabel done;
- __ testl(value, Immediate(0xFFFFFF00));
- __ j(zero, &done);
- __ setcc(negative, value); // 1 if negative, 0 if positive.
- __ decb(value); // 0 if negative, 255 if positive.
- __ bind(&done);
+void LCodeGen::DoStoreKeyedSpecializedArrayElement(
+ LStoreKeyedSpecializedArrayElement* instr) {
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
+ instr->key(), elements_kind));
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ XMMRegister value(ToDoubleRegister(instr->value()));
+ __ cvtsd2ss(value, value);
+ __ movss(operand, value);
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ __ movsd(operand, ToDoubleRegister(instr->value()));
+ } else {
+ Register value(ToRegister(instr->value()));
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ movb(operand, value);
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ movw(operand, value);
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ movl(operand, value);
+ break;
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
}
-
- __ movb(Operand(external_pointer, key, times_1, 0), value);
}
@@ -2794,13 +3272,21 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(rcx));
ASSERT(ToRegister(instr->value()).is(rax));
- Handle<Code> ic(Builtins::builtin(
- info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ EmitPushTaggedOperand(instr->left());
+ EmitPushTaggedOperand(instr->right());
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
class DeferredStringCharCodeAt: public LDeferredCode {
public:
@@ -2835,7 +3321,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
DeferredStringCharCodeAt* deferred =
new DeferredStringCharCodeAt(this, instr);
- NearLabel flat_string, ascii_string, done;
+ Label flat_string, ascii_string, done;
// Fetch the instance type of the receiver into result register.
__ movq(result, FieldOperand(string, HeapObject::kMapOffset));
@@ -2844,7 +3330,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
// We need special handling for non-sequential strings.
STATIC_ASSERT(kSeqStringTag == 0);
__ testb(result, Immediate(kStringRepresentationMask));
- __ j(zero, &flat_string);
+ __ j(zero, &flat_string, Label::kNear);
// Handle cons strings and go to deferred code for the rest.
__ testb(result, Immediate(kIsConsStringMask));
@@ -2871,7 +3357,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
__ bind(&flat_string);
STATIC_ASSERT(kAsciiStringTag != 0);
__ testb(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
+ __ j(not_zero, &ascii_string, Label::kNear);
// Two-byte string.
// Load the two-byte character code into the result register.
@@ -2887,7 +3373,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
times_2,
SeqTwoByteString::kHeaderSize));
}
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// ASCII string.
// Load the byte into the result register.
@@ -2937,6 +3423,53 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
}
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode: public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new DeferredStringCharFromCode(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+ ASSERT(!char_code.is(result));
+
+ __ cmpl(char_code, Immediate(String::kMaxAsciiCharCode));
+ __ j(above, deferred->entry());
+ __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+ __ movq(result, FieldOperand(result,
+ char_code, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
+ __ j(equal, deferred->entry());
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Set(result, 0);
+
+ PushSafepointRegistersScope scope(this);
+ __ Integer32ToSmi(char_code, char_code);
+ __ push(char_code);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ __ StoreToSafepointRegisterSlot(result, rax);
+}
+
+
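
DoStringCharFromCode first probes a table of preallocated one-character strings for codes in the ASCII range and leaves everything else (and cache misses) to the deferred runtime call. The lookup in miniature; the cache contents here are illustrative:

#include <array>
#include <cstdint>
#include <string>

constexpr int kMaxAsciiCharCode = 127;

// One preallocated single-character string per ASCII code.
const std::string* CharFromCode(uint16_t code) {
  static const std::array<std::string, kMaxAsciiCharCode + 1> cache = [] {
    std::array<std::string, kMaxAsciiCharCode + 1> c;
    for (int i = 0; i <= kMaxAsciiCharCode; ++i) c[i] = std::string(1, char(i));
    return c;
  }();
  if (code > kMaxAsciiCharCode) return nullptr;  // deferred/runtime path
  return &cache[code];
}

int main() { return CharFromCode('a') && *CharFromCode('a') == "a" ? 0 : 1; }
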
void LCodeGen::DoStringLength(LStringLength* instr) {
Register string = ToRegister(instr->string());
Register result = ToRegister(instr->result());
@@ -3029,29 +3562,35 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
+ bool deoptimize_on_undefined,
LEnvironment* env) {
- NearLabel load_smi, heap_number, done;
+ Label load_smi, done;
// Smi check.
- __ JumpIfSmi(input_reg, &load_smi);
+ __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- __ j(equal, &heap_number);
+ if (deoptimize_on_undefined) {
+ DeoptimizeIf(not_equal, env);
+ } else {
+ Label heap_number;
+ __ j(equal, &heap_number, Label::kNear);
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, env);
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(not_equal, env);
- // Convert undefined to NaN. Compute NaN as 0/0.
- __ xorpd(result_reg, result_reg);
- __ divsd(result_reg, result_reg);
- __ jmp(&done);
+ // Convert undefined to NaN. Compute NaN as 0/0.
+ __ xorps(result_reg, result_reg);
+ __ divsd(result_reg, result_reg);
+ __ jmp(&done, Label::kNear);
+ __ bind(&heap_number);
+ }
// Heap number to XMM conversion.
- __ bind(&heap_number);
__ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// Smi to XMM conversion
__ bind(&load_smi);
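
The undefined-to-NaN conversion above computes NaN as 0/0: xorps zeroes the register and divsd divides it by itself. The same identity in C++, assuming IEEE 754 doubles:

#include <cassert>
#include <cmath>

int main() {
  volatile double zero = 0.0;  // volatile keeps the division at runtime
  double nan = zero / zero;    // xorps + divsd in the generated code
  assert(std::isnan(nan));
  assert(nan != nan);          // NaN never compares equal to itself
}
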
@@ -3072,7 +3611,7 @@ class DeferredTaggedToI: public LDeferredCode {
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- NearLabel done, heap_number;
+ Label done, heap_number;
Register input_reg = ToRegister(instr->InputAt(0));
// Heap number map check.
@@ -3080,20 +3619,20 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Heap::kHeapNumberMapRootIndex);
if (instr->truncating()) {
- __ j(equal, &heap_number);
+ __ j(equal, &heap_number, Label::kNear);
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(not_equal, instr->environment());
- __ movl(input_reg, Immediate(0));
- __ jmp(&done);
+ __ Set(input_reg, 0);
+ __ jmp(&done, Label::kNear);
__ bind(&heap_number);
__ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2siq(input_reg, xmm0);
__ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- __ cmpl(input_reg, kScratchRegister);
+ __ cmpq(input_reg, kScratchRegister);
DeoptimizeIf(equal, instr->environment());
} else {
// Deoptimize if we don't have a heap number.
@@ -3140,7 +3679,9 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input_reg = ToRegister(input);
XMMRegister result_reg = ToDoubleRegister(result);
- EmitNumberUntagD(input_reg, result_reg, instr->environment());
+ EmitNumberUntagD(input_reg, result_reg,
+ instr->hydrogen()->deoptimize_on_undefined(),
+ instr->environment());
}
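
The hunks around here (DoDeferredTaggedToI above, DoDoubleToI below) change cmpl to cmpq when testing for the cvttsd2siq overflow sentinel. The sentinel is the 64-bit value 0x8000000000000000 and its low dword is zero, so the old 32-bit compare also matched any legitimate result whose low 32 bits happened to be zero, 0 itself included, and caused spurious deopts. The distinction in plain C++:

#include <cstdint>

constexpr uint64_t kSentinel = 0x8000000000000000ull;

bool IsOverflowSentinel64(int64_t v) {       // the cmpq version
  return static_cast<uint64_t>(v) == kSentinel;
}

bool BuggyIsOverflowSentinel32(int64_t v) {  // the old cmpl version
  return static_cast<uint32_t>(v) == static_cast<uint32_t>(kSentinel);
}

int main() {
  // 0 is a valid conversion result, yet the 32-bit test flags it.
  return BuggyIsOverflowSentinel32(0) && !IsOverflowSentinel64(0) ? 0 : 1;
}
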
@@ -3158,8 +3699,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
// the JS bitwise operations.
__ cvttsd2siq(result_reg, input_reg);
__ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
- __ cmpl(result_reg, kScratchRegister);
- DeoptimizeIf(equal, instr->environment());
+ __ cmpq(result_reg, kScratchRegister);
+ DeoptimizeIf(equal, instr->environment());
} else {
__ cvttsd2si(result_reg, input_reg);
__ cvtlsi2sd(xmm0, result_reg);
@@ -3167,11 +3708,11 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
DeoptimizeIf(not_equal, instr->environment());
DeoptimizeIf(parity_even, instr->environment()); // NaN.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- NearLabel done;
+ Label done;
// The integer converted back is equal to the original. We
// only have to test if we got -0 as an input.
__ testl(result_reg, result_reg);
- __ j(not_zero, &done);
+ __ j(not_zero, &done, Label::kNear);
__ movmskpd(result_reg, input_reg);
// Bit 0 contains the sign of the double in input_reg.
// If input was positive, we are ok and return 0, otherwise
@@ -3186,41 +3727,59 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
Condition cc = masm()->CheckSmi(ToRegister(input));
- if (instr->condition() != equal) {
- cc = NegateCondition(cc);
- }
+ DeoptimizeIf(NegateCondition(cc), instr->environment());
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ LOperand* input = instr->InputAt(0);
+ Condition cc = masm()->CheckSmi(ToRegister(input));
DeoptimizeIf(cc, instr->environment());
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
Register input = ToRegister(instr->InputAt(0));
- InstanceType first = instr->hydrogen()->first();
- InstanceType last = instr->hydrogen()->last();
__ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
- // If there is only one type in the interval check for equality.
- if (first == last) {
+ if (instr->hydrogen()->is_interval_check()) {
+ InstanceType first;
+ InstanceType last;
+ instr->hydrogen()->GetCheckInterval(&first, &last);
+
__ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(static_cast<int8_t>(first)));
- DeoptimizeIf(not_equal, instr->environment());
- } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
- // String has a dedicated bit in instance type.
- __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(kIsNotStringMask));
- DeoptimizeIf(not_zero, instr->environment());
+
+ // If there is only one type in the interval check for equality.
+ if (first == last) {
+ DeoptimizeIf(not_equal, instr->environment());
+ } else {
+ DeoptimizeIf(below, instr->environment());
+ // Omit check for the last type.
+ if (last != LAST_TYPE) {
+ __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(static_cast<int8_t>(last)));
+ DeoptimizeIf(above, instr->environment());
+ }
+ }
} else {
- __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(first)));
- DeoptimizeIf(below, instr->environment());
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(last)));
- DeoptimizeIf(above, instr->environment());
+ uint8_t mask;
+ uint8_t tag;
+ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+ if (IsPowerOf2(mask)) {
+ ASSERT(tag == 0 || IsPowerOf2(tag));
+ __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(mask));
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+ } else {
+ __ movzxbl(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
+ __ andb(kScratchRegister, Immediate(mask));
+ __ cmpb(kScratchRegister, Immediate(tag));
+ DeoptimizeIf(not_equal, instr->environment());
}
}
}
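
The non-interval branch classifies an instance type against a mask/tag pair, using a single testb when the mask is a power of two (the masked value can then only be zero or the mask bit itself). The predicate in plain C++:

#include <cstdint>

bool MatchesMaskAndTag(uint8_t instance_type, uint8_t mask, uint8_t tag) {
  return (instance_type & mask) == tag;  // general case: and + cmp
}

// Power-of-two mask: one testb answers the question, because the masked
// value can only be 0 or the mask bit itself.
bool MatchesSingleBit(uint8_t instance_type, uint8_t bit, bool want_set) {
  return ((instance_type & bit) != 0) == want_set;
}

int main() { return MatchesMaskAndTag(0x85, 0x80, 0x80) ? 0 : 1; }
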
@@ -3244,10 +3803,61 @@ void LCodeGen::DoCheckMap(LCheckMap* instr) {
}
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ Register temp_reg = ToRegister(instr->TempAt(0));
+ __ ClampDoubleToUint8(value_reg, xmm0, result_reg, temp_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ ASSERT(instr->unclamped()->Equals(instr->result()));
+ Register value_reg = ToRegister(instr->result());
+ __ ClampUint8(value_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ ASSERT(instr->unclamped()->Equals(instr->result()));
+ Register input_reg = ToRegister(instr->unclamped());
+ Register temp_reg = ToRegister(instr->TempAt(0));
+ XMMRegister temp_xmm_reg = ToDoubleRegister(instr->TempAt(1));
+ Label is_smi, done, heap_number;
+
+ __ JumpIfSmi(input_reg, &is_smi);
+
+ // Check for heap number
+ __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(equal, &heap_number, Label::kNear);
+
+ // Check for undefined. Undefined is converted to zero for clamping
+ // conversions.
+ __ Cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, instr->environment());
+ __ movq(input_reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+
+ // Heap number
+ __ bind(&heap_number);
+ __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg, temp_reg);
+ __ jmp(&done, Label::kNear);
+
+ // smi
+ __ bind(&is_smi);
+ __ SmiToInteger32(input_reg, input_reg);
+ __ ClampUint8(input_reg);
+
+ __ bind(&done);
+}
+
+
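
DoClampTToUint8 routes three input shapes to one result: smis through integer clamping, heap numbers through double clamping, and undefined to zero. Equivalent scalar logic, including the branchless integer trick the removed pixel-array store used (any bits outside 0..255 mean the value clamps to 0 when negative and 255 otherwise); note the real double path rounds ties to even via cvtsd2si, while std::lround rounds half away from zero:

#include <cmath>
#include <cstdint>

uint8_t ClampInt(int32_t v) {
  if ((v & ~0xFF) == 0) return static_cast<uint8_t>(v);  // already in range
  return v < 0 ? 0 : 255;  // mirrors the old setcc/decb sequence
}

uint8_t ClampDouble(double d) {
  if (!(d > 0.0)) return 0;    // NaN and negatives clamp to 0
  if (d >= 255.0) return 255;
  return static_cast<uint8_t>(std::lround(d));  // nearest-integer rounding
}

int main() { return (ClampInt(300) == 255 && ClampDouble(-4.2) == 0) ? 0 : 1; }
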
void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
- if (Heap::InNewSpace(*object)) {
+ if (heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
- Factory::NewJSGlobalPropertyCell(object);
+ factory()->NewJSGlobalPropertyCell(object);
__ movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
__ movq(result, Operand(result, 0));
} else {
@@ -3328,8 +3938,15 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
}
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+ ASSERT(ToRegister(instr->InputAt(0)).is(rax));
+ __ push(rax);
+ CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- NearLabel materialized;
+ Label materialized;
// Registers will be used as follows:
// rdi = JS function.
// rcx = literals array.
@@ -3341,7 +3958,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
instr->hydrogen()->literal_index() * kPointerSize;
__ movq(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &materialized);
+ __ j(not_equal, &materialized, Label::kNear);
// Create regexp literal using runtime function
// Result will be in rax.
@@ -3385,14 +4002,17 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
- if (shared_info->num_literals() == 0 && !pretenure) {
- FastNewClosureStub stub;
+ if (!pretenure && shared_info->num_literals() == 0) {
+ FastNewClosureStub stub(
+ shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
__ Push(shared_info);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
__ push(rsi);
__ Push(shared_info);
- __ Push(pretenure ? Factory::true_value() : Factory::false_value());
+ __ PushRoot(pretenure ?
+ Heap::kTrueValueRootIndex :
+ Heap::kFalseValueRootIndex);
CallRuntime(Runtime::kNewClosure, 3, instr);
}
}
@@ -3400,14 +4020,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
LOperand* input = instr->InputAt(0);
- if (input->IsConstantOperand()) {
- __ Push(ToHandle(LConstantOperand::cast(input)));
- } else if (input->IsRegister()) {
- __ push(ToRegister(input));
- } else {
- ASSERT(input->IsStackSlot());
- __ push(ToOperand(input));
- }
+ EmitPushTaggedOperand(input);
CallRuntime(Runtime::kTypeof, 1, instr);
}
@@ -3417,7 +4030,7 @@ void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
Register result = ToRegister(instr->result());
Label true_label;
Label false_label;
- NearLabel done;
+ Label done;
Condition final_branch_condition = EmitTypeofIs(&true_label,
&false_label,
@@ -3426,7 +4039,7 @@ void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
__ j(final_branch_condition, &true_label);
__ bind(&false_label);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&true_label);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
@@ -3435,19 +4048,14 @@ void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
}
-void LCodeGen::EmitPushConstantOperand(LOperand* operand) {
- ASSERT(operand->IsConstantOperand());
- LConstantOperand* const_op = LConstantOperand::cast(operand);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- __ push(Immediate(static_cast<int32_t>(literal->Number())));
- } else if (r.IsDouble()) {
- Abort("unsupported double immediate");
+void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
+ ASSERT(!operand->IsDoubleRegister());
+ if (operand->IsConstantOperand()) {
+ __ Push(ToHandle(LConstantOperand::cast(operand)));
+ } else if (operand->IsRegister()) {
+ __ push(ToRegister(operand));
} else {
- ASSERT(r.IsTagged());
- __ Push(literal);
+ __ push(ToOperand(operand));
}
}
@@ -3473,28 +4081,28 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Register input,
Handle<String> type_name) {
Condition final_branch_condition = no_condition;
- if (type_name->Equals(Heap::number_symbol())) {
+ if (type_name->Equals(heap()->number_symbol())) {
__ JumpIfSmi(input, true_label);
- __ Cmp(FieldOperand(input, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+
final_branch_condition = equal;
- } else if (type_name->Equals(Heap::string_symbol())) {
+ } else if (type_name->Equals(heap()->string_symbol())) {
__ JumpIfSmi(input, false_label);
- __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
+ __ j(above_equal, false_label);
__ testb(FieldOperand(input, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, false_label);
- __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
- final_branch_condition = below;
+ final_branch_condition = zero;
- } else if (type_name->Equals(Heap::boolean_symbol())) {
+ } else if (type_name->Equals(heap()->boolean_symbol())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
__ j(equal, true_label);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = equal;
- } else if (type_name->Equals(Heap::undefined_symbol())) {
+ } else if (type_name->Equals(heap()->undefined_symbol())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ j(equal, true_label);
__ JumpIfSmi(input, false_label);
@@ -3504,24 +4112,23 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = not_zero;
- } else if (type_name->Equals(Heap::function_symbol())) {
+ } else if (type_name->Equals(heap()->function_symbol())) {
__ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, FIRST_FUNCTION_CLASS_TYPE, input);
+ __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
final_branch_condition = above_equal;
- } else if (type_name->Equals(Heap::object_symbol())) {
+ } else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
- __ Cmp(input, Factory::null_value());
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, true_label);
+ __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
+ __ j(below, false_label);
+ __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above, false_label);
// Check for undetectable objects => false.
__ testb(FieldOperand(input, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, false_label);
- // Check for JS objects that are not RegExp or Function => true.
- __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
- __ j(below, false_label);
- __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
- final_branch_condition = below_equal;
+ final_branch_condition = zero;
} else {
final_branch_condition = never;
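
Note: the rewritten "object" arm replaces the old RegExp/Function carve-out with the new instance-type ranges: null yields "object", anything in [FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, LAST_NONCALLABLE_SPEC_OBJECT_TYPE] yields "object" unless it is undetectable, and callable spec objects are claimed by the "function" arm above. A compilable sketch of the range test, with illustrative constants standing in for V8's InstanceType enum:

    // Illustrative constants; real values come from V8's InstanceType enum.
    const int FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = 100;
    const int LAST_NONCALLABLE_SPEC_OBJECT_TYPE = 120;

    // Smis are rejected before this test by JumpIfSmi.
    bool TypeofIsObject(bool is_null, int instance_type, bool undetectable) {
      if (is_null) return true;  // typeof null == "object"
      return !undetectable &&
             instance_type >= FIRST_NONCALLABLE_SPEC_OBJECT_TYPE &&
             instance_type <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE;
    }
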
@@ -3534,15 +4141,14 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
Register result = ToRegister(instr->result());
- NearLabel true_label;
- NearLabel false_label;
- NearLabel done;
+ Label true_label;
+ Label done;
EmitIsConstructCall(result);
- __ j(equal, &true_label);
+ __ j(equal, &true_label, Label::kNear);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&true_label);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
@@ -3567,16 +4173,16 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
__ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
- NearLabel check_frame_marker;
- __ SmiCompare(Operand(temp, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker);
+ Label check_frame_marker;
+ __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &check_frame_marker, Label::kNear);
__ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
- __ SmiCompare(Operand(temp, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
+ __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
+ Smi::FromInt(StackFrame::CONSTRUCT));
}
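
Note: the construct-call test walks one frame up from the current frame pointer, skips an arguments-adaptor frame if present (recognized by a sentinel Smi in the context slot), and compares the frame's marker slot against StackFrame::CONSTRUCT. A toy model of that walk, with hypothetical field names rather than V8's frame layout:

    #include <cstdint>

    // Toy frame model -- field names are illustrative, not V8's layout.
    struct FrameStub {
      FrameStub* caller_fp;
      intptr_t context_slot;  // holds a sentinel Smi in adaptor frames
      intptr_t marker_slot;   // holds a Smi-encoded StackFrame::Type
    };

    bool IsConstructCall(FrameStub* fp,
                         intptr_t adaptor_sentinel,
                         intptr_t construct_marker) {
      FrameStub* frame = fp->caller_fp;
      if (frame->context_slot == adaptor_sentinel) {
        frame = frame->caller_fp;  // skip the arguments adaptor frame
      }
      return frame->marker_slot == construct_marker;
    }
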
@@ -3594,20 +4200,8 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
LOperand* obj = instr->object();
LOperand* key = instr->key();
- // Push object.
- if (obj->IsRegister()) {
- __ push(ToRegister(obj));
- } else {
- __ push(ToOperand(obj));
- }
- // Push key.
- if (key->IsConstantOperand()) {
- EmitPushConstantOperand(key);
- } else if (key->IsRegister()) {
- __ push(ToRegister(key));
- } else {
- __ push(ToOperand(key));
- }
+ EmitPushTaggedOperand(obj);
+ EmitPushTaggedOperand(key);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
LEnvironment* env = instr->deoptimization_environment();
@@ -3620,19 +4214,64 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
pointers,
env->deoptimization_index());
__ Push(Smi::FromInt(strict_mode_flag()));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoIn(LIn* instr) {
+ LOperand* obj = instr->object();
+ LOperand* key = instr->key();
+ EmitPushTaggedOperand(key);
+ EmitPushTaggedOperand(obj);
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ // Create a safepoint generator that will also ensure enough space in the
+ // reloc info for patching in deoptimization (since this is invoking a
+ // builtin).
+ SafepointGenerator safepoint_generator(this,
+ pointers,
+ env->deoptimization_index());
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ PushSafepointRegistersScope scope(this);
+ CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
}
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- // Perform stack overflow check.
- NearLabel done;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &done);
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ private:
+ LStackCheck* instr_;
+ };
- StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ bind(&done);
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &done, Label::kNear);
+ StackCheckStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new DeferredStackCheck(this, instr);
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(below, deferred_stack_check->entry());
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ }
}
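
Note: stack checks are now split by kind. Function-entry checks keep the inline StackCheckStub call, while backward branches jump to a deferred slow path that calls Runtime::kStackGuard, so hot loop bodies pay only for a compare and an untaken branch. A compact model of the decision, with made-up names:

    #include <cstdint>

    // Made-up names; models which slow path a stack check selects.
    enum SlowPath { kNone, kInlineStubCall, kDeferredRuntimeCall };

    SlowPath SelectStackCheckPath(uintptr_t sp, uintptr_t limit,
                                  bool is_function_entry) {
      if (sp >= limit) return kNone;  // fast path: limit not crossed
      return is_function_entry ? kInlineStubCall        // StackCheckStub
                               : kDeferredRuntimeCall;  // Runtime::kStackGuard
    }
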
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 88832faf7..feacc2c82 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -56,7 +56,7 @@ class LCodeGen BASE_EMBEDDED {
jump_table_(4),
deoptimization_literals_(8),
inlined_function_count_(0),
- scope_(chunk->graph()->info()->scope()),
+ scope_(info->scope()),
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
@@ -67,6 +67,10 @@ class LCodeGen BASE_EMBEDDED {
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
+ CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info_->isolate(); }
+ Factory* factory() const { return isolate()->factory(); }
+ Heap* heap() const { return isolate()->heap(); }
// Support for converting LOperands to assembler types.
Register ToRegister(LOperand* op) const;
@@ -77,7 +81,6 @@ class LCodeGen BASE_EMBEDDED {
Handle<Object> ToHandle(LConstantOperand* op) const;
Operand ToOperand(LOperand* op) const;
-
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
@@ -91,12 +94,15 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LGoto* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
+ void DoGap(LGap* instr);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -120,7 +126,7 @@ class LCodeGen BASE_EMBEDDED {
bool is_aborted() const { return status_ == ABORTED; }
int strict_mode_flag() const {
- return info_->is_strict() ? kStrictMode : kNonStrictMode;
+ return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
}
LChunk* chunk() const { return chunk_; }
@@ -136,8 +142,8 @@ class LCodeGen BASE_EMBEDDED {
Register input,
Register temporary);
- int StackSlotCount() const { return chunk()->spill_slot_count(); }
- int ParameterCount() const { return scope()->num_parameters(); }
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+ int GetParameterCount() const { return scope()->num_parameters(); }
void Abort(const char* format, ...);
void Comment(const char* format, ...);
@@ -168,14 +174,14 @@ class LCodeGen BASE_EMBEDDED {
RelocInfo::Mode mode,
LInstruction* instr);
- void CallRuntime(Runtime::Function* function,
+ void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
- Runtime::Function* function = Runtime::FunctionForId(id);
+ const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
@@ -188,7 +194,8 @@ class LCodeGen BASE_EMBEDDED {
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
int arity,
- LInstruction* instr);
+ LInstruction* instr,
+ CallKind call_kind);
void LoadHeapObject(Register result, Handle<HeapObject> object);
@@ -208,6 +215,10 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
+ Operand BuildExternalArrayOperand(
+ LOperand* external_pointer,
+ LOperand* key,
+ JSObject::ElementsKind elements_kind);
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@@ -231,12 +242,18 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
int deoptimization_index);
void RecordPosition(int position);
+ int LastSafepointEnd() {
+ return static_cast<int>(safepoints_.GetPcAfterGap());
+ }
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+ void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitCmpI(LOperand* left, LOperand* right);
- void EmitNumberUntagD(Register input, XMMRegister result, LEnvironment* env);
+ void EmitNumberUntagD(Register input,
+ XMMRegister result,
+ bool deoptimize_on_undefined,
+ LEnvironment* env);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -255,15 +272,21 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
- // Emits code for pushing a constant operand.
- void EmitPushConstantOperand(LOperand* operand);
+ void EmitLoadFieldOrConstantFunction(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name);
+
+ // Emits code for pushing either a tagged constant, a (non-double)
+ // register, or a stack slot operand.
+ void EmitPushTaggedOperand(LOperand* operand);
struct JumpTableEntry {
- inline JumpTableEntry(Address address)
- : label_(),
- address_(address) { }
- Label label_;
- Address address_;
+ explicit inline JumpTableEntry(Address entry)
+ : label(),
+ address(entry) { }
+ Label label;
+ Address address;
};
LChunk* const chunk_;
@@ -274,7 +297,7 @@ class LCodeGen BASE_EMBEDDED {
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry*> jump_table_;
+ ZoneList<JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
index cedd0256d..c3c617c45 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -214,7 +214,7 @@ void LGapResolver::EmitMove(int index) {
} else if (source->IsDoubleRegister()) {
XMMRegister src = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
- __ movsd(cgen_->ToDoubleRegister(destination), src);
+ __ movaps(cgen_->ToDoubleRegister(destination), src);
} else {
ASSERT(destination->IsDoubleStackSlot());
__ movsd(cgen_->ToOperand(destination), src);
@@ -273,9 +273,9 @@ void LGapResolver::EmitSwap(int index) {
// Swap two double registers.
XMMRegister source_reg = cgen_->ToDoubleRegister(source);
XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
- __ movsd(xmm0, source_reg);
- __ movsd(source_reg, destination_reg);
- __ movsd(destination_reg, xmm0);
+ __ movaps(xmm0, source_reg);
+ __ movaps(source_reg, destination_reg);
+ __ movaps(destination_reg, xmm0);
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
// Swap a double register and a double stack slot.
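
Note: switching the register-to-register double moves and swaps from movsd to movaps is a plausible micro-optimization. movsd xmm,xmm writes only the low 64 bits of the destination and so carries a merge dependency on its old upper half, while movaps copies all 128 bits with no such dependency and a one-byte-shorter reg-reg encoding; the stack transfers keep movsd because only 64 bits live in the slot. Shown as comments, since the observable values are identical:

    // Same low-64-bit result; dependencies and encodings differ:
    //   movsd  xmm1, xmm2   // writes xmm1[63:0] only; upper half merged
    //                       // from xmm1's old value -> false dependency
    //   movaps xmm1, xmm2   // writes all of xmm1; no merge, shorter encoding
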
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 2f413feb9..42e60c3d9 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -71,22 +71,21 @@ void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
#ifdef DEBUG
void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as
- // temporaries and outputs because all registers
- // are blocked by the calling convention.
- // Inputs must use a fixed register.
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+ // Input operands must use a fixed register, a use-at-start policy, or
+ // a non-register policy.
ASSERT(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ ASSERT(operand->HasFixedPolicy() ||
+ operand->IsUsedAtStart());
}
- for (TempIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
}
}
#endif
@@ -114,21 +113,18 @@ void LInstruction::PrintTo(StringStream* stream) {
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- inputs_.PrintOperandsTo(stream);
+ for (int i = 0; i < inputs_.length(); i++) {
+ if (i > 0) stream->Add(" ");
+ inputs_[i]->PrintTo(stream);
+ }
}
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
- results_.PrintOperandsTo(stream);
-}
-
-
-template<typename T, int N>
-void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
- for (int i = 0; i < N; i++) {
+ for (int i = 0; i < results_.length(); i++) {
if (i > 0) stream->Add(" ");
- elems_[i]->PrintTo(stream);
+ results_[i]->PrintTo(stream);
}
}
@@ -240,6 +236,13 @@ void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
}
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
InputAt(0)->PrintTo(stream);
@@ -303,6 +306,13 @@ void LStoreContextSlot::PrintDataTo(StringStream* stream) {
}
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
void LCallKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[rcx] #%d / ", arity());
}
@@ -380,8 +390,7 @@ void LChunk::MarkEmptyBlocks() {
LLabel* label = LLabel::cast(first_instr);
if (last_instr->IsGoto()) {
LGoto* goto_instr = LGoto::cast(last_instr);
- if (!goto_instr->include_stack_check() &&
- label->IsRedundant() &&
+ if (label->IsRedundant() &&
!label->is_loop_header()) {
bool can_eliminate = true;
for (int i = first + 1; i < last && can_eliminate; ++i) {
@@ -442,7 +451,7 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LGap* gap = new LGap(block);
+ LInstructionGap* gap = new LInstructionGap(block);
int index = -1;
if (instr->IsControl()) {
instructions_.Add(gap);
@@ -470,7 +479,7 @@ int LChunk::GetParameterStackSlot(int index) const {
// shift all parameter indexes down by the number of parameters, and
// make sure they end up negative so they are distinguishable from
// spill slots.
- int result = index - graph()->info()->scope()->num_parameters() - 1;
+ int result = index - info()->scope()->num_parameters() - 1;
ASSERT(result < 0);
return result;
}
@@ -478,7 +487,7 @@ int LChunk::GetParameterStackSlot(int index) const {
// A parameter relative to ebp in the arguments stub.
int LChunk::ParameterAt(int index) {
ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + graph()->info()->scope()->num_parameters() - index) *
+ return (1 + info()->scope()->num_parameters() - index) *
kPointerSize;
}
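
Note: the two index spaces meet at zero: spill slots get non-negative indexes, and parameters (including the receiver at -1) are shifted down to strictly negative ones, which is what makes them distinguishable. A worked sketch of the arithmetic, with an illustrative parameter count; the real code reads it from the function's scope:

    #include <cassert>

    int ParameterSlot(int index, int num_parameters) {
      int result = index - num_parameters - 1;
      assert(result < 0);  // negative => parameter, non-negative => spill slot
      return result;
    }

    int main() {
      // Two parameters: receiver (index -1) -> -4, p0 -> -3, p1 -> -2.
      assert(ParameterSlot(-1, 2) == -4);
      assert(ParameterSlot(0, 2) == -3);
      assert(ParameterSlot(1, 2) == -2);
      return 0;
    }
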
@@ -517,7 +526,7 @@ Representation LChunk::LookupLiteralRepresentation(
LChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new LChunk(graph());
+ chunk_ = new LChunk(info(), graph());
HPhase phase("Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@@ -534,8 +543,8 @@ LChunk* LChunkBuilder::Build() {
void LChunkBuilder::Abort(const char* format, ...) {
if (FLAG_trace_bailout) {
- SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
- PrintF("Aborting LChunk building in @\"%s\": ", *debug_name);
+ SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+ PrintF("Aborting LChunk building in @\"%s\": ", *name);
va_list arguments;
va_start(arguments, format);
OS::VPrint(format, arguments);
@@ -790,6 +799,11 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
}
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+ return AssignEnvironment(new LDeoptimize);
+}
+
+
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
return AssignEnvironment(new LDeoptimize);
}
@@ -845,24 +859,22 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
right = UseFixed(right_value, rcx);
}
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- bool can_deopt = (op == Token::SHR && constant_value == 0);
- if (can_deopt) {
- bool can_truncate = true;
- for (int i = 0; i < instr->uses()->length(); i++) {
- if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
- can_truncate = false;
+ // Shift operations can only deoptimize if we do a logical shift by 0 and
+ // the result cannot be truncated to int32.
+ bool may_deopt = (op == Token::SHR && constant_value == 0);
+ bool does_deopt = false;
+ if (may_deopt) {
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+ does_deopt = true;
break;
}
}
- can_deopt = !can_truncate;
}
- LShiftI* result = new LShiftI(op, left, right, can_deopt);
- return can_deopt
- ? AssignEnvironment(DefineSameAsFirst(result))
- : DefineSameAsFirst(result);
+ LInstruction* result =
+ DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
}
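
Note: only SHR by zero can deoptimize because it is the one shift whose untagged result may be a uint32 above kMaxInt; if every use truncates back to int32, the out-of-range value never needs to materialize as a number and the environment can be skipped. A small standalone illustration of the overflowing case:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // JS: (-1) >>> 0 == 4294967295, which exceeds kMaxInt (2^31 - 1), so
      // an int32-typed SHR-by-zero result is wrong unless all uses truncate.
      int32_t x = -1;
      uint32_t shifted = static_cast<uint32_t>(x) >> 0;  // logical shift
      printf("%u\n", shifted);  // prints 4294967295
      return 0;
    }
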
@@ -871,9 +883,7 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- if (op == Token::MOD) {
- Abort("Unimplemented: %s", "DoArithmeticD MOD");
- }
+ ASSERT(op != Token::MOD);
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
LArithmeticD* result = new LArithmeticD(op, left, right);
@@ -1008,6 +1018,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
outer);
int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
+ if (hydrogen_env->is_special_index(i)) continue;
+
HValue* value = hydrogen_env->values()->at(i);
LOperand* op = NULL;
if (value->IsArgumentsObject()) {
@@ -1025,108 +1037,85 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- LGoto* result = new LGoto(instr->FirstSuccessor()->block_id(),
- instr->include_stack_check());
- return (instr->include_stack_check())
- ? AssignPointerMap(result)
- : result;
+ return new LGoto(instr->FirstSuccessor()->block_id());
}
LInstruction* LChunkBuilder::DoTest(HTest* instr) {
HValue* v = instr->value();
- if (v->EmitAtUses()) {
- if (v->IsClassOfTest()) {
- HClassOfTest* compare = HClassOfTest::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister());
- } else if (v->IsCompare()) {
- HCompare* compare = HCompare::cast(v);
- Token::Value op = compare->token();
- HValue* left = compare->left();
- HValue* right = compare->right();
- Representation r = compare->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
-
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseOrConstantAtStart(right));
- } else if (r.IsDouble()) {
- ASSERT(left->representation().IsDouble());
- ASSERT(right->representation().IsDouble());
-
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else {
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- bool reversed = op == Token::GT || op == Token::LTE;
- LOperand* left_operand = UseFixed(left, reversed ? rax : rdx);
- LOperand* right_operand = UseFixed(right, reversed ? rdx : rax);
- LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
- right_operand);
- return MarkAsCall(result, instr);
- }
- } else if (v->IsIsSmi()) {
- HIsSmi* compare = HIsSmi::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LIsSmiAndBranch(Use(compare->value()));
- } else if (v->IsHasInstanceType()) {
- HHasInstanceType* compare = HHasInstanceType::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LHasInstanceTypeAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsHasCachedArrayIndex()) {
- HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsIsNull()) {
- HIsNull* compare = HIsNull::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- // We only need a temp register for non-strict compare.
- LOperand* temp = compare->is_strict() ? NULL : TempRegister();
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
- temp);
- } else if (v->IsIsObject()) {
- HIsObject* compare = HIsObject::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsCompareJSObjectEq()) {
- HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
- return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsInstanceOf()) {
- HInstanceOf* instance_of = HInstanceOf::cast(v);
- LInstanceOfAndBranch* result =
- new LInstanceOfAndBranch(UseFixed(instance_of->left(), rax),
- UseFixed(instance_of->right(), rdx));
- return MarkAsCall(result, instr);
- } else if (v->IsTypeofIs()) {
- HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
- } else if (v->IsIsConstructCall()) {
- return new LIsConstructCallAndBranch(TempRegister());
+ if (!v->EmitAtUses()) return new LBranch(UseRegisterAtStart(v));
+ ASSERT(!v->HasSideEffects());
+ if (v->IsClassOfTest()) {
+ HClassOfTest* compare = HClassOfTest::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
+ TempRegister());
+ } else if (v->IsCompare()) {
+ HCompare* compare = HCompare::cast(v);
+ HValue* left = compare->left();
+ HValue* right = compare->right();
+ Representation r = compare->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(left->representation().IsInteger32());
+ ASSERT(right->representation().IsInteger32());
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
+ UseOrConstantAtStart(right));
} else {
- if (v->IsConstant()) {
- if (HConstant::cast(v)->handle()->IsTrue()) {
- return new LGoto(instr->FirstSuccessor()->block_id());
- } else if (HConstant::cast(v)->handle()->IsFalse()) {
- return new LGoto(instr->SecondSuccessor()->block_id());
- }
- }
- Abort("Undefined compare before branch");
- return NULL;
+ ASSERT(r.IsDouble());
+ ASSERT(left->representation().IsDouble());
+ ASSERT(right->representation().IsDouble());
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
+ UseRegisterAtStart(right));
}
+ } else if (v->IsIsSmi()) {
+ HIsSmi* compare = HIsSmi::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LIsSmiAndBranch(Use(compare->value()));
+ } else if (v->IsIsUndetectable()) {
+ HIsUndetectable* compare = HIsUndetectable::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
+ TempRegister());
+ } else if (v->IsHasInstanceType()) {
+ HHasInstanceType* compare = HHasInstanceType::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()));
+ } else if (v->IsHasCachedArrayIndex()) {
+ HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(compare->value()));
+ } else if (v->IsIsNull()) {
+ HIsNull* compare = HIsNull::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ // We only need a temp register for non-strict compare.
+ LOperand* temp = compare->is_strict() ? NULL : TempRegister();
+ return new LIsNullAndBranch(UseRegisterAtStart(compare->value()), temp);
+ } else if (v->IsIsObject()) {
+ HIsObject* compare = HIsObject::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
+ } else if (v->IsCompareObjectEq()) {
+ HCompareObjectEq* compare = HCompareObjectEq::cast(v);
+ return new LCmpObjectEqAndBranch(UseRegisterAtStart(compare->left()),
+ UseRegisterAtStart(compare->right()));
+ } else if (v->IsCompareConstantEq()) {
+ HCompareConstantEq* compare = HCompareConstantEq::cast(v);
+ return new LCmpConstantEqAndBranch(UseRegisterAtStart(compare->value()));
+ } else if (v->IsTypeofIs()) {
+ HTypeofIs* typeof_is = HTypeofIs::cast(v);
+ return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
+ } else if (v->IsIsConstructCall()) {
+ return new LIsConstructCallAndBranch(TempRegister());
+ } else if (v->IsConstant()) {
+ HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+ ? instr->FirstSuccessor()
+ : instr->SecondSuccessor();
+ return new LGoto(successor->block_id());
+ } else {
+ Abort("Undefined compare before branch");
+ return NULL;
}
- return new LBranch(UseRegisterAtStart(v));
}
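
Note: besides flattening the nesting, the rewrite folds branches on constants: any constant condition now compiles via ToBoolean to a plain LGoto to the chosen successor, where the old code handled only literal true and false and aborted on other constants. In outline, with stand-in types:

    // Outline of the constant-branch folding.
    struct SuccessorsStub { int true_block, false_block; };

    int FoldConstantBranch(const SuccessorsStub& s, bool to_boolean_value) {
      return to_boolean_value ? s.true_block : s.false_block;  // becomes LGoto
    }
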
@@ -1158,7 +1147,8 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new LInstanceOfKnownGlobal(UseFixed(instr->value(), rax));
+ new LInstanceOfKnownGlobal(UseFixed(instr->value(), rax),
+ FixedTemp(rdi));
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1183,8 +1173,13 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+}
+
+
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return DefineAsRegister(new LContext);
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
}
@@ -1200,7 +1195,8 @@ LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- return DefineAsRegister(new LGlobalReceiver);
+ LOperand* global_object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LGlobalReceiver(global_object));
}
@@ -1211,6 +1207,14 @@ LInstruction* LChunkBuilder::DoCallConstantFunction(
}
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), rdi);
+ argument_count_ -= instr->argument_count();
+ LInvokeFunction* result = new LInvokeFunction(function);
+ return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
BuiltinFunctionId op = instr->op();
if (op == kMathLog || op == kMathSin || op == kMathCos) {
@@ -1348,13 +1352,23 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- // The temporary operand is necessary to ensure that right is not allocated
- // into edx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* value = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LModI* mod = new LModI(value, divisor, temp);
- LInstruction* result = DefineFixed(mod, rdx);
+
+ LInstruction* result;
+ if (instr->HasPowerOf2Divisor()) {
+ ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
+ LOperand* value = UseRegisterAtStart(instr->left());
+ LModI* mod = new LModI(value, UseOrConstant(instr->right()), NULL);
+ result = DefineSameAsFirst(mod);
+ } else {
+ // The temporary operand is necessary to ensure that right is not
+ // allocated into rdx.
+ LOperand* temp = FixedTemp(rdx);
+ LOperand* value = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LModI* mod = new LModI(value, divisor, temp);
+ result = DefineFixed(mod, rdx);
+ }
+
return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
instr->CheckFlag(HValue::kCanBeDivByZero))
? AssignEnvironment(result)
@@ -1366,8 +1380,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
// We call a C function for double modulo. It can't trigger a GC.
// We need to use fixed result register for the call.
// TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), xmm1);
- LOperand* right = UseFixedDouble(instr->right(), xmm2);
+ LOperand* left = UseFixedDouble(instr->left(), xmm2);
+ LOperand* right = UseFixedDouble(instr->right(), xmm1);
LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
}
@@ -1481,15 +1495,21 @@ LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
}
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
- HCompareJSObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEq(HCompareObjectEq* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
+ LCmpObjectEq* result = new LCmpObjectEq(left, right);
return DefineAsRegister(result);
}
+LInstruction* LChunkBuilder::DoCompareConstantEq(
+ HCompareConstantEq* instr) {
+ LOperand* left = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LCmpConstantEq(left));
+}
+
+
LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1514,6 +1534,14 @@ LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
}
+LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LIsUndetectable(value));
+}
+
+
LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1524,8 +1552,10 @@ LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
HGetCachedArrayIndex* instr) {
- Abort("Unimplemented: %s", "DoGetCachedArrayIndex");
- return NULL;
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LGetCachedArrayIndex(value));
}
@@ -1555,9 +1585,16 @@ LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
}
-LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) {
+LInstruction* LChunkBuilder::DoExternalArrayLength(
+ HExternalArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LPixelArrayLength(array));
+ return DefineAsRegister(new LExternalArrayLength(array));
+}
+
+
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+ LOperand* object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LElementsKind(object));
}
@@ -1587,6 +1624,19 @@ LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
}
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
@@ -1600,10 +1650,8 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = UseRegister(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
if (needs_check) {
- LOperand* xmm_temp =
- (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
- ? NULL
- : FixedTemp(xmm1);
+ bool truncating = instr->CanTruncateToInt32();
+ LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
LTaggedToI* res = new LTaggedToI(value, xmm_temp);
return AssignEnvironment(DefineSameAsFirst(res));
} else {
@@ -1646,7 +1694,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value, zero));
+ return AssignEnvironment(new LCheckNonSmi(value));
}
@@ -1666,7 +1714,7 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value, not_zero));
+ return AssignEnvironment(new LCheckSmi(value));
}
@@ -1683,6 +1731,53 @@ LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
}
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ return DefineAsRegister(new LClampDToUint8(reg,
+ TempRegister()));
+ } else if (input_rep.IsInteger32()) {
+ return DefineSameAsFirst(new LClampIToUint8(reg));
+ } else {
+ ASSERT(input_rep.IsTagged());
+ // Register allocator doesn't (yet) support allocation of double
+ // temps. Reserve xmm1 explicitly.
+ LClampTToUint8* result = new LClampTToUint8(reg,
+ TempRegister(),
+ FixedTemp(xmm1));
+ return AssignEnvironment(DefineSameAsFirst(result));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ return AssignEnvironment(DefineAsRegister(new LDoubleToI(reg)));
+ } else if (input_rep.IsInteger32()) {
+ // Canonicalization should already have removed the hydrogen instruction in
+ // this case, since it is a noop.
+ UNREACHABLE();
+ return NULL;
+ } else {
+ ASSERT(input_rep.IsTagged());
+ LOperand* reg = UseRegister(value);
+ // Register allocator doesn't (yet) support allocation of double
+ // temps. Reserve xmm1 explicitly.
+ LOperand* xmm_temp =
+ CpuFeatures::IsSupported(SSE3)
+ ? NULL
+ : FixedTemp(xmm1);
+ return AssignEnvironment(
+ DefineSameAsFirst(new LTaggedToI(reg, xmm_temp)));
+ }
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
return new LReturn(UseFixed(instr->value(), rax));
}
@@ -1704,21 +1799,36 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
- LLoadGlobal* result = new LLoadGlobal;
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new LLoadGlobalCell;
return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
-LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
- LStoreGlobal* result = new LStoreGlobal(UseRegister(instr->value()),
- TempRegister());
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* global_object = UseFixed(instr->global_object(), rax);
+ LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+ LStoreGlobalCell* result =
+ new LStoreGlobalCell(UseRegister(instr->value()), TempRegister());
return instr->check_hole_value() ? AssignEnvironment(result) : result;
}
+LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+ LOperand* global_object = UseFixed(instr->global_object(), rdx);
+ LOperand* value = UseFixed(instr->value(), rax);
+ LStoreGlobalGeneric* result = new LStoreGlobalGeneric(global_object, value);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LLoadContextSlot(context));
@@ -1726,17 +1836,19 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- Abort("Unimplemented: DoStoreContextSlot"); // Temporarily disabled (whesse).
LOperand* context;
LOperand* value;
+ LOperand* temp;
if (instr->NeedsWriteBarrier()) {
context = UseTempRegister(instr->context());
value = UseTempRegister(instr->value());
+ temp = TempRegister();
} else {
context = UseRegister(instr->context());
value = UseRegister(instr->value());
+ temp = NULL;
}
- return new LStoreContextSlot(context, value);
+ return new LStoreContextSlot(context, value, temp);
}
@@ -1747,6 +1859,21 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
+LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
+ HLoadNamedFieldPolymorphic* instr) {
+ ASSERT(instr->representation().IsTagged());
+ if (instr->need_generic()) {
+ LOperand* obj = UseFixed(instr->object(), rax);
+ LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ } else {
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+}
+
+
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), rax);
LLoadNamedGeneric* result = new LLoadNamedGeneric(object);
@@ -1767,10 +1894,10 @@ LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
}
-LInstruction* LChunkBuilder::DoLoadPixelArrayExternalPointer(
- HLoadPixelArrayExternalPointer* instr) {
+LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
+ HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadPixelArrayExternalPointer(input));
+ return DefineAsRegister(new LLoadExternalArrayPointer(input));
}
@@ -1785,16 +1912,27 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
}
-LInstruction* LChunkBuilder::DoLoadPixelArrayElement(
- HLoadPixelArrayElement* instr) {
- ASSERT(instr->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
+ HLoadKeyedSpecializedArrayElement* instr) {
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ Representation representation(instr->representation());
+ ASSERT(
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
- LOperand* external_pointer =
- UseRegisterAtStart(instr->external_pointer());
- LOperand* key = UseRegisterAtStart(instr->key());
- LLoadPixelArrayElement* result =
- new LLoadPixelArrayElement(external_pointer, key);
- return DefineSameAsFirst(result);
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LLoadKeyedSpecializedArrayElement* result =
+ new LLoadKeyedSpecializedArrayElement(external_pointer, key);
+ LInstruction* load_instr = DefineAsRegister(result);
+ // An unsigned int array load might overflow and cause a deopt; make sure
+ // it has an environment.
+ return (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
+ AssignEnvironment(load_instr) : load_instr;
}
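
Note: the environment on EXTERNAL_UNSIGNED_INT_ELEMENTS loads exists because a uint32 element above kMaxInt has no int32 representation; the fast path produces an untagged int32, so such a value must trigger a deopt to a path that can box it. A standalone illustration:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // A Uint32Array element of 0x80000000 (2147483648) does not fit in
      // int32, so the optimized load must be able to bail out.
      uint32_t element = 0x80000000u;
      bool fits_int32 = element <= 0x7FFFFFFFu;
      printf("fits int32: %s\n", fits_int32 ? "yes" : "no -> deopt");
      return 0;
    }
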
@@ -1826,17 +1964,32 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
}
-LInstruction* LChunkBuilder::DoStorePixelArrayElement(
- HStorePixelArrayElement* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
+ HStoreKeyedSpecializedArrayElement* instr) {
+ Representation representation(instr->value()->representation());
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* val = UseTempRegister(instr->value());
- LOperand* key = UseRegister(instr->key());
+ bool val_is_temp_register =
+ elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS;
+ LOperand* val = val_is_temp_register
+ ? UseTempRegister(instr->value())
+ : UseRegister(instr->value());
+ LOperand* key = UseRegisterOrConstant(instr->key());
- return new LStorePixelArrayElement(external_pointer, key, val);
+ return new LStoreKeyedSpecializedArrayElement(external_pointer,
+ key,
+ val);
}
@@ -1883,6 +2036,13 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* left = UseOrConstantAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ return MarkAsCall(DefineFixed(new LStringAdd(left, right), rax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegisterOrConstant(instr->index());
@@ -1891,6 +2051,13 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
}
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+ LOperand* char_code = UseRegister(instr->value());
+ LStringCharFromCode* result = new LStringCharFromCode(char_code);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LOperand* string = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LStringLength(string));
@@ -1919,7 +2086,8 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LDeleteProperty* result =
- new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
+ new LDeleteProperty(UseAtStart(instr->object()),
+ UseOrConstantAtStart(instr->key()));
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1971,6 +2139,13 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+ LOperand* object = UseFixed(instr->value(), rax);
+ LToFastProperties* result = new LToFastProperties(object);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LTypeof* result = new LTypeof(UseAtStart(instr->value()));
return MarkAsCall(DefineFixed(result, rax), instr);
@@ -2002,7 +2177,6 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->Push(value);
}
}
- ASSERT(env->length() == instr->environment_length());
// If there is an instruction with a pending deoptimization environment,
// create a lazy bailout instruction to capture the environment.
@@ -2020,7 +2194,12 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- return MarkAsCall(new LStackCheck, instr);
+ if (instr->is_function_entry()) {
+ return MarkAsCall(new LStackCheck, instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ return AssignEnvironment(AssignPointerMap(new LStackCheck));
+ }
}
@@ -2029,8 +2208,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->function(),
- false,
- undefined);
+ undefined,
+ instr->call_kind());
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
@@ -2043,6 +2222,15 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
return NULL;
}
+
+LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+ LOperand* key = UseOrConstantAtStart(instr->key());
+ LOperand* object = UseOrConstantAtStart(instr->object());
+ LIn* result = new LIn(key, object);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index fed5b8cb8..d500dfd87 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -32,6 +32,7 @@
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
+#include "utils.h"
namespace v8 {
namespace internal {
@@ -70,15 +71,22 @@ class LCodeGen;
V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMap) \
+ V(CheckNonSmi) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8) \
+ V(ClassOfTest) \
+ V(ClassOfTestAndBranch) \
+ V(CmpConstantEq) \
+ V(CmpConstantEqAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
- V(CmpJSObjectEq) \
- V(CmpJSObjectEqAndBranch) \
+ V(CmpObjectEq) \
+ V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
- V(CmpTAndBranch) \
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
@@ -87,41 +95,49 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(ElementsKind) \
+ V(ExternalArrayLength) \
+ V(FixedArrayLength) \
V(FunctionLiteral) \
- V(Gap) \
+ V(GetCachedArrayIndex) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
- V(FixedArrayLength) \
+ V(HasCachedArrayIndex) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceType) \
+ V(HasInstanceTypeAndBranch) \
+ V(In) \
V(InstanceOf) \
- V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsConstructCall) \
+ V(IsConstructCallAndBranch) \
V(IsNull) \
V(IsNullAndBranch) \
V(IsObject) \
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
+ V(IsUndetectable) \
+ V(IsUndetectableAndBranch) \
V(JSArrayLength) \
- V(HasInstanceType) \
- V(HasInstanceTypeAndBranch) \
- V(HasCachedArrayIndex) \
- V(HasCachedArrayIndexAndBranch) \
- V(ClassOfTest) \
- V(ClassOfTestAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
- V(LoadGlobal) \
+ V(LoadExternalArrayPointer) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
+ V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
+ V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
- V(LoadFunctionPrototype) \
- V(LoadPixelArrayElement) \
- V(LoadPixelArrayExternalPointer) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -131,7 +147,6 @@ class LCodeGen;
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
- V(PixelArrayLength) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
@@ -141,41 +156,40 @@ class LCodeGen;
V(SmiUntag) \
V(StackCheck) \
V(StoreContextSlot) \
- V(StoreGlobal) \
+ V(StoreGlobalCell) \
+ V(StoreGlobalGeneric) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
+ V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
- V(StorePixelArrayElement) \
+ V(StringAdd) \
V(StringCharCodeAt) \
+ V(StringCharFromCode) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
+ V(ThisFunction) \
V(Throw) \
+ V(ToFastProperties) \
V(Typeof) \
V(TypeofIs) \
V(TypeofIsAndBranch) \
- V(IsConstructCall) \
- V(IsConstructCallAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf)
-#define DECLARE_INSTRUCTION(type) \
- virtual bool Is##type() const { return true; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const { return LInstruction::k##type; } \
+ virtual void CompileToNative(LCodeGen* generator); \
+ virtual const char* Mnemonic() const { return mnemonic; } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- DECLARE_INSTRUCTION(type)
-
-
#define DECLARE_HYDROGEN_ACCESSOR(type) \
H##type* hydrogen() const { \
return H##type::cast(hydrogen_value()); \
@@ -198,10 +212,25 @@ class LInstruction: public ZoneObject {
virtual void PrintDataTo(StringStream* stream) = 0;
virtual void PrintOutputOperandTo(StringStream* stream) = 0;
- // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
- LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
+ enum Opcode {
+ // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+ kNumberOfInstructions
+#undef DECLARE_OPCODE
+ };
+
+ virtual Opcode opcode() const = 0;
+
+ // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+ // Declare virtual predicates for instructions that don't have
+ // an opcode.
+ virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
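
Note: this macro change trades N virtual Is##type() methods for one virtual opcode() plus non-virtual testers, so each type test becomes an inlineable integer compare after a single virtual call instead of a virtual dispatch per query. A minimal compilable sketch of the pattern, with made-up instruction names:

    // Made-up two-instruction version of the opcode/predicate scheme.
    class LInstrStub {
     public:
      enum Opcode { kAdd, kGoto, kNumberOfInstructions };
      virtual ~LInstrStub() {}
      virtual Opcode opcode() const = 0;
      // Non-virtual testers, analogous to the DECLARE_PREDICATE expansion.
      bool IsAdd() const { return opcode() == kAdd; }
      bool IsGoto() const { return opcode() == kGoto; }
    };

    class LAddStub : public LInstrStub {
     public:
      virtual Opcode opcode() const { return kAdd; }
    };
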
@@ -259,37 +288,6 @@ class LInstruction: public ZoneObject {
};
-template<typename ElementType, int NumElements>
-class OperandContainer {
- public:
- OperandContainer() {
- for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
- }
- int length() { return NumElements; }
- ElementType& operator[](int i) {
- ASSERT(i < length());
- return elems_[i];
- }
- void PrintOperandsTo(StringStream* stream);
-
- private:
- ElementType elems_[NumElements];
-};
-
-
-template<typename ElementType>
-class OperandContainer<ElementType, 0> {
- public:
- int length() { return 0; }
- void PrintOperandsTo(StringStream* stream) { }
- ElementType& operator[](int i) {
- UNREACHABLE();
- static ElementType t = 0;
- return t;
- }
-};
-
-
// R = number of result operands (0 or 1).
// I = number of input operands.
// T = number of temporary operands.
@@ -312,9 +310,9 @@ class LTemplateInstruction: public LInstruction {
virtual void PrintOutputOperandTo(StringStream* stream);
protected:
- OperandContainer<LOperand*, R> results_;
- OperandContainer<LOperand*, I> inputs_;
- OperandContainer<LOperand*, T> temps_;
+ EmbeddedContainer<LOperand*, R> results_;
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
};
@@ -328,8 +326,13 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
parallel_moves_[AFTER] = NULL;
}
- DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
+ // Can't use the DECLARE_CONCRETE_INSTRUCTION macro here: LGap has subclasses.
+ virtual bool IsGap() const { return true; }
virtual void PrintDataTo(StringStream* stream);
+ static LGap* cast(LInstruction* instr) {
+ ASSERT(instr->IsGap());
+ return reinterpret_cast<LGap*>(instr);
+ }
bool IsRedundant() const;
@@ -359,21 +362,26 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
+class LInstructionGap: public LGap {
+ public:
+ explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
class LGoto: public LTemplateInstruction<0, 0, 0> {
public:
- LGoto(int block_id, bool include_stack_check = false)
- : block_id_(block_id), include_stack_check_(include_stack_check) { }
+ explicit LGoto(int block_id) : block_id_(block_id) { }
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int block_id() const { return block_id_; }
- bool include_stack_check() const { return include_stack_check_; }
private:
int block_id_;
- bool include_stack_check_;
};
@@ -447,7 +455,6 @@ class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
- DECLARE_INSTRUCTION(ControlInstruction)
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
@@ -608,26 +615,49 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
+class LCmpObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
- LCmpJSObjectEq(LOperand* left, LOperand* right) {
+ LCmpObjectEq(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEq, "cmp-object-eq")
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
- "cmp-jsobject-eq-and-branch")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+ "cmp-object-eq-and-branch")
+};
+
+
+class LCmpConstantEq: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCmpConstantEq(LOperand* left) {
+ inputs_[0] = left;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpConstantEq, "cmp-constant-eq")
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
+};
+
+
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpConstantEqAndBranch(LOperand* left) {
+ inputs_[0] = left;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+ "cmp-constant-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
};
@@ -705,6 +735,31 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
};
+class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LIsUndetectable(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
+};
+
+
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+ public:
+ explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasInstanceType(LOperand* value) {
@@ -730,6 +785,17 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
};
+class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasCachedArrayIndex(LOperand* value) {
@@ -796,17 +862,17 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
};
-class LCmpTAndBranch: public LControlInstruction<2, 0> {
+class LIn: public LTemplateInstruction<1, 2, 0> {
public:
- LCmpTAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LIn(LOperand* key, LOperand* object) {
+ inputs_[0] = key;
+ inputs_[1] = object;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ LOperand* key() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
- Token::Value op() const { return hydrogen()->token(); }
+ DECLARE_CONCRETE_INSTRUCTION(In, "in")
};
@@ -821,21 +887,11 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
- public:
- LInstanceOfAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-};
-
-
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 0> {
+class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LInstanceOfKnownGlobal(LOperand* value) {
+ LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
@@ -984,14 +1040,14 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
};
-class LPixelArrayLength: public LTemplateInstruction<1, 1, 0> {
+class LExternalArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LPixelArrayLength(LOperand* value) {
+ explicit LExternalArrayLength(LOperand* value) {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel-array-length")
- DECLARE_HYDROGEN_ACCESSOR(PixelArrayLength)
+ DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength, "external-array-length")
+ DECLARE_HYDROGEN_ACCESSOR(ExternalArrayLength)
};
@@ -1006,6 +1062,17 @@ class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
};
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LElementsKind(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
+};
+
+
class LValueOf: public LTemplateInstruction<1, 1, 0> {
public:
explicit LValueOf(LOperand* value) {
@@ -1071,6 +1138,7 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
Token::Value op() const { return op_; }
+ virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
@@ -1087,6 +1155,7 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
@@ -1118,6 +1187,19 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
+class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedFieldPolymorphic(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
+                               "load-named-field-polymorphic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
+
+ LOperand* object() { return inputs_[0]; }
+};
+
+
class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
@@ -1155,14 +1237,14 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadPixelArrayExternalPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LLoadPixelArrayExternalPointer(LOperand* object) {
+ explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
- "load-pixel-array-external-pointer")
+ DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
+ "load-external-array-pointer")
};
@@ -1181,19 +1263,23 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadPixelArrayElement: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadPixelArrayElement(LOperand* external_pointer, LOperand* key) {
+ LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
- "load-pixel-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadPixelArrayElement)
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
+ "load-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
};
@@ -1211,22 +1297,55 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LStoreGlobal(LOperand* value, LOperand* temp) {
+ explicit LLoadGlobalGeneric(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ LOperand* global_object() { return inputs_[0]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+ public:
+ explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+ explicit LStoreGlobalGeneric(LOperand* global_object,
+ LOperand* value) {
+ inputs_[0] = global_object;
+ inputs_[1] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
+
+ LOperand* global_object() { return InputAt(0); }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ LOperand* value() { return InputAt(1); }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -1246,11 +1365,12 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
public:
- LStoreContextSlot(LOperand* context, LOperand* value) {
+ LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
inputs_[1] = value;
+ temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
@@ -1275,6 +1395,11 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+};
+
+
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
@@ -1299,9 +1424,15 @@ class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
};
-class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> {
+class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LGlobalReceiver(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+
+ LOperand* global() { return InputAt(0); }
};
@@ -1317,6 +1448,23 @@ class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
};
+class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInvokeFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ LOperand* function() { return inputs_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallKeyed(LOperand* key) {
@@ -1401,7 +1549,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- Runtime::Function* function() const { return hydrogen()->function(); }
+ const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
};
@@ -1445,7 +1593,7 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1460,7 +1608,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
}
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1483,6 +1631,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -1541,6 +1690,7 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -1564,23 +1714,26 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
};
-class LStorePixelArrayElement: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStorePixelArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* val) {
+ LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key,
+ LOperand* val) {
inputs_[0] = external_pointer;
inputs_[1] = key;
inputs_[2] = val;
}
- DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement,
- "store-pixel-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StorePixelArrayElement)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
+ "store-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
};
@@ -1593,12 +1746,29 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringAdd(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
};
@@ -1617,6 +1787,19 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
};
+class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LStringCharFromCode(LOperand* char_code) {
+ inputs_[0] = char_code;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+
+ LOperand* char_code() { return inputs_[0]; }
+};
+
+
class LStringLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LStringLength(LOperand* string) {
@@ -1679,20 +1862,62 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
public:
- LCheckSmi(LOperand* value, Condition condition)
- : condition_(condition) {
+ explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
}
- Condition condition() const { return condition_; }
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const {
- return (condition_ == zero) ? "check-non-smi" : "check-smi";
+
+class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LClampDToUint8(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
}
- private:
- Condition condition_;
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampIToUint8(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LClampTToUint8(LOperand* value,
+ LOperand* temp,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
};
@@ -1726,6 +1951,17 @@ class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
};
+class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LToFastProperties(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+ DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
class LTypeof: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
@@ -1824,14 +2060,21 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
class LStackCheck: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
};
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
- explicit LChunk(HGraph* graph)
+ explicit LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
+ info_(info),
graph_(graph),
instructions_(32),
pointer_maps_(8),
@@ -1848,6 +2091,7 @@ class LChunk: public ZoneObject {
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
+ CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
void AddGapMove(int index, LOperand* from, LOperand* to);
@@ -1884,6 +2128,7 @@ class LChunk: public ZoneObject {
private:
int spill_slot_count_;
+ CompilationInfo* info_;
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
@@ -1893,8 +2138,9 @@ class LChunk: public ZoneObject {
class LChunkBuilder BASE_EMBEDDED {
public:
- LChunkBuilder(HGraph* graph, LAllocator* allocator)
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
+ info_(info),
graph_(graph),
status_(UNUSED),
current_instruction_(NULL),
@@ -1923,6 +2169,7 @@ class LChunkBuilder BASE_EMBEDDED {
};
LChunk* chunk() const { return chunk_; }
+ CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
bool is_unused() const { return status_ == UNUSED; }
@@ -2029,6 +2276,7 @@ class LChunkBuilder BASE_EMBEDDED {
HArithmeticBinaryOperation* instr);
LChunk* chunk_;
+ CompilationInfo* info_;
HGraph* const graph_;
Status status_;
HInstruction* current_instruction_;
@@ -2044,7 +2292,6 @@ class LChunkBuilder BASE_EMBEDDED {
};
#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_INSTRUCTION
#undef DECLARE_CONCRETE_INSTRUCTION
} }  // namespace v8::internal
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 8845bbb77..7c8a3667e 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_X64)
#include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "assembler-x64.h"
#include "macro-assembler-x64.h"
#include "serialize.h"
@@ -40,36 +40,156 @@
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(void* buffer, int size)
- : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+ : Assembler(arg_isolate, buffer, size),
generating_stub_(false),
allow_stub_calls_(true),
- code_object_(Heap::undefined_value()) {
+ root_array_available_(true) {
+ if (isolate() != NULL) {
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
+}
+
+
+static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
+ Address roots_register_value = kRootRegisterBias +
+ reinterpret_cast<Address>(isolate->heap()->roots_address());
+ intptr_t delta = other.address() - roots_register_value;
+ return delta;
+}
+
+
+Operand MacroAssembler::ExternalOperand(ExternalReference target,
+ Register scratch) {
+ if (root_array_available_ && !Serializer::enabled()) {
+ intptr_t delta = RootRegisterDelta(target, isolate());
+ if (is_int32(delta)) {
+ Serializer::TooLateToEnableNow();
+ return Operand(kRootRegister, static_cast<int32_t>(delta));
+ }
+ }
+ movq(scratch, target);
+ return Operand(scratch, 0);
+}
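The fast path above rewrites an absolute external reference as a displacement off kRootRegister, which is assumed to hold roots_address() plus kRootRegisterBias; only when the delta does not fit in 32 bits (or the serializer is active) does it burn a scratch register on a full 64-bit movq. With made-up numbers:

  // Illustrative values only:
  //   roots_address()   = 0x00007f1200000000
  //   kRootRegisterBias = 128 (assumed; the constant is defined elsewhere)
  //   kRootRegister     = 0x00007f1200000080
  //   target.address()  = 0x00007f1200004000
  //   delta             = 0x4000 - 0x80 = 0x3f80  -> is_int32, so the
  //   result is Operand(kRootRegister, 0x3f80), no scratch needed.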
+
+
+void MacroAssembler::Load(Register destination, ExternalReference source) {
+ if (root_array_available_ && !Serializer::enabled()) {
+ intptr_t delta = RootRegisterDelta(source, isolate());
+ if (is_int32(delta)) {
+ Serializer::TooLateToEnableNow();
+ movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ return;
+ }
+ }
+ // Safe code.
+ if (destination.is(rax)) {
+ load_rax(source);
+ } else {
+ movq(kScratchRegister, source);
+ movq(destination, Operand(kScratchRegister, 0));
+ }
+}
+
+
+void MacroAssembler::Store(ExternalReference destination, Register source) {
+ if (root_array_available_ && !Serializer::enabled()) {
+ intptr_t delta = RootRegisterDelta(destination, isolate());
+ if (is_int32(delta)) {
+ Serializer::TooLateToEnableNow();
+ movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
+ return;
+ }
+ }
+ // Safe code.
+ if (source.is(rax)) {
+ store_rax(destination);
+ } else {
+ movq(kScratchRegister, destination);
+ movq(Operand(kScratchRegister, 0), source);
+ }
+}
+
+
+void MacroAssembler::LoadAddress(Register destination,
+ ExternalReference source) {
+ if (root_array_available_ && !Serializer::enabled()) {
+ intptr_t delta = RootRegisterDelta(source, isolate());
+ if (is_int32(delta)) {
+ Serializer::TooLateToEnableNow();
+ lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ return;
+ }
+ }
+ // Safe code.
+ movq(destination, source);
+}
+
+
+int MacroAssembler::LoadAddressSize(ExternalReference source) {
+ if (root_array_available_ && !Serializer::enabled()) {
+ // This calculation depends on the internals of LoadAddress.
+  // Its correctness is ensured by the asserts in the Call
+ // instruction below.
+ intptr_t delta = RootRegisterDelta(source, isolate());
+ if (is_int32(delta)) {
+ Serializer::TooLateToEnableNow();
+ // Operand is lea(scratch, Operand(kRootRegister, delta));
+      // Encoding: REX.W 8D ModRM Disp8/Disp32, i.e. 4 or 7 bytes.
+ int size = 4;
+ if (!is_int8(static_cast<int32_t>(delta))) {
+ size += 3; // Need full four-byte displacement in lea.
+ }
+ return size;
+ }
+ }
+  // Size of movq(destination, source);
+ return 10;
}
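The byte counts fall straight out of the x64 encodings: lea reg, [kRootRegister + disp8] is REX.W + 0x8D + ModRM + disp8, i.e. 4 bytes; widening the displacement to disp32 adds 3 bytes for 7; and movq reg, imm64 is REX.W + B8+r + imm64, i.e. 10 bytes. Worked through with illustrative deltas:

  //   delta =   0x40            -> is_int8, lea with disp8  ->  4 bytes
  //   delta = 0x4000            -> needs disp32, 4 + 3      ->  7 bytes
  //   delta not an int32 at all -> movq reg, imm64          -> 10 bytes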
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
- movq(destination, Operand(kRootRegister, index << kPointerSizeLog2));
+ ASSERT(root_array_available_);
+ movq(destination, Operand(kRootRegister,
+ (index << kPointerSizeLog2) - kRootRegisterBias));
+}
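Subtracting kRootRegisterBias here mirrors adding it in RootRegisterDelta: because the register points a bias past the start of the roots array, nearby roots sit on both sides of it and more of them reach the short disp8 encoding. Assuming a bias of 128 and 8-byte pointers (the constant itself is defined elsewhere):

  //   index  0 -> disp = (0  << 3) - 128 = -128  (fits in disp8)
  //   index 15 -> disp = (15 << 3) - 128 =   -8  (fits in disp8)
  //   index 31 -> disp = (31 << 3) - 128 =  120  (fits in disp8)
  //   index 32 -> disp = (32 << 3) - 128 =  128  (needs disp32)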
+
+
+void MacroAssembler::LoadRootIndexed(Register destination,
+ Register variable_offset,
+ int fixed_offset) {
+ ASSERT(root_array_available_);
+ movq(destination,
+ Operand(kRootRegister,
+ variable_offset, times_pointer_size,
+ (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
- movq(Operand(kRootRegister, index << kPointerSizeLog2), source);
+ ASSERT(root_array_available_);
+ movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
+ source);
}
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
- push(Operand(kRootRegister, index << kPointerSizeLog2));
+ ASSERT(root_array_available_);
+ push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- cmpq(with, Operand(kRootRegister, index << kPointerSizeLog2));
+ ASSERT(root_array_available_);
+ cmpq(with, Operand(kRootRegister,
+ (index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(const Operand& with,
Heap::RootListIndex index) {
+ ASSERT(root_array_available_);
ASSERT(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
cmpq(with, kScratchRegister);
@@ -79,10 +199,10 @@ void MacroAssembler::CompareRoot(const Operand& with,
void MacroAssembler::RecordWriteHelper(Register object,
Register addr,
Register scratch) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check that the object is not in new space.
- NearLabel not_in_new_space;
- InNewSpace(object, scratch, not_equal, &not_in_new_space);
+ Label not_in_new_space;
+ InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear);
Abort("new-space object passed to RecordWriteHelper");
bind(&not_in_new_space);
}
@@ -101,6 +221,42 @@ void MacroAssembler::RecordWriteHelper(Register object,
}
+void MacroAssembler::InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* branch,
+ Label::Distance near_jump) {
+ if (Serializer::enabled()) {
+ // Can't do arithmetic on external references if it might get serialized.
+ // The mask isn't really an address. We load it as an external reference in
+ // case the size of the new space is different between the snapshot maker
+ // and the running system.
+ if (scratch.is(object)) {
+ movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
+ and_(scratch, kScratchRegister);
+ } else {
+ movq(scratch, ExternalReference::new_space_mask(isolate()));
+ and_(scratch, object);
+ }
+ movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
+ cmpq(scratch, kScratchRegister);
+ j(cc, branch, near_jump);
+ } else {
+ ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
+ intptr_t new_space_start =
+ reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
+ movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
+ if (scratch.is(object)) {
+ addq(scratch, kScratchRegister);
+ } else {
+ lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+ }
+ and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
+ j(cc, branch, near_jump);
+ }
+}
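The non-serializer path leans on the new space being an aligned region whose size is a power of two: adding -NewSpaceStart() and masking leaves zero exactly for addresses inside the space, so one test replaces a pair of range compares. Sketched with invented numbers, assuming NewSpaceMask() is ~(size - 1):

  //   NewSpaceStart() = 0x0000100000000000, size = 0x2000000
  //   object inside:  (object - start) < 0x2000000
  //                   -> (object - start) & ~0x1ffffff == 0
  //   object outside: some bit above bit 24 survives the mask -> non-zero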
+
+
void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
@@ -111,7 +267,7 @@ void MacroAssembler::RecordWrite(Register object,
ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
// First, check if a write barrier is even needed. The tests below
- // catch stores of Smis and stores into young gen.
+ // catch stores of smis and stores into the young generation.
Label done;
JumpIfSmi(value, &done);
@@ -123,7 +279,7 @@ void MacroAssembler::RecordWrite(Register object,
// clobbering done inside RecordWriteNonSmi but it's necessary to
// avoid having the fast case for smis leave the registers
// unchanged.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
@@ -140,7 +296,7 @@ void MacroAssembler::RecordWrite(Register object,
ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
// First, check if a write barrier is even needed. The tests below
- // catch stores of Smis and stores into young gen.
+ // catch stores of smis and stores into the young generation.
Label done;
JumpIfSmi(value, &done);
@@ -152,7 +308,7 @@ void MacroAssembler::RecordWrite(Register object,
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
@@ -166,9 +322,9 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
Register index) {
Label done;
- if (FLAG_debug_code) {
- NearLabel okay;
- JumpIfNotSmi(object, &okay);
+ if (emit_debug_code()) {
+ Label okay;
+ JumpIfNotSmi(object, &okay, Label::kNear);
Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
bind(&okay);
@@ -210,7 +366,7 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
@@ -218,19 +374,19 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
}
void MacroAssembler::Assert(Condition cc, const char* msg) {
- if (FLAG_debug_code) Check(cc, msg);
+ if (emit_debug_code()) Check(cc, msg);
}
void MacroAssembler::AssertFastElements(Register elements) {
- if (FLAG_debug_code) {
- NearLabel ok;
+ if (emit_debug_code()) {
+ Label ok;
CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
- j(equal, &ok);
+ j(equal, &ok, Label::kNear);
CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
Heap::kFixedCOWArrayMapRootIndex);
- j(equal, &ok);
+ j(equal, &ok, Label::kNear);
Abort("JSObject with fast elements map has slow elements");
bind(&ok);
}
@@ -238,8 +394,8 @@ void MacroAssembler::AssertFastElements(Register elements) {
void MacroAssembler::Check(Condition cc, const char* msg) {
- NearLabel L;
- j(cc, &L);
+ Label L;
+ j(cc, &L, Label::kNear);
Abort(msg);
// will not return here
bind(&L);
@@ -251,9 +407,9 @@ void MacroAssembler::CheckStackAlignment() {
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
- NearLabel alignment_as_expected;
+ Label alignment_as_expected;
testq(rsp, Immediate(frame_alignment_mask));
- j(zero, &alignment_as_expected);
+ j(zero, &alignment_as_expected, Label::kNear);
// Abort if stack is not aligned.
int3();
bind(&alignment_as_expected);
@@ -264,9 +420,9 @@ void MacroAssembler::CheckStackAlignment() {
void MacroAssembler::NegativeZeroTest(Register result,
Register op,
Label* then_label) {
- NearLabel ok;
+ Label ok;
testl(result, result);
- j(not_zero, &ok);
+ j(not_zero, &ok, Label::kNear);
testl(op, op);
j(sign, then_label);
bind(&ok);
@@ -305,9 +461,9 @@ void MacroAssembler::Abort(const char* msg) {
}
-void MacroAssembler::CallStub(CodeStub* stub) {
+void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
- Call(stub->GetCode(), RelocInfo::CODE_TARGET);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
@@ -378,9 +534,9 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- Runtime::Function* function = Runtime::FunctionForId(id);
+ const Runtime::Function* function = Runtime::FunctionForId(id);
Set(rax, function->nargs);
- movq(rbx, ExternalReference(function));
+ LoadAddress(rbx, ExternalReference(function, isolate()));
CEntryStub ces(1);
ces.SaveDoubles();
CallStub(&ces);
@@ -393,7 +549,8 @@ MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
}
-void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
@@ -407,19 +564,19 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// should remove this need and make the runtime routine entry code
// smarter.
Set(rax, num_arguments);
- movq(rbx, ExternalReference(f));
+ LoadAddress(rbx, ExternalReference(f, isolate()));
CEntryStub ces(f->result_size);
CallStub(&ces);
}
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::Function* f,
+MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
int num_arguments) {
if (f->nargs >= 0 && f->nargs != num_arguments) {
IllegalOperation(num_arguments);
// Since we did not call the stub, there was no allocation failure.
// Return some non-failure object.
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
// TODO(1236192): Most runtime routines don't need the number of
@@ -427,7 +584,7 @@ MaybeObject* MacroAssembler::TryCallRuntime(Runtime::Function* f,
// should remove this need and make the runtime routine entry code
// smarter.
Set(rax, num_arguments);
- movq(rbx, ExternalReference(f));
+ LoadAddress(rbx, ExternalReference(f, isolate()));
CEntryStub ces(f->result_size);
return TryCallStub(&ces);
}
@@ -436,7 +593,7 @@ MaybeObject* MacroAssembler::TryCallRuntime(Runtime::Function* f,
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
int num_arguments) {
Set(rax, num_arguments);
- movq(rbx, ext);
+ LoadAddress(rbx, ext);
CEntryStub stub(1);
CallStub(&stub);
@@ -483,14 +640,16 @@ MaybeObject* MacroAssembler::TryTailCallExternalReference(
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
- TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
+ TailCallExternalReference(ExternalReference(fid, isolate()),
+ num_arguments,
+ result_size);
}
MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
- return TryTailCallExternalReference(ExternalReference(fid),
+ return TryTailCallExternalReference(ExternalReference(fid, isolate()),
num_arguments,
result_size);
}
@@ -527,6 +686,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
Label leave_exit_frame;
Label write_back;
+ Factory* factory = isolate()->factory();
ExternalReference next_address =
ExternalReference::handle_scope_next_address();
const int kNextOffset = 0;
@@ -537,12 +697,12 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
ExternalReference::handle_scope_level_address(),
next_address);
ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address();
+ ExternalReference::scheduled_exception_address(isolate());
// Allocate HandleScope in callee-save registers.
Register prev_next_address_reg = r14;
Register prev_limit_reg = rbx;
- Register base_reg = r12;
+ Register base_reg = r15;
movq(base_reg, next_address);
movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
@@ -574,7 +734,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
// Check if the function scheduled an exception.
movq(rsi, scheduled_exception_address);
- Cmp(Operand(rsi, 0), Factory::the_hole_value());
+ Cmp(Operand(rsi, 0), factory->the_hole_value());
j(not_equal, &promote_scheduled_exception);
LeaveApiExitFrame();
@@ -589,14 +749,20 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
bind(&empty_result);
// It was zero; the result is undefined.
- Move(rax, Factory::undefined_value());
+ Move(rax, factory->undefined_value());
jmp(&prologue);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
movq(prev_limit_reg, rax);
- movq(rax, ExternalReference::delete_handle_scope_extensions());
+#ifdef _WIN64
+ LoadAddress(rcx, ExternalReference::isolate_address());
+#else
+ LoadAddress(rdi, ExternalReference::isolate_address());
+#endif
+ LoadAddress(rax,
+ ExternalReference::delete_handle_scope_extensions(isolate()));
call(rax);
movq(rax, prev_limit_reg);
jmp(&leave_exit_frame);
@@ -608,7 +774,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
int result_size) {
// Set the entry point and jump to the C entry runtime stub.
- movq(rbx, ext);
+ LoadAddress(rbx, ext);
CEntryStub ces(result_size);
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -617,7 +783,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
MaybeObject* MacroAssembler::TryJumpToExternalReference(
const ExternalReference& ext, int result_size) {
// Set the entry point and jump to the C entry runtime stub.
- movq(rbx, ext);
+ LoadAddress(rbx, ext);
CEntryStub ces(result_size);
return TryTailCallStub(&ces);
}
@@ -625,7 +791,7 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference(
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ const CallWrapper& call_wrapper) {
// Calls are not allowed in some stubs.
ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
@@ -634,7 +800,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
GetBuiltinEntry(rdx, id);
- InvokeCode(rdx, expected, expected, flag, post_call_generator);
+ InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
}
@@ -659,10 +825,10 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xorl(dst, dst);
- } else if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
} else if (is_uint32(x)) {
movl(dst, Immediate(static_cast<uint32_t>(x)));
+ } else if (is_int32(x)) {
+ movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
movq(dst, x, RelocInfo::NONE);
}
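Swapping the is_uint32 test ahead of is_int32 changes which encoding wins for values in [0, 0x7fffffff], where both predicates hold; presumably the point is the shorter instruction, since the loaded value is identical either way:

  //   movl(dst, Immediate(x));  // B8+r imm32, 5 bytes, zero-extends
  // now wins over
  //   movq(dst, Immediate(x));  // REX.W C7 /0 imm32, 7 bytes, sign-extends
  // Negative int32 values still take the movq path, as they must.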
@@ -672,7 +838,7 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
- movq(kScratchRegister, x, RelocInfo::NONE);
+ Set(kScratchRegister, x);
movq(dst, kScratchRegister);
}
}
@@ -694,7 +860,7 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
}
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
movq(dst,
reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
RelocInfo::NONE);
@@ -702,17 +868,17 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
if (allow_stub_calls()) {
Assert(equal, "Uninitialized kSmiConstantRegister");
} else {
- NearLabel ok;
- j(equal, &ok);
+ Label ok;
+ j(equal, &ok, Label::kNear);
int3();
bind(&ok);
}
}
- if (source->value() == 0) {
+ int value = source->value();
+ if (value == 0) {
xorl(dst, dst);
return;
}
- int value = source->value();
bool negative = value < 0;
unsigned int uvalue = negative ? -value : value;
@@ -763,10 +929,10 @@ void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
testb(dst, Immediate(0x01));
- NearLabel ok;
- j(zero, &ok);
+ Label ok;
+ j(zero, &ok, Label::kNear);
if (allow_stub_calls()) {
Abort("Integer32ToSmiField writing to non-smi location");
} else {
@@ -783,9 +949,9 @@ void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
Register src,
int constant) {
if (dst.is(src)) {
- addq(dst, Immediate(constant));
+ addl(dst, Immediate(constant));
} else {
- lea(dst, Operand(src, constant));
+ leal(dst, Operand(src, constant));
}
shl(dst, Immediate(kSmiShift));
}
@@ -824,12 +990,24 @@ void MacroAssembler::SmiTest(Register src) {
}
-void MacroAssembler::SmiCompare(Register dst, Register src) {
- cmpq(dst, src);
+void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
+ if (emit_debug_code()) {
+ AbortIfNotSmi(smi1);
+ AbortIfNotSmi(smi2);
+ }
+ cmpq(smi1, smi2);
}
void MacroAssembler::SmiCompare(Register dst, Smi* src) {
+ if (emit_debug_code()) {
+ AbortIfNotSmi(dst);
+ }
+ Cmp(dst, src);
+}
+
+
+void MacroAssembler::Cmp(Register dst, Smi* src) {
ASSERT(!dst.is(kScratchRegister));
if (src->value() == 0) {
testq(dst, dst);
@@ -841,20 +1019,39 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) {
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
+ if (emit_debug_code()) {
+ AbortIfNotSmi(dst);
+ AbortIfNotSmi(src);
+ }
cmpq(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
+ if (emit_debug_code()) {
+ AbortIfNotSmi(dst);
+ AbortIfNotSmi(src);
+ }
cmpq(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
+ if (emit_debug_code()) {
+ AbortIfNotSmi(dst);
+ }
cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
}
+void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
+ // The Operand cannot use the smi register.
+ Register smi_reg = GetSmiConstant(src);
+ ASSERT(!dst.AddressUsesRegister(smi_reg));
+ cmpq(dst, smi_reg);
+}
+
+
void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
}
@@ -892,6 +1089,24 @@ void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
}
+void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
+ Label* on_not_smis,
+ Label::Distance near_jump) {
+ if (dst.is(src1) || dst.is(src2)) {
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ movq(kScratchRegister, src1);
+ or_(kScratchRegister, src2);
+ JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
+ movq(dst, kScratchRegister);
+ } else {
+ movq(dst, src1);
+ or_(dst, src2);
+ JumpIfNotSmi(dst, on_not_smis, near_jump);
+ }
+}
+
+
Condition MacroAssembler::CheckSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
testb(src, Immediate(kSmiTagMask));
@@ -908,7 +1123,7 @@ Condition MacroAssembler::CheckSmi(const Operand& src) {
Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
- // Make mask 0x8000000000000001 and test that both bits are zero.
+ // Test that both bits of the mask 0x8000000000000001 are zero.
movq(kScratchRegister, src);
rol(kScratchRegister, Immediate(1));
testb(kScratchRegister, Immediate(3));
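The rotate folds the two interesting bits into the low byte: rol by 1 sends sign bit 63 to bit 0 and tag bit 0 to bit 1, so a single testb against 3 checks sign and tag together:

  //   non-negative smi: sign 0, tag 0 -> low bits 00 -> ZF set (zero)
  //   negative smi:     sign 1, tag 0 -> low bits 01 -> non-zero
  //   heap object:              tag 1 -> bit 1 set   -> non-zero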
@@ -1002,6 +1217,95 @@ void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
}
+void MacroAssembler::JumpIfNotValidSmiValue(Register src,
+ Label* on_invalid,
+ Label::Distance near_jump) {
+ Condition is_valid = CheckInteger32ValidSmiValue(src);
+ j(NegateCondition(is_valid), on_invalid, near_jump);
+}
+
+
+void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
+ Label* on_invalid,
+ Label::Distance near_jump) {
+ Condition is_valid = CheckUInteger32ValidSmiValue(src);
+ j(NegateCondition(is_valid), on_invalid, near_jump);
+}
+
+
+void MacroAssembler::JumpIfSmi(Register src,
+ Label* on_smi,
+ Label::Distance near_jump) {
+ Condition smi = CheckSmi(src);
+ j(smi, on_smi, near_jump);
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register src,
+ Label* on_not_smi,
+ Label::Distance near_jump) {
+ Condition smi = CheckSmi(src);
+ j(NegateCondition(smi), on_not_smi, near_jump);
+}
+
+
+void MacroAssembler::JumpUnlessNonNegativeSmi(
+ Register src, Label* on_not_smi_or_negative,
+ Label::Distance near_jump) {
+ Condition non_negative_smi = CheckNonNegativeSmi(src);
+ j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
+}
+
+
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+ Smi* constant,
+ Label* on_equals,
+ Label::Distance near_jump) {
+ SmiCompare(src, constant);
+ j(equal, on_equals, near_jump);
+}
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register src1,
+ Register src2,
+ Label* on_not_both_smi,
+ Label::Distance near_jump) {
+ Condition both_smi = CheckBothSmi(src1, src2);
+ j(NegateCondition(both_smi), on_not_both_smi, near_jump);
+}
+
+
+void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
+ Register src2,
+ Label* on_not_both_smi,
+ Label::Distance near_jump) {
+ Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
+ j(NegateCondition(both_smi), on_not_both_smi, near_jump);
+}
+
+
+void MacroAssembler::SmiTryAddConstant(Register dst,
+ Register src,
+ Smi* constant,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ // Does not assume that src is a smi.
+ ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src.is(kScratchRegister));
+
+ JumpIfNotSmi(src, on_not_smi_result, near_jump);
+ Register tmp = (dst.is(src) ? kScratchRegister : dst);
+ LoadSmiConstant(tmp, constant);
+ addq(tmp, src);
+ j(overflow, on_not_smi_result, near_jump);
+ if (dst.is(src)) {
+ movq(dst, tmp);
+ }
+}
+
+
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1058,6 +1362,30 @@ void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
}
+void MacroAssembler::SmiAddConstant(Register dst,
+ Register src,
+ Smi* constant,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+
+ LoadSmiConstant(kScratchRegister, constant);
+ addq(kScratchRegister, src);
+ j(overflow, on_not_smi_result, near_jump);
+ movq(dst, kScratchRegister);
+ } else {
+ LoadSmiConstant(dst, constant);
+ addq(dst, src);
+ j(overflow, on_not_smi_result, near_jump);
+ }
+}
+
+
void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1082,19 +1410,148 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
}
+void MacroAssembler::SmiSubConstant(Register dst,
+ Register src,
+ Smi* constant,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ if (constant->value() == Smi::kMinValue) {
+ // Subtracting min-value from any non-negative value will overflow.
+      // We test that the value is non-negative before doing the subtraction.
+ testq(src, src);
+ j(not_sign, on_not_smi_result, near_jump);
+ LoadSmiConstant(kScratchRegister, constant);
+ subq(dst, kScratchRegister);
+ } else {
+ // Subtract by adding the negation.
+ LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
+ addq(kScratchRegister, dst);
+ j(overflow, on_not_smi_result, near_jump);
+ movq(dst, kScratchRegister);
+ }
+ } else {
+ if (constant->value() == Smi::kMinValue) {
+ // Subtracting min-value from any non-negative value will overflow.
+      // We test that the value is non-negative before doing the subtraction.
+ testq(src, src);
+ j(not_sign, on_not_smi_result, near_jump);
+ LoadSmiConstant(dst, constant);
+      // Adding and subtracting the min-value gives the same result; they
+      // differ only in the overflow flag, which we don't check here.
+ addq(dst, src);
+ } else {
+ // Subtract by adding the negation.
+ LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
+ addq(dst, src);
+ j(overflow, on_not_smi_result, near_jump);
+ }
+ }
+}
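Subtraction by a constant is rewritten as addition of the negated constant so that j(overflow) can follow a plain addq, but Smi::kMinValue is its own negation, so that one constant keeps a real subtraction guarded by a sign test instead: subtracting kMinValue overflows for every non-negative input and is safe for every negative one. Numerically, with a 32-bit smi payload:

  //   src = -5:  -5 - (-2^31) = 2^31 - 5  -> fits, subtraction is safe
  //   src =  5:   5 - (-2^31) = 2^31 + 5  -> overflows, which is why any
  //               non-negative src jumps to on_not_smi_result up front
  // Every other constant c becomes dst = src + (-c), checked via overflow.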
+
+
+void MacroAssembler::SmiNeg(Register dst,
+ Register src,
+ Label* on_smi_result,
+ Label::Distance near_jump) {
+ if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ movq(kScratchRegister, src);
+ neg(dst); // Low 32 bits are retained as zero by negation.
+ // Test if result is zero or Smi::kMinValue.
+ cmpq(dst, kScratchRegister);
+ j(not_equal, on_smi_result, near_jump);
+ movq(src, kScratchRegister);
+ } else {
+ movq(dst, src);
+ neg(dst);
+ cmpq(dst, src);
+ // If the result is zero or Smi::kMinValue, negation failed to create a smi.
+ j(not_equal, on_smi_result, near_jump);
+ }
+}
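Negation maps exactly two smi inputs to themselves: 0 (whose JavaScript negation is -0, which no smi can represent) and Smi::kMinValue (whose negation overflows back to itself). Comparing the negated value against the original therefore catches both failure cases with a single cmpq:

  //   input  0        -> neg gives  0          -> equal -> fall through
  //   input kMinValue -> neg overflows to self -> equal -> fall through
  //   input  smi 5    -> neg gives smi -5      -> not_equal -> on_smi_result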
+
+
+void MacroAssembler::SmiAdd(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT_NOT_NULL(on_not_smi_result);
+ ASSERT(!dst.is(src2));
+ if (dst.is(src1)) {
+ movq(kScratchRegister, src1);
+ addq(kScratchRegister, src2);
+ j(overflow, on_not_smi_result, near_jump);
+ movq(dst, kScratchRegister);
+ } else {
+ movq(dst, src1);
+ addq(dst, src2);
+ j(overflow, on_not_smi_result, near_jump);
+ }
+}
+
+
+void MacroAssembler::SmiAdd(Register dst,
+ Register src1,
+ const Operand& src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT_NOT_NULL(on_not_smi_result);
+ if (dst.is(src1)) {
+ movq(kScratchRegister, src1);
+ addq(kScratchRegister, src2);
+ j(overflow, on_not_smi_result, near_jump);
+ movq(dst, kScratchRegister);
+ } else {
+ ASSERT(!src2.AddressUsesRegister(dst));
+ movq(dst, src1);
+ addq(dst, src2);
+ j(overflow, on_not_smi_result, near_jump);
+ }
+}
+
+
void MacroAssembler::SmiAdd(Register dst,
Register src1,
Register src2) {
// No overflow checking. Use only when it's known that
// overflowing is impossible.
+ if (!dst.is(src1)) {
+ if (emit_debug_code()) {
+ movq(kScratchRegister, src1);
+ addq(kScratchRegister, src2);
+ Check(no_overflow, "Smi addition overflow");
+ }
+ lea(dst, Operand(src1, src2, times_1, 0));
+ } else {
+ addq(dst, src2);
+ Assert(no_overflow, "Smi addition overflow");
+ }
+}
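lea computes src1 + src2 without touching the flags, which is why it can be used when dst aliases neither input, but it also means there is no overflow flag to assert on. The debug build therefore redoes the sum with addq in the scratch register first, purely so Check(no_overflow) has a real flag to inspect, and then emits the same lea:

  // Release build, dst != src1:
  //   lea(dst, Operand(src1, src2, times_1, 0));  // flags untouched
  // Debug build prepends:
  //   movq(kScratchRegister, src1);
  //   addq(kScratchRegister, src2);               // sets OF on overflow
  //   Check(no_overflow, "Smi addition overflow");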
+
+
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT_NOT_NULL(on_not_smi_result);
ASSERT(!dst.is(src2));
if (dst.is(src1)) {
- addq(dst, src2);
+ cmpq(dst, src2);
+ j(overflow, on_not_smi_result, near_jump);
+ subq(dst, src2);
} else {
movq(dst, src1);
- addq(dst, src2);
+ subq(dst, src2);
+ j(overflow, on_not_smi_result, near_jump);
}
- Assert(no_overflow, "Smi addition overflow");
}
@@ -1102,13 +1559,30 @@ void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
// No overflow checking. Use only when it's known that
// overflowing is impossible (e.g., subtracting two positive smis).
ASSERT(!dst.is(src2));
+ if (!dst.is(src1)) {
+ movq(dst, src1);
+ }
+ subq(dst, src2);
+ Assert(no_overflow, "Smi subtraction overflow");
+}
+
+
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ const Operand& src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT_NOT_NULL(on_not_smi_result);
if (dst.is(src1)) {
- subq(dst, src2);
+ movq(kScratchRegister, src2);
+ cmpq(src1, kScratchRegister);
+ j(overflow, on_not_smi_result, near_jump);
+ subq(src1, kScratchRegister);
} else {
movq(dst, src1);
subq(dst, src2);
+ j(overflow, on_not_smi_result, near_jump);
}
- Assert(no_overflow, "Smi subtraction overflow");
}
@@ -1117,16 +1591,188 @@ void MacroAssembler::SmiSub(Register dst,
const Operand& src2) {
// No overflow checking. Use only when it's known that
// overflowing is impossible (e.g., subtracting two positive smis).
- if (dst.is(src1)) {
- subq(dst, src2);
- } else {
+ if (!dst.is(src1)) {
movq(dst, src1);
- subq(dst, src2);
}
+ subq(dst, src2);
Assert(no_overflow, "Smi subtraction overflow");
}
+void MacroAssembler::SmiMul(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT(!dst.is(src2));
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+
+ if (dst.is(src1)) {
+ Label failure, zero_correct_result;
+ movq(kScratchRegister, src1); // Create backup for later testing.
+ SmiToInteger64(dst, src1);
+ imul(dst, src2);
+ j(overflow, &failure, Label::kNear);
+
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case.
+ Label correct_result;
+ testq(dst, dst);
+ j(not_zero, &correct_result, Label::kNear);
+
+ movq(dst, kScratchRegister);
+ xor_(dst, src2);
+ // Result was positive zero.
+ j(positive, &zero_correct_result, Label::kNear);
+
+ bind(&failure); // Reused failure exit, restores src1.
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result, near_jump);
+
+ bind(&zero_correct_result);
+ Set(dst, 0);
+
+ bind(&correct_result);
+ } else {
+ SmiToInteger64(dst, src1);
+ imul(dst, src2);
+ j(overflow, on_not_smi_result, near_jump);
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case.
+ Label correct_result;
+ testq(dst, dst);
+ j(not_zero, &correct_result, Label::kNear);
+    // One of src1 and src2 is zero; check whether the other one
+    // is negative.
+ movq(kScratchRegister, src1);
+ xor_(kScratchRegister, src2);
+ j(negative, on_not_smi_result, near_jump);
+ bind(&correct_result);
+ }
+}
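The extra zero test exists because JavaScript distinguishes -0 from +0: an integer multiply such as -4 * 0 yields the integer 0, but the specified result is -0, which no smi can represent. Any zero product with a negative operand therefore bails out, and xor-ing the saved operands recovers the combined sign in one instruction:

  //   src1 = -4, src2 = 0:  src1 ^ src2 < 0  -> slow path, result is -0
  //   src1 =  4, src2 = 0:  src1 ^ src2 >= 0 -> smi 0 is correct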
+
+
+void MacroAssembler::SmiDiv(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src2.is(rax));
+ ASSERT(!src2.is(rdx));
+ ASSERT(!src1.is(rdx));
+
+ // Check for 0 divisor (result is +/-Infinity).
+ testq(src2, src2);
+ j(zero, on_not_smi_result, near_jump);
+
+ if (src1.is(rax)) {
+ movq(kScratchRegister, src1);
+ }
+ SmiToInteger32(rax, src1);
+ // We need to rule out dividing Smi::kMinValue by -1, since that would
+ // overflow in idiv and raise an exception.
+  // We combine this with the negative-zero test (negative zero only occurs
+  // when dividing zero by a negative number).
+
+ // We overshoot a little and go to slow case if we divide min-value
+ // by any negative value, not just -1.
+ Label safe_div;
+ testl(rax, Immediate(0x7fffffff));
+ j(not_zero, &safe_div, Label::kNear);
+ testq(src2, src2);
+ if (src1.is(rax)) {
+ j(positive, &safe_div, Label::kNear);
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result, near_jump);
+ } else {
+ j(negative, on_not_smi_result, near_jump);
+ }
+ bind(&safe_div);
+
+ SmiToInteger32(src2, src2);
+ // Sign extend src1 into edx:eax.
+ cdq();
+ idivl(src2);
+ Integer32ToSmi(src2, src2);
+ // Check that the remainder is zero.
+ testl(rdx, rdx);
+ if (src1.is(rax)) {
+ Label smi_result;
+ j(zero, &smi_result, Label::kNear);
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result, near_jump);
+ bind(&smi_result);
+ } else {
+ j(not_zero, on_not_smi_result, near_jump);
+ }
+ if (!dst.is(src1) && src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ Integer32ToSmi(dst, rax);
+}
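Three shapes of division can never produce a smi and are routed to on_not_smi_result: a zero divisor (the JavaScript result is +/-Infinity), Smi::kMinValue / -1 (+2^31 is unrepresentable, and idivl would fault), and 0 divided by a negative number (the result is -0). The testl against 0x7fffffff is zero only for untagged 0 and kMinValue, so one test guards both of the latter cases, at the cost of also sending kMinValue over any negative divisor slow. A non-zero remainder bails out as well:

  //   10 / 0          -> Infinity             -> slow
  //   kMinValue / -1  -> 2^31, idivl faults   -> slow
  //   0 / -3          -> -0                   -> slow
  //   7 / 2           -> remainder 1          -> slow
  //   -12 / 4         -> -3, remainder 0      -> smi result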
+
+
+void MacroAssembler::SmiMod(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!src2.is(rax));
+ ASSERT(!src2.is(rdx));
+ ASSERT(!src1.is(rdx));
+ ASSERT(!src1.is(src2));
+
+ testq(src2, src2);
+ j(zero, on_not_smi_result, near_jump);
+
+ if (src1.is(rax)) {
+ movq(kScratchRegister, src1);
+ }
+ SmiToInteger32(rax, src1);
+ SmiToInteger32(src2, src2);
+
+ // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
+ Label safe_div;
+ cmpl(rax, Immediate(Smi::kMinValue));
+ j(not_equal, &safe_div, Label::kNear);
+ cmpl(src2, Immediate(-1));
+ j(not_equal, &safe_div, Label::kNear);
+ // Retag inputs and go slow case.
+ Integer32ToSmi(src2, src2);
+ if (src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ jmp(on_not_smi_result, near_jump);
+ bind(&safe_div);
+
+ // Sign extend eax into edx:eax.
+ cdq();
+ idivl(src2);
+ // Restore smi tags on inputs.
+ Integer32ToSmi(src2, src2);
+ if (src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ // Check for a negative zero result. If the result is zero, and the
+ // dividend is negative, go slow to return a floating point negative zero.
+ Label smi_result;
+ testl(rdx, rdx);
+ j(not_zero, &smi_result, Label::kNear);
+ testq(src1, src1);
+ j(negative, on_not_smi_result, near_jump);
+ bind(&smi_result);
+ Integer32ToSmi(dst, rdx);
+}
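SmiMod has the mirror-image hazards: kMinValue % -1 would still fault in idivl even though the mathematical remainder is 0, so it is filtered out before dividing, and a zero remainder from a negative dividend must come back as -0 (JavaScript gives the remainder the sign of the dividend), which only the slow path can produce:

  //   kMinValue % -1 -> filtered before idivl          -> slow
  //   -6 % 3         -> remainder 0, dividend negative -> -0 -> slow
  //    7 % 3         -> remainder  1                   -> smi 1
  //   -7 % 3         -> remainder -1                   -> smi -1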
+
+
void MacroAssembler::SmiNot(Register dst, Register src) {
ASSERT(!dst.is(kScratchRegister));
ASSERT(!src.is(kScratchRegister));
@@ -1166,6 +1812,7 @@ void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
+ ASSERT(!src1.is(src2));
movq(dst, src1);
}
or_(dst, src2);
@@ -1186,6 +1833,7 @@ void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
+ ASSERT(!src1.is(src2));
movq(dst, src1);
}
xor_(dst, src2);
@@ -1231,11 +1879,28 @@ void MacroAssembler::SmiShiftLeftConstant(Register dst,
}
+void MacroAssembler::SmiShiftLogicalRightConstant(
+ Register dst, Register src, int shift_value,
+ Label* on_not_smi_result, Label::Distance near_jump) {
+  // A logical right shift interprets its result as an *unsigned* number.
+ if (dst.is(src)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ movq(dst, src);
+ if (shift_value == 0) {
+ testq(dst, dst);
+ j(negative, on_not_smi_result, near_jump);
+ }
+ shr(dst, Immediate(shift_value + kSmiShift));
+ shl(dst, Immediate(kSmiShift));
+ }
+}
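
The two shifts operate on the still-tagged value: with the x64 smi layout the
32-bit payload sits in the upper word (kSmiShift == 32), so shifting right by
shift_value + 32 untags and shifts in one instruction, and the left shift by
32 retags. Only shift_value == 0 can fail, since any real logical shift clears
bit 31, while a zero shift leaves a negative payload whose unsigned
interpretation does not fit in a smi. As a C++ sketch (not V8 code):

    #include <cstdint>
    // Sketch only: the tagged-shift trick, assuming kSmiShift == 32.
    uint64_t ShiftTaggedSmiLogicalRight(uint64_t tagged, int shift_value) {
      const int kSmiShift = 32;
      uint64_t payload = tagged >> (shift_value + kSmiShift);  // untag + shift
      return payload << kSmiShift;                             // retag
    }
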
+
+
void MacroAssembler::SmiShiftLeft(Register dst,
Register src1,
Register src2) {
ASSERT(!dst.is(rcx));
- NearLabel result_ok;
// Untag shift amount.
if (!dst.is(src1)) {
movq(dst, src1);
@@ -1247,6 +1912,45 @@ void MacroAssembler::SmiShiftLeft(Register dst,
}
+void MacroAssembler::SmiShiftLogicalRight(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(rcx));
+ // dst and src1 can be the same, because the one case that bails out
+ // is a shift by 0, which leaves dst, and therefore src1, unchanged.
+ if (src1.is(rcx) || src2.is(rcx)) {
+ movq(kScratchRegister, rcx);
+ }
+ if (!dst.is(src1)) {
+ movq(dst, src1);
+ }
+ SmiToInteger32(rcx, src2);
+ orl(rcx, Immediate(kSmiShift));
+  shr_cl(dst);  // Shift amount is (rcx & 0x1f) + 32.
+ shl(dst, Immediate(kSmiShift));
+ testq(dst, dst);
+ if (src1.is(rcx) || src2.is(rcx)) {
+ Label positive_result;
+ j(positive, &positive_result, Label::kNear);
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ jmp(on_not_smi_result, near_jump);
+ bind(&positive_result);
+ } else {
+ // src2 was zero and src1 negative.
+ j(negative, on_not_smi_result, near_jump);
+ }
+}
+
+
void MacroAssembler::SmiShiftArithmeticRight(Register dst,
Register src1,
Register src2) {
@@ -1274,6 +1978,45 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst,
}
+void MacroAssembler::SelectNonSmi(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smis,
+ Label::Distance near_jump) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(src1));
+ ASSERT(!dst.is(src2));
+ // Both operands must not be smis.
+#ifdef DEBUG
+ if (allow_stub_calls()) { // Check contains a stub call.
+ Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+ Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
+ }
+#endif
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ movl(kScratchRegister, Immediate(kSmiTagMask));
+ and_(kScratchRegister, src1);
+ testl(kScratchRegister, src2);
+ // If non-zero then both are smis.
+ j(not_zero, on_not_smis, near_jump);
+
+ // Exactly one operand is a smi.
+ ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+  // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
+ subq(kScratchRegister, Immediate(1));
+  // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
+ movq(dst, src1);
+ xor_(dst, src2);
+ and_(dst, kScratchRegister);
+ // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
+ xor_(dst, src1);
+ // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
+}
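
The selection is branch-free. With kSmiTag == 0 and kSmiTagMask == 1,
(src1 & 1) - 1 is all ones when src1 is a smi and all zeros otherwise, so
masking src1 ^ src2 with it and xoring src1 back in yields src2 in the first
case and src1 in the second. The same trick as a C++ sketch (not V8 code):

    #include <cstdint>
    // Sketch only. Assumes exactly one of a, b has a clear low bit
    // (is a smi); returns the other one (the non-smi).
    uint64_t SelectNonSmiValue(uint64_t a, uint64_t b) {
      uint64_t mask = (a & 1) - 1;   // all 1s if a is a smi, else all 0s
      return ((a ^ b) & mask) ^ a;   // b if a is a smi, else a
    }
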
+
+
SmiIndex MacroAssembler::SmiToIndex(Register dst,
Register src,
int shift) {
@@ -1309,6 +2052,104 @@ SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
}
+void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
+ ASSERT_EQ(0, kSmiShift % kBitsPerByte);
+ addl(dst, Operand(src, kSmiShift / kBitsPerByte));
+}
+
+
+void MacroAssembler::JumpIfNotString(Register object,
+ Register object_map,
+ Label* not_string,
+ Label::Distance near_jump) {
+ Condition is_smi = CheckSmi(object);
+ j(is_smi, not_string, near_jump);
+ CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
+ j(above_equal, not_string, near_jump);
+}
+
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
+ Register first_object,
+ Register second_object,
+ Register scratch1,
+ Register scratch2,
+ Label* on_fail,
+ Label::Distance near_jump) {
+ // Check that both objects are not smis.
+ Condition either_smi = CheckEitherSmi(first_object, second_object);
+ j(either_smi, on_fail, near_jump);
+
+ // Load instance type for both strings.
+ movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+ movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+ movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+ movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+ andl(scratch1, Immediate(kFlatAsciiStringMask));
+ andl(scratch2, Immediate(kFlatAsciiStringMask));
+ // Interleave the bits to check both scratch1 and scratch2 in one test.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmpl(scratch1,
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ j(not_equal, on_fail, near_jump);
+}
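
"Interleave" here really means concatenate: after masking, each instance type
fits in the low three bits, so the lea computes scratch1 + scratch2 * 8,
packing both values into disjoint bit fields of one register, and a single
compare against tag + (tag << 3) checks both strings at once. The ASSERT_EQ
guarantees the two fields cannot overlap. In C++ terms (a sketch, not V8 code):

    // Sketch only: both types pass iff each equals `tag` after masking;
    // the << 3 keeps the two 3-bit fields disjoint, as the assert requires.
    bool BothFlatAscii(int type1, int type2, int mask, int tag) {
      return (type1 & mask) + ((type2 & mask) << 3) == tag + (tag << 3);
    }
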
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+ Register instance_type,
+ Register scratch,
+ Label* failure,
+ Label::Distance near_jump) {
+ if (!scratch.is(instance_type)) {
+ movl(scratch, instance_type);
+ }
+
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+
+ andl(scratch, Immediate(kFlatAsciiStringMask));
+ cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+ j(not_equal, failure, near_jump);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* on_fail,
+ Label::Distance near_jump) {
+ // Load instance type for both strings.
+ movq(scratch1, first_object_instance_type);
+ movq(scratch2, second_object_instance_type);
+
+ // Check that both are flat ascii strings.
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+ andl(scratch1, Immediate(kFlatAsciiStringMask));
+ andl(scratch2, Immediate(kFlatAsciiStringMask));
+ // Interleave the bits to check both scratch1 and scratch2 in one test.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmpl(scratch1,
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ j(not_equal, on_fail, near_jump);
+}
+
+
+
void MacroAssembler::Move(Register dst, Register src) {
if (!dst.is(src)) {
movq(dst, src);
@@ -1339,7 +2180,7 @@ void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
if (source->IsSmi()) {
- SmiCompare(dst, Smi::cast(*source));
+ Cmp(dst, Smi::cast(*source));
} else {
Move(kScratchRegister, source);
cmpq(dst, kScratchRegister);
@@ -1349,7 +2190,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
if (source->IsSmi()) {
- SmiCompare(dst, Smi::cast(*source));
+ Cmp(dst, Smi::cast(*source));
} else {
ASSERT(source->IsHeapObject());
movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
@@ -1393,7 +2234,7 @@ void MacroAssembler::Test(const Operand& src, Smi* source) {
void MacroAssembler::Jump(ExternalReference ext) {
- movq(kScratchRegister, ext);
+ LoadAddress(kScratchRegister, ext);
jmp(kScratchRegister);
}
@@ -1410,21 +2251,48 @@ void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
}
+int MacroAssembler::CallSize(ExternalReference ext) {
+  // Opcode for call kScratchRegister is: REX.B FF D2 (three bytes).
+ const int kCallInstructionSize = 3;
+ return LoadAddressSize(ext) + kCallInstructionSize;
+}
+
+
void MacroAssembler::Call(ExternalReference ext) {
- movq(kScratchRegister, ext);
+#ifdef DEBUG
+ int end_position = pc_offset() + CallSize(ext);
+#endif
+ LoadAddress(kScratchRegister, ext);
call(kScratchRegister);
+#ifdef DEBUG
+ CHECK_EQ(end_position, pc_offset());
+#endif
}
void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
+#ifdef DEBUG
+ int end_position = pc_offset() + CallSize(destination, rmode);
+#endif
movq(kScratchRegister, destination, rmode);
call(kScratchRegister);
+#ifdef DEBUG
+ CHECK_EQ(pc_offset(), end_position);
+#endif
}
-void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
+void MacroAssembler::Call(Handle<Code> code_object,
+ RelocInfo::Mode rmode,
+ unsigned ast_id) {
+#ifdef DEBUG
+ int end_position = pc_offset() + CallSize(code_object);
+#endif
ASSERT(RelocInfo::IsCodeTarget(rmode));
- call(code_object, rmode);
+ call(code_object, rmode, ast_id);
+#ifdef DEBUG
+ CHECK_EQ(end_position, pc_offset());
+#endif
}
@@ -1440,10 +2308,10 @@ void MacroAssembler::Pushad() {
push(r9);
// r10 is kScratchRegister.
push(r11);
- push(r12);
+ // r12 is kSmiConstantRegister.
// r13 is kRootRegister.
push(r14);
- // r15 is kSmiConstantRegister
+ push(r15);
STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
// Use lea for symmetry with Popad.
int sp_delta =
@@ -1457,8 +2325,8 @@ void MacroAssembler::Popad() {
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
lea(rsp, Operand(rsp, sp_delta));
+ pop(r15);
pop(r14);
- pop(r12);
pop(r11);
pop(r9);
pop(r8);
@@ -1477,7 +2345,7 @@ void MacroAssembler::Dropad() {
// Order general registers are pushed by Pushad:
-// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14.
+// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
0,
1,
@@ -1491,10 +2359,10 @@ int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
7,
-1,
8,
- 9,
-1,
- 10,
- -1
+ -1,
+ 9,
+ 10
};
@@ -1544,18 +2412,20 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
push(Immediate(0)); // NULL frame pointer.
}
// Save the current handler.
- movq(kScratchRegister, ExternalReference(Top::k_handler_address));
- push(Operand(kScratchRegister, 0));
+ Operand handler_operand =
+ ExternalOperand(ExternalReference(Isolate::k_handler_address, isolate()));
+ push(handler_operand);
// Link this handler.
- movq(Operand(kScratchRegister, 0), rsp);
+ movq(handler_operand, rsp);
}
void MacroAssembler::PopTryHandler() {
ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
// Unlink this handler.
- movq(kScratchRegister, ExternalReference(Top::k_handler_address));
- pop(Operand(kScratchRegister, 0));
+ Operand handler_operand =
+ ExternalOperand(ExternalReference(Isolate::k_handler_address, isolate()));
+ pop(handler_operand);
// Remove the remaining fields.
addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
@@ -1573,21 +2443,20 @@ void MacroAssembler::Throw(Register value) {
movq(rax, value);
}
- ExternalReference handler_address(Top::k_handler_address);
- movq(kScratchRegister, handler_address);
- movq(rsp, Operand(kScratchRegister, 0));
+ ExternalReference handler_address(Isolate::k_handler_address, isolate());
+ Operand handler_operand = ExternalOperand(handler_address);
+ movq(rsp, handler_operand);
// get next in chain
- pop(rcx);
- movq(Operand(kScratchRegister, 0), rcx);
+ pop(handler_operand);
pop(rbp); // pop frame pointer
pop(rdx); // remove state
// Before returning we restore the context from the frame pointer if not NULL.
// The frame pointer is NULL in the exception handler of a JS entry frame.
Set(rsi, 0); // Tentatively set context pointer to NULL
- NearLabel skip;
+ Label skip;
cmpq(rbp, Immediate(0));
- j(equal, &skip);
+ j(equal, &skip, Label::kNear);
movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
bind(&skip);
ret(0);
@@ -1601,17 +2470,16 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
movq(rax, value);
}
// Fetch top stack handler.
- ExternalReference handler_address(Top::k_handler_address);
- movq(kScratchRegister, handler_address);
- movq(rsp, Operand(kScratchRegister, 0));
+ ExternalReference handler_address(Isolate::k_handler_address, isolate());
+ Load(rsp, handler_address);
// Unwind the handlers until the ENTRY handler is found.
- NearLabel loop, done;
+ Label loop, done;
bind(&loop);
// Load the type of the current stack handler.
const int kStateOffset = StackHandlerConstants::kStateOffset;
cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
- j(equal, &done);
+ j(equal, &done, Label::kNear);
// Fetch the next handler in the list.
const int kNextOffset = StackHandlerConstants::kNextOffset;
movq(rsp, Operand(rsp, kNextOffset));
@@ -1619,19 +2487,21 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
bind(&done);
// Set the top handler address to next handler past the current ENTRY handler.
- movq(kScratchRegister, handler_address);
- pop(Operand(kScratchRegister, 0));
+ Operand handler_operand = ExternalOperand(handler_address);
+ pop(handler_operand);
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
- movq(rax, Immediate(false));
- store_rax(external_caught);
+ ExternalReference external_caught(
+ Isolate::k_external_caught_exception_address, isolate());
+ Set(rax, static_cast<int64_t>(false));
+ Store(external_caught, rax);
// Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
+ ExternalReference pending_exception(Isolate::k_pending_exception_address,
+ isolate());
movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
- store_rax(pending_exception);
+ Store(pending_exception, rax);
}
// Clear the context pointer.
@@ -1639,14 +2509,14 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
// Restore registers from handler.
STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
- StackHandlerConstants::kFPOffset);
+ StackHandlerConstants::kFPOffset);
pop(rbp); // FP
STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
- StackHandlerConstants::kStateOffset);
+ StackHandlerConstants::kStateOffset);
pop(rdx); // State
STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
- StackHandlerConstants::kPCOffset);
+ StackHandlerConstants::kPCOffset);
ret(0);
}
@@ -1688,11 +2558,21 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
}
+void MacroAssembler::CheckFastElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Immediate(Map::kMaximumBitField2FastElementValue));
+ j(above, fail, distance);
+}
+
+
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- bool is_heap_object) {
- if (!is_heap_object) {
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
@@ -1700,26 +2580,87 @@ void MacroAssembler::CheckMap(Register obj,
}
+void MacroAssembler::ClampUint8(Register reg) {
+ Label done;
+ testl(reg, Immediate(0xFFFFFF00));
+ j(zero, &done, Label::kNear);
+ setcc(negative, reg); // 1 if negative, 0 if positive.
+ decb(reg); // 0 if negative, 255 if positive.
+ bind(&done);
+}
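
Once any bit outside the low byte is set, only the sign matters:
setcc(negative) materializes 1 for negative inputs and 0 for too-large ones,
and the byte decrement maps those to 0 and 255 respectively. The same mapping
as a C++ sketch (not V8 code):

    #include <cstdint>
    // Sketch only: the branch-light byte clamp.
    uint8_t ClampToUint8(int32_t v) {
      if ((v & 0xFFFFFF00) == 0) return static_cast<uint8_t>(v);  // in range
      uint8_t negative = v < 0 ? 1 : 0;  // setcc(negative, reg)
      return negative - 1;               // decb: 1 -> 0, 0 -> 255 (byte wrap)
    }
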
+
+
+void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
+ XMMRegister temp_xmm_reg,
+ Register result_reg,
+ Register temp_reg) {
+ Label done;
+ Set(result_reg, 0);
+ xorps(temp_xmm_reg, temp_xmm_reg);
+ ucomisd(input_reg, temp_xmm_reg);
+ j(below, &done, Label::kNear);
+ uint64_t one_half = BitCast<uint64_t, double>(0.5);
+ Set(temp_reg, one_half);
+ movq(temp_xmm_reg, temp_reg);
+ addsd(temp_xmm_reg, input_reg);
+ cvttsd2si(result_reg, temp_xmm_reg);
+ testl(result_reg, Immediate(0xFFFFFF00));
+ j(zero, &done, Label::kNear);
+ Set(result_reg, 255);
+ bind(&done);
+}
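
The ucomisd/j(below) pair also catches NaN, since an unordered compare sets
the carry flag, so NaN clamps to 0 along with the negatives. In-range values
are rounded by adding 0.5 before the truncating cvttsd2si, and anything
landing outside the low byte, including the 0x80000000 "integer indefinite"
cvttsd2si produces for huge inputs, clamps to 255. A C++ sketch (not V8 code;
the cast stands in for cvttsd2si, whose out-of-range behavior C++ formally
leaves undefined but x64 hardware pins to 0x80000000):

    #include <cstdint>
    // Sketch only: double-to-byte clamp with round-half-up.
    uint8_t ClampDoubleToByte(double input) {
      if (!(input >= 0.0)) return 0;  // negative, or NaN via unordered compare
      int32_t rounded = static_cast<int32_t>(input + 0.5);  // cvttsd2si
      if ((rounded & 0xFFFFFF00) != 0) return 255;          // out of byte range
      return static_cast<uint8_t>(rounded);
    }
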
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+ Register descriptors) {
+ movq(descriptors, FieldOperand(map,
+ Map::kInstanceDescriptorsOrBitField3Offset));
+ Label not_smi;
+ JumpIfNotSmi(descriptors, &not_smi, Label::kNear);
+ Move(descriptors, isolate()->factory()->empty_descriptor_array());
+ bind(&not_smi);
+}
+
+
+void MacroAssembler::DispatchMap(Register obj,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
+ Label fail;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, &fail);
+ }
+ Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+ j(equal, success, RelocInfo::CODE_TARGET);
+
+ bind(&fail);
+}
+
+
void MacroAssembler::AbortIfNotNumber(Register object) {
- NearLabel ok;
+ Label ok;
Condition is_smi = CheckSmi(object);
- j(is_smi, &ok);
+ j(is_smi, &ok, Label::kNear);
Cmp(FieldOperand(object, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ isolate()->factory()->heap_number_map());
Assert(equal, "Operand not a number");
bind(&ok);
}
void MacroAssembler::AbortIfSmi(Register object) {
- NearLabel ok;
Condition is_smi = CheckSmi(object);
Assert(NegateCondition(is_smi), "Operand is a smi");
}
void MacroAssembler::AbortIfNotSmi(Register object) {
- NearLabel ok;
+ Condition is_smi = CheckSmi(object);
+ Assert(is_smi, "Operand is not a smi");
+}
+
+
+void MacroAssembler::AbortIfNotSmi(const Operand& object) {
Condition is_smi = CheckSmi(object);
Assert(is_smi, "Operand is not a smi");
}
@@ -1770,10 +2711,10 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(not_equal, miss);
// Make sure that the function has an instance prototype.
- NearLabel non_instance;
+ Label non_instance;
testb(FieldOperand(result, Map::kBitFieldOffset),
Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance);
+ j(not_zero, &non_instance, Label::kNear);
// Get the prototype or initial map from the function.
movq(result,
@@ -1786,13 +2727,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(equal, miss);
// If the function does not have an initial map, we're done.
- NearLabel done;
+ Label done;
CmpObjectType(result, MAP_TYPE, kScratchRegister);
- j(not_equal, &done);
+ j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
movq(result, FieldOperand(result, Map::kPrototypeOffset));
- jmp(&done);
+ jmp(&done, Label::kNear);
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
@@ -1806,8 +2747,8 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
- movq(kScratchRegister, ExternalReference(counter));
- movl(Operand(kScratchRegister, 0), Immediate(value));
+ Operand counter_operand = ExternalOperand(ExternalReference(counter));
+ movl(counter_operand, Immediate(value));
}
}
@@ -1815,12 +2756,11 @@ void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
ASSERT(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
- movq(kScratchRegister, ExternalReference(counter));
- Operand operand(kScratchRegister, 0);
+ Operand counter_operand = ExternalOperand(ExternalReference(counter));
if (value == 1) {
- incl(operand);
+ incl(counter_operand);
} else {
- addl(operand, Immediate(value));
+ addl(counter_operand, Immediate(value));
}
}
}
@@ -1829,12 +2769,11 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
ASSERT(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
- movq(kScratchRegister, ExternalReference(counter));
- Operand operand(kScratchRegister, 0);
+ Operand counter_operand = ExternalOperand(ExternalReference(counter));
if (value == 1) {
- decl(operand);
+ decl(counter_operand);
} else {
- subl(operand, Immediate(value));
+ subl(counter_operand, Immediate(value));
}
}
}
@@ -1844,31 +2783,51 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
void MacroAssembler::DebugBreak() {
ASSERT(allow_stub_calls());
Set(rax, 0); // No arguments.
- movq(rbx, ExternalReference(Runtime::kDebugBreak));
+ LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(1);
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif // ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
+ // This macro takes the dst register to make the code more readable
+ // at the call sites. However, the dst register has to be rcx to
+ // follow the calling convention which requires the call type to be
+ // in rcx.
+ ASSERT(dst.is(rcx));
+ if (call_kind == CALL_AS_FUNCTION) {
+ LoadSmiConstant(dst, Smi::FromInt(1));
+ } else {
+ LoadSmiConstant(dst, Smi::FromInt(0));
+ }
+}
+
+
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
- NearLabel done;
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
+ Label done;
InvokePrologue(expected,
actual,
Handle<Code>::null(),
code,
&done,
flag,
- post_call_generator);
+ Label::kNear,
+ call_wrapper,
+ call_kind);
if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ SetCallKind(rcx, call_kind);
call(code);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(rcx, call_kind);
jmp(code);
}
bind(&done);
@@ -1880,8 +2839,9 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
- NearLabel done;
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
+ Label done;
Register dummy = rax;
InvokePrologue(expected,
actual,
@@ -1889,12 +2849,17 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
dummy,
&done,
flag,
- post_call_generator);
+ Label::kNear,
+ call_wrapper,
+ call_kind);
if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ SetCallKind(rcx, call_kind);
Call(code, rmode);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(rcx, call_kind);
Jump(code, rmode);
}
bind(&done);
@@ -1904,7 +2869,8 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
ASSERT(function.is(rdi));
movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
@@ -1915,14 +2881,15 @@ void MacroAssembler::InvokeFunction(Register function,
movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount expected(rbx);
- InvokeCode(rdx, expected, actual, flag, post_call_generator);
+ InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
}
void MacroAssembler::InvokeFunction(JSFunction* function,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
ASSERT(function->is_compiled());
// Get the function and setup the context.
Move(rdi, Handle<JSFunction>(function));
@@ -1933,7 +2900,7 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
// the Code object every time we call the function.
movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(rdx, expected, actual, flag, post_call_generator);
+ InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
} else {
// Invoke the cached code.
Handle<Code> code(function->code());
@@ -1943,7 +2910,79 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
actual,
RelocInfo::CODE_TARGET,
flag,
- post_call_generator);
+ call_wrapper,
+ call_kind);
+ }
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_register,
+ Label* done,
+ InvokeFlag flag,
+ Label::Distance near_jump,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
+ bool definitely_matches = false;
+ Label invoke;
+ if (expected.is_immediate()) {
+ ASSERT(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+ } else {
+ Set(rax, actual.immediate());
+ if (expected.immediate() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ // Don't worry about adapting arguments for built-ins that
+ // don't want that done. Skip adaption code by making it look
+ // like we have a match between expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ Set(rbx, expected.immediate());
+ }
+ }
+ } else {
+ if (actual.is_immediate()) {
+ // Expected is in register, actual is immediate. This is the
+ // case when we invoke function values without going through the
+ // IC mechanism.
+ cmpq(expected.reg(), Immediate(actual.immediate()));
+ j(equal, &invoke, Label::kNear);
+ ASSERT(expected.reg().is(rbx));
+ Set(rax, actual.immediate());
+ } else if (!expected.reg().is(actual.reg())) {
+ // Both expected and actual are in (different) registers. This
+ // is the case when we invoke functions using call and apply.
+ cmpq(expected.reg(), actual.reg());
+ j(equal, &invoke, Label::kNear);
+ ASSERT(actual.reg().is(rax));
+ ASSERT(expected.reg().is(rbx));
+ }
+ }
+
+ if (!definitely_matches) {
+ Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ if (!code_constant.is_null()) {
+ movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+ addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ } else if (!code_register.is(rdx)) {
+ movq(rdx, code_register);
+ }
+
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(adaptor));
+ SetCallKind(rcx, call_kind);
+ Call(adaptor, RelocInfo::CODE_TARGET);
+ call_wrapper.AfterCall();
+ jmp(done, near_jump);
+ } else {
+ SetCallKind(rcx, call_kind);
+ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+ bind(&invoke);
}
}
@@ -1955,9 +2994,9 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
Push(Smi::FromInt(type));
movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
push(kScratchRegister);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
movq(kScratchRegister,
- Factory::undefined_value(),
+ isolate()->factory()->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
cmpq(Operand(rsp, 0), kScratchRegister);
Check(not_equal, "code object not properly patched");
@@ -1966,7 +3005,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Move(kScratchRegister, Smi::FromInt(type));
cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
Check(equal, "stack frame types must match");
@@ -1992,16 +3031,12 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
push(kScratchRegister); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
- ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
- ExternalReference context_address(Top::k_context_address);
if (save_rax) {
- movq(r14, rax); // Backup rax before we use it.
+ movq(r14, rax); // Backup rax in callee-save register.
}
- movq(rax, rbp);
- store_rax(c_entry_fp_address);
- movq(rax, rsi);
- store_rax(context_address);
+ Store(ExternalReference(Isolate::k_c_entry_fp_address, isolate()), rbp);
+ Store(ExternalReference(Isolate::k_context_address, isolate()), rsi);
}
@@ -2013,7 +3048,6 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
#endif
// Optionally save all XMM registers.
if (save_doubles) {
- CpuFeatures::Scope scope(SSE2);
int space = XMMRegister::kNumRegisters * kDoubleSize +
arg_stack_space * kPointerSize;
subq(rsp, Immediate(space));
@@ -2027,11 +3061,11 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
}
// Get the required frame alignment for the OS.
- static const int kFrameAlignment = OS::ActivationFrameAlignment();
+ const int kFrameAlignment = OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
ASSERT(IsPowerOf2(kFrameAlignment));
- movq(kScratchRegister, Immediate(-kFrameAlignment));
- and_(rsp, kScratchRegister);
+ ASSERT(is_int8(kFrameAlignment));
+ and_(rsp, Immediate(-kFrameAlignment));
}
// Patch the saved entry sp.
@@ -2042,10 +3076,10 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
EnterExitFramePrologue(true);
- // Setup argv in callee-saved register r12. It is reused in LeaveExitFrame,
+ // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r12, Operand(rbp, r14, times_pointer_size, offset));
+ lea(r15, Operand(rbp, r14, times_pointer_size, offset));
EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
@@ -2059,7 +3093,7 @@ void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Registers:
- // r12 : argv
+ // r15 : argv
if (save_doubles) {
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
@@ -2073,7 +3107,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Drop everything up to and including the arguments and the receiver
// from the caller stack.
- lea(rsp, Operand(r12, 1 * kPointerSize));
+ lea(rsp, Operand(r15, 1 * kPointerSize));
// Push the return address to get ready to return.
push(rcx);
@@ -2092,17 +3126,18 @@ void MacroAssembler::LeaveApiExitFrame() {
void MacroAssembler::LeaveExitFrameEpilogue() {
// Restore current context from top and clear it in debug mode.
- ExternalReference context_address(Top::k_context_address);
- movq(kScratchRegister, context_address);
- movq(rsi, Operand(kScratchRegister, 0));
+ ExternalReference context_address(Isolate::k_context_address, isolate());
+ Operand context_operand = ExternalOperand(context_address);
+ movq(rsi, context_operand);
#ifdef DEBUG
- movq(Operand(kScratchRegister, 0), Immediate(0));
+ movq(context_operand, Immediate(0));
#endif
// Clear the top frame.
- ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
- movq(kScratchRegister, c_entry_fp_address);
- movq(Operand(kScratchRegister, 0), Immediate(0));
+ ExternalReference c_entry_fp_address(Isolate::k_c_entry_fp_address,
+ isolate());
+ Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
+ movq(c_entry_fp_operand, Immediate(0));
}
@@ -2117,7 +3152,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
// When generating debug code, make sure the lexical context is set.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
cmpq(scratch, Immediate(0));
Check(not_equal, "we should not have an empty lexical context");
}
@@ -2127,9 +3162,9 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check the context is a global context.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
- Factory::global_context_map());
+ isolate()->factory()->global_context_map());
Check(equal, "JSGlobalObject::global_context should be a global context.");
}
@@ -2143,7 +3178,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// object.
// Check the context is a global context.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Preserve original value of holder_reg.
push(holder_reg);
movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
@@ -2173,7 +3208,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
// Just return if allocation top is already known.
if ((flags & RESULT_CONTAINS_TOP) != 0) {
@@ -2181,8 +3216,8 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
ASSERT(!scratch.is_valid());
#ifdef DEBUG
// Assert that result actually contains top on entry.
- movq(kScratchRegister, new_space_allocation_top);
- cmpq(result, Operand(kScratchRegister, 0));
+ Operand top_operand = ExternalOperand(new_space_allocation_top);
+ cmpq(result, top_operand);
Check(equal, "Unexpected allocation top");
#endif
return;
@@ -2191,39 +3226,30 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
// Move address of new object to result. Use scratch register if available,
// and keep address in scratch until call to UpdateAllocationTopHelper.
if (scratch.is_valid()) {
- movq(scratch, new_space_allocation_top);
+ LoadAddress(scratch, new_space_allocation_top);
movq(result, Operand(scratch, 0));
- } else if (result.is(rax)) {
- load_rax(new_space_allocation_top);
} else {
- movq(kScratchRegister, new_space_allocation_top);
- movq(result, Operand(kScratchRegister, 0));
+ Load(result, new_space_allocation_top);
}
}
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
testq(result_end, Immediate(kObjectAlignmentMask));
Check(zero, "Unaligned allocation in new space");
}
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
// Update new top.
- if (result_end.is(rax)) {
- // rax can be stored directly to a memory location.
- store_rax(new_space_allocation_top);
+ if (scratch.is_valid()) {
+ // Scratch already contains address of allocation top.
+ movq(Operand(scratch, 0), result_end);
} else {
- // Register required - use scratch provided if available.
- if (scratch.is_valid()) {
- movq(Operand(scratch, 0), result_end);
- } else {
- movq(kScratchRegister, new_space_allocation_top);
- movq(Operand(kScratchRegister, 0), result_end);
- }
+ Store(new_space_allocation_top, result_end);
}
}
@@ -2235,7 +3261,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
movl(result, Immediate(0x7091));
if (result_end.is_valid()) {
@@ -2255,7 +3281,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
Register top_reg = result_end.is_valid() ? result_end : result;
@@ -2264,8 +3290,8 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
}
addq(top_reg, Immediate(object_size));
j(carry, gc_required);
- movq(kScratchRegister, new_space_allocation_limit);
- cmpq(top_reg, Operand(kScratchRegister, 0));
+ Operand limit_operand = ExternalOperand(new_space_allocation_limit);
+ cmpq(top_reg, limit_operand);
j(above, gc_required);
// Update allocation top.
@@ -2293,7 +3319,7 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
movl(result, Immediate(0x7091));
movl(result_end, Immediate(0x7191));
@@ -2312,15 +3338,15 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
// We assume that element_count*element_size + header_size does not
// overflow.
lea(result_end, Operand(element_count, element_size, header_size));
addq(result_end, result);
j(carry, gc_required);
- movq(kScratchRegister, new_space_allocation_limit);
- cmpq(result_end, Operand(kScratchRegister, 0));
+ Operand limit_operand = ExternalOperand(new_space_allocation_limit);
+ cmpq(result_end, limit_operand);
j(above, gc_required);
// Update allocation top.
@@ -2340,7 +3366,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
movl(result, Immediate(0x7091));
movl(result_end, Immediate(0x7191));
@@ -2359,14 +3385,14 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
if (!object_size.is(result_end)) {
movq(result_end, object_size);
}
addq(result_end, result);
j(carry, gc_required);
- movq(kScratchRegister, new_space_allocation_limit);
- cmpq(result_end, Operand(kScratchRegister, 0));
+ Operand limit_operand = ExternalOperand(new_space_allocation_limit);
+ cmpq(result_end, limit_operand);
j(above, gc_required);
// Update allocation top.
@@ -2381,16 +3407,16 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
void MacroAssembler::UndoAllocationInNewSpace(Register object) {
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
and_(object, Immediate(~kHeapObjectTagMask));
- movq(kScratchRegister, new_space_allocation_top);
+ Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
- cmpq(object, Operand(kScratchRegister, 0));
+ cmpq(object, top_operand);
Check(below, "Undo allocation of non allocated memory");
#endif
- movq(Operand(kScratchRegister, 0), object);
+ movq(top_operand, object);
}
@@ -2524,18 +3550,77 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
}
+// Copy memory, byte-by-byte, from source to destination. Not optimized for
+// long or aligned copies. Destination is incremented by length; the
+// contents of source, length and scratch are clobbered.
+// A simpler loop is faster on small copies, but slower on large ones.
+// The cld() instruction must have been emitted, to clear the direction flag,
+// before calling this function.
+void MacroAssembler::CopyBytes(Register destination,
+ Register source,
+ Register length,
+ int min_length,
+ Register scratch) {
+ ASSERT(min_length >= 0);
+ if (FLAG_debug_code) {
+ cmpl(length, Immediate(min_length));
+ Assert(greater_equal, "Invalid min_length");
+ }
+ Label loop, done, short_string, short_loop;
+
+ const int kLongStringLimit = 20;
+ if (min_length <= kLongStringLimit) {
+ cmpl(length, Immediate(kLongStringLimit));
+ j(less_equal, &short_string);
+ }
+
+ ASSERT(source.is(rsi));
+ ASSERT(destination.is(rdi));
+ ASSERT(length.is(rcx));
+
+ // Because source is 8-byte aligned in our uses of this function,
+ // we keep source aligned for the rep movs operation by copying the odd bytes
+ // at the end of the ranges.
+ movq(scratch, length);
+ shrl(length, Immediate(3));
+ repmovsq();
+ // Move remaining bytes of length.
+ andl(scratch, Immediate(0x7));
+ movq(length, Operand(source, scratch, times_1, -8));
+ movq(Operand(destination, scratch, times_1, -8), length);
+ addq(destination, scratch);
+
+ if (min_length <= kLongStringLimit) {
+ jmp(&done);
+
+ bind(&short_string);
+ if (min_length == 0) {
+ testl(length, length);
+ j(zero, &done);
+ }
+ lea(scratch, Operand(destination, length, times_1, 0));
+
+ bind(&short_loop);
+ movb(length, Operand(source, 0));
+ movb(Operand(destination, 0), length);
+ incq(source);
+ incq(destination);
+ cmpq(destination, scratch);
+ j(not_equal, &short_loop);
+
+ bind(&done);
+ }
+}
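
The long-copy path moves length / 8 quadwords with rep movsq, then finishes
the remaining zero to seven bytes with a single unaligned 8-byte load and
store that ends exactly at the end of the range, deliberately re-copying up to
eight bytes the quadword loop already wrote. The same shape as a C++ sketch
(not V8 code; valid only when length >= 8, which the length > 20 branch above
guarantees):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    // Sketch only: the rep-movsq-plus-overlapping-tail pattern.
    void CopyBytesLong(uint8_t* dst, const uint8_t* src, size_t length) {
      size_t quads = length / 8;
      std::memcpy(dst, src, quads * 8);  // the rep movsq part
      src += quads * 8;
      dst += quads * 8;
      size_t rem = length & 7;
      // One overlapping 8-byte move finishes the tail; it begins 8 bytes
      // before the end of the range, inside the region copied above.
      uint64_t tail;
      std::memcpy(&tail, src + rem - 8, 8);
      std::memcpy(dst + rem - 8, &tail, 8);
    }
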
+
+
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
- movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
- // The context may be an intermediate context, not a function context.
- movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
} else {
// Slot is in the current function context. Move it into the
// destination register in case we store into it (the write barrier
@@ -2543,17 +3628,25 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
movq(dst, rsi);
}
- // We should not have found a 'with' context by walking the context chain
- // (i.e., the static scope chain and runtime context chain do not agree).
- // A variable occurring in such a scope should have slot type LOOKUP and
- // not CONTEXT.
- if (FLAG_debug_code) {
- cmpq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- Check(equal, "Yo dawg, I heard you liked function contexts "
- "so I put function contexts in all your contexts");
+ // We should not have found a with or catch context by walking the context
+ // chain (i.e., the static scope chain and runtime context chain do not
+ // agree). A variable occurring in such a scope should have slot type
+ // LOOKUP and not CONTEXT.
+ if (emit_debug_code()) {
+ CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
+ Heap::kWithContextMapRootIndex);
+ Check(not_equal, "Variable resolved to with context.");
+ CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
+ Heap::kCatchContextMapRootIndex);
+ Check(not_equal, "Variable resolved to catch context.");
}
}
+#ifdef _WIN64
+static const int kRegisterPassedArguments = 4;
+#else
+static const int kRegisterPassedArguments = 6;
+#endif
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
@@ -2569,9 +3662,9 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map) {
// Load the initial map. The global functions all have initial maps.
movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok, fail;
- CheckMap(map, Factory::meta_map(), &fail, false);
+ CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
jmp(&ok);
bind(&fail);
Abort("Global functions must have initial map");
@@ -2589,11 +3682,10 @@ int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
// and the caller does not reserve stack slots for them.
ASSERT(num_arguments >= 0);
#ifdef _WIN64
- static const int kMinimumStackSlots = 4;
+ const int kMinimumStackSlots = kRegisterPassedArguments;
if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
return num_arguments;
#else
- static const int kRegisterPassedArguments = 6;
if (num_arguments < kRegisterPassedArguments) return 0;
return num_arguments - kRegisterPassedArguments;
#endif
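
The two ABIs split the work differently: Win64 passes the first four
arguments in registers but still requires the caller to reserve four "shadow"
stack slots for them, while the System V ABI passes six in registers and
reserves nothing. So seven arguments need seven slots on Win64 (four shadow
plus three spilled) but only one on System V. As a sketch (not V8 code):

    // Sketch only: the slot math for both calling conventions.
    int ArgumentStackSlots(int num_arguments, bool win64) {
      if (win64) return num_arguments < 4 ? 4 : num_arguments;  // shadow space
      return num_arguments < 6 ? 0 : num_arguments - 6;         // spill only
    }
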
@@ -2604,6 +3696,7 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments) {
int frame_alignment = OS::ActivationFrameAlignment();
ASSERT(frame_alignment != 0);
ASSERT(num_arguments >= 0);
+
// Make stack end at alignment and allocate space for arguments and old rsp.
movq(kScratchRegister, rsp);
ASSERT(IsPowerOf2(frame_alignment));
@@ -2617,14 +3710,14 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments) {
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
- movq(rax, function);
+ LoadAddress(rax, function);
CallCFunction(rax, num_arguments);
}
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
// Check stack alignment.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
CheckStackAlignment();
}
@@ -2638,7 +3731,9 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) {
CodePatcher::CodePatcher(byte* address, int size)
- : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+ : address_(address),
+ size_(size),
+ masm_(Isolate::Current(), address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 9557940a9..f09fafc20 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -29,6 +29,7 @@
#define V8_X64_MACRO_ASSEMBLER_X64_H_
#include "assembler.h"
+#include "v8globals.h"
namespace v8 {
namespace internal {
@@ -44,21 +45,24 @@ enum AllocationFlags {
RESULT_CONTAINS_TOP = 1 << 1
};
+
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
static const Register kScratchRegister = { 10 }; // r10.
-static const Register kSmiConstantRegister = { 15 }; // r15 (callee save).
+static const Register kSmiConstantRegister = { 12 }; // r12 (callee save).
static const Register kRootRegister = { 13 }; // r13 (callee save).
// Value of smi in kSmiConstantRegister.
static const int kSmiConstantRegisterValue = 1;
+// The actual value of the root register is offset from the root array's start
+// to take advantage of negative 8-bit displacement values.
+static const int kRootRegisterBias = 128;
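
The bias is an encoding optimization: x64 memory operands whose displacement
fits in [-128, 127] use a one-byte displacement field. Pointing kRootRegister
128 bytes past the array start therefore puts the first 256 bytes of roots,
i.e. the first 32 pointer-sized entries, within 8-bit reach. A sketch of the
displacement math (not V8 code):

    // Sketch only: with the bias, root indices 0..31 encode as int8
    // displacements (-128 .. 120).
    int RootDisplacement(int index) {
      const int kPointerSizeBytes = 8;
      const int kRootRegisterBias = 128;
      return index * kPointerSizeBytes - kRootRegisterBias;
    }
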
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
// Forward declaration.
class JumpTarget;
-class PostCallGenerator;
struct SmiIndex {
SmiIndex(Register index_register, ScaleFactor scale)
@@ -71,13 +75,64 @@ struct SmiIndex {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- MacroAssembler(void* buffer, int size);
-
+ // The isolate parameter can be NULL if the macro assembler should
+ // not use isolate-dependent functionality. In this case, it's the
+ // responsibility of the caller to never invoke such function on the
+ // macro assembler.
+ MacroAssembler(Isolate* isolate, void* buffer, int size);
+
+ // Prevent the use of the RootArray during the lifetime of this
+ // scope object.
+ class NoRootArrayScope BASE_EMBEDDED {
+ public:
+ explicit NoRootArrayScope(MacroAssembler* assembler)
+ : variable_(&assembler->root_array_available_),
+ old_value_(assembler->root_array_available_) {
+ assembler->root_array_available_ = false;
+ }
+ ~NoRootArrayScope() {
+ *variable_ = old_value_;
+ }
+ private:
+ bool* variable_;
+ bool old_value_;
+ };
+
+ // Operand pointing to an external reference.
+ // May emit code to set up the scratch register. The operand is
+ // only guaranteed to be correct as long as the scratch register
+ // isn't changed.
+ // If the operand is used more than once, use a scratch register
+ // that is guaranteed not to be clobbered.
+ Operand ExternalOperand(ExternalReference reference,
+ Register scratch = kScratchRegister);
+ // Loads and stores the value of an external reference.
+ // Special case code for load and store to take advantage of
+ // load_rax/store_rax if possible/necessary.
+ // For other operations, just use:
+ // Operand operand = ExternalOperand(extref);
+ // operation(operand, ..);
+ void Load(Register destination, ExternalReference source);
+ void Store(ExternalReference destination, Register source);
+ // Loads the address of the external reference into the destination
+ // register.
+ void LoadAddress(Register destination, ExternalReference source);
+ // Returns the size of the code generated by LoadAddress.
+ // Used by CallSize(ExternalReference) to find the size of a call.
+ int LoadAddressSize(ExternalReference source);
+
+ // Operations on roots in the root-array.
void LoadRoot(Register destination, Heap::RootListIndex index);
+ void StoreRoot(Register source, Heap::RootListIndex index);
+ // Load a root value where the index (or part of it) is variable.
+ // The variable_offset register is added to the fixed_offset value
+ // to get the index into the root-array.
+ void LoadRootIndexed(Register destination,
+ Register variable_offset,
+ int fixed_offset);
void CompareRoot(Register with, Heap::RootListIndex index);
void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
- void StoreRoot(Register source, Heap::RootListIndex index);
// ---------------------------------------------------------------------------
// GC Support
@@ -92,11 +147,11 @@ class MacroAssembler: public Assembler {
// Check if object is in new space. The condition cc can be equal or
// not_equal. If it is equal a jump will be done if the object is on new
// space. The register scratch can be object itself, but it will be clobbered.
- template <typename LabelType>
void InNewSpace(Register object,
Register scratch,
Condition cc,
- LabelType* branch);
+ Label* branch,
+ Label::Distance near_jump = Label::kFar);
// For page containing |object| mark region covering [object+offset]
// dirty. |object| is the object being stored into, |value| is the
@@ -176,40 +231,56 @@ class MacroAssembler: public Assembler {
void StoreToSafepointRegisterSlot(Register dst, Register src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);
+ void InitializeRootRegister() {
+ ExternalReference roots_address =
+ ExternalReference::roots_address(isolate());
+ movq(kRootRegister, roots_address);
+ addq(kRootRegister, Immediate(kRootRegisterBias));
+ }
+
// ---------------------------------------------------------------------------
// JavaScript invokes
+ // Setup call kind marking in rcx. The method takes rcx as an
+ // explicit first parameter to make the code more readable at the
+ // call sites.
+ void SetCallKind(Register dst, CallKind kind);
+
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
void InvokeFunction(JSFunction* function,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper = NullCallWrapper());
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
@@ -262,9 +333,20 @@ class MacroAssembler: public Assembler {
Register src,
int power);
+ // Perform the logical or of two smi values and return a smi value.
+ // If either argument is not a smi, jump to on_not_smis and retain
+ // the original values of source registers. The destination register
+ // may be changed if it's not one of the source registers.
+ void SmiOrIfSmis(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smis,
+ Label::Distance near_jump = Label::kFar);
- // Simple comparison of smis.
- void SmiCompare(Register dst, Register src);
+
+ // Simple comparison of smis. Both sides must be known smis to use these,
+ // otherwise use Cmp.
+ void SmiCompare(Register smi1, Register smi2);
void SmiCompare(Register dst, Smi* src);
void SmiCompare(Register dst, const Operand& src);
void SmiCompare(const Operand& dst, Register src);
@@ -317,42 +399,45 @@ class MacroAssembler: public Assembler {
// above with a conditional jump.
// Jump if the value cannot be represented by a smi.
- template <typename LabelType>
- void JumpIfNotValidSmiValue(Register src, LabelType* on_invalid);
+ void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
+ Label::Distance near_jump = Label::kFar);
// Jump if the unsigned integer value cannot be represented by a smi.
- template <typename LabelType>
- void JumpIfUIntNotValidSmiValue(Register src, LabelType* on_invalid);
+ void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
+ Label::Distance near_jump = Label::kFar);
// Jump to label if the value is a tagged smi.
- template <typename LabelType>
- void JumpIfSmi(Register src, LabelType* on_smi);
+ void JumpIfSmi(Register src,
+ Label* on_smi,
+ Label::Distance near_jump = Label::kFar);
// Jump to label if the value is not a tagged smi.
- template <typename LabelType>
- void JumpIfNotSmi(Register src, LabelType* on_not_smi);
+ void JumpIfNotSmi(Register src,
+ Label* on_not_smi,
+ Label::Distance near_jump = Label::kFar);
// Jump to label if the value is not a non-negative tagged smi.
- template <typename LabelType>
- void JumpUnlessNonNegativeSmi(Register src, LabelType* on_not_smi);
+ void JumpUnlessNonNegativeSmi(Register src,
+ Label* on_not_smi,
+ Label::Distance near_jump = Label::kFar);
// Jump to label if the value, which must be a tagged smi, has value equal
// to the constant.
- template <typename LabelType>
void JumpIfSmiEqualsConstant(Register src,
Smi* constant,
- LabelType* on_equals);
+ Label* on_equals,
+ Label::Distance near_jump = Label::kFar);
// Jump if either or both register are not smi values.
- template <typename LabelType>
void JumpIfNotBothSmi(Register src1,
Register src2,
- LabelType* on_not_both_smi);
+ Label* on_not_both_smi,
+ Label::Distance near_jump = Label::kFar);
// Jump if either or both register are not non-negative smi values.
- template <typename LabelType>
void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
- LabelType* on_not_both_smi);
+ Label* on_not_both_smi,
+ Label::Distance near_jump = Label::kFar);
// Operations on tagged smi values.
@@ -362,11 +447,11 @@ class MacroAssembler: public Assembler {
// Optimistically adds an integer constant to a supposed smi.
// If the src is not a smi, or the result is not a smi, jump to
// the label.
- template <typename LabelType>
void SmiTryAddConstant(Register dst,
Register src,
Smi* constant,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
// Add an integer constant to a tagged smi, giving a tagged smi as result.
// No overflow testing on the result is done.
@@ -378,11 +463,11 @@ class MacroAssembler: public Assembler {
// Add an integer constant to a tagged smi, giving a tagged smi as result,
// or jumping to a label if the result cannot be represented by a smi.
- template <typename LabelType>
void SmiAddConstant(Register dst,
Register src,
Smi* constant,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
// Subtract an integer constant from a tagged smi, giving a tagged smi as
// result. No testing on the result is done. Sets the N and Z flags
@@ -391,27 +476,32 @@ class MacroAssembler: public Assembler {
// Subtract an integer constant from a tagged smi, giving a tagged smi as
// result, or jumping to a label if the result cannot be represented by a smi.
- template <typename LabelType>
void SmiSubConstant(Register dst,
Register src,
Smi* constant,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
// Negating a smi can give a negative zero or too large positive value.
// NOTICE: This operation jumps on success, not failure!
- template <typename LabelType>
void SmiNeg(Register dst,
Register src,
- LabelType* on_smi_result);
+ Label* on_smi_result,
+ Label::Distance near_jump = Label::kFar);
// Adds smi values and return the result as a smi.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
- template <typename LabelType>
void SmiAdd(Register dst,
Register src1,
Register src2,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
+ void SmiAdd(Register dst,
+ Register src1,
+ const Operand& src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
void SmiAdd(Register dst,
Register src1,
@@ -420,21 +510,21 @@ class MacroAssembler: public Assembler {
// Subtracts smi values and return the result as a smi.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
- template <typename LabelType>
void SmiSub(Register dst,
Register src1,
Register src2,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
void SmiSub(Register dst,
Register src1,
Register src2);
- template <typename LabelType>
void SmiSub(Register dst,
Register src1,
const Operand& src2,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
void SmiSub(Register dst,
Register src1,
@@ -444,27 +534,27 @@ class MacroAssembler: public Assembler {
// if possible.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
- template <typename LabelType>
void SmiMul(Register dst,
Register src1,
Register src2,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
// Divides one smi by another and returns the quotient.
// Clobbers rax and rdx registers.
- template <typename LabelType>
void SmiDiv(Register dst,
Register src1,
Register src2,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
// Divides one smi by another and returns the remainder.
// Clobbers rax and rdx registers.
- template <typename LabelType>
void SmiMod(Register dst,
Register src1,
Register src2,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
// Bitwise operations.
void SmiNot(Register dst, Register src);
@@ -478,11 +568,11 @@ class MacroAssembler: public Assembler {
void SmiShiftLeftConstant(Register dst,
Register src,
int shift_value);
- template <typename LabelType>
void SmiShiftLogicalRightConstant(Register dst,
Register src,
int shift_value,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
void SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value);
@@ -495,11 +585,11 @@ class MacroAssembler: public Assembler {
// Shifts a smi value to the right, shifting in zero bits at the top, and
  // returns the unsigned interpretation of the result if that is a smi.
// Uses and clobbers rcx, so dst may not be rcx.
- template <typename LabelType>
void SmiShiftLogicalRight(Register dst,
Register src1,
Register src2,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
// Shifts a smi value to the right, sign extending the top, and
  // returns the signed interpretation of the result. That will always
// be a valid smi value, since it's numerically smaller than the
@@ -513,11 +603,11 @@ class MacroAssembler: public Assembler {
// Select the non-smi register of two registers where exactly one is a
  // smi. If neither is a smi, jump to the failure label.
- template <typename LabelType>
void SelectNonSmi(Register dst,
Register src1,
Register src2,
- LabelType* on_not_smis);
+ Label* on_not_smis,
+ Label::Distance near_jump = Label::kFar);
// Converts, if necessary, a smi to a combination of number and
// multiplier to be used as a scaled index.
@@ -532,6 +622,10 @@ class MacroAssembler: public Assembler {
// Converts a positive smi to a negative index.
SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
+ // Add the value of a smi in memory to an int32 register.
+ // Sets flags as a normal add.
+ void AddSmiField(Register dst, const Operand& src);
+
// Basic Smi operations.
void Move(Register dst, Smi* source) {
LoadSmiConstant(dst, source);
@@ -549,35 +643,36 @@ class MacroAssembler: public Assembler {
// String macros.
// If object is a string, its map is loaded into object_map.
- template <typename LabelType>
void JumpIfNotString(Register object,
Register object_map,
- LabelType* not_string);
+ Label* not_string,
+ Label::Distance near_jump = Label::kFar);
- template <typename LabelType>
- void JumpIfNotBothSequentialAsciiStrings(Register first_object,
- Register second_object,
- Register scratch1,
- Register scratch2,
- LabelType* on_not_both_flat_ascii);
+ void JumpIfNotBothSequentialAsciiStrings(
+ Register first_object,
+ Register second_object,
+ Register scratch1,
+ Register scratch2,
+ Label* on_not_both_flat_ascii,
+ Label::Distance near_jump = Label::kFar);
// Check whether the instance type represents a flat ascii string. Jump to the
  // label if not. If the instance type can be scratched, specify the same
  // register for both instance type and scratch.
- template <typename LabelType>
void JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type,
Register scratch,
- LabelType *on_not_flat_ascii_string);
+      Label* on_not_flat_ascii_string,
+ Label::Distance near_jump = Label::kFar);
- template <typename LabelType>
void JumpIfBothInstanceTypesAreNotSequentialAscii(
Register first_object_instance_type,
Register second_object_instance_type,
Register scratch1,
Register scratch2,
- LabelType* on_fail);
+ Label* on_fail,
+ Label::Distance near_jump = Label::kFar);
// ---------------------------------------------------------------------------
// Macro instructions.
@@ -594,6 +689,8 @@ class MacroAssembler: public Assembler {
void Move(const Operand& dst, Handle<Object> source);
void Cmp(Register dst, Handle<Object> source);
void Cmp(const Operand& dst, Handle<Object> source);
+ void Cmp(Register dst, Smi* src);
+ void Cmp(const Operand& dst, Smi* src);
void Push(Handle<Object> source);
// Emit code to discard a non-negative number of pointer-sized elements
@@ -609,7 +706,27 @@ class MacroAssembler: public Assembler {
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
- void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
+ void Call(Handle<Code> code_object,
+ RelocInfo::Mode rmode,
+ unsigned ast_id = kNoASTId);
+
+ // The size of the code generated for different call instructions.
+ int CallSize(Address destination, RelocInfo::Mode rmode) {
+ return kCallInstructionLength;
+ }
+ int CallSize(ExternalReference ext);
+ int CallSize(Handle<Code> code_object) {
+ // Code calls use 32-bit relative addressing.
+ return kShortCallInstructionLength;
+ }
+ int CallSize(Register target) {
+ // Opcode: REX_opt FF /2 m64
+ return (target.high_bit() != 0) ? 3 : 2;
+ }
+ int CallSize(const Operand& target) {
+ // Opcode: REX_opt FF /2 m64
+ return (target.requires_rex() ? 2 : 1) + target.operand_size();
+ }
// Emit call to the code we are currently generating.
void CallSelf() {
@@ -637,13 +754,27 @@ class MacroAssembler: public Assembler {
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
// Check if the map of an object is equal to a specified map and
// branch to label if not. Skip the smi check if not required
// (object is known to be a heap object)
void CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- bool is_heap_object);
+ SmiCheckType smi_check_type);
+
+ // Check if the map of an object is equal to a specified map and branch to a
+ // specified target if equal. Skip the smi check if not required (object is
+ // known to be a heap object)
+ void DispatchMap(Register obj,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type);
// Check if the object in register heap_object is a string. Afterwards the
// register map contains the object map and the register instance_type
@@ -659,6 +790,15 @@ class MacroAssembler: public Assembler {
  // jcc instructions (je, ja, jae, jb, jbe, and jz).
void FCmp();
+ void ClampUint8(Register reg);
+
+ void ClampDoubleToUint8(XMMRegister input_reg,
+ XMMRegister temp_xmm_reg,
+ Register result_reg,
+ Register temp_reg);
+
+ void LoadInstanceDescriptors(Register map, Register descriptors);
+
// Abort execution if argument is not a number. Used in debug code.
void AbortIfNotNumber(Register object);
@@ -667,6 +807,7 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a smi. Used in debug code.
void AbortIfNotSmi(Register object);
+ void AbortIfNotSmi(const Operand& object);
// Abort execution if argument is a string. Used in debug code.
void AbortIfNotString(Register object);
@@ -830,7 +971,7 @@ class MacroAssembler: public Assembler {
// Runtime calls
// Call a code stub.
- void CallStub(CodeStub* stub);
+ void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
// Call a code stub and return the code object called. Try to generate
// the code if necessary. Do not perform a GC but instead return a retry
@@ -849,7 +990,7 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(Runtime::Function* f, int num_arguments);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
// Call a runtime function and save the value of XMM registers.
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
@@ -857,7 +998,7 @@ class MacroAssembler: public Assembler {
// Call a runtime function, returning the CodeStub object called.
// Try to generate the stub code if necessary. Do not perform a GC
// but instead return a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::Function* f,
+ MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
@@ -906,7 +1047,7 @@ class MacroAssembler: public Assembler {
// Calls an API function. Allocates HandleScope, extracts
// returned value from handle and propagates exceptions.
- // Clobbers r12, r14, rbx and caller-save registers. Restores context.
+ // Clobbers r14, r15, rbx and caller-save registers. Restores context.
// On return removes stack_space * kPointerSize (GCed).
MUST_USE_RESULT MaybeObject* TryCallApiFunctionAndReturn(
ApiFunction* function, int stack_space);
@@ -941,7 +1082,22 @@ class MacroAssembler: public Assembler {
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
- Handle<Object> CodeObject() { return code_object_; }
+ Handle<Object> CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+ }
+
+ // Copy length bytes from source to destination.
+ // Uses scratch register internally (if you have a low-eight register
+ // free, do use it, otherwise kScratchRegister will be used).
+  // min_length is a lower bound on the value that length will have.
+ // The algorithm has some special cases that might be omitted if the string
+ // is known to always be long.
+ void CopyBytes(Register destination,
+ Register source,
+ Register length,
+ int min_length = 0,
+ Register scratch = kScratchRegister);
// ---------------------------------------------------------------------------
@@ -982,12 +1138,14 @@ class MacroAssembler: public Assembler {
private:
// Order general registers are pushed by Pushad.
- // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14.
+ // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
static int kSafepointPushRegisterIndices[Register::kNumRegisters];
static const int kNumSafepointSavedRegisters = 11;
+ static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
bool generating_stub_;
bool allow_stub_calls_;
+ bool root_array_available_;
// Returns a register holding the smi value. The register MUST NOT be
// modified. It may be the "smi 1 constant" register.
@@ -1000,14 +1158,15 @@ class MacroAssembler: public Assembler {
Handle<Object> code_object_;
// Helper functions for generating invokes.
- template <typename LabelType>
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
Register code_register,
- LabelType* done,
+ Label* done,
InvokeFlag flag,
- PostCallGenerator* post_call_generator);
+ Label::Distance near_jump = Label::kFar,
+ const CallWrapper& call_wrapper = NullCallWrapper(),
+ CallKind call_kind = CALL_AS_METHOD);
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -1072,17 +1231,6 @@ class CodePatcher {
};
-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class PostCallGenerator {
- public:
- PostCallGenerator() { }
- virtual ~PostCallGenerator() { }
- virtual void Generate() = 0;
-};
-
-
// -----------------------------------------------------------------------------
// Static helper functions.
@@ -1144,713 +1292,6 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#define ACCESS_MASM(masm) masm->
#endif
-// -----------------------------------------------------------------------------
-// Template implementations.
-
-static int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
-
-template <typename LabelType>
-void MacroAssembler::SmiNeg(Register dst,
- Register src,
- LabelType* on_smi_result) {
- if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- movq(kScratchRegister, src);
- neg(dst); // Low 32 bits are retained as zero by negation.
- // Test if result is zero or Smi::kMinValue.
- cmpq(dst, kScratchRegister);
- j(not_equal, on_smi_result);
- movq(src, kScratchRegister);
- } else {
- movq(dst, src);
- neg(dst);
- cmpq(dst, src);
- // If the result is zero or Smi::kMinValue, negation failed to create a smi.
- j(not_equal, on_smi_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiAdd(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result);
- movq(dst, kScratchRegister);
- } else {
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
- cmpq(dst, src2);
- j(overflow, on_not_smi_result);
- subq(dst, src2);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- const Operand& src2,
- LabelType* on_not_smi_result) {
- ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src2);
- cmpq(src1, kScratchRegister);
- j(overflow, on_not_smi_result);
- subq(src1, kScratchRegister);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiMul(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT(!dst.is(src2));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
-
- if (dst.is(src1)) {
- NearLabel failure, zero_correct_result;
- movq(kScratchRegister, src1); // Create backup for later testing.
- SmiToInteger64(dst, src1);
- imul(dst, src2);
- j(overflow, &failure);
-
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case.
- NearLabel correct_result;
- testq(dst, dst);
- j(not_zero, &correct_result);
-
- movq(dst, kScratchRegister);
- xor_(dst, src2);
- j(positive, &zero_correct_result); // Result was positive zero.
-
- bind(&failure); // Reused failure exit, restores src1.
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result);
-
- bind(&zero_correct_result);
- Set(dst, 0);
-
- bind(&correct_result);
- } else {
- SmiToInteger64(dst, src1);
- imul(dst, src2);
- j(overflow, on_not_smi_result);
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case.
- NearLabel correct_result;
- testq(dst, dst);
- j(not_zero, &correct_result);
-    // One of src1 and src2 is zero; check whether the other is
-    // negative.
- movq(kScratchRegister, src1);
- xor_(kScratchRegister, src2);
- j(negative, on_not_smi_result);
- bind(&correct_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- LabelType* on_not_smi_result) {
- // Does not assume that src is a smi.
- ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
- ASSERT_EQ(0, kSmiTag);
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
-
- JumpIfNotSmi(src, on_not_smi_result);
- Register tmp = (dst.is(src) ? kScratchRegister : dst);
- LoadSmiConstant(tmp, constant);
- addq(tmp, src);
- j(overflow, on_not_smi_result);
- if (dst.is(src)) {
- movq(dst, tmp);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiAddConstant(Register dst,
- Register src,
- Smi* constant,
- LabelType* on_not_smi_result) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
-
- LoadSmiConstant(kScratchRegister, constant);
- addq(kScratchRegister, src);
- j(overflow, on_not_smi_result);
- movq(dst, kScratchRegister);
- } else {
- LoadSmiConstant(dst, constant);
- addq(dst, src);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiSubConstant(Register dst,
- Register src,
- Smi* constant,
- LabelType* on_not_smi_result) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result);
- LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
- addq(kScratchRegister, dst);
- j(overflow, on_not_smi_result);
- movq(dst, kScratchRegister);
- }
- } else {
- if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result);
- LoadSmiConstant(dst, constant);
- // Adding and subtracting the min-value gives the same result, it only
- // differs on the overflow bit, which we don't check here.
- addq(dst, src);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
- addq(dst, src);
- j(overflow, on_not_smi_result);
- }
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiDiv(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
-
- // Check for 0 divisor (result is +/-Infinity).
- NearLabel positive_divisor;
- testq(src2, src2);
- j(zero, on_not_smi_result);
-
- if (src1.is(rax)) {
- movq(kScratchRegister, src1);
- }
- SmiToInteger32(rax, src1);
- // We need to rule out dividing Smi::kMinValue by -1, since that would
- // overflow in idiv and raise an exception.
- // We combine this with negative zero test (negative zero only happens
- // when dividing zero by a negative number).
-
- // We overshoot a little and go to slow case if we divide min-value
- // by any negative value, not just -1.
- NearLabel safe_div;
- testl(rax, Immediate(0x7fffffff));
- j(not_zero, &safe_div);
- testq(src2, src2);
- if (src1.is(rax)) {
- j(positive, &safe_div);
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result);
- } else {
- j(negative, on_not_smi_result);
- }
- bind(&safe_div);
-
- SmiToInteger32(src2, src2);
- // Sign extend src1 into edx:eax.
- cdq();
- idivl(src2);
- Integer32ToSmi(src2, src2);
- // Check that the remainder is zero.
- testl(rdx, rdx);
- if (src1.is(rax)) {
- NearLabel smi_result;
- j(zero, &smi_result);
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result);
- bind(&smi_result);
- } else {
- j(not_zero, on_not_smi_result);
- }
- if (!dst.is(src1) && src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- Integer32ToSmi(dst, rax);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiMod(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
- ASSERT(!src1.is(src2));
-
- testq(src2, src2);
- j(zero, on_not_smi_result);
-
- if (src1.is(rax)) {
- movq(kScratchRegister, src1);
- }
- SmiToInteger32(rax, src1);
- SmiToInteger32(src2, src2);
-
- // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
- NearLabel safe_div;
- cmpl(rax, Immediate(Smi::kMinValue));
- j(not_equal, &safe_div);
- cmpl(src2, Immediate(-1));
- j(not_equal, &safe_div);
- // Retag inputs and go slow case.
- Integer32ToSmi(src2, src2);
- if (src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- jmp(on_not_smi_result);
- bind(&safe_div);
-
- // Sign extend eax into edx:eax.
- cdq();
- idivl(src2);
- // Restore smi tags on inputs.
- Integer32ToSmi(src2, src2);
- if (src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- // Check for a negative zero result. If the result is zero, and the
- // dividend is negative, go slow to return a floating point negative zero.
- NearLabel smi_result;
- testl(rdx, rdx);
- j(not_zero, &smi_result);
- testq(src1, src1);
- j(negative, on_not_smi_result);
- bind(&smi_result);
- Integer32ToSmi(dst, rdx);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiShiftLogicalRightConstant(
- Register dst, Register src, int shift_value, LabelType* on_not_smi_result) {
-  // Logical right shift interprets its result as an *unsigned* number.
- if (dst.is(src)) {
- UNIMPLEMENTED(); // Not used.
- } else {
- movq(dst, src);
- if (shift_value == 0) {
- testq(dst, dst);
- j(negative, on_not_smi_result);
- }
- shr(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiShiftLogicalRight(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(rcx));
- // dst and src1 can be the same, because the one case that bails out
- // is a shift by 0, which leaves dst, and therefore src1, unchanged.
- NearLabel result_ok;
- if (src1.is(rcx) || src2.is(rcx)) {
- movq(kScratchRegister, rcx);
- }
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
-  shr_cl(dst);  // Shift amount is (rcx & 0x1f) + 32.
- shl(dst, Immediate(kSmiShift));
- testq(dst, dst);
- if (src1.is(rcx) || src2.is(rcx)) {
- NearLabel positive_result;
- j(positive, &positive_result);
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else {
- movq(src2, kScratchRegister);
- }
- jmp(on_not_smi_result);
- bind(&positive_result);
- } else {
- j(negative, on_not_smi_result); // src2 was zero and src1 negative.
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SelectNonSmi(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smis) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(src1));
- ASSERT(!dst.is(src2));
- // Both operands must not be smis.
-#ifdef DEBUG
- if (allow_stub_calls()) { // Check contains a stub call.
- Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
- Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
- }
-#endif
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(0, Smi::FromInt(0));
- movl(kScratchRegister, Immediate(kSmiTagMask));
- and_(kScratchRegister, src1);
- testl(kScratchRegister, src2);
-  // If non-zero, then neither src1 nor src2 is a smi.
- j(not_zero, on_not_smis);
-
- // Exactly one operand is a smi.
- ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
- // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
- subq(kScratchRegister, Immediate(1));
-  // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
- movq(dst, src1);
- xor_(dst, src2);
- and_(dst, kScratchRegister);
- // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
- xor_(dst, src1);
- // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfSmi(Register src, LabelType* on_smi) {
- ASSERT_EQ(0, kSmiTag);
- Condition smi = CheckSmi(src);
- j(smi, on_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotSmi(Register src, LabelType* on_not_smi) {
- Condition smi = CheckSmi(src);
- j(NegateCondition(smi), on_not_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpUnlessNonNegativeSmi(
- Register src, LabelType* on_not_smi_or_negative) {
- Condition non_negative_smi = CheckNonNegativeSmi(src);
- j(NegateCondition(non_negative_smi), on_not_smi_or_negative);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
- Smi* constant,
- LabelType* on_equals) {
- SmiCompare(src, constant);
- j(equal, on_equals);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotValidSmiValue(Register src,
- LabelType* on_invalid) {
- Condition is_valid = CheckInteger32ValidSmiValue(src);
- j(NegateCondition(is_valid), on_invalid);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
- LabelType* on_invalid) {
- Condition is_valid = CheckUInteger32ValidSmiValue(src);
- j(NegateCondition(is_valid), on_invalid);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotBothSmi(Register src1,
- Register src2,
- LabelType* on_not_both_smi) {
- Condition both_smi = CheckBothSmi(src1, src2);
- j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
- Register src2,
- LabelType* on_not_both_smi) {
- Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
- j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotString(Register object,
- Register object_map,
- LabelType* not_string) {
- Condition is_smi = CheckSmi(object);
- j(is_smi, not_string);
- CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
- j(above_equal, not_string);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
- Register second_object,
- Register scratch1,
- Register scratch2,
- LabelType* on_fail) {
- // Check that both objects are not smis.
- Condition either_smi = CheckEitherSmi(first_object, second_object);
- j(either_smi, on_fail);
-
- // Load instance type for both strings.
- movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
- movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
- movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
- movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
- // Check that both are flat ascii strings.
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
- // Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
- j(not_equal, on_fail);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- LabelType *failure) {
- if (!scratch.is(instance_type)) {
- movl(scratch, instance_type);
- }
-
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-
- andl(scratch, Immediate(kFlatAsciiStringMask));
- cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
- j(not_equal, failure);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- LabelType* on_fail) {
- // Load instance type for both strings.
- movq(scratch1, first_object_instance_type);
- movq(scratch2, second_object_instance_type);
-
- // Check that both are flat ascii strings.
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
- // Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
- j(not_equal, on_fail);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cc,
- LabelType* branch) {
- if (Serializer::enabled()) {
- // Can't do arithmetic on external references if it might get serialized.
- // The mask isn't really an address. We load it as an external reference in
- // case the size of the new space is different between the snapshot maker
- // and the running system.
- if (scratch.is(object)) {
- movq(kScratchRegister, ExternalReference::new_space_mask());
- and_(scratch, kScratchRegister);
- } else {
- movq(scratch, ExternalReference::new_space_mask());
- and_(scratch, object);
- }
- movq(kScratchRegister, ExternalReference::new_space_start());
- cmpq(scratch, kScratchRegister);
- j(cc, branch);
- } else {
- ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
- intptr_t new_space_start =
- reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
- movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
- if (scratch.is(object)) {
- addq(scratch, kScratchRegister);
- } else {
- lea(scratch, Operand(object, kScratchRegister, times_1, 0));
- }
- and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
- j(cc, branch);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_register,
- LabelType* done,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
- bool definitely_matches = false;
- NearLabel invoke;
- if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
- } else {
- Set(rax, actual.immediate());
- if (expected.immediate() ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
- // Don't worry about adapting arguments for built-ins that
-        // don't want that done. Skip adaptation code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- Set(rbx, expected.immediate());
- }
- }
- } else {
- if (actual.is_immediate()) {
- // Expected is in register, actual is immediate. This is the
- // case when we invoke function values without going through the
- // IC mechanism.
- cmpq(expected.reg(), Immediate(actual.immediate()));
- j(equal, &invoke);
- ASSERT(expected.reg().is(rbx));
- Set(rax, actual.immediate());
- } else if (!expected.reg().is(actual.reg())) {
- // Both expected and actual are in (different) registers. This
- // is the case when we invoke functions using call and apply.
- cmpq(expected.reg(), actual.reg());
- j(equal, &invoke);
- ASSERT(actual.reg().is(rax));
- ASSERT(expected.reg().is(rbx));
- }
- }
-
- if (!definitely_matches) {
- Handle<Code> adaptor =
- Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
- if (!code_constant.is_null()) {
- movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_register.is(rdx)) {
- movq(rdx, code_register);
- }
-
- if (flag == CALL_FUNCTION) {
- Call(adaptor, RelocInfo::CODE_TARGET);
- if (post_call_generator != NULL) post_call_generator->Generate();
- jmp(done);
- } else {
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
- bind(&invoke);
- }
-}
-
-
} } // namespace v8::internal
#endif // V8_X64_MACRO_ASSEMBLER_X64_H_
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index cd3bfbd42..2ea17f0e9 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -63,11 +63,16 @@ namespace internal {
*
* The registers rax, rbx, r9 and r11 are free to use for computations.
* If changed to use r12+, they should be saved as callee-save registers.
+ * The macro assembler special registers r12 and r13 (kSmiConstantRegister,
+ * kRootRegister) aren't special during execution of RegExp code (they don't
+ * hold the values assumed when creating JS code), so no Smi or Root related
+ * macro operations can be used.
*
* Each call to a C++ method should retain these registers.
*
* The stack will have the following content, in some order, indexable from the
* frame pointer (see, e.g., kStackHighEnd):
+ * - Isolate* isolate (Address of the current isolate)
* - direct_call (if 1, direct call from JavaScript code, if 0 call
* through the runtime system)
* - stack_area_base (High end of the memory area to use as
@@ -104,12 +109,13 @@ namespace internal {
* bool direct_call)
*/
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM((&masm_))
RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
Mode mode,
int registers_to_save)
- : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+ : masm_(Isolate::Current(), NULL, kRegExpCodeSize),
+ no_root_array_scope_(&masm_),
code_relative_fixup_positions_(4),
mode_(mode),
num_registers_(registers_to_save),
@@ -126,7 +132,6 @@ RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
RegExpMacroAssemblerX64::~RegExpMacroAssemblerX64() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -397,13 +402,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
#endif
__ push(backtrack_stackpointer());
- static const int num_arguments = 3;
+ static const int num_arguments = 4;
__ PrepareCallCFunction(num_arguments);
// Put arguments into parameter registers. Parameters are
// Address byte_offset1 - Address captured substring's start.
// Address byte_offset2 - Address of current character position.
// size_t byte_length - length of capture in bytes(!)
+ // Isolate* isolate
#ifdef _WIN64
// Compute and set byte_offset1 (start of capture).
__ lea(rcx, Operand(rsi, rdx, times_1, 0));
@@ -411,6 +417,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ lea(rdx, Operand(rsi, rdi, times_1, 0));
// Set byte_length.
__ movq(r8, rbx);
+ // Isolate.
+ __ LoadAddress(r9, ExternalReference::isolate_address());
#else // AMD64 calling convention
// Compute byte_offset2 (current position = rsi+rdi).
__ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -420,13 +428,15 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ movq(rsi, rax);
// Set byte_length.
__ movq(rdx, rbx);
+ // Isolate.
+ __ LoadAddress(rcx, ExternalReference::isolate_address());
#endif
ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16();
+ ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
__ CallCFunction(compare, num_arguments);
// Restore original values before reacting on result value.
- __ Move(code_object_pointer(), masm_->CodeObject());
+ __ Move(code_object_pointer(), masm_.CodeObject());
__ pop(backtrack_stackpointer());
#ifndef _WIN64
__ pop(rdi);
@@ -693,7 +703,7 @@ void RegExpMacroAssemblerX64::Fail() {
}
-Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
+Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Finalize code - write the entry point code now we know how many
// registers we need.
// Entry code:
@@ -740,7 +750,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
Label stack_ok;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit();
+ ExternalReference::address_of_stack_limit(masm_.isolate());
__ movq(rcx, rsp);
__ movq(kScratchRegister, stack_limit);
__ subq(rcx, Operand(kScratchRegister, 0));
@@ -752,11 +762,11 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
- __ movq(rax, Immediate(EXCEPTION));
+ __ Set(rax, EXCEPTION);
__ jmp(&exit_label_);
__ bind(&stack_limit_hit);
- __ Move(code_object_pointer(), masm_->CodeObject());
+ __ Move(code_object_pointer(), masm_.CodeObject());
CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
__ testq(rax, rax);
// If returned value is non-zero, we exit with the returned value as result.
@@ -789,7 +799,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
// page (a problem on Windows).
- __ movq(rcx, Immediate(kRegisterZero));
+ __ Set(rcx, kRegisterZero);
Label init_loop;
__ bind(&init_loop);
__ movq(Operand(rbp, rcx, times_1, 0), rax);
@@ -811,7 +821,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Initialize backtrack stack pointer.
__ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
// Initialize code object pointer.
- __ Move(code_object_pointer(), masm_->CodeObject());
+ __ Move(code_object_pointer(), masm_.CodeObject());
// Load previous char as initial value of current-character.
Label at_start;
__ cmpb(Operand(rbp, kStartIndex), Immediate(0));
@@ -819,7 +829,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
__ jmp(&start_label_);
__ bind(&at_start);
- __ movq(current_character(), Immediate('\n'));
+ __ Set(current_character(), '\n');
__ jmp(&start_label_);
@@ -847,7 +857,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movl(Operand(rbx, i * kIntSize), rax);
}
}
- __ movq(rax, Immediate(SUCCESS));
+ __ Set(rax, SUCCESS);
}
// Exit and return rax
@@ -892,7 +902,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ j(not_zero, &exit_label_);
// Restore registers.
- __ Move(code_object_pointer(), masm_->CodeObject());
+ __ Move(code_object_pointer(), masm_.CodeObject());
__ pop(rdi);
__ pop(backtrack_stackpointer());
// String might have moved: Reload esi from frame.
@@ -914,18 +924,21 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
#endif
// Call GrowStack(backtrack_stackpointer())
- static const int num_arguments = 2;
+ static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments);
#ifdef _WIN64
- // Microsoft passes parameters in rcx, rdx.
+ // Microsoft passes parameters in rcx, rdx, r8.
// First argument, backtrack stackpointer, is already in rcx.
__ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument
+ __ LoadAddress(r8, ExternalReference::isolate_address());
#else
- // AMD64 ABI passes parameters in rdi, rsi.
+ // AMD64 ABI passes parameters in rdi, rsi, rdx.
__ movq(rdi, backtrack_stackpointer()); // First argument.
__ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
+ __ LoadAddress(rdx, ExternalReference::isolate_address());
#endif
- ExternalReference grow_stack = ExternalReference::re_grow_stack();
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(masm_.isolate());
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
@@ -934,7 +947,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Otherwise use return value as new stack pointer.
__ movq(backtrack_stackpointer(), rax);
// Restore saved registers and continue.
- __ Move(code_object_pointer(), masm_->CodeObject());
+ __ Move(code_object_pointer(), masm_.CodeObject());
#ifndef _WIN64
__ pop(rdi);
__ pop(rsi);
@@ -946,19 +959,20 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// If any of the code above needed to exit with an exception.
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
- __ movq(rax, Immediate(EXCEPTION));
+ __ Set(rax, EXCEPTION);
__ jmp(&exit_label_);
}
FixupCodeRelativePositions();
CodeDesc code_desc;
- masm_->GetCode(&code_desc);
- Handle<Code> code = Factory::NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- PROFILE(RegExpCodeCreateEvent(*code, *source));
- return Handle<Object>::cast(code);
+ masm_.GetCode(&code_desc);
+ Isolate* isolate = ISOLATE;
+ Handle<Code> code = isolate->factory()->NewCode(
+ code_desc, Code::ComputeFlags(Code::REGEXP),
+ masm_.CodeObject());
+ PROFILE(isolate, RegExpCodeCreateEvent(*code, *source));
+ return Handle<HeapObject>::cast(code);
}
@@ -1051,9 +1065,9 @@ void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
- NearLabel after_position;
+ Label after_position;
__ cmpq(rdi, Immediate(-by * char_size()));
- __ j(greater_equal, &after_position);
+ __ j(greater_equal, &after_position, Label::kNear);
__ movq(rdi, Immediate(-by * char_size()));
// On RegExp code entry (where this operation is used), the character before
// the current position is expected to be already loaded.
@@ -1126,7 +1140,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ lea(rdi, Operand(rsp, -kPointerSize));
#endif
ExternalReference stack_check =
- ExternalReference::re_check_stack_guard_state();
+ ExternalReference::re_check_stack_guard_state(masm_.isolate());
__ CallCFunction(stack_check, num_arguments);
}
@@ -1141,8 +1155,10 @@ static T& frame_entry(Address re_frame, int frame_offset) {
int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
- if (StackGuard::IsStackOverflow()) {
- Top::StackOverflow();
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ ASSERT(isolate == Isolate::Current());
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ isolate->StackOverflow();
return EXCEPTION;
}
@@ -1289,8 +1305,8 @@ void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
// Patch the relative offset to be relative to the Code object pointer
// instead.
int patch_position = position - kIntSize;
- int offset = masm_->long_at(patch_position);
- masm_->long_at_put(patch_position,
+ int offset = masm_.long_at(patch_position);
+ masm_.long_at_put(patch_position,
offset
+ position
+ Code::kHeaderSize
@@ -1324,7 +1340,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
// Check for preemption.
Label no_preempt;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit();
+ ExternalReference::address_of_stack_limit(masm_.isolate());
__ load_rax(stack_limit);
__ cmpq(rsp, rax);
__ j(above, &no_preempt);
@@ -1338,7 +1354,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
void RegExpMacroAssemblerX64::CheckStackLimit() {
Label no_stack_overflow;
ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit();
+ ExternalReference::address_of_regexp_stack_limit(masm_.isolate());
__ load_rax(stack_limit);
__ cmpq(backtrack_stackpointer(), rax);
__ j(above, &no_stack_overflow);
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h
index 421a22944..02b510fa0 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h
@@ -75,7 +75,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
virtual bool CheckSpecialCharacterClass(uc16 type,
Label* on_no_match);
virtual void Fail();
- virtual Handle<Object> GetCode(Handle<String> source);
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
@@ -104,7 +104,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
Handle<String> subject,
int* offsets_vector,
int offsets_vector_length,
- int previous_index);
+ int previous_index,
+ Isolate* isolate);
static Result Execute(Code* code,
String* input,
@@ -142,6 +143,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
// DirectCall is passed as 32 bit int (values 0 or 1).
static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kIsolate = kDirectCall + kPointerSize;
#else
// In AMD64 ABI Calling Convention, the first six integer parameters
// are passed as registers, and caller must allocate space on the stack
@@ -153,6 +155,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kRegisterOutput = kInputEnd - kPointerSize;
static const int kStackHighEnd = kRegisterOutput - kPointerSize;
static const int kDirectCall = kFrameAlign;
+ static const int kIsolate = kDirectCall + kPointerSize;
#endif
#ifdef _WIN64
@@ -215,7 +218,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
void BranchOrBacktrack(Condition condition, Label* to);
void MarkPositionForCodeRelativeFixup() {
- code_relative_fixup_positions_.Add(masm_->pc_offset());
+ code_relative_fixup_positions_.Add(masm_.pc_offset());
}
void FixupCodeRelativePositions();
@@ -247,7 +250,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// Increments the stack pointer (rcx) by a word size.
inline void Drop();
- MacroAssembler* masm_;
+ MacroAssembler masm_;
+ MacroAssembler::NoRootArrayScope no_root_array_scope_;
ZoneList<int> code_relative_fixup_positions_;
diff --git a/deps/v8/src/x64/register-allocator-x64-inl.h b/deps/v8/src/x64/register-allocator-x64-inl.h
deleted file mode 100644
index c6bea3ab0..000000000
--- a/deps/v8/src/x64/register-allocator-x64-inl.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
-#define V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
- return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
- reg.is(kScratchRegister) || reg.is(kRootRegister) ||
- reg.is(kSmiConstantRegister);
-}
-
-
-// The register allocator uses small integers to represent the
-// non-reserved assembler registers.
-int RegisterAllocator::ToNumber(Register reg) {
- ASSERT(reg.is_valid() && !IsReserved(reg));
- const int kNumbers[] = {
- 0, // rax
- 2, // rcx
- 3, // rdx
- 1, // rbx
- -1, // rsp Stack pointer.
- -1, // rbp Frame pointer.
- -1, // rsi Context.
- 4, // rdi
- 5, // r8
- 6, // r9
- -1, // r10 Scratch register.
- 8, // r11
- 9, // r12
- -1, // r13 Roots array. This is callee saved.
- 7, // r14
- -1 // r15 Smi constant register.
- };
- return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
- ASSERT(num >= 0 && num < kNumRegisters);
- const Register kRegisters[] =
- { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r11, r12 };
- return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
- // The non-reserved rdi register is live on JS function entry.
- Use(rdi); // JS function.
-}
-} } // namespace v8::internal
-
-#endif // V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
diff --git a/deps/v8/src/x64/register-allocator-x64.cc b/deps/v8/src/x64/register-allocator-x64.cc
deleted file mode 100644
index 1f5467e13..000000000
--- a/deps/v8/src/x64/register-allocator-x64.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
- ASSERT(is_valid());
- if (is_constant()) {
- Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle());
- // This result becomes a copy of the fresh one.
- fresh.set_type_info(type_info());
- *this = fresh;
- }
- ASSERT(is_register());
-}
-
-
-void Result::ToRegister(Register target) {
- ASSERT(is_valid());
- if (!is_register() || !reg().is(target)) {
- Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate(target);
- ASSERT(fresh.is_valid());
- if (is_register()) {
- CodeGeneratorScope::Current()->masm()->movq(fresh.reg(), reg());
- } else {
- ASSERT(is_constant());
- CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle());
- }
- fresh.set_type_info(type_info());
- *this = fresh;
- } else if (is_register() && reg().is(target)) {
- ASSERT(CodeGeneratorScope::Current()->has_valid_frame());
- CodeGeneratorScope::Current()->frame()->Spill(target);
- ASSERT(CodeGeneratorScope::Current()->allocator()->count(target) == 1);
- }
- ASSERT(is_register());
- ASSERT(reg().is(target));
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
- // This function is not used in 64-bit code.
- UNREACHABLE();
- return Result();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h
index aa2994f26..df8423a65 100644
--- a/deps/v8/src/x64/simulator-x64.h
+++ b/deps/v8/src/x64/simulator-x64.h
@@ -40,12 +40,12 @@ namespace internal {
(entry(p0, p1, p2, p3, p4))
typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, Address, int);
+ const byte*, int*, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address should
-// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6))
+// expect eight int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
(reinterpret_cast<TryCatch*>(try_catch_address))
@@ -55,7 +55,8 @@ typedef int (*regexp_matcher)(String*, int, const byte*,
// just use the C stack limit.
class SimulatorStack : public v8::internal::AllStatic {
public:
- static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+ uintptr_t c_limit) {
return c_limit;
}
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 109985c72..da27fdf05 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_X64)
#include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "stub-cache.h"
namespace v8 {
@@ -39,7 +39,8 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void ProbeTable(MacroAssembler* masm,
+static void ProbeTable(Isolate* isolate,
+ MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
Register name,
@@ -48,10 +49,10 @@ static void ProbeTable(MacroAssembler* masm,
ASSERT_EQ(16, sizeof(StubCache::Entry));
// The offset register holds the entry offset times four (due to masking
// and shifting optimizations).
- ExternalReference key_offset(SCTableReference::keyReference(table));
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
Label miss;
- __ movq(kScratchRegister, key_offset);
+ __ LoadAddress(kScratchRegister, key_offset);
// Check that the key in the entry matches the name.
// Multiply entry offset by 16 to get the entry address. Since the
// offset register already holds the entry offset times four, multiply
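
The scaling trick here: each StubCache::Entry is 16 bytes, and the hash pipeline leaves the offset register holding the entry index already times four, so one more times-four scale reaches the entry address. A standalone sketch of the arithmetic, with a stand-in Entry layout rather than V8 source:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    struct Entry { void* key; void* value; };  // 16 bytes on x64
    static_assert(sizeof(Entry) == 16, "stub cache entries are 16 bytes");

    uintptr_t EntryAddress(uintptr_t table_base, uintptr_t offset_times_four) {
      // offset_times_four == entry_index * 4; scaling by 4 again yields
      // entry_index * 16, i.e. the byte offset of the entry.
      return table_base + offset_times_four * 4;
    }

    int main() {
      Entry table[8] = {};
      uintptr_t base = reinterpret_cast<uintptr_t>(table);
      for (uintptr_t index = 0; index < 8; ++index) {
        assert(EntryAddress(base, index * 4) ==
               reinterpret_cast<uintptr_t>(&table[index]));
      }
      std::puts("every probe lands on a 16-byte entry");
    }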
@@ -81,17 +82,18 @@ static void ProbeTable(MacroAssembler* masm,
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register r0,
- Register r1) {
+MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ String* name,
+ Register r0,
+ Register r1) {
ASSERT(name->IsSymbol());
- __ IncrementCounter(&Counters::negative_lookups, 1);
- __ IncrementCounter(&Counters::negative_lookups_miss, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1);
- Label done;
__ movq(r0, FieldOperand(receiver, HeapObject::kMapOffset));
const int kInterceptorOrAccessCheckNeededMask =
@@ -103,7 +105,7 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
__ j(below, miss_label);
// Load properties array.
@@ -115,64 +117,20 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
Heap::kHashTableMapRootIndex);
__ j(not_equal, miss_label);
- // Compute the capacity mask.
- const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
-
- // Generate an unrolled loop that performs a few probes before
- // giving up.
- static const int kProbes = 4;
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
-
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the null value).
- for (int i = 0; i < kProbes; i++) {
- // r0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = r1;
- // Capacity is smi 2^n.
- __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
- __ decl(index);
- __ and_(index,
- Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
-
- Register entity_name = r1;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- __ movq(entity_name, Operand(properties, index, times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ Cmp(entity_name, Factory::undefined_value());
- // __ jmp(miss_label);
- if (i != kProbes - 1) {
- __ j(equal, &done);
-
- // Stop if found the property.
- __ Cmp(entity_name, Handle<String>(name));
- __ j(equal, miss_label);
-
- // Check if the entry name is not a symbol.
- __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- Immediate(kIsSymbolMask));
- __ j(zero, miss_label);
- } else {
- // Give up probing if still not found the undefined value.
- __ j(not_equal, miss_label);
- }
- }
+ Label done;
+ MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
+ masm,
+ miss_label,
+ &done,
+ properties,
+ name,
+ r1);
+ if (result->IsFailure()) return result;
__ bind(&done);
- __ DecrementCounter(&Counters::negative_lookups_miss, 1);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1);
+
+ return result;
}
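
The unrolled loop deleted above probed the string dictionary quadratically to prove a property absent; StringDictionaryLookupStub::GenerateNegativeLookup now emits the equivalent probes once, shared across stubs. A standalone sketch of the probe sequence the removed comment describes, (hash + i + i*i) & mask, with illustrative constants rather than V8 source:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t capacity = 16;      // dictionary capacity, a power of two
      const uint32_t mask = capacity - 1;
      const uint32_t hash = 0xBEEF;      // stand-in for name->Hash()
      const int kProbes = 4;             // as in the removed loop
      for (int i = 0; i < kProbes; i++) {
        // The x64 loop used name->Hash() + StringDictionary::GetProbeOffset(i);
        // its own comment summarizes the masked index as below.
        uint32_t index = (hash + i + i * i) & mask;
        std::printf("probe %d -> slot %u\n", i, static_cast<unsigned>(index));
        // Finding undefined in a slot proves absence; finding the name itself
        // means the property exists, so the negative lookup must miss.
      }
    }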
@@ -183,6 +141,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register scratch,
Register extra,
Register extra2) {
+ Isolate* isolate = masm->isolate();
Label miss;
USE(extra); // The register extra is not used on the X64 platform.
USE(extra2); // The register extra2 is not used on the X64 platform.
@@ -212,7 +171,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
// Probe the primary table.
- ProbeTable(masm, flags, kPrimary, name, scratch);
+ ProbeTable(isolate, masm, flags, kPrimary, name, scratch);
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
@@ -224,7 +183,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
// Probe the secondary table.
- ProbeTable(masm, flags, kSecondary, name, scratch);
+ ProbeTable(isolate, masm, flags, kSecondary, name, scratch);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
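
GenerateProbe tries the primary table first and, on a key mismatch, rehashes into the secondary table before falling through to the runtime. A standalone sketch of that two-level lookup; the hash mixing and table sizes are illustrative only, not V8 source:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct Entry { uint32_t key; int value; };
    const int kPrimaryTableSize = 8;
    const int kSecondaryTableSize = 4;
    Entry primary[kPrimaryTableSize];      // zero-initialized
    Entry secondary[kSecondaryTableSize];

    int Probe(uint32_t name_hash, uint32_t flags) {
      uint32_t p = (name_hash ^ flags) & (kPrimaryTableSize - 1);
      if (primary[p].key == name_hash) return primary[p].value;
      // Primary miss: derive a second, different index and try again.
      uint32_t s = (name_hash - p + flags) & (kSecondaryTableSize - 1);
      if (secondary[s].key == name_hash) return secondary[s].value;
      return -1;  // cache miss: fall through to the runtime system
    }

    int main() {
      const uint32_t hash = 0xAB, flags = 0x3;
      uint32_t p = (hash ^ flags) & (kPrimaryTableSize - 1);
      primary[p] = {hash, 42};
      std::printf("%d\n", Probe(hash, flags));  // 42: primary hit
      std::printf("%d\n", Probe(0xCD, flags));  // -1: falls to the runtime
    }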
@@ -253,13 +212,15 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ Isolate* isolate = masm->isolate();
// Check we're still in the same context.
- __ Move(prototype, Top::global());
+ __ Move(prototype, isolate->global());
__ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)),
prototype);
__ j(not_equal, miss);
// Get the global function with the given index.
- JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
+ JSFunction* function =
+ JSFunction::cast(isolate->global_context()->get(index));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -375,7 +336,7 @@ static void PushInterceptorArguments(MacroAssembler* masm,
JSObject* holder_obj) {
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!Heap::InNewSpace(interceptor));
+ ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
__ Move(kScratchRegister, Handle<Object>(interceptor));
__ push(kScratchRegister);
__ push(receiver);
@@ -392,9 +353,10 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
- __ movq(rax, Immediate(5));
- __ movq(rbx, ref);
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
+ masm->isolate());
+ __ Set(rax, 5);
+ __ LoadAddress(rbx, ref);
CEntryStub stub(1);
__ CallStub(&stub);
@@ -466,7 +428,7 @@ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
__ movq(Operand(rsp, 2 * kPointerSize), rdi);
Object* call_data = optimization.api_call_info()->data();
Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (Heap::InNewSpace(call_data)) {
+ if (masm->isolate()->heap()->InNewSpace(call_data)) {
__ Move(rcx, api_call_info_handle);
__ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
__ movq(Operand(rsp, 3 * kPointerSize), rbx);
@@ -516,10 +478,12 @@ class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
const ParameterCount& arguments,
- Register name)
+ Register name,
+ Code::ExtraICState extra_ic_state)
: stub_compiler_(stub_compiler),
arguments_(arguments),
- name_(name) {}
+ name_(name),
+ extra_ic_state_(extra_ic_state) {}
MaybeObject* Compile(MacroAssembler* masm,
JSObject* object,
@@ -561,7 +525,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name,
holder,
miss);
- return Heap::undefined_value(); // Success.
+ return masm->isolate()->heap()->undefined_value(); // Success.
}
}
@@ -597,10 +561,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
(depth2 != kInvalidProtoDepth);
}
- __ IncrementCounter(&Counters::call_const_interceptor, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->call_const_interceptor(), 1);
if (can_do_fast_api_call) {
- __ IncrementCounter(&Counters::call_const_interceptor_fast_api, 1);
+ __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1);
ReserveSpaceForFastApiCall(masm, scratch1);
}
@@ -643,8 +608,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
arguments_.immediate());
if (result->IsFailure()) return result;
} else {
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
__ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION);
+ JUMP_FUNCTION, NullCallWrapper(), call_kind);
}
// Deferred code for fast API call case---clean preallocated space.
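
The recurring CallICBase::Contextual::decode(extra_ic_state_) pattern introduced throughout this file unpacks one flag from the packed extra IC state to choose CALL_AS_FUNCTION over CALL_AS_METHOD. A standalone sketch of the bit-field decode; the shift position is an assumption for illustration, not V8 source:

    #include <cstdio>

    enum CallKind { CALL_AS_METHOD, CALL_AS_FUNCTION };

    struct Contextual {  // stand-in for a BitField<bool, shift, 1>
      static const int kShift = 0;  // assumed position, illustrative only
      static bool decode(int state) { return ((state >> kShift) & 1) != 0; }
    };

    int main() {
      for (int state = 0; state < 2; ++state) {
        CallKind kind = Contextual::decode(state) ? CALL_AS_FUNCTION
                                                  : CALL_AS_METHOD;
        std::printf("state %d -> %s\n", state,
                    kind == CALL_AS_FUNCTION ? "CALL_AS_FUNCTION"
                                             : "CALL_AS_METHOD");
      }
    }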
@@ -660,7 +628,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
FreeSpaceForFastApiCall(masm, scratch1);
}
- return Heap::undefined_value(); // Success.
+ return masm->isolate()->heap()->undefined_value(); // Success.
}
void CompileRegular(MacroAssembler* masm,
@@ -688,7 +656,8 @@ class CallInterceptorCompiler BASE_EMBEDDED {
interceptor_holder);
__ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
+ masm->isolate()),
5);
// Restore the name_ register.
@@ -722,6 +691,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
StubCompiler* stub_compiler_;
const ParameterCount& arguments_;
Register name_;
+ Code::ExtraICState extra_ic_state_;
};
@@ -729,9 +699,9 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
Code* code = NULL;
if (kind == Code::LOAD_IC) {
- code = Builtins::builtin(Builtins::LoadIC_Miss);
+ code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
} else {
- code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+ code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
}
Handle<Code> ic(code);
@@ -739,6 +709,14 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
}
+void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
+ Code* code = masm->isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_MissForceGeneric);
+ Handle<Code> ic(code);
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
@@ -776,7 +754,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ push(rax);
__ push(scratch);
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)), 3, 1);
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ masm->isolate()),
+ 3,
+ 1);
return;
}
@@ -836,7 +817,7 @@ MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
ASSERT(cell->value()->IsTheHole());
__ Move(scratch, Handle<Object>(cell));
__ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- Factory::the_hole_value());
+ masm->isolate()->factory()->the_hole_value());
__ j(not_equal, miss);
return cell;
}
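
The cell comparison works because a global property proven nonexistent is represented by a property cell whose value slot holds the hole sentinel; any other value means the property has since been created, so the stub must miss. A standalone sketch of the invariant with stand-in types, not V8 source:

    #include <cstdio>

    struct Object {};
    Object the_hole;  // stand-in for the heap's hole sentinel
    struct PropertyCell { Object* value; };

    bool StillNonexistent(const PropertyCell& cell) {
      // Models __ Cmp(FieldOperand(scratch, kValueOffset), the_hole_value()).
      return cell.value == &the_hole;
    }

    int main() {
      PropertyCell cell{&the_hole};
      std::printf("%d\n", StillNonexistent(cell));  // 1: fast path holds
      Object created;
      cell.value = &created;                         // property was created
      std::printf("%d\n", StillNonexistent(cell));  // 0: jump to miss
    }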
@@ -885,7 +866,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
- MaybeObject* lookup_result = Heap::LookupSymbol(name);
+ MaybeObject* lookup_result = heap()->LookupSymbol(name);
if (lookup_result->IsFailure()) {
set_failure(Failure::cast(lookup_result));
return reg;
@@ -896,16 +877,21 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
ASSERT(current->property_dictionary()->FindEntry(name) ==
StringDictionary::kNotFound);
- GenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
+ MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+ miss,
+ reg,
+ name,
+ scratch1,
+ scratch2);
+ if (negative_lookup->IsFailure()) {
+ set_failure(Failure::cast(negative_lookup));
+ return reg;
+ }
+
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // from now the object is in holder_reg
__ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else if (Heap::InNewSpace(prototype)) {
+ } else if (heap()->InNewSpace(prototype)) {
// Get the map of the current object.
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
__ Cmp(scratch1, Handle<Map>(current->map()));
@@ -956,7 +942,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
__ j(not_equal, miss);
// Log the check depth.
- LOG(IntEvent("check-maps-depth", depth + 1));
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
// Perform security check for access to the global object and return
// the holder register.
@@ -1039,7 +1025,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
__ push(receiver); // receiver
__ push(reg); // holder
- if (Heap::InNewSpace(callback_handle->data())) {
+ if (heap()->InNewSpace(callback_handle->data())) {
__ Move(scratch1, callback_handle);
__ push(FieldOperand(scratch1, AccessorInfo::kDataOffset)); // data
} else {
@@ -1230,7 +1216,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
__ push(scratch2); // restore return address
ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
+ isolate());
__ TailCallExternalReference(ref, 5, 1);
}
} else { // !compile_followup_inline
@@ -1245,7 +1232,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
__ push(scratch2); // restore old return address
ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
__ TailCallExternalReference(ref, 5, 1);
}
}
@@ -1291,7 +1278,7 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
__ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (Heap::InNewSpace(function)) {
+ if (heap()->InNewSpace(function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
@@ -1313,8 +1300,10 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj = StubCache::ComputeCallMiss(arguments().immediate(),
- kind_);
+ MaybeObject* maybe_obj =
+ isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
+ kind_,
+ extra_ic_state_);
Object* obj;
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
__ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
@@ -1365,14 +1354,16 @@ MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
}
// Invoke the function.
- __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
// Handle call cache miss.
__ bind(&miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(FIELD, name);
@@ -1393,7 +1384,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
Label miss;
@@ -1427,7 +1418,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check that the elements are in fast mode and writable.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- Factory::fixed_array_map());
+ factory()->fixed_array_map());
__ j(not_equal, &call_builtin);
if (argc == 1) { // Otherwise fall through to call builtin.
@@ -1477,14 +1468,13 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
}
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
const int kAllocationDelta = 4;
// Load top.
- __ movq(rcx, new_space_allocation_top);
- __ movq(rcx, Operand(rcx, 0));
+ __ Load(rcx, new_space_allocation_top);
// Check if it's the end of elements.
__ lea(rdx, FieldOperand(rbx,
@@ -1493,13 +1483,13 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ cmpq(rdx, rcx);
__ j(not_equal, &call_builtin);
__ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
- __ movq(kScratchRegister, new_space_allocation_limit);
- __ cmpq(rcx, Operand(kScratchRegister, 0));
+ Operand limit_operand =
+ masm()->ExternalOperand(new_space_allocation_limit);
+ __ cmpq(rcx, limit_operand);
__ j(above, &call_builtin);
// We fit and could grow elements.
- __ movq(kScratchRegister, new_space_allocation_top);
- __ movq(Operand(kScratchRegister, 0), rcx);
+ __ Store(new_space_allocation_top, rcx);
__ movq(rcx, Operand(rsp, argc * kPointerSize));
// Push the argument...
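
The rewritten push fast path still relies on bump-pointer allocation: if the elements backing store ends exactly at the new-space allocation top, and bumping top by kAllocationDelta words stays at or under the limit, the stub grows the array in place. A standalone sketch of that check, not V8 source:

    #include <cstdint>
    #include <cstdio>

    const int kAllocationDelta = 4;
    const int kPointerSize = 8;

    bool TryGrowInPlace(uintptr_t elements_end, uintptr_t* top,
                        uintptr_t limit) {
      if (elements_end != *top) return false;  // something allocated after us
      uintptr_t new_top = *top + kAllocationDelta * kPointerSize;
      if (new_top > limit) return false;       // j(above, &call_builtin)
      *top = new_top;                          // __ Store(allocation_top, rcx)
      return true;
    }

    int main() {
      uintptr_t top = 0x1000, limit = 0x1100;
      std::printf("%d\n", TryGrowInPlace(0x1000, &top, limit));  // 1: grew
      std::printf("top is now %#lx\n", static_cast<unsigned long>(top));
    }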
@@ -1526,16 +1516,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
}
__ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
+ __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
+ isolate()),
argc + 1,
1);
}
__ bind(&miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -1556,7 +1545,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
Label miss, return_undefined, call_builtin;
@@ -1611,15 +1600,14 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
- argc + 1,
- 1);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPop, isolate()),
+ argc + 1,
+ 1);
__ bind(&miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -1641,7 +1629,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsString() || cell != NULL) return heap()->undefined_value();
const int argc = arguments().immediate();
@@ -1650,7 +1638,9 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+ if (kind_ == Code::CALL_IC &&
+ (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
@@ -1700,10 +1690,8 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// Restore function name in rcx.
__ Move(rcx, Handle<String>(name));
__ bind(&name_miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -1725,7 +1713,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsString() || cell != NULL) return heap()->undefined_value();
const int argc = arguments().immediate();
@@ -1734,7 +1722,9 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+ if (kind_ == Code::CALL_IC &&
+ (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
@@ -1786,10 +1776,8 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// Restore function name in rcx.
__ Move(rcx, Handle<String>(name));
__ bind(&name_miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -1814,7 +1802,7 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
@@ -1853,14 +1841,16 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
__ bind(&miss);
// rcx: function name.
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
@@ -1873,7 +1863,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
JSFunction* function,
String* name) {
// TODO(872): implement this.
- return Heap::undefined_value();
+ return heap()->undefined_value();
}
@@ -1894,7 +1884,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
@@ -1943,7 +1933,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// Check if the argument is a heap number and load its value.
__ bind(&not_smi);
- __ CheckMap(rax, Factory::heap_number_map(), &slow, true);
+ __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
__ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
// Check the sign of the argument. If the argument is positive,
@@ -1968,20 +1958,81 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
__ bind(&miss);
// rcx: function name.
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
}
+MaybeObject* CallStubCompiler::CompileFastApiCall(
+ const CallOptimization& optimization,
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ ASSERT(optimization.is_simple_api_call());
+ // Bail out if object is a global object as we don't want to
+ // repatch it to global receiver.
+ if (object->IsGlobalObject()) return heap()->undefined_value();
+ if (cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSObject()) return heap()->undefined_value();
+ int depth = optimization.GetPrototypeDepthOfExpectedType(
+ JSObject::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+
+ Label miss, miss_before_stack_reserved;
+
+ GenerateNameCheck(name, &miss_before_stack_reserved);
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(rdx, &miss_before_stack_reserved);
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->call_const(), 1);
+ __ IncrementCounter(counters->call_const_fast_api(), 1);
+
+ // Allocate space for v8::Arguments implicit values. Must be initialized
+ // before calling any runtime function.
+ __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
+
+ // Check that the maps haven't changed and find a Holder as a side effect.
+ CheckPrototypes(JSObject::cast(object), rdx, holder,
+ rbx, rax, rdi, name, depth, &miss);
+
+ // Move the return address on top of the stack.
+ __ movq(rax, Operand(rsp, 3 * kPointerSize));
+ __ movq(Operand(rsp, 0 * kPointerSize), rax);
+
+ MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
+ if (result->IsFailure()) return result;
+
+ __ bind(&miss);
+ __ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
+
+ __ bind(&miss_before_stack_reserved);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
+}
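+
+// Editor's sketch of the return-address shuffle above, which mirrors the
+// code deleted from CompileCallConstant below: reserving the
+// kFastApiCallArguments slots buries the return address, so it is copied
+// back to the new stack top, leaving the reserved slots for the
+// v8::Arguments implicit values. Standalone model, assuming
+// kFastApiCallArguments is 3 to match the 3 * kPointerSize read; not V8
+// source.
+//
+//   #include <cstdio>
+//
+//   const int kFastApiCallArguments = 3;
+//
+//   int main() {
+//     // Model the stack as an array whose index shrinks like rsp.
+//     long stack[8] = {};
+//     int sp = 7;
+//     stack[sp] = 0xCAFE;           // return address on top
+//     sp -= kFastApiCallArguments;  // subq(rsp, 3 * kPointerSize)
+//     stack[sp] = stack[sp + kFastApiCallArguments];  // copy it back up
+//     std::printf("top of stack: %#lx\n", stack[sp]); // 0xcafe again
+//   }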
+
+
MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
@@ -1997,20 +2048,18 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- SharedFunctionInfo* function_info = function->shared();
- if (function_info->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = function_info->builtin_function_id();
+ if (HasCustomCallGenerator(function)) {
MaybeObject* maybe_result = CompileCustomCall(
- id, object, holder, NULL, function, name);
+ object, holder, NULL, function, name);
Object* result;
if (!maybe_result->ToObject(&result)) return maybe_result;
// undefined means bail out to regular compiler.
if (!result->IsUndefined()) return result;
}
- Label miss_in_smi_check;
+ Label miss;
- GenerateNameCheck(name, &miss_in_smi_check);
+ GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -2018,42 +2067,26 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
- __ JumpIfSmi(rdx, &miss_in_smi_check);
+ __ JumpIfSmi(rdx, &miss);
}
// Make sure that it's okay not to patch the on stack receiver
// unless we're doing a receiver map check.
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- CallOptimization optimization(function);
- int depth = kInvalidProtoDepth;
- Label miss;
-
+ Counters* counters = isolate()->counters();
+ SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
- __ IncrementCounter(&Counters::call_const, 1);
-
- if (optimization.is_simple_api_call() && !object->IsGlobalObject()) {
- depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- }
-
- if (depth != kInvalidProtoDepth) {
- __ IncrementCounter(&Counters::call_const_fast_api, 1);
-
- // Allocate space for v8::Arguments implicit values. Must be initialized
- // before to call any runtime function.
- __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
- }
+ __ IncrementCounter(counters->call_const(), 1);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rax, rdi, name, depth, &miss);
+ rbx, rax, rdi, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
if (object->IsGlobalObject()) {
- ASSERT(depth == kInvalidProtoDepth);
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
__ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
}
@@ -2123,31 +2156,16 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
UNREACHABLE();
}
- if (depth != kInvalidProtoDepth) {
- // Move the return address on top of the stack.
- __ movq(rax, Operand(rsp, 3 * kPointerSize));
- __ movq(Operand(rsp, 0 * kPointerSize), rax);
-
- // rsp[2 * kPointerSize] is uninitialized, rsp[3 * kPointerSize] contains
- // duplicate of return address and will be overwritten.
- MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
- } else {
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
- }
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
// Handle call cache miss.
__ bind(&miss);
- if (depth != kInvalidProtoDepth) {
- __ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
- }
-
- // Handle call cache miss.
- __ bind(&miss_in_smi_check);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -2179,7 +2197,7 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Get the receiver from the stack.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- CallInterceptorCompiler compiler(this, arguments(), rcx);
+ CallInterceptorCompiler compiler(this, arguments(), rcx, extra_ic_state_);
MaybeObject* result = compiler.Compile(masm(),
object,
holder,
@@ -2209,14 +2227,16 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Invoke the function.
__ movq(rdi, rax);
- __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
// Handle load cache miss.
__ bind(&miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(INTERCEPTOR, name);
@@ -2238,11 +2258,9 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- SharedFunctionInfo* function_info = function->shared();
- if (function_info->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = function_info->builtin_function_id();
+ if (HasCustomCallGenerator(function)) {
MaybeObject* maybe_result = CompileCustomCall(
- id, object, holder, cell, function, name);
+ object, holder, cell, function, name);
Object* result;
if (!maybe_result->ToObject(&result)) return maybe_result;
// undefined means bail out to regular compiler.
@@ -2270,27 +2288,31 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
- __ IncrementCounter(&Counters::call_global_inline, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->call_global_inline(), 1);
ASSERT(function->is_compiled());
ParameterCount expected(function->shared()->formal_parameter_count());
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
if (V8::UseCrankshaft()) {
// TODO(kasperl): For now, we always call indirectly through the
// code field in the function to allow recompilation to take effect
// without changing any of the call sites.
__ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION);
+ __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
} else {
Handle<Code> code(function->code());
__ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
}
// Handle call cache miss.
__ bind(&miss);
- __ IncrementCounter(&Counters::call_global_inline_miss, 1);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ __ IncrementCounter(counters->call_global_inline_miss(), 1);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(NORMAL, name);
@@ -2319,7 +2341,7 @@ MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2364,12 +2386,12 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
__ TailCallExternalReference(store_callback_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2413,12 +2435,12 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2455,13 +2477,14 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
__ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
// Return the value (register rax).
- __ IncrementCounter(&Counters::named_store_global_inline, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_store_global_inline(), 1);
__ ret(0);
// Handle store cache miss.
__ bind(&miss);
- __ IncrementCounter(&Counters::named_store_global_inline_miss, 1);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ IncrementCounter(counters->named_store_global_inline_miss(), 1);
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2481,7 +2504,8 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_store_field, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_store_field(), 1);
// Check that the name has not changed.
__ Cmp(rcx, Handle<String>(name));
@@ -2497,8 +2521,8 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
// Handle store cache miss.
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_store_field, 1);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ __ DecrementCounter(counters->keyed_store_field(), 1);
+ Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2506,56 +2530,22 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
- JSObject* receiver) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label miss;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- // Check that the map matches.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Handle<Map>(receiver->map()));
- __ j(not_equal, &miss);
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &miss);
-
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- Factory::fixed_array_map());
- __ j(not_equal, &miss);
-
- // Check that the key is within bounds.
- if (receiver->IsJSArray()) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- __ j(above_equal, &miss);
- } else {
- __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
- __ j(above_equal, &miss);
- }
-
- // Do the store and update the write barrier. Make sure to preserve
- // the value in register eax.
- __ movq(rdx, rax);
- __ SmiToInteger32(rcx, rcx);
- __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- __ RecordWrite(rdi, 0, rdx, rcx);
-
- // Done.
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ Code* stub;
+ MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map);
+ if (!maybe_stub->To(&stub)) return maybe_stub;
+ __ DispatchMap(rdx,
+ Handle<Map>(receiver_map),
+ Handle<Code>(stub),
+ DO_SMI_CHECK);
+
+ Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2563,8 +2553,9 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
}
-MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
- JSObject* receiver) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+ MapList* receiver_maps,
+ CodeList* handler_ics) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -2572,31 +2563,26 @@ MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
// -- rsp[0] : return address
// -----------------------------------
Label miss;
+ __ JumpIfSmi(rdx, &miss);
- // Check that the map matches.
- __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss, false);
-
- // Do the load.
- GenerateFastPixelArrayStore(masm(),
- rdx,
- rcx,
- rax,
- rdi,
- rbx,
- true,
- false,
- &miss,
- &miss,
- NULL,
- &miss);
+ Register map_reg = rbx;
+ __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
+ int receiver_count = receiver_maps->length();
+ for (int current = 0; current < receiver_count; ++current) {
+ // Check the map and tail call if there's a match.
+ Handle<Map> map(receiver_maps->at(current));
+ __ Cmp(map_reg, map);
+ __ j(equal,
+ Handle<Code>(handler_ics->at(current)),
+ RelocInfo::CODE_TARGET);
+ }
- // Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, NULL, MEGAMORPHIC);
}
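
CompileStoreMegamorphic replaces the single-receiver pixel-array stub with a dispatch loop: load the receiver's map once, compare it against every map the IC has seen, and tail-jump to the matching handler, or miss. A standalone sketch of that dispatch; the handler names are illustrative, not V8 source:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Map {};
    typedef const char* Handler;  // stand-in for a handler Code object

    Handler Dispatch(const Map* receiver_map,
                     const std::vector<const Map*>& maps,
                     const std::vector<Handler>& handlers) {
      for (std::size_t current = 0; current < maps.size(); ++current) {
        if (receiver_map == maps[current]) return handlers[current];
      }
      return "KeyedStoreIC_Miss";  // __ jmp(ic, RelocInfo::CODE_TARGET)
    }

    int main() {
      Map a, b;
      std::vector<const Map*> maps = {&a, &b};
      std::vector<Handler> handlers = {"store_fast", "store_dictionary"};
      std::printf("%s\n", Dispatch(&b, maps, handlers));      // second handler
      std::printf("%s\n", Dispatch(nullptr, maps, handlers)); // miss
    }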
@@ -2610,7 +2596,7 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
// -----------------------------------
Label miss;
- // Chech that receiver is not a smi.
+ // Check that receiver is not a smi.
__ JumpIfSmi(rax, &miss);
// Check the maps of the full prototype chain. Also check that
@@ -2641,7 +2627,7 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, Heap::empty_string());
+ return GetCode(NONEXISTENT, heap()->empty_string());
}
@@ -2780,12 +2766,13 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
- __ IncrementCounter(&Counters::named_load_global_stub, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1);
__ movq(rax, rbx);
__ ret(0);
__ bind(&miss);
- __ IncrementCounter(&Counters::named_load_global_stub_miss, 1);
+ __ IncrementCounter(counters->named_load_global_stub_miss(), 1);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
@@ -2804,7 +2791,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_field, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_field(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
@@ -2813,7 +2801,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
GenerateLoadField(receiver, holder, rdx, rbx, rcx, rdi, index, name, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_field, 1);
+ __ DecrementCounter(counters->keyed_load_field(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -2833,7 +2821,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_callback, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_callback(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
@@ -2848,7 +2837,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_callback, 1);
+ __ DecrementCounter(counters->keyed_load_callback(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -2867,7 +2856,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_constant_function, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_constant_function(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
@@ -2876,7 +2866,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi,
value, name, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_constant_function, 1);
+ __ DecrementCounter(counters->keyed_load_constant_function(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -2894,7 +2884,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_interceptor, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_interceptor(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
@@ -2913,7 +2904,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
name,
&miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_interceptor, 1);
+ __ DecrementCounter(counters->keyed_load_interceptor(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -2929,7 +2920,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_array_length, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_array_length(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
@@ -2937,7 +2929,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
GenerateLoadArrayLength(masm(), rdx, rcx, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_array_length, 1);
+ __ DecrementCounter(counters->keyed_load_array_length(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -2953,7 +2945,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_string_length, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_string_length(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
@@ -2961,7 +2954,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss, true);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_string_length, 1);
+ __ DecrementCounter(counters->keyed_load_string_length(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -2977,7 +2970,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
@@ -2985,7 +2979,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
GenerateLoadFunctionPrototype(masm(), rdx, rcx, rbx, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
+ __ DecrementCounter(counters->keyed_load_function_prototype(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -2993,78 +2987,56 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
- // -- esp[0] : return address
+ // -- rsp[0] : return address
// -----------------------------------
- Label miss;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- // Check that the map matches.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Handle<Map>(receiver->map()));
- __ j(not_equal, &miss);
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rax, &miss);
-
- // Get the elements array.
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rcx);
-
- // Check that the key is within bounds.
- __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(above_equal, &miss);
-
- // Load the result and make sure it's not the hole.
- SmiIndex index = masm()->SmiToIndex(rbx, rax, kPointerSizeLog2);
- __ movq(rbx, FieldOperand(rcx,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss);
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+ Code* stub;
+ MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map);
+ if (!maybe_stub->To(&stub)) return maybe_stub;
+ __ DispatchMap(rdx,
+ Handle<Map>(receiver_map),
+ Handle<Code>(stub),
+ DO_SMI_CHECK);
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+ __ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(NORMAL, NULL);
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+ MapList* receiver_maps,
+ CodeList* handler_ics) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
- // -- esp[0] : return address
+ // -- rsp[0] : return address
// -----------------------------------
Label miss;
+ __ JumpIfSmi(rdx, &miss);
- // Check that the map matches.
- __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss, false);
-
- GenerateFastPixelArrayLoad(masm(),
- rdx,
- rax,
- rbx,
- rcx,
- rax,
- &miss,
- &miss,
- &miss);
+ Register map_reg = rbx;
+ __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
+ int receiver_count = receiver_maps->length();
+ for (int current = 0; current < receiver_count; ++current) {
+ // Check the map and tail call if there's a match.
+ Handle<Map> map(receiver_maps->at(current));
+ __ Cmp(map_reg, map);
+ __ j(equal,
+ Handle<Code>(handler_ics->at(current)),
+ RelocInfo::CODE_TARGET);
+ }
- __ bind(&miss);
+ __ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, NULL, MEGAMORPHIC);
}
@@ -3080,7 +3052,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
Label generic_stub_call;
// Use r8 for holding undefined which is used in several places below.
- __ Move(r8, Factory::undefined_value());
+ __ Move(r8, factory()->undefined_value());
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check to see whether there are any break points in the function code. If
@@ -3124,7 +3096,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// rbx: initial map
// rdx: JSObject (untagged)
__ movq(Operand(rdx, JSObject::kMapOffset), rbx);
- __ Move(rbx, Factory::empty_fixed_array());
+ __ Move(rbx, factory()->empty_fixed_array());
__ movq(Operand(rdx, JSObject::kPropertiesOffset), rbx);
__ movq(Operand(rdx, JSObject::kElementsOffset), rbx);
@@ -3183,14 +3155,16 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
__ pop(rcx);
__ lea(rsp, Operand(rsp, rbx, times_pointer_size, 1 * kPointerSize));
__ push(rcx);
- __ IncrementCounter(&Counters::constructed_objects, 1);
- __ IncrementCounter(&Counters::constructed_objects_stub, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->constructed_objects(), 1);
+ __ IncrementCounter(counters->constructed_objects_stub(), 1);
__ ret(0);
// Jump to the generic stub in case the specialized code cannot handle the
// construction.
__ bind(&generic_stub_call);
- Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Code* code =
+ isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
@@ -3199,45 +3173,32 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
}
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
- ExternalArrayType array_type, Code::Flags flags) {
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+ MacroAssembler* masm,
+ JSObject::ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow;
+ Label slow, miss_force_generic;
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow);
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller not to be a smi.
// Check that the key is a smi.
- __ JumpIfNotSmi(rax, &slow);
-
- // Check that the object is a JS object.
- __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &slow);
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks. The map is already in rdx.
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow);
-
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- // rax: index (as a smi)
- // rdx: JSObject
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::RootIndexForExternalArrayType(array_type));
- __ j(not_equal, &slow);
+ __ JumpIfNotSmi(rax, &miss_force_generic);
// Check that the index is in range.
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ SmiToInteger32(rcx, rax);
__ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
+ __ j(above_equal, &miss_force_generic);
// rax: index (as a smi)
// rdx: receiver (JSObject)
@@ -3245,28 +3206,32 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
// rbx: elements array
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
__ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
- case kExternalShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
__ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
- case kExternalIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
__ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
break;
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ movl(rcx, Operand(rbx, rcx, times_4, 0));
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
__ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
break;
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ __ movsd(xmm0, Operand(rbx, rcx, times_8, 0));
+ break;
default:
UNREACHABLE();
break;
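
This switch is the point of keying the stubs by elements kind: signed kinds sign-extend, unsigned kinds zero-extend, and float kinds widen to double, exactly as the movsx/movzx/cvtss2sd/movsd variants above do. A standalone sketch of the corresponding C semantics, not V8 source:

    #include <cstdint>
    #include <cstdio>

    int main() {
      unsigned char byte_store[1] = {0xFF};
      // EXTERNAL_BYTE_ELEMENTS: movsxbq, a sign-extending load.
      int64_t as_signed = static_cast<int8_t>(byte_store[0]);
      // EXTERNAL_UNSIGNED_BYTE_ELEMENTS / PIXEL: movzxbq, zero-extending.
      int64_t as_unsigned = static_cast<uint8_t>(byte_store[0]);
      std::printf("signed: %lld, unsigned: %lld\n",
                  static_cast<long long>(as_signed),
                  static_cast<long long>(as_unsigned));  // -1, 255

      float f = 1.5f;
      double widened = static_cast<double>(f);  // cvtss2sd
      std::printf("widened: %f\n", widened);
    }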
@@ -3280,13 +3245,13 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
// xmm0: value as double.
ASSERT(kSmiValueSize == 32);
- if (array_type == kExternalUnsignedIntArray) {
+ if (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) {
// For the UnsignedInt array type, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
- NearLabel box_int;
+ Label box_int;
- __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
+ __ JumpIfUIntNotValidSmiValue(rcx, &box_int, Label::kNear);
__ Integer32ToSmi(rax, rcx);
__ ret(0);
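
With kSmiValueSize == 32 the smi payload is a signed 32-bit integer, so exactly the uint32 values with the top bit set fail JumpIfUIntNotValidSmiValue and take the HeapNumber path. A standalone sketch of the representability test, not V8 source:

    #include <cstdint>
    #include <cstdio>

    bool UIntFitsSmi(uint32_t value) {
      return value <= static_cast<uint32_t>(INT32_MAX);  // payload is signed
    }

    int main() {
      std::printf("%d\n", UIntFitsSmi(42));           // 1: Integer32ToSmi
      std::printf("%d\n", UIntFitsSmi(0x80000000u));  // 0: allocate HeapNumber
    }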
@@ -3304,7 +3269,8 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx);
__ ret(0);
- } else if (array_type == kExternalFloatArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(rcx, rbx, &slow);
@@ -3319,7 +3285,8 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
// Slow case: Jump to runtime.
__ bind(&slow);
- __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
// ----------- S t a t e -------------
// -- rax : key
@@ -3327,60 +3294,46 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
// -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
- __ push(rbx); // return address
+ Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ jmp(ic, RelocInfo::CODE_TARGET);
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+ // Miss case: Jump to runtime.
+ __ bind(&miss_force_generic);
- // Return the generated code.
- return GetCode(flags);
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ jmp(miss_ic, RelocInfo::CODE_TARGET);
}
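
Two distinct exits matter in this stub and the ones below: "slow" copes
with an unusual value by tail-jumping to the slow builtin while leaving
this specialized stub installed, whereas "miss_force_generic" means the
receiver or key shape stopped matching, so the IC rewrites itself to the
generic handler for all future requests. A toy state transition showing
the difference (editor's sketch, hypothetical names):

    enum class IcState { kSpecialized, kGeneric };

    // A slow call handles one request without changing state; a forced
    // miss permanently flips the IC to the generic handler.
    static IcState OnStubExit(IcState state, bool miss_force_generic) {
      return miss_force_generic ? IcState::kGeneric : state;
    }
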
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
- ExternalArrayType array_type, Code::Flags flags) {
+void KeyedStoreStubCompiler::GenerateStoreExternalArray(
+ MacroAssembler* masm,
+ JSObject::ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow;
+ Label slow, miss_force_generic;
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow);
- // Get the map from the receiver.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow);
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &slow);
-
- // Check that the object is a JS object.
- __ CmpInstanceType(rbx, JS_OBJECT_TYPE);
- __ j(not_equal, &slow);
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller not to be a smi.
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::RootIndexForExternalArrayType(array_type));
- __ j(not_equal, &slow);
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rcx, &miss_force_generic);
// Check that the index is in range.
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ SmiToInteger32(rdi, rcx); // Untag the index.
__ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
+ __ j(above_equal, &miss_force_generic);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
@@ -3389,92 +3342,127 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// rdx: receiver (a JSObject)
// rbx: elements array
// rdi: untagged key
- NearLabel check_heap_number;
- __ JumpIfNotSmi(rax, &check_heap_number);
+ Label check_heap_number;
+ if (elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS) {
+ // Float-to-pixel conversion is only implemented in the runtime for now.
+ __ JumpIfNotSmi(rax, &slow);
+ } else {
+ __ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
+ }
// No more branches to slow case on this path. Key and receiver not needed.
__ SmiToInteger32(rdx, rax);
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ { // Clamp the value to [0..255].
+ Label done;
+ __ testl(rdx, Immediate(0xFFFFFF00));
+ __ j(zero, &done, Label::kNear);
+ __ setcc(negative, rdx); // 1 if negative, 0 if positive.
+ __ decb(rdx); // 0 if negative, 255 if positive.
+ __ bind(&done);
+ }
+ __ movb(Operand(rbx, rdi, times_1, 0), rdx);
+ break;
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ movb(Operand(rbx, rdi, times_1, 0), rdx);
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ movw(Operand(rbx, rdi, times_2, 0), rdx);
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ movl(Operand(rbx, rdi, times_4, 0), rdx);
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
// Need to perform int-to-float conversion.
__ cvtlsi2ss(xmm0, rdx);
__ movss(Operand(rbx, rdi, times_4, 0), xmm0);
break;
- default:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ // Need to perform int-to-double conversion.
+ __ cvtlsi2sd(xmm0, rdx);
+ __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
+ break;
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
__ ret(0);
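
The pixel case above clamps with a branch-free sequence: testl checks for
any bits outside [0..255]; if some are set, setcc(negative) yields 1 for a
negative input and 0 for a too-large one, and decb turns that into 0x00 or
0xFF respectively. The same logic in plain C++ (editor's sketch):

    #include <cstdint>

    static inline uint8_t ClampToUint8(int32_t value) {
      if ((value & 0xFFFFFF00) == 0) {
        return static_cast<uint8_t>(value);        // already in [0..255]
      }
      uint8_t is_negative = value < 0 ? 1 : 0;     // setcc(negative)
      return static_cast<uint8_t>(is_negative - 1);  // decb: 0x00 or 0xFF
    }
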
- __ bind(&check_heap_number);
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
- __ j(not_equal, &slow);
- // No more branches to slow case on this path.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rdi: untagged index
- // rbx: base pointer of external storage
- // top of FPU stack: value
- if (array_type == kExternalFloatArray) {
- __ cvtsd2ss(xmm0, xmm0);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- __ ret(0);
- } else {
- // Perform float-to-int conversion with truncation (round-to-zero)
- // behavior.
-
- // Convert to int32 and store the low byte/word.
- // If the value is NaN or +/-infinity, the result is 0x80000000,
- // which is automatically zero when taken mod 2^n, n < 32.
- // rdx: value (converted to an untagged integer)
+ // TODO(danno): handle heap number -> pixel array conversion
+ if (elements_kind != JSObject::EXTERNAL_PIXEL_ELEMENTS) {
+ __ bind(&check_heap_number);
+ // rax: value
+ // rcx: key (a smi)
+ // rdx: receiver (a JSObject)
+ // rbx: elements array
+ // rdi: untagged key
+ __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
+ __ j(not_equal, &slow);
+ // No more branches to slow case on this path.
+
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays basically undefined. For more
+ // reproducible behavior, convert these to zero.
+ __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rdi: untagged index
// rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ cvttsd2si(rdx, xmm0);
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ cvttsd2si(rdx, xmm0);
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray: {
- // Convert to int64, so that NaN and infinities become
- // 0x8000000000000000, which is zero mod 2^32.
- __ cvttsd2siq(rdx, xmm0);
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
+ // xmm0: value (as double)
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ __ cvtsd2ss(xmm0, xmm0);
+ __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
+ __ ret(0);
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
+ __ ret(0);
+ } else {
+ // Perform float-to-int conversion with truncation (round-to-zero)
+ // behavior.
+
+ // Convert to int32 and store the low byte/word.
+ // If the value is NaN or +/-infinity, the result is 0x80000000,
+ // which is automatically zero when taken mod 2^n, n < 32.
+ // rdx: value (converted to an untagged integer)
+ // rdi: untagged index
+ // rbx: base pointer of external storage
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ cvttsd2si(rdx, xmm0);
+ __ movb(Operand(rbx, rdi, times_1, 0), rdx);
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ cvttsd2si(rdx, xmm0);
+ __ movw(Operand(rbx, rdi, times_2, 0), rdx);
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ // Convert to int64, so that NaN and infinities become
+ // 0x8000000000000000, which is zero mod 2^32.
+ __ cvttsd2siq(rdx, xmm0);
+ __ movl(Operand(rbx, rdi, times_4, 0), rdx);
+ break;
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
}
- default:
- UNREACHABLE();
- break;
+ __ ret(0);
}
- __ ret(0);
}
// Slow case: call runtime.
@@ -3487,21 +3475,116 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
- __ Push(Smi::FromInt(NONE)); // PropertyAttributes
- __ Push(Smi::FromInt(
- Code::ExtractExtraICStateFromFlags(flags) & kStrictMode));
- __ push(rbx); // return address
+ Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ jmp(ic, RelocInfo::CODE_TARGET);
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+ // Miss case: call runtime.
+ __ bind(&miss_force_generic);
- return GetCode(flags);
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label miss_force_generic;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller not to be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rax, &miss_force_generic);
+
+ // Get the elements array.
+ __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ AssertFastElements(rcx);
+
+ // Check that the key is within bounds.
+ __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ j(above_equal, &miss_force_generic);
+
+ // Load the result and make sure it's not the hole.
+ SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
+ __ movq(rbx, FieldOperand(rcx,
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &miss_force_generic);
+ __ movq(rax, rbx);
+ __ ret(0);
+
+ __ bind(&miss_force_generic);
+ Code* code = masm->isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_MissForceGeneric);
+ Handle<Code> ic(code);
+ __ jmp(ic, RelocInfo::CODE_TARGET);
}
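
GenerateLoadFastElement above bails out to the generic IC whenever it reads
the hole sentinel: a hole means the element is absent and the lookup must
continue up the prototype chain, which this fast stub cannot do. The shape
of the check, as an editor's sketch with a placeholder sentinel (not V8's
actual hole representation):

    #include <cstddef>
    #include <cstdint>

    static const intptr_t kTheHole = -1;  // stand-in for V8's hole value

    // Returns false on any condition the stub treats as a miss.
    static bool TryLoadFastElement(const intptr_t* elements, size_t length,
                                   size_t index, intptr_t* result) {
      if (index >= length) return false;    // out of bounds -> miss
      intptr_t value = elements[index];
      if (value == kTheHole) return false;  // hole -> force generic IC
      *result = value;
      return true;
    }
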
+
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+ bool is_js_array) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label miss_force_generic;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller not to be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rcx, &miss_force_generic);
+
+ // Get the elements array and make sure it is a fast element array, not copy-on-write (COW).
+ __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &miss_force_generic);
+
+ // Check that the key is within bounds.
+ if (is_js_array) {
+ __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+ __ j(above_equal, &miss_force_generic);
+ } else {
+ __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
+ __ j(above_equal, &miss_force_generic);
+ }
+
+ // Do the store and update the write barrier. Make sure to preserve
+ // the value in register rax.
+ __ movq(rdx, rax);
+ __ SmiToInteger32(rcx, rcx);
+ __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ rax);
+ __ RecordWrite(rdi, 0, rdx, rcx);
+
+ // Done.
+ __ ret(0);
+
+ // Handle store cache miss.
+ __ bind(&miss_force_generic);
+ Handle<Code> ic_force_generic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+}
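
The RecordWrite call above is the write barrier: after storing a heap
pointer into the elements array, the GC's remembered set must learn about
the slot so a later collection can find old-to-new references without
scanning the whole heap. A conceptual sketch (hypothetical types, not V8's
API):

    #include <unordered_set>

    struct Heap {
      std::unordered_set<void**> remembered_set;
      void RecordSlot(void** slot) { remembered_set.insert(slot); }
    };

    static void StoreWithBarrier(Heap* heap, void** slot, void* value) {
      *slot = value;           // the movq into the elements array
      heap->RecordSlot(slot);  // RecordWrite in the stub
    }
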
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/virtual-frame-x64.cc b/deps/v8/src/x64/virtual-frame-x64.cc
deleted file mode 100644
index c4d7e6566..000000000
--- a/deps/v8/src/x64/virtual-frame-x64.cc
+++ /dev/null
@@ -1,1292 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::Enter() {
- // Registers live on entry to a JS frame:
- // rsp: stack pointer, points to return address from this function.
- // rbp: base pointer, points to previous JS, ArgumentsAdaptor, or
- // Trampoline frame.
- // rsi: context of this function call.
- // rdi: pointer to this function object.
- Comment cmnt(masm(), "[ Enter JS frame");
-
-#ifdef DEBUG
- if (FLAG_debug_code) {
- // Verify that rdi contains a JS function. The following code
- // relies on rax being available for use.
- Condition not_smi = NegateCondition(masm()->CheckSmi(rdi));
- __ Check(not_smi,
- "VirtualFrame::Enter - rdi is not a function (smi check).");
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
- __ Check(equal,
- "VirtualFrame::Enter - rdi is not a function (map check).");
- }
-#endif
-
- EmitPush(rbp);
-
- __ movq(rbp, rsp);
-
- // Store the context in the frame. The context is kept in rsi and a
- // copy is stored in the frame. The external reference to rsi
- // remains.
- EmitPush(rsi);
-
- // Store the function in the frame. The frame owns the register
- // reference now (i.e., it can keep it in rdi or spill it later).
- Push(rdi);
- SyncElementAt(element_count() - 1);
- cgen()->allocator()->Unuse(rdi);
-}
-
-
-void VirtualFrame::Exit() {
- Comment cmnt(masm(), "[ Exit JS frame");
- // Record the location of the JS exit code for patching when setting
- // break point.
- __ RecordJSReturn();
-
- // Avoid using the leave instruction here, because it is too
- // short. We need the return sequence to be at least the size of a
- // call instruction to support patching the exit code in the
- // debugger. See GenerateReturnSequence for the full return sequence.
- // TODO(X64): A patched call will be very long now. Make sure we
- // have enough room.
- __ movq(rsp, rbp);
- stack_pointer_ = frame_pointer();
- for (int i = element_count() - 1; i > stack_pointer_; i--) {
- FrameElement last = elements_.RemoveLast();
- if (last.is_register()) {
- Unuse(last.reg());
- }
- }
-
- EmitPop(rbp);
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
- int count = local_count();
- if (count > 0) {
- Comment cmnt(masm(), "[ Allocate space for locals");
- // The locals are initialized to a constant (the undefined value), but
- // we sync them with the actual frame to allocate space for spilling
- // them later. First sync everything above the stack pointer so we can
- // use pushes to allocate and initialize the locals.
- SyncRange(stack_pointer_ + 1, element_count() - 1);
- Handle<Object> undefined = Factory::undefined_value();
- FrameElement initial_value =
- FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
- if (count < kLocalVarBound) {
- // For fewer locals the unrolled loop is more compact.
-
- // Hope for one of the first eight registers, where the push operation
- // takes only one byte (kScratchRegister needs the REX.W bit).
- Result tmp = cgen()->allocator()->Allocate();
- ASSERT(tmp.is_valid());
- __ movq(tmp.reg(), undefined, RelocInfo::EMBEDDED_OBJECT);
- for (int i = 0; i < count; i++) {
- __ push(tmp.reg());
- }
- } else {
- // For more locals a loop in generated code is more compact.
- Label alloc_locals_loop;
- Result cnt = cgen()->allocator()->Allocate();
- ASSERT(cnt.is_valid());
- __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
-#ifdef DEBUG
- Label loop_size;
- __ bind(&loop_size);
-#endif
- if (is_uint8(count)) {
- // Loading imm8 is shorter than loading imm32.
- // Load only the partial byte register, and use decb below.
- __ movb(cnt.reg(), Immediate(count));
- } else {
- __ movl(cnt.reg(), Immediate(count));
- }
- __ bind(&alloc_locals_loop);
- __ push(kScratchRegister);
- if (is_uint8(count)) {
- __ decb(cnt.reg());
- } else {
- __ decl(cnt.reg());
- }
- __ j(not_zero, &alloc_locals_loop);
-#ifdef DEBUG
- CHECK(masm()->SizeOfCodeGeneratedSince(&loop_size) < kLocalVarBound);
-#endif
- }
- for (int i = 0; i < count; i++) {
- elements_.Add(initial_value);
- stack_pointer_++;
- }
- }
-}
-
-
-void VirtualFrame::SaveContextRegister() {
- ASSERT(elements_[context_index()].is_memory());
- __ movq(Operand(rbp, fp_relative(context_index())), rsi);
-}
-
-
-void VirtualFrame::RestoreContextRegister() {
- ASSERT(elements_[context_index()].is_memory());
- __ movq(rsi, Operand(rbp, fp_relative(context_index())));
-}
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ lea(temp.reg(), ParameterAt(-1));
- Push(&temp);
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_--;
- elements_.RemoveLast();
- __ pop(reg);
-}
-
-
-void VirtualFrame::EmitPop(const Operand& operand) {
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_--;
- elements_.RemoveLast();
- __ pop(operand);
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(reg);
-}
-
-
-void VirtualFrame::EmitPush(const Operand& operand, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(operand);
-}
-
-
-void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(immediate);
-}
-
-
-void VirtualFrame::EmitPush(Smi* smi_value) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(TypeInfo::Smi()));
- stack_pointer_++;
- __ Push(smi_value);
-}
-
-
-void VirtualFrame::EmitPush(Handle<Object> value) {
- ASSERT(stack_pointer_ == element_count() - 1);
- TypeInfo info = TypeInfo::TypeFromValue(value);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ Push(value);
-}
-
-
-void VirtualFrame::EmitPush(Heap::RootListIndex index, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ PushRoot(index);
-}
-
-
-void VirtualFrame::Push(Expression* expr) {
- ASSERT(expr->IsTrivial());
-
- Literal* lit = expr->AsLiteral();
- if (lit != NULL) {
- Push(lit->handle());
- return;
- }
-
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL) {
- Slot* slot = proxy->var()->AsSlot();
- if (slot->type() == Slot::LOCAL) {
- PushLocalAt(slot->index());
- return;
- }
- if (slot->type() == Slot::PARAMETER) {
- PushParameterAt(slot->index());
- return;
- }
- }
- UNREACHABLE();
-}
-
-
-void VirtualFrame::Push(Handle<Object> value) {
- if (ConstantPoolOverflowed()) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- if (value->IsSmi()) {
- __ Move(temp.reg(), Smi::cast(*value));
- } else {
- __ movq(temp.reg(), value, RelocInfo::EMBEDDED_OBJECT);
- }
- Push(&temp);
- } else {
- FrameElement element =
- FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
- elements_.Add(element);
- }
-}
-
-
-void VirtualFrame::Drop(int count) {
- ASSERT(count >= 0);
- ASSERT(height() >= count);
- int num_virtual_elements = (element_count() - 1) - stack_pointer_;
-
- // Emit code to lower the stack pointer if necessary.
- if (num_virtual_elements < count) {
- int num_dropped = count - num_virtual_elements;
- stack_pointer_ -= num_dropped;
- __ addq(rsp, Immediate(num_dropped * kPointerSize));
- }
-
- // Discard elements from the virtual frame and free any registers.
- for (int i = 0; i < count; i++) {
- FrameElement dropped = elements_.RemoveLast();
- if (dropped.is_register()) {
- Unuse(dropped.reg());
- }
- }
-}
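-
The deleted Drop above illustrates the virtual frame's laziness: elements
that were never materialized on the machine stack cost nothing to drop,
and rsp is only adjusted for the ones that were. The bookkeeping in plain
C++ (editor's sketch, not the emitted code):

    #include <vector>

    struct Element { bool is_register; };

    // stack_pointer indexes the highest element that exists on the real
    // stack; everything above it lives only in the virtual frame.
    static void Drop(std::vector<Element>* elements, int* stack_pointer,
                     int count) {
      int virtual_only =
          static_cast<int>(elements->size()) - 1 - *stack_pointer;
      if (virtual_only < count) {
        // Emit: addq rsp, (count - virtual_only) * kPointerSize.
        *stack_pointer -= (count - virtual_only);
      }
      elements->resize(elements->size() - count);
    }
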
-
-
-int VirtualFrame::InvalidateFrameSlotAt(int index) {
- FrameElement original = elements_[index];
-
- // Is this element the backing store of any copies?
- int new_backing_index = kIllegalIndex;
- if (original.is_copied()) {
- // Verify it is copied, and find first copy.
- for (int i = index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == index) {
- new_backing_index = i;
- break;
- }
- }
- }
-
- if (new_backing_index == kIllegalIndex) {
- // No copies found, return kIllegalIndex.
- if (original.is_register()) {
- Unuse(original.reg());
- }
- elements_[index] = FrameElement::InvalidElement();
- return kIllegalIndex;
- }
-
- // This is the backing store of copies.
- Register backing_reg;
- if (original.is_memory()) {
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- Use(fresh.reg(), new_backing_index);
- backing_reg = fresh.reg();
- __ movq(backing_reg, Operand(rbp, fp_relative(index)));
- } else {
- // The original was in a register.
- backing_reg = original.reg();
- set_register_location(backing_reg, new_backing_index);
- }
- // Invalidate the element at index.
- elements_[index] = FrameElement::InvalidElement();
- // Set the new backing element.
- if (elements_[new_backing_index].is_synced()) {
- elements_[new_backing_index] =
- FrameElement::RegisterElement(backing_reg,
- FrameElement::SYNCED,
- original.type_info());
- } else {
- elements_[new_backing_index] =
- FrameElement::RegisterElement(backing_reg,
- FrameElement::NOT_SYNCED,
- original.type_info());
- }
- // Update the other copies.
- for (int i = new_backing_index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == index) {
- elements_[i].set_index(new_backing_index);
- elements_[new_backing_index].set_copied();
- }
- }
- return new_backing_index;
-}
-
-
-void VirtualFrame::TakeFrameSlotAt(int index) {
- ASSERT(index >= 0);
- ASSERT(index <= element_count());
- FrameElement original = elements_[index];
- int new_backing_store_index = InvalidateFrameSlotAt(index);
- if (new_backing_store_index != kIllegalIndex) {
- elements_.Add(CopyElementAt(new_backing_store_index));
- return;
- }
-
- switch (original.type()) {
- case FrameElement::MEMORY: {
- // Emit code to load the original element's data into a register.
- // Push that register as a FrameElement on top of the frame.
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- FrameElement new_element =
- FrameElement::RegisterElement(fresh.reg(),
- FrameElement::NOT_SYNCED,
- original.type_info());
- Use(fresh.reg(), element_count());
- elements_.Add(new_element);
- __ movq(fresh.reg(), Operand(rbp, fp_relative(index)));
- break;
- }
- case FrameElement::REGISTER:
- Use(original.reg(), element_count());
- // Fall through.
- case FrameElement::CONSTANT:
- case FrameElement::COPY:
- original.clear_sync();
- elements_.Add(original);
- break;
- case FrameElement::INVALID:
- UNREACHABLE();
- break;
- }
-}
-
-
-void VirtualFrame::StoreToFrameSlotAt(int index) {
- // Store the value on top of the frame to the virtual frame slot at
- // a given index. The value on top of the frame is left in place.
- // This is a duplicating operation, so it can create copies.
- ASSERT(index >= 0);
- ASSERT(index < element_count());
-
- int top_index = element_count() - 1;
- FrameElement top = elements_[top_index];
- FrameElement original = elements_[index];
- if (top.is_copy() && top.index() == index) return;
- ASSERT(top.is_valid());
-
- InvalidateFrameSlotAt(index);
-
- // InvalidateFrameSlotAt can potentially change any frame element, due
- // to spilling registers to allocate temporaries in order to preserve
- // the copy-on-write semantics of aliased elements. Reload top from
- // the frame.
- top = elements_[top_index];
-
- if (top.is_copy()) {
- // There are two cases based on the relative positions of the
- // stored-to slot and the backing slot of the top element.
- int backing_index = top.index();
- ASSERT(backing_index != index);
- if (backing_index < index) {
- // 1. The top element is a copy of a slot below the stored-to
- // slot. The stored-to slot becomes an unsynced copy of that
- // same backing slot.
- elements_[index] = CopyElementAt(backing_index);
- } else {
- // 2. The top element is a copy of a slot above the stored-to
- // slot. The stored-to slot becomes the new (unsynced) backing
- // slot and both the top element and the element at the former
- // backing slot become copies of it. The sync state of the top
- // and former backing elements is preserved.
- FrameElement backing_element = elements_[backing_index];
- ASSERT(backing_element.is_memory() || backing_element.is_register());
- if (backing_element.is_memory()) {
- // Because sets of copies are canonicalized to be backed by
- // their lowest frame element, and because memory frame
- // elements are backed by the corresponding stack address, we
- // have to move the actual value down in the stack.
- //
- // TODO(209): consider allocating the stored-to slot to the
- // temp register. Alternatively, allow copies to appear in
- // any order in the frame and lazily move the value down to
- // the slot.
- __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
- __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
- } else {
- set_register_location(backing_element.reg(), index);
- if (backing_element.is_synced()) {
- // If the element is a register, we will not actually move
- // anything on the stack but only update the virtual frame
- // element.
- backing_element.clear_sync();
- }
- }
- elements_[index] = backing_element;
-
- // The old backing element becomes a copy of the new backing
- // element.
- FrameElement new_element = CopyElementAt(index);
- elements_[backing_index] = new_element;
- if (backing_element.is_synced()) {
- elements_[backing_index].set_sync();
- }
-
- // All the copies of the old backing element (including the top
- // element) become copies of the new backing element.
- for (int i = backing_index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
- elements_[i].set_index(index);
- }
- }
- }
- return;
- }
-
- // Move the top element to the stored-to slot and replace it (the
- // top element) with a copy.
- elements_[index] = top;
- if (top.is_memory()) {
- // TODO(209): consider allocating the stored-to slot to the temp
- // register. Alternatively, allow copies to appear in any order
- // in the frame and lazily move the value down to the slot.
- FrameElement new_top = CopyElementAt(index);
- new_top.set_sync();
- elements_[top_index] = new_top;
-
- // The sync state of the former top element is correct (synced).
- // Emit code to move the value down in the frame.
- __ movq(kScratchRegister, Operand(rsp, 0));
- __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
- } else if (top.is_register()) {
- set_register_location(top.reg(), index);
- // The stored-to slot has the (unsynced) register reference and
- // the top element becomes a copy. The sync state of the top is
- // preserved.
- FrameElement new_top = CopyElementAt(index);
- if (top.is_synced()) {
- new_top.set_sync();
- elements_[index].clear_sync();
- }
- elements_[top_index] = new_top;
- } else {
- // The stored-to slot holds the same value as the top but
- // unsynced. (We do not have copies of constants yet.)
- ASSERT(top.is_constant());
- elements_[index].clear_sync();
- }
-}
-
-
-void VirtualFrame::MakeMergable() {
- for (int i = 0; i < element_count(); i++) {
- FrameElement element = elements_[i];
-
- // In all cases we have to reset the number type information
- // to unknown for a mergable frame because of incoming back edges.
- if (element.is_constant() || element.is_copy()) {
- if (element.is_synced()) {
- // Just spill.
- elements_[i] = FrameElement::MemoryElement(TypeInfo::Unknown());
- } else {
- // Allocate to a register.
- FrameElement backing_element; // Invalid if not a copy.
- if (element.is_copy()) {
- backing_element = elements_[element.index()];
- }
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid()); // A register was spilled if all were in use.
- elements_[i] =
- FrameElement::RegisterElement(fresh.reg(),
- FrameElement::NOT_SYNCED,
- TypeInfo::Unknown());
- Use(fresh.reg(), i);
-
- // Emit a move.
- if (element.is_constant()) {
- __ Move(fresh.reg(), element.handle());
- } else {
- ASSERT(element.is_copy());
- // Copies are only backed by register or memory locations.
- if (backing_element.is_register()) {
- // The backing store may have been spilled by allocating,
- // but that's OK. If it was, the value is right where we
- // want it.
- if (!fresh.reg().is(backing_element.reg())) {
- __ movq(fresh.reg(), backing_element.reg());
- }
- } else {
- ASSERT(backing_element.is_memory());
- __ movq(fresh.reg(), Operand(rbp, fp_relative(element.index())));
- }
- }
- }
- // No need to set the copied flag --- there are no copies.
- } else {
- // Clear the copy flag of non-constant, non-copy elements.
- // They cannot be copied because copies are not allowed.
- // The copy flag is not relied on before the end of this loop,
- // including when registers are spilled.
- elements_[i].clear_copied();
- elements_[i].set_type_info(TypeInfo::Unknown());
- }
- }
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
- Comment cmnt(masm(), "[ Merge frame");
- // We should always be merging the code generator's current frame to an
- // expected frame.
- ASSERT(cgen()->frame() == this);
-
- // Adjust the stack pointer upward (toward the top of the virtual
- // frame) if necessary.
- if (stack_pointer_ < expected->stack_pointer_) {
- int difference = expected->stack_pointer_ - stack_pointer_;
- stack_pointer_ = expected->stack_pointer_;
- __ subq(rsp, Immediate(difference * kPointerSize));
- }
-
- MergeMoveRegistersToMemory(expected);
- MergeMoveRegistersToRegisters(expected);
- MergeMoveMemoryToRegisters(expected);
-
- // Adjust the stack pointer downward if necessary.
- if (stack_pointer_ > expected->stack_pointer_) {
- int difference = stack_pointer_ - expected->stack_pointer_;
- stack_pointer_ = expected->stack_pointer_;
- __ addq(rsp, Immediate(difference * kPointerSize));
- }
-
- // At this point, the frames should be identical.
- ASSERT(Equals(expected));
-}
-
-
-void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
- ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
- // Move registers, constants, and copies to memory. Perform moves
- // from the top downward in the frame in order to leave the backing
- // stores of copies in registers.
- for (int i = element_count() - 1; i >= 0; i--) {
- FrameElement target = expected->elements_[i];
- if (target.is_register()) continue; // Handle registers later.
- if (target.is_memory()) {
- FrameElement source = elements_[i];
- switch (source.type()) {
- case FrameElement::INVALID:
- // Not a legal merge move.
- UNREACHABLE();
- break;
-
- case FrameElement::MEMORY:
- // Already in place.
- break;
-
- case FrameElement::REGISTER:
- Unuse(source.reg());
- if (!source.is_synced()) {
- __ movq(Operand(rbp, fp_relative(i)), source.reg());
- }
- break;
-
- case FrameElement::CONSTANT:
- if (!source.is_synced()) {
- __ Move(Operand(rbp, fp_relative(i)), source.handle());
- }
- break;
-
- case FrameElement::COPY:
- if (!source.is_synced()) {
- int backing_index = source.index();
- FrameElement backing_element = elements_[backing_index];
- if (backing_element.is_memory()) {
- __ movq(kScratchRegister,
- Operand(rbp, fp_relative(backing_index)));
- __ movq(Operand(rbp, fp_relative(i)), kScratchRegister);
- } else {
- ASSERT(backing_element.is_register());
- __ movq(Operand(rbp, fp_relative(i)), backing_element.reg());
- }
- }
- break;
- }
- }
- elements_[i] = target;
- }
-}
-
-
-void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
- // We have already done X-to-memory moves.
- ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- // Move the right value into register i if it is currently in a register.
- int index = expected->register_location(i);
- int use_index = register_location(i);
- // Skip if register i is unused in the target or else if source is
- // not a register (this is not a register-to-register move).
- if (index == kIllegalIndex || !elements_[index].is_register()) continue;
-
- Register target = RegisterAllocator::ToRegister(i);
- Register source = elements_[index].reg();
- if (index != use_index) {
- if (use_index == kIllegalIndex) { // Target is currently unused.
- // Copy contents of source from source to target.
- // Set frame element register to target.
- Use(target, index);
- Unuse(source);
- __ movq(target, source);
- } else {
- // Exchange contents of registers source and target.
- // Nothing except the register backing use_index has changed.
- elements_[use_index].set_reg(source);
- set_register_location(target, index);
- set_register_location(source, use_index);
- __ xchg(source, target);
- }
- }
-
- if (!elements_[index].is_synced() &&
- expected->elements_[index].is_synced()) {
- __ movq(Operand(rbp, fp_relative(index)), target);
- }
- elements_[index] = expected->elements_[index];
- }
-}
-
-
-void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
- // Move memory, constants, and copies to registers. This is the
- // final step and since it is not done from the bottom up, but in
- // register code order, we have special code to ensure that the backing
- // elements of copies are in their correct locations when we
- // encounter the copies.
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- int index = expected->register_location(i);
- if (index != kIllegalIndex) {
- FrameElement source = elements_[index];
- FrameElement target = expected->elements_[index];
- Register target_reg = RegisterAllocator::ToRegister(i);
- ASSERT(target.reg().is(target_reg));
- switch (source.type()) {
- case FrameElement::INVALID: // Fall through.
- UNREACHABLE();
- break;
- case FrameElement::REGISTER:
- ASSERT(source.Equals(target));
- // Go to next iteration. Skips Use(target_reg) and syncing
- // below. It is safe to skip syncing because a target
- // register frame element would only be synced if all source
- // elements were.
- continue;
- break;
- case FrameElement::MEMORY:
- ASSERT(index <= stack_pointer_);
- __ movq(target_reg, Operand(rbp, fp_relative(index)));
- break;
-
- case FrameElement::CONSTANT:
- __ Move(target_reg, source.handle());
- break;
-
- case FrameElement::COPY: {
- int backing_index = source.index();
- FrameElement backing = elements_[backing_index];
- ASSERT(backing.is_memory() || backing.is_register());
- if (backing.is_memory()) {
- ASSERT(backing_index <= stack_pointer_);
- // Optimization: if the backing store should also move to a
- // register, move the backing store to its register first.
- if (expected->elements_[backing_index].is_register()) {
- FrameElement new_backing = expected->elements_[backing_index];
- Register new_backing_reg = new_backing.reg();
- ASSERT(!is_used(new_backing_reg));
- elements_[backing_index] = new_backing;
- Use(new_backing_reg, backing_index);
- __ movq(new_backing_reg,
- Operand(rbp, fp_relative(backing_index)));
- __ movq(target_reg, new_backing_reg);
- } else {
- __ movq(target_reg, Operand(rbp, fp_relative(backing_index)));
- }
- } else {
- __ movq(target_reg, backing.reg());
- }
- }
- }
- // Ensure the proper sync state.
- if (target.is_synced() && !source.is_synced()) {
- __ movq(Operand(rbp, fp_relative(index)), target_reg);
- }
- Use(target_reg, index);
- elements_[index] = target;
- }
- }
-}
-
-
-Result VirtualFrame::Pop() {
- FrameElement element = elements_.RemoveLast();
- int index = element_count();
- ASSERT(element.is_valid());
-
- // Get number type information of the result.
- TypeInfo info;
- if (!element.is_copy()) {
- info = element.type_info();
- } else {
- info = elements_[element.index()].type_info();
- }
-
- bool pop_needed = (stack_pointer_ == index);
- if (pop_needed) {
- stack_pointer_--;
- if (element.is_memory()) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ pop(temp.reg());
- temp.set_type_info(info);
- return temp;
- }
-
- __ addq(rsp, Immediate(kPointerSize));
- }
- ASSERT(!element.is_memory());
-
- // The top element is a register, constant, or a copy. Unuse
- // registers and follow copies to their backing store.
- if (element.is_register()) {
- Unuse(element.reg());
- } else if (element.is_copy()) {
- ASSERT(element.index() < index);
- index = element.index();
- element = elements_[index];
- }
- ASSERT(!element.is_copy());
-
- // The element is memory, a register, or a constant.
- if (element.is_memory()) {
- // Memory elements could only be the backing store of a copy.
- // Allocate the original to a register.
- ASSERT(index <= stack_pointer_);
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- Use(temp.reg(), index);
- FrameElement new_element =
- FrameElement::RegisterElement(temp.reg(),
- FrameElement::SYNCED,
- element.type_info());
- // Preserve the copy flag on the element.
- if (element.is_copied()) new_element.set_copied();
- elements_[index] = new_element;
- __ movq(temp.reg(), Operand(rbp, fp_relative(index)));
- return Result(temp.reg(), info);
- } else if (element.is_register()) {
- return Result(element.reg(), info);
- } else {
- ASSERT(element.is_constant());
- return Result(element.handle());
- }
-}
-
-
-Result VirtualFrame::RawCallStub(CodeStub* stub) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallStub(stub);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
- PrepareForCall(0, 0);
- arg->ToRegister(rax);
- arg->Unuse();
- return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
- PrepareForCall(0, 0);
-
- if (arg0->is_register() && arg0->reg().is(rax)) {
- if (arg1->is_register() && arg1->reg().is(rdx)) {
- // Wrong registers.
- __ xchg(rax, rdx);
- } else {
- // Register rdx is free for arg0, which frees rax for arg1.
- arg0->ToRegister(rdx);
- arg1->ToRegister(rax);
- }
- } else {
- // Register rax is free for arg1, which guarantees rdx is free for
- // arg0.
- arg1->ToRegister(rax);
- arg0->ToRegister(rdx);
- }
-
- arg0->Unuse();
- arg1->Unuse();
- return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallJSFunction(int arg_count) {
- Result function = Pop();
-
- // InvokeFunction requires function in rdi. Move it in there.
- function.ToRegister(rdi);
- function.Unuse();
-
- // +1 for receiver.
- PrepareForCall(arg_count + 1, arg_count + 1);
- ASSERT(cgen()->HasValidEntryRegisters());
- ParameterCount count(arg_count);
- __ InvokeFunction(rdi, count, CALL_FUNCTION);
- RestoreContextRegister();
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-void VirtualFrame::SyncElementBelowStackPointer(int index) {
- // Emit code to write elements below the stack pointer to their
- // (already allocated) stack address.
- ASSERT(index <= stack_pointer_);
- FrameElement element = elements_[index];
- ASSERT(!element.is_synced());
- switch (element.type()) {
- case FrameElement::INVALID:
- break;
-
- case FrameElement::MEMORY:
- // This function should not be called with synced elements.
- // (memory elements are always synced).
- UNREACHABLE();
- break;
-
- case FrameElement::REGISTER:
- __ movq(Operand(rbp, fp_relative(index)), element.reg());
- break;
-
- case FrameElement::CONSTANT:
- __ Move(Operand(rbp, fp_relative(index)), element.handle());
- break;
-
- case FrameElement::COPY: {
- int backing_index = element.index();
- FrameElement backing_element = elements_[backing_index];
- if (backing_element.is_memory()) {
- __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
- __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
- } else {
- ASSERT(backing_element.is_register());
- __ movq(Operand(rbp, fp_relative(index)), backing_element.reg());
- }
- break;
- }
- }
- elements_[index].set_sync();
-}
-
-
-void VirtualFrame::SyncElementByPushing(int index) {
- // Sync an element of the frame that is just above the stack pointer
- // by pushing it.
- ASSERT(index == stack_pointer_ + 1);
- stack_pointer_++;
- FrameElement element = elements_[index];
-
- switch (element.type()) {
- case FrameElement::INVALID:
- __ Push(Smi::FromInt(0));
- break;
-
- case FrameElement::MEMORY:
- // No memory elements exist above the stack pointer.
- UNREACHABLE();
- break;
-
- case FrameElement::REGISTER:
- __ push(element.reg());
- break;
-
- case FrameElement::CONSTANT:
- __ Move(kScratchRegister, element.handle());
- __ push(kScratchRegister);
- break;
-
- case FrameElement::COPY: {
- int backing_index = element.index();
- FrameElement backing = elements_[backing_index];
- ASSERT(backing.is_memory() || backing.is_register());
- if (backing.is_memory()) {
- __ push(Operand(rbp, fp_relative(backing_index)));
- } else {
- __ push(backing.reg());
- }
- break;
- }
- }
- elements_[index].set_sync();
-}
-
-
-// Clear the dirty bits for the range of elements in
- // [min(stack_pointer_ + 1, begin), end].
-void VirtualFrame::SyncRange(int begin, int end) {
- ASSERT(begin >= 0);
- ASSERT(end < element_count());
- // Sync elements below the range if they have not been materialized
- // on the stack.
- int start = Min(begin, stack_pointer_ + 1);
- int end_or_stack_pointer = Min(stack_pointer_, end);
- // Emit normal push instructions for elements above stack pointer
- // and use mov instructions if we are below stack pointer.
- int i = start;
-
- while (i <= end_or_stack_pointer) {
- if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
- i++;
- }
- while (i <= end) {
- SyncElementByPushing(i);
- i++;
- }
-}
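-
SyncRange's two loops above mirror the frame's invariant: slots at or
below the hardware stack pointer already have stack addresses, so they are
synced with plain moves, while slots above it must be materialized with
pushes, which also advance the stack pointer. In outline (editor's
sketch):

    #include <algorithm>
    #include <vector>

    struct Slot { bool synced; };

    static void SyncRange(std::vector<Slot>* slots, int* stack_pointer,
                          int begin, int end) {
      int i = std::min(begin, *stack_pointer + 1);
      for (; i <= std::min(*stack_pointer, end); i++) {
        if (!(*slots)[i].synced) (*slots)[i].synced = true;  // emit mov
      }
      for (; i <= end; i++) {
        (*slots)[i].synced = true;  // emit push
        ++*stack_pointer;
      }
    }
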
-
-
-//------------------------------------------------------------------------------
-// Virtual frame stub and IC calling functions.
-
-Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(f, arg_count);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(id, arg_count);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
- PrepareForCall(0, 0);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ DebugBreak();
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
-}
-#endif
-
-
-Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ InvokeBuiltin(id, flag);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ Call(code, rmode);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-// This function assumes that the only results that could be in a_reg or b_reg
-// are a and b. Other results can be live, but must not be in a_reg or b_reg.
-void VirtualFrame::MoveResultsToRegisters(Result* a,
- Result* b,
- Register a_reg,
- Register b_reg) {
- ASSERT(!a_reg.is(b_reg));
- // Assert that cgen()->allocator()->count(a_reg) is accounted for by a and b.
- ASSERT(cgen()->allocator()->count(a_reg) <= 2);
- ASSERT(cgen()->allocator()->count(a_reg) != 2 || a->reg().is(a_reg));
- ASSERT(cgen()->allocator()->count(a_reg) != 2 || b->reg().is(a_reg));
- ASSERT(cgen()->allocator()->count(a_reg) != 1 ||
- (a->is_register() && a->reg().is(a_reg)) ||
- (b->is_register() && b->reg().is(a_reg)));
- // Assert that cgen()->allocator()->count(b_reg) is accounted for by a and b.
- ASSERT(cgen()->allocator()->count(b_reg) <= 2);
- ASSERT(cgen()->allocator()->count(b_reg) != 2 || a->reg().is(b_reg));
- ASSERT(cgen()->allocator()->count(b_reg) != 2 || b->reg().is(b_reg));
- ASSERT(cgen()->allocator()->count(b_reg) != 1 ||
- (a->is_register() && a->reg().is(b_reg)) ||
- (b->is_register() && b->reg().is(b_reg)));
-
- if (a->is_register() && a->reg().is(a_reg)) {
- b->ToRegister(b_reg);
- } else if (!cgen()->allocator()->is_used(a_reg)) {
- a->ToRegister(a_reg);
- b->ToRegister(b_reg);
- } else if (cgen()->allocator()->is_used(b_reg)) {
- // a must be in b_reg, b in a_reg.
- __ xchg(a_reg, b_reg);
- // Results a and b will be invalidated, so it is ok if they are switched.
- } else {
- b->ToRegister(b_reg);
- a->ToRegister(a_reg);
- }
- a->Unuse();
- b->Unuse();
-}
-
-
-Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
- // Name and receiver are on the top of the frame. Both are dropped.
- // The IC expects name in rcx and receiver in rax.
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- Result name = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0);
- MoveResultsToRegisters(&name, &receiver, rcx, rax);
-
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
- // Key and receiver are on top of the frame. Put them in rax and rdx.
- Result key = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0);
- MoveResultsToRegisters(&key, &receiver, rax, rdx);
-
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallStoreIC(Handle<String> name,
- bool is_contextual,
- StrictModeFlag strict_mode) {
- // Value and (if not contextual) receiver are on top of the frame.
- // The IC expects name in rcx, value in rax, and receiver in rdx.
- Handle<Code> ic(Builtins::builtin(
- (strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- Result value = Pop();
- RelocInfo::Mode mode;
- if (is_contextual) {
- PrepareForCall(0, 0);
- value.ToRegister(rax);
- __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- value.Unuse();
- mode = RelocInfo::CODE_TARGET_CONTEXT;
- } else {
- Result receiver = Pop();
- PrepareForCall(0, 0);
- MoveResultsToRegisters(&value, &receiver, rax, rdx);
- mode = RelocInfo::CODE_TARGET;
- }
- __ Move(rcx, name);
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
- // Value, key, and receiver are on the top of the frame. The IC
- // expects value in rax, key in rcx, and receiver in rdx.
- Result value = Pop();
- Result key = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0);
- if (!cgen()->allocator()->is_used(rax) ||
- (value.is_register() && value.reg().is(rax))) {
- if (!cgen()->allocator()->is_used(rax)) {
- value.ToRegister(rax);
- }
- MoveResultsToRegisters(&key, &receiver, rcx, rdx);
- value.Unuse();
- } else if (!cgen()->allocator()->is_used(rcx) ||
- (key.is_register() && key.reg().is(rcx))) {
- if (!cgen()->allocator()->is_used(rcx)) {
- key.ToRegister(rcx);
- }
- MoveResultsToRegisters(&value, &receiver, rax, rdx);
- key.Unuse();
- } else if (!cgen()->allocator()->is_used(rdx) ||
- (receiver.is_register() && receiver.reg().is(rdx))) {
- if (!cgen()->allocator()->is_used(rdx)) {
- receiver.ToRegister(rdx);
- }
- MoveResultsToRegisters(&key, &value, rcx, rax);
- receiver.Unuse();
- } else {
- // All three registers are used, and no value is in the correct place.
- // We have one of the two circular permutations of rax, rcx, rdx.
- ASSERT(value.is_register());
- if (value.reg().is(rcx)) {
- __ xchg(rax, rdx);
- __ xchg(rax, rcx);
- } else {
- __ xchg(rax, rcx);
- __ xchg(rax, rdx);
- }
- value.Unuse();
- key.Unuse();
- receiver.Unuse();
- }
-
- Handle<Code> ic(Builtins::builtin(
- (strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
-}
-
-
-Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
- int arg_count,
- int loop_nesting) {
- // Function name, arguments, and receiver are found on top of the frame
- // and dropped by the call. The IC expects the name in rcx and the rest
- // on the stack, and drops them all.
- InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
- Result name = Pop();
- // Spill args, receiver, and function. The call will drop args and
- // receiver.
- PrepareForCall(arg_count + 1, arg_count + 1);
- name.ToRegister(rcx);
- name.Unuse();
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
- int arg_count,
- int loop_nesting) {
- // Function name, arguments, and receiver are found on top of the frame
- // and dropped by the call. The IC expects the name in rcx and the rest
- // on the stack, and drops them all.
- InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic =
- StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
- Result name = Pop();
- // Spill args, receiver, and function. The call will drop args and
- // receiver.
- PrepareForCall(arg_count + 1, arg_count + 1);
- name.ToRegister(rcx);
- name.Unuse();
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallConstructor(int arg_count) {
- // Arguments, receiver, and function are on top of the frame. The
- // IC expects arg count in rax, function in rdi, and the arguments
- // and receiver on the stack.
- Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
- // Duplicate the function before preparing the frame.
- PushElementAt(arg_count);
- Result function = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Spill function and args.
- function.ToRegister(rdi);
-
- // Constructors are called with the number of arguments in register
- // rax for now. Another option would be to have separate construct
- // call trampolines per different arguments counts encountered.
- Result num_args = cgen()->allocator()->Allocate(rax);
- ASSERT(num_args.is_valid());
- __ Set(num_args.reg(), arg_count);
-
- function.Unuse();
- num_args.Unuse();
- return RawCallCodeObject(ic, RelocInfo::CONSTRUCT_CALL);
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
- ASSERT(cgen()->HasValidEntryRegisters());
- // Grow the expression stack by handler size less one (the return
- // address is already pushed by a call instruction).
- Adjust(kHandlerSize - 1);
- __ PushTryHandler(IN_JAVASCRIPT, type);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/virtual-frame-x64.h b/deps/v8/src/x64/virtual-frame-x64.h
deleted file mode 100644
index 7396db17f..000000000
--- a/deps/v8/src/x64/virtual-frame-x64.h
+++ /dev/null
@@ -1,593 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_VIRTUAL_FRAME_X64_H_
-#define V8_X64_VIRTUAL_FRAME_X64_H_
-
-#include "type-info.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "codegen.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame. It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
- // A utility class to introduce a scope where the virtual frame is
- // expected to remain spilled. The constructor spills the code
- // generator's current frame, but no attempt is made to require it
- // to stay spilled. It is intended as documentation while the code
- // generator is being transformed.
- class SpilledScope BASE_EMBEDDED {
- public:
- SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
- ASSERT(cgen()->has_valid_frame());
- cgen()->frame()->SpillAll();
- cgen()->set_in_spilled_code(true);
- }
-
- ~SpilledScope() {
- cgen()->set_in_spilled_code(previous_state_);
- }
-
- private:
- bool previous_state_;
-
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
- };
-
- // An illegal index into the virtual frame.
- static const int kIllegalIndex = -1;
-
- // Construct an initial virtual frame on entry to a JS function.
- inline VirtualFrame();
-
- // Construct a virtual frame as a clone of an existing one.
- explicit inline VirtualFrame(VirtualFrame* original);
-
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
- MacroAssembler* masm() { return cgen()->masm(); }
-
- // Create a duplicate of an existing valid frame element.
- FrameElement CopyElementAt(int index,
- TypeInfo info = TypeInfo::Uninitialized());
-
- // The number of elements on the virtual frame.
- int element_count() { return elements_.length(); }
-
- // The height of the virtual expression stack.
- int height() {
- return element_count() - expression_base_index();
- }
-
- int register_location(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num];
- }
-
- inline int register_location(Register reg);
-
- inline void set_register_location(Register reg, int index);
-
- bool is_used(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num] != kIllegalIndex;
- }
-
- inline bool is_used(Register reg);
-
- // Add extra in-memory elements to the top of the frame to match an actual
-  // frame (e.g., the frame after an exception handler is pushed). No code is
- // emitted.
- void Adjust(int count);
-
-  // Forget count elements from the top of the frame, all in-memory
-  // (including synced), and adjust the stack pointer downward to
- // match an external frame effect (examples include a call removing
- // its arguments, and exiting a try/catch removing an exception
- // handler). No code will be emitted.
- void Forget(int count) {
- ASSERT(count >= 0);
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_ -= count;
- ForgetElements(count);
- }
-
- // Forget count elements from the top of the frame without adjusting
- // the stack pointer downward. This is used, for example, before
- // merging frames at break, continue, and return targets.
- void ForgetElements(int count);
-
- // Spill all values from the frame to memory.
- inline void SpillAll();
-
- // Spill all occurrences of a specific register from the frame.
- void Spill(Register reg) {
- if (is_used(reg)) SpillElementAt(register_location(reg));
- }
-
- // Spill all occurrences of an arbitrary register if possible. Return the
- // register spilled or no_reg if it was not possible to free any register
-  // (i.e., they all have frame-external references).
- Register SpillAnyRegister();
-
- // Spill the top element of the frame to memory.
- void SpillTop() { SpillElementAt(element_count() - 1); }
-
- // Sync the range of elements in [begin, end] with memory.
- void SyncRange(int begin, int end);
-
-  // Make this frame mergable, so that an arbitrary frame of the same
-  // height can be merged to it. Copies and constants are removed from
-  // the frame.
- void MakeMergable();
-
- // Prepare this virtual frame for merging to an expected frame by
- // performing some state changes that do not require generating
- // code. It is guaranteed that no code will be generated.
- void PrepareMergeTo(VirtualFrame* expected);
-
- // Make this virtual frame have a state identical to an expected virtual
- // frame. As a side effect, code may be emitted to make this frame match
- // the expected one.
- void MergeTo(VirtualFrame* expected);
-
- // Detach a frame from its code generator, perhaps temporarily. This
- // tells the register allocator that it is free to use frame-internal
- // registers. Used when the code generator's frame is switched from this
- // one to NULL by an unconditional jump.
- void DetachFromCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Unuse(i);
- }
- }
-
- // (Re)attach a frame to its code generator. This informs the register
- // allocator that the frame-internal register references are active again.
- // Used when a code generator's frame is switched from NULL to this one by
- // binding a label.
- void AttachToCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Use(i);
- }
- }
-
- // Emit code for the physical JS entry and exit frame sequences. After
-  // calling Enter, the virtual frame is ready for use; after calling
- // Exit it should not be used. Note that Enter does not allocate space in
- // the physical frame for storing frame-allocated locals.
- void Enter();
- void Exit();
-
- // Prepare for returning from the frame by spilling locals. This
- // avoids generating unnecessary merge code when jumping to the
- // shared return site. Emits code for spills.
- inline void PrepareForReturn();
-
-  // Number of local variables after which we use a loop for allocating.
- static const int kLocalVarBound = 14;
-
- // Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots();
-
- // An element of the expression stack as an assembly operand.
- Operand ElementAt(int index) const {
- return Operand(rsp, index * kPointerSize);
- }
-
- // Random-access store to a frame-top relative frame element. The result
- // becomes owned by the frame and is invalidated.
- void SetElementAt(int index, Result* value);
-
- // Set a frame element to a constant. The index is frame-top relative.
- inline void SetElementAt(int index, Handle<Object> value);
-
- void PushElementAt(int index) {
- PushFrameSlotAt(element_count() - index - 1);
- }
-
- void StoreToElementAt(int index) {
- StoreToFrameSlotAt(element_count() - index - 1);
- }
-
- // A frame-allocated local as an assembly operand.
- Operand LocalAt(int index) {
- ASSERT(0 <= index);
- ASSERT(index < local_count());
- return Operand(rbp, kLocal0Offset - index * kPointerSize);
- }
-
- // Push a copy of the value of a local frame slot on top of the frame.
- void PushLocalAt(int index) {
- PushFrameSlotAt(local0_index() + index);
- }
-
- // Push the value of a local frame slot on top of the frame and invalidate
- // the local slot. The slot should be written to before trying to read
- // from it again.
- void TakeLocalAt(int index) {
- TakeFrameSlotAt(local0_index() + index);
- }
-
- // Store the top value on the virtual frame into a local frame slot. The
- // value is left in place on top of the frame.
- void StoreToLocalAt(int index) {
- StoreToFrameSlotAt(local0_index() + index);
- }
-
- // Push the address of the receiver slot on the frame.
- void PushReceiverSlotAddress();
-
- // Push the function on top of the frame.
- void PushFunction() { PushFrameSlotAt(function_index()); }
-
-  // Save the value of the rsi register to the context frame slot.
- void SaveContextRegister();
-
-  // Restore the rsi register from the value of the context frame
-  // slot.
- void RestoreContextRegister();
-
- // A parameter as an assembly operand.
- Operand ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index < parameter_count());
- return Operand(rbp, (1 + parameter_count() - index) * kPointerSize);
- }
-
- // Push a copy of the value of a parameter frame slot on top of the frame.
- void PushParameterAt(int index) {
- PushFrameSlotAt(param0_index() + index);
- }
-
-  // Push the value of a parameter frame slot on top of the frame and
- // invalidate the parameter slot. The slot should be written to before
- // trying to read from it again.
- void TakeParameterAt(int index) {
- TakeFrameSlotAt(param0_index() + index);
- }
-
- // Store the top value on the virtual frame into a parameter frame slot.
- // The value is left in place on top of the frame.
- void StoreToParameterAt(int index) {
- StoreToFrameSlotAt(param0_index() + index);
- }
-
- // The receiver frame slot.
- Operand Receiver() { return ParameterAt(-1); }
-
- // Push a try-catch or try-finally handler on top of the virtual frame.
- void PushTryHandler(HandlerType type);
-
- // Call stub given the number of arguments it expects on (and
- // removes from) the stack.
- inline Result CallStub(CodeStub* stub, int arg_count);
-
-  // Call stub that takes a single argument passed in rax. The
-  // argument is given as a result which does not have to be rax or
- // even a register. The argument is consumed by the call.
- Result CallStub(CodeStub* stub, Result* arg);
-
-  // Call stub that takes a pair of arguments passed in rdx (arg0) and
-  // rax (arg1). The arguments are given as results which do not have
- // to be in the proper registers or even in registers. The
- // arguments are consumed by the call.
- Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
- // Call JS function from top of the stack with arguments
- // taken from the stack.
- Result CallJSFunction(int arg_count);
-
- // Call runtime given the number of arguments expected on (and
- // removed from) the stack.
- Result CallRuntime(Runtime::Function* f, int arg_count);
- Result CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- void DebugBreak();
-#endif
-
- // Invoke builtin given the number of arguments it expects on (and
- // removes from) the stack.
- Result InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- int arg_count);
-
- // Call load IC. Name and receiver are found on top of the frame.
- // Both are dropped.
- Result CallLoadIC(RelocInfo::Mode mode);
-
- // Call keyed load IC. Key and receiver are found on top of the
- // frame. Both are dropped.
- Result CallKeyedLoadIC(RelocInfo::Mode mode);
-
- // Call store IC. If the load is contextual, value is found on top of the
- // frame. If not, value and receiver are on the frame. Both are dropped.
- Result CallStoreIC(Handle<String> name, bool is_contextual,
- StrictModeFlag strict_mode);
-
- // Call keyed store IC. Value, key, and receiver are found on top
- // of the frame. All three are dropped.
- Result CallKeyedStoreIC(StrictModeFlag strict_mode);
-
- // Call call IC. Function name, arguments, and receiver are found on top
- // of the frame and dropped by the call.
- // The argument count does not include the receiver.
- Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
- // Call keyed call IC. Same calling convention as CallCallIC.
- Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
- // Allocate and call JS function as constructor. Arguments,
- // receiver (global object), and function are found on top of the
- // frame. Function is not dropped. The argument count does not
- // include the receiver.
- Result CallConstructor(int arg_count);
-
- // Drop a number of elements from the top of the expression stack. May
- // emit code to affect the physical frame. Does not clobber any registers
- // excepting possibly the stack pointer.
- void Drop(int count);
-
- // Drop one element.
- void Drop() { Drop(1); }
-
- // Duplicate the top element of the frame.
- void Dup() { PushFrameSlotAt(element_count() - 1); }
-
- // Duplicate the n'th element from the top of the frame.
- // Dup(1) is equivalent to Dup().
- void Dup(int n) {
- ASSERT(n > 0);
- PushFrameSlotAt(element_count() - n);
- }
-
- // Pop an element from the top of the expression stack. Returns a
- // Result, which may be a constant or a register.
- Result Pop();
-
- // Pop and save an element from the top of the expression stack and
- // emit a corresponding pop instruction.
- void EmitPop(Register reg);
- void EmitPop(const Operand& operand);
-
- // Push an element on top of the expression stack and emit a
- // corresponding push instruction.
- void EmitPush(Register reg,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(const Operand& operand,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(Heap::RootListIndex index,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(Immediate immediate,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(Smi* value);
- // Uses kScratchRegister, emits appropriate relocation info.
- void EmitPush(Handle<Object> value);
-
- inline bool ConstantPoolOverflowed();
-
- // Push an element on the virtual frame.
- void Push(Handle<Object> value);
- inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
- inline void Push(Smi* value);
-
- // Pushing a result invalidates it (its contents become owned by the
- // frame).
- void Push(Result* result) {
- if (result->is_register()) {
- Push(result->reg(), result->type_info());
- } else {
- ASSERT(result->is_constant());
- Push(result->handle());
- }
- result->Unuse();
- }
-
-  // Pushing an expression requires that the expression be trivial (according
- // to Expression::IsTrivial).
- void Push(Expression* expr);
-
- // Nip removes zero or more elements from immediately below the top
- // of the frame, leaving the previous top-of-frame value on top of
- // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
- inline void Nip(int num_dropped);
-
- inline void SetTypeForLocalAt(int index, TypeInfo info);
- inline void SetTypeForParamAt(int index, TypeInfo info);
-
- private:
- static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
- static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
- static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
- static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
-
- ZoneList<FrameElement> elements_;
-
- // The index of the element that is at the processor's stack pointer
-  // (the rsp register).
- int stack_pointer_;
-
- // The index of the register frame element using each register, or
- // kIllegalIndex if a register is not on the frame.
- int register_locations_[RegisterAllocator::kNumRegisters];
-
- // The number of frame-allocated locals and parameters respectively.
- inline int parameter_count();
- inline int local_count();
-
- // The index of the element that is at the processor's frame pointer
-  // (the rbp register). The parameters, receiver, and return address
- // are below the frame pointer.
- int frame_pointer() { return parameter_count() + 2; }
-
- // The index of the first parameter. The receiver lies below the first
- // parameter.
- int param0_index() { return 1; }
-
- // The index of the context slot in the frame. It is immediately
- // above the frame pointer.
- int context_index() { return frame_pointer() + 1; }
-
- // The index of the function slot in the frame. It is above the frame
- // pointer and the context slot.
- int function_index() { return frame_pointer() + 2; }
-
- // The index of the first local. Between the frame pointer and the
- // locals lie the context and the function.
- int local0_index() { return frame_pointer() + 3; }
-
- // The index of the base of the expression stack.
- int expression_base_index() { return local0_index() + local_count(); }
-
- // Convert a frame index into a frame pointer relative offset into the
- // actual stack.
- int fp_relative(int index) {
- ASSERT(index < element_count());
- ASSERT(frame_pointer() < element_count()); // FP is on the frame.
- return (frame_pointer() - index) * kPointerSize;
- }
-
- // Record an occurrence of a register in the virtual frame. This has the
- // effect of incrementing the register's external reference count and
- // of updating the index of the register's location in the frame.
- void Use(Register reg, int index) {
- ASSERT(!is_used(reg));
- set_register_location(reg, index);
- cgen()->allocator()->Use(reg);
- }
-
- // Record that a register reference has been dropped from the frame. This
- // decrements the register's external reference count and invalidates the
- // index of the register's location in the frame.
- void Unuse(Register reg) {
- ASSERT(is_used(reg));
- set_register_location(reg, kIllegalIndex);
- cgen()->allocator()->Unuse(reg);
- }
-
- // Spill the element at a particular index---write it to memory if
- // necessary, free any associated register, and forget its value if
- // constant.
- void SpillElementAt(int index);
-
- // Sync the element at a particular index. If it is a register or
- // constant that disagrees with the value on the stack, write it to memory.
- // Keep the element type as register or constant, and clear the dirty bit.
- void SyncElementAt(int index);
-
- // Sync a single unsynced element that lies beneath or at the stack pointer.
- void SyncElementBelowStackPointer(int index);
-
- // Sync a single unsynced element that lies just above the stack pointer.
- void SyncElementByPushing(int index);
-
- // Push a copy of a frame slot (typically a local or parameter) on top of
- // the frame.
- inline void PushFrameSlotAt(int index);
-
-  // Push the value of a frame slot (typically a local or parameter) on
- // top of the frame and invalidate the slot.
- void TakeFrameSlotAt(int index);
-
- // Store the value on top of the frame to a frame slot (typically a local
- // or parameter).
- void StoreToFrameSlotAt(int index);
-
- // Spill all elements in registers. Spill the top spilled_args elements
- // on the frame. Sync all other frame elements.
- // Then drop dropped_args elements from the virtual frame, to match
- // the effect of an upcoming call that will drop them from the stack.
- void PrepareForCall(int spilled_args, int dropped_args);
-
- // Move frame elements currently in registers or constants, that
- // should be in memory in the expected frame, to memory.
- void MergeMoveRegistersToMemory(VirtualFrame* expected);
-
- // Make the register-to-register moves necessary to
- // merge this frame with the expected frame.
- // Register to memory moves must already have been made,
- // and memory to register moves must follow this call.
- // This is because some new memory-to-register moves are
- // created in order to break cycles of register moves.
- // Used in the implementation of MergeTo().
- void MergeMoveRegistersToRegisters(VirtualFrame* expected);
-
- // Make the memory-to-register and constant-to-register moves
- // needed to make this frame equal the expected frame.
- // Called after all register-to-memory and register-to-register
- // moves have been made. After this function returns, the frames
- // should be equal.
- void MergeMoveMemoryToRegisters(VirtualFrame* expected);
-
- // Invalidates a frame slot (puts an invalid frame element in it).
- // Copies on the frame are correctly handled, and if this slot was
- // the backing store of copies, the index of the new backing store
- // is returned. Otherwise, returns kIllegalIndex.
- // Register counts are correctly updated.
- int InvalidateFrameSlotAt(int index);
-
- // This function assumes that a and b are the only results that could be in
- // the registers a_reg or b_reg. Other results can be live, but must not
- // be in the registers a_reg or b_reg. The results a and b are invalidated.
- void MoveResultsToRegisters(Result* a,
- Result* b,
- Register a_reg,
- Register b_reg);
-
- // Call a code stub that has already been prepared for calling (via
- // PrepareForCall).
- Result RawCallStub(CodeStub* stub);
-
- // Calls a code object which has already been prepared for calling
- // (via PrepareForCall).
- Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
- inline bool Equals(VirtualFrame* other);
-
- // Classes that need raw access to the elements_ array.
- friend class FrameRegisterState;
- friend class JumpTarget;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_X64_VIRTUAL_FRAME_X64_H_
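
The index arithmetic in the deleted header is easy to verify in isolation. A
minimal sketch of the frame layout, assuming a frame with two parameters and
three locals (only the formulas are taken from the header; the free functions
are stand-ins for the member functions):

    #include <cstdio>

    const int kPointerSize = 8;  // x64

    int parameter_count() { return 2; }   // assumed for this sketch
    int local_count() { return 3; }       // assumed for this sketch

    // Indices as defined in virtual-frame-x64.h: the frame pointer sits
    // above the parameters, receiver, and return address.
    int frame_pointer() { return parameter_count() + 2; }
    int param0_index() { return 1; }
    int context_index() { return frame_pointer() + 1; }
    int function_index() { return frame_pointer() + 2; }
    int local0_index() { return frame_pointer() + 3; }
    int expression_base_index() { return local0_index() + local_count(); }

    // Convert a frame index into an rbp-relative byte offset.
    int fp_relative(int index) {
      return (frame_pointer() - index) * kPointerSize;
    }

    int main() {
      printf("frame pointer index: %d\n", frame_pointer());         // 4
      printf("param0 offset: %d\n", fp_relative(param0_index()));   // 24
      printf("local0 offset: %d\n", fp_relative(local0_index()));   // -24
      return 0;
    }
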
diff --git a/deps/v8/src/zone-inl.h b/deps/v8/src/zone-inl.h
index 467296039..6e2d558a6 100644
--- a/deps/v8/src/zone-inl.h
+++ b/deps/v8/src/zone-inl.h
@@ -28,6 +28,7 @@
#ifndef V8_ZONE_INL_H_
#define V8_ZONE_INL_H_
+#include "isolate.h"
#include "zone.h"
#include "v8-counters.h"
@@ -35,8 +36,19 @@ namespace v8 {
namespace internal {
+AssertNoZoneAllocation::AssertNoZoneAllocation()
+ : prev_(Isolate::Current()->zone_allow_allocation()) {
+ Isolate::Current()->set_zone_allow_allocation(false);
+}
+
+
+AssertNoZoneAllocation::~AssertNoZoneAllocation() {
+ Isolate::Current()->set_zone_allow_allocation(prev_);
+}
+
+
inline void* Zone::New(int size) {
- ASSERT(AssertNoZoneAllocation::allow_allocation());
+ ASSERT(Isolate::Current()->zone_allow_allocation());
ASSERT(ZoneScope::nesting() > 0);
// Round up the requested size to fit the alignment.
size = RoundUp(size, kAlignment);
@@ -54,7 +66,7 @@ inline void* Zone::New(int size) {
template <typename T>
T* Zone::NewArray(int length) {
- return static_cast<T*>(Zone::New(length * sizeof(T)));
+ return static_cast<T*>(New(length * sizeof(T)));
}
@@ -65,7 +77,7 @@ bool Zone::excess_allocation() {
void Zone::adjust_segment_bytes_allocated(int delta) {
segment_bytes_allocated_ += delta;
- Counters::zone_segment_bytes.Set(segment_bytes_allocated_);
+ isolate_->counters()->zone_segment_bytes()->Set(segment_bytes_allocated_);
}
@@ -78,6 +90,51 @@ ZoneSplayTree<Config>::~ZoneSplayTree() {
}
+// TODO(isolates): for performance reasons, this should be replaced with a new
+// operator that takes the zone in which the object should be
+// allocated.
+void* ZoneObject::operator new(size_t size) {
+ return ZONE->New(static_cast<int>(size));
+}
+
+void* ZoneObject::operator new(size_t size, Zone* zone) {
+ return zone->New(static_cast<int>(size));
+}
+
+
+inline void* ZoneListAllocationPolicy::New(int size) {
+ return ZONE->New(size);
+}
+
+
+template <typename T>
+void* ZoneList<T>::operator new(size_t size) {
+ return ZONE->New(static_cast<int>(size));
+}
+
+
+template <typename T>
+void* ZoneList<T>::operator new(size_t size, Zone* zone) {
+ return zone->New(static_cast<int>(size));
+}
+
+
+ZoneScope::ZoneScope(Isolate* isolate, ZoneScopeMode mode)
+ : isolate_(isolate), mode_(mode) {
+ isolate_->zone()->scope_nesting_++;
+}
+
+
+bool ZoneScope::ShouldDeleteOnExit() {
+ return isolate_->zone()->scope_nesting_ == 1 && mode_ == DELETE_ON_EXIT;
+}
+
+
+int ZoneScope::nesting() {
+ return Isolate::Current()->zone()->scope_nesting_;
+}
+
+
} } // namespace v8::internal
#endif // V8_ZONE_INL_H_
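
The fast path of Zone::New shown above is a bump-pointer allocator: round the
request up to kAlignment, hand out the current position, and advance it. A
stand-alone sketch of that path (ToyZone, the 8 KB refill size, and the
malloc-based Expand are assumptions of this sketch; the real slow path is
Zone::NewExpand with geometric segment growth):

    #include <cstdint>
    #include <cstdlib>

    typedef uint8_t* Address;
    const int kAlignment = sizeof(void*);  // kPointerSize in V8

    // Round x up to the next multiple of m (m must be a power of two).
    static int RoundUp(int x, int m) { return (x + m - 1) & ~(m - 1); }

    struct ToyZone {
      Address position_ = nullptr;
      Address limit_ = nullptr;

      void* New(int size) {
        // Round the request up so every returned pointer stays aligned.
        size = RoundUp(size, kAlignment);
        if (limit_ - position_ < size) Expand(size);  // slow path
        Address result = position_;
        position_ += size;                            // bump the pointer
        return result;
      }

      void Expand(int size) {
        // Grossly simplified: one fresh 8 KB (or larger) block per refill.
        int segment = RoundUp(size + 8 * 1024, kAlignment);
        position_ = static_cast<Address>(malloc(segment));
        limit_ = position_ + segment;
      }
    };

    int main() {
      ToyZone zone;
      void* a = zone.New(13);  // rounded up to 16 on LP64
      void* b = zone.New(3);   // rounded up to 8, no refill needed
      return (a != b) ? 0 : 1;
    }

The new operator new(size_t, Zone*) overloads support a placement-new
pattern, so an object can be allocated into a chosen zone (for example,
new (zone) ZoneList<int>(4)) instead of always going through the current
isolate's ZONE.
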
diff --git a/deps/v8/src/zone.cc b/deps/v8/src/zone.cc
index f8dbaabc7..42ce8c5cb 100644
--- a/deps/v8/src/zone.cc
+++ b/deps/v8/src/zone.cc
@@ -34,20 +34,28 @@ namespace v8 {
namespace internal {
-Address Zone::position_ = 0;
-Address Zone::limit_ = 0;
-int Zone::zone_excess_limit_ = 256 * MB;
-int Zone::segment_bytes_allocated_ = 0;
+Zone::Zone()
+ : zone_excess_limit_(256 * MB),
+ segment_bytes_allocated_(0),
+ position_(0),
+ limit_(0),
+ scope_nesting_(0),
+ segment_head_(NULL) {
+}
unsigned Zone::allocation_size_ = 0;
-bool AssertNoZoneAllocation::allow_allocation_ = true;
-int ZoneScope::nesting_ = 0;
+ZoneScope::~ZoneScope() {
+ ASSERT_EQ(Isolate::Current(), isolate_);
+ if (ShouldDeleteOnExit()) isolate_->zone()->DeleteAll();
+ isolate_->zone()->scope_nesting_--;
+}
+
// Segments represent chunks of memory: They have a starting address
// (encoded in the this pointer) and a size in bytes. Segments are
// chained together forming a LIFO structure with the newest segment
-// available as Segment::head(). Segments are allocated using malloc()
+// available as segment_head_. Segments are allocated using malloc()
// and de-allocated using free().
class Segment {
@@ -61,45 +69,38 @@ class Segment {
Address start() const { return address(sizeof(Segment)); }
Address end() const { return address(size_); }
- static Segment* head() { return head_; }
- static void set_head(Segment* head) { head_ = head; }
-
-  // Creates a new segment, sets its size, and pushes it to the front
- // of the segment chain. Returns the new segment.
- static Segment* New(int size) {
- Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
- Zone::adjust_segment_bytes_allocated(size);
- if (result != NULL) {
- result->next_ = head_;
- result->size_ = size;
- head_ = result;
- }
- return result;
- }
-
- // Deletes the given segment. Does not touch the segment chain.
- static void Delete(Segment* segment, int size) {
- Zone::adjust_segment_bytes_allocated(-size);
- Malloced::Delete(segment);
- }
-
- static int bytes_allocated() { return bytes_allocated_; }
-
private:
// Computes the address of the nth byte in this segment.
Address address(int n) const {
return Address(this) + n;
}
- static Segment* head_;
- static int bytes_allocated_;
Segment* next_;
int size_;
+
+ friend class Zone;
};
-Segment* Segment::head_ = NULL;
-int Segment::bytes_allocated_ = 0;
+// Creates a new segment, sets its size, and pushes it to the front
+// of the segment chain. Returns the new segment.
+Segment* Zone::NewSegment(int size) {
+ Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
+ adjust_segment_bytes_allocated(size);
+ if (result != NULL) {
+ result->next_ = segment_head_;
+ result->size_ = size;
+ segment_head_ = result;
+ }
+ return result;
+}
+
+
+// Deletes the given segment. Does not touch the segment chain.
+void Zone::DeleteSegment(Segment* segment, int size) {
+ adjust_segment_bytes_allocated(-size);
+ Malloced::Delete(segment);
+}
void Zone::DeleteAll() {
@@ -109,14 +110,14 @@ void Zone::DeleteAll() {
#endif
// Find a segment with a suitable size to keep around.
- Segment* keep = Segment::head();
+ Segment* keep = segment_head_;
while (keep != NULL && keep->size() > kMaximumKeptSegmentSize) {
keep = keep->next();
}
// Traverse the chained list of segments, zapping (in debug mode)
// and freeing every segment except the one we wish to keep.
- Segment* current = Segment::head();
+ Segment* current = segment_head_;
while (current != NULL) {
Segment* next = current->next();
if (current == keep) {
@@ -128,7 +129,7 @@ void Zone::DeleteAll() {
// Zap the entire current segment (including the header).
memset(current, kZapDeadByte, size);
#endif
- Segment::Delete(current, size);
+ DeleteSegment(current, size);
}
current = next;
}
@@ -150,7 +151,7 @@ void Zone::DeleteAll() {
}
// Update the head segment to be the kept segment (if any).
- Segment::set_head(keep);
+ segment_head_ = keep;
}
@@ -164,7 +165,7 @@ Address Zone::NewExpand(int size) {
// strategy, where we increase the segment size every time we expand
// except that we employ a maximum segment size when we delete. This
// is to avoid excessive malloc() and free() overhead.
- Segment* head = Segment::head();
+ Segment* head = segment_head_;
int old_size = (head == NULL) ? 0 : head->size();
static const int kSegmentOverhead = sizeof(Segment) + kAlignment;
int new_size = kSegmentOverhead + size + (old_size << 1);
@@ -177,7 +178,7 @@ Address Zone::NewExpand(int size) {
// requested size.
new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize);
}
- Segment* segment = Segment::New(new_size);
+ Segment* segment = NewSegment(new_size);
if (segment == NULL) {
V8::FatalProcessOutOfMemory("Zone");
return NULL;
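
The size policy visible in the NewExpand hunks doubles the previous segment
and clamps the result. A sketch of just that computation (kSegmentOverhead
and the 1 MB cap are assumed values for this sketch, and the minimum-size
branch of the real code is omitted):

    #include <algorithm>
    #include <cstdio>

    const int kSegmentOverhead = 32;                  // stand-in for sizeof(Segment) + kAlignment
    const int kMaximumSegmentSize = 1 * 1024 * 1024;  // assumed cap

    // From Zone::NewExpand: grow geometrically, but cap the segment size so
    // a long-lived zone does not keep doubling its malloc requests forever.
    int NextSegmentSize(int old_size, int requested) {
      int new_size = kSegmentOverhead + requested + (old_size << 1);
      if (new_size > kMaximumSegmentSize) {
        // Never return less than the request itself needs.
        new_size = std::max(kSegmentOverhead + requested, kMaximumSegmentSize);
      }
      return new_size;
    }

    int main() {
      int size = 0;
      for (int i = 0; i < 14; i++) {  // the cap kicks in on the last step
        size = NextSegmentSize(size, 64);
        printf("segment %2d: %d bytes\n", i, size);
      }
      return 0;
    }
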
diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h
index e299f158a..a5e437f7f 100644
--- a/deps/v8/src/zone.h
+++ b/deps/v8/src/zone.h
@@ -28,6 +28,8 @@
#ifndef V8_ZONE_H_
#define V8_ZONE_H_
+#include "allocation.h"
+
namespace v8 {
namespace internal {
@@ -39,6 +41,7 @@ enum ZoneScopeMode {
DONT_DELETE_ON_EXIT
};
+class Segment;
// The Zone supports very fast allocation of small chunks of
// memory. The chunks cannot be deallocated individually, but instead
@@ -57,23 +60,25 @@ class Zone {
public:
// Allocate 'size' bytes of memory in the Zone; expands the Zone by
// allocating new segments of memory on demand using malloc().
- static inline void* New(int size);
+ inline void* New(int size);
template <typename T>
- static inline T* NewArray(int length);
+ inline T* NewArray(int length);
// Delete all objects and free all memory allocated in the Zone.
- static void DeleteAll();
+ void DeleteAll();
// Returns true if more memory has been allocated in zones than
// the limit allows.
- static inline bool excess_allocation();
+ inline bool excess_allocation();
- static inline void adjust_segment_bytes_allocated(int delta);
+ inline void adjust_segment_bytes_allocated(int delta);
static unsigned allocation_size_;
private:
+ friend class Isolate;
+ friend class ZoneScope;
// All pointers returned from New() have this alignment.
static const int kAlignment = kPointerSize;
@@ -88,30 +93,39 @@ class Zone {
static const int kMaximumKeptSegmentSize = 64 * KB;
// Report zone excess when allocation exceeds this limit.
- static int zone_excess_limit_;
+ int zone_excess_limit_;
// The number of bytes allocated in segments. Note that this number
// includes memory allocated from the OS but not yet allocated from
// the zone.
- static int segment_bytes_allocated_;
-
- // The Zone is intentionally a singleton; you should not try to
- // allocate instances of the class.
- Zone() { UNREACHABLE(); }
+ int segment_bytes_allocated_;
+ // Each isolate gets its own zone.
+ Zone();
// Expand the Zone to hold at least 'size' more bytes and allocate
// the bytes. Returns the address of the newly allocated chunk of
// memory in the Zone. Should only be called if there isn't enough
// room in the Zone already.
- static Address NewExpand(int size);
+ Address NewExpand(int size);
+
+  // Creates a new segment, sets its size, and pushes it to the front
+ // of the segment chain. Returns the new segment.
+ Segment* NewSegment(int size);
+ // Deletes the given segment. Does not touch the segment chain.
+ void DeleteSegment(Segment* segment, int size);
// The free region in the current (front) segment is represented as
// the half-open interval [position, limit). The 'position' variable
// is guaranteed to be aligned as dictated by kAlignment.
- static Address position_;
- static Address limit_;
+ Address position_;
+ Address limit_;
+
+ int scope_nesting_;
+
+ Segment* segment_head_;
+ Isolate* isolate_;
};
@@ -120,7 +134,8 @@ class Zone {
class ZoneObject {
public:
// Allocate a new ZoneObject of 'size' bytes in the Zone.
- void* operator new(size_t size) { return Zone::New(static_cast<int>(size)); }
+ INLINE(void* operator new(size_t size));
+ INLINE(void* operator new(size_t size, Zone* zone));
// Ideally, the delete operator should be private instead of
// public, but unfortunately the compiler sometimes synthesizes
@@ -136,14 +151,10 @@ class ZoneObject {
class AssertNoZoneAllocation {
public:
- AssertNoZoneAllocation() : prev_(allow_allocation_) {
- allow_allocation_ = false;
- }
- ~AssertNoZoneAllocation() { allow_allocation_ = prev_; }
- static bool allow_allocation() { return allow_allocation_; }
+ inline AssertNoZoneAllocation();
+ inline ~AssertNoZoneAllocation();
private:
bool prev_;
- static bool allow_allocation_;
};
@@ -153,7 +164,7 @@ class AssertNoZoneAllocation {
class ZoneListAllocationPolicy {
public:
// Allocate 'size' bytes of memory in the zone.
- static void* New(int size) { return Zone::New(size); }
+ INLINE(static void* New(int size));
// De-allocation attempts are silently ignored.
static void Delete(void* p) { }
@@ -167,6 +178,9 @@ class ZoneListAllocationPolicy {
template<typename T>
class ZoneList: public List<T, ZoneListAllocationPolicy> {
public:
+ INLINE(void* operator new(size_t size));
+ INLINE(void* operator new(size_t size, Zone* zone));
+
// Construct a new ZoneList with the given capacity; the length is
// always zero. The capacity must be non-negative.
explicit ZoneList(int capacity)
@@ -189,18 +203,11 @@ typedef ZoneList<Handle<Map> > ZoneMapList;
// outer-most scope.
class ZoneScope BASE_EMBEDDED {
public:
- explicit ZoneScope(ZoneScopeMode mode) : mode_(mode) {
- nesting_++;
- }
+ INLINE(ZoneScope(Isolate* isolate, ZoneScopeMode mode));
- virtual ~ZoneScope() {
- if (ShouldDeleteOnExit()) Zone::DeleteAll();
- --nesting_;
- }
+ virtual ~ZoneScope();
- bool ShouldDeleteOnExit() {
- return nesting_ == 1 && mode_ == DELETE_ON_EXIT;
- }
+ inline bool ShouldDeleteOnExit();
// For ZoneScopes that do not delete on exit by default, call this
// method to request deletion on exit.
@@ -208,11 +215,11 @@ class ZoneScope BASE_EMBEDDED {
mode_ = DELETE_ON_EXIT;
}
- static int nesting() { return nesting_; }
+ inline static int nesting();
private:
+ Isolate* isolate_;
ZoneScopeMode mode_;
- static int nesting_;
};
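
The ZoneScope changes move the nesting count from a static onto the isolate's
zone, but the rule itself is unchanged: only the outermost DELETE_ON_EXIT
scope frees the zone's memory. A stand-alone model of that rule (ToyZone and
ToyZoneScope are stand-ins without the Isolate plumbing):

    #include <cassert>

    enum ZoneScopeMode { DELETE_ON_EXIT, DONT_DELETE_ON_EXIT };

    struct ToyZone {
      int scope_nesting_ = 0;
      bool deleted = false;
      void DeleteAll() { deleted = true; }
    };

    struct ToyZoneScope {
      ToyZone* zone_;
      ZoneScopeMode mode_;
      ToyZoneScope(ToyZone* zone, ZoneScopeMode mode)
          : zone_(zone), mode_(mode) {
        zone_->scope_nesting_++;
      }
      bool ShouldDeleteOnExit() {
        return zone_->scope_nesting_ == 1 && mode_ == DELETE_ON_EXIT;
      }
      ~ToyZoneScope() {
        if (ShouldDeleteOnExit()) zone_->DeleteAll();
        zone_->scope_nesting_--;
      }
    };

    int main() {
      ToyZone zone;
      {
        ToyZoneScope outer(&zone, DELETE_ON_EXIT);
        {
          ToyZoneScope inner(&zone, DELETE_ON_EXIT);
        }  // inner exits: nesting was 2, nothing freed
        assert(!zone.deleted);
      }  // outer exits: nesting was 1, zone freed
      assert(zone.deleted);
      return 0;
    }
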